repo_name: string (length 6 to 130)
hexsha: list
file_path: list
code: list
apis: list
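A minimal sketch of how rows with this schema might be read, assuming they are stored as JSON Lines; the file name code_apis.jsonl and the helper iter_records are placeholders, not part of the dataset itself. Each record carries parallel lists, so apis[i] describes the API calls extracted from code[i], which lives at file_path[i].

import json

def iter_records(path: str):
    """Yield one record per line of a JSON Lines file; each record holds
    parallel lists of hexsha, file_path, code and apis for one repository."""
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

if __name__ == "__main__":
    # "code_apis.jsonl" is a hypothetical file name for the rows listed below.
    for record in iter_records("code_apis.jsonl"):
        for path, apis in zip(record["file_path"], record["apis"]):
            print(f"{record['repo_name']}:{path} uses {len(apis)} APIs")
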
spacemonkey01/pypastry
[ "b5c71d114af5740344aad7ba577c06fbf02a6046", "b5c71d114af5740344aad7ba577c06fbf02a6046" ]
[ "pypastry/experiment/evaluation.py", "tests/evaluation_test.py" ]
[ "from datetime import datetime\nfrom types import ModuleType\n\nimport numpy as np\nimport pandas as pd\nfrom git import Repo\nfrom joblib import Parallel, delayed\nfrom pypastry.experiment import Experiment\nfrom pypastry.experiment.hasher import get_dataset_hash\nfrom pypastry.experiment.results import ResultsRepo\nfrom scipy.stats import sem\nfrom sklearn.base import BaseEstimator, is_classifier, clone\nfrom sklearn.model_selection import check_cv\n\n\nclass ExperimentRunner:\n def __init__(self, git_repo: Repo, results_repo: ResultsRepo, results_display: ModuleType):\n self.git_repo = git_repo\n self.results_repo = results_repo\n self.results_display = results_display\n\n def run_experiment(self, experiment: Experiment, force: bool, message: str):\n print(\"Got dataset with {} rows\".format(len(experiment.dataset)))\n if force or self.git_repo.is_dirty():\n print(\"Running evaluation\")\n self._run_evaluation(experiment, message)\n results = self.results_repo.get_results(self.git_repo)\n self.results_display.cache_display(results)\n else:\n print(\"Clean repo, nothing to do\")\n self.results_display.print_cache_file()\n\n def _run_evaluation(self, experiment: Experiment, message: str):\n X = experiment.dataset.drop(columns=[experiment.label_column])\n y = experiment.dataset[experiment.label_column]\n predictors = [experiment.predictor]\n if experiment.group_column is None:\n groups = None\n else:\n groups = X[experiment.group_column]\n X = X.drop(columns=[experiment.group_column])\n run_infos = _evaluate_predictors(X, predictors, y, groups, experiment.cross_validator, experiment.scorer)\n self.git_repo.git.add(update=True)\n dataset_hash = get_dataset_hash(experiment.dataset)\n dataset_info = {\n 'hash': dataset_hash,\n 'columns': experiment.dataset.columns.tolist(),\n }\n new_filenames = self.results_repo.save_results(run_infos, dataset_info)\n self.git_repo.index.add(new_filenames)\n self.git_repo.index.commit(message)\n\n\ndef _evaluate_predictors(X, predictors, y, groups, cross_validator, scorer):\n run_infos = []\n for predictor in predictors:\n start = datetime.utcnow()\n\n scores = _get_scores(X, y, groups, cross_validator, predictor, scorer)\n\n end = datetime.utcnow()\n\n scores_array = np.hstack(scores)\n mean_score = scores_array.mean()\n sem_score = sem(scores_array)\n results = {'test_score': mean_score, 'test_score_sem': sem_score}\n\n model_info = get_model_info(predictor)\n\n run_info = {\n 'run_start': str(start),\n 'run_end': str(end),\n 'run_seconds': (end - start).total_seconds(),\n 'results': results,\n 'model_info': model_info,\n }\n run_infos.append(run_info)\n return run_infos\n\n\ndef get_model_info(model: BaseEstimator):\n info = model.get_params()\n info['type'] = type(model).__name__\n return info\n\n\ndef _get_scores(X, y, groups, cv, estimator, scorer):\n cv = check_cv(cv, y, classifier=is_classifier(estimator))\n\n # We clone the estimator to make sure that all the folds are\n # independent, and that it is pickle-able.\n parallel = Parallel(n_jobs=None, verbose=False,\n pre_dispatch='2*n_jobs')\n scores = parallel(\n delayed(_fit_and_predict)(\n clone(estimator), X, y, train, test, groups, scorer)\n for train, test in cv.split(X, y, groups))\n return scores\n\n\ndef _fit_and_predict(estimator: BaseEstimator, X, y, train, test, groups, scorer):\n if groups is not None:\n return _fit_and_predict_groups(X, estimator, groups, scorer, test, train, y)\n else:\n return _fit_and_predict_simple(X, estimator, scorer, test, train, y)\n\n\ndef 
_fit_and_predict_simple(X, estimator, scorer, test, train, y):\n X_train = X.iloc[train]\n y_train = y.iloc[train]\n estimator.fit(X_train, y_train)\n X_test = X.iloc[test]\n y_test = y.iloc[test]\n score = scorer(estimator, X_test, y_test)\n return [score]\n\n\ndef _fit_and_predict_groups(X, estimator, groups, scorer, test, train, y):\n X_train = X.iloc[train]\n y_train = y.iloc[train]\n estimator.fit(X_train, y_train)\n X_test = X.iloc[test]\n y_test = y.iloc[test]\n groups_test = groups.iloc[test]\n test_df = pd.DataFrame(X_test)\n test_df['y'] = y_test\n test_df['groups'] = groups_test\n # test_df = pd.DataFrame({'X': X_test.values, 'y': y_test.values, 'groups': groups_test.values})\n test_groups = test_df.groupby('groups')\n scores = []\n for key, group in test_groups:\n X_group = group[X.columns]\n score = scorer(estimator, X_group, group['y'])\n scores.append(score)\n print(\"SCORES\", scores)\n return scores\n\n", "from unittest.mock import Mock\n\nfrom pandas import DataFrame\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.metrics import accuracy_score, make_scorer\nfrom sklearn.model_selection import StratifiedShuffleSplit, GroupShuffleSplit\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom pypastry.experiment import Experiment\nfrom pypastry.experiment.evaluation import ExperimentRunner\n\n\ndef test_simple_evaluation():\n # Given\n dataset = DataFrame({\n 'a': [1, 1, 0, 0],\n 'b': [1, 1, 0, 0],\n })\n\n cross_validation = StratifiedShuffleSplit(n_splits=1, test_size=0.5)\n predictor = DecisionTreeClassifier()\n scorer = make_scorer(accuracy_score)\n experiment = Experiment(dataset, 'b', predictor, cross_validation, scorer)\n\n git_mock = Mock()\n results_repo_mock = Mock()\n new_results_files = ['results/abc.json']\n results_repo_mock.save_results.return_value = new_results_files\n results_display_mock = Mock()\n runner = ExperimentRunner(git_mock, results_repo_mock, results_display_mock)\n commit_message = \"Test commit message\"\n\n # When\n runner.run_experiment(experiment, False, commit_message)\n\n # Then\n call_args_list = results_repo_mock.save_results.call_args_list\n assert 1 == len(call_args_list)\n run_infos, dataset_info = call_args_list[0][0]\n\n assert len(run_infos) == 1\n\n results = run_infos[0]['results']\n assert 1.0 == results['test_score']\n assert ['a', 'b'] == dataset_info['columns']\n\n # TODO: check the hash. 
Need to find a way to make this consistent between python versions etc.\n # assert '28ea628a50a47c726a9b0ec437c88fc4742d81fd' == dataset_info['hash']\n\n git_mock.git.add.assert_called_once_with(update=True)\n git_mock.index.add.assert_called_once_with(new_results_files)\n git_mock.index.commit.assert_called_once_with(commit_message)\n\n assert 1 == len(results_display_mock.cache_display.call_args_list)\n print(results_display_mock.cache_display.call_args[0])\n assert len(results_display_mock.cache_display.call_args[0]) > 0\n assert 1 == len(results_display_mock.print_cache_file.call_args_list)\n\n\ndef test_grouped_evaluation():\n label = [i % 2 for i in range(100)]\n dataset = DataFrame({\n 'a': label,\n 'b': label,\n 'g': [i // 2 for i in range(100)]\n })\n\n cross_validation = GroupShuffleSplit(n_splits=1, test_size=0.5)\n predictor = DummyClassifier(strategy='constant', constant=1)\n scorer = make_scorer(accuracy_score)\n experiment = Experiment(dataset, 'b', predictor, cross_validation, scorer, group_column='g')\n\n git_mock = Mock()\n results_repo_mock = Mock()\n results_display_mock = Mock()\n runner = ExperimentRunner(git_mock, results_repo_mock, results_display_mock)\n\n runner.run_experiment(experiment, False, \"Test commit message\")\n\n assert 1 == len(results_repo_mock.save_results.call_args_list)\n run_infos, dataset_info = results_repo_mock.save_results.call_args[0]\n\n assert len(run_infos) == 1\n\n results = run_infos[0]['results']\n assert 0.5 == results['test_score']\n assert 0.0 == results['test_score_sem']\n" ]
[ [ "numpy.hstack", "pandas.DataFrame", "sklearn.base.clone", "sklearn.base.is_classifier", "scipy.stats.sem" ], [ "sklearn.model_selection.GroupShuffleSplit", "sklearn.dummy.DummyClassifier", "pandas.DataFrame", "sklearn.tree.DecisionTreeClassifier", "sklearn.metrics.make_scorer", "sklearn.model_selection.StratifiedShuffleSplit" ] ]
DeepThoughtTeam/tensorflow
[ "486a8950537c04fbd1c781b6e0bd0bf0999cc9a4" ]
[ "tensorflow/examples/tutorials/mnist/mnist_softmax.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A very simple MNIST classifier.\n\nSee extensive documentation at\nhttp://tensorflow.org/tutorials/mnist/beginners/index.md\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Import data\n# from tensorflow.examples.tutorials.mnist import input_data\nimport input_data\n\nimport tensorflow as tf\n\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\nsess = tf.InteractiveSession()\n\n# Create the model\nx = tf.placeholder(tf.float32, [None, 784])\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\n# Define loss and optimizer\ny_ = tf.placeholder(tf.float32, [None, 10])\ncross_entropy = -tf.reduce_sum(y_ * tf.log(y))\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n\n# Train\ntf.initialize_all_variables().run()\nfor i in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n train_step.run({x: batch_xs, y_: batch_ys})\n\n# Test trained model\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))\n" ]
[ [ "tensorflow.matmul", "tensorflow.InteractiveSession", "tensorflow.zeros", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.initialize_all_variables", "tensorflow.train.GradientDescentOptimizer", "tensorflow.log", "tensorflow.argmax" ] ]
robgon-art/stylegan2-ada-pytorch
[ "51b24a249469b08e17ff2d6b2cd618d9cbf79e0f" ]
[ "train.py" ]
[ "# from https://github.com/rosinality/stylegan2-pytorch\r\n\r\nimport argparse\r\nimport math\r\nimport random\r\nimport os\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn, autograd, optim\r\nfrom torch.nn import functional as F\r\nfrom torch.utils import data\r\nimport torch.distributed as dist\r\nfrom torchvision import transforms, utils\r\nfrom tqdm import tqdm\r\n\r\ntry:\r\n import wandb\r\n\r\nexcept ImportError:\r\n wandb = None\r\n\r\n\r\nfrom dataset import MultiResolutionDataset\r\nfrom distributed import (\r\n get_rank,\r\n synchronize,\r\n reduce_loss_dict,\r\n reduce_sum,\r\n get_world_size,\r\n)\r\nfrom op import conv2d_gradfix\r\nfrom non_leaking import augment, AdaptiveAugment\r\n\r\n\r\ndef data_sampler(dataset, shuffle, distributed):\r\n if distributed:\r\n return data.distributed.DistributedSampler(dataset, shuffle=shuffle)\r\n\r\n if shuffle:\r\n return data.RandomSampler(dataset)\r\n\r\n else:\r\n return data.SequentialSampler(dataset)\r\n\r\n\r\ndef requires_grad(model, flag=True):\r\n for p in model.parameters():\r\n p.requires_grad = flag\r\n\r\n\r\ndef accumulate(model1, model2, decay=0.999):\r\n par1 = dict(model1.named_parameters())\r\n par2 = dict(model2.named_parameters())\r\n\r\n for k in par1.keys():\r\n par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)\r\n\r\n\r\ndef sample_data(loader):\r\n while True:\r\n for batch in loader:\r\n yield batch\r\n\r\n\r\ndef d_logistic_loss(real_pred, fake_pred):\r\n real_loss = F.softplus(-real_pred)\r\n fake_loss = F.softplus(fake_pred)\r\n\r\n return real_loss.mean() + fake_loss.mean()\r\n\r\n\r\ndef d_r1_loss(real_pred, real_img):\r\n with conv2d_gradfix.no_weight_gradients():\r\n grad_real, = autograd.grad(\r\n outputs=real_pred.sum(), inputs=real_img, create_graph=True\r\n )\r\n grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()\r\n\r\n return grad_penalty\r\n\r\n\r\ndef g_nonsaturating_loss(fake_pred):\r\n loss = F.softplus(-fake_pred).mean()\r\n\r\n return loss\r\n\r\n\r\ndef g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):\r\n noise = torch.randn_like(fake_img) / math.sqrt(\r\n fake_img.shape[2] * fake_img.shape[3]\r\n )\r\n grad, = autograd.grad(\r\n outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True\r\n )\r\n path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))\r\n\r\n path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)\r\n\r\n path_penalty = (path_lengths - path_mean).pow(2).mean()\r\n\r\n return path_penalty, path_mean.detach(), path_lengths\r\n\r\n\r\ndef make_noise(batch, latent_dim, n_noise, device):\r\n if n_noise == 1:\r\n return torch.randn(batch, latent_dim, device=device)\r\n\r\n noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)\r\n\r\n return noises\r\n\r\n\r\ndef mixing_noise(batch, latent_dim, prob, device):\r\n if prob > 0 and random.random() < prob:\r\n return make_noise(batch, latent_dim, 2, device)\r\n\r\n else:\r\n return [make_noise(batch, latent_dim, 1, device)]\r\n\r\n\r\ndef set_grad_none(model, targets):\r\n for n, p in model.named_parameters():\r\n if n in targets:\r\n p.grad = None\r\n\r\n\r\ndef train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device):\r\n loader = sample_data(loader)\r\n\r\n pbar = range(args.iter)\r\n\r\n if get_rank() == 0:\r\n pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)\r\n\r\n mean_path_length = 0\r\n\r\n d_loss_val = 0\r\n r1_loss = torch.tensor(0.0, 
device=device)\r\n g_loss_val = 0\r\n path_loss = torch.tensor(0.0, device=device)\r\n path_lengths = torch.tensor(0.0, device=device)\r\n mean_path_length_avg = 0\r\n loss_dict = {}\r\n\r\n if args.distributed:\r\n g_module = generator.module\r\n d_module = discriminator.module\r\n\r\n else:\r\n g_module = generator\r\n d_module = discriminator\r\n\r\n accum = 0.5 ** (32 / (10 * 1000))\r\n ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0\r\n r_t_stat = 0\r\n\r\n if args.augment and args.augment_p == 0:\r\n ada_augment = AdaptiveAugment(args.ada_target, args.ada_length, 8, device)\r\n\r\n sample_z = torch.randn(args.n_sample, args.latent, device=device)\r\n\r\n for idx in pbar:\r\n i = idx + args.start_iter\r\n\r\n if i > args.iter:\r\n print(\"Done!\")\r\n\r\n break\r\n\r\n real_img = next(loader)\r\n real_img = real_img.to(device)\r\n\r\n requires_grad(generator, False)\r\n requires_grad(discriminator, True)\r\n\r\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\r\n fake_img, _ = generator(noise)\r\n\r\n if args.augment:\r\n real_img_aug, _ = augment(real_img, ada_aug_p)\r\n fake_img, _ = augment(fake_img, ada_aug_p)\r\n\r\n else:\r\n real_img_aug = real_img\r\n\r\n fake_pred = discriminator(fake_img)\r\n real_pred = discriminator(real_img_aug)\r\n d_loss = d_logistic_loss(real_pred, fake_pred)\r\n\r\n loss_dict[\"d\"] = d_loss\r\n loss_dict[\"real_score\"] = real_pred.mean()\r\n loss_dict[\"fake_score\"] = fake_pred.mean()\r\n\r\n discriminator.zero_grad()\r\n d_loss.backward()\r\n d_optim.step()\r\n\r\n if args.augment and args.augment_p == 0:\r\n ada_aug_p = ada_augment.tune(real_pred)\r\n r_t_stat = ada_augment.r_t_stat\r\n\r\n d_regularize = i % args.d_reg_every == 0\r\n\r\n if d_regularize:\r\n real_img.requires_grad = True\r\n\r\n if args.augment:\r\n real_img_aug, _ = augment(real_img, ada_aug_p)\r\n\r\n else:\r\n real_img_aug = real_img\r\n\r\n real_pred = discriminator(real_img_aug)\r\n r1_loss = d_r1_loss(real_pred, real_img)\r\n\r\n discriminator.zero_grad()\r\n (args.r1 / 2 * r1_loss * args.d_reg_every + 0 * real_pred[0]).backward()\r\n\r\n d_optim.step()\r\n\r\n loss_dict[\"r1\"] = r1_loss\r\n\r\n requires_grad(generator, True)\r\n requires_grad(discriminator, False)\r\n\r\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\r\n fake_img, _ = generator(noise)\r\n\r\n if args.augment:\r\n fake_img, _ = augment(fake_img, ada_aug_p)\r\n\r\n fake_pred = discriminator(fake_img)\r\n g_loss = g_nonsaturating_loss(fake_pred)\r\n\r\n loss_dict[\"g\"] = g_loss\r\n\r\n generator.zero_grad()\r\n g_loss.backward()\r\n g_optim.step()\r\n\r\n g_regularize = i % args.g_reg_every == 0\r\n\r\n if g_regularize:\r\n path_batch_size = max(1, args.batch // args.path_batch_shrink)\r\n noise = mixing_noise(path_batch_size, args.latent, args.mixing, device)\r\n fake_img, latents = generator(noise, return_latents=True)\r\n\r\n path_loss, mean_path_length, path_lengths = g_path_regularize(\r\n fake_img, latents, mean_path_length\r\n )\r\n\r\n generator.zero_grad()\r\n weighted_path_loss = args.path_regularize * args.g_reg_every * path_loss\r\n\r\n if args.path_batch_shrink:\r\n weighted_path_loss += 0 * fake_img[0, 0, 0, 0]\r\n\r\n weighted_path_loss.backward()\r\n\r\n g_optim.step()\r\n\r\n mean_path_length_avg = (\r\n reduce_sum(mean_path_length).item() / get_world_size()\r\n )\r\n\r\n loss_dict[\"path\"] = path_loss\r\n loss_dict[\"path_length\"] = path_lengths.mean()\r\n\r\n accumulate(g_ema, g_module, accum)\r\n\r\n loss_reduced = 
reduce_loss_dict(loss_dict)\r\n\r\n d_loss_val = loss_reduced[\"d\"].mean().item()\r\n g_loss_val = loss_reduced[\"g\"].mean().item()\r\n r1_val = loss_reduced[\"r1\"].mean().item()\r\n path_loss_val = loss_reduced[\"path\"].mean().item()\r\n real_score_val = loss_reduced[\"real_score\"].mean().item()\r\n fake_score_val = loss_reduced[\"fake_score\"].mean().item()\r\n path_length_val = loss_reduced[\"path_length\"].mean().item()\r\n\r\n if get_rank() == 0:\r\n pbar.set_description(\r\n (\r\n f\"d: {d_loss_val:.4f}; g: {g_loss_val:.4f}; r1: {r1_val:.4f}; \"\r\n f\"path: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; \"\r\n f\"augment: {ada_aug_p:.4f}\"\r\n )\r\n )\r\n\r\n if wandb and args.wandb:\r\n wandb.log(\r\n {\r\n \"Generator\": g_loss_val,\r\n \"Discriminator\": d_loss_val,\r\n \"Augment\": ada_aug_p,\r\n \"Rt\": r_t_stat,\r\n \"R1\": r1_val,\r\n \"Path Length Regularization\": path_loss_val,\r\n \"Mean Path Length\": mean_path_length,\r\n \"Real Score\": real_score_val,\r\n \"Fake Score\": fake_score_val,\r\n \"Path Length\": path_length_val,\r\n }\r\n )\r\n\r\n if i % 1000 == 0:\r\n with torch.no_grad():\r\n g_ema.eval()\r\n sample, _ = g_ema([sample_z])\r\n utils.save_image(\r\n sample,\r\n f\"{args.data_dir}/sample/{str(i).zfill(6)}.jpg\",\r\n nrow=int(args.n_sample ** 0.5),\r\n normalize=True,\r\n range=(-1, 1),\r\n )\r\n\r\n if i % 1000 == 0:\r\n torch.save(\r\n {\r\n \"g\": g_module.state_dict(),\r\n \"d\": d_module.state_dict(),\r\n \"g_ema\": g_ema.state_dict(),\r\n \"g_optim\": g_optim.state_dict(),\r\n \"d_optim\": d_optim.state_dict(),\r\n \"args\": args,\r\n \"ada_aug_p\": ada_aug_p,\r\n },\r\n f\"{args.data_dir}/checkpoint/{str(i).zfill(6)}.pt\",\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n device = \"cuda\"\r\n\r\n parser = argparse.ArgumentParser(description=\"StyleGAN2 trainer\")\r\n\r\n parser.add_argument(\"path\", type=str, help=\"path to the lmdb dataset\")\r\n parser.add_argument('--arch', type=str, default='stylegan2', help='model architectures (stylegan2 | swagan)')\r\n parser.add_argument(\r\n \"--iter\", type=int, default=800000, help=\"total training iterations\"\r\n )\r\n parser.add_argument(\r\n \"--batch\", type=int, default=16, help=\"batch sizes for each gpus\"\r\n )\r\n parser.add_argument(\r\n \"--n_sample\",\r\n type=int,\r\n default=64,\r\n help=\"number of the samples generated during training\",\r\n )\r\n parser.add_argument(\r\n \"--size\", type=int, default=256, help=\"image sizes for the model\"\r\n )\r\n parser.add_argument(\r\n \"--r1\", type=float, default=10, help=\"weight of the r1 regularization\"\r\n )\r\n parser.add_argument(\r\n \"--path_regularize\",\r\n type=float,\r\n default=2,\r\n help=\"weight of the path length regularization\",\r\n )\r\n parser.add_argument(\r\n \"--path_batch_shrink\",\r\n type=int,\r\n default=2,\r\n help=\"batch size reducing factor for the path length regularization (reduce memory consumption)\",\r\n )\r\n parser.add_argument(\r\n \"--d_reg_every\",\r\n type=int,\r\n default=16,\r\n help=\"interval of the applying r1 regularization\",\r\n )\r\n parser.add_argument(\r\n \"--g_reg_every\",\r\n type=int,\r\n default=4,\r\n help=\"interval of the applying path length regularization\",\r\n )\r\n parser.add_argument(\r\n \"--mixing\", type=float, default=0.9, help=\"probability of latent code mixing\"\r\n )\r\n parser.add_argument(\r\n \"--ckpt\",\r\n type=str,\r\n default=None,\r\n help=\"path to the checkpoints to resume training\",\r\n )\r\n parser.add_argument(\"--lr\", type=float, 
default=0.002, help=\"learning rate\")\r\n parser.add_argument(\r\n \"--channel_multiplier\",\r\n type=int,\r\n default=2,\r\n help=\"channel multiplier factor for the model. config-f = 2, else = 1\",\r\n )\r\n parser.add_argument(\r\n \"--wandb\", action=\"store_true\", help=\"use weights and biases logging\"\r\n )\r\n parser.add_argument(\r\n \"--local_rank\", type=int, default=0, help=\"local rank for distributed training\"\r\n )\r\n parser.add_argument(\r\n \"--augment\", action=\"store_true\", help=\"apply non leaking augmentation\"\r\n )\r\n parser.add_argument(\r\n \"--augment_p\",\r\n type=float,\r\n default=0,\r\n help=\"probability of applying augmentation. 0 = use adaptive augmentation\",\r\n )\r\n parser.add_argument(\r\n \"--ada_target\",\r\n type=float,\r\n default=0.6,\r\n help=\"target augmentation probability for adaptive augmentation\",\r\n )\r\n parser.add_argument(\r\n \"--ada_length\",\r\n type=int,\r\n default=500 * 1000,\r\n help=\"target duraing to reach augmentation probability for adaptive augmentation\",\r\n )\r\n parser.add_argument(\r\n \"--ada_every\",\r\n type=int,\r\n default=256,\r\n help=\"probability update interval of the adaptive augmentation\",\r\n )\r\n parser.add_argument(\r\n '--data_dir',\r\n type=str,\r\n default='.',\r\n help='Dataset root directory'\r\n )\r\n \r\n args = parser.parse_args()\r\n\r\n n_gpu = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\r\n args.distributed = n_gpu > 1\r\n\r\n if args.distributed:\r\n torch.cuda.set_device(args.local_rank)\r\n torch.distributed.init_process_group(backend=\"nccl\", init_method=\"env://\")\r\n synchronize()\r\n\r\n args.latent = 512\r\n args.n_mlp = 8\r\n\r\n args.start_iter = 0\r\n\r\n if args.arch == 'stylegan2':\r\n from stylegan2 import Generator, Discriminator\r\n\r\n elif args.arch == 'stylegan2_alias_free':\r\n from stylegan2_alias_free import Generator, Discriminator\r\n\r\n elif args.arch == 'swagan':\r\n from swagan import Generator, Discriminator\r\n\r\n generator = Generator(\r\n args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier\r\n ).to(device)\r\n discriminator = Discriminator(\r\n args.size, channel_multiplier=args.channel_multiplier\r\n ).to(device)\r\n g_ema = Generator(\r\n args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier\r\n ).to(device)\r\n g_ema.eval()\r\n accumulate(g_ema, generator, 0)\r\n\r\n g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)\r\n d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)\r\n\r\n g_optim = optim.Adam(\r\n generator.parameters(),\r\n lr=args.lr * g_reg_ratio,\r\n betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),\r\n )\r\n d_optim = optim.Adam(\r\n discriminator.parameters(),\r\n lr=args.lr * d_reg_ratio,\r\n betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),\r\n )\r\n\r\n if args.ckpt is not None:\r\n print(\"load model:\", args.ckpt)\r\n\r\n ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)\r\n\r\n try:\r\n ckpt_name = os.path.basename(args.ckpt)\r\n args.start_iter = int(os.path.splitext(ckpt_name)[0])\r\n\r\n except ValueError:\r\n pass\r\n\r\n generator.load_state_dict(ckpt[\"g\"])\r\n discriminator.load_state_dict(ckpt[\"d\"])\r\n g_ema.load_state_dict(ckpt[\"g_ema\"])\r\n\r\n g_optim.load_state_dict(ckpt[\"g_optim\"])\r\n d_optim.load_state_dict(ckpt[\"d_optim\"])\r\n\r\n if args.distributed:\r\n generator = nn.parallel.DistributedDataParallel(\r\n generator,\r\n device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n 
broadcast_buffers=False,\r\n )\r\n\r\n discriminator = nn.parallel.DistributedDataParallel(\r\n discriminator,\r\n device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n broadcast_buffers=False,\r\n )\r\n\r\n transform = transforms.Compose(\r\n [\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\r\n ]\r\n )\r\n\r\n dataset = MultiResolutionDataset(args.path, transform, args.size)\r\n loader = data.DataLoader(\r\n dataset,\r\n batch_size=args.batch,\r\n sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),\r\n drop_last=True,\r\n )\r\n\r\n if get_rank() == 0 and wandb is not None and args.wandb:\r\n wandb.init(project=\"stylegan 2\")\r\n\r\n train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device)\r\n" ]
[ [ "torch.randn_like", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.cuda.set_device", "torch.load", "torch.randn", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.tensor", "torch.no_grad", "torch.nn.functional.softplus", "torch.nn.parallel.DistributedDataParallel" ] ]
memmelma/visual-prior
[ "6b9c65f291c587fcbb3fcc3f61f76cdd1c3eb175" ]
[ "visualpriors/transforms.py" ]
[ "from .taskonomy_network import TaskonomyEncoder, TaskonomyDecoder, TaskonomyNetwork, TASKONOMY_PRETRAINED_URLS, TASKS_TO_CHANNELS\nimport multiprocessing.dummy as mp\nimport torch\n\ndefault_device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\ndef representation_transform(img, feature_task='normal', device=default_device):\n '''\n Transforms an RGB image into a feature driven by some vision task\n Expects inputs:\n shape (batch_size, 3, 256, 256)\n values [-1,1]\n Outputs:\n shape (batch_size, 8, 16, 16)\n '''\n return VisualPrior.to_representation(img, feature_tasks=[feature_task], device=device)\n\ndef multi_representation_transform(img, feature_tasks=['normal'], device=default_device):\n '''\n Transforms an RGB image into a features driven by some vision tasks\n Expects inputs:\n shape (batch_size, 3, 256, 256)\n values [-1,1]\n Outputs:\n shape (batch_size, 8, 16, 16)\n '''\n return VisualPrior.to_representation(img, feature_tasks, device)\n\ndef max_coverage_featureset_transform(img, k=4, device=default_device):\n '''\n Transforms an RGB image into a features driven by some vision tasks.\n The tasks are chosen according to the Max-Coverage Min-Distance Featureset\n From the paper:\n Mid-Level Visual Representations Improve Generalization and Sample Efficiency \n for Learning Visuomotor Policies.\n Alexander Sax, Bradley Emi, Amir R. Zamir, Silvio Savarese, Leonidas Guibas, Jitendra Malik.\n Arxiv preprint 2018.\n\n This function expects inputs:\n shape (batch_size, 3, 256, 256)\n values [-1,1]\n Outputs:\n shape (batch_size, 8*k, 16, 16)\n '''\n return VisualPrior.max_coverage_transform(img, k, device)\n\ndef feature_readout(img, feature_task='normal', device=default_device):\n '''\n Transforms an RGB image into a feature driven by some vision task, \n then returns the result of a readout of the feature.\n Expects inputs:\n shape (batch_size, 3, 256, 256)\n values [-1,1]\n Outputs:\n shape (batch_size, 8, 16, 16)\n '''\n return VisualPrior.to_predicted_label(img, feature_tasks=[feature_task], device=device)\n\ndef multi_feature_readout(img, feature_tasks=['normal'], device=default_device):\n '''\n Transforms an RGB image into a features driven by some vision tasks\n then returns the readouts of the features.\n Expects inputs:\n shape (batch_size, 3, 256, 256)\n values [-1,1]\n Outputs:\n shape (batch_size, 8, 16, 16)\n '''\n return VisualPrior.to_predicted_label(img, feature_tasks, device)\n\ndef get_networks(feature_tasks, train=False, decoder=False, device=torch.device('cpu')):\n '''\n Return taskonomy encoder (and decoder) (with or) without gradients.\n Expects inputs:\n feature_tasks list\n train bool\n decoder bool\n device torch.device\n Outputs:\n list(nn.Module)\n '''\n return VisualPrior.get_nets(feature_tasks, train, decoder, device)\n\ndef get_viable_feature_tasks():\n '''\n Return viable feature tasks as list of strings.\n \n Outputs:\n list(str)\n '''\n return VisualPrior.viable_feature_tasks\n\ndef get_max_coverate_featuresets():\n '''\n Return max coverate featuresets as list of list of strings.\n \n Outputs:\n list(list(str))\n '''\n return VisualPrior.max_coverate_featuresets\n\n\nclass VisualPrior(object):\n\n max_coverate_featuresets = [\n ['autoencoding'],\n ['segment_unsup2d', 'segment_unsup25d'],\n ['edge_texture', 'reshading', 'curvature'],\n ['normal', 'keypoints2d', 'segment_unsup2d', 'segment_semantic'],\n ]\n model_dir = None\n viable_feature_tasks = [\n 'autoencoding',\n 'colorization',\n 'curvature',\n 'denoising',\n 'edge_texture',\n 
'edge_occlusion',\n 'egomotion', \n 'fixated_pose', \n 'jigsaw',\n 'keypoints2d',\n 'keypoints3d',\n 'nonfixated_pose',\n 'point_matching', \n 'reshading',\n 'depth_zbuffer',\n 'depth_euclidean',\n 'normal',\n 'room_layout',\n 'segment_unsup25d',\n 'segment_unsup2d',\n 'segment_semantic',\n 'class_object',\n 'class_scene',\n 'inpainting',\n 'vanishing_point']\n\n\n @classmethod\n def to_representation(cls, img, feature_tasks=['normal'], device=default_device):\n '''\n Transforms an RGB image into a feature driven by some vision task(s)\n Expects inputs:\n shape (batch_size, 3, 256, 256)\n values [-1,1]\n Outputs:\n shape (batch_size, 8, 16, 16)\n\n This funciton is technically unsupported and there are absolutely no guarantees. \n '''\n VisualPriorRepresentation._load_unloaded_nets(feature_tasks)\n for t in feature_tasks:\n VisualPriorRepresentation.feature_task_to_net[t] = VisualPriorRepresentation.feature_task_to_net[t].to(device)\n nets = [VisualPriorRepresentation.feature_task_to_net[t] for t in feature_tasks]\n with torch.no_grad():\n return torch.cat([net(img) for net in nets], dim=1)\n\n @classmethod\n def to_predicted_label(cls, img, feature_tasks=['normal'], device=default_device):\n '''\n Transforms an RGB image into a predicted label for some task.\n Expects inputs:\n shape (batch_size, 3, 256, 256)\n values [-1,1]\n Outputs:\n shape (batch_size, C, 256, 256)\n values [-1,1]\n\n This funciton is technically unsupported and there are absolutely no guarantees. \n '''\n VisualPriorPredictedLabel._load_unloaded_nets(feature_tasks)\n for t in feature_tasks:\n VisualPriorPredictedLabel.feature_task_to_net[t] = VisualPriorPredictedLabel.feature_task_to_net[t].to(device)\n nets = [VisualPriorPredictedLabel.feature_task_to_net[t] for t in feature_tasks]\n with torch.no_grad():\n return torch.cat([net(img) for net in nets], dim=1)\n \n @classmethod\n def max_coverage_transform(cls, img, k=4, device=default_device):\n assert k > 0, 'Number of features to use for the max_coverage_transform must be > 0'\n if k > 4:\n raise NotImplementedError(\"max_coverage_transform featureset not implemented for k > 4\")\n return cls.to_representation(img, feature_tasks=cls.max_coverate_featuresets[k - 1], device=device)\n\n @classmethod\n def set_model_dir(cls, model_dir):\n cls.model_dir = model_dir\n\n @classmethod\n def get_nets(cls, feature_tasks, train, decoder, device):\n \n if decoder:\n if len(feature_tasks) == 1:\n VisualPriorPredictedLabel._load_unloaded_nets(feature_tasks)\n for t in feature_tasks:\n VisualPriorPredictedLabel.feature_task_to_net[t] = VisualPriorPredictedLabel.feature_task_to_net[t].to(device)\n nets = [VisualPriorPredictedLabel.feature_task_to_net[t] for t in feature_tasks]\n else:\n raise NotImplementedError(\"Decoder retrieval only implemented for single feature task.\")\n else: \n VisualPriorRepresentation._load_unloaded_nets(feature_tasks)\n for t in feature_tasks:\n VisualPriorRepresentation.feature_task_to_net[t] = VisualPriorRepresentation.feature_task_to_net[t].to(device)\n nets = [VisualPriorRepresentation.feature_task_to_net[t] for t in feature_tasks]\n \n if train:\n for net in nets:\n # method override in taskonomy_network.py -> TaskonomyNetwork\n net.train(False)\n for p in net.parameters():\n p.requires_grad = True\n \n return nets\n\nclass VisualPriorRepresentation(object):\n '''\n Handles loading networks that transform images into encoded features.\n Expects inputs:\n shape (batch_size, 3, 256, 256)\n values [-1,1]\n Outputs:\n shape (batch_size, 8, 16, 16)\n 
'''\n feature_task_to_net = {}\n\n @classmethod\n def _load_unloaded_nets(cls, feature_tasks, model_dir=None):\n net_paths_to_load = []\n feature_tasks_to_load = []\n for feature_task in feature_tasks:\n if feature_task not in cls.feature_task_to_net:\n net_paths_to_load.append(TASKONOMY_PRETRAINED_URLS[feature_task + '_encoder'])\n feature_tasks_to_load.append(feature_task)\n nets = cls._load_networks(net_paths_to_load)\n for feature_task, net in zip(feature_tasks_to_load, nets):\n cls.feature_task_to_net[feature_task] = net\n\n @classmethod\n def _load_networks(cls, network_paths, model_dir=None):\n return [cls._load_encoder(url, model_dir) for url in network_paths]\n\n @classmethod\n def _load_encoder(cls, url, model_dir=None, progress=True):\n net = TaskonomyEncoder() #.cuda()\n net.eval()\n checkpoint = torch.utils.model_zoo.load_url(url, model_dir=model_dir, progress=progress)\n net.load_state_dict(checkpoint['state_dict'])\n for p in net.parameters():\n p.requires_grad = False\n # net = Compose(nn.GroupNorm(32, 32, affine=False), net)\n return net\n \n\n\nclass VisualPriorPredictedLabel(object):\n '''\n Handles loading networks that transform images into transformed images.\n Expects inputs:\n shape (batch_size, 3, 256, 256)\n values [-1,1]\n Outputs:\n shape (batch_size, C, 256, 256)\n values [-1,1]\n \n This class is technically unsupported and there are absolutely no guarantees. \n '''\n feature_task_to_net = {}\n \n @classmethod\n def _load_unloaded_nets(cls, feature_tasks, model_dir=None):\n net_paths_to_load = []\n feature_tasks_to_load = []\n for feature_task in feature_tasks:\n if feature_task not in cls.feature_task_to_net:\n if feature_task not in TASKS_TO_CHANNELS:\n raise NotImplementedError('Task {} not implemented in VisualPriorPredictedLabel'.format(feature_task))\n net_paths_to_load.append((TASKS_TO_CHANNELS[feature_task],\n TASKONOMY_PRETRAINED_URLS[feature_task + '_encoder'],\n TASKONOMY_PRETRAINED_URLS[feature_task + '_decoder']))\n feature_tasks_to_load.append(feature_task)\n nets = cls._load_networks(net_paths_to_load)\n for feature_task, net in zip(feature_tasks_to_load, nets):\n cls.feature_task_to_net[feature_task] = net\n\n @classmethod\n def _load_networks(cls, network_paths, model_dir=None, progress=True):\n nets = []\n for out_channels, encoder_path, decoder_path in network_paths:\n nets.append(TaskonomyNetwork(\n out_channels=out_channels,\n load_encoder_path=encoder_path,\n load_decoder_path=decoder_path,\n model_dir=model_dir,\n progress=progress))\n return nets" ]
[ [ "torch.device", "torch.no_grad", "torch.utils.model_zoo.load_url", "torch.cuda.is_available" ] ]
amithfernando/DeepPavlov
[ "d643000f0a5bb4c220a28899d727e12e195000e0" ]
[ "deeppavlov/dataset_iterators/ranking_iterator.py" ]
[ "from deeppavlov.core.common.registry import register\nfrom deeppavlov.core.data.data_learning_iterator import DataLearningIterator\n\nimport numpy as np\nimport random\nfrom typing import Dict, List, Tuple\n\n\n@register('ranking_iterator')\nclass RankingIterator(DataLearningIterator):\n \"\"\"The class contains methods for iterating over a dataset for ranking in training, validation and test mode.\n\n Note:\n Each sample in ``data['train']`` is arranged as follows:\n ``{'context': 21507, 'response': 7009, 'pos_pool': [7009, 7010], 'neg_pool': None}``.\n The context has a 'context' key in the data sample.\n It is represented by a single integer.\n The correct response has the 'response' key in the sample,\n its value is also always a single integer.\n The list of possible correct responses (there may be several) can be\n obtained\n with the 'pos\\_pool' key.\n The value of the 'response' should be equal to the one item from the\n list\n obtained using the 'pos\\_pool' key.\n The list of possible negative responses (there can be a lot of them,\n 100–10000) is represented by the key 'neg\\_pool'.\n Its value is None, when global sampling is used, or the list of fixed\n length, when sampling from predefined negative responses is used.\n It is important that values in 'pos\\_pool' and 'negative\\_pool' do\n not overlap.\n Single items in 'context', 'response', 'pos\\_pool', 'neg\\_pool' are\n represented\n by single integers that give lists of integers\n using some dictionary `integer–list of integers`.\n These lists of integers are converted to lists of tokens with\n some dictionary `integer–token`.\n Samples in ``data['valid']`` and ``data['test']`` representation are almost the same\n as the train sample shown above.\n\n Args:\n data: A dictionary containing training, validation and test parts of the dataset obtainable via\n ``train``, ``valid`` and ``test`` keys.\n sample_candidates_pool: Whether to sample candidates from a predefined pool of candidates\n for each sample in training mode. 
If ``False``, negative sampling from the whole data will be performed.\n sample_candidates_pool_valid: Whether to validate a model on a predefined pool of candidates for each sample.\n If ``False``, sampling from the whole data will be performed for validation.\n sample_candidates_pool_test: Whether to test a model on a predefined pool of candidates for each sample.\n If ``False``, sampling from the whole data will be performed for test.\n num_negative_samples: A size of a predefined pool of candidates\n or a size of data subsample from the whole data in training mode.\n num_ranking_samples_valid: A size of a predefined pool of candidates\n or a size of data subsample from the whole data in validation mode.\n num_ranking_samples_test: A size of a predefined pool of candidates\n or a size of data subsample from the whole data in test mode.\n seed: Random seed.\n shuffle: Whether to shuffle data.\n len_vocab: A length of a vocabulary to perform sampling in training, validation and test mode.\n pos_pool_sample: Whether to sample response from `pos_pool` each time when the batch is generated.\n If ``False``, the response from `response` will be used.\n pos_pool_rank: Whether to count samples from the whole `pos_pool` as correct answers in test / validation mode.\n random_batches: Whether to choose batches randomly or iterate over data sequentally in training mode.\n batches_per_epoch: A number of batches to choose per each epoch in training mode.\n Only required if ``random_batches`` is set to ``True``.\n triplet_mode: Whether to use a model with triplet loss.\n If ``False``, a model with crossentropy loss will be used.\n hard_triplets_sampling: Whether to use hard triplets method of sampling in training mode.\n num_positive_samples: A number of contexts to choose from `pos_pool` for each `context`.\n Only required if ``hard_triplets_sampling`` is set to ``True``.\n \"\"\"\n\n def __init__(self,\n data: Dict[str, List],\n sample_candidates_pool: bool = False,\n sample_candidates_pool_valid: bool = True,\n sample_candidates_pool_test: bool = True,\n num_negative_samples: int = 10,\n num_ranking_samples_valid: int = 10,\n num_ranking_samples_test: int = 10,\n seed: int = None,\n shuffle: bool = False,\n len_vocab: int = 0,\n pos_pool_sample: bool = False,\n pos_pool_rank: bool = True,\n random_batches: bool = False,\n batches_per_epoch: int = None,\n triplet_mode: bool = True,\n hard_triplets_sampling: bool = False,\n num_positive_samples: int = 5):\n\n self.sample_candidates_pool = sample_candidates_pool\n self.sample_candidates_pool_valid = sample_candidates_pool_valid\n self.sample_candidates_pool_test = sample_candidates_pool_test\n self.num_negative_samples = num_negative_samples\n self.num_ranking_samples_valid = num_ranking_samples_valid\n self.num_ranking_samples_test = num_ranking_samples_test\n self.len_vocab = len_vocab\n self.pos_pool_sample = pos_pool_sample\n self.pos_pool_rank = pos_pool_rank\n self.random_batches = random_batches\n self.batches_per_epoch = batches_per_epoch\n self.triplet_mode = triplet_mode\n self.hard_triplets_sampling = hard_triplets_sampling\n self.num_positive_samples = num_positive_samples\n\n np.random.seed(seed)\n self.train = data.get('train', [])\n self.valid = data.get('valid', [])\n self.test = data.get('test', [])\n self.data = {\n 'train': self.train,\n 'valid': self.valid,\n 'test': self.test,\n 'all': self.train + self.test + self.valid\n }\n\n super().__init__(self.data, seed=seed, shuffle=shuffle)\n\n\n def gen_batches(self, batch_size: int, 
data_type: str = \"train\", shuffle: bool = True)->\\\n Tuple[List[List[Tuple[int, int]]], List[int]]:\n \"\"\"Generate batches of inputs and expected outputs to train neural networks.\n\n Args:\n batch_size: number of samples in batch\n data_type: can be either 'train', 'test', or 'valid'\n shuffle: whether to shuffle dataset before batching\n\n Returns:\n A tuple of a batch of inputs and a batch of expected outputs.\n\n Inputs and expected outputs have different structure and meaning\n depending on class attributes values and ``data_type``.\n \"\"\"\n data = self.data[data_type]\n if self.random_batches and self.batches_per_epoch is not None and data_type == \"train\":\n num_steps = self.batches_per_epoch\n assert(batch_size <= len(data))\n else:\n num_steps = len(data) // batch_size\n if data_type == \"train\":\n if shuffle:\n np.random.shuffle(data)\n for i in range(num_steps):\n if self.random_batches:\n context_response_data = np.random.choice(data, size=batch_size, replace=False)\n else:\n context_response_data = data[i * batch_size:(i + 1) * batch_size]\n context = [el[\"context\"] for el in context_response_data]\n if self.pos_pool_sample:\n response = [random.choice(el[\"pos_pool\"]) for el in context_response_data]\n else:\n response = [el[\"response\"] for el in context_response_data]\n if self.triplet_mode:\n negative_response = self._create_neg_resp_rand(context_response_data, batch_size)\n if self.hard_triplets_sampling:\n labels = [el[\"label\"] for el in context_response_data]\n positives = [random.choices(el[\"pos_pool\"], k=self.num_positive_samples)\n for el in context_response_data]\n x = [[(context[i], el) for el in positives[i]] for i in range(len(context_response_data))]\n y = labels\n else:\n x = [[(context[i], el) for el in [response[i]] + [negative_response[i]]]\n for i in range(len(context_response_data))]\n y = batch_size * [np.ones(2)]\n else:\n y = [el[\"label\"] for el in context_response_data]\n x = [[(context[i], response[i])] for i in range(len(context_response_data))]\n yield (x, y)\n if data_type in [\"valid\", \"test\"]:\n for i in range(num_steps + 1):\n if i < num_steps:\n context_response_data = data[i * batch_size:(i + 1) * batch_size]\n else:\n if len(data[i * batch_size:len(data)]) > 0:\n context_response_data = data[i * batch_size:len(data)]\n context = [el[\"context\"] for el in context_response_data]\n if data_type == \"valid\":\n ranking_length = self.num_ranking_samples_valid\n sample_candidates_pool = self.sample_candidates_pool_valid\n elif data_type == \"test\":\n ranking_length = self.num_ranking_samples_test\n sample_candidates_pool = self.sample_candidates_pool_test\n if not sample_candidates_pool:\n ranking_length = self.len_vocab\n response_data = self._create_rank_resp(context_response_data, ranking_length)\n if self.pos_pool_rank:\n y = [len(el[\"pos_pool\"]) * np.ones(ranking_length) for el in context_response_data]\n else:\n y = [np.ones(ranking_length) for _ in context_response_data]\n x = [[(context[i], el) for el in response_data[i]] for i in range(len(context_response_data))]\n yield (x, y)\n\n def _create_neg_resp_rand(self, context_response_data, batch_size):\n \"\"\"Randomly chooses negative response for each context in a batch.\n\n Sampling is performed from predefined pools of candidates or from the whole data.\n\n Args:\n context_response_data: A batch from the train part of the dataset.\n batch_size: A batch size.\n\n Returns:\n one negative response for each context in a batch.\n \"\"\"\n if 
self.sample_candidates_pool:\n negative_response_data = [random.choice(el[\"neg_pool\"])\n for el in context_response_data]\n else:\n candidates = []\n for i in range(batch_size):\n candidate = np.random.randint(0, self.len_vocab, 1)[0]\n while candidate in context_response_data[i][\"pos_pool\"]:\n candidate = np.random.randint(0, self.len_vocab, 1)[0]\n candidates.append(candidate)\n negative_response_data = candidates\n return negative_response_data\n\n def _create_rank_resp(self, context_response_data, ranking_length):\n \"\"\"Chooses a set of negative responses for each context in a batch to evaluate ranking quality.\n\n Negative responses are taken from predefined pools of candidates or from the whole data.\n\n Args:\n context_response_data: A batch from the train part of the dataset.\n ranking_length: a number of responses for each context to evaluate ranking quality.\n\n Returns:\n list of responses for each context in a batch.\n \"\"\"\n response_data = []\n for i in range(len(context_response_data)):\n pos_pool = context_response_data[i][\"pos_pool\"]\n resp = context_response_data[i][\"response\"]\n if self.pos_pool_rank:\n pos_pool.insert(0, pos_pool.pop(pos_pool.index(resp)))\n else:\n pos_pool = [resp]\n neg_pool = context_response_data[i][\"neg_pool\"]\n response = pos_pool + neg_pool\n response_data.append(response[:ranking_length])\n return response_data\n" ]
[ [ "numpy.random.seed", "numpy.random.choice", "numpy.random.shuffle", "numpy.ones", "numpy.random.randint" ] ]
gtuzi/TensorFlowPlayground
[ "293457e8e8a5e802aa0f28da3034d17492171f4c" ]
[ "Autoencoders/AutoencoderPhasedTraining.py" ]
[ "'''\nTrain Autoencoder networks one at a time. Then combine the final\nnetwork into one. Each of these steps are phases.\nTo achieve this, use a different TensorFlow graph for each phase.\nAt each phase, you build the outer layers, as you progress inwards.\nTraining data for the next phase becomes the training output\nfrom the previous phase.\n'''\n\nimport tensorflow as tf\nimport numpy as np\n################## Load Data ###############\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\")\n\n\n################## Parameters ##############\nn_inputs = 28 * 28 # for MNIST\nn_hidden1 = 300\nn_hidden2 = 150 # codings\nn_hidden3 = n_hidden1\nn_output = n_inputs\nlearning_rate = 0.001\nl2_reg = 0.0001\n\nn_epochs = 4\nbatch_size = 150\n\n\ndef reset_graph():\n tf.reset_default_graph()\n np.random.seed()\n\nactivation = tf.nn.elu\nl2_reg = tf.contrib.layers.l2_regularizer(l2_reg)\ninitializer = tf.contrib.layers.variance_scaling_initializer()\n\n\n################## Phase 1 #################\n\nph1Graph = tf.Graph()\n# Training data for the next autoencoder layer\nL1_TrainData = None\nL1_ValidData = None\n\n\n# Weights for reconstruction\nL1_Wvals = None\nL1_bvals = None\nLout_Wvals = None\nLout_bvals = None\nwith ph1Graph.as_default():\n\n # The following source ops and variables will be added to this graph\n X = tf.placeholder(dtype=tf.float32, shape=(None, n_inputs), name='X')\n\n # --- Layer 1 (Hidden)\n W1 = tf.Variable(dtype=tf.float32, initial_value=initializer([n_inputs, n_hidden1]), name='W1')\n b1 = tf.Variable(tf.zeros(shape=n_hidden1), name='b1')\n layer1 = activation(tf.add(tf.matmul(X, W1), b1), name='layer1')\n\n # --- Output layer\n Wout = tf.Variable(dtype=tf.float32, initial_value=initializer([n_hidden1, n_output]), name='Wout')\n bout = tf.Variable(tf.zeros(shape=n_output), name='bout')\n # No non-linearity here. 
We're comparing to a certain output\n output = tf.add(tf.matmul(layer1, Wout), bout, name='output')\n\n\n reconstruction_loss = tf.reduce_mean(tf.square(X - output), name='mse')\n reg_losses = [l2_reg(W1), l2_reg(Wout)]\n reg_loss = tf.add_n(reg_losses)\n loss = tf.add_n([reconstruction_loss] + reg_losses)\n # loss = reconstruction_loss + reg_loss\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n training_op = optimizer.minimize(loss)\n\n init = tf.global_variables_initializer()\n with tf.Session(graph=ph1Graph) as sess:\n sess.run(init)\n\n # Phase 1 data, is the actual data\n for epoch in range(n_epochs):\n n_batches = mnist.train.num_examples // batch_size\n loss_val = 0.\n for iteration in range(n_batches):\n X_batch, y_batch = mnist.train.next_batch(batch_size)\n sess.run(training_op, feed_dict={X: X_batch})\n\n print('===== Phase 1 - Epoch: {0} ====='.format(epoch))\n loss_train, reg_loss_train = sess.run([reconstruction_loss, reg_loss], feed_dict={X: mnist.train.images})\n loss_val = sess.run(reconstruction_loss, feed_dict={X: mnist.validation.images})\n print('Train: Reconstruction MSE = {0:.4f}, Regularization: {1:.4f}'.format(loss_train, reg_loss_train))\n print('Validation MSE: {0:.4f}'.format(loss_val))\n\n # The weights will be used for reconstructing the final autoencoder\n L1_Wvals, L1_bvals = W1.eval(), b1.eval()\n Lout_Wvals, Lout_bvals = Wout.eval(), bout.eval()\n\n # The output of the hidden layer of this phase,\n # becomes training for the next phase's hidden layer\n L1_TrainData = sess.run(layer1, feed_dict={X: mnist.train.images})\n L1_ValidData = sess.run(layer1, feed_dict={X: mnist.validation.images})\n\n\n\n################## Phase 2 #####################\n\ngraph2 = tf.Graph()\nL2_Wvals = None\nL2_bvals = None\nL3_Wvals = None\nL3_bvals = None\nwith graph2.as_default():\n\n L1_Dat = tf.placeholder(dtype=tf.float32, shape=(None, n_hidden1), name='L1_Dat')\n # The following variables will be added to this graph\n\n # ---- Coding layer\n W2 = tf.Variable(dtype=tf.float32, initial_value=initializer([n_hidden1, n_hidden2]), name='W2')\n b2 = tf.Variable(tf.zeros(shape=n_hidden2), name='b2')\n layer2 = activation(tf.add(tf.matmul(L1_Dat, W2), b2, name='layer2'))\n\n # --- Layer 3 (hidden)\n W3 = tf.Variable(dtype=tf.float32, initial_value=initializer([n_hidden2, n_hidden3]), name='W3')\n b3 = tf.Variable(tf.zeros(shape=n_hidden3), name='b3')\n\n # What activation we put here ? 
Well, the output that we're trying to replicate\n # is that from Layer 1, which goes through a non-linearity.\n # we're comparing directly to the output of the hidden 1 layer !!!\n layer3 = activation(tf.add(tf.matmul(layer2, W3), b3), name='layer3')\n\n reconstruction_loss = tf.reduce_mean(tf.square(L1_Dat - layer3), name='mse')\n reg_losses = [l2_reg(W2), l2_reg(W3)]\n reg_loss = tf.add_n(reg_losses)\n loss = tf.add_n([reconstruction_loss] + reg_losses)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n training_op = optimizer.minimize(loss)\n\n init = tf.global_variables_initializer()\n with tf.Session(graph=graph2) as sess:\n sess.run(init)\n\n # Phase 1 data, is the actual data\n for epoch in range(n_epochs):\n n_batches = len(L1_TrainData) // batch_size\n loss_val = 0.\n for iteration in range(n_batches):\n L1_batch = L1_TrainData[iteration:iteration+batch_size,:]\n sess.run(training_op, feed_dict={L1_Dat: L1_batch})\n\n print('===== Phase 2 - Epoch: {0} ====='.format(epoch))\n loss_train, reg_loss_train = sess.run([reconstruction_loss, reg_loss], feed_dict={L1_Dat: L1_TrainData})\n loss_val = sess.run(reconstruction_loss, feed_dict={L1_Dat: L1_ValidData})\n print('Train: Reconstruction MSE = {0:.4f}, Regularization: {1:.4f}'.format(loss_train, reg_loss_train))\n print('Validation MSE: {0:.4f}'.format(loss_val))\n\n L2_Wvals, L2_bvals = W2.eval(), b2.eval()\n L3_Wvals, L3_bvals = W3.eval(), b3.eval()\n\n\n\n\n################# Plots #####################\nimport matplotlib.pyplot as plt\n\ndef plot_image(image, shape=[28, 28]):\n plt.imshow(image.reshape(shape), cmap=\"Greys\", interpolation=\"nearest\")\n plt.axis(\"off\")\n\ndef show_reconstructed_digits(X, outputs, n_test_digits = 2):\n with tf.Session() as sess:\n X_test = mnist.test.images[:n_test_digits]\n outputs_val = outputs.eval(feed_dict={X: X_test})\n\n fig = plt.figure(figsize=(8, 3 * n_test_digits))\n for digit_index in range(n_test_digits):\n plt.subplot(n_test_digits, 2, digit_index * 2 + 1)\n plot_image(X_test[digit_index])\n plt.subplot(n_test_digits, 2, digit_index * 2 + 2)\n plot_image(outputs_val[digit_index])\n plt.waitforbuttonpress()\n\n############ Construct the final autoencoder ##############\n\n'''\nConstructing the final encoder from the pre-trained components\nfrom the previous phases\n'''\n\n# tf.reset_default_graph()\nreset_graph()\n\n# Switching back to the default graph here\nX = tf.placeholder(dtype=tf.float32, shape=(None, n_inputs), name='X')\n\n# -- Layer 1\nW1 = tf.constant(value=L1_Wvals, name='W1final')\nb1 = tf.constant(value=L1_bvals, name='b1final')\nlayer1 = activation(tf.matmul(X, W1) + b1, name='layer1')\n\n# --- Layer 2 (codings)\nW2 = tf.constant(value=L2_Wvals, name='W2final')\nb2 = tf.constant(value=L2_bvals, name='b2final')\nlayer2 = activation(tf.matmul(layer1, W2) + b2, name='layer2')\n\n# --- Layer 3\nW3 = tf.constant(value=L3_Wvals, name='W3final')\nb3 = tf.constant(value=L3_bvals, name='b3final')\nlayer3 = activation(tf.matmul(layer2, W3) + b3, name='layer3')\n\n# --- Output layer\nWout = tf.constant(value=Lout_Wvals, name='Woutfinal')\nbout = tf.constant(value=Lout_bvals, name='boutfinal')\noutput = tf.matmul(layer3, Wout) + bout\n\n# Wout = tf.constant(value=Lout_Wvals, name='Woutfinal')\n# bout = tf.constant(value=Lout_bvals, name='boutfinal')\n# output = tf.matmul(layer1, Wout) + bout\n\n# Here we only care about reconstruction loss\nloss = tf.reduce_mean(tf.square(X - output), name='mse')\n\n# No training here. 
Just testing\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n sess.run(init)\n test_loss_val = sess.run(loss, feed_dict={X: mnist.test.images})\n valid_loss_val = sess.run(loss, feed_dict={X: mnist.validation.images})\n train_loss_val = sess.run(loss, feed_dict={X: mnist.train.images})\n\nprint('========== Final Output ==============')\nprint('Train: {0:.4f}, Valid: {1:.4f}, Test: {2:.4f} MSE'.format(train_loss_val, valid_loss_val, test_loss_val))\n\nshow_reconstructed_digits(X, output)\n\n\n\n\n" ]
[ [ "tensorflow.zeros", "tensorflow.train.AdamOptimizer", "tensorflow.add_n", "tensorflow.Graph", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.reset_default_graph", "matplotlib.pyplot.subplot", "tensorflow.square", "matplotlib.pyplot.axis", "matplotlib.pyplot.waitforbuttonpress", "tensorflow.Session", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "matplotlib.pyplot.figure", "tensorflow.matmul", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.constant", "numpy.random.seed", "tensorflow.contrib.layers.l2_regularizer" ] ]
EleMisi/ConditionalVAE
[ "f5e6a13f13b6539a71d8aced830fcda8f5171090" ]
[ "src/celeba.py" ]
[ "from collections import OrderedDict\nimport cv2\nfrom tensorflow.keras.utils import Sequence\nimport math\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nimport tensorflow as tf\n\nfrom utils import save_data\n\n\nclass CelebADataset(Sequence):\n\n\n def __init__(self, train_size, batch_size, mode = 'train', save_test_set = False):\n\n self.train_img_ids, self.test_img_ids, self.attributes = self.load(train_size)\n self.batch_size = batch_size\n self.mode = mode\n self.train_size = len(self.train_img_ids)\n if save_test_set:\n self.save_test_set()\n\n\n def load(self, train_dim):\n \"\"\" \n Loads all image IDs and the attributes and splits the dataset into training set and test set.\n \n Returns:\n - train_img_ids [list]\n - test_img_ids [list]\n - attributes [list]\n \n \"\"\"\n\n print(\"Loading images id and attributes...\")\n\n file_path = \"/input/CelebA/list_attr_celeba.csv\"\n df = pd.read_csv(file_path, header = 0, index_col = 0).replace(-1,0)\n attributes = [x for x in df.columns] \n od = OrderedDict(df.to_dict('index'))\n img_ids = OrderedDict()\n for k,v in od.items():\n img_id = [np.float32(x) for x in v.values()]\n img_ids[k] = img_id\n print(\"img_ids: {} \\nAttributes: {} \\n\".format(len(img_ids), len(attributes)))\n\n #Splitting\n print(\"Splitting dataset...\\n\")\n n_train = int(len(img_ids) * train_dim)\n list_img_ids = list(img_ids.items())\n train_img_ids = list_img_ids[:n_train]\n test_img_ids = list_img_ids[n_train:]\n\n print(\"Train set dimension: {} \\nTest set dimension: {} \\n\".format(len(train_img_ids), len(test_img_ids)))\n\n return train_img_ids, test_img_ids, attributes\n\n\n def next_batch(self, idx):\n \"\"\"\n Returns a batch of images with their ID as numpy arrays.\n \"\"\" \n \n batch_img_ids = [x[1] for x in self.train_img_ids[idx * self.batch_size : (idx + 1) * self.batch_size]]\n images_id = [x[0] for x in self.train_img_ids[idx * self.batch_size : (idx + 1) * self.batch_size]]\n batch_imgs = self.get_images(images_id) \n \n return np.asarray(batch_imgs, dtype='float32'), np.asarray(batch_img_ids, dtype='float32')\n\n\n def preprocess_image(self,image_path, img_size = 128, img_resize = 64, x = 25, y = 45):\n \"\"\"\n Crops, resizes and normalizes the target image.\n \"\"\"\n\n img = cv2.imread(image_path)\n img = img[y:y+img_size, x:x+img_size]\n img = cv2.resize(img, (img_resize, img_resize))\n img = np.array(img, dtype='float32')\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img /= 255.0 # Normalization to [0.,1.]\n \n return img\n\n\n def get_images(self,imgs_id):\n \"\"\"\n Returns the list of images corresponding to the given IDs.\n \"\"\"\n imgs = []\n\n for i in imgs_id:\n image_path ='/input/CelebA/img_align_celeba/img_align_celeba/' + i\n imgs.append(self.preprocess_image(image_path))\n\n return imgs\n\n\n def save_test_set(self):\n \"\"\"\n Saves a json file with useful information for teh test phase:\n - training size\n - test images IDs\n - attributes\n - batch size\n \"\"\"\n\n try:\n test_data = {\n 'train_size' : self.train_size,\n 'test_img_ids' : self.test_img_ids,\n 'attributes' : self.attributes,\n 'batch_size' : self.batch_size\n }\n\n file_path = \"./test_data\"\n save_data(file_path, test_data)\n except:\n raise\n print(\"Test img_ids successfully saved.\")\n\n\n def shuffle(self):\n \"\"\"\n Shuffles the training IDs.\n \"\"\"\n self.train_img_ids = random.sample(self.train_img_ids, k=self.train_size)\n print(\"IDs shuffled.\")\n\n\n def __len__(self):\n return int(math.ceil(self.train_size / 
float(self.batch_size)))\n\n\n def __getitem__(self, index):\n return self.next_batch(index)" ]
[ [ "numpy.asarray", "numpy.array", "pandas.read_csv", "numpy.float32" ] ]
dulithchinthaka/DCDS
[ "3f804a87aa57007769985b51eb2d7939fe15cb95" ]
[ "reid/models/fusion_branch.py" ]
[ "from torch import nn\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport pdb\n\n\n\n\ndef Dist_Comp(pg1, pg2, dotp):\n A = dotp\n A = A - torch.min(A) #############\n\n #############################################################################################################3\n b = A.data.clone().contiguous()\n b.requires_grad = False\n\n # making the diagonal zero\n z = -1 * torch.diag(b)\n kk = Variable(torch.diag(z), requires_grad=False)\n A = kk + A\n\n B_size = A.size(0)\n\n F_rank = Variable(torch.zeros(B_size, B_size).cuda())\n\n for i in range(0, B_size):\n v = torch.ones(B_size, 1)\n v[i] = 0\n ind = (v != 0).nonzero()\n\n ind = ind[:, 0]\n ind = ind.cuda()\n M = A[ind][:, ind]\n\n \n M2 = Variable(M.data.clone(), requires_grad=False)\n EE = torch.eig(M2)\n EE2 = EE[0]\n\n alpha = torch.max(EE2[:, 0])\n alpha = alpha + 0.0005\n # alpha = 0.9\n\n ident_M = torch.eye(B_size, B_size)\n ident_M[i, i] = 0\n\n ident_M = ident_M * float(alpha)\n ident_M = Variable(ident_M, requires_grad=False)\n A_2 = A - ident_M.cuda()\n\n A_2 = A_2 + alpha\n\n # A_2 = torch.mul(A_2, p_g_score).contiguous()\n # calling Replicator dynamics\n P_g_rank = Replicator(A_2)\n\n # P_g_rank = torch.exp(P_g_rank)\n\n # P_g_rank = (P_g_rank - torch.min(P_g_rank))/ (torch.max(P_g_rank) - torch.min(P_g_rank))\n P_g_rank = P_g_rank.view(1, -1)\n\n F_rank[i, :] = P_g_rank\n\n # instances_num = 8\n # num_id = B_size / instances_num\n # temp = []\n\n # rpKnn =\n # pg1=torch.abs(pg1)\n # F_rank = torch.mm(F_rank, p_g_score)\n # pg1 = torch.abs(pg1)\n\n F_rank_T = (0.3 - F_rank).contiguous()\n\n F_rank = (0.1 * ((F_rank).cuda()) * (0.9 * (pg1).cuda())).contiguous()\n F_rank2 = (0.1 * (((F_rank_T)).cuda()) * (0.9 * (pg2).cuda())).contiguous()\n\n # K_nn2 = Variable(K_nn2)\n # Fr = (torch.matmul(F_rank, K_nn2).cuda()).contiguous() # each column corresponds to the id, and the row corresponds to the gallery image\n\n return F_rank, F_rank2\n # return F_list\n\n\ndef Replicator(A_2):\n l = A_2.size(0)\n x = torch.ones(l, 1) / l\n\n x = Variable(x, requires_grad=True).cpu()\n A_f = A_2.cpu()\n x = x.cpu()\n\n # x = x * (torch.mm(A_f, x))\n # x = x / torch.sum(x)\n\n toll = 0.0000001\n ero = 2 * toll + 1\n max_it = 5\n if max_it:\n # print(self.max_it)\n max_it = max_it\n else:\n max_it = float('inf')\n\n count = int(0)\n\n x_old = x.cpu()\n x_old = x_old.type(torch.FloatTensor)\n x = (x * (torch.matmul(A_f, x))).contiguous()\n\n xx = torch.norm(x, p=2, dim=0).detach()\n x = x.div(xx.expand_as(x))\n\n while ero > toll and count < max_it:\n x_old = x.cpu()\n x_old = x_old.type(torch.FloatTensor)\n x = (x * (torch.matmul(A_f, x))).contiguous()\n\n xx = torch.norm(x, p=2, dim=0).detach()\n x = x.div(xx.expand_as(x))\n\n ero = torch.norm(x - x_old)\n ero = float(ero)\n count = count + 1\n\n return x.cuda()\n\n\nclass VNetExtension(nn.Module):\n def __init__(self, instances_num=4, base_model=None, embed_model=None, alpha=0.1):\n super(VNetExtension, self).__init__()\n self.instances_num = instances_num\n self.alpha = alpha\n self.base = base_model # Resnet50\n self.embed = embed_model \n\n # self.l = batch_size\n\n for i in range(len(embed_model)):\n setattr(self, 'embed_' + str(i), embed_model[i])\n\n def forward(self, x, epoch):\n x = self.base(x) # Resnet\n #x1, x2, x3 = self.base(x) # Resnet\n\n #x=x3\n #x=torch.cat((x1,x2,x),1)# 3072 feature size\n N, C, H, W = x.size()\n gallery_x = x\n gallery_x = gallery_x.contiguous()\n # gallery_x = gallery_x.view(gallery_num, C, H, W)\n f_size= 
C\n count = C / (len(self.embed))\n # outputs = Variable(torch.zeros((gallery_x.size(0)*gallery_x.size(0)),2).cuda())\n outputs = []\n for j in range(len(self.embed)):\n\n # p_g_score = self.embed[j](probe_x[:, i * count:(i + 1) * count].contiguous(), # take us to embedding.py\n # gallery_x[:, i * count:(i + 1) * count].contiguous(),\n # p2g=True, g2g=False)\n p_g_score, dotp = self.embed[j](gallery_x[:, j * count:(j + 1) * count].contiguous(),\n gallery_x[:, j * count:(j + 1) * count].contiguous(),\n p2g=False, g2g=True)\n ''' Uncomment this to run CDS on both negative and posetive class of the linear output\n\n for jk in range(2):\n pg1 = p_g_score[:,:,jk]\n outt=Dist_Comp(pg1)\n outt = outt.view(outt.size(0)*outt.size(0))\n outputs[:,jk] = outt\n ##'''\n pg1 = p_g_score[:, :, 1].contiguous()\n pg2 = p_g_score[:, :, 0].contiguous()\n\n if epoch >= 0:\n outt, outt2 = Dist_Comp(pg1, pg2, dotp) # Dominant Sets\n outt = outt.view(outt.size(0) * outt.size(0), 1).contiguous()\n outt2 = outt2.view(outt2.size(0) * outt2.size(0), 1).contiguous()\n outt3 = torch.cat((outt2, outt), -1).contiguous()\n outt = outt3.view(outt.size(0), 2).contiguous()\n\n outputs.append(outt)\n else:\n outt = pg1.view(pg1.size(0) * pg1.size(0), 1).contiguous()\n outt2 = pg2.view(pg2.size(0) * pg2.size(0), 1).contiguous()\n outt3 = torch.cat((outt2, outt), -1).contiguous()\n outt = outt3.view(outt.size(0), 2).contiguous()\n\n outputs.append(outt)\n\n outputs = torch.cat(outputs, 0)\n\n return outputs" ]
[ [ "torch.norm", "torch.ones", "torch.max", "torch.cat", "torch.zeros", "torch.min", "torch.eye", "torch.matmul", "torch.eig", "torch.diag", "torch.autograd.Variable" ] ]
ncsoft/rotated-box-is-back
[ "52040333851bd2456a4e2f15347311cad3410636" ]
[ "rpn_util/generate_anchors.py" ]
[ "# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Sean Bell\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\n\n# Verify that we compute the same anchors as Shaoqing's matlab implementation:\n#\n# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat\n# >> anchors\n#\n# anchors =\n#\n# -83 -39 100 56\n# -175 -87 192 104\n# -359 -183 376 200\n# -55 -55 72 72\n# -119 -119 136 136\n# -247 -247 264 264\n# -35 -79 52 96\n# -79 -167 96 184\n# -167 -343 184 360\n\n# array([[ -83., -39., 100., 56.],\n# [-175., -87., 192., 104.],\n# [-359., -183., 376., 200.],\n# [ -55., -55., 72., 72.],\n# [-119., -119., 136., 136.],\n# [-247., -247., 264., 264.],\n# [ -35., -79., 52., 96.],\n# [ -79., -167., 96., 184.],\n# [-167., -343., 184., 360.]])\n\ndef generate_anchors(base_size=16, ratios=[0.5, 1, 2],\n scales=2 ** np.arange(3, 6)):\n \"\"\"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales wrt a reference (0, 0, 15, 15) window.\n \"\"\"\n\n base_anchor = np.array([1, 1, base_size, base_size]) - 1\n ratio_anchors = _ratio_enum(base_anchor, ratios)\n anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)\n for i in range(ratio_anchors.shape[0])])\n anchors = anchors.astype(np.float32)\n return anchors\n\n\ndef _whctrs(anchor):\n \"\"\"\n Return width, height, x center, and y center for an anchor (window).\n \"\"\"\n\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr\n\n\ndef _mkanchors(ws, hs, x_ctr, y_ctr):\n \"\"\"\n Given a vector of widths (ws) and heights (hs) around a center\n (x_ctr, y_ctr), output a set of anchors (windows).\n \"\"\"\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors\n\n\ndef _ratio_enum(anchor, ratios):\n \"\"\"\n Enumerate a set of anchors for each aspect ratio wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n size = w * h\n size_ratios = size / ratios\n ws = np.round(np.sqrt(size_ratios))\n hs = np.round(ws * ratios)\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n\ndef _scale_enum(anchor, scales):\n \"\"\"\n Enumerate a set of anchors for each scale wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n ws = w * scales\n hs = h * scales\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n\nif __name__ == '__main__':\n import time\n\n t = time.time()\n a = generate_anchors(base_size=4, ratios=[0.5, 1, 2],\n scales=np.arange(1, 2))\n print(time.time() - t)\n print(a)\n #from IPython import embed;\n\n #embed()\n" ]
[ [ "numpy.hstack", "numpy.sqrt", "numpy.arange", "numpy.round", "numpy.array" ] ]
Waste-NANDO/Mask_RCNN
[ "272fbeba35e62ce3a6772c9c70e62da9fcb4a40e" ]
[ "materials_trash_detector/trash_dataset.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\nDataset for Mask R-CNN\nConfigurations and data loading code for COCO format.\n\n@author: Mattia Brusamento\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport numpy as np\nimport json\n\n# Download and install the Python coco tools from https://github.com/waleedka/coco\n# That's a fork from the original https://github.com/pdollar/coco with a bug\n# fix for Python 3.\n# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50\n# If the PR is merged then use the original repo.\n# Note: Edit PythonAPI/Makefile and replace \"python\" with \"python3\".\nfrom pycocotools.coco import COCO\nfrom pycocotools import mask as maskUtils\n\nfrom mrcnn import model as modellib, utils\n\n\n############################################################\n# Dataset\n############################################################\n\nclass TrashDataset(utils.Dataset):\n\n def load_trash(self, data_dir, anno_file):\n print(\"Loading Trash Data:\" + str(data_dir) + \"\" + str(anno_file))\n trash = COCO(os.path.join(data_dir, anno_file))\n\n # Add classes\n class_ids = sorted(trash.getCatIds())\n for i in class_ids:\n self.add_class(\"trash\", i, trash.loadCats(i)[0][\"name\"])\n\n # Add images\n image_ids = list(trash.imgs.keys())\n\n for i in image_ids:\n current_annotation = []\n for a in trash.loadAnns(trash.getAnnIds()):\n if a[\"image_id\"] == i:\n current_annotation = a\n self.add_image(\n \"trash\", image_id=i,\n path=os.path.join(data_dir, trash.imgs[i]['file_name']),\n width=trash.imgs[i][\"width\"],\n height=trash.imgs[i][\"height\"],\n annotations=current_annotation) # annotations=[a for a in trash.loadAnns(trash.getAnnIds()) if a['image_id'] == str(i)]\n\n return trash\n\n def load_mask(self, image_id):\n \"\"\"Load instance masks for the given image.\n\n Different datasets use different ways to store masks. This\n function converts the different mask format to one format\n in the form of a bitmap [height, width, instances].\n\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n image_info = self.image_info[image_id]\n\n instance_masks = []\n class_ids = []\n annotation = image_info[\"annotations\"]\n if len(annotation) > 0:\n # Build mask of shape [height, width, instance_count] and list\n # of class IDs that correspond to each channel of the mask.\n # for annotation in annotations:\n class_id = self.map_source_class_id(\n \"trash.{}\".format(annotation[\"category_id\"]))\n if class_id:\n m = self.annToMask(annotation, image_info[\"height\"],\n image_info[\"width\"])\n # Some objects are so small that they're less than 1 pixel area\n # and end up rounded out. Skip those objects.\n if m.max() > 0:\n # continue\n # Is it a crowd? If so, use a negative class ID.\n if annotation['iscrowd']:\n # Use negative class ID for crowds\n class_id *= -1\n # For crowd masks, annToMask() sometimes returns a mask\n # smaller than the given dimensions. 
If so, resize it.\n if m.shape[0] != image_info[\"height\"] or m.shape[1] != image_info[\"width\"]:\n m = np.ones([image_info[\"height\"], image_info[\"width\"]], dtype=bool)\n instance_masks.append(m)\n class_ids.append(class_id)\n\n # Pack instance masks into an array\n if len(class_ids) > 0:\n mask = np.stack(instance_masks, axis=2).astype(np.bool)\n class_ids = np.array(class_ids, dtype=np.int32)\n return mask, class_ids\n else:\n # Call super class to return an empty mask\n return super(TrashDataset, self).load_mask(image_id)\n\n def image_reference(self, image_id):\n \"\"\"Return a link to the image in the trash Website.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"trash\":\n info['file_name']\n else:\n super(TrashDataset, self).image_reference(image_id)\n\n ######################################################################\n #### The following two functions are from pycocotools with a few changes.\n\n def annToRLE(self, ann, height, width):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE to RLE.\n :return: binary mask (numpy 2D array)\n \"\"\"\n segm = ann['segmentation']\n if isinstance(segm, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n # rle\n rle = ann['segmentation']\n return rle\n\n def annToMask(self, ann, height, width):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n :return: binary mask (numpy 2D array)\n \"\"\"\n rle = self.annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n return m\n" ]
[ [ "numpy.array", "numpy.stack", "numpy.ones" ] ]
omserta/xsocs
[ "5e1cf1352233498c48f0566e0b819e18373e95e5" ]
[ "xsocs/gui/view/fitview/FitModel.py" ]
[ "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2015-2017 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\nfrom __future__ import absolute_import\n\n__authors__ = [\"D. Naudet\"]\n__license__ = \"MIT\"\n__date__ = \"01/01/2017\"\n\nfrom silx.gui import qt as Qt\n\nimport numpy as np\n\nfrom ...model.ModelDef import ModelRoles\nfrom ...model.Model import Model, RootNode\n\nfrom ...project.Hdf5Nodes import H5File\nfrom ...project.Hdf5Nodes import H5Base, H5NodeClassDef\n\nfrom ....io.FitH5 import FitH5, FitH5QAxis\nfrom ....process.fitresults import FitStatus\n\nfrom ...widgets.XsocsPlot2D import XsocsPlot2D\n\n\ndef _grabWidget(widget):\n \"\"\"\n Grabs a widget and returns a pixmap.\n :param widget:\n :return:\n \"\"\"\n\n if int(Qt.qVersion().split('.')[0]) <= 4:\n pixmap = Qt.QPixmap.grabWidget(widget)\n else:\n pixmap = widget.grab()\n return pixmap\n\n\nclass PlotGrabber(XsocsPlot2D):\n \"\"\"\n XsocsPlot2D that can be converted to a pixmap.\n \"\"\"\n persistent = True\n\n def __init__(self, *args, **kwargs):\n super(PlotGrabber, self).__init__(*args, **kwargs)\n self._backend._enableAxis('left', False)\n self._backend._enableAxis('right', False)\n self._backend.ax.get_xaxis().set_visible(False)\n self._backend.ax.set_xmargin(0)\n self._backend.ax.set_ymargin(0)\n self.setActiveCurveHandling(False)\n self.setKeepDataAspectRatio(True)\n self.setDataMargins(0, 0, 0, 0)\n self.setCollaspibleMenuVisible(False)\n self.setPointWidgetVisible(False)\n\n def toPixmap(self):\n \"\"\"\n Returns a pixmap of the widget.\n :return:\n \"\"\"\n return _grabWidget(self)\n\n\n\n\n@H5NodeClassDef('FitH5')\nclass FitH5Node(H5File):\n \"\"\"\n Node linked to a FitH5 file.\n \"\"\"\n # TODO : check the file format (make sure that all required\n # groups/datasets are there)\n\n def _loadChildren(self):\n base = self.h5Path.rstrip('/')\n children = []\n with FitH5(self.h5File, mode='r') as h5f:\n entries = h5f.entries()\n\n for entry in entries:\n child = FitEntryNode(self.h5File, base + '/' + entry)\n children.append(child)\n\n return children\n\n\n@H5NodeClassDef('FitEntry')\nclass FitEntryNode(H5Base):\n \"\"\"\n Node linked to an entry in a FitH5 file.\n \"\"\"\n\n entry = property(lambda self: self.h5Path.lstrip('/').split('/')[0])\n\n def _loadChildren(self):\n base = self.h5Path.rstrip('/')\n 
entry = self.entry\n children = []\n\n with FitH5(self.h5File, mode='r') as h5f:\n processes = h5f.processes(entry)\n for process in processes:\n child = FitProcessNode(self.h5File, base + '/' + process)\n children.append(child)\n\n statusNode = FitStatusNode(self.h5File, base)\n children.append(statusNode)\n\n return children\n\n def mimeData(self, column, stream):\n # TODO : put column value in enum\n if column == 1:\n q_axis = FitH5QAxis.qx_axis\n elif column == 2:\n q_axis = FitH5QAxis.qy_axis\n elif column == 3:\n q_axis = FitH5QAxis.qz_axis\n else:\n raise ValueError('Unexpected column.')\n\n h5file = self.h5File\n entry = self.entry\n\n stream.writeQString(h5file)\n stream.writeQString(entry)\n stream.writeInt(q_axis)\n\n return True\n\n\nclass FitProcessNode(FitEntryNode):\n \"\"\"\n Node linked to a process group in a FitH5 file.\n \"\"\"\n process = property(lambda self: self.h5Path.lstrip('/').split('/')[1])\n\n def _loadChildren(self):\n base = self.h5Path.rstrip('/')\n entry = self.entry\n process = self.process\n children = []\n with FitH5(self.h5File, mode='r') as h5f:\n results = h5f.get_result_names(entry, process)\n for result in results:\n child = FitResultNode(self.h5File,\n base + '/' + result)\n children.append(child)\n\n return children\n\n\nclass FitStatusNode(FitEntryNode):\n \"\"\"\n Preview of the points where the fit has failed.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.dragEnabledColumns = [False, True, True, True]\n super(FitStatusNode, self).__init__(*args, **kwargs)\n self.nodeName = 'Status'\n\n self.__nErrors = [0, 0, 0]\n\n def _setupNode(self):\n width = 100\n plot = PlotGrabber()\n plot.setFixedSize(Qt.QSize(width, 100))\n plot.toPixmap()\n\n qApp = Qt.qApp\n qApp.processEvents()\n\n with FitH5(self.h5File) as fitH5:\n x = fitH5.scan_x(self.entry)\n y = fitH5.scan_y(self.entry)\n\n status = fitH5.get_qx_status(self.entry)\n errorPts = np.where(status != FitStatus.OK)[0]\n self.__nErrors[0] = len(errorPts)\n if len(errorPts) != 0:\n plot.setPlotData(x[errorPts], y[errorPts], status[errorPts])\n pixmap = plot.toPixmap()\n else:\n label = Qt.QLabel('No errors')\n label.setFixedWidth(width)\n label.setAlignment(Qt.Qt.AlignCenter)\n label.setAttribute(Qt.Qt.WA_TranslucentBackground)\n pixmap = _grabWidget(label)\n self.setData(1, pixmap, Qt.Qt.DecorationRole)\n qApp.processEvents()\n\n status = fitH5.get_qy_status(self.entry)\n errorPts = np.where(status != FitStatus.OK)[0]\n self.__nErrors[1] = len(errorPts)\n if len(errorPts) != 0:\n plot.setPlotData(x[errorPts], y[errorPts], status[errorPts])\n pixmap = plot.toPixmap()\n else:\n label = Qt.QLabel('No errors')\n label.setFixedWidth(width)\n label.setAlignment(Qt.Qt.AlignCenter)\n label.setAttribute(Qt.Qt.WA_TranslucentBackground)\n pixmap = _grabWidget(label)\n self.setData(2, pixmap, Qt.Qt.DecorationRole)\n qApp.processEvents()\n\n status = fitH5.get_qz_status(self.entry)\n errorPts = np.where(status != FitStatus.OK)[0]\n self.__nErrors[2] = len(errorPts)\n if len(errorPts) != 0:\n plot.setPlotData(x[errorPts], y[errorPts], status[errorPts])\n pixmap = plot.toPixmap()\n else:\n label = Qt.QLabel('No errors')\n label.setFixedWidth(width)\n label.setAlignment(Qt.Qt.AlignCenter)\n label.setAttribute(Qt.Qt.WA_TranslucentBackground)\n pixmap = _grabWidget(label)\n self.setData(3, pixmap, Qt.Qt.DecorationRole)\n qApp.processEvents()\n\n def _loadChildren(self):\n return []\n\n def mimeData(self, column, stream):\n\n if column < 1 or column > 3:\n return False\n\n if self.__nErrors[column - 1] == 
0:\n return False\n\n if not FitEntryNode.mimeData(self, column, stream):\n return False\n\n stream.writeQString('status')\n\n return True\n\n\nclass FitResultNode(FitProcessNode):\n \"\"\"\n Node linked to a result group in a FitH5 file.\n \"\"\"\n result = property(lambda self: self.h5Path.split('/')[-1])\n\n def __init__(self, *args, **kwargs):\n self.dragEnabledColumns = [False, True, True, True]\n super(FitResultNode, self).__init__(*args, **kwargs)\n\n def _setupNode(self):\n plot = PlotGrabber()\n plot.setFixedSize(Qt.QSize(100, 100))\n plot.toPixmap()\n\n qApp = Qt.qApp\n qApp.processEvents()\n\n with FitH5(self.h5File) as fitH5:\n x = fitH5.scan_x(self.entry)\n y = fitH5.scan_y(self.entry)\n\n xBorder = np.array([x.min(), x.max()])\n yBorder = np.array([y.min(), y.max()])\n\n data = fitH5.get_qx_result(self.entry,\n self.process,\n self.result)\n plot.addCurve(xBorder,\n yBorder,\n linestyle=' ',\n symbol='.',\n color='white',\n legend='__border',\n z=-1)\n plot.setPlotData(x, y, data)\n pixmap = plot.toPixmap()\n self.setData(1, pixmap, Qt.Qt.DecorationRole)\n qApp.processEvents()\n\n data = fitH5.get_qy_result(self.entry,\n self.process,\n self.result)\n plot.addCurve(xBorder,\n yBorder,\n linestyle=' ',\n symbol='.',\n color='white',\n legend='__border',\n z=-1)\n plot.setPlotData(x, y, data)\n pixmap = plot.toPixmap()\n self.setData(2, pixmap, Qt.Qt.DecorationRole)\n qApp.processEvents()\n\n data = fitH5.get_qz_result(self.entry,\n self.process,\n self.result)\n plot.addCurve(xBorder,\n yBorder,\n linestyle=' ',\n symbol='.',\n color='white',\n legend='__border',\n z=-1)\n plot.setPlotData(x, y, data)\n\n pixmap = plot.toPixmap()\n self.setData(3, pixmap, Qt.Qt.DecorationRole)\n qApp.processEvents()\n\n def _loadChildren(self):\n return []\n\n def mimeData(self, column, stream):\n if not FitProcessNode.mimeData(self, column, stream):\n return False\n\n process = self.process\n result = self.result\n stream.writeQString('result')\n stream.writeQString(process)\n stream.writeQString(result)\n\n return True\n\n\nclass FitRootNode(RootNode):\n \"\"\"\n Root node for the FitModel\n \"\"\"\n ColumnNames = ['Param', 'Qx', 'Qy', 'Qz']\n\n\nclass FitModel(Model):\n \"\"\"\n Model displaying a FitH5 file contents.\n \"\"\"\n RootNode = FitRootNode\n ColumnsWithDelegates = [1, 2, 3]\n\n def mimeData(self, indexes):\n if len(indexes) > 1:\n raise ValueError('Drag&Drop of more than one item is not'\n 'supported yet.')\n\n mimeData = Qt.QMimeData()\n\n index = indexes[0]\n node = index.data(ModelRoles.InternalDataRole)\n\n if not isinstance(node, (FitResultNode, FitStatusNode)):\n return super(Model, self).mimeData(indexes)\n\n data = Qt.QByteArray()\n stream = Qt.QDataStream(data, Qt.QIODevice.WriteOnly)\n if node.mimeData(index.column(), stream):\n mimeData.setData('application/FitModel', data)\n\n return mimeData\n\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "numpy.where" ] ]
ZiwenZhuang/rlpyt
[ "95a05a86f576190cf6217fd9aad7b5f615ee97d1", "95a05a86f576190cf6217fd9aad7b5f615ee97d1" ]
[ "rlpyt/utils/logging/logger.py", "rlpyt/utils/seed.py" ]
[ "from enum import Enum\n\nfrom exptools.logging.tabulate import tabulate\nfrom exptools.logging.console import mkdir_p, colorize\nfrom exptools.logging.autoargs import get_all_parameters\nfrom contextlib import contextmanager\nimport numpy as np\nimport os\nimport os.path as osp\nimport sys\nimport datetime\n# import dateutil.tz\nimport csv\n# import joblib\nimport json\nimport pickle\nimport base64\nimport torch\n\n_prefixes = []\n_prefix_str = ''\n\n_tabular_prefixes = []\n_tabular_prefix_str = ''\n\n_tabular = []\n\n_text_outputs = []\n_tabular_outputs = []\n\n_text_fds = {}\n_tabular_fds = {} # key: file_name, value: open file\n_tabular_fds_hold = {}\n_tabular_header_written = set()\n\n_snapshot_dir = None\n_snapshot_mode = 'all'\n_snapshot_gap = 1\n\n_log_tabular_only = False\n_header_printed = False\n_disable_prefix = False\n\n_tf_summary_dir = None\n_tf_summary_writer = None\n\n_disabled = False\n_tabular_disabled = False\n\n_iteration = 0\n\n\ndef disable():\n global _disabled\n _disabled = True\n\n\ndef disable_tabular():\n global _tabular_disabled\n _tabular_disabled = True\n\n\ndef enable():\n global _disabled\n _disabled = False\n\n\ndef enable_tabular():\n global _tabular_disabled\n _tabular_disabled = False\n\n\ndef set_iteration(iteration):\n global _iteration\n _iteration = iteration\n\n\ndef _add_output(file_name, arr, fds, mode='a'):\n if file_name not in arr:\n mkdir_p(os.path.dirname(file_name))\n arr.append(file_name)\n fds[file_name] = open(file_name, mode)\n\n\ndef _remove_output(file_name, arr, fds):\n if file_name in arr:\n fds[file_name].close()\n del fds[file_name]\n arr.remove(file_name)\n\n\ndef push_prefix(prefix):\n _prefixes.append(prefix)\n global _prefix_str\n _prefix_str = ''.join(_prefixes)\n\n\ndef add_text_output(file_name):\n _add_output(file_name, _text_outputs, _text_fds, mode='a')\n\n\ndef remove_text_output(file_name):\n _remove_output(file_name, _text_outputs, _text_fds)\n\n\ndef add_tabular_output(file_name):\n if file_name in _tabular_fds_hold.keys():\n _tabular_outputs.append(file_name)\n _tabular_fds[file_name] = _tabular_fds_hold[file_name]\n else:\n _add_output(file_name, _tabular_outputs, _tabular_fds, mode='w')\n\n\ndef remove_tabular_output(file_name):\n if file_name in _tabular_header_written:\n _tabular_header_written.remove(file_name)\n _remove_output(file_name, _tabular_outputs, _tabular_fds)\n\n\ndef hold_tabular_output(file_name):\n # what about _tabular_header_written?\n if file_name in _tabular_outputs:\n _tabular_outputs.remove(file_name)\n _tabular_fds_hold[file_name] = _tabular_fds.pop(file_name)\n\n\ndef set_snapshot_dir(dir_name):\n mkdir_p(dir_name)\n global _snapshot_dir\n _snapshot_dir = dir_name\n\n\ndef get_snapshot_dir():\n return _snapshot_dir\n\n\ndef set_tf_summary_dir(dir_name):\n global _tf_summary_dir\n _tf_summary_dir = dir_name\n\n\ndef get_tf_summary_dir():\n return _tf_summary_dir\n\n\ndef set_tf_summary_writer(writer_name):\n global _tf_summary_writer\n _tf_summary_writer = writer_name\n\n\ndef get_tf_summary_writer():\n return _tf_summary_writer\n\n\ndef get_snapshot_mode():\n return _snapshot_mode\n\n\ndef set_snapshot_mode(mode):\n global _snapshot_mode\n _snapshot_mode = mode\n\n\ndef get_snapshot_gap():\n return _snapshot_gap\n\n\ndef set_snapshot_gap(gap):\n global _snapshot_gap\n _snapshot_gap = gap\n\n\ndef set_log_tabular_only(log_tabular_only):\n global _log_tabular_only\n _log_tabular_only = log_tabular_only\n\n\ndef get_log_tabular_only():\n return _log_tabular_only\n\n\ndef 
set_disable_prefix(disable_prefix):\n global _disable_prefix\n _disable_prefix = disable_prefix\n\n\ndef get_disable_prefix():\n return _disable_prefix\n\n\ndef log(s, with_prefix=True, with_timestamp=True, color=None):\n if not _disabled:\n out = s\n if with_prefix and not _disable_prefix:\n out = _prefix_str + out\n if with_timestamp:\n now = datetime.datetime.now() # dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')\n out = \"%s | %s\" % (timestamp, out)\n if color is not None:\n out = colorize(out, color)\n if not _log_tabular_only:\n # Also log to stdout\n print(out)\n for fd in list(_text_fds.values()):\n fd.write(out + '\\n')\n fd.flush()\n sys.stdout.flush()\n\n\ndef record_tabular(key, val, *args, **kwargs):\n # if not _disabled and not _tabular_disabled:\n key = _tabular_prefix_str + str(key)\n _tabular.append((key, str(val)))\n if _tf_summary_writer is not None:\n _tf_summary_writer.add_scalar(key, val, _iteration)\n\n\ndef push_tabular_prefix(key):\n _tabular_prefixes.append(key)\n global _tabular_prefix_str\n _tabular_prefix_str = ''.join(_tabular_prefixes)\n\n\ndef pop_tabular_prefix():\n del _tabular_prefixes[-1]\n global _tabular_prefix_str\n _tabular_prefix_str = ''.join(_tabular_prefixes)\n\n\n@contextmanager\ndef prefix(key):\n push_prefix(key)\n try:\n yield\n finally:\n pop_prefix()\n\n\n@contextmanager\ndef tabular_prefix(key):\n push_tabular_prefix(key)\n yield\n pop_tabular_prefix()\n\n\nclass TerminalTablePrinter:\n def __init__(self):\n self.headers = None\n self.tabulars = []\n\n def print_tabular(self, new_tabular):\n if self.headers is None:\n self.headers = [x[0] for x in new_tabular]\n else:\n assert len(self.headers) == len(new_tabular)\n self.tabulars.append([x[1] for x in new_tabular])\n self.refresh()\n\n def refresh(self):\n import os\n rows, columns = os.popen('stty size', 'r').read().split()\n tabulars = self.tabulars[-(int(rows) - 3):]\n sys.stdout.write(\"\\x1b[2J\\x1b[H\")\n sys.stdout.write(tabulate(tabulars, self.headers))\n sys.stdout.write(\"\\n\")\n\n\ntable_printer = TerminalTablePrinter()\n\n_tabular_headers = dict() # keys are file_names and values are the keys of the header of that tabular file\n\n\ndef dump_tabular(*args, **kwargs):\n if not _disabled: # and not _tabular_disabled:\n wh = kwargs.pop(\"write_header\", None)\n if len(_tabular) > 0:\n if _log_tabular_only:\n table_printer.print_tabular(_tabular)\n else:\n for line in tabulate(_tabular).split('\\n'):\n log(line, *args, **kwargs)\n if not _tabular_disabled:\n tabular_dict = dict(_tabular)\n # Also write to the csv files\n # This assumes that the keys in each iteration won't change!\n for tabular_file_name, tabular_fd in list(_tabular_fds.items()):\n keys = tabular_dict.keys()\n if tabular_file_name in _tabular_headers:\n # check against existing keys: if new keys re-write Header and pad with NaNs\n existing_keys = _tabular_headers[tabular_file_name]\n if not set(existing_keys).issuperset(set(keys)):\n joint_keys = set(keys).union(set(existing_keys))\n tabular_fd.flush()\n read_fd = open(tabular_file_name, 'r')\n reader = csv.DictReader(read_fd)\n rows = list(reader)\n read_fd.close()\n tabular_fd.close()\n tabular_fd = _tabular_fds[tabular_file_name] = open(tabular_file_name, 'w')\n new_writer = csv.DictWriter(tabular_fd, fieldnames=list(joint_keys))\n new_writer.writeheader()\n for row in rows:\n for key in joint_keys:\n if key not in row:\n row[key] = np.nan\n new_writer.writerows(rows)\n _tabular_headers[tabular_file_name] = list(joint_keys)\n 
else:\n _tabular_headers[tabular_file_name] = keys\n\n writer = csv.DictWriter(tabular_fd, fieldnames=_tabular_headers[tabular_file_name]) # list(\n if wh or (wh is None and tabular_file_name not in _tabular_header_written):\n writer.writeheader()\n _tabular_header_written.add(tabular_file_name)\n _tabular_headers[tabular_file_name] = keys\n # add NaNs in all empty fields from the header\n for key in _tabular_headers[tabular_file_name]:\n if key not in tabular_dict:\n tabular_dict[key] = np.nan\n writer.writerow(tabular_dict)\n tabular_fd.flush()\n del _tabular[:]\n\n\ndef pop_prefix():\n del _prefixes[-1]\n global _prefix_str\n _prefix_str = ''.join(_prefixes)\n\n\ndef save_itr_params(itr, params):\n if _snapshot_dir:\n if _snapshot_mode == 'all':\n file_name = osp.join(get_snapshot_dir(), 'itr_%d.pkl' % itr)\n elif _snapshot_mode == 'last':\n # override previous params\n file_name = osp.join(get_snapshot_dir(), 'params.pkl')\n elif _snapshot_mode == \"gap\":\n if itr == 0 or (itr + 1) % _snapshot_gap == 0:\n file_name = osp.join(get_snapshot_dir(), 'itr_%d.pkl' % itr)\n else:\n return\n elif _snapshot_mode == \"last+gap\":\n if itr == 0 or (itr + 1) % _snapshot_gap == 0:\n file_name = osp.join(get_snapshot_dir(), 'itr_%d.pkl' % itr)\n torch.save(params, file_name)\n file_name = osp.join(get_snapshot_dir(), 'params.pkl')\n elif _snapshot_mode == 'none':\n return\n else:\n raise NotImplementedError\n torch.save(params, file_name)\n\n\ndef log_parameters(log_file, args, classes):\n log_params = {}\n for param_name, param_value in args.__dict__.items():\n if any([param_name.startswith(x) for x in list(classes.keys())]):\n continue\n log_params[param_name] = param_value\n for name, cls in classes.items():\n if isinstance(cls, type):\n params = get_all_parameters(cls, args)\n params[\"_name\"] = getattr(args, name)\n log_params[name] = params\n else:\n log_params[name] = getattr(cls, \"__kwargs\", dict())\n log_params[name][\"_name\"] = cls.__module__ + \".\" + cls.__class__.__name__\n mkdir_p(os.path.dirname(log_file))\n with open(log_file, \"w\") as f:\n json.dump(log_params, f, indent=2, sort_keys=True)\n\n\ndef stub_to_json(stub_sth):\n from rllab.misc import instrument\n from rllab.misc import instrument2\n if isinstance(stub_sth, instrument.StubObject) or isinstance(stub_sth, instrument2.StubObject):\n assert len(stub_sth.args) == 0\n data = dict()\n for k, v in stub_sth.kwargs.items():\n data[k] = stub_to_json(v)\n data[\"_name\"] = stub_sth.proxy_class.__module__ + \".\" + stub_sth.proxy_class.__name__\n return data\n elif isinstance(stub_sth, instrument.StubAttr) or isinstance(stub_sth, instrument2.StubAttr):\n return dict(\n obj=stub_to_json(stub_sth.obj),\n attr=stub_to_json(stub_sth.attr_name)\n )\n elif isinstance(stub_sth, instrument.StubMethodCall) or isinstance(stub_sth, instrument2.StubMethodCall):\n return dict(\n obj=stub_to_json(stub_sth.obj),\n method_name=stub_to_json(stub_sth.method_name),\n args=stub_to_json(stub_sth.args),\n kwargs=stub_to_json(stub_sth.kwargs),\n )\n elif isinstance(stub_sth, instrument.BinaryOp) or isinstance(stub_sth, instrument2.BinaryOp):\n return \"binary_op\"\n elif isinstance(stub_sth, instrument.StubClass) or isinstance(stub_sth, instrument2.StubClass):\n return stub_sth.proxy_class.__module__ + \".\" + stub_sth.proxy_class.__name__\n elif isinstance(stub_sth, dict):\n return {stub_to_json(k): stub_to_json(v) for k, v in stub_sth.items()}\n elif isinstance(stub_sth, (list, tuple)):\n return list(map(stub_to_json, stub_sth))\n elif 
type(stub_sth) == type(lambda: None):\n if stub_sth.__module__ is not None:\n return stub_sth.__module__ + \".\" + stub_sth.__name__\n return stub_sth.__name__\n elif \"theano\" in str(type(stub_sth)):\n return repr(stub_sth)\n return stub_sth\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, type):\n return {'$class': o.__module__ + \".\" + o.__name__}\n elif isinstance(o, Enum):\n return {'$enum': o.__module__ + \".\" + o.__class__.__name__ + '.' + o.name}\n return json.JSONEncoder.default(self, o)\n\n\ndef log_parameters_lite(log_file, args):\n log_params = {}\n for param_name, param_value in args.__dict__.items():\n log_params[param_name] = param_value\n if args.args_data is not None:\n stub_method = pickle.loads(base64.b64decode(args.args_data))\n method_args = stub_method.kwargs\n log_params[\"json_args\"] = dict()\n for k, v in list(method_args.items()):\n log_params[\"json_args\"][k] = stub_to_json(v)\n kwargs = stub_method.obj.kwargs\n for k in [\"baseline\", \"env\", \"policy\"]:\n if k in kwargs:\n log_params[\"json_args\"][k] = stub_to_json(kwargs.pop(k))\n log_params[\"json_args\"][\"algo\"] = stub_to_json(stub_method.obj)\n mkdir_p(os.path.dirname(log_file))\n with open(log_file, \"w\") as f:\n json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)\n\n\ndef log_variant(log_file, variant_data):\n mkdir_p(os.path.dirname(log_file))\n if hasattr(variant_data, \"dump\"):\n variant_data = variant_data.dump()\n variant_json = stub_to_json(variant_data)\n with open(log_file, \"w\") as f:\n json.dump(variant_json, f, indent=2, sort_keys=True, cls=MyEncoder)\n\n\ndef record_tabular_misc_stat(key, values, placement='back'):\n if placement == 'front':\n prefix = \"\"\n suffix = key\n else:\n prefix = key\n suffix = \"\"\n if _tf_summary_writer is not None:\n prefix += \"/\" # Group stats together in Tensorboard.\n if len(values) > 0:\n record_tabular(prefix + \"Average\" + suffix, np.average(values))\n record_tabular(prefix + \"Std\" + suffix, np.std(values))\n record_tabular(prefix + \"Median\" + suffix, np.median(values))\n record_tabular(prefix + \"Min\" + suffix, np.min(values))\n record_tabular(prefix + \"Max\" + suffix, np.max(values))\n else:\n record_tabular(prefix + \"Average\" + suffix, np.nan)\n record_tabular(prefix + \"Std\" + suffix, np.nan)\n record_tabular(prefix + \"Median\" + suffix, np.nan)\n record_tabular(prefix + \"Min\" + suffix, np.nan)\n record_tabular(prefix + \"Max\" + suffix, np.nan)\n", "\nimport numpy as np\nimport time\n\nfrom exptools.logging.console import colorize\n\nseed_ = None\n\n\ndef set_seed(seed):\n \"\"\"Sets random.seed, np.random.seed, torch.manual_seed,\n torch.cuda.manual_seed.\"\"\"\n seed %= 4294967294\n global seed_\n seed_ = seed\n import random\n random.seed(seed)\n np.random.seed(seed)\n import torch\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n print(colorize(f\"using seed {seed}\", \"green\"))\n\n\ndef get_seed():\n return seed_\n\n\ndef make_seed():\n \"\"\"\n Returns a random number between [0, 10000], using timing jitter.\n\n This has a white noise spectrum and gives unique values for multiple\n simultaneous processes...some simpler attempts did not achieve that, but\n there's probably a better way.\n \"\"\"\n d = 10000\n t = time.time()\n sub1 = int(t * d) % d\n sub2 = int(t * d ** 2) % d\n s = 1e-3\n s_inv = 1. 
/ s\n time.sleep(s * sub2 / d)\n t2 = time.time()\n t2 = t2 - int(t2)\n t2 = int(t2 * d * s_inv) % d\n time.sleep(s * sub1 / d)\n t3 = time.time()\n t3 = t3 - int(t3)\n t3 = int(t3 * d * s_inv * 10) % d\n return (t3 - t2) % d\n\n\ndef set_envs_seeds(envs, seed):\n \"\"\"Set different seeds for a collection of envs, if applicable. Standard\n rlpyt envs and spaces don't necessarily have this method, but gym envs\n and spaces do.\"\"\"\n if seed is not None:\n for i, env in enumerate(envs):\n if hasattr(env, \"seed\"): # e.g. Gym environments have seed.\n env.seed(seed + i)\n if hasattr(env.action_space, \"seed\"): # e.g. Gym spaces have seed.\n env.action_space.seed(seed + i)\n if hasattr(env.observation_space, \"seed\"):\n env.observation_space.seed(seed + i)\n" ]
[ [ "numpy.min", "numpy.median", "numpy.max", "numpy.std", "numpy.average", "torch.save" ], [ "torch.manual_seed", "torch.cuda.manual_seed", "numpy.random.seed" ] ]
hkaraoguz/RRPN
[ "4d09535cc383d6311738cda0c6c7cde3eeca05d8" ]
[ "lib/setup.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport os\nfrom os.path import join as pjoin\nfrom setuptools import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nimport subprocess\nimport numpy as np\n\ndef find_in_path(name, path):\n \"Find a file in a search path\"\n # Adapted fom\n # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/\n for dir in path.split(os.pathsep):\n binpath = pjoin(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None\n\n\ndef locate_cuda():\n \"\"\"Locate the CUDA environment on the system\n\n Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'\n and values giving the absolute path to each directory.\n\n Starts by looking for the CUDAHOME env variable. If not found, everything\n is based on finding 'nvcc' in the PATH.\n \"\"\"\n\n # first check if the CUDAHOME env variable is in use\n if 'CUDAHOME' in os.environ:\n home = os.environ['CUDAHOME']\n nvcc = pjoin(home, 'bin', 'nvcc')\n else:\n # otherwise, search the PATH for NVCC\n default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')\n nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)\n if nvcc is None:\n raise EnvironmentError('The nvcc binary could not be '\n 'located in your $PATH. Either add it to your path, or set $CUDAHOME')\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {'home':home, 'nvcc':nvcc,\n 'include': pjoin(home, 'include'),\n 'lib64': pjoin(home, 'lib64')}\n for k, v in cudaconfig.iteritems():\n if not os.path.exists(v):\n raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))\n\n return cudaconfig\nCUDA = locate_cuda()\n\n\n# Obtain the numpy include directory. This logic works across numpy versions.\ntry:\n numpy_include = np.get_include()\nexcept AttributeError:\n numpy_include = np.get_numpy_include()\n\ndef customize_compiler_for_nvcc(self):\n \"\"\"inject deep into distutils to customize how the dispatch\n to gcc/nvcc works.\n\n If you subclass UnixCCompiler, it's not trivial to get your subclass\n injected in, and still have the right customizations (i.e.\n distutils.sysconfig.customize_compiler) run on it. So instead of going\n the OO route, I have this. Note, it's kindof like a wierd functional\n subclassing going on.\"\"\"\n\n # tell the compiler it can processes .cu\n self.src_extensions.append('.cu')\n\n # save references to the default compiler_so and _comple methods\n default_compiler_so = self.compiler_so\n super = self._compile\n\n # now redefine the _compile method. 
This gets executed for each\n # object but distutils doesn't have the ability to change compilers\n # based on source extension: we add it.\n def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):\n if os.path.splitext(src)[1] == '.cu':\n # use the cuda for .cu files\n self.set_executable('compiler_so', CUDA['nvcc'])\n # use only a subset of the extra_postargs, which are 1-1 translated\n # from the extra_compile_args in the Extension class\n postargs = extra_postargs['nvcc']\n else:\n postargs = extra_postargs['gcc']\n\n super(obj, src, ext, cc_args, postargs, pp_opts)\n # reset the default compiler_so, which we might have changed for cuda\n self.compiler_so = default_compiler_so\n\n # inject our redefined _compile method into the class\n self._compile = _compile\n\n\n# run the customize_compiler\nclass custom_build_ext(build_ext):\n def build_extensions(self):\n customize_compiler_for_nvcc(self.compiler)\n build_ext.build_extensions(self)\n\n\next_modules = [\n Extension(\n \"utils.cython_bbox\",\n [\"utils/bbox.pyx\"],\n extra_compile_args={'gcc': [\"-Wno-cpp\", \"-Wno-unused-function\"]},\n include_dirs = [numpy_include]\n ),\n Extension(\n \"nms.cpu_nms\",\n [\"nms/cpu_nms.pyx\"],\n extra_compile_args={'gcc': [\"-Wno-cpp\", \"-Wno-unused-function\"]},\n include_dirs = [numpy_include]\n ),\n\n Extension(\n \"rotation.rotate_cython_nms\",\n [\"rotation/rotate_cython_nms.pyx\"],\n extra_compile_args={'gcc': [\"-Wno-cpp\", \"-Wno-unused-function\"]},\n include_dirs = [numpy_include]\n ),\n\n Extension(\n \"rotation.rotate_circle_nms\",\n [\"rotation/rotate_circle_nms.pyx\"],\n extra_compile_args={'gcc': [\"-Wno-cpp\", \"-Wno-unused-function\"]},\n include_dirs = [numpy_include]\n ),\n\n Extension('nms.gpu_nms',\n ['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],\n library_dirs=[CUDA['lib64']],\n libraries=['cudart'],\n language='c++',\n runtime_library_dirs=[CUDA['lib64']],\n # this syntax is specific to this build system\n # we're only going to use certain compiler args with nvcc and not with\n # gcc the implementation of this trick is in customize_compiler() below\n extra_compile_args={'gcc': [\"-Wno-unused-function\"],\n 'nvcc': ['-arch=sm_61',\n '--ptxas-options=-v',\n '-c',\n '--compiler-options',\n \"'-fPIC'\"]},\n include_dirs = [numpy_include, CUDA['include']]\n ),\n Extension('rotation.rotate_gpu_nms',\n ['rotation/rotate_nms_kernel.cu', 'rotation/rotate_gpu_nms.pyx'],\n library_dirs=[CUDA['lib64']],\n libraries=['cudart'],\n language='c++',\n runtime_library_dirs=[CUDA['lib64']],\n # this syntax is specific to this build system\n # we're only going to use certain compiler args with nvcc anrbd not with\n # gcc the implementation of this trick is in customize_compiler() below\n extra_compile_args={'gcc': [\"-Wno-unused-function\"],\n 'nvcc': ['-arch=sm_61',\n '--ptxas-options=-v',\n '-c',\n '--compiler-options',\n \"'-fPIC'\"]},\n include_dirs = [numpy_include, CUDA['include']]\n ),\n Extension('rotation.rbbox_overlaps',\n ['rotation/rbbox_overlaps_kernel.cu', 'rotation/rbbox_overlaps.pyx'],\n library_dirs=[CUDA['lib64']],\n libraries=['cudart'],\n language='c++',\n runtime_library_dirs=[CUDA['lib64']],\n # this syntax is specific to this build system\n # we're only going to use certain compiler args with nvcc and not with\n # gcc the implementation of this trick is in customize_compiler() below\n extra_compile_args={'gcc': [\"-Wno-unused-function\"],\n 'nvcc': ['-arch=sm_61',\n '--ptxas-options=-v',\n '-c',\n '--compiler-options',\n \"'-fPIC'\"]},\n include_dirs = [numpy_include, 
CUDA['include']]\n ),\n Extension('rotation.rotate_polygon_nms',\n ['rotation/rotate_polygon_nms_kernel.cu', 'rotation/rotate_polygon_nms.pyx'],\n library_dirs=[CUDA['lib64']],\n libraries=['cudart'],\n language='c++',\n runtime_library_dirs=[CUDA['lib64']],\n # this syntax is specific to this build system\n # we're only going to use certain compiler args with nvcc and not with\n # gcc the implementation of this trick is in customize_compiler() below\n extra_compile_args={'gcc': [\"-Wno-unused-function\"],\n 'nvcc': ['-arch=sm_61',\n '--ptxas-options=-v',\n '-c',\n '--compiler-options',\n \"'-fPIC'\"]},\n include_dirs = [numpy_include, CUDA['include']]\n ),\n\n Extension(\n 'pycocotools._mask',\n sources=['pycocotools/maskApi.c', 'pycocotools/_mask.pyx'],\n include_dirs = [numpy_include, 'pycocotools'],\n extra_compile_args={\n 'gcc': ['-Wno-cpp', '-Wno-unused-function', '-std=c99']},\n ),\n]\n\nsetup(\n name='fast_rcnn',\n ext_modules=ext_modules,\n # inject our custom trigger\n cmdclass={'build_ext': custom_build_ext},\n)\n" ]
[ [ "numpy.get_numpy_include", "numpy.get_include" ] ]
Shanlans/tensorflow-deeplab-resnet
[ "6d1035221b67fed81058bf84c96e56d8f4e62a89" ]
[ "kaffe/tensorflow/network.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport pdb\n\nslim = tf.contrib.slim\n\nDEFAULT_PADDING = 'SAME'\n\n\ndef layer(op):\n '''Decorator for composable network layers.'''\n\n def layer_decorated(self, *args, **kwargs):\n # Automatically set a name if not provided.\n name = kwargs.setdefault('name', self.get_unique_name(op.__name__))\n # Figure out the layer inputs.\n if len(self.terminals) == 0:\n raise RuntimeError('No input variables found for layer %s.' % name)\n elif len(self.terminals) == 1:\n layer_input = self.terminals[0]\n else:\n layer_input = list(self.terminals)\n # Perform the operation and get the output.\n layer_output = op(self, layer_input, *args, **kwargs)\n # Add to layer LUT.\n self.layers[name] = layer_output\n # This output is now the input for the next layer.\n self.feed(layer_output)\n # Return self for chained calls.\n return self\n\n return layer_decorated\n\n\nclass Network(object):\n\n def __init__(self, inputs, trainable=True, is_training=False, num_classes=21):\n # The input nodes for this network\n self.inputs = inputs\n # The current list of terminal nodes\n self.terminals = []\n # Mapping from layer names to layers\n self.layers = dict(inputs)\n # If true, the resulting variables are set as trainable\n self.trainable = trainable\n # Switch variable for dropout\n self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),\n shape=[],\n name='use_dropout')\n self.setup(is_training, num_classes)\n\n def setup(self, is_training):\n '''Construct the network. '''\n raise NotImplementedError('Must be implemented by the subclass.')\n\n def load(self, data_path, session, ignore_missing=False):\n '''Load network weights.\n data_path: The path to the numpy-serialized network weights\n session: The current TensorFlow session\n ignore_missing: If true, serialized weights for missing layers are ignored.\n '''\n data_dict = np.load(data_path).item()\n for op_name in data_dict:\n pdb.set_trace()\n with tf.variable_scope(op_name, reuse=True):\n for param_name, data in data_dict[op_name].iteritems():\n try:\n var = tf.get_variable(param_name)\n session.run(var.assign(data))\n except ValueError:\n if not ignore_missing:\n raise\n\n def feed(self, *args):\n '''Set the input(s) for the next operation by replacing the terminal nodes.\n The arguments can be either layer names or the actual layers.\n '''\n assert len(args) != 0\n self.terminals = []\n for fed_layer in args:\n if isinstance(fed_layer, str):\n try:\n fed_layer = self.layers[fed_layer]\n except KeyError:\n raise KeyError('Unknown layer name fed: %s' % fed_layer)\n self.terminals.append(fed_layer)\n return self\n\n def get_output(self):\n '''Returns the current network output.'''\n return self.terminals[-1]\n\n def get_unique_name(self, prefix):\n '''Returns an index-suffixed unique name for the given prefix.\n This is used for auto-generating layer names based on the type-prefix.\n '''\n ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1\n return '%s_%d' % (prefix, ident)\n\n def make_var(self, name, shape):\n '''Creates a new TensorFlow variable.'''\n return tf.get_variable(name, shape, trainable=self.trainable)\n\n def validate_padding(self, padding):\n '''Verifies that the padding is one of the supported ones.'''\n assert padding in ('SAME', 'VALID')\n\n @layer\n def conv(self,\n input,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n name,\n relu=True,\n padding=DEFAULT_PADDING,\n group=1,\n biased=True):\n # Verify that the padding is acceptable\n self.validate_padding(padding)\n # Get the 
number of channels in the input\n c_i = input.get_shape().as_list()[-1]\n # Verify that the grouping parameter is valid\n assert c_i % group == 0\n assert c_o % group == 0\n # Convolution for a given input and kernel\n convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)\n with tf.variable_scope(name) as scope:\n kernel = self.make_var('weights', shape=[k_h, k_w, c_i / group, c_o])\n if group == 1:\n # This is the common-case. Convolve the input without any further complications.\n output = convolve(input, kernel)\n else:\n # Split the input into groups and then convolve each of them independently\n input_groups = tf.split(3, group, input)\n kernel_groups = tf.split(3, group, kernel)\n output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]\n # Concatenate the groups\n output = tf.concat(3, output_groups)\n # Add the biases\n if biased:\n biases = self.make_var('biases', [c_o])\n output = tf.nn.bias_add(output, biases)\n if relu:\n # ReLU non-linearity\n output = tf.nn.relu(output, name=scope.name)\n return output\n\n @layer\n def atrous_conv(self,\n input,\n k_h,\n k_w,\n c_o,\n dilation,\n name,\n relu=True,\n padding=DEFAULT_PADDING,\n group=1,\n biased=True):\n # Verify that the padding is acceptable\n self.validate_padding(padding)\n # Get the number of channels in the input\n c_i = input.get_shape().as_list()[-1]\n # Verify that the grouping parameter is valid\n assert c_i % group == 0\n assert c_o % group == 0\n # Convolution for a given input and kernel\n convolve = lambda i, k: tf.nn.atrous_conv2d(i, k, dilation, padding=padding)\n with tf.variable_scope(name) as scope:\n kernel = self.make_var('weights', shape=[k_h, k_w, c_i / group, c_o])\n if group == 1:\n # This is the common-case. Convolve the input without any further complications.\n output = convolve(input, kernel)\n else:\n # Split the input into groups and then convolve each of them independently\n input_groups = tf.split(3, group, input)\n kernel_groups = tf.split(3, group, kernel)\n output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]\n # Concatenate the groups\n output = tf.concat(3, output_groups)\n # Add the biases\n if biased:\n biases = self.make_var('biases', [c_o])\n output = tf.nn.bias_add(output, biases)\n if relu:\n # ReLU non-linearity\n output = tf.nn.relu(output, name=scope.name)\n return output\n \n @layer\n def relu(self, input, name):\n return tf.nn.relu(input, name=name)\n\n @layer\n def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):\n self.validate_padding(padding)\n return tf.nn.max_pool(input,\n ksize=[1, k_h, k_w, 1],\n strides=[1, s_h, s_w, 1],\n padding=padding,\n name=name)\n\n @layer\n def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):\n self.validate_padding(padding)\n return tf.nn.avg_pool(input,\n ksize=[1, k_h, k_w, 1],\n strides=[1, s_h, s_w, 1],\n padding=padding,\n name=name)\n\n @layer\n def lrn(self, input, radius, alpha, beta, name, bias=1.0):\n return tf.nn.local_response_normalization(input,\n depth_radius=radius,\n alpha=alpha,\n beta=beta,\n bias=bias,\n name=name)\n\n @layer\n def concat(self, inputs, axis, name):\n return tf.concat(concat_dim=axis, values=inputs, name=name)\n\n @layer\n def add(self, inputs, name):\n return tf.add_n(inputs, name=name)\n\n @layer\n def fc(self, input, num_out, name, relu=True):\n with tf.variable_scope(name) as scope:\n input_shape = input.get_shape()\n if input_shape.ndims == 4:\n # The input is spatial. 
Vectorize it first.\n dim = 1\n for d in input_shape[1:].as_list():\n dim *= d\n feed_in = tf.reshape(input, [-1, dim])\n else:\n feed_in, dim = (input, input_shape[-1].value)\n weights = self.make_var('weights', shape=[dim, num_out]) \n biases = self.make_var('biases', [num_out])\n op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b\n fc = op(feed_in, weights, biases, name=scope.name)\n return fc\n\n @layer\n def softmax(self, input, name):\n input_shape = map(lambda v: v.value, input.get_shape())\n if len(input_shape) > 2:\n # For certain models (like NiN), the singleton spatial dimensions\n # need to be explicitly squeezed, since they're not broadcast-able\n # in TensorFlow's NHWC ordering (unlike Caffe's NCHW).\n if input_shape[1] == 1 and input_shape[2] == 1:\n input = tf.squeeze(input, squeeze_dims=[1, 2])\n else:\n raise ValueError('Rank 2 tensor input expected for softmax!')\n return tf.nn.softmax(input, name)\n \n @layer\n def batch_normalization(self, input, name, is_training, activation_fn=None, scale=True):\n with tf.variable_scope(name) as scope:\n output = slim.batch_norm(\n input,\n activation_fn=activation_fn,\n is_training=is_training,\n updates_collections=None,\n scale=scale,\n scope=scope)\n return output\n\n @layer\n def dropout(self, input, keep_prob, name):\n keep = 1 - self.use_dropout + (self.use_dropout * keep_prob)\n return tf.nn.dropout(input, keep, name=name)\n" ]
[ [ "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.get_variable", "tensorflow.nn.softmax", "tensorflow.concat", "tensorflow.constant", "tensorflow.nn.conv2d", "tensorflow.nn.max_pool", "tensorflow.reshape", "tensorflow.squeeze", "tensorflow.nn.avg_pool", "tensorflow.variable_scope", "numpy.load", "tensorflow.split", "tensorflow.add_n", "tensorflow.nn.atrous_conv2d", "tensorflow.nn.local_response_normalization", "tensorflow.nn.dropout" ] ]
GolamRashed/azure-databricks-mlops-mlflow
[ "d1e4a21cb5047213ac0d4c620aac8938048d8168" ]
[ "ml_source/tests/diabetes/scoring/test_scoring_batch.py" ]
[ "import os\nimport unittest\n\nimport pandas as pd\nfrom diabetes.scoring.batch.run import batch_scoring\nfrom diabetes.training.evaluate import split_data\nfrom diabetes.training.train import train_model\n\n\nclass TestScoringBatchMethods(unittest.TestCase):\n def test_batch_scoring(self):\n ridge_args = {\"alpha\": 0.5}\n data_file = os.path.join(\"tests/diabetes/data\", \"diabetes_unit_test.csv\")\n train_df = pd.read_csv(data_file).drop(columns=[\"SEX\"])\n data = split_data(train_df)\n model = train_model(data[\"train\"], ridge_args)\n\n score_data_file = os.path.join(\"tests/diabetes/data\", \"scoring_dataset.csv\")\n score_df = pd.read_csv(score_data_file).drop(columns=[\"SEX\"])\n scores = batch_scoring(model, score_df)\n self.assertAlmostEqual(scores[0], 60.75743442)\n self.assertAlmostEqual(scores[1], 67.10061271)\n" ]
[ [ "pandas.read_csv" ] ]
EXAUQ/mogp-emulator
[ "9d5772135498bdf5b95b44b4afb065c2c266f899", "9d5772135498bdf5b95b44b4afb065c2c266f899" ]
[ "mogp_emulator/GaussianProcessGPU.py", "mogp_emulator/SequentialDesign.py" ]
[ "\"\"\"\nextends GaussianProcess with an (optional) GPU implementation\n\"\"\"\n\nimport os\nimport re\nimport numpy as np\n\nfrom mogp_emulator.Kernel import SquaredExponential, Matern52\nfrom mogp_emulator.MeanFunction import MeanFunction, MeanBase\n\nimport mogp_emulator.LibGPGPU as LibGPGPU\n\nfrom mogp_emulator.GaussianProcess import GaussianProcessBase, PredictResult\n\n\nclass GPUUnavailableError(RuntimeError):\n \"\"\"Exception type to use when a GPU, or the GPU library, is unavailable\"\"\"\n pass\n\n\ndef ndarray_coerce_type_and_flags(arr):\n \"\"\"\n Helper function for the GaussianProcessGPU methods that call\n CUDA/C++ functions (those wrapped by _dense_gpgpu) and that take\n numpy arrays as arguments. Ensures that an array is of the\n correct type for this purpose.\n\n Takes an array or array-like.\n\n Returns an ndarray with the same data, that has:\n - dtype of float64\n - flags.writable\n - flags.c_contiguous\n\n The array returned may reference the original array, be a copy of\n it, or be newly constructed from the argument.\n \"\"\"\n\n arr = np.array(arr, copy=False)\n\n # cast into float64, just in case we were given integers and ensure contiguous (C type)\n arr_contiguous_float64 = np.ascontiguousarray(arr.astype(np.float64, copy=False))\n if not arr_contiguous_float64.flags['WRITEABLE']:\n return np.copy(arr_contiguous_float64)\n else:\n return arr_contiguous_float64\n\n\ndef parse_meanfunc_formula(formula):\n \"\"\"\n Assuming the formula has already been parsed by the Python MeanFunction interface,\n we expect it to be in a standard form, with parameters denoted by 'c' and variables\n denoted by 'x[dim_index]' where dim_index < D.\n\n :param formula: string representing the desired mean function formula\n :type formula: str\n\n :returns: Instance of LibGPGPU.ConstMeanFunc or LibGPGPU.PolyMeanFunc implementing formula,\n or None if the formula could not be parsed, or is not currently implemented in C++ code.\n :rtype: LibGPGPU.ConstMeanFunc or LibGPGPU.PolyMeanFun or None\n \"\"\"\n # convert to a raw string\n if formula == \"c\":\n return LibGPGPU.ConstMeanFunc()\n else:\n # see if the formula is a string representation of a number\n try:\n m = float(formula)\n return LibGPGPU.FixedMeanFunc(m)\n except:\n pass\n # if we got here, we hopefully have a parse-able formula\n terms = formula.split(\"+\")\n def find_index_and_power(term):\n variables = re.findall(r\"x\\[[\\d+]\\]\",term)\n if len(variables) == 0:\n # didn't find a non-const term\n return None\n indices = [int(re.search(r\"\\[([\\d+])\\]\", v).groups()[0]) for v in variables]\n if indices.count(indices[0]) != len(indices):\n raise NotImplementedError(\"Cross terms, e.g. x[0]*x[1] not implemented in GPU version.\")\n # first guess at the power to which the index is raised is how many times it appears\n # e.g. 
if written as x[0]*x[0]\n power = len(indices)\n # however, it's also possible to write 'x[0]^2' or even, 'x[0]*x[0]^2' or even 'x[0]^2*x[0]^2'\n # so look at all the numbers appearing after a '^'.\n more_powers = re.findall(r\"\\^[\\d]+\",term)\n more_powers = [int(re.search(r\"\\^([\\d]+)\",p).groups()[0]) for p in more_powers]\n # now add these on to the original power number\n # (subtracting one each time, as we already have x^1 implicitly)\n for p in more_powers:\n power += p - 1\n return [indices[0], power]\n indices_powers = []\n for term in terms:\n ip = find_index_and_power(term)\n if ip:\n indices_powers.append(ip)\n if len(indices_powers) > 0:\n return LibGPGPU.PolyMeanFunc(indices_powers)\n else:\n return None\n\ndef interpret_nugget(nugget):\n \"\"\"\n Interpret a provided 'nugget' value (str or float) as the C++ friendly nugget type and nugget size.\n :param: nugget, must be either a str with value 'adaptive' or 'fit,\n or a non-negative float.\n :returns: \n :rtype: LibGPGPU.nugget_type, float\n \"\"\"\n if not isinstance(nugget, (str, float)):\n try:\n nugget = float(nugget)\n except TypeError:\n raise TypeError(\"nugget parameter must be a string or a non-negative float\")\n\n if isinstance(nugget, str):\n if nugget == \"adaptive\":\n nugget_type = LibGPGPU.nugget_type.adaptive\n elif nugget == \"fit\":\n nugget_type = LibGPGPU.nugget_type.fit\n else:\n raise ValueError(\"nugget must be a string set to 'adaptive', 'fit', or a float\")\n nugget_size = 0.\n else:\n # nugget is fixed\n if nugget < 0.:\n raise ValueError(\"nugget parameter must be non-negative\")\n nugget_type = LibGPGPU.nugget_type.fixed\n nugget_size = nugget\n # return info needed to set the nugget on the C++ object\n return nugget_type, nugget_size\n\nclass GaussianProcessGPU(GaussianProcessBase):\n\n \"\"\"\n This class implements the same interface as\n :class:`mogp_emulator.GaussianProcess.GaussianProcess`, but using a GPU if available.\n Will raise a RuntimeError if a CUDA-compatible GPU, GPU-interface library libgpgpu\n could not be found.\n Note that while the class uses a C++/CUDA implementation of the SquaredExponential or\n Matern52 kernels for the \"fit\" and \"predict\" methods, the 'kernel' data member (and\n hence the results of e.g. 
'gp.kernel.kernel_f(theta)' will be the pure Python versions,\n for compatibility with the interface of the GaussianProcess class.\n \"\"\"\n\n def __init__(self, inputs, targets, mean=None, kernel=SquaredExponential(), priors=None,\n nugget=\"adaptive\", inputdict = {}, use_patsy=True, max_batch_size=2000):\n\n if not LibGPGPU.HAVE_LIBGPGPU:\n raise RuntimeError(\"Cannot construct GaussianProcessGPU: \"\n \"The GPU library (libgpgpu) could not be loaded\")\n\n elif not LibGPGPU.gpu_usable():\n raise RuntimeError(\"Cannot construct GaussianProcessGPU: \"\n \"A compatible GPU could not be found\")\n\n inputs = ndarray_coerce_type_and_flags(inputs)\n if inputs.ndim == 1:\n inputs = np.reshape(inputs, (-1, 1))\n assert inputs.ndim == 2\n\n targets = ndarray_coerce_type_and_flags(targets)\n assert targets.ndim == 1\n assert targets.shape[0] == inputs.shape[0]\n\n self._inputs = inputs\n self._targets = targets\n self._max_batch_size = max_batch_size\n\n if mean is None:\n self.mean = LibGPGPU.ZeroMeanFunc()\n else:\n if not issubclass(type(mean), MeanBase):\n if isinstance(mean, str):\n mean = MeanFunction(mean, inputdict, use_patsy)\n else:\n raise ValueError(\"provided mean function must be a subclass of MeanBase,\"+\n \" a string formula, or None\")\n\n # at this point, mean will definitely be a MeanBase. We can call its __str__ and\n # parse this to create an instance of a C++ MeanFunction\n self.mean = parse_meanfunc_formula(mean.__str__())\n # if we got None back from that function, something went wrong\n if not self.mean:\n raise ValueError(\"\"\"\n GPU implementation was unable to parse mean function formula {}.\n \"\"\".format(mean.__str__())\n )\n # set the kernel.\n # Note that for the \"kernel\" data member, we use the Python instance\n # rather than the C++/CUDA one (for consistency in interface with\n # GaussianProcess class). 
However the C++/CUDA version of the kernel is\n # used when calling fit() or predict()\n if (isinstance(kernel, str) and kernel == \"SquaredExponential\") \\\n or isinstance(kernel, SquaredExponential):\n self.kernel_type = LibGPGPU.kernel_type.SquaredExponential\n self.kernel = SquaredExponential()\n elif (isinstance(kernel, str) and kernel == \"Matern52\") \\\n or isinstance(kernel, Matern52):\n self.kernel_type = LibGPGPU.kernel_type.Matern52\n self.kernel = Matern52()\n else:\n raise ValueError(\"GPU implementation requires kernel to be SquaredExponential or Matern52\")\n\n # the nugget parameter passed to constructor can be str or float,\n # disambiguate it here to pass values to C++ constructor.\n nugget_type, nugget_size = interpret_nugget(nugget)\n self._nugget_type = nugget_type\n self._init_nugget_size = nugget_size\n # instantiate the DenseGP_GPU class\n self._densegp_gpu = None\n self._init_gpu() \n\n @classmethod\n def from_cpp(cls, denseGP_GPU):\n inputs = denseGP_GPU.inputs()\n targets = denseGP_GPU.targets()\n obj = cls.__new__(cls)\n obj._densegp_gpu = denseGP_GPU\n obj._inputs = inputs,\n obj._targets = targets\n obj._nugget_type = denseGP_GPU.get_nugget_type()\n obj.kernel_type = denseGP_GPU.get_kernel_type()\n obj.mean = denseGP_GPU.get_meanfunc()\n return obj\n\n\n\n def _init_gpu(self):\n \"\"\"\n Instantiate the DenseGP_GPU C++/CUDA class, if it doesn't already exist.\n \"\"\"\n if not self._densegp_gpu:\n self._densegp_gpu = LibGPGPU.DenseGP_GPU(self._inputs,\n self._targets,\n self._max_batch_size,\n self.mean,\n self.kernel_type,\n self._nugget_type,\n self._init_nugget_size)\n\n @property\n def inputs(self):\n \"\"\"\n Returns inputs for the emulator as a numpy array\n\n :returns: Emulator inputs, 2D array with shape ``(n, D)``\n :rtype: ndarray\n \"\"\"\n return self._densegp_gpu.inputs()\n\n @property\n def targets(self):\n \"\"\"\n Returns targets for the emulator as a numpy array\n\n :returns: Emulator targets, 1D array with shape ``(n,)``\n :rtype: ndarray\n \"\"\"\n return self._densegp_gpu.targets()\n\n @property\n def n(self):\n \"\"\"\n Returns number of training examples for the emulator\n\n :returns: Number of training examples for the emulator object\n :rtype: int\n \"\"\"\n return self._densegp_gpu.n()\n\n @property\n def D(self):\n \"\"\"\n Returns number of inputs (dimensions) for the emulator\n\n :returns: Number of inputs for the emulator object\n :rtype: int\n \"\"\"\n return self._densegp_gpu.D()\n\n @property\n def n_params(self):\n \"\"\"\n Returns number of hyperparameters\n\n Returns the number of hyperparameters for the emulator. The number depends on the\n choice of mean function, covariance function, and nugget strategy, and possibly the\n number of inputs for certain choices of the mean function.\n\n :returns: Number of hyperparameters\n :rtype: int\n \"\"\"\n return self._densegp_gpu.n_params()\n\n @property\n def nugget_type(self):\n \"\"\"\n Returns method used to select nugget parameter\n\n Returns a string indicating how the nugget parameter is treated, either ``\"adaptive\"``,\n ``\"fit\"``, or ``\"fixed\"``. 
This is automatically set when changing the ``nugget``\n property.\n\n :returns: Current nugget fitting method\n :rtype: str\n \"\"\"\n return self._nugget_type.__str__().split(\".\")[1]\n\n @property\n def nugget(self):\n \"\"\"\n See :func:`mogp_emulator.GaussianProcess.GaussianProcess.nugget`\n\n Use the value cached in the C++ class, as we can't rely on the Python fit()\n function being called.\n \"\"\"\n return self._densegp_gpu.get_nugget_size()\n\n @nugget.setter\n def nugget(self, nugget):\n nugget_type, nugget_size = interpret_nugget(nugget)\n self._nugget_type = nugget_type\n self._densegp_gpu.set_nugget_size(nugget_size)\n self._densegp_gpu.set_nugget_type(nugget_type)\n \n\n @property\n def theta(self):\n \"\"\"\n Returns emulator hyperparameters\n see\n :func:`mogp_emulator.GaussianProcess.GaussianProcess.theta`\n\n :type theta: ndarray\n \"\"\"\n if not self._densegp_gpu.theta_fit_status():\n return None\n theta = self._densegp_gpu.get_theta()\n return theta\n\n @theta.setter\n def theta(self, theta):\n \"\"\"\n Fits the emulator and sets the parameters (property-based setter\n alias for ``fit``)\n\n See :func:`mogp_emulator.GaussianProcess.GaussianProcess.theta`\n\n :type theta: ndarray\n :returns: None\n \"\"\"\n if theta is None:\n self._densegp_gpu.reset_theta_fit_status()\n else:\n self.fit(theta)\n\n @property\n def L(self):\n \"\"\"\n Return the lower triangular Cholesky factor.\n\n :returns: np.array\n \"\"\"\n result = np.zeros((self.n, self.n))\n self._densegp_gpu.get_cholesky_lower(result)\n return np.tril(result.transpose())\n\n @property \n def invQt(self):\n \"\"\"\n Return the product of inverse covariance matrix with the target values \n\n :returns: np.array\n \"\"\"\n if not self._densegp_gpu.theta_fit_status():\n return None\n invQt_result = np.zeros(self.n)\n self._densegp_gpu.get_invQt(invQt_result)\n return invQt_result\n \n @property\n def current_logpost(self):\n \"\"\"\n Return the current value of the log posterior. This is cached in the C++ class.\n\n :returns: double\n \"\"\"\n return self.logposterior(self.theta)\n\n def get_K_matrix(self):\n \"\"\"\n Returns current value of the inverse covariance matrix as a numpy array.\n\n Does not include the nugget parameter, as this is dependent on how the\n nugget is fit.\n \"\"\"\n result = np.zeros((self.n, self.n))\n self._densegp_gpu.get_K(result)\n return result\n\n def fit(self, theta):\n \"\"\"\n Fits the emulator and sets the parameters.\n\n Implements the same interface as\n :func:`mogp_emulator.GaussianProcess.GaussianProcess.fit`\n \"\"\"\n theta = ndarray_coerce_type_and_flags(theta)\n\n self._densegp_gpu.fit(theta)\n \n\n def logposterior(self, theta):\n \"\"\"\n Calculate the negative log-posterior at a particular value of the hyperparameters\n\n See :func:`mogp_emulator.GaussianProcess.GaussianProcess.logposterior`\n\n :param theta: Value of the hyperparameters. Must be array-like with shape ``(n_params,)``\n :type theta: ndarray\n :returns: negative log-posterior\n :rtype: float\n \"\"\"\n\n return self._densegp_gpu.get_logpost(theta)\n\n def logpost_deriv(self, theta):\n \"\"\"\n Calculate the partial derivatives of the negative log-posterior\n\n See :func:`mogp_emulator.GaussianProcess.GaussianProcess.logpost_deriv`\n\n :param theta: Value of the hyperparameters. 
Must be array-like with shape ``(n_params,)``\n :type theta: ndarray\n :returns: partial derivatives of the negative log-posterior with respect to the\n hyperparameters (array with shape ``(n_params,)``)\n :rtype: ndarray\n \"\"\"\n theta = np.array(theta, copy=False)\n\n assert theta.shape == (self.n_params,), \"bad shape for new parameters\"\n\n if self.theta is None or not np.allclose(theta, self.theta, rtol=1.e-10, atol=1.e-15):\n self.fit(theta)\n\n result = np.zeros(self.n_params)\n self._densegp_gpu.logpost_deriv(result)\n return result\n\n def logpost_hessian(self, theta):\n \"\"\"\n Calculate the Hessian of the negative log-posterior\n\n See :func:`mogp_emulator.GaussianProcess.GaussianProcess.logpost_hessian`\n\n :param theta: Value of the hyperparameters. Must be array-like with shape\n ``(n_params,)``\n :type theta: ndarray\n :returns: Hessian of the negative log-posterior (array with shape\n ``(n_params, n_params)``)\n :rtype: ndarray\n \"\"\"\n raise GPUUnavailableError(\n \"The Hessian calculation is not currently implemented in the GPU version of MOGP.\"\n )\n\n\n def predict(self, testing, unc=True, deriv=True, include_nugget=True):\n \"\"\"\n Make a prediction for a set of input vectors for a single set of hyperparameters.\n This method implements the same interface as\n :func:`mogp_emulator.GaussianProcess.GaussianProcess.predict`\n \"\"\"\n\n if self.theta is None:\n raise ValueError(\"hyperparameters have not been fit for this Gaussian Process\")\n\n testing = ndarray_coerce_type_and_flags(testing)\n\n if self.D == 1 and testing.ndim == 1:\n testing = np.reshape(testing, (-1, 1))\n elif testing.ndim == 1:\n testing = np.reshape(testing, (1, len(testing)))\n assert testing.ndim == 2\n\n n_testing, D = np.shape(testing)\n\n assert D == self.D\n\n means = np.zeros(n_testing)\n\n if unc:\n variances = np.zeros(n_testing)\n for i in range(0, n_testing, self._max_batch_size):\n self._densegp_gpu.predict_variance_batch(\n testing[i:i+self._max_batch_size],\n means[i:i+self._max_batch_size],\n variances[i:i+self._max_batch_size])\n if include_nugget:\n variances += self.nugget\n else:\n for i in range(0, n_testing, self._max_batch_size):\n self._densegp_gpu.predict_batch(\n testing[i:i+self._max_batch_size],\n means[i:i+self._max_batch_size])\n variances = None\n if deriv:\n deriv_result = np.zeros((n_testing,self.D))\n for i in range(0, n_testing, self._max_batch_size):\n self._densegp_gpu.predict_deriv(\n testing[i:i+self._max_batch_size],\n deriv_result[i:i+self._max_batch_size])\n else:\n deriv_result = None\n return PredictResult(mean=means, unc=variances, deriv=deriv_result)\n\n\n def __call__(self, testing):\n \"\"\"A Gaussian process object is callable: calling it is the same as\n calling `predict` without uncertainty and derivative\n predictions, and extracting the zeroth component for the\n 'mean' prediction.\n \"\"\"\n return (self.predict(testing, unc=False, deriv=False)[0])\n\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the model\n\n :returns: A string representation of the model\n (indicates number of training examples and inputs)\n :rtype: str\n \"\"\"\n return (\"Gaussian Process with \" + str(self.n) + \" training examples and \" +\n str(self.D) + \" input variables\")\n\n\n ## __setstate__ and __getstate__ for pickling: don't pickle \"_dense_gp_gpu\",\n ## and instead reinitialize this when unpickling\n ## (Pickling is required to use multiprocessing.)\n def __setstate__(self, state):\n self.__dict__ = state\n self.init_gpu()\n if 
self._theta is not None:\n self.fit(self._theta)\n\n\n def __getstate__(self):\n copy_dict = self.__dict__.copy()\n del copy_dict[\"_densegp_gpu\"]\n copy_dict[\"_densegp_gpu\"] = None\n return copy_dict\n", "import numpy as np\nfrom scipy.spatial.distance import cdist\nfrom inspect import signature\nfrom mogp_emulator.ExperimentalDesign import ExperimentalDesign\nfrom mogp_emulator.GaussianProcess import GaussianProcess\nfrom mogp_emulator.fitting import fit_GP_MAP\nfrom numpy.linalg import LinAlgError\n\nclass SequentialDesign(object):\n \"\"\"\n Base class representing a sequential experimental design\n\n This class provides the base implementation of a class for designing experiments sequentially. This\n means that rather than picking all simulation points in a single step, the points are selected one\n by one, taking into account the information obtained by determining the true parameter value at each\n design point when selecting the next one. Sequential designs can be very useful when running expensive,\n high-dimensional simulations to ensure that a limited computational budget is used effectvely.\n\n Instead of choosing all points at once, which is the case in a one-shot design, a sequential design\n does some additional computation work at each step to more carefully choose the next point. This means\n that sequential designs are better suited for very expensive simulations, where the additional\n cost of choosing the next point is small compared to the overall computational cost of running\n the simulations.\n\n A sequential design is built on top of a base design (which must be a subclass of the\n ``ExperimentalDesign`` class. In addition to the base design, the class must contain information on\n how many points are used in the initial design (i.e. the number of starting points used before starting\n the sequential steps in the design) and the number of candidate points that are considered during each\n iteration. Optionally, a function for evaluating the actual simulation can be optionally bound to the\n class instance, which allows the entire design process to be automated. If such a function is not\n provided, then the steps to run the design must be carried out manually, with the evaluated\n simulation values provided to the class at the end of each simulation in order to determine the\n next point.\n\n To use the base class to create an experimental design, a new subclass must be created that provides\n a method ``_eval_metric``, which considers all candidate points and returns the index of the best\n candidate. Otherwise, all other code provided here allows for a generic sequential design to be\n easily run and managed.\n \"\"\"\n def __init__(self, base_design, f = None, n_samples = None, n_init = 10, n_cand = 50):\n \"\"\"\n Create a new instance of a sequential experimental design\n\n Creates a new instance of a sequential experimental design, which sequentially chooses\n points to be evaluated from a complex simulation function. It is often used for\n expensive computational models, where the cost of running a single evaluation is\n large and must be done in series due to computational limitations, and thus some\n additional computation done at each step to select new points is small compared\n to the overall cost of running a single simulation.\n\n Sequential designs require specifying a base design using a subclass of ``ExperimentalDesign``\n as well as information on the number of points to use in each step in the design\n process. 
Additionally, the function to evaluated can be bound to the class to allow\n automatic evaluation of the function at each step.\n\n :param base_design: Base one-shot experimental design (must be a subclass of\n ``ExperimentalDesign``). This contains the information on the\n parameter space to be sampled.\n :type base_design: ExperimentalDesign\n :param f: Function to be evaluated for the design. Must take all parameter values as a single\n input array and return a single float or an array of length 1\n :type f: function or other callable\n :param n_samples: Number of sequential design points to be drawn. If specified, this must be\n a non-negative integer. Note that this is in addition to the number of initial\n points, meaning that the total design size will be ``n_samples + n_init``.\n This can also be specified when running the full design. This parameter is\n optional, and defaults to ``None`` (meaning the number of samples is set when\n running the design, or that samples will be added manually).\n :type n_samples: int or None\n :param n_init: Number of points in the inital design before the sequential steps begin. Must\n be a positive integer. Optional, default value is 10.\n :type n_init: int\n :param n_cand: Number of candidates to consider at each sequential design step. Must be a positive\n integer. Optional, default value is 50.\n \"\"\"\n\n if not isinstance(base_design, ExperimentalDesign):\n raise TypeError(\"base design must be a one-shot experimental design\")\n\n if not f is None:\n if not callable(f):\n raise TypeError(\"simulator f must be a function or other callable\")\n\n if not len(signature(f).parameters) == 1:\n raise ValueError(\"simulator f must accept all parameters as a single input array\")\n\n if (not n_samples is None) and int(n_samples) < 0:\n raise ValueError(\"number of samples must be nonzero\")\n\n if int(n_init) <= 0:\n raise ValueError(\"number of initial design points must be positive\")\n\n if int(n_cand) <= 0:\n raise ValueError(\"number of candidate design points must be positive\")\n\n self.base_design = base_design\n self.f = f\n if n_samples is None:\n self.n_samples = None\n else:\n self.n_samples = int(n_samples)\n self.n_init = int(n_init)\n self.n_cand = int(n_cand)\n\n self.current_iteration = 0\n self.initialized = False\n self.inputs = None\n self.targets = None\n self.candidates = None\n\n def save_design(self, filename):\n \"\"\"\n Save current state of the sequential design\n\n Saves the current state of the sequential design by writing the current\n values of ``inputs``, ``targets``, and ``candidates`` to file as a ``.npz``\n file. To re-load a saved design, use the ``load_design`` method.\n\n Note that this method only dumps the arrays holding the inputs, targets, and\n candidates to a ``.npz`` file. It does not ensure that the function or base\n design are consistent, so it is up to the user to ensure that the new design\n parameters are the same as the parameters for the old one.\n\n :param filename: Filename or file object where design will be saved\n :type filename: str or file\n :returns: None\n \"\"\"\n\n design_dict = {}\n design_dict['inputs'] = self.inputs\n design_dict['targets'] = self.targets\n design_dict['candidates'] = self.candidates\n\n np.savez(filename, **design_dict)\n\n def load_design(self, filename):\n \"\"\"\n Load previously saved sequential design\n\n Loads a previously saved sequential design from file. 
Loads the arrays for\n ``inputs``, ``targets``, and ``candidates`` from file and sets other internal\n data to be consistent. It performs a few checks for consistency to ensure\n that the loaded design is compatible with the selected parameters, however,\n it does not completely check everything for consistency (in particular, it does\n not make any attempt to ensure that the exact base design or function are\n identical to what was previously used). It is up to the user to ensure that\n these are consistent with the previous instance of the design.\n\n :param filename: Filename or file object from which the design will be loaded\n :type filename: str or file\n :returns: None\n \"\"\"\n\n design_file = np.load(filename, allow_pickle=True)\n\n self.inputs = np.array(design_file['inputs'])\n if np.all(self.inputs) == None:\n self.inputs = None\n\n self.targets = np.array(design_file['targets'])\n if np.all(self.targets) == None:\n self.targets = None\n\n self.candidates = np.array(design_file['candidates'])\n if np.all(self.candidates) == None:\n self.candidates = None\n\n # perform some checks (note this is not exhaustive)\n\n if self.inputs is None:\n assert self.targets is None, \"Cannot have targets without corresponding inputs\"\n else:\n if not self.targets is None:\n assert self.targets.ndim == 1, \"bad number of dimensions for targets\"\n assert self.targets.shape[0] <= self.inputs.shape[0], \"targets cannot be longer than inputs\"\n self.initialized = True\n self.current_iteration = self.targets.shape[0]\n assert self.get_n_parameters() == self.inputs.shape[1], \"Bad shape for inputs\"\n if self.inputs.shape[1] < self.n_init:\n print(\"n_init greater than number of inputs, changing n_init\")\n self.n_init = self.inputs.shape[1]\n\n if not self.candidates is None:\n assert self.get_n_parameters() == self.candidates.shape[1], \"Bad shape for candidates\"\n if self.candidates.shape[0] != self.n_cand:\n print(\"shape of candidates differs from n_cand, candidates will be overridden\")\n\n def has_function(self):\n \"\"\"\n Determines if class contains a function for running the simulator\n\n This method checks to see if a function has been provided for running the simulation.\n\n :returns: Whether or not the design has a bound function for evaluting the simulation.\n :rtype: bool\n \"\"\"\n return (not self.f is None)\n\n def get_n_parameters(self):\n \"\"\"\n Get number of parameters in design\n\n Returns the number of parameters in the design (note that this is specified in the base\n design that must be provided when initializing the class instance).\n\n :returns: Number of parameters in the design\n :rtype: int\n \"\"\"\n\n return self.base_design.get_n_parameters()\n\n def get_n_init(self):\n \"\"\"\n Get number of initial design points\n\n Returns the number of initial design points used before beginning the sequential design\n steps. Note that this means that the total number of samples to be drawn for the design\n is ``n_init + n_samples``.\n\n :returns: Number of initial design points\n :rtype: int\n \"\"\"\n\n return self.n_init\n\n def get_n_samples(self):\n \"\"\"\n Get number of sequential design points\n\n Returns the number of sequential design points used in the sequential design steps. This\n parameter can be ``None`` to indicate that the number of samples will be specified when\n running the design, or that the samples will be updated manually. 
Note that the total number\n of samples to be drawn for the design is ``n_init + n_samples``.\n\n :returns: Number of sequential design points\n :rtype: int\n \"\"\"\n\n return self.n_samples\n\n def get_n_cand(self):\n \"\"\"\n Get number of candidate design points\n\n Returns the number of candidate design points used in each sequential design step. Candidates\n are re-drawn at each step, so this number of points will be drawn each time and all points\n will be considered at each iteration.\n\n :returns: Number of candidate design points\n :rtype: int\n \"\"\"\n\n return self.n_cand\n\n def get_current_iteration(self):\n \"\"\"\n Get number of current iteration in the experimental design\n\n Returns the current iteration during the sequential design process. This is mostly useful\n if the sequential design is being updated manually to know the current iteration.\n\n :returns: Current iteration number\n :rtype: int\n \"\"\"\n\n return self.current_iteration\n\n def get_inputs(self):\n \"\"\"\n Get current design input points\n\n Returns a numpy array holding the current design points. The array is 2D and has shape\n ``(current_iteration, n_parameters)`` (i.e. it is resized after each iteration when a new\n design point is chosen).\n\n :returns: Current value of the design inputs\n :rtype: ndarray\n \"\"\"\n\n return self.inputs\n\n def get_targets(self):\n \"\"\"\n Get current design target points\n\n Returns a numpy array holding the current target points. The array is 1D and has shape\n ``(current_iteration,)`` (i.e. it is resized after each iteration when a new target point\n is added). Note that simulation outputs must be a single number, so if considering a\n simulation has multiple outputs, the user must decide how to combine them to form the\n relevant target value for deciding which point to simulate next.\n\n :returns: Current value of the target inputs\n :rtype: ndarray\n \"\"\"\n\n return self.targets\n\n def get_candidates(self):\n \"\"\"\n Get current candidate design input points\n\n Returns a numpy array holding the current candidate design points. The array is 2D and\n has shape ``(n_cand, n_parameters)``. It always has the same size once it is initialized,\n but the values will change acros iterations as new candidate points are considered at\n each iteration.\n\n :returns: Current value of the candidate design inputs\n :rtype: ndarray\n \"\"\"\n\n return self.candidates\n\n def get_base_design(self):\n \"\"\"\n Get type of base design\n\n Returns the type of the base design. The base design must be a subclass of ``ExperimentalDesign``,\n but any one-shot design method can be used to generate the initial design and the candidates.\n\n :returns: Base design type as a string\n :rtype: str\n \"\"\"\n\n return type(self.base_design).__name__\n\n def generate_initial_design(self):\n \"\"\"\n Create initial design\n\n Method to set the initial design inputs. Generates the desired number of points for the initial\n design by drawing from the base design. Method sets the ``inputs`` attribute of the\n ``SequentialDesign`` instance, but also returns the initial design as a numpy array if the\n simulations are to be run manually. 
This method can be run repeatedly to draw different\n initial designs if the initial target values have not been set, but once the targets have been\n set the method will not overwrite them to prevent corruption of the design.\n\n :returns: Initial design points, a 2D numpy array with shape ``(n_init, n_parameters)``\n :rtype: ndarray\n \"\"\"\n\n assert not self.initialized, \"initial design has already been created\"\n\n self.inputs = self.base_design.sample(self.n_init)\n self.current_iteration = self.n_init\n return self.inputs\n\n def set_initial_targets(self, targets):\n \"\"\"\n Set initial design target values\n\n Method to set the initial design targets. Generates the desired number of points for the initial\n design by drawing from the base design. Method sets the ``inputs`` attribute of the\n ``SequentialDesign`` instance, but also returns the initial design as a numpy array if the\n simulations are to be run manually. This method can be run repeatedly to draw different\n initial designs if the initial target values have not been set, but once the targets have been\n set the method will not overwrite them to prevent corruption of the design.\n\n Target values must be an array with length ``(n_init,)``, with values obtained by running\n the initial design through the simulation. Note that this means the initial design must\n be created prior to running this method -- if this method is called prior to\n ``generate_initial_design``, the code will raise an error.\n\n :param targets: Initial value of targets, must be a 1D numpy array with shape ``(n_init,)``\n :type targets: ndarray\n :returns: None\n :rtype: None\n \"\"\"\n\n if self.inputs is None:\n raise ValueError(\"Initial design has not been generated\")\n else:\n assert self.inputs.shape == (self.n_init, self.get_n_parameters()), \"inputs have not been initialized correctly\"\n\n targets = np.atleast_1d(np.squeeze(np.array(targets)))\n assert np.array(targets).shape == (self.n_init,), \"initial targets must have shape (n_init,)\"\n\n self.targets = np.array(targets)\n self.initialized = True\n\n def run_initial_design(self):\n \"\"\"\n Run initial design\n\n Method to run the initial design by generating the initial design, evaluating the function on\n all design points, and setting the target values. Note that this requires having a bound function\n to the class in order to evaluate the design points internally. 
It is a shortcut to running\n ``generate_initial_design``, evaluating the initial design points, and then using\n ``set_initial_targets`` to set the target values, with some additional checks along the way.\n\n If the initial design has already been fully run, this method will raise an error as the\n method to generate the initial design checks this prior to overwriting the initial targets.\n Note also that this method checks that the outputs of the bound function match up with\n the expected array sizes and that all outputs are finite before updating the initial targets.\n\n :returns: None\n :rtype: None\n \"\"\"\n\n\n assert self.has_function(), \"Design must have a bound function to use run_initial_design\"\n\n inputs = self.generate_initial_design()\n targets = np.full((self.n_init,), np.nan)\n\n for i in range(self.n_init):\n targets[i] = np.array(self.f(inputs[i,:]))\n\n assert np.all(np.isfinite(targets)), \"error in initializing sequential design, function outputs may not be the correct shape\"\n self.set_initial_targets(targets)\n\n def _generate_candidates(self):\n \"\"\"\n Generate candidates for next iteration\n\n Internal method for generating candidates for the next iteration of the sequential design.\n Draws the desired number of points from the base design and sets the internal ``candidates``\n attribute to the resuting candidate design points.\n\n :returns: None\n :rtype: None\n \"\"\"\n\n self.candidates = self.base_design.sample(self.n_cand)\n\n def _eval_metric(self):\n \"\"\"\n Evaluate metric for selecting next point\n\n Apply the metric used for sequential design to all candidate points and returns the index of the\n best candidate. This is not implemented for the base implementation, and is the only method\n that should need to be updated in order to create a new type of sequential design.\n\n :returns: Index of best candidate from the possible next design points. Must be an integer\n 0 <= index < n_cand\n :rtype: int\n \"\"\"\n raise NotImplementedError(\"Base class for Sequential Design does not implement an evaluation metric\")\n\n def _estimate_next_target(self, next_point):\n \"\"\"\n Estimate value of simulator for a point in a Sequential design\n\n This method is used for the batch version of a sequential design. Instead of updating\n the targets with the known solution, this method is used to estimate the function\n instead. This is method-specific, so this is not defined for the base class but instead\n should be defined in the subclass. Returns an array of length 1 holding the prediction.\n\n :param next_point: Input to be simulated. Must be an array of shape ``(n_parameters,)``\n :type next_point: ndarray\n :returns: Estimated simulation value for the given input as an array of length 1\n :rtype: ndarray\n \"\"\"\n raise NotImplementedError(\"_estimate_next_point not implemented for base SequentialDesign\")\n\n def get_batch_points(self, n_points):\n \"\"\"\n Batch version of get_next_point for a Sequential Design\n\n This method returns a batch of design points to run from a Sequential Design. This is\n useful if simulations can be run in parallel, which speeds up the ability to\n generate designs efficiently. The method simply calls ``get_next_point`` the\n required number of times, but rather than using the true value of the simulation\n it instead substitutes the predicted value that is method-specific. 
This can be\n implemented in a subclass by defining the method ``_estimate_next_target``.\n\n :param n_points: Size of batch to generate for the next set of simulation points.\n This parameter determines the shape of the output array. Must\n be a positive integer.\n :type n_points: int\n :returns: Set of batch points chosen using the batch version of the design\n as a numpy array with shape ``(n_points, n_parameters)``\n :rtype: ndarray\n \"\"\"\n\n assert n_points > 0, \"n_points must be positive\"\n\n batch_points = np.zeros((n_points, self.get_n_parameters()))\n\n for i in range(n_points):\n batch_points[i] = self.get_next_point()\n next_target = self._estimate_next_target(batch_points[i])\n self.set_next_target(next_target)\n\n self.current_iteration = self.current_iteration - n_points\n new_targets = np.array(self.targets[:self.current_iteration])\n self.targets = np.array(new_targets)\n\n return batch_points\n\n def get_next_point(self):\n \"\"\"\n Evaluate candidates to determine next point\n\n Public method for determining the next point in the design. Internally, it checks that the inputs\n and target arrays are as expected for correctly drawing a new point, generates prospective candidates,\n and then evaluates them using the desired metric in order to select the best one. It updates the\n ``inputs`` array and returns the next point to be evaluated as a 1D numpy array of length\n ``n_parameters``.\n\n :returns: Next design point, a 1D numpy array of length ``n_parameters``\n :rtype: ndarray\n \"\"\"\n\n if self.inputs is None:\n raise ValueError(\"Initial design has not been generated\")\n else:\n assert self.inputs.shape == (self.current_iteration, self.get_n_parameters()), \"inputs have not been correctly updated\"\n\n if self.targets is None:\n raise ValueError(\"Initial targets have not been generated\")\n else:\n assert self.targets.shape == (self.current_iteration,), \"targets have not been correctly updated\"\n\n self._generate_candidates()\n next_index = self._eval_metric()\n\n new_inputs = np.empty((self.current_iteration + 1, self.get_n_parameters()))\n new_inputs[:-1, :] = self.inputs\n\n next_point = self.candidates[next_index,:]\n new_inputs[-1,:] = next_point\n\n self.inputs = np.array(new_inputs)\n\n return next_point\n\n def set_batch_targets(self, new_targets):\n \"\"\"\n Batch version of set_next_target for a Sequential Design\n\n This method updates the targets array for a batch set of simulations. The input\n array must have shape ``(n_points,)``, where ``n_points`` is the number of points\n selected when calling ``get_batch_points``. Disagreement between these two values\n will result in an error.\n\n :param new_targets: Array holding results from the simulations. 
Must be an array\n of shape ``(n_points,)``, where ``n_points`` is set when\n calling ``get_batch_points``\n :type new_targets: ndarray\n :returns: None\n \"\"\"\n if self.inputs is None:\n raise ValueError(\"Initial design has not been generated\")\n else:\n n_points = self.inputs.shape[0] - self.current_iteration\n assert self.inputs.shape == (self.current_iteration + n_points, self.get_n_parameters()), \"inputs have not been correctly updated\"\n\n if self.targets is None:\n raise ValueError(\"Initial targets have not been generated\")\n else:\n assert self.targets.shape == (self.current_iteration,), \"targets have not been correctly updated\"\n\n new_targets = np.atleast_1d(np.array(new_targets))\n new_targets = np.reshape(new_targets, (len(new_targets),))\n assert new_targets.shape == (n_points,), \"new targets must have length n_points\"\n\n updated_targets = np.empty((self.current_iteration + n_points,))\n updated_targets[:-n_points] = self.targets\n updated_targets[-n_points:] = np.array(new_targets)\n\n self.targets = np.array(updated_targets)\n self.current_iteration = self.current_iteration + n_points\n\n def set_next_target(self, target):\n \"\"\"\n Set value of next target\n\n Updates the target array with the correct value (from running the actual simulation) of the\n latest design point determined using ``get_next_point``. The target input must be a float\n or an array of length 1. The code internally checks the inputs and targets for any problems\n that may have occurred in updating them correctly, and if all is well then updates the\n target array and increments the number of iterations. If the design has not been\n correctly initialized, or ``get_next_point`` has not been previously run, this method\n will raise an error.\n\n :param target: New target value found from evaluating the simulation on the latest design\n point found from the ``get_next_point`` method.\n :type target: float or length 1 array\n :returns: None\n :rtype: None\n \"\"\"\n\n if self.inputs is None:\n raise ValueError(\"Initial design has not been generated\")\n else:\n assert self.inputs.shape == (self.current_iteration + 1, self.get_n_parameters()), \"inputs have not been correctly updated\"\n\n if self.targets is None:\n raise ValueError(\"Initial targets have not been generated\")\n else:\n assert self.targets.shape == (self.current_iteration,), \"targets have not been correctly updated\"\n\n target = np.atleast_1d(np.array(target))\n target = np.reshape(target, (len(target),))\n assert target.shape == (1,), \"new target must have length 1\"\n\n new_targets = np.empty((self.current_iteration + 1,))\n new_targets[:-1] = self.targets\n new_targets[-1] = np.array(target)\n\n self.targets = np.array(new_targets)\n self.current_iteration = self.current_iteration + 1\n\n def run_next_point(self):\n \"\"\"\n Perform one iteration of the sequential design process\n\n Method for performing an iteration of the sequential design process. This is a shortcut for\n generating and evaluating the candidates to find the best next design point, evaluating\n the function on the next point, and then updating the targets array with the value.\n This requires a function be bound to the class instance to automatically run the\n simulation. 
This will also automatically update the ``current_iteration`` attribute,\n which can be used to determine the number of sequential design steps that have been run.\n\n :returns: None\n :rtype: None\n \"\"\"\n\n assert self.has_function(), \"Design must have a bound function to use run_next_point\"\n\n next_point = self.get_next_point()\n next_target = np.array(self.f(next_point))\n self.set_next_target(next_target)\n\n def run_sequential_design(self, n_samples = None):\n \"\"\"\n Run the entire sequential design\n\n Method to run all steps of the sequential design process. Note that the class instance must\n have a bound function for evaluating the design points to run all steps automatically. If\n such a method is not provided, the design steps must be run manually.\n\n The desired number of samples to be drawn can either be specified when initializing the\n class instance or when calling this method. If a number of samples is provided on\n both occasions, then the number provided when calling ``run_sequential_design`` is used.\n\n Internally, this method is a wrapper to ``run_initial_design`` and then calling\n ``run_next_point`` a total of ``n_samples`` times. Note that this means that the total\n number of design points is ``n_init + n_samples``.\n\n :param n_samples: Number of sequential design steps to be run. Optional if the number was\n specified upon initialization. Default is ``None`` (default to number\n set when initializing). If numbers are provided on both occasions, the\n number set here is used. If a number is provided, must be non-negative.\n :type n_samples: int or None\n :returns: None\n :rtype: None\n \"\"\"\n\n assert self.has_function(), \"Design must have a bound function to use run_sequential_design\"\n\n if n_samples is None and self.n_samples is None:\n raise ValueError(\"must specify n_samples either when initializing or calling run_sequential_design\")\n\n if n_samples is None:\n n_iter = self.n_samples\n else:\n n_iter = n_samples\n\n assert n_iter >= 0, \"number of samples must be non-negative\"\n\n self.run_initial_design()\n\n for i in range(n_iter):\n self.run_next_point()\n\n def __str__(self):\n \"\"\"\n Returns string representation of a sequential design\n\n Returns a string representation of the sequential design. Contains information on the base\n design, the number of points used in the different steps, and the input and target values.\n\n :returns: String representation of the sequential design\n :rtype: str\n \"\"\"\n\n output_string = \"\"\n output_string += type(self).__name__+\" with\\n\"\n output_string += self.get_base_design()+\" base design\\n\"\n if self.has_function():\n output_string += \"a bound simulator function\\n\"\n output_string += str(self.get_n_samples())+\" total samples\\n\"\n output_string += str(self.get_n_init())+\" initial points\\n\"\n output_string += str(self.get_n_cand())+\" candidate points\\n\"\n output_string += str(self.get_current_iteration())+\" current samples\\n\"\n output_string += \"current inputs: \"+str(self.get_inputs())+\"\\n\"\n output_string += \"current targets: \"+str(self.get_targets())\n\n return output_string\n\n\nclass MICEFastGP(GaussianProcess):\n \"\"\"\n Derived GaussianProcess class implementing the Woodbury matrix identity for fast predictions\n\n This class implements a Gaussian Process that is used in the MICE Sequential Design. 
The GP\n is fit using all candidate points from the sequential design, and the uses the Woodbury\n matrix identity to correct that fit to exclude the candidate point in question. This reduces\n the cost of fitting the GP from O(n^3) to O(n^2), which can dramatically speed up this\n process for large numbers of candidate points. This is mostly used for the particular\n application to the MICE sequential design, but could potentially have other applications\n where many candidate points are to be considered one at a time.\n \"\"\"\n def fast_predict(self, index):\n \"\"\"\n Make a fast prediction using one input point to a fit GP\n\n This method is used to correct a Gaussian Process fit to a set of candidate points to\n evaluate the uncertainty at the candidate point. It is used in the MICE sequential\n design procedure to examine the mutual information between candidate points by determining\n how well correlated the design point is in question to the remainder of the candidates.\n It uses the Woodbury matrix identity to correct the existing GP fit (which requires\n O(n^3) operations) using O(n^2) operations, speeding up the process significantly for\n large candidate design sizes.\n\n The method requires a fit GP, and the index of the input point that is to be excluded.\n The method then corrects the GP fit and computes the uncertainty of the prediction\n on the excluded point returning the uncertainty as a float.\n\n :param index: Index of input point to be excluded in the fit and to which the prediction\n will be applied. Must be an integer with 0 <= index < n (where n is the number\n of target points in the fit GP, or the number of candidate points when\n applied to the MICE procedure).\n :type index: int\n :returns: Uncertainty in the corrected fit applied to the given index point\n :rtype: float\n \"\"\"\n\n index = int(index)\n assert index >= 0 and index < self.n, \"index must be 0 <= index < n\"\n\n indices = (np.arange(self.n) != index)\n\n switch = self.theta.n_mean\n sigma_2 = self.theta.cov + self.theta.nugget\n\n Ktest = self.theta.cov*self.kernel.kernel_f(np.reshape(self.inputs[indices,:], (self.n - 1, self.D)),\n np.reshape(self.inputs[index, :], (1, self.D)),\n self.theta.corr_raw)\n\n invQ = np.linalg.solve(self.L.T, np.linalg.solve(self.L, np.eye(self.n)))\n invQ_mod = (invQ[indices][:, indices] -\n 1./invQ[index, index]*np.outer(invQ[indices, index], invQ[indices, index]))\n\n var = np.maximum(sigma_2 - np.sum(Ktest * np.dot(invQ_mod, Ktest), axis=0), 0.)\n\n return var\n\nclass MICEDesign(SequentialDesign):\n \"\"\"\n Class representing a Mutual Information for Computer Experiments (MICE) sequential\n experimental design\n\n This class provides an implementation of the MICE algorithm, which uses Mutual Information\n as the criterion for selecting new points in a sequential design. The idea in MICE is to\n select design points based on the point that provides the most information on the function\n values in the entire design space. This is a straightforward application of a sequential\n design procedure, though the class requires a few additional parameters in order to\n compute the MICE criteria.\n\n These additional parameters are nugget parameters provided to the Gaussian Process fit to\n smooth the predictions when evaluating the Mutual Information criteria. Essentially, since\n experimental design often requires sampling from a high dimensional space, this cannot be\n done in a way that guarantees that all candidate points are equally spaced. 
The Mutual\n Information criterion is sensitive to how these candidate points are distributed in space,\n so the nugget parameter provides some smoothing that makes the criterion less dependent on\n the distribution of the candidate points. Typical values of the smoothing nugget parameters\n (``nugget_s`` in this implementation) are 1, though this may depend on the application.\n\n Other than the smoothing parameters, the implementation follows the base procedure for a\n sequential design. The implementation adds methods for querying the nugget parameters\n and an additional helper function for computing the Mutual Information criterion, but\n other methods are identical.\n \"\"\"\n def __init__(self, base_design, f = None, n_samples = None, n_init = 10, n_cand = 50,\n nugget = \"adaptive\", nugget_s = 1.):\n \"\"\"\n Create new instance of a MICE sequential design\n\n Method to initialize a new MICE design. Parameters are largely the same as for the base\n ``SequentialDesign`` class, with a few additional nugget parameters for computing the\n Mutual Information criterion. A base design must be provided (must be a subclass of the\n ``ExperimentalDesign`` class), plus optionally a function to be evaluated in the design.\n Additional parameters include the number of samples, the number of initial design points,\n the number of candidate points, the nugget parameter for the base GP, and the smoothing\n nugget parameter for smoothing the uncertainty predictions on the candidate design points.\n Note that the total number of design points is ``n_init + n_samples``.\n\n :param base_design: Base one-shot experimental design (must be a subclass of\n ``ExperimentalDesign``). This contains the information on the\n parameter space to be sampled.\n :type base_design: ExperimentalDesign\n :param f: Function to be evaluated for the design. Must take all parameter values as a single\n input array and return a single float or an array of length 1\n :type f: function or other callable\n :param n_samples: Number of sequential design points to be drawn. If specified, this must be\n a positive integer. Note that this is in addition to the number of initial\n points, meaning that the total design size will be ``n_samples + n_init``.\n This can also be specified when running the full design. This parameter is\n optional, and defaults to ``None`` (meaning the number of samples is set when\n running the design, or that samples will be added manually).\n :type n_samples: int or None\n :param n_init: Number of points in the inital design before the sequential steps begin. Must\n be a positive integer. Optional, default value is 10.\n :type n_init: int\n :param n_cand: Number of candidates to consider at each sequential design step. Must be a positive\n integer. Optional, default value is 50.\n :param nugget: Nugget parameter for base GP predictions. Must be a non-negative float or ``None``,\n where ``None`` indicates that the nugget parameter is selected adaptively. Optional,\n default value is ``None``.\n :type nugget: float or None\n :param nugget_s: Smoothing nugget parameter for smoothing the predictions on the candidate space.\n Must be a non-negative float. 
Default value is 1.\n :type nugget_s: float\n \"\"\"\n\n if not isinstance(nugget, str):\n try:\n float(nugget)\n except TypeError:\n raise TypeError(\"nugget must be a string or convertible to a float\")\n if nugget < 0.:\n raise ValueError(\"nugget parameter cannot be negative\")\n\n if nugget_s < 0.:\n raise ValueError(\"nugget smoothing parameter cannot be negative\")\n\n if isinstance(nugget, str):\n self.nugget = nugget\n else:\n self.nugget = float(nugget)\n self.nugget_s = float(nugget_s)\n\n super().__init__(base_design, f, n_samples, n_init, n_cand)\n\n def get_nugget(self):\n \"\"\"\n Get value of nugget parameter for base GP\n\n Returns the nugget value for the base GP (used to actually fit the inputs to targets).\n Can be a float or None (meaning fitting will adaptively add noise to stabilize matrix\n inversion as needed).\n\n :returns: Nugget parameter, can be a float or None for adaptive noise addition.\n :rtype: float or None\n \"\"\"\n return self.nugget\n\n def get_nugget_s(self):\n \"\"\"\n Get value of smoothing nugget parameter\n\n Returns the value of the smoothing nugget parameter for the GP used to evaluate the mutual\n information criterion. This GP examines the correlation between a candidate design point and\n the other candidate points, which requires smoothing to ensure that the correlation measure is\n not biased by the distribution of the candidate points in space. This parameter must be a\n nonnegative float (typical values used are 1, though this may depend on the application).\n\n :returns: Nugget parameter for smoothing predictions from candidate points made on a candidate\n point. Typical values are 1.\n :rtype: float\n \"\"\"\n return self.nugget_s\n\n def _estimate_next_target(self, next_point):\n \"\"\"\n Estimate value of simulator for a point in a MICE design\n\n This method is used for the batch version of a sequential design. Instead of updating\n the targets with the known solution, this method is used to estimate the function\n instead. For the MICEDesign, this is just the prediction of the current design GP\n for the point. Returns an array of length 1 holding the prediction.\n\n :param next_point: Input to be simulated. Must be an array of shape ``(n_parameters,)``\n :type next_point: ndarray\n :returns: Estimated simulation value for the given input as an array of length 1\n :rtype: ndarray\n \"\"\"\n\n next_point = np.array(next_point)\n assert next_point.shape == (self.get_n_parameters(),), \"bad shape for next_point\"\n\n return self.gp.predict(next_point)[0]\n\n def _MICE_criterion(self, data_point):\n \"\"\"\n Compute the MICE criterion for a single candidate point\n\n This internal method computes the MICE criterion for a single candidate point. Requires\n input of the index of a candidate point to be considered (must be an integer satisfying\n ``0 <= index < n_cand``). It involves fitting a corrected GP to the candidate points other\n than the one under consideration (using the ``MICEFastGP`` class to correct the fit\n via the Woodbury matrix identity), and then computing the MICE criterion based on\n the predictions of the base GP on the point and the corrected GP fit. The MICE\n criterion is then the variance of the base GP divided by the variance of the corrected\n candidate GP. Value returned is the MICE criterion for the point in question.\n\n :param data_point: Index of the candidate point under consideration. 
Must be an integer\n with ``0 <= index < n_cand``.\n :type data_point: int\n :returns: MICE criterion for the data point in question\n :rtype: float\n \"\"\"\n\n data_point = int(data_point)\n\n assert data_point >= 0 and data_point < self.n_cand, \"test point index is out of range\"\n\n _, unc1, _ = self.gp.predict(self.candidates[data_point], unc=True, deriv=False)\n unc2 = self.gp_fast.fast_predict(data_point)\n\n mice_criter = unc1/unc2\n\n assert np.isfinite(mice_criter), \"error in computing MICE critera\"\n\n return float(mice_criter)\n\n def _eval_metric(self):\n \"\"\"\n Evaluate MICE criterion on all candidate points and select new design point\n\n This internal method computes the MICE criterion on all candidate points and returns\n the index of the point with the maximum value. It does so by first fitting a base GP\n to all points in the current design, and then fitting a dummy GP to all candidate\n design points using the parameter values determined from the base GP fit. The MICE\n criterion does not depend on the target values, since the parameters are determined\n via the base GP and the MICE criterion only depends on the uncertainty of the\n candidate GP (which is independent of the target values). These fit GPs are then used\n to compute the MICE criterion for each candidate point, and the method returns the\n index of the point that had the maximum value of the MICE criterion.\n\n :returns: Index of the candidate with the maximum MICE score (integer with\n ``0 <= index < n_cand``)\n :rtype: int\n \"\"\"\n\n numtries = 10\n\n for i in range(numtries):\n try:\n self.gp = GaussianProcess(self.inputs, self.targets, nugget=self.nugget)\n self.gp = fit_GP_MAP(self.gp)\n\n self.gp_fast = MICEFastGP(self.candidates, np.ones(self.n_cand), nugget=self.gp.theta.nugget*self.nugget_s)\n self.gp_fast.theta = self.gp.theta\n break\n except FloatingPointError:\n if i < numtries - 1:\n continue\n else:\n raise FloatingPointError(\"Unable to find parameters suitable for both GPs\")\n except LinAlgError:\n if i < numtries - 1:\n continue\n else:\n raise LinAlgError(\"Unable to find parameters suitable for both GPs\")\n\n results = []\n\n for point in range(self.n_cand):\n results.append(self._MICE_criterion(point))\n\n return np.argmax(results)\n" ]
[ [ "numpy.allclose", "numpy.reshape", "numpy.copy", "numpy.shape", "numpy.array", "numpy.zeros" ], [ "numpy.dot", "numpy.savez", "numpy.isfinite", "numpy.reshape", "numpy.arange", "numpy.eye", "numpy.full", "numpy.all", "numpy.ones", "numpy.argmax", "numpy.linalg.LinAlgError", "numpy.load", "numpy.array", "numpy.empty", "numpy.outer" ] ]
ag-ds-bubble/swtloc
[ "af11cfa4369116ed708b9db930b294ed9a430f59" ]
[ "swtloc/bubble_bbox.py" ]
[ "# Author : Achintya Gupta\n\nimport numpy as np\nfrom cv2 import cv2\n\nclass BubbleBBOX:\n\n def __init__(self, labelmask, comp_props, lookup_radii_multiplier=0.8,\n sw_ratio = 2, cl_deviat = [13,13,13], ht_ratio = 2, ar_ratio = 3, ang_deviat = 30,\n bubble_width = 1):\n \n self.labelmask = labelmask.copy()\n self.lookup_radii_multiplier = lookup_radii_multiplier\n self.h, self.w = self.labelmask.shape[:2]\n self.maskviz = np.zeros(self.labelmask.shape)\n self.maskcomb = np.zeros(self.labelmask.shape)\n self.comp_props = comp_props.copy()\n self.comp_dstack = []\n\n self.sw_ratio = sw_ratio\n self.cl_deviat = np.linalg.norm(cl_deviat)\n self.ht_ratio = ht_ratio\n self.ar_ratio = ar_ratio\n self.ang_deviat = ang_deviat\n self.grouped_labels = []\n self.ungrouped_labels = set(list(self.comp_props.keys()))\n\n # Asthetics\n self.bubble_width = bubble_width\n\n self.sanity_checks()\n\n def sanity_checks(self):\n # Check for the Bubble Widths\n if not isinstance(self.bubble_width, int):\n raise ValueError(\"'bubble_width' parameter should be of type in 'int'\")\n \n\n def create_circular_mask(self, center, radius):\n\n if center is None: # use the middle of the image\n center = (int(self.w/2), int(self.h/2))\n if radius is None: # use the smallest distance between the center and image walls\n radius = min(center[0], center[1], self.w-center[0], self.h-center[1])\n Y, X = np.ogrid[:self.h, :self.w]\n dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)\n mask = dist_from_center <= radius\n return mask\n\n def generate_comp_bubble(self):\n\n for label,props in self.comp_props.items():\n\n label_ct = np.array([props['bbm_cy'], props['bbm_cx']]).astype(np.uint16)\n label_bx = props['bbm_bbox']\n label_an = props['bbm_anchor']\n radii = max([np.linalg.norm(epnt[::-1]-label_ct) for epnt in label_bx])*self.lookup_radii_multiplier\n \n cv2.putText(self.maskviz, str(label), tuple(label_an),\n cv2.FONT_HERSHEY_PLAIN, 5,\n 2, 1, cv2.LINE_AA)\n cv2.polylines(self.maskviz, np.int32([label_bx]), True, 1, 1)\n mask = self.create_circular_mask(label_ct[::-1], radii)\n self.comp_dstack.append(mask*label)\n self.maskcomb += mask\n\n self.comp_dstack = np.dstack(tuple(self.comp_dstack))\n\n\n def get_attr(self, label, mode='component'):\n \n props = self.comp_props[label]\n \n if mode == 'component':\n sw = props['sw_median']\n ar = props['bbm_ar']\n co = np.array(eval(props['img_color_median']))\n ht = props['bbm_h']\n wt = props['bbm_w']\n ang = props['bbm_ang']\n return sw, ar, co, ht, wt, ang\n \n elif mode == 'proximity':\n ct = np.array([props['bbm_cy'], props['bbm_cx']]).astype(np.uint16)\n bx = props['bbm_bbox']\n return ct, bx\n \n def get_proximity_labels(self, label):\n \n _properties = self.get_attr(label, mode = 'proximity')\n label_ct, label_bx = _properties\n radii = max([np.linalg.norm(epnt[::-1]-label_ct) for epnt in label_bx])*self.lookup_radii_multiplier\n mask = self.create_circular_mask(label_ct[::-1], radii)\n maxk_y, mask_x = mask.nonzero()\n proximty_labels = np.setdiff1d(np.unique(self.comp_dstack[maxk_y,mask_x,:]),[0, label])\n return proximty_labels\n\n def grouping_check(self, label1, label2):\n \n label1_props = self.get_attr(label1, mode='component')\n label_sw, label_ar, label_co, label_ht, label_wt, label_ang = label1_props\n\n label2_props = self.get_attr(label2, mode='component')\n comp_sw, comp_ar, comp_co, comp_ht, comp_wt, comp_ang = label2_props\n \n # Check for the Stroke Width Ratio\n check1 = (1/self.sw_ratio) <= (comp_sw/label_sw) <= self.sw_ratio\n # 
Check for the Color Deviation\n check2 = np.linalg.norm(label_co-comp_co, axis=0) <= self.cl_deviat\n # Check for the Height Ratio\n check3 = (1/self.ht_ratio) <= (comp_ht/label_ht) <= self.ht_ratio\n # Check for the Angle Deviation\n diff1 = np.abs(comp_ang - label_ang)\n diff2 = np.abs(90-comp_ang - label_ang)\n check4 = any(k <= self.ang_deviat for k in [diff1, diff2])\n # Check for the Aspect Ratio\n check5 = (1/self.ar_ratio) <= (comp_ar/label_ar) <= self.ar_ratio\n return check1 and check2 and check3 and check4 and check5\n \n def grouplabel(self, label, bucket):\n proxim_labels = self.get_proximity_labels(label=label)\n\n proxim_labels = [k for k in proxim_labels if k not in bucket]\n if proxim_labels == []:\n return bucket\n\n for each_pl in proxim_labels:\n if self.grouping_check(label1=label, label2=each_pl):\n bucket.append(each_pl)\n bucket = self.grouplabel(label=each_pl, bucket=bucket)\n return bucket\n\n\n def run_grouping(self):\n self.generate_comp_bubble()\n while len(self.ungrouped_labels) > 0:\n curr_label = list(self.ungrouped_labels)[0]\n curr_bucket = self.grouplabel(label=curr_label, bucket=[curr_label])\n self.grouped_labels.append(curr_bucket)\n self.ungrouped_labels = self.ungrouped_labels.difference(set(curr_bucket))\n\n \n self.grouped_bubblebbox = [] \n \n self.grouped_annot_bubble = np.zeros(self.labelmask.shape, dtype=np.uint8)\n self.grouped_annot_bubble = cv2.cvtColor(self.grouped_annot_bubble, cv2.COLOR_GRAY2BGR)\n\n self.grouped_annot = np.zeros(self.labelmask.shape, dtype=np.uint8)\n self.grouped_annot = cv2.cvtColor(self.grouped_annot, cv2.COLOR_GRAY2BGR)\n\n for each_group in self.grouped_labels:\n mask = np.zeros(self.labelmask.shape, dtype=np.uint8)\n for each_label in each_group:\n label_ct, label_bx = self.get_attr(each_label, mode = 'proximity')\n radii = max([np.linalg.norm(epnt[::-1]-label_ct) for epnt in label_bx])\n mask += self.create_circular_mask(label_ct[::-1], radii)\n \n contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n self.grouped_bubblebbox.append(contours)\n\n mask = np.zeros(self.labelmask.shape, dtype=np.uint8)\n for each_label in each_group:\n mask += self.labelmask==each_label\n mask*=255\n\n self.grouped_annot_bubble += cv2.cvtColor(mask.copy(), cv2.COLOR_GRAY2BGR)\n self.grouped_annot += cv2.cvtColor(mask.copy(), cv2.COLOR_GRAY2BGR)\n cv2.drawContours(self.grouped_annot_bubble, contours, -1, (0,0,255), self.bubble_width)\n \n rotrect = cv2.minAreaRect(contours[0])\n combbbox = cv2.boxPoints(rotrect)\n self.grouped_annot += cv2.polylines(self.grouped_annot, np.int32([combbbox]), True, (0,0,255), 2)\n \n return self.grouped_labels, self.grouped_bubblebbox, self.grouped_annot_bubble, self.grouped_annot, self.maskviz, self.maskcomb\n\n\n\n" ]
[ [ "numpy.abs", "numpy.sqrt", "numpy.unique", "numpy.int32", "numpy.linalg.norm", "numpy.array", "numpy.zeros" ] ]
FRCTeam2984/infiniterecharge2020
[ "8f3e8515c69cd9c378f19a0d0e24a5de2b5011e2" ]
[ "src/utils/lazytalonsrx.py" ]
[ "import logging\n\nimport ctre\nimport numpy as np\n\n\nclass EncoderType:\n Quad = ctre.FeedbackDevice.QuadEncoder\n Integrated = ctre.FeedbackDevice.IntegratedSensor\n CTREMag = ctre.FeedbackDevice.CTRE_MagEncoder_Relative\n\n\nclass EncoderConfig:\n def __init__(self, _type: EncoderType, cpr: int):\n self.type = _type\n self.cpr = cpr\n\n @property\n def radians_per_count(self):\n return (2 * np.pi) / self.cpr\n\n @property\n def counts_per_radian(self):\n return self.cpr / (2 * np.pi)\n\n\nCTREMag = EncoderConfig(EncoderType.CTREMag, 4096)\nFalconEncoder = EncoderConfig(EncoderType.Integrated, 2048)\n\n\nclass LazyTalonSRX(ctre.WPI_TalonSRX):\n \"\"\"A wraper for the ctre.WPI_TalonSRX to simplfy configuration and getting/setting values.\"\"\"\n\n TIMEOUT = 10\n\n ControlMode = ctre.ControlMode\n DemandType = ctre.DemandType\n StatusFrame = ctre.StatusFrameEnhanced\n NeutralMode = ctre.NeutralMode\n\n def __init__(self, id: int):\n super().__init__(id)\n self.no_encoder_warning = f\"No encoder connected to Talon {id}\"\n self.no_closed_loop_warning = f\"Talon {id} not in closed loop mode\"\n\n def initialize(self, name: str = None) -> None:\n \"\"\"Initialize the motors (enable the encoder, set invert status, set voltage limits).\"\"\"\n self.encoder = False\n if name != None:\n self.setName(name)\n\n def setEncoderConfig(self, config: EncoderConfig, phase: bool) -> None:\n self.encoder = True\n self.encoder_config = config\n self.configSelectedFeedbackSensor(config.type, 0, self.TIMEOUT)\n self.setSensorPhase(phase)\n\n def setPIDF(self, slot: int, kp: float, ki: float, kd: float, kf: float) -> None:\n \"\"\"Initialize the PIDF controller.\"\"\"\n self.selectProfileSlot(slot, self.TIMEOUT)\n self.config_kP(slot, kp, self.TIMEOUT)\n self.config_kI(slot, ki, self.TIMEOUT)\n self.config_kD(slot, kd, self.TIMEOUT)\n self.config_kF(slot, kf, self.TIMEOUT)\n\n def setIZone(self, slot: int, izone: float) -> None:\n \"\"\"Set the izone of the PIDF controller.\"\"\"\n self.config_IntegralZone(\n slot, int(izone * self.encoder_config.counts_per_radian), self.TIMEOUT\n )\n\n def setBrakeMode(self):\n self.setNeutralMode(self.NeutralMode.Brake)\n\n def setCoastMode(self):\n self.setNeutralMode(self.NeutralMode.Coast)\n\n def setSoftMax(self, limit: float):\n self.configForwardSoftLimitThreshold(\n int(limit * self.encoder_config.counts_per_radian)\n )\n self.configForwardSoftLimitEnable(True)\n\n def setSoftMin(self, limit: float):\n self.configReverseSoftLimitThreshold(\n int(limit * self.encoder_config.counts_per_radian)\n )\n self.configReverseSoftLimitEnable(True)\n\n def setMotionMagicConfig(self, vel: float, accel: float) -> None:\n self.configMotionCruiseVelocity(\n int(vel * self.encoder_config.counts_per_radian / 10), self.TIMEOUT\n )\n self.configMotionAcceleration(\n int(accel * self.encoder_config.counts_per_radian / 10), self.TIMEOUT\n )\n\n def setOutput(self, signal: float, max_signal: float = 1) -> None:\n \"\"\"Set the percent output of the motor.\"\"\"\n signal = np.clip(signal, -max_signal, max_signal)\n self.set(self.ControlMode.PercentOutput, signal)\n\n def setPosition(self, pos: float) -> None:\n \"\"\"Set the position of the motor.\"\"\"\n self.set(self.ControlMode.Position, pos * self.encoder_config.counts_per_radian)\n\n def setVelocity(self, vel: float, ff: float = 0) -> None:\n \"\"\"Set the velocity of the motor.\"\"\"\n self.set(\n self.ControlMode.Velocity,\n vel * self.encoder_config.counts_per_radian / 10,\n self.DemandType.ArbitraryFeedForward,\n ff,\n )\n\n def 
setMotionMagicPosition(self, pos: float) -> None:\n \"\"\"Set the position of the motor using motion magic.\"\"\"\n self.set(\n self.ControlMode.MotionMagic, pos * self.encoder_config.counts_per_radian\n )\n\n def zero(self, pos: float = 0) -> None:\n \"\"\"Zero the encoder if it exists.\"\"\"\n if self.encoder:\n self.setSelectedSensorPosition(\n int(pos * self.encoder_config.counts_per_radian), 0, self.TIMEOUT\n )\n else:\n logging.warning(self.no_encoder_warning)\n\n def getPosition(self) -> int:\n \"\"\"Get the encoder position if it exists.\"\"\"\n if self.encoder:\n return (\n self.getSelectedSensorPosition(0)\n * self.encoder_config.radians_per_count\n )\n else:\n logging.warning(self.no_encoder_warning)\n return 0\n\n def getVelocity(self) -> int:\n \"\"\"Get the encoder velocity if it exists.\"\"\"\n if self.encoder:\n return (\n self.getSelectedSensorVelocity(0)\n * self.encoder_config.radians_per_count\n * 10\n )\n else:\n logging.warning(self.no_encoder_warning)\n return 0\n\n def getError(self) -> int:\n \"\"\"Get the closed loop error if in closed loop mode.\"\"\"\n if self._isClosedLoop():\n return self.getClosedLoopError(0)\n else:\n logging.warning(self.no_closed_loop_warning)\n return 0\n\n def getTarget(self) -> int:\n \"\"\"Get the closed loop target if in closed loop mode.\"\"\"\n if self._isClosedLoop():\n return self.getClosedLoopTarget(0)\n else:\n logging.warning(self.no_closed_loop_warning)\n return 0\n\n def _isClosedLoop(self) -> bool:\n return self.getControlMode() in (\n self.ControlMode.Velocity,\n self.ControlMode.Position,\n self.ControlMode.MotionMagic,\n )\n" ]
[ [ "numpy.clip" ] ]
OlegJakushkin/KaggleGPT2-ArxivTitleGeneration
[ "270a492b426cc3f0969b6b2af677ab1a86c4439a" ]
[ "generate-gpt2.py" ]
[ "import os\nimport pandas as pd\nimport requests\n\nfrom tqdm import tqdm\n\ntqdm.pandas()\n\nfrom pandarallel import pandarallel\n\nfrom gensim.summarization import keywords\nfrom gpt2_client import *\nimport gpt_2_simple as gpt2\nimport tensorflow as tf\nfrom itertools import groupby\n\nfrom termcolor import colored, cprint\nimport sys\nfrom tqdm import tqdm\nimport json\nimport regex as re\n\nfrom tensorflow.contrib.training import HParams\nimport numpy as np\nfrom utils import *\nimport multiprocessing\nfrom math import *\nfrom functools import partial\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=str(\"\")\ndef chunks(l, n):\n return [l[i:i + n] for i in range(0, len(l), n)]\n\npandarallel.initialize(progress_bar=True)\n\n\ndef parse_csv():\n df = pd.read_csv(\"./test.csv\")\n print(df.head(5))\n\n def kwp(text):\n line = ', '.join(keywords(text, words=13).split('\\n'))\n return line\n\n df['keywords'] = df.abstract.parallel_apply(kwp)\n df.to_csv(\"parsed-test.csv\")\n\n\ncounter = multiprocessing.Value('i', 0)\n\n\nclass GPT2EC():\n def __init__(self, model_name='117M', save_dir='models'):\n assert save_dir != '', 'Please provide a save directory for the model weights and checkpoints. This cannot be empty.'\n\n self.model_name = model_name\n self.save_dir = save_dir\n self.enc = get_encoder(self.model_name, self.save_dir)\n\n\ndef encode(ctx, in_str):\n context_tokens = ctx.enc.encode(in_str)\n return context_tokens\n\ndef process_mt(ctx, encoded_batch, gpus=2):\n p = multiprocessing.Pool(gpus)\n total = len(encoded_batch)\n chunk_size = ceil(total / gpus)\n slice = chunks(encoded_batch, chunk_size)\n func = partial(processBatch, ctx)\n r = p.map(func, slice)\n out = []\n for i in r:\n out += i\n return out\n\n\ndef processBatch(ctx, encoded_batch):\n global counter\n cvalue = 0\n with counter.get_lock():\n cvalue = counter.value\n counter.value += 1\n\n models_dir = models_dir = os.path.expanduser(os.path.expandvars(ctx.save_dir))\n hparams = default_hparams()\n\n with open(os.path.join(ctx.save_dir, ctx.model_name, 'hparams.json')) as f:\n data = json.load(f)\n hparams.override_from_dict(data)\n\n length = hparams.n_ctx\n clen = 0\n for context_tokens in encoded_batch:\n csize = len(context_tokens)\n if clen < csize:\n clen = csize\n\n if csize > 900:\n csize = 900\n print(cvalue)\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=str(cvalue)\n print(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n tf.debugging.set_log_device_placement(True)\n #gpu_options = tf.GPUOptions(visible_device_list=str(cvalue))\n results = []\n with tf.Session(graph=tf.Graph()) as sess:\n batch_size = 1\n temperature = 1\n top_k = 2\n\n context = tf.placeholder(tf.int32, [batch_size, None])\n np.random.seed(None)\n tf.set_random_seed(None)\n initialized = False\n\n output = sample_sequence(\n context=context,\n hparams=hparams,\n length=350, # min(length, 1023 - csize),\n start_token=None,\n batch_size=batch_size,\n temperature=temperature,\n top_k=top_k\n )\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(os.path.join(ctx.save_dir, ctx.model_name))\n saver.restore(sess, ckpt)\n i = 0\n \n clen = len(encoded_batch)\n for context_tokens in encoded_batch:\n i = i + 1\n out = sess.run(output, feed_dict={\n context: [context_tokens]\n })[:, len(context_tokens):]\n results.append(out[0])\n if i % 2 == 0:\n print(str(cvalue) + \" at \" + str(i) + \" from \" + str(clen))\n \n final_generated_text = []\n for r in results:\n decoded = ctx.enc.decode(r)\n 
final_generated_text.append(decoded)\n\n return final_generated_text\n\n\ndef generate_titles():\n gpt2c = GPT2EC('run1', save_dir='checkpoint') # This could also be `345M`, `774M`, or `1558M`\n df = pd.read_csv(\"./parsed-test.csv\")#, nrows=10)\n print(df.head(5))\n\n def encodex(row):\n line = \"<END>\\n<START> <TEXT:> \" + str(row.abstract) + \" <KEYS:> \" + str(row.keywords) + \"; <TITLE:>\"\n code = gpt2c.enc.encode(line)\n return code\n\n codes = df.parallel_apply(encodex, axis=1)\n titles_array = process_mt(gpt2c, codes, 2)\n df['title'] = pd.Series(titles_array, index=df.index)\n\n def filter(generated):\n garr = generated.split()\n res = [i[0] for i in groupby(garr)]\n s = ' '\n result = s.join(res)\n result = result.split('<END>', 1)[0]\n\n try:\n result = result.split('.', 1)[0]\n # result = result.split('<TITLE:>',1)[1]\n except:\n pass\n result = re.sub(' +', ' ', result)\n result = re.sub(r\"^\\s+|\\s+$\", \"\", result)\n return result\n\n df['title'] = df.title.parallel_apply(filter)\n df.to_csv(\"titled.csv\")\n\n\nparse_csv()\nprint(\"\\n---\\nparsed!\\n---\\n\")\ngenerate_titles()\nprint(\"\\n---\\ngenerated corpys!\\n---\\n\")\n" ]
[ [ "tensorflow.Graph", "pandas.read_csv", "pandas.Series", "numpy.random.seed", "tensorflow.config.experimental.list_physical_devices", "tensorflow.debugging.set_log_device_placement", "tensorflow.placeholder", "tensorflow.set_random_seed", "tensorflow.train.Saver" ] ]
carlosal1015/sfepy
[ "f02f88c5df9814ad710c658429e23c90744b0d9d" ]
[ "sfepy/solvers/semismooth_newton.py" ]
[ "from __future__ import absolute_import\n\nimport numpy as nm\nimport numpy.linalg as nla\nimport scipy.sparse as sp\n\nfrom sfepy.base.base import output, get_default, debug\nfrom sfepy.base.timing import Timer\nfrom sfepy.solvers.nls import Newton, conv_test\nfrom sfepy.linalg import compose_sparse\nimport six\nfrom six.moves import range\n\nclass SemismoothNewton(Newton):\n r\"\"\"\n The semi-smooth Newton method.\n\n This method is suitable for solving problems of the following structure:\n\n .. math::\n \\begin{split}\n & F(y) = 0 \\\\\n & A(y) \\ge 0 \\;,\\ B(y) \\ge 0 \\;,\\ \\langle A(y), B(y) \\rangle = 0\n \\end{split}\n\n The function :math:`F(y)` represents the smooth part of the problem.\n\n Regular step: :math:`y \\leftarrow y - J(y)^{-1} \\Phi(y)`\n\n Steepest descent step: :math:`y \\leftarrow y - \\beta J(y) \\Phi(y)`\n\n Although ``fun_smooth_grad()`` computes the gradient of the smooth part\n only, it should return the global matrix, where the non-smooth part is\n uninitialized, but pre-allocated.\n \"\"\"\n name = 'nls.semismooth_newton'\n\n _parameters = [\n ('semismooth', 'bool', True, False,\n \"\"\"If True, use the semi-smooth algorithm. Otherwise a non-smooth\n equation is assumed (use a brute force).\"\"\"),\n ('i_max', 'int', 1, False,\n 'The maximum number of iterations.'),\n ('eps_a', 'float', 1e-10, False,\n 'The absolute tolerance for the residual, i.e. :math:`||f(x^i)||`.'),\n ('eps_r', 'float', 1.0, False,\n \"\"\"The relative tolerance for the residual, i.e. :math:`||f(x^i)|| /\n ||f(x^0)||`.\"\"\"),\n ('macheps', 'float', nm.finfo(nm.float64).eps, False,\n 'The float considered to be machine \"zero\".'),\n ('lin_red', 'float', 1.0, False,\n \"\"\"The linear system solution error should be smaller than (`eps_a` *\n `lin_red`), otherwise a warning is printed.\"\"\"),\n ('ls_on', 'float', 0.99999, False,\n \"\"\"Start the backtracking line-search by reducing the step, if\n :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`.\"\"\"),\n ('ls_red', '0.0 < float < 1.0', 0.1, False,\n 'The step reduction factor in case of correct residual assembling.'),\n ('ls_red_warp', '0.0 < float < 1.0', 0.001, False,\n \"\"\"The step reduction factor in case of failed residual assembling\n (e.g. 
the \"warp violation\" error caused by a negative volume\n element resulting from too large deformations).\"\"\"),\n ('ls_min', '0.0 < float < 1.0', 1e-5, False,\n 'The minimum step reduction factor.'),\n ]\n\n _colors = {'regular' : 'g', 'steepest_descent' : 'k'}\n\n def __call__(self, vec_x0, conf=None, fun_smooth=None, fun_smooth_grad=None,\n fun_a=None, fun_a_grad=None, fun_b=None, fun_b_grad=None,\n lin_solver=None, status=None):\n\n conf = get_default(conf, self.conf)\n\n fun_smooth = get_default(fun_smooth, self.fun_smooth)\n fun_smooth_grad = get_default(fun_smooth_grad, self.fun_smooth_grad)\n fun_a = get_default(fun_a, self.fun_a)\n fun_a_grad = get_default(fun_a_grad, self.fun_a_grad)\n fun_b = get_default(fun_b, self.fun_b)\n fun_b_grad = get_default(fun_b_grad, self.fun_b_grad)\n\n lin_solver = get_default(lin_solver, self.lin_solver)\n status = get_default(status, self.status)\n\n timer = Timer()\n time_stats = {}\n\n vec_x = vec_x0.copy()\n vec_x_last = vec_x0.copy()\n vec_dx = None\n\n if self.log is not None:\n self.log.plot_vlines(color='r', linewidth=1.0)\n\n err0 = -1.0\n err_last = -1.0\n it = 0\n step_mode = 'regular'\n r_last = None\n reuse_matrix = False\n while 1:\n\n ls = 1.0\n vec_dx0 = vec_dx;\n i_ls = 0\n while 1:\n timer.start()\n\n try:\n vec_smooth_r = fun_smooth(vec_x)\n vec_a_r = fun_a(vec_x)\n vec_b_r = fun_b(vec_x)\n\n except ValueError:\n vec_smooth_r = vec_semismooth_r = None\n if (it == 0) or (ls < conf.ls_min):\n output('giving up!')\n raise\n\n else:\n ok = False\n\n else:\n if conf.semismooth:\n # Semi-smooth equation.\n vec_semismooth_r = (nm.sqrt(vec_a_r**2.0 + vec_b_r**2.0)\n - (vec_a_r + vec_b_r))\n\n else:\n # Non-smooth equation (brute force).\n vec_semismooth_r = nm.where(vec_a_r < vec_b_r,\n vec_a_r, vec_b_r)\n\n r_last = (vec_smooth_r, vec_a_r, vec_b_r, vec_semismooth_r)\n\n ok = True\n\n time_stats['residual'] = timer.stop()\n\n if ok:\n vec_r = nm.r_[vec_smooth_r, vec_semismooth_r]\n\n try:\n err = nla.norm(vec_r)\n except:\n output('infs or nans in the residual:',\n vec_semismooth_r)\n output(nm.isfinite(vec_semismooth_r).all())\n debug()\n\n if self.log is not None:\n self.log(err, it)\n\n if it == 0:\n err0 = err;\n break\n\n if err < (err_last * conf.ls_on):\n step_mode = 'regular'\n break\n\n else:\n output('%s step line search' % step_mode)\n\n red = conf.ls_red[step_mode];\n output('iter %d, (%.5e < %.5e) (new ls: %e)'\\\n % (it, err, err_last * conf.ls_on, red * ls))\n\n else: # Failed to compute residual.\n red = conf.ls_red_warp;\n output('residual computation failed for iter %d'\n ' (new ls: %e)!' 
% (it, red * ls))\n\n if ls < conf.ls_min:\n if step_mode == 'regular':\n output('restore previous state')\n vec_x = vec_x_last.copy()\n (vec_smooth_r, vec_a_r, vec_b_r,\n vec_semismooth_r) = r_last\n err = err_last\n reuse_matrix = True\n\n step_mode = 'steepest_descent'\n\n else:\n output('linesearch failed, continuing anyway')\n\n break\n\n ls *= red;\n\n vec_dx = ls * vec_dx0;\n vec_x = vec_x_last.copy() - vec_dx\n\n i_ls += 1\n\n # End residual loop.\n\n output('%s step' % step_mode)\n\n if self.log is not None:\n self.log.plot_vlines([1],\n color=self._colors[step_mode],\n linewidth=0.5)\n\n err_last = err;\n vec_x_last = vec_x.copy()\n\n condition = conv_test(conf, it, err, err0)\n if condition >= 0:\n break\n\n timer.start()\n\n if not reuse_matrix:\n mtx_jac = self.compute_jacobian(vec_x, fun_smooth_grad,\n fun_a_grad, fun_b_grad,\n vec_smooth_r,\n vec_a_r, vec_b_r)\n\n else:\n reuse_matrix = False\n\n time_stats['matrix'] = timer.stop()\n\n timer.start()\n\n if step_mode == 'regular':\n vec_dx = lin_solver(vec_r, mtx=mtx_jac)\n\n vec_e = mtx_jac * vec_dx - vec_r\n lerr = nla.norm(vec_e)\n if lerr > (conf.eps_a * conf.lin_red):\n output('linear system not solved! (err = %e)' % lerr)\n\n output('switching to steepest descent step')\n step_mode = 'steepest_descent'\n vec_dx = mtx_jac.T * vec_r\n\n else:\n vec_dx = mtx_jac.T * vec_r\n\n time_stats['solve'] = timer.stop()\n\n for kv in six.iteritems(time_stats):\n output('%10s: %7.2f [s]' % kv)\n\n vec_x -= vec_dx\n it += 1\n\n if status is not None:\n status['time_stats'] = time_stats\n status['err0'] = err0\n status['err'] = err\n status['condition'] = condition\n\n if conf.log.plot is not None:\n if self.log is not None:\n self.log(save_figure=conf.log.plot)\n\n return vec_x\n\n def compute_jacobian(self, vec_x, fun_smooth_grad, fun_a_grad, fun_b_grad,\n vec_smooth_r, vec_a_r, vec_b_r):\n conf = self.conf\n\n mtx_s = fun_smooth_grad(vec_x)\n mtx_a = fun_a_grad(vec_x)\n mtx_b = fun_b_grad(vec_x)\n\n n_s = vec_smooth_r.shape[0]\n n_ns = vec_a_r.shape[0]\n\n if conf.semismooth:\n aa = nm.abs(vec_a_r)\n ab = nm.abs(vec_b_r)\n iz = nm.where((aa < (conf.macheps * max(aa.max(), 1.0)))\n & (ab < (conf.macheps * max(ab.max(), 1.0))))[0]\n inz = nm.setdiff1d(nm.arange(n_ns), iz)\n\n output('non_active/active: %d/%d' % (len(inz), len(iz)))\n\n mul_a = nm.empty_like(vec_a_r)\n mul_b = nm.empty_like(mul_a)\n\n # Non-active part of the jacobian.\n if len(inz) > 0:\n a_r_nz = vec_a_r[inz]\n b_r_nz = vec_b_r[inz]\n\n sqrt_ab = nm.sqrt(a_r_nz**2.0 + b_r_nz**2.0)\n mul_a[inz] = (a_r_nz / sqrt_ab) - 1.0\n mul_b[inz] = (b_r_nz / sqrt_ab) - 1.0\n\n # Active part of the jacobian.\n if len(iz) > 0:\n vec_z = nm.zeros_like(vec_x)\n vec_z[n_s+iz] = 1.0\n\n mtx_a_z = mtx_a[iz]\n mtx_b_z = mtx_b[iz]\n\n sqrt_ab = nm.empty((iz.shape[0],), dtype=vec_a_r.dtype)\n for ir in range(len(iz)):\n row_a_z = mtx_a_z[ir]\n row_b_z = mtx_b_z[ir]\n sqrt_ab[ir] = nm.sqrt((row_a_z * row_a_z.T).todense()\n + (row_b_z * row_b_z.T).todense())\n mul_a[iz] = ((mtx_a_z * vec_z) / sqrt_ab) - 1.0\n mul_b[iz] = ((mtx_b_z * vec_z) / sqrt_ab) - 1.0\n\n else:\n iz = nm.where(vec_a_r > vec_b_r)[0]\n mul_a = nm.zeros_like(vec_a_r)\n mul_b = nm.ones_like(mul_a)\n\n mul_a[iz] = 1.0\n mul_b[iz] = 0.0\n\n mtx_ns = sp.spdiags(mul_a, 0, n_ns, n_ns) * mtx_a \\\n + sp.spdiags(mul_b, 0, n_ns, n_ns) * mtx_b\n\n mtx_jac = compose_sparse([[mtx_s], [mtx_ns]]).tocsr()\n mtx_jac.sort_indices()\n\n return mtx_jac\n" ]
[ [ "numpy.ones_like", "numpy.abs", "numpy.sqrt", "numpy.isfinite", "numpy.empty_like", "numpy.arange", "numpy.linalg.norm", "numpy.finfo", "scipy.sparse.spdiags", "numpy.zeros_like", "numpy.where", "numpy.empty" ] ]
zommiommy/ray
[ "4fb195a22e972a0b54359ffa58afedb35e827540" ]
[ "rllib/agents/es/es.py" ]
[ "# Code in this file is copied and adapted from\n# https://github.com/openai/evolution-strategies-starter.\n\nfrom collections import namedtuple\nimport logging\nimport numpy as np\nimport time\n\nimport ray\nfrom ray.rllib.agents import Trainer, with_common_config\nfrom ray.rllib.agents.es import optimizers, utils\nfrom ray.rllib.agents.es.es_tf_policy import ESTFPolicy, rollout\nfrom ray.rllib.env.env_context import EnvContext\nfrom ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID\nfrom ray.rllib.utils import FilterManager\nfrom ray.rllib.utils.annotations import override\n\nlogger = logging.getLogger(__name__)\n\nResult = namedtuple(\"Result\", [\n \"noise_indices\", \"noisy_returns\", \"sign_noisy_returns\", \"noisy_lengths\",\n \"eval_returns\", \"eval_lengths\"\n])\n\n# yapf: disable\n# __sphinx_doc_begin__\nDEFAULT_CONFIG = with_common_config({\n \"action_noise_std\": 0.01,\n \"l2_coeff\": 0.005,\n \"noise_stdev\": 0.02,\n \"episodes_per_batch\": 1000,\n \"train_batch_size\": 10000,\n \"eval_prob\": 0.003,\n \"return_proc_mode\": \"centered_rank\",\n \"num_workers\": 10,\n \"stepsize\": 0.01,\n \"observation_filter\": \"MeanStdFilter\",\n \"noise_size\": 250000000,\n \"report_length\": 10,\n})\n# __sphinx_doc_end__\n# yapf: enable\n\n\[email protected]\ndef create_shared_noise(count):\n \"\"\"Create a large array of noise to be shared by all workers.\"\"\"\n seed = 123\n noise = np.random.RandomState(seed).randn(count).astype(np.float32)\n return noise\n\n\nclass SharedNoiseTable:\n def __init__(self, noise):\n self.noise = noise\n assert self.noise.dtype == np.float32\n\n def get(self, i, dim):\n return self.noise[i:i + dim]\n\n def sample_index(self, dim):\n return np.random.randint(0, len(self.noise) - dim + 1)\n\n\[email protected]\nclass Worker:\n def __init__(self,\n config,\n policy_params,\n env_creator,\n noise,\n worker_index,\n min_task_runtime=0.2):\n self.min_task_runtime = min_task_runtime\n self.config = config\n self.config.update(policy_params)\n self.config[\"single_threaded\"] = True\n self.noise = SharedNoiseTable(noise)\n\n env_context = EnvContext(config[\"env_config\"] or {}, worker_index)\n self.env = env_creator(env_context)\n from ray.rllib import models\n self.preprocessor = models.ModelCatalog.get_preprocessor(\n self.env, config[\"model\"])\n\n policy_cls = get_policy_class(config)\n self.policy = policy_cls(self.env.observation_space,\n self.env.action_space, config)\n\n @property\n def filters(self):\n return {DEFAULT_POLICY_ID: self.policy.observation_filter}\n\n def sync_filters(self, new_filters):\n for k in self.filters:\n self.filters[k].sync(new_filters[k])\n\n def get_filters(self, flush_after=False):\n return_filters = {}\n for k, f in self.filters.items():\n return_filters[k] = f.as_serializable()\n if flush_after:\n f.clear_buffer()\n return return_filters\n\n def rollout(self, timestep_limit, add_noise=True):\n rollout_rewards, rollout_fragment_length = rollout(\n self.policy,\n self.env,\n timestep_limit=timestep_limit,\n add_noise=add_noise)\n return rollout_rewards, rollout_fragment_length\n\n def do_rollouts(self, params, timestep_limit=None):\n # Set the network weights.\n self.policy.set_flat_weights(params)\n\n noise_indices, returns, sign_returns, lengths = [], [], [], []\n eval_returns, eval_lengths = [], []\n\n # Perform some rollouts with noise.\n task_tstart = time.time()\n while (len(noise_indices) == 0\n or time.time() - task_tstart < self.min_task_runtime):\n\n if np.random.uniform() < self.config[\"eval_prob\"]:\n # 
Do an evaluation run with no perturbation.\n self.policy.set_flat_weights(params)\n rewards, length = self.rollout(timestep_limit, add_noise=False)\n eval_returns.append(rewards.sum())\n eval_lengths.append(length)\n else:\n # Do a regular run with parameter perturbations.\n noise_index = self.noise.sample_index(self.policy.num_params)\n\n perturbation = self.config[\"noise_stdev\"] * self.noise.get(\n noise_index, self.policy.num_params)\n\n # These two sampling steps could be done in parallel on\n # different actors letting us update twice as frequently.\n self.policy.set_flat_weights(params + perturbation)\n rewards_pos, lengths_pos = self.rollout(timestep_limit)\n\n self.policy.set_flat_weights(params - perturbation)\n rewards_neg, lengths_neg = self.rollout(timestep_limit)\n\n noise_indices.append(noise_index)\n returns.append([rewards_pos.sum(), rewards_neg.sum()])\n sign_returns.append(\n [np.sign(rewards_pos).sum(),\n np.sign(rewards_neg).sum()])\n lengths.append([lengths_pos, lengths_neg])\n\n return Result(\n noise_indices=noise_indices,\n noisy_returns=returns,\n sign_noisy_returns=sign_returns,\n noisy_lengths=lengths,\n eval_returns=eval_returns,\n eval_lengths=eval_lengths)\n\n\ndef get_policy_class(config):\n if config[\"use_pytorch\"]:\n from ray.rllib.agents.es.es_torch_policy import ESTorchPolicy\n policy_cls = ESTorchPolicy\n else:\n policy_cls = ESTFPolicy\n return policy_cls\n\n\nclass ESTrainer(Trainer):\n \"\"\"Large-scale implementation of Evolution Strategies in Ray.\"\"\"\n\n _name = \"ES\"\n _default_config = DEFAULT_CONFIG\n\n @override(Trainer)\n def _init(self, config, env_creator):\n env_context = EnvContext(config[\"env_config\"] or {}, worker_index=0)\n env = env_creator(env_context)\n policy_cls = get_policy_class(config)\n self.policy = policy_cls(\n obs_space=env.observation_space,\n action_space=env.action_space,\n config=config)\n self.optimizer = optimizers.Adam(self.policy, config[\"stepsize\"])\n self.report_length = config[\"report_length\"]\n\n # Create the shared noise table.\n logger.info(\"Creating shared noise table.\")\n noise_id = create_shared_noise.remote(config[\"noise_size\"])\n self.noise = SharedNoiseTable(ray.get(noise_id))\n\n # Create the actors.\n logger.info(\"Creating actors.\")\n self._workers = [\n Worker.remote(config, {}, env_creator, noise_id, idx + 1)\n for idx in range(config[\"num_workers\"])\n ]\n\n self.episodes_so_far = 0\n self.reward_list = []\n self.tstart = time.time()\n\n @override(Trainer)\n def _train(self):\n config = self.config\n\n theta = self.policy.get_flat_weights()\n assert theta.dtype == np.float32\n assert len(theta.shape) == 1\n\n # Put the current policy weights in the object store.\n theta_id = ray.put(theta)\n # Use the actors to do rollouts, note that we pass in the ID of the\n # policy weights.\n results, num_episodes, num_timesteps = self._collect_results(\n theta_id, config[\"episodes_per_batch\"], config[\"train_batch_size\"])\n\n all_noise_indices = []\n all_training_returns = []\n all_training_lengths = []\n all_eval_returns = []\n all_eval_lengths = []\n\n # Loop over the results.\n for result in results:\n all_eval_returns += result.eval_returns\n all_eval_lengths += result.eval_lengths\n\n all_noise_indices += result.noise_indices\n all_training_returns += result.noisy_returns\n all_training_lengths += result.noisy_lengths\n\n assert len(all_eval_returns) == len(all_eval_lengths)\n assert (len(all_noise_indices) == len(all_training_returns) ==\n len(all_training_lengths))\n\n 
self.episodes_so_far += num_episodes\n\n # Assemble the results.\n eval_returns = np.array(all_eval_returns)\n eval_lengths = np.array(all_eval_lengths)\n noise_indices = np.array(all_noise_indices)\n noisy_returns = np.array(all_training_returns)\n noisy_lengths = np.array(all_training_lengths)\n\n # Process the returns.\n if config[\"return_proc_mode\"] == \"centered_rank\":\n proc_noisy_returns = utils.compute_centered_ranks(noisy_returns)\n else:\n raise NotImplementedError(config[\"return_proc_mode\"])\n\n # Compute and take a step.\n g, count = utils.batched_weighted_sum(\n proc_noisy_returns[:, 0] - proc_noisy_returns[:, 1],\n (self.noise.get(index, self.policy.num_params)\n for index in noise_indices),\n batch_size=500)\n g /= noisy_returns.size\n assert (g.shape == (self.policy.num_params, ) and g.dtype == np.float32\n and count == len(noise_indices))\n # Compute the new weights theta.\n theta, update_ratio = self.optimizer.update(-g +\n config[\"l2_coeff\"] * theta)\n # Set the new weights in the local copy of the policy.\n self.policy.set_flat_weights(theta)\n # Store the rewards\n if len(all_eval_returns) > 0:\n self.reward_list.append(np.mean(eval_returns))\n\n # Now sync the filters\n FilterManager.synchronize({\n DEFAULT_POLICY_ID: self.policy.observation_filter\n }, self._workers)\n\n info = {\n \"weights_norm\": np.square(theta).sum(),\n \"grad_norm\": np.square(g).sum(),\n \"update_ratio\": update_ratio,\n \"episodes_this_iter\": noisy_lengths.size,\n \"episodes_so_far\": self.episodes_so_far,\n }\n\n reward_mean = np.mean(self.reward_list[-self.report_length:])\n result = dict(\n episode_reward_mean=reward_mean,\n episode_len_mean=eval_lengths.mean(),\n timesteps_this_iter=noisy_lengths.sum(),\n info=info)\n\n return result\n\n @override(Trainer)\n def compute_action(self, observation, *args, **kwargs):\n action = self.policy.compute_actions(observation, update=False)[0]\n if kwargs.get(\"full_fetch\"):\n return action, [], {}\n return action\n\n @override(Trainer)\n def _stop(self):\n # workaround for https://github.com/ray-project/ray/issues/1516\n for w in self._workers:\n w.__ray_terminate__.remote()\n\n def _collect_results(self, theta_id, min_episodes, min_timesteps):\n num_episodes, num_timesteps = 0, 0\n results = []\n while num_episodes < min_episodes or num_timesteps < min_timesteps:\n logger.info(\n \"Collected {} episodes {} timesteps so far this iter\".format(\n num_episodes, num_timesteps))\n rollout_ids = [\n worker.do_rollouts.remote(theta_id) for worker in self._workers\n ]\n # Get the results of the rollouts.\n for result in ray.get(rollout_ids):\n results.append(result)\n # Update the number of episodes and the number of timesteps\n # keeping in mind that result.noisy_lengths is a list of lists,\n # where the inner lists have length 2.\n num_episodes += sum(len(pair) for pair in result.noisy_lengths)\n num_timesteps += sum(\n sum(pair) for pair in result.noisy_lengths)\n\n return results, num_episodes, num_timesteps\n\n def __getstate__(self):\n return {\n \"weights\": self.policy.get_flat_weights(),\n \"filter\": self.policy.observation_filter,\n \"episodes_so_far\": self.episodes_so_far,\n }\n\n def __setstate__(self, state):\n self.episodes_so_far = state[\"episodes_so_far\"]\n self.policy.set_flat_weights(state[\"weights\"])\n self.policy.observation_filter = state[\"filter\"]\n FilterManager.synchronize({\n DEFAULT_POLICY_ID: self.policy.observation_filter\n }, self._workers)\n" ]
[ [ "numpy.square", "numpy.sign", "numpy.mean", "numpy.random.uniform", "numpy.array", "numpy.random.RandomState" ] ]
awslabs/sagemaker-privacy-for-nlp
[ "899f178748401eaf2713cec83d37306f8a1327a8" ]
[ "source/sagemaker/src/package/data_privatization/container/train.py" ]
[ "# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0\n# Licensed under the Amazon Software License http://aws.amazon.com/asl/\n\nimport argparse\nimport os\nfrom os.path import join\nimport json\nimport random\nimport time\nimport logging\nimport subprocess\nfrom pathlib import Path\nfrom glob import glob\n\nimport torch\nfrom torch import nn\nfrom torch import optim\n\nfrom torchtext import data\nfrom torchtext.data import Field, TabularDataset\n\nfrom torchtext.vocab import Vectors\n\nLOG = logging.getLogger()\nLOG.setLevel(logging.INFO)\n\n\nclass FastText(nn.Module):\n # The model is taken from the excellent Torchtext tutorial at\n # https://github.com/bentrevett/pytorch-sentiment-analysis/\n def __init__(self, vocab_size, embedding_dim, output_dim, pad_idx):\n super().__init__()\n\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)\n\n self.fc = nn.Linear(embedding_dim, output_dim)\n\n def forward(self, text):\n # text = [sent len, batch size]\n\n embedded = self.embedding(text)\n\n # embedded = [sent len, batch size, emb dim]\n\n embedded = embedded.permute(1, 0, 2)\n\n # embedded = [batch size, sent len, emb dim]\n\n pooled = nn.functional.avg_pool2d(embedded, (embedded.shape[1], 1)).squeeze(1)\n\n # pooled = [batch size, embedding_dim]\n\n return self.fc(pooled)\n\n\ndef binary_accuracy(preds, y):\n \"\"\"\n Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8\n \"\"\"\n\n # round predictions to the closest integer\n rounded_preds = torch.round(torch.sigmoid(preds))\n correct = (rounded_preds == y).float() # convert into float for division\n acc = correct.sum() / len(correct)\n return acc\n\n\ndef evaluate(model, iterator, criterion):\n epoch_loss = 0\n epoch_acc = 0\n\n model.eval()\n\n for batch in iterator:\n predictions = model(batch.review).squeeze(1)\n\n loss = criterion(predictions, batch.sentiment)\n\n acc = binary_accuracy(predictions, batch.sentiment)\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / iterator.iterations, epoch_acc / iterator.iterations\n\n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\n\ndef train_one_epoch(model, iterator, optimizer, criterion):\n epoch_loss = 0\n epoch_acc = 0\n\n model.train()\n\n for batch in iterator:\n optimizer.zero_grad()\n\n predictions = model(batch.review).squeeze(1)\n\n loss = criterion(predictions, batch.sentiment)\n\n acc = binary_accuracy(predictions, batch.sentiment)\n\n loss.backward()\n\n optimizer.step()\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef train(model, train_iterator, valid_iterator, n_epochs, model_dir):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n optimizer = optim.Adam(model.parameters())\n\n criterion = nn.BCEWithLogitsLoss()\n\n model = model.to(device)\n criterion = criterion.to(device)\n\n best_valid_loss = float('inf')\n\n model_path = join(model_dir, \"model.pt\")\n\n for epoch in range(n_epochs):\n\n print(f'Epoch: {epoch + 1:02} started...')\n\n start_time = time.time()\n\n train_loss, train_acc = train_one_epoch(model, train_iterator, optimizer, criterion)\n valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)\n\n end_time = 
time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), model_path)\n\n print(f'Epoch: {epoch + 1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc * 100:.2f}%')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc * 100:.2f}%')\n\n\ndef create_fields():\n TEXT = Field(sequential=True, tokenize=\"basic_english\")\n\n LABEL = data.LabelField(dtype=torch.float)\n\n return TEXT, LABEL\n\n\ndef create_iterators(train_data, valid_data):\n # Create iterators\n BATCH_SIZE = 64\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n train_iterator, valid_iterator = data.BucketIterator.splits(\n (train_data, valid_data),\n batch_size=BATCH_SIZE,\n sort=False,\n device=device)\n\n return train_iterator, valid_iterator\n\n\ndef create_model(input_dimensions, embedding_size, pad_idx, unk_idx, pretrained_embeddings):\n model = FastText(input_dimensions, embedding_size, output_dim=1, pad_idx=pad_idx)\n\n model.embedding.weight.data.copy_(pretrained_embeddings)\n\n # Set <unk> and <pad> token vectors to all zero\n model.embedding.weight.data[unk_idx] = torch.zeros(embedding_size)\n model.embedding.weight.data[pad_idx] = torch.zeros(embedding_size)\n\n return model\n\n\nif __name__ == '__main__':\n SEED = 42\n\n torch.manual_seed(SEED)\n torch.backends.cudnn.deterministic = True\n parser = argparse.ArgumentParser()\n\n # hyperparameters sent by the client are passed as command-line arguments to the script.\n parser.add_argument('--epochs', type=int, default=5)\n parser.add_argument('--batch-size', type=int, default=64)\n parser.add_argument('--vocab-size', type=int, default=25_000)\n parser.add_argument('--embedding-size', type=int, default=300)\n\n # Data, model, and output directories\n parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--train-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])\n parser.add_argument('--vectors-dir', type=str, default=os.environ['SM_CHANNEL_VECTORS'])\n parser.add_argument('--vectors-filename', type=str, default='glove.6B.300d.txt.gz')\n parser.add_argument('--train-filename', type=str, default='train_examples.csv')\n\n args, _ = parser.parse_known_args()\n\n LOG.info(\"Loading data...\")\n\n TEXT, LABEL = create_fields()\n\n fields = [('review', TEXT),\n ('sentiment', LABEL)]\n\n # Torchtext expects a single file, so we concatenate the partial output files\n train_file = Path(\"{}/{}\".format(os.environ['SM_CHANNEL_TRAIN'], args.train_filename))\n if not train_file.exists():\n part_files = glob(\"{0}/part-*\".format(os.environ['SM_CHANNEL_TRAIN']))\n subprocess.check_call([\"cat\"] + part_files, stdout=train_file.open(mode='w'))\n\n assert train_file.exists()\n reviews = TabularDataset(\n path=str(train_file), format='csv',\n fields=fields,\n skip_header=True)\n\n train_data, valid_data = reviews.split(\n split_ratio=[.9, .1], random_state=random.seed(SEED))\n\n # Create vocabs\n MAX_VOCAB_SIZE = args.vocab_size\n vectors = Vectors(args.vectors_dir + \"/\" + args.vectors_filename)\n\n TEXT.build_vocab(train_data,\n max_size=MAX_VOCAB_SIZE,\n vectors=vectors,\n unk_init=torch.Tensor.normal_)\n\n LABEL.build_vocab(train_data)\n\n train_iterator, valid_iterator = create_iterators(train_data, valid_data)\n\n 
LOG.info(\"Instantiating model...\")\n model = create_model(\n len(TEXT.vocab),\n args.embedding_size,\n TEXT.vocab.stoi[TEXT.pad_token],\n TEXT.vocab.stoi[TEXT.unk_token],\n TEXT.vocab.vectors)\n\n LOG.info(\"Starting training...\")\n train(model, train_iterator, valid_iterator, args.epochs, args.model_dir)\n\n # Save vocab, we'll need them for testing later\n vocab_path = join(args.model_dir, \"vocab.pt\")\n torch.save(TEXT.vocab, vocab_path)\n\n # Keep track of experiment settings\n json_file = join(args.model_dir, \"training-settings.json\")\n with open(json_file, 'w') as f:\n f.write(json.dumps(vars(args)))\n\n\n" ]
[ [ "torch.sigmoid", "torch.zeros", "torch.manual_seed", "torch.nn.functional.avg_pool2d", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.BCEWithLogitsLoss", "torch.cuda.is_available", "torch.save" ] ]
DragonDriver/milvus
[ "b72e4c63726e28cd54f90c642faf5394d4a24c12" ]
[ "tests20/python_client/testcases/test_collection.py" ]
[ "import numpy\nimport pandas as pd\nimport pytest\nfrom pymilvus import DataType\n\nfrom base.client_base import TestcaseBase\nfrom utils.util_log import test_log as log\nfrom common import common_func as cf\nfrom common import common_type as ct\nfrom common.common_type import CaseLabel, CheckTasks\n\nprefix = \"collection\"\nexp_name = \"name\"\nexp_schema = \"schema\"\nexp_num = \"num_entities\"\nexp_primary = \"primary\"\ndefault_schema = cf.gen_default_collection_schema()\ndefault_binary_schema = cf.gen_default_binary_collection_schema()\n\n\nclass TestCollectionParams(TestcaseBase):\n \"\"\" Test case of collection interface \"\"\"\n\n @pytest.fixture(scope=\"function\", params=ct.get_invalid_strs)\n def get_none_removed_invalid_strings(self, request):\n if request.param is None:\n pytest.skip(\"None schema is valid\")\n yield request.param\n\n @pytest.fixture(scope=\"function\", params=ct.get_invalid_strs)\n def get_invalid_type_fields(self, request):\n if isinstance(request.param, list):\n pytest.skip(\"list is valid fields\")\n yield request.param\n\n @pytest.fixture(scope=\"function\", params=cf.gen_all_type_fields())\n def get_unsupported_primary_field(self, request):\n if request.param.dtype == DataType.INT64:\n pytest.skip(\"int64 type is valid primary key\")\n yield request.param\n\n @pytest.fixture(scope=\"function\", params=ct.get_invalid_strs)\n def get_invalid_dim(self, request):\n if request.param == 1:\n request.param = 0\n yield request.param\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_collection(self):\n \"\"\"\n target: test collection with default schema\n method: create collection with default schema\n expected: assert collection property\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n self.collection_wrap.init_collection(c_name, schema=default_schema,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema, exp_num: 0,\n exp_primary: ct.default_int64_field_name})\n assert c_name, _ in self.utility_wrap.list_collections()\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_collection_empty_name(self):\n \"\"\"\n target: test collection with empty name\n method: create collection with a empty name\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = \"\"\n error = {ct.err_code: 1, ct.err_msg: \"value is illegal\"}\n self.collection_wrap.init_collection(c_name, schema=default_schema, check_task=CheckTasks.err_res,\n check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.parametrize(\"name\", [[], 1, [1, \"2\", 3], (1,), {1: 1}, None])\n def test_collection_illegal_name(self, name):\n \"\"\"\n target: test collection with illegal name\n method: create collection with illegal name\n expected: raise exception\n \"\"\"\n self._connect()\n error = {ct.err_code: 1, ct.err_msg: \"`collection_name` value {} is illegal\".format(name)}\n self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,\n check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.parametrize(\"name\", [\"12-s\", \"12 s\", \"(mn)\", \"中文\", \"%$#\", \"a\".join(\"a\" for i in range(256))])\n def test_collection_invalid_name(self, name):\n \"\"\"\n target: test collection with invalid name\n method: create collection with invalid name\n expected: raise exception\n \"\"\"\n self._connect()\n error = {ct.err_code: 1, ct.err_msg: \"Invalid collection name: {}\".format(name)}\n self.collection_wrap.init_collection(name, schema=default_schema, 
check_task=CheckTasks.err_res,\n check_items=error)\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.xfail(reason=\"issue #5947\")\n def test_collection_dup_name(self):\n \"\"\"\n target: test collection with dup name\n method: create collection with dup name and none schema and data\n expected: collection properties consistent\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n self.collection_wrap.init_collection(collection_w.name)\n assert collection_w.name == self.collection_wrap.name\n assert collection_w.schema == self.collection_wrap.schema\n assert collection_w.num_entities == self.collection_wrap.num_entities\n assert collection_w.name, _ in self.utility_wrap.list_collections()[0]\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"issue #5947\")\n def test_collection_dup_name_with_desc(self):\n \"\"\"\n target: test collection with dup name\n method: 1. default schema with desc 2. dup name collection\n expected: desc consistent\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_default_collection_schema(description=ct.collection_desc)\n collection_w = self.init_collection_wrap(name=c_name, schema=schema,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n self.collection_wrap.init_collection(c_name,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n assert collection_w.description == self.collection_wrap.description\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_dup_name_new_schema(self):\n \"\"\"\n target: test collection with dup name and new schema\n method: 1.create collection with default schema\n 2. collection with dup name and new schema\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n fields = [cf.gen_int64_field(is_primary=True)]\n schema = cf.gen_collection_schema(fields=fields)\n error = {ct.err_code: 0, ct.err_msg: \"The collection already exist, but the schema isnot the same as the \"\n \"schema passed in\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_dup_name_new_primary(self):\n \"\"\"\n target: test collection with dup name and new primary_field schema\n method: 1.collection with default schema\n 2. 
collection with same fields and new primary_field schema\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n int_field_one = cf.gen_int64_field()\n int_field_two = cf.gen_int64_field(name=\"int2\")\n fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]\n schema = cf.gen_collection_schema(fields, primary_field=int_field_one.name)\n collection_w = self.init_collection_wrap(name=c_name, schema=schema,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema,\n exp_primary: int_field_one.name})\n new_schema = cf.gen_collection_schema(fields, primary_field=int_field_two.name)\n error = {ct.err_code: 0, ct.err_msg: \"The collection already exist, but the schema isnot the same as the \"\n \"schema passed in\"}\n self.collection_wrap.init_collection(c_name, schema=new_schema, check_task=CheckTasks.err_res,\n check_items=error)\n assert collection_w.primary_field.name == int_field_one.name\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_dup_name_new_dim(self):\n \"\"\"\n target: test collection with dup name and new dim schema\n method: 1. default schema 2. schema with new dim\n expected: raise exception\n \"\"\"\n self._connect()\n new_dim = 120\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n schema = cf.gen_default_collection_schema()\n new_fields = cf.gen_float_vec_field(dim=new_dim)\n schema.fields[-1] = new_fields\n error = {ct.err_code: 0, ct.err_msg: \"The collection already exist, but the schema isnot the same as the \"\n \"schema passed in.\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n dim = collection_w.schema.fields[-1].params['dim']\n assert dim == ct.default_dim\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_collection_dup_name_invalid_schema_type(self, get_none_removed_invalid_strings):\n \"\"\"\n target: test collection with dup name and invalid schema\n method: 1. default schema 2. 
invalid schema\n expected: raise exception and\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n error = {ct.err_code: 0, ct.err_msg: \"Schema type must be schema.CollectionSchema\"}\n schema = get_none_removed_invalid_strings\n self.collection_wrap.init_collection(collection_w.name, schema=schema,\n check_task=CheckTasks.err_res, check_items=error)\n assert collection_w.name == c_name\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"#5947\")\n def test_collection_dup_name_same_schema(self):\n \"\"\"\n target: test collection with dup name and same schema\n method: dup name and same schema\n expected: two collection object is available\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n self.collection_wrap.init_collection(name=c_name, schema=default_schema,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n assert collection_w.name == self.collection_wrap.name\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_collection_none_schema(self):\n \"\"\"\n target: test collection with none schema\n method: create collection with none schema\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n error = {ct.err_code: 0, ct.err_msg: \"Should be passed into the schema\"}\n self.collection_wrap.init_collection(c_name, schema=None, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_collection_invalid_type_schema(self, get_none_removed_invalid_strings):\n \"\"\"\n target: test collection with invalid schema\n method: create collection with non-CollectionSchema type schema\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n error = {ct.err_code: 1, ct.err_msg: \"schema type must be schema.CollectionSchema\"}\n self.collection_wrap.init_collection(c_name, schema=get_none_removed_invalid_strings,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_invalid_type_fields(self, get_invalid_type_fields):\n \"\"\"\n target: test collection with invalid fields type, non-list\n method: create collection schema with non-list invalid fields\n expected: exception\n \"\"\"\n self._connect()\n fields = get_invalid_type_fields\n error = {ct.err_code: 1, ct.err_msg: \"The fields of schema must be type list\"}\n self.collection_schema_wrap.init_collection_schema(fields=fields,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_with_unknown_type(self):\n \"\"\"\n target: test collection with unknown type\n method: create with DataType.UNKNOWN\n expected: raise exception\n \"\"\"\n self._connect()\n error = {ct.err_code: 0, ct.err_msg: \"Field type not support <DataType.UNKNOWN: 999\"}\n self.field_schema_wrap.init_field_schema(name=\"unknown\", dtype=DataType.UNKNOWN,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.parametrize(\"name\", [[], 1, (1,), {1: 1}, \"12-s\"])\n def test_collection_invalid_type_field(self, name):\n \"\"\"\n target: test collection with invalid 
field name\n method: invalid string name\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=5, is_primary=True)\n vec_field = cf.gen_float_vec_field()\n schema = cf.gen_collection_schema(fields=[field, vec_field])\n error = {ct.err_code: 1, ct.err_msg: \"expected one of: bytes, unicode\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.parametrize(\"name\", [\"12-s\", \"12 s\", \"(mn)\", \"中文\", \"%$#\", \"a\".join(\"a\" for i in range(256))])\n def test_collection_invalid_field_name(self, name):\n \"\"\"\n target: test collection with invalid field name\n method: invalid string name\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=True)\n vec_field = cf.gen_float_vec_field()\n schema = cf.gen_collection_schema(fields=[field, vec_field])\n error = {ct.err_code: 1, ct.err_msg: \"Invalid field name\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_none_field_name(self):\n \"\"\"\n target: test field schema with None name\n method: None field name\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n field, _ = self.field_schema_wrap.init_field_schema(name=None, dtype=DataType.INT64, is_primary=True)\n schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])\n error = {ct.err_code: 1, ct.err_msg: \"You should specify the name of field\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.parametrize(\"dtype\", [6, [[]], {}, (), \"\", \"a\"])\n def test_collection_invalid_field_type(self, dtype):\n \"\"\"\n target: test collection with invalid field type\n method: invalid DataType\n expected: raise exception\n \"\"\"\n self._connect()\n error = {ct.err_code: 0, ct.err_msg: \"Field type must be of DataType\"}\n self.field_schema_wrap.init_field_schema(name=\"test\", dtype=dtype, is_primary=True,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_collection_field_dtype_float_value(self):\n \"\"\"\n target: test collection with float type\n method: create field with float type\n expected:\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=5.0,\n is_primary=True)\n schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])\n error = {ct.err_code: 0, ct.err_msg: \"Field type must be of DataType\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.xfail(reason=\"#5918\")\n def test_collection_empty_fields(self):\n \"\"\"\n target: test collection with empty fields\n method: create collection with fields = []\n expected: exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_collection_schema(fields=[], primary_field=ct.default_int64_field_name)\n error = {ct.err_code: 0, ct.err_msg: \"The field of the schema cannot be empty\"}\n 
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_dup_field(self):\n \"\"\"\n target: test collection with dup field name\n method: Two FieldSchema have same name\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n field_one = cf.gen_int64_field(is_primary=True)\n field_two = cf.gen_int64_field()\n schema = cf.gen_collection_schema(fields=[field_one, field_two, cf.gen_float_vec_field()])\n error = {ct.err_code: 0, ct.err_msg: \"duplicated field name\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n assert not self.utility_wrap.has_collection(c_name)[0]\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.skip(reason=\"waiting for required int primary field\")\n @pytest.mark.parametrize(\"field\", [cf.gen_float_vec_field(), cf.gen_binary_vec_field()])\n def test_collection_only_vector_field(self, field):\n \"\"\"\n target: test collection just with vec field\n method: create with float-vec fields\n expected: raise exception\n \"\"\"\n self._connect()\n error = {ct.err_code: 0, ct.err_msg: \"Must be have a primary key field\"}\n self.collection_schema_wrap.init_collection_schema([field], check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_multi_float_vectors(self):\n \"\"\"\n target: test collection with multi float vectors\n method: create collection with two float-vec fields\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_float_vec_field(name=\"tmp\")]\n schema = cf.gen_collection_schema(fields=fields, auto_id=True)\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_mix_vectors(self):\n \"\"\"\n target: test collection with mix vectors\n method: create with float and binary vec\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_binary_vec_field()]\n schema = cf.gen_collection_schema(fields=fields, auto_id=True)\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_collection_without_vectors(self):\n \"\"\"\n target: test collection without vectors\n method: create collection only with int field\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])\n error = {ct.err_code: 0, ct.err_msg: \"The schema must have vector column\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_without_primary_field(self):\n \"\"\"\n target: test collection without primary field\n method: no primary field specified in collection schema and fields\n expected: raise exception\n \"\"\"\n self._connect()\n int_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64)\n vec_fields, _ = 
self.field_schema_wrap.init_field_schema(name=ct.default_float_vec_field_name,\n dtype=DataType.FLOAT_VECTOR, dim=ct.default_dim)\n error = {ct.err_code: 0, ct.err_msg: \"Must be have a primary key field\"}\n self.collection_schema_wrap.init_collection_schema([int_fields, vec_fields],\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_is_primary_false(self):\n \"\"\"\n target: test collection with all is_primary false\n method: set all fields if_primary false\n expected: raise exception\n \"\"\"\n self._connect()\n fields = [cf.gen_int64_field(is_primary=False), cf.gen_float_field(is_primary=False),\n cf.gen_float_vec_field(is_primary=False)]\n error = {ct.err_code: 0, ct.err_msg: \"Must be have a primary key field\"}\n self.collection_schema_wrap.init_collection_schema(fields, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.parametrize(\"is_primary\", ct.get_invalid_strs)\n def test_collection_invalid_is_primary(self, is_primary):\n \"\"\"\n target: test collection with invalid primary\n method: define field with is_primary=non-bool\n expected: raise exception\n \"\"\"\n self._connect()\n name = cf.gen_unique_str(prefix)\n error = {ct.err_code: 0, ct.err_msg: \"Param is_primary must be bool type\"}\n self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=is_primary,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"#5918\")\n @pytest.mark.parametrize(\"primary_field\", [\"12-s\", \"12 s\", \"(mn)\", \"中文\", \"%$#\", \"a\".join(\"a\" for i in range(256))])\n def test_collection_invalid_primary_field(self, primary_field):\n \"\"\"\n target: test collection with invalid primary_field\n method: specify invalid string primary_field in collection schema\n expected: raise exception\n \"\"\"\n self._connect()\n fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]\n error = {ct.err_code: 0, ct.err_msg: \"invalid primary field\"}\n self.collection_schema_wrap.init_collection_schema(fields=fields, primary_field=primary_field,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"#5918\")\n @pytest.mark.parametrize(\"primary_field\", [[], 1, [1, \"2\", 3], (1,), {1: 1}, None])\n def test_collection_non_string_primary_field(self, primary_field):\n \"\"\"\n target: test collection with non-string primary_field\n method: primary_field type is not string\n expected: raise exception\n \"\"\"\n self._connect()\n fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]\n error = {ct.err_code: 0, ct.err_msg: \"invalid primary field\"}\n self.collection_schema_wrap.init_collection_schema(fields, primary_field=primary_field,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"#5918\")\n def test_collection_not_existed_primary_field(self):\n \"\"\"\n target: test collection with not exist primary field\n method: specify not existed field as primary_field\n expected: raise exception\n \"\"\"\n self._connect()\n fake_field = cf.gen_unique_str()\n fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]\n error = {ct.err_code: 0, ct.err_msg: \"not existed field\"}\n\n self.collection_schema_wrap.init_collection_schema(fields, primary_field=fake_field,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L0)\n def 
test_collection_primary_in_schema(self):\n \"\"\"\n target: test collection with primary field\n method: specify primary field in CollectionSchema\n expected: collection.primary_field\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_default_collection_schema(primary_field=ct.default_int64_field_name)\n self.collection_wrap.init_collection(c_name, schema=schema)\n assert self.collection_wrap.primary_field.name == ct.default_int64_field_name\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_collection_primary_in_field(self):\n \"\"\"\n target: test collection with primary field\n method: specify primary field in FieldSchema\n expected: collection.primary_field\n \"\"\"\n self._connect()\n fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(), cf.gen_float_vec_field()]\n schema, _ = self.collection_schema_wrap.init_collection_schema(fields)\n self.collection_wrap.init_collection(cf.gen_unique_str(prefix), schema=schema)\n assert self.collection_wrap.primary_field.name == ct.default_int64_field_name\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_unsupported_primary_field(self, get_unsupported_primary_field):\n \"\"\"\n target: test collection with unsupported primary field type\n method: specify non-int64 as primary field\n expected: raise exception\n \"\"\"\n self._connect()\n field = get_unsupported_primary_field\n vec_field = cf.gen_float_vec_field(name=\"vec\")\n error = {ct.err_code: 1, ct.err_msg: \"Primary key type must be DataType.INT64.\"}\n self.collection_schema_wrap.init_collection_schema(fields=[field, vec_field], primary_field=field.name,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_multi_primary_fields(self):\n \"\"\"\n target: test collection with multi primary\n method: collection with two primary fields\n expected: raise exception\n \"\"\"\n self._connect()\n int_field_one = cf.gen_int64_field(is_primary=True)\n int_field_two = cf.gen_int64_field(name=\"int2\", is_primary=True)\n error = {ct.err_code: 0, ct.err_msg: \"Primary key field can only be one\"}\n self.collection_schema_wrap.init_collection_schema(fields=[int_field_one, int_field_two],\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_primary_inconsistent(self):\n \"\"\"\n target: test collection with different primary field setting\n method: 1. set A field is_primary 2. set primary_field is B\n expected: raise exception\n \"\"\"\n self._connect()\n int_field_one = cf.gen_int64_field(is_primary=True)\n int_field_two = cf.gen_int64_field(name=\"int2\")\n fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]\n error = {ct.err_code: 0, ct.err_msg: \"Primary key field can only be one\"}\n self.collection_schema_wrap.init_collection_schema(fields, primary_field=int_field_two.name,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_primary_consistent(self):\n \"\"\"\n target: test collection with both collection schema and field schema\n method: 1. 
set A field is_primary 2.set primary_field is A\n expected: verify primary field\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n int_field_one = cf.gen_int64_field(is_primary=True)\n schema = cf.gen_collection_schema(fields=[int_field_one, cf.gen_float_vec_field()],\n primary_field=int_field_one.name)\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.xfail(reason=\"#5918\")\n @pytest.mark.parametrize(\"auto_id\", [True, False])\n def test_collection_auto_id_in_field_schema(self, auto_id):\n \"\"\"\n target: test collection with auto_id in field schema\n method: specify auto_id True in field schema\n expected: verify schema's auto_id\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)\n log.debug(f'int_field auto_id: {int_field.auto_id}')\n vec_field = cf.gen_float_vec_field(name='vec')\n schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field])\n assert schema.auto_id == auto_id\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.parametrize(\"auto_id\", [True, False])\n def test_collection_auto_id_in_collection_schema(self, auto_id):\n \"\"\"\n target: test collection with auto_id in collection schema\n method: specify auto_id True in collection schema\n expected: verify schema auto_id and collection schema\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n int_field = cf.gen_int64_field(is_primary=True)\n vec_field = cf.gen_float_vec_field(name='vec')\n schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)\n assert schema.auto_id == auto_id\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.xfail(reason=\"#5918\")\n def test_collection_auto_id_non_primary_field(self):\n \"\"\"\n target: test collection set auto_id in non-primary field\n method: set auto_id=True in non-primary field\n expected: raise exception\n \"\"\"\n self._connect()\n int_field_one = cf.gen_int64_field(is_primary=True)\n int_field_two = cf.gen_int64_field(auto_id=True)\n fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]\n error = {ct.err_code: 0, ct.err_msg: \"auto_id can only be specified on the primary key field\"}\n self.collection_schema_wrap.init_collection_schema(fields, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"#5918\")\n def test_collection_auto_id_false_non_primary(self):\n \"\"\"\n target: test collection set auto_id in non-primary field\n method: set auto_id=True in non-primary field\n expected: verify schema auto_id is False\n \"\"\"\n self._connect()\n int_field_one = cf.gen_int64_field(is_primary=True)\n int_field_two = cf.gen_int64_field(name='int2', auto_id=False)\n fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]\n schema, _ = self.collection_schema_wrap.init_collection_schema(fields)\n assert not schema.auto_id\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.xfail(reason=\"#5943\")\n def 
test_collection_auto_id_inconsistent(self):\n \"\"\"\n target: test collection auto_id with both collection schema and field schema\n method: 1.set primary field auto_id=True in field schema 2.set auto_id=False in collection schema\n expected: raise exception\n \"\"\"\n self._connect()\n int_field = cf.gen_int64_field(is_primary=True, auto_id=True)\n vec_field = cf.gen_float_vec_field(name='vec')\n error = {ct.err_code: 0, ct.err_msg: \"Primary key field can only be one\"}\n self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=False,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.parametrize(\"auto_id\", [True, False])\n def test_collection_auto_id_consistent(self, auto_id):\n \"\"\"\n target: test collection auto_id with both collection schema and field schema\n method: set auto_id=True/False both field and schema\n expected: verify auto_id\n \"\"\"\n self._connect()\n int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)\n vec_field = cf.gen_float_vec_field(name='vec')\n schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)\n assert schema.auto_id == auto_id\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_auto_id_none_in_field(self):\n \"\"\"\n target: test collection with auto_id is None\n method: set auto_id=None\n expected: raise exception\n \"\"\"\n self._connect()\n error = {ct.err_code: 0, ct.err_msg: \"Param auto_id must be bool type\"}\n self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,\n is_primary=True,\n auto_id=None, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"#5945\")\n @pytest.mark.parametrize(\"auto_id\", ct.get_invalid_strs)\n def test_collection_invalid_auto_id(self, auto_id):\n \"\"\"\n target: test collection with invalid auto_id\n method: define field with auto_id=non-bool\n expected: raise exception\n \"\"\"\n self._connect()\n int_field = cf.gen_int64_field(is_primary=True)\n vec_field = cf.gen_float_vec_field(name='vec')\n error = {ct.err_code: 0, ct.err_msg: \"Param auto_id must be bool type\"}\n self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"#5933\")\n def test_collection_multi_fields_auto_id(self):\n \"\"\"\n target: test collection auto_id with multi fields\n method: specify auto_id=True for multi int64 fields\n expected: todo raise exception\n \"\"\"\n self._connect()\n int_field_one = cf.gen_int64_field(is_primary=True, auto_id=True)\n int_field_two = cf.gen_int64_field(name=\"int\", auto_id=True)\n vec_field = cf.gen_float_vec_field(name='vec')\n error = {ct.err_code: 0, ct.err_msg: \"auto_id can only be specified on the primary key field\"}\n self.collection_schema_wrap.init_collection_schema([int_field_one, int_field_two, vec_field],\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.parametrize(\"dtype\", [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR])\n def test_collection_vector_without_dim(self, dtype):\n \"\"\"\n target: test collection without dimension\n method: define vector field without dim\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n float_vec_field, _ = self.field_schema_wrap.init_field_schema(name=\"vec\", 
dtype=dtype)\n schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])\n error = {ct.err_code: 0, ct.err_msg: \"dimension is not defined in field type params\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(\"5950\")\n def test_collection_vector_invalid_dim(self, get_invalid_dim):\n \"\"\"\n target: test collection with invalid dimension\n method: define float-vec field with invalid dimension\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n float_vec_field = cf.gen_float_vec_field(dim=get_invalid_dim)\n schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])\n error = {ct.err_code: 0, ct.err_msg: \"dim must be of int\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.parametrize(\"dim\", [-1, 32769])\n def test_collection_vector_out_bounds_dim(self, dim):\n \"\"\"\n target: test collection with out of bounds dim\n method: invalid dim -1 and 32759\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n float_vec_field = cf.gen_float_vec_field(dim=dim)\n schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])\n error = {ct.err_code: 0, ct.err_msg: \"invalid dimension: {}. should be in range 1 ~ 32768\".format(dim)}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.skip(reason=\"waiting for primary field\")\n def test_collection_non_vector_field_dim(self):\n \"\"\"\n target: test collection with dim for non-vector field\n method: define int64 field with dim\n expected: no exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n int_field, _ = self.field_schema_wrap.init_field_schema(name=\"int\", dtype=DataType.INT64, dim=ct.default_dim)\n float_vec_field = cf.gen_float_vec_field()\n schema = cf.gen_collection_schema(fields=[int_field, float_vec_field])\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_desc(self):\n \"\"\"\n target: test collection with description\n method: create with description\n expected: assert default description\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_default_collection_schema(description=ct.collection_desc)\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_none_desc(self):\n \"\"\"\n target: test collection with none description\n method: create with none description\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_default_collection_schema(description=None)\n error = {ct.err_code: 0, ct.err_msg: \"expected one of: bytes, unicode\"}\n self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_long_desc(self):\n \"\"\"\n target: test collection with long 
desc\n method: create with long desc\n expected:\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n desc = \"a\".join(\"a\" for _ in range(256))\n schema = cf.gen_default_collection_schema(description=desc)\n self.collection_wrap.init_collection(c_name, schema=schema,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: schema})\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_collection_binary(self):\n \"\"\"\n target: test collection with binary-vec\n method: create collection with binary field\n expected: assert binary field\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n self.collection_wrap.init_collection(c_name, schema=default_binary_schema,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_binary_schema})\n assert c_name, _ in self.utility_wrap.list_collections()\n\n\nclass TestCollectionOperation(TestcaseBase):\n \"\"\"\n ******************************************************************\n The following cases are used to test collection interface operations\n ******************************************************************\n \"\"\"\n\n # def teardown_method(self):\n # if self.self.collection_wrap is not None and self.self.collection_wrap.collection is not None:\n # self.self.collection_wrap.drop()\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_without_connection(self):\n \"\"\"\n target: test collection without connection\n method: 1.create collection after connection removed\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n self.connection_wrap.remove_connection(ct.default_alias)\n res_list, _ = self.connection_wrap.list_connections()\n assert ct.default_alias not in res_list\n error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}\n self.collection_wrap.init_collection(c_name, schema=default_schema,\n check_task=CheckTasks.err_res, check_items=error)\n assert self.collection_wrap.collection is None\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_collection_multi_create_drop(self):\n \"\"\"\n target: test cycle creation and deletion of multiple collections\n method: in a loop, collections are created and deleted sequentially\n expected: no exception\n \"\"\"\n self._connect()\n c_num = 20\n for _ in range(c_num):\n c_name = cf.gen_unique_str(prefix)\n self.collection_wrap.init_collection(c_name, schema=default_schema,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n self.collection_wrap.drop()\n assert c_name, _ not in self.utility_wrap.list_collections()\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"issue #5947\")\n def test_collection_dup_name_drop(self):\n \"\"\"\n target: test collection with dup name, and drop\n method: 1. two dup name collection object\n 2. 
one object drop collection\n expected: collection dropped\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n self.collection_wrap.init_collection(c_name, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n self.collection_wrap.drop()\n assert not self.utility_wrap.has_collection(c_name)[0]\n error = {ct.err_code: 0, ct.err_msg: \"can't find collection\"}\n collection_w.has_partition(\"p\", check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_collection_after_drop(self):\n \"\"\"\n target: test create collection after create and drop\n method: 1. create a 2. drop a 3, re-create a\n expected: no exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n collection_w.drop()\n assert not self.utility_wrap.has_collection(collection_w.name)[0]\n self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n assert self.utility_wrap.has_collection(c_name)[0]\n\n\nclass TestCollectionDataframe(TestcaseBase):\n \"\"\"\n ******************************************************************\n The following cases are used to test construct_from_dataframe\n ******************************************************************\n \"\"\"\n\n @pytest.fixture(scope=\"function\", params=ct.get_invalid_strs)\n def get_non_df(self, request):\n if request.param is None:\n pytest.skip(\"skip None\")\n yield request.param\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.xfail(reason=\"issue #5947\")\n def test_construct_from_dataframe(self):\n \"\"\"\n target: test collection with dataframe data\n method: create collection and insert with dataframe\n expected: collection num entities equal to nb\n \"\"\"\n conn = self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n conn.flush([c_name])\n assert self.collection_wrap.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_construct_from_binary_dataframe(self):\n \"\"\"\n target: test binary collection with dataframe\n method: create binary collection with dataframe\n expected: collection num entities equal to nb\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df, _ = cf.gen_default_binary_dataframe_data(nb=ct.default_nb)\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_binary_schema})\n assert self.collection_wrap.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_from_none_dataframe(self):\n \"\"\"\n target: test create collection by empty dataframe\n method: invalid dataframe type create collection\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n error = {ct.err_code: 0, ct.err_msg: \"Dataframe can 
not be None!\"}\n self.collection_wrap.construct_from_dataframe(c_name, None, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_from_dataframe_only_column(self):\n \"\"\"\n target: test collection with dataframe only columns\n method: dataframe only has columns\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = pd.DataFrame(columns=[ct.default_int64_field_name, ct.default_float_vec_field_name])\n error = {ct.err_code: 0, ct.err_msg: \"Cannot infer schema from empty dataframe\"}\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_from_inconsistent_dataframe(self):\n \"\"\"\n target: test collection with data inconsistent\n method: create and insert with inconsistent data\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n # one field different type df\n mix_data = [(1, 2., [0.1, 0.2]), (2, 3., 4)]\n df = pd.DataFrame(data=mix_data, columns=list(\"ABC\"))\n error = {ct.err_code: 0, ct.err_msg: \"The data in the same column must be of the same type\"}\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field='A', check_task=CheckTasks.err_res,\n check_items=error)\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_construct_from_non_dataframe(self, get_non_df):\n \"\"\"\n target: test create collection by invalid dataframe\n method: non-dataframe type create collection\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n error = {ct.err_code: 0, ct.err_msg: \"Data type must be pandas.DataFrame!\"}\n df = get_non_df\n self.collection_wrap.construct_from_dataframe(c_name, df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_from_data_type_dataframe(self):\n \"\"\"\n target: test collection with invalid dataframe\n method: create with invalid dataframe\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = pd.DataFrame({\"date\": pd.date_range('20210101', periods=3), ct.default_int64_field_name: [1, 2, 3]})\n error = {ct.err_code: 0, ct.err_msg: \"Cannot infer schema from dataframe\"}\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_from_invalid_field_name(self):\n \"\"\"\n target: test collection with invalid field name\n method: create with invalid field name dataframe\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = pd.DataFrame({'%$#': cf.gen_vectors(3, 2), ct.default_int64_field_name: [1, 2, 3]})\n error = {ct.err_code: 1, ct.err_msg: \"Invalid field name\"}\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_none_primary_field(self):\n \"\"\"\n target: test collection with none primary field\n method: primary_field is none\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n error = {ct.err_code: 0, ct.err_msg: \"Schema must have a primary key field!\"}\n 
self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=None,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_not_existed_primary_field(self):\n \"\"\"\n target: test collection with not existed primary field\n method: primary field not existed\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n error = {ct.err_code: 0, ct.err_msg: \"Must be have a primary key field\"}\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=c_name,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"issue #5945\")\n def test_construct_with_none_auto_id(self):\n \"\"\"\n target: test construct with non-int64 as primary field\n method: non-int64 as primary field\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n error = {ct.err_code: 0, ct.err_msg: \"Must be have a primary key field\"}\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n auto_id=None)\n log.debug(self.collection_wrap.schema)\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_construct_auto_id_true_insert(self):\n \"\"\"\n target: test construct with true auto_id\n method: auto_id=True and insert values\n expected: raise exception\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = cf.gen_default_dataframe_data(nb=100)\n error = {ct.err_code: 0, ct.err_msg: \"Auto_id is True, but get the data of primary key field\"}\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n auto_id=True, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.xfail(reason=\"#5967\")\n def test_construct_auto_id_true_no_insert(self):\n \"\"\"\n target: test construct with true auto_id\n method: auto_id=True and not insert ids\n expected: verify num entities\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n df.drop(ct.default_int64_field_name, axis=1, inplace=True)\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n auto_id=True)\n assert self.collection_wrap.num_entities == ct.default_nb\n\n @pytest.mark.xfail(reason=\"#5970\")\n @pytest.mark.tags(CaseLabel.L2)\n def test_construct_none_value_auto_id_true(self):\n \"\"\"\n target: test construct with none value, auto_id\n method: df primary field with none value, auto_id=true\n expected: todo\n \"\"\"\n nb = 100\n df = cf.gen_default_dataframe_data(nb)\n log.debug(df.head(3))\n df.iloc[:, 0] = numpy.NaN\n log.debug(df.head(3))\n self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,\n priamry_field=ct.default_int64_field_name, auto_id=True)\n log.debug(self.collection_wrap.num_entities)\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_construct_auto_id_false(self):\n \"\"\"\n target: test construct with false auto_id\n method: auto_id=False, primary_field correct\n expected: verify auto_id\n \"\"\"\n self._connect()\n c_name = cf.gen_unique_str(prefix)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n auto_id=False)\n assert not self.collection_wrap.schema.auto_id\n 
assert self.collection_wrap.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_none_value_auto_id_false(self):\n \"\"\"\n target: test construct with none value, auto_id\n method: df primary field with none value, auto_id=false\n expected: raise exception\n \"\"\"\n self._connect()\n nb = 100\n df = cf.gen_default_dataframe_data(nb)\n df.iloc[:, 0] = numpy.NaN\n error = {ct.err_code: 0, ct.err_msg: \"Primary key type must be DataType.INT64\"}\n self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,\n priamry_field=ct.default_int64_field_name, auto_id=False,\n check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.xfail(reason=\"#5977\")\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_auto_id_false_same_values(self):\n \"\"\"\n target: test construct with false auto_id and same value\n method: auto_id=False, primary field same values\n expected: raise exception\n \"\"\"\n self._connect()\n nb = 100\n df = cf.gen_default_dataframe_data(nb)\n df.iloc[1:, 0] = 1\n # error = {ct.err_code: 0, ct.err_msg: \"Primary key type must be DataType.INT64\"}\n self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,\n priamry_field=ct.default_int64_field_name, auto_id=False)\n log.debug(self.collection_wrap.num_entities)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_construct_auto_id_false_negative_values(self):\n \"\"\"\n target: test construct with negative values\n method: auto_id=False, primary field values is negative\n expected: verify num entities\n \"\"\"\n self._connect()\n nb = 100\n df = cf.gen_default_dataframe_data(nb)\n new_values = pd.Series(data=[i for i in range(0, -nb, -1)])\n df[ct.default_int64_field_name] = new_values\n self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,\n priamry_field=ct.default_int64_field_name, auto_id=False)\n assert self.collection_wrap.num_entities == nb\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"#5947\")\n def test_construct_from_dataframe_dup_name(self):\n \"\"\"\n target: test collection with dup name and insert dataframe\n method: create collection with dup name, none schema, dataframe\n expected: two collection object is correct\n \"\"\"\n conn = self._connect()\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, priamry_field=ct.default_int64_field_name,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n df = cf.gen_default_dataframe_data(ct.default_nb)\n self.collection_wrap.construct_from_dataframe(c_name, df, priamry_field=ct.default_int64_field_name,\n check_task=CheckTasks.check_collection_property,\n check_items={exp_name: c_name, exp_schema: default_schema})\n conn.flush([collection_w.name])\n assert collection_w.num_entities == ct.default_nb\n assert collection_w.num_entities == self.collection_wrap.num_entities\n" ]
[ [ "pandas.DataFrame", "pandas.date_range" ] ]
sfepy/example_poropiezo
[ "c21916c8e4f2d277aefe0462898c68cea4ca052a" ]
[ "poropiezo_micro_dfc.py" ]
[ "# This example implements homogenization of piezoeletric porous media.\n# The mathematical model and numerical results are described in: \n#\n# ROHAN E., LUKEŠ V.\n# Homogenization of the fluid-saturated piezoelectric porous media.\n# International Journal of Solids and Structures\n# Volume 147, 15 August 2018, Pages 110-125\n# https://doi.org/10.1016/j.ijsolstr.2018.05.017\n#\n# Run calculation of homogeized coefficients:\n#\n# ./homogen.py example_poropiezo-1/poropiezo_micro_dfc.py\n#\n# The results are stored in `example_poropiezo-1/results` directory.\n#\n\nimport sys\nimport numpy as nm\nimport os.path as osp\nfrom sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\nfrom sfepy.homogenization.utils import coor_to_sym, define_box_regions\nimport sfepy.discrete.fem.periodic as per\nfrom sfepy.discrete.fem.mesh import Mesh\nimport sfepy.homogenization.coefs_base as cb\nfrom sfepy.base.base import Struct\n\ndata_dir = 'example_poropiezo-1'\n\n\ndef data_to_struct(data):\n out = {}\n for k, v in data.items():\n out[k] = Struct(name='output_data',\n mode='cell' if v[2] == 'c' else 'vertex',\n data=v[0],\n var_name=v[1],\n dofs=None)\n\n return out\n\n\ndef get_periodic_bc(var_tab, dim=3, dim_tab=None):\n if dim_tab is None:\n dim_tab = {'x': ['left', 'right'],\n 'z': ['bottom', 'top'],\n 'y': ['near', 'far']}\n\n periodic = {}\n epbcs = {}\n\n for ivar, reg in var_tab:\n periodic['per_%s' % ivar] = pers = []\n for idim in 'xyz'[0:dim]:\n key = 'per_%s_%s' % (ivar, idim)\n regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]\n epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},\n 'match_%s_plane' % idim)\n pers.append(key)\n\n return epbcs, periodic\n\n\n# reconstruct displacement, and electric fields at the microscopic level,\n# see Section 6.2\ndef recovery_micro_dfc(pb, corrs, macro):\n eps0 = macro['eps0']\n mesh = pb.domain.mesh\n regions = pb.domain.regions\n dim = mesh.dim\n Yms_map = regions['Yms'].get_entities(0)\n Ym_map = regions['Ym'].get_entities(0)\n\n gl = '_' + list(corrs.keys())[0].split('_')[-1]\n u1 = -corrs['corrs_p' + gl]['u'] * macro['press'][Yms_map, :]\n phi = -corrs['corrs_p' + gl]['r'] * macro['press'][Ym_map, :]\n\n for ii in range(2):\n u1 += corrs['corrs_k%d' % ii + gl]['u'] * macro['phi'][ii]\n phi += corrs['corrs_k%d' % ii + gl]['r'] * macro['phi'][ii]\n\n for ii in range(dim):\n for jj in range(dim):\n kk = coor_to_sym(ii, jj, dim)\n phi += corrs['corrs_rs' + gl]['r_%d%d' % (ii, jj)]\\\n * nm.expand_dims(macro['strain'][Ym_map, kk], axis=1)\n u1 += corrs['corrs_rs' + gl]['u_%d%d' % (ii, jj)]\\\n * nm.expand_dims(macro['strain'][Yms_map, kk], axis=1)\n\n u = macro['u'][Yms_map, :] + eps0 * u1\n\n mvar = pb.create_variables(['u', 'r', 'svar'])\n\n e_mac_Yms = [None] * macro['strain'].shape[1]\n\n for ii in range(dim):\n for jj in range(dim):\n kk = coor_to_sym(ii, jj, dim)\n mvar['svar'].set_data(macro['strain'][:, kk])\n mac_e_Yms = pb.evaluate('ev_volume_integrate.i2.Yms(svar)',\n mode='el_avg',\n var_dict={'svar': mvar['svar']})\n\n e_mac_Yms[kk] = mac_e_Yms.squeeze()\n\n e_mac_Yms = nm.vstack(e_mac_Yms).T[:, nm.newaxis, :, nm.newaxis]\n\n mvar['r'].set_data(phi)\n E_mic = pb.evaluate('ev_grad.i2.Ym(r)',\n mode='el_avg',\n var_dict={'r': mvar['r']}) / eps0\n\n mvar['u'].set_data(u1)\n e_mic = pb.evaluate('ev_cauchy_strain.i2.Yms(u)',\n mode='el_avg',\n var_dict={'u': mvar['u']})\n e_mic += e_mac_Yms\n\n out = {\n 'u0': (macro['u'][Yms_map, :], 'u', 'p'), # macro displacement\n 'u1': (u1, 'u', 'p'), # local displacement corrections, see eq. 
(58)\n 'u': (u, 'u', 'p'), # total displacement\n 'e_mic': (e_mic, 'u', 'c'), # micro strain field, see eq. (58)\n 'phi': (phi, 'r', 'p'), # electric potential, see eq. (57)\n 'E_mic': (E_mic, 'r', 'c'), # electric field, see eq. (58)\n }\n\n return data_to_struct(out)\n\n\n# define homogenized coefficients and subproblems for correctors\ndef define(grid0=100, filename_mesh=None):\n eps0 = 0.01 / grid0\n\n if filename_mesh is None:\n filename_mesh = osp.join(data_dir, 'piezo_mesh_micro_dfc.vtk')\n\n mesh = Mesh.from_file(filename_mesh)\n n_conduct = len(nm.unique(mesh.cmesh.cell_groups)) - 2\n\n sym_eye = 'nm.array([1,1,0])' if mesh.dim == 2 else\\\n 'nm.array([1,1,1,0,0,0])'\n\n bbox = mesh.get_bounding_box()\n regions = define_box_regions(mesh.dim, bbox[0], bbox[1], eps=1e-3)\n\n regions.update({\n 'Y': 'all',\n # matrix\n 'Ym': 'cells of group 1',\n 'Ym_left': ('r.Ym *v r.Left', 'vertex'),\n 'Ym_right': ('r.Ym *v r.Right', 'vertex'),\n 'Ym_bottom': ('r.Ym *v r.Bottom', 'vertex'),\n 'Ym_top': ('r.Ym *v r.Top', 'vertex'),\n 'Ym_far': ('r.Ym *v r.Far', 'vertex'),\n 'Ym_near': ('r.Ym *v r.Near', 'vertex'),\n 'Gamma_mc': ('r.Ym *v r.Yc', 'facet', 'Ym'),\n # channel / inclusion\n 'Yc': 'cells of group 2',\n 'Yc0': ('r.Yc -v r.Gamma_cm', 'vertex'),\n 'Gamma_cm': ('r.Ym *v r.Yc', 'facet', 'Yc'),\n })\n\n print('number of cnonductors: %d' % n_conduct)\n regions.update({\n 'Yms': ('r.Ym +v r.Ys', 'cell'),\n 'Yms_left': ('r.Yms *v r.Left', 'vertex'),\n 'Yms_right': ('r.Yms *v r.Right', 'vertex'),\n 'Yms_bottom': ('r.Yms *v r.Bottom', 'vertex'),\n 'Yms_top': ('r.Yms *v r.Top', 'vertex'),\n 'Yms_far': ('r.Yms *v r.Far', 'vertex'),\n 'Yms_near': ('r.Yms *v r.Near', 'vertex'),\n 'Gamma_ms': ('r.Ym *v r.Ys', 'facet', 'Ym'),\n 'Gamma_msc': ('r.Yms *v r.Yc', 'facet', 'Yms'),\n 'Ys': (' +v '.join(['r.Ys%d' % k for k in range(n_conduct)]),\n 'cell'),\n })\n\n options = {\n 'coefs_filename': 'coefs_poropiezo_%d' % (grid0),\n 'volume': {\n 'variables': ['svar'],\n 'expression': 'd_volume.i2.Y(svar)',\n },\n 'coefs': 'coefs',\n 'requirements': 'requirements',\n 'output_dir': osp.join(data_dir, 'results'),\n 'ls': 'ls',\n 'file_per_var': True,\n 'absolute_mesh_path': True,\n 'multiprocessing': False,\n 'recovery_hook': recovery_micro_dfc,\n }\n\n fields = {\n 'displacement': ('real', 'vector', 'Yms', 1),\n 'potential': ('real', 'scalar', 'Ym', 1),\n 'sfield': ('real', 'scalar', 'Y', 1),\n }\n\n variables = {\n # displacement\n 'u': ('unknown field', 'displacement'),\n 'v': ('test field', 'displacement', 'u'),\n 'Pi_u': ('parameter field', 'displacement', 'u'),\n 'U1': ('parameter field', 'displacement', '(set-to-None)'),\n 'U2': ('parameter field', 'displacement', '(set-to-None)'),\n # potential\n 'r': ('unknown field', 'potential'),\n 's': ('test field', 'potential', 'r'),\n 'Pi_r': ('parameter field', 'potential', 'r'),\n 'R1': ('parameter field', 'potential', '(set-to-None)'),\n 'R2': ('parameter field', 'potential', '(set-to-None)'),\n # aux variable\n 'svar': ('parameter field', 'sfield', '(set-to-None)'),\n }\n\n epbcs, periodic = get_periodic_bc([('u', 'Yms'), ('r', 'Ym')])\n\n mat_g_sc, mat_d_sc = eps0, eps0**2\n # BaTiO3 - Miara, Rohan, ... 
doi: 10.1016/j.jmps.2005.05.006\n materials = {\n 'matrix': ({\n 'D': {'Ym': nm.array([[1.504, 0.656, 0.659, 0, 0, 0],\n [0.656, 1.504, 0.659, 0, 0, 0],\n [0.659, 0.659, 1.455, 0, 0, 0],\n [0, 0, 0, 0.424, 0, 0],\n [0, 0, 0, 0, 0.439, 0],\n [0, 0, 0, 0, 0, 0.439]]) * 1e11, }\n },),\n 'piezo': ({\n 'g': nm.array([[0, 0, 0, 0, 11.404, 0],\n [0, 0, 0, 0, 0, 11.404],\n [-4.322, -4.322, 17.360, 0, 0, 0]]) / mat_g_sc,\n 'd': nm.array([[1.284, 0, 0],\n [0, 1.284, 0],\n [0, 0, 1.505]]) * 1e-8 / mat_d_sc,\n },),\n 'fluid': ({'gamma': 1.0 / 2.15e9},),\n }\n\n functions = {\n 'match_x_plane': (per.match_x_plane,),\n 'match_y_plane': (per.match_y_plane,),\n 'match_z_plane': (per.match_z_plane,),\n }\n\n ebcs = {\n 'fixed_u': ('Corners', {'u.all': 0.0}),\n 'fixed_r': ('Gamma_ms', {'r.all': 0.0}),\n }\n\n integrals = {\n 'i2': 2,\n 'i5': 5,\n }\n\n solvers = {\n 'ls': ('ls.scipy_direct', {}),\n 'ns_em6': ('nls.newton', {\n 'i_max': 1,\n 'eps_a': 1e-6,\n 'eps_r': 1e-6,\n 'problem': 'nonlinear'}),\n 'ns_em3': ('nls.newton', {\n 'i_max': 1,\n 'eps_a': 1e-3,\n 'eps_r': 1e-6,\n 'problem': 'nonlinear'}),\n }\n\n coefs = {\n # homogenized elasticity, see eq. (46)_1\n 'A': {\n 'requires': ['c.A1', 'c.A2'],\n 'expression': 'c.A1 + c.A2',\n 'class': cb.CoefEval,\n },\n 'A1': {\n 'status': 'auxiliary',\n 'requires': ['pis_u', 'corrs_rs'],\n 'expression': 'dw_lin_elastic.i2.Yms(matrix.D, U1, U2)',\n 'set_variables': [('U1', ('corrs_rs', 'pis_u'), 'u'),\n ('U2', ('corrs_rs', 'pis_u'), 'u')],\n 'class': cb.CoefSymSym,\n },\n 'A2': {\n 'status': 'auxiliary',\n 'requires': ['corrs_rs'],\n 'expression': 'dw_diffusion.i2.Ym(piezo.d, R1, R2)',\n 'set_variables': [('R1', 'corrs_rs', 'r'),\n ('R2', 'corrs_rs', 'r')],\n 'class': cb.CoefSymSym,\n },\n # homogenized Biot coefficient, see eq. (46)_2\n 'B': {\n 'requires': ['c.Phi', 'c.B1', 'c.B2'],\n 'expression': 'c.B1 - c.B2 + c.Phi * %s' % sym_eye,\n 'class': cb.CoefEval,\n },\n 'B1': {\n 'status': 'auxiliary',\n 'requires': ['pis_u', 'corrs_p'],\n 'expression': 'dw_lin_elastic.i2.Yms(matrix.D, U1, U2)',\n 'set_variables': [('U1', 'corrs_p', 'u'),\n ('U2', 'pis_u', 'u')],\n 'class': cb.CoefSym,\n },\n 'B2': {\n 'status': 'auxiliary',\n 'requires': ['pis_u', 'corrs_p'],\n 'expression': 'dw_piezo_coupling.i2.Ym(piezo.g, U1, R1)',\n 'set_variables': [('R1', 'corrs_p', 'r'),\n ('U1', 'pis_u', 'u')],\n 'class': cb.CoefSym,\n },\n # homogenized compressibility coefficient, see eq. (46)_6\n 'M': {\n 'requires': ['c.Phi', 'c.N'],\n 'expression': 'c.N + c.Phi * %e' % materials['fluid'][0]['gamma'],\n 'class': cb.CoefEval,\n },\n 'N': {\n 'status': 'auxiliary',\n 'requires': ['corrs_p'],\n 'expression': 'dw_surface_ltr.i2.Gamma_msc(U1)',\n 'set_variables': [('U1', 'corrs_p', 'u')],\n 'class': cb.CoefOne,\n },\n 'Phi': {\n 'requires': ['c.vol'],\n 'expression': 'c.vol[\"fraction_Yc\"]',\n 'class': cb.CoefEval,\n },\n # volume fractions of Ym, Yc, Ys1, Ys2, ...\n 'vol': {\n 'regions': ['Ym', 'Yc'] + ['Ys%d' % k for k in range(n_conduct)],\n 'expression': 'd_volume.i2.%s(svar)',\n 'class': cb.VolumeFractions,\n },\n 'eps0': {\n 'requires': [],\n 'expression': '%e' % eps0,\n 'class': cb.CoefEval,\n },\n 'filenames': {},\n }\n\n requirements = {\n 'pis_u': {\n 'variables': ['u'],\n 'class': cb.ShapeDimDim,\n },\n 'pis_r': {\n 'variables': ['r'],\n 'class': cb.ShapeDim,\n },\n # local subproblem defined by eq. 
(41)\n 'corrs_rs': {\n 'requires': ['pis_u'],\n 'ebcs': ['fixed_u', 'fixed_r'],\n 'epbcs': periodic['per_u'] + periodic['per_r'],\n 'is_linear': True,\n 'equations': {\n 'eq1':\n \"\"\"dw_lin_elastic.i2.Yms(matrix.D, v, u)\n - dw_piezo_coupling.i2.Ym(piezo.g, v, r)\n = - dw_lin_elastic.i2.Yms(matrix.D, v, Pi_u)\"\"\",\n 'eq2':\n \"\"\"\n - dw_piezo_coupling.i2.Ym(piezo.g, u, s)\n - dw_diffusion.i2.Ym(piezo.d, s, r)\n = dw_piezo_coupling.i2.Ym(piezo.g, Pi_u, s)\"\"\",\n },\n 'set_variables': [('Pi_u', 'pis_u', 'u')],\n 'class': cb.CorrDimDim,\n 'save_name': 'corrs_rs_%d' % grid0,\n 'dump_variables': ['u', 'r'],\n 'solvers': {'ls': 'ls', 'nls': 'ns_em3'},\n },\n # local subproblem defined by eq. (42)\n 'corrs_p': {\n 'requires': [],\n 'ebcs': ['fixed_u', 'fixed_r'],\n 'epbcs': periodic['per_u'] + periodic['per_r'],\n 'is_linear': True,\n 'equations': {\n 'eq1':\n \"\"\"dw_lin_elastic.i2.Yms(matrix.D, v, u)\n - dw_piezo_coupling.i2.Ym(piezo.g, v, r)\n = dw_surface_ltr.i2.Gamma_msc(v)\"\"\",\n 'eq2':\n \"\"\"\n - dw_piezo_coupling.i2.Ym(piezo.g, u, s)\n - dw_diffusion.i2.Ym(piezo.d, s, r)\n = 0\"\"\"\n },\n 'class': cb.CorrOne,\n 'save_name': 'corrs_p_%d' % grid0,\n 'dump_variables': ['u', 'r'],\n 'solvers': {'ls': 'ls', 'nls': 'ns_em6'},\n },\n # local subproblem defined by eq. (43)\n 'corrs_rho': {\n 'requires': [],\n 'ebcs': ['fixed_u', 'fixed_r'],\n 'epbcs': periodic['per_u'] + periodic['per_r'],\n 'is_linear': True,\n 'equations': {\n 'eq1':\n \"\"\"dw_lin_elastic.i2.Yms(matrix.D, v, u)\n - dw_piezo_coupling.i2.Ym(piezo.g, v, r)\n = 0\"\"\",\n 'eq2':\n \"\"\"\n - dw_piezo_coupling.i2.Ym(piezo.g, u, s)\n - dw_diffusion.i2.Ym(piezo.d, s, r)\n =\n - dw_surface_integrate.i2.Gamma_mc(s)\"\"\"\n },\n 'class': cb.CorrOne,\n 'save_name': 'corrs_p_%d' % grid0,\n 'dump_variables': ['u', 'r'],\n 'solvers': {'ls': 'ls', 'nls': 'ns_em6'},\n },\n }\n\n for k in range(n_conduct):\n sk = '%d' % k\n regions.update({\n 'Ys' + sk: 'cells of group %d' % (3 + k),\n 'Gamma_s' + sk: ('r.Ym *v r.Ys' + sk, 'facet', 'Ym'),\n })\n\n materials['matrix'][0]['D'].update({\n 'Ys' + sk: stiffness_from_youngpoisson(3, 200e9, 0.25),\n })\n\n ebcs.update({\n 'fixed_r1_k_' + sk: ('Gamma_s' + sk, {'r.0': 1.0}),\n 'fixed_r0_k_' + sk: ('Gamma_s' + sk, {'r.0': 0.0}),\n })\n\n fixed_r0_k = ['fixed_r0_k_%d' % ii for ii in range(n_conduct)\n if not ii == k]\n # local subproblems defined for conductors, see eq. 
(44)\n requirements.update({\n 'corrs_k' + sk: {\n 'requires': ['pis_r'],\n 'ebcs': ['fixed_u', 'fixed_r1_k_' + sk] + fixed_r0_k,\n 'epbcs': periodic['per_u'] + periodic['per_r'],\n 'is_linear': True,\n 'equations': {\n 'eq1':\n \"\"\"dw_lin_elastic.i2.Yms(matrix.D, v, u)\n - dw_piezo_coupling.i2.Ym(piezo.g, v, r)\n = 0\"\"\",\n 'eq2':\n \"\"\"\n - dw_piezo_coupling.i2.Ym(piezo.g, u, s)\n - dw_diffusion.i2.Ym(piezo.d, s, r)\n = 0\"\"\"\n },\n 'class': cb.CorrOne,\n 'save_name': 'corrs_k' + sk + '_%d' % grid0,\n 'dump_variables': ['u', 'r'],\n 'solvers': {'ls': 'ls', 'nls': 'ns_em6'},\n },\n })\n\n coefs.update({\n # homogenized coefficient (46)_3\n 'H' + sk: {\n 'requires': ['c.H1_' + sk, 'c.H2_' + sk],\n 'expression': 'c.H1_%s - c.H2_%s' % (sk, sk),\n 'class': cb.CoefEval,\n },\n 'H1_' + sk: {\n 'status': 'auxiliary',\n 'requires': ['pis_u', 'corrs_k' + sk],\n 'expression': 'dw_lin_elastic.i2.Yms(matrix.D, U1, U2)',\n 'set_variables': [('U1', 'corrs_k' + sk, 'u'),\n ('U2', 'pis_u', 'u')],\n 'class': cb.CoefSym,\n },\n 'H2_' + sk: {\n 'status': 'auxiliary',\n 'requires': ['pis_u', 'corrs_k' + sk],\n 'expression': 'dw_piezo_coupling.i2.Ym(piezo.g, U1, R1)',\n 'set_variables': [('R1', 'corrs_k' + sk, 'r'),\n ('U1', 'pis_u', 'u')],\n 'class': cb.CoefSym,\n },\n # homogenized coefficient (46)_7\n 'Z' + sk: {\n 'requires': ['corrs_k' + sk],\n 'expression': 'dw_surface_ltr.i2.Gamma_msc(U1)',\n 'set_variables': [('U1', 'corrs_k' + sk, 'u')],\n 'class': cb.CoefOne,\n },\n })\n\n return locals()\n" ]
[ [ "numpy.array", "numpy.expand_dims", "numpy.vstack", "numpy.unique" ] ]
karavik18/Orthogonal-Transforms
[ "7210dbd33cd8dddb5baaa6b018f0ecf5cab30e07" ]
[ "src/utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\nimport numpy as np\nimport math\n\ndef getSequence():\n inSequence = np.array([])\n inSeqString = input(\"\\nPlease enter the array (elements separated by space): \")\n seqSplited = inSeqString.split()\n for i in range(len(seqSplited)):\n inSequence = np.append(inSequence, float(seqSplited[i]))\n return inSequence\n\ndef dft(seq):\n l = len(seq)\n out = np.zeros(l, dtype=complex)\n for i in range(l):\n for j in range(l):\n out[i] = out[i] + seq[j]*np.exp((-2j*math.pi*j*i)/l)\n return out\n \ndef ditfft(seq):\n l = len(seq)\n half_l = l // 2\n out = np.zeros(l, dtype=complex)\n \n if l == 1:\n out[0] = seq[0]\n else:\n seq_even = seq[0::2] #get all even indexed elements\n seq_odd = seq[1::2] #get all odd indexed elements\n out_even = ditfft(seq_even)\n out_odd = ditfft(seq_odd)\n for m in range(l):\n out[m] = out_even[m % half_l] + out_odd[m % half_l] * np.exp(-1j*2*math.pi*m/l)\n return out\n \ndef diffft(seq):\n l = len(seq)\n half_l = l // 2\n out = np.zeros(l, dtype=complex)\n out1 = np.zeros(half_l, dtype=complex)\n out2 = np.zeros(half_l, dtype=complex)\n seq1 = np.zeros(half_l, dtype=complex)\n seq2 = np.zeros(half_l, dtype=complex)\n if l == 1:\n out[0] = seq[0]\n else:\n seq_hi = seq[0:half_l:1] #get 1st half of input array\n seq_lo = seq[half_l:l:1] #get 2nd half of input array\n for m in range(half_l):\n seq1[m] = seq_hi[m] + seq_lo[m]\n seq2[m] = (seq_hi[m] - seq_lo[m]) * np.exp(-1j*2*math.pi*m/l)\n \n out1 = diffft(seq1)\n out2 = diffft(seq2)\n out = np.concatenate([out1,out2])\n return out\n\ndef reArrange(seq):\n l = len(seq)\n n = np.int(np.log2(l))\n out = np.zeros(l, dtype=complex)\n for i in range(l):\n bin_i = bin(i)\n b_i = str(bin_i)[2::]\n b = ''.join(['0'*(n-len(b_i)),b_i])\n k = 0\n for element in range(len(b)): \n if b[element] == '1':\n k = k + (2**element)\n out[k] = seq[i]\n return out\n\ndef viewable(seq):\n for i in range(len(seq)):\n if abs(seq[i].real) < 10**(-10):\n seq[i] = 0 + 1j*seq[i].imag\n if abs(seq[i].imag) < 10**(-10):\n seq[i] = seq[i].real\n return seq\n \ndef dct(seq):\n l = len(seq)\n out = np.zeros(l)\n for i in range(l):\n for j in range(l):\n out[i] = out[i] + 2*seq[j]*np.cos((math.pi*i*((2*j)+1))/(2*l))\n return out\n \ndef viewable_r(seq):\n for i in range(len(seq)):\n if abs(seq[i]) < 10**(-10):\n seq[i] = 0\n return seq\n\ndef dst(seq):\n l = len(seq)\n out = np.zeros(l)\n for i in range(l):\n for j in range(l):\n out[i] = out[i] + seq[j]*np.sin((math.pi*(i+1)*((2*j)+1))/(2*l))\n return out\n \ndef wht(seq):\n raise NotImplementedError()\n \ndef slant(seq):\n raise NotImplementedError()\n \ndef haar(seq):\n raise NotImplementedError()\n \ndef klt(seq):\n raise NotImplementedError()\n" ]
[ [ "numpy.log2", "numpy.cos", "numpy.sin", "numpy.concatenate", "numpy.exp", "numpy.array", "numpy.zeros" ] ]
vazgenk/catboost
[ "dc7a55065ce2ef0dc140a35b06e6cf4dcd851ac5" ]
[ "catboost/pytest/test.py" ]
[ "from itertools import permutations\nimport yatest.common\nfrom yatest.common import ExecutionTimeoutError, ExecutionError\nimport pytest\nimport os\nimport filecmp\nimport numpy as np\nimport timeit\nimport json\n\nimport catboost\n\nfrom catboost_pytest_lib import (\n apply_catboost,\n compare_evals_with_precision,\n compare_fit_evals_with_precision,\n compare_evals,\n data_file,\n execute_catboost_fit,\n execute_dist_train,\n format_crossvalidation,\n generate_concatenated_random_labeled_dataset,\n get_limited_precision_dsv_diff_tool,\n local_canonical_file,\n permute_dataset_columns,\n remove_time_from_json,\n)\n\nCATBOOST_PATH = yatest.common.binary_path(\"catboost/app/catboost\")\n\nBOOSTING_TYPE = ['Ordered', 'Plain']\nGROW_POLICIES = ['SymmetricTree', 'Lossguide', 'Depthwise']\nBOOSTING_TYPE_WITH_GROW_POLICIES = [('Ordered', 'SymmetricTree'), ('Plain', 'SymmetricTree'),\n ('Plain', 'Lossguide'), ('Plain', 'Depthwise')]\n\nPREDICTION_TYPES = ['Probability', 'RawFormulaVal', 'Class']\n\nBINCLASS_LOSSES = ['Logloss', 'CrossEntropy']\nMULTICLASS_LOSSES = ['MultiClass', 'MultiClassOneVsAll']\nCLASSIFICATION_LOSSES = BINCLASS_LOSSES + MULTICLASS_LOSSES\nREGRESSION_LOSSES = ['MAE', 'MAPE', 'Poisson', 'Quantile', 'RMSE', 'LogLinQuantile', 'Lq']\nPAIRWISE_LOSSES = ['PairLogit', 'PairLogitPairwise']\nGROUPWISE_LOSSES = ['YetiRank', 'YetiRankPairwise', 'QueryRMSE', 'QuerySoftMax']\nRANKING_LOSSES = PAIRWISE_LOSSES + GROUPWISE_LOSSES\nALL_LOSSES = CLASSIFICATION_LOSSES + REGRESSION_LOSSES + RANKING_LOSSES\n\nSAMPLING_UNIT_TYPES = ['Object', 'Group']\n\nOVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']\n\nLOSS_FUNCTIONS = ['RMSE', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile',\n 'Poisson', 'MAPE', 'MultiClass', 'MultiClassOneVsAll']\n\nLEAF_ESTIMATION_METHOD = ['Gradient', 'Newton']\n\n# test both parallel in and non-parallel modes\n# default block size (5000000) is too big to run in parallel on these tests\nSCORE_CALC_OBJ_BLOCK_SIZES = ['60', '5000000']\nSCORE_CALC_OBJ_BLOCK_SIZES_IDS = ['calc_block=60', 'calc_block=5000000']\n\nSEPARATOR_TYPES = [\n 'ByDelimiter',\n 'BySense',\n]\n\nTEXT_FEATURE_ESTIMATORS = [\n 'BoW',\n 'NaiveBayes',\n 'BM25',\n 'BoW,NaiveBayes',\n 'BoW,NaiveBayes,BM25'\n]\n\n\ndef diff_tool(threshold=None):\n return get_limited_precision_dsv_diff_tool(threshold, True)\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_multiregression(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiRMSE',\n '-f', data_file('multiregression', 'train'),\n '--column-description', data_file('multiregression', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 10),\n '--cv-rand', '42',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiregression(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiRMSE',\n pool='multiregression',\n train='train',\n test='test',\n cd='train.cd',\n 
dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiregression_single(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiRMSE',\n pool='multiregression',\n train='train',\n test='test',\n cd='train_single.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('n_trees', [100, 500])\ndef test_multiregression(boosting_type, grow_policy, n_trees):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_calc_path = yatest.common.test_output_path('test.calc')\n output_metric_path = yatest.common.test_output_path('test.metric')\n\n cmd_fit = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', data_file('multiregression', 'train'),\n '-t', data_file('multiregression', 'test'),\n '--column-description', data_file('multiregression', 'train.cd'),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--column-description', data_file('multiregression', 'train.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_calc_path\n )\n yatest.common.execute(cmd_calc)\n\n cmd_metric = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--column-description', data_file('multiregression', 'train.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_metric_path,\n '--metrics', 'MultiRMSE'\n )\n yatest.common.execute(cmd_metric)\n return [\n local_canonical_file(output_eval_path),\n local_canonical_file(output_calc_path),\n local_canonical_file(output_metric_path)\n ]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [100, 500])\[email protected]('target_count', [1, 2, 3])\ndef test_multiregression_target_permutation_invariance(boosting_type, n_trees, target_count):\n np.random.seed(42)\n\n X_COUNT = 200\n X_DIM = 5\n\n x = np.random.randn(X_COUNT, X_DIM)\n y = np.stack([\n np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))\n for i in range(target_count)\n ], axis=1)\n\n test_size = X_COUNT // 2\n x_test, y_test = x[:test_size], y[:test_size]\n x_train, y_train = x[test_size:], y[test_size:]\n\n train_file = yatest.common.test_output_path('train')\n test_file = yatest.common.test_output_path('test')\n\n get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))\n get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))\n get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))\n\n with open(get_cd_path(target_count), 'w') as cd:\n cd.write(''.join(('{}\\tTarget\\tm\\n'.format(i) for i in range(target_count))))\n\n evals = []\n for perm in permutations(range(target_count)):\n inv_perm = range(target_count)\n for i, j in enumerate(perm):\n inv_perm[j] = i\n\n np.savetxt(train_file, np.hstack([y_train[:, perm], x_train]), delimiter='\\t')\n np.savetxt(test_file, np.hstack([y_test[:, 
perm], x_test]), delimiter='\\t')\n\n fit_cmd = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(target_count),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(target_count),\n '--eval-file', get_eval_path(target_count),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', fit_cmd)\n eval = np.loadtxt(get_eval_path(target_count), delimiter='\\t', skiprows=1, usecols=range(1, target_count + 1)).reshape((-1, target_count))\n evals.append(eval[:, inv_perm])\n\n for eva in evals:\n assert np.allclose(eva, evals[0])\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [10, 100, 1000])\[email protected]('target_count', [1, 2, 3])\ndef test_compare_multiregression_with_regression(boosting_type, n_trees, target_count):\n np.random.seed(42)\n ERR_PERC = 0.1\n\n X_COUNT = 200\n X_DIM = 5\n\n x = np.random.randn(X_COUNT, X_DIM)\n y = np.stack([\n np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))\n for i in range(target_count)\n ], axis=1)\n\n test_size = X_COUNT // 2\n x_test, y_test = x[:test_size], y[:test_size]\n x_train, y_train = x[test_size:], y[test_size:]\n\n train_file = yatest.common.test_output_path('train')\n test_file = yatest.common.test_output_path('test')\n np.savetxt(train_file, np.hstack([y_train, x_train]), delimiter='\\t')\n np.savetxt(test_file, np.hstack([y_test, x_test]), delimiter='\\t')\n\n get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))\n get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))\n get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))\n\n with open(get_cd_path(target_count), 'w') as cd:\n cd.write(''.join(('{}\\tTarget\\tm\\n'.format(i) for i in range(target_count))))\n\n fit_cmd = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(target_count),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(target_count),\n '--eval-file', get_eval_path(target_count),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', fit_cmd)\n\n for i in range(target_count):\n with open(get_cd_path(i), 'w') as cd:\n cd.write(''.join((('{}\\tTarget\\n'.format(j) if j == i else '{}\\tAuxiliary\\n'.format(j)) for j in range(target_count))))\n\n rmse_fit_cmd = (\n '--loss-function', 'RMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(i),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(i),\n '--eval-file', get_eval_path(i),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', rmse_fit_cmd)\n\n multirmse_eval = np.loadtxt(get_eval_path(target_count), delimiter='\\t', skiprows=1, usecols=range(1, target_count + 1))\n rmse_eval = np.stack([\n np.loadtxt(get_eval_path(i), delimiter='\\t', skiprows=1, usecols=1)\n for i in range(target_count)\n ], axis=1)\n\n # cannot compare approxes because they are very different due to different boosting algorithms\n multi_rmse_loss = np.mean((multirmse_eval - y_test)**2)\n rmse_loss = np.mean((rmse_eval - y_test)**2)\n\n assert rmse_loss.shape == multi_rmse_loss.shape\n assert multi_rmse_loss < rmse_loss * (1 + ERR_PERC)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [100, 500])\ndef 
test_multiregression_single(boosting_type, n_trees):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_calc_path = yatest.common.test_output_path('test.calc')\n output_metric_path = yatest.common.test_output_path('test.metric')\n\n cmd_fit = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', data_file('multiregression', 'train'),\n '-t', data_file('multiregression', 'test'),\n '--column-description', data_file('multiregression', 'train_single.cd'),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--column-description', data_file('multiregression', 'train_single.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_calc_path\n )\n yatest.common.execute(cmd_calc)\n\n cmd_metric = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--column-description', data_file('multiregression', 'train_single.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_metric_path,\n '--metrics', 'MultiRMSE'\n )\n yatest.common.execute(cmd_metric)\n return [\n local_canonical_file(output_eval_path),\n local_canonical_file(output_calc_path),\n local_canonical_file(output_metric_path)\n ]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_queryrmse(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_queryrmse_newton_gradient(boosting_type, dev_score_calc_obj_block_size):\n newton_eval_path = yatest.common.test_output_path('newton.eval')\n gradient_eval_path = yatest.common.test_output_path('gradient.eval')\n\n def run_catboost(eval_path, leaf_estimation_method):\n cmd = [\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--leaf-estimation-method', leaf_estimation_method,\n '-i', '20',\n '-T', '4',\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(newton_eval_path, 'Newton')\n run_catboost(gradient_eval_path, 'Gradient')\n assert filecmp.cmp(newton_eval_path, gradient_eval_path)\n\n\[email 
protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_pool_with_QueryId(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.query_id'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_rmse_on_qwise_pool(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_averagegain(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'AverageGain:top=2;hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_queryaverage(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'QueryAverage:top=2;hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('sigma', ['sigma=' + str(sigma) for sigma in [0.01, 1, 10]])\[email protected]('num_estimations', ['num_estimations=' + str(n_estim) for n_estim in [1, 100]])\ndef test_stochastic_filter(sigma, num_estimations):\n model_path = 
yatest.common.test_output_path('model.bin')\n cd_path = yatest.common.test_output_path('pool.cd')\n train_path = yatest.common.test_output_path('train.txt')\n test_path = yatest.common.test_output_path('test.txt')\n\n prng = np.random.RandomState(seed=0)\n\n n_samples_by_query = 20\n n_features = 10\n n_queries = 50\n\n n_samples = n_samples_by_query * n_queries\n\n features = prng.uniform(0, 1, size=(n_samples, n_features))\n weights = prng.uniform(0, 1, size=n_features)\n\n labels = np.dot(features, weights)\n query_ids = np.arange(0, n_samples) // n_queries\n money = (n_queries - np.arange(0, n_samples) % n_queries) * 10\n\n labels = labels.reshape((n_samples, 1))\n query_ids = query_ids.reshape((n_samples, 1))\n money = money.reshape((n_samples, 1))\n\n features = np.hstack((labels, query_ids, money, features))\n\n n_learn = int(0.7 * n_samples)\n learn = features[:n_learn, :]\n test = features[n_learn:, :]\n np.savetxt(train_path, learn, fmt='%.5f', delimiter='\\t')\n np.savetxt(test_path, test, fmt='%.5f', delimiter='\\t')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'GroupId']], fmt='%s', delimiter='\\t')\n\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n learn_error_one_thread_path = yatest.common.test_output_path('learn_error_one_thread.tsv')\n test_error_one_thread_path = yatest.common.test_output_path('test_error_one_thread.tsv')\n loss_description = 'StochasticFilter:' + sigma + ';' + num_estimations\n\n cmd = [\n '--loss-function', loss_description,\n '--leaf-estimation-backtracking', 'No',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-m', model_path,\n '--use-best-model', 'false',\n ]\n\n cmd_one_thread = cmd + [\n '--learn-err-log', learn_error_one_thread_path,\n '--test-err-log', test_error_one_thread_path,\n '-T', '1'\n ]\n\n cmd_four_thread = cmd + [\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '-T', '4'\n ]\n execute_catboost_fit('CPU', cmd_one_thread)\n execute_catboost_fit('CPU', cmd_four_thread)\n\n compare_evals(learn_error_one_thread_path, learn_error_path)\n compare_evals(test_error_one_thread_path, test_error_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('metric', ['DCG', 'NDCG'])\[email protected]('top', [-1, 1, 10])\[email protected]('dcg_type', ['Base', 'Exp'])\[email protected]('denominator', ['Position', 'LogPosition'])\ndef test_stochastic_rank(metric, top, dcg_type, denominator):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n loss = 'StochasticRank:metric={};top={};type={};denominator={};hints=skip_train~false'.format(\n metric, top, dcg_type, denominator)\n\n cmd = (\n '--loss-function', loss,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('top', [-1, 1, 10])\[email protected]('decay', [1.0, 0.6, 0.0])\ndef test_stochastic_rank_pfound(top, decay):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = 
yatest.common.test_output_path('test_error.tsv')\n\n loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)\n\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', loss,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('top', [-1, 1, 10])\[email protected]('decay', [1.0, 0.6, 0.0])\ndef test_stochastic_rank_pfound_with_many_ones(top, decay):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)\n\n np.random.seed(0)\n train_with_ones = yatest.common.test_output_path('train_with_ones')\n TARGET_COLUMN = 2\n with open(data_file('querywise', 'train')) as fin:\n with open(train_with_ones, 'w') as fout:\n for line in fin.readlines():\n if np.random.random() < 0.25:\n parts = line.split('\\t')\n parts[TARGET_COLUMN] = '1.0'\n line = '\\t'.join(parts)\n fout.write(line)\n\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', loss,\n '-f', train_with_ones,\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(learn_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('top', [2, 100])\ndef test_averagegain_with_query_weights(boosting_type, top):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'AverageGain:top={};hints=skip_train~false'.format(top),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('top_size', [2, 5, 10, -1])\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('cd_file', ['train.cd', 'train.cd.subgroup_id'])\ndef test_pfound(top_size, boosting_type, cd_file):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', cd_file),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'PFound:top={};hints=skip_train~false'.format(top_size),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_params_ordering():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n learn_error_reversed_path = yatest.common.test_output_path('learn_error_reversed.tsv')\n test_error_path = 
yatest.common.test_output_path('ignored.tsv')\n\n def get_cmd(custom_metric, learn_error_path):\n return (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '--custom-metric', custom_metric,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', get_cmd(\"PFound:top=1;decay=0.6;hints=skip_train~false\", learn_error_path))\n execute_catboost_fit('CPU', get_cmd(\"PFound:decay=0.6;top=1;hints=skip_train~false\", learn_error_reversed_path))\n\n with open(learn_error_path) as f:\n assert 'PFound:top=1;decay=0.6' in f.read()\n with open(learn_error_reversed_path) as f:\n assert 'PFound:decay=0.6;top=1' in f.read()\n\n\ndef test_recall_at_k():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'RecallAt:top=3',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_precision_at_k():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'PrecisionAt:top=3',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_mapk(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'MAP:top={}'.format(10),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('ndcg_power_mode', ['Base', 'Exp'])\[email protected]('metric_type', ['DCG', 'NDCG'])\[email protected]('ndcg_denominator', ['None', 'LogPosition', 'Position'])\ndef test_ndcg(boosting_type, ndcg_power_mode, metric_type, ndcg_denominator):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n denominator = '' if ndcg_denominator == 'None' else 
';denominator={}'.format(ndcg_denominator)\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', '{}:top={};type={};hints=skip_train~false{}'.format(metric_type, 10, ndcg_power_mode, denominator),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_queryrmse_approx_on_full_history():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--approx-on-full-history',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairlogit(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n def run_catboost(eval_path, learn_pairs):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', learn_pairs),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path, 'train.pairs')\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path)]\n\n\ndef test_pairs_generation():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n def run_catboost(eval_path):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 
'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path)]\n\n\ndef test_pairs_generation_with_max_pairs():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n def run_catboost(eval_path):\n cmd = [\n '--loss-function', 'PairLogit:max_pairs=30',\n '--eval-metric', 'PairLogit:max_pairs=30',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--fstr-file', output_fstr_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path),\n local_canonical_file(output_fstr_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_pairlogit_no_target(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.no_target'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_pairlogit_approx_on_full_history():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--approx-on-full-history',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\ndef test_pairlogit_pairwise(pairs_file, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogitPairwise',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n 
'--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRank',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])\ndef test_pairwise_reproducibility(loss_function):\n\n def run_catboost(threads, model_path, eval_path):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '5',\n '-T', str(threads),\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n model_1 = yatest.common.test_output_path('model_1.bin')\n eval_1 = yatest.common.test_output_path('test_1.eval')\n run_catboost(1, model_1, eval_1)\n model_4 = yatest.common.test_output_path('model_4.bin')\n eval_4 = yatest.common.test_output_path('test_4.eval')\n run_catboost(4, model_4, eval_4)\n assert filecmp.cmp(eval_1, eval_4)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank_with_params(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRank:permutations=5;decay=0.9',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank_pairwise(dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRankPairwise',\n '-f', data_file('querywise', 'train'),\n '-t', 
data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', ('YetiRank', 'YetiRankPairwise'))\ndef test_yetirank_default_metric(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', loss_function,\n '--has-header',\n '-f', data_file('black_friday', 'train'),\n '-t', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '--model-file', output_model_path,\n '--boosting-type', 'Plain',\n '-i', '5',\n '-T', '4',\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(test_error_path)]\n\n\nNAN_MODE = ['Min', 'Max']\n\n\[email protected]('nan_mode', NAN_MODE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode(nan_mode, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult_nan', 'train_small'),\n '-t', data_file('adult_nan', 'test_small'),\n '--column-description', data_file('adult_nan', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--nan-mode', nan_mode,\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_nan', 'test_small'),\n '--column-description', data_file('adult_nan', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('border_count', [64, 255, 350, 1000, 2500])\ndef test_different_border_count(border_count):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n train_path = data_file('querywise', 'train')\n test_path = data_file('querywise', 'test')\n cd_path = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '20',\n '-T', '4',\n '-x', str(border_count),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode_forbidden(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = 
(\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--nan-mode', 'Forbidden',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_overfit_detector_iter(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '2000',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--od-type', 'Iter',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_overfit_detector_inc_to_dec(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '2000',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--od-pval', '0.5',\n '--od-type', 'IncToDec',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('overfitting_detector_type', OVERFITTING_DETECTOR_TYPE)\ndef test_overfit_detector_with_resume_from_snapshot(boosting_type, grow_policy, overfitting_detector_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n snapshot_path = yatest.common.test_output_path('snapshot')\n\n cmd_prefix = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--leaf-estimation-iterations', '10',\n '--max-ctr-complexity', '4',\n '--snapshot-file', snapshot_path,\n '--od-type', overfitting_detector_type\n )\n if overfitting_detector_type == 'IncToDec':\n cmd_prefix += (\n '--od-wait', '2',\n '--od-pval', '0.5'\n )\n elif overfitting_detector_type == 'Iter':\n cmd_prefix += ('--od-wait', '2')\n\n cmd_first = cmd_prefix + ('-i', '10')\n execute_catboost_fit('CPU', cmd_first)\n\n cmd_second = cmd_prefix + ('-i', '2000')\n execute_catboost_fit('CPU', cmd_second)\n\n return 
[local_canonical_file(output_eval_path)]\n\n\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\ndef test_per_object_approx_on_full_history(leaf_estimation_method):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Ordered',\n '--approx-on-full-history',\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-w', '0.5',\n '--od-pval', '0.99',\n '--rsm', '1',\n '--leaf-estimation-method', leaf_estimation_method,\n '--leaf-estimation-iterations', '20',\n '--use-best-model', 'false')\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_shrink_model(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '1',\n '--od-pval', '0.99',\n '--rsm', '1',\n '--use-best-model', 'true'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_multi_leaf_estimation_method(leaf_estimation_method, boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', leaf_estimation_method,\n '--leaf-estimation-iterations', '2',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nLOSS_FUNCTIONS_SHORT = ['Logloss', 'MultiClass']\n\n\[email protected](\n 'loss_function',\n LOSS_FUNCTIONS_SHORT,\n ids=['loss_function=%s' % loss_function 
for loss_function in LOSS_FUNCTIONS_SHORT]\n)\[email protected](\n 'column_name',\n ['doc_id', 'sample_id'],\n ids=['column_name=doc_id', 'column_name=sample_id']\n)\ndef test_sample_id(loss_function, column_name):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n column_description = data_file('adult_' + column_name, 'train.cd')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('adult_doc_id', 'train'),\n '-t', data_file('adult_doc_id', 'test'),\n '--column-description', column_description,\n '--boosting-type', 'Plain',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_doc_id', 'test'),\n '--column-description', column_description,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nPOOLS = ['amazon', 'adult']\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_apply_missing_vals(boosting_type, grow_policy):\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('test_adult_missing_val.tsv'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', output_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_crossentropy(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'CrossEntropy',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_permutation_block(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n 
'--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--fold-permutation-block', '239',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_ignored_features(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '-I', '0:1:3:5-7:10000',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_ignored_features_names():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'RMSE',\n '--has-header',\n '--learn-set', data_file('black_friday', 'train'),\n '--test-set', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-I', 'Stay_In_Current_City_Years:Product_Category_2:Gender',\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_ignored_features_not_read():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n input_cd_path = data_file('adult', 'train.cd')\n cd_path = yatest.common.test_output_path('train.cd')\n\n with open(input_cd_path, \"rt\") as f:\n cd_lines = f.readlines()\n with open(cd_path, \"wt\") as f:\n for cd_line in cd_lines:\n # Corrupt some features by making them 'Num'\n if cd_line.split() == ('5', 'Categ'): # column 5 --> feature 4\n cd_line = cd_line.replace('Categ', 'Num')\n if cd_line.split() == ('7', 'Categ'): # column 7 --> feature 6\n cd_line = cd_line.replace('Categ', 'Num')\n f.write(cd_line)\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '-I', '4:6', # Ignore the corrupted features\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n # Not needed: return [local_canonical_file(output_eval_path)]\n\n\ndef test_ignored_features_not_read_names():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n input_cd_path = data_file('black_friday', 'cd')\n cd_path = yatest.common.test_output_path('cd')\n\n with open(input_cd_path, \"rt\") as f:\n cd_lines = f.readlines()\n with open(cd_path, \"wt\") as f:\n for cd_line in cd_lines:\n if cd_line.split() == ('2', 'Categ', 'Gender'):\n cd_line = cd_line.replace('2', 'Num', 'Gender')\n if cd_line.split() 
== ('10', 'Categ', 'Product_Category_3'):\n cd_line = cd_line.replace('10', 'Num', 'Product_Category_3')\n f.write(cd_line)\n\n cmd = (\n '--loss-function', 'RMSE',\n '--has-header',\n '--learn-set', data_file('black_friday', 'train'),\n '--test-set', data_file('black_friday', 'test'),\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-I', 'Gender:Product_Category_3',\n )\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_baseline(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('train_adult_baseline.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('train_adult_baseline.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline(boosting_type, loss_function):\n labels = ['0', '1', '2', '3']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n '--classes-count', '4'\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(eval_path, formula_predict_path))\n return [local_canonical_file(eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline_lost_class(boosting_type, 
loss_function):\n labels = [0, 1, 2, 3]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n '--classes-count', '4',\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights_no_bootstrap(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--bootstrap-type', 'No',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights_gradient(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 
'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Gradient'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_logloss_with_not_binarized_target(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_not_binarized', 'train_small'),\n '-t', data_file('adult_not_binarized', 'test_small'),\n '--column-description', data_file('adult_not_binarized', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--target-border', '0.5',\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', LOSS_FUNCTIONS)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_all_targets(loss_function, boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_model_path_without_test = yatest.common.test_output_path('model_without_test.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n base_cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '--counter-calc-method', 'SkipTest', # TODO(kirillovs): remove after setting SkipTest as default type\n '-w', '0.03',\n '-T', '4',\n )\n\n train_with_test_cmd = base_cmd + (\n '-t', data_file('adult', 'test_small'),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', train_with_test_cmd)\n\n train_without_test_cmd = base_cmd + (\n '-m', output_model_path_without_test,\n )\n execute_catboost_fit('CPU', train_without_test_cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n formula_predict_without_test_path = yatest.common.test_output_path('predict_without_test.eval')\n\n base_calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--prediction-type', 'RawFormulaVal'\n )\n calc_cmd = base_calc_cmd + (\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n )\n calc_cmd_without_test = base_calc_cmd + (\n '-m', output_model_path_without_test,\n '--output-path', formula_predict_without_test_path,\n )\n yatest.common.execute(calc_cmd)\n yatest.common.execute(calc_cmd_without_test)\n if 
loss_function == 'MAPE':\n # TODO(kirillovs): uncomment this after resolving MAPE problems\n # assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path), local_canonical_file(formula_predict_path)]\n else:\n assert(compare_evals(output_eval_path, formula_predict_path))\n assert(filecmp.cmp(formula_predict_without_test_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_cv(is_inverted, boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 10),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_query(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_pairs(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('bad_cv_params', ['XX', 'YY', 'XY'])\ndef test_multiple_cv_spec(bad_cv_params):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if bad_cv_params == 'XX':\n cmd += ('--cv', format_crossvalidation(is_inverted=False, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=False, n=4, k=7))\n elif bad_cv_params == 'XY':\n cmd += ('--cv', 
format_crossvalidation(is_inverted=False, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=True, n=4, k=7))\n elif bad_cv_params == 'YY':\n cmd += ('--cv', format_crossvalidation(is_inverted=True, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=True, n=4, k=7))\n else:\n raise Exception('bad bad_cv_params value:' + bad_cv_params)\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('error_type', ['0folds', 'fold_idx_overflow'])\ndef test_bad_fold_cv_spec(is_inverted, error_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n ('--cv:Inverted' if is_inverted else '--cv:Classical'),\n {'0folds': '0/0', 'fold_idx_overflow': '3/2'}[error_type],\n '--eval-file', output_eval_path,\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_empty_eval(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_time(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--has-time',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_gradient(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Gradient',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'loss_function',\n LOSS_FUNCTIONS_SHORT,\n ids=['loss_function=%s' % loss_function for loss_function in 
LOSS_FUNCTIONS_SHORT]\n)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_gradient_with_leafwise_approxes(loss_function, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Plain',\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Gradient',\n '--eval-file', output_eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-iterations', '1',\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton_with_leafwise_approxes(dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Plain',\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-iterations', '1',\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton_on_pool_with_weights(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n 
output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '40',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Newton',\n '--leaf-estimation-iterations', '7',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_priors(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--ctr', 'Borders:Prior=-2:Prior=0:Prior=8:Prior=1:Prior=-1:Prior=3,'\n 'Counter:Prior=0',\n '--per-feature-ctr', '4:Borders:Prior=0.444,Counter:Prior=0.444;'\n '6:Borders:Prior=0.666,Counter:Prior=0.666;'\n '8:Borders:Prior=-0.888:Prior=0.888,Counter:Prior=-0.888:Prior=0.888',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_buckets(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', 'Buckets'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_fold_len_multiplier(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--fold-len-multiplier', '1.5'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\nFSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction', 
'ShapValues', 'PredictionDiff']\nDATASET_DEPENDENT_FSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'LossFunctionChange', 'ShapValues', 'PredictionDiff']\n\n\[email protected]('fstr_type', FSTR_TYPES)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_fstr(fstr_type, boosting_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type=boosting_type,\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=(('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_normalized_model(fstr_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=True,\n additional_train_params=(('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_target_border(fstr_type, grow_policy):\n if fstr_type == 'PredictionDiff':\n # because PredictionDiff needs pool without categorical features\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd')\n else:\n train_path = data_file('adult_not_binarized', 'train_small')\n cd_path = data_file('adult_not_binarized', 'train.cd')\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=train_path,\n cd_path=cd_path,\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--target-border', '0.4')\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_weights(fstr_type, grow_policy):\n return do_test_fstr(\n fstr_type,\n loss_function='RMSE',\n input_path=data_file('querywise', 'train'),\n cd_path=data_file('querywise', 'train.cd.weight'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_class_weights(fstr_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\ndef test_fstr_with_target_border_and_class_weights(fstr_type):\n if fstr_type == 'PredictionDiff':\n # because PredictionDiff needs pool without categorical features\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd')\n else:\n train_path = data_file('adult_not_binarized', 'train_small')\n cd_path = data_file('adult_not_binarized', 'train.cd')\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=train_path,\n cd_path=cd_path,\n boosting_type='Plain',\n grow_policy='SymmetricTree',\n 
normalize=False,\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\ndef do_test_fstr(\n fstr_type,\n loss_function,\n input_path,\n cd_path,\n boosting_type,\n grow_policy,\n normalize,\n additional_train_params=()\n):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', input_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '-m', model_path\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n if fstr_type == 'PredictionDiff':\n with open(input_path) as input:\n fstr_pool_path = yatest.common.test_output_path('input.tsv')\n with open(fstr_pool_path, \"w\") as output:\n output.write(input.readline())\n output.write(input.readline())\n input_path = fstr_pool_path\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', input_path,\n '--column-description', cd_path,\n '-m', model_path,\n '-o', output_fstr_path,\n '--fstr-type', fstr_type\n )\n\n if normalize:\n make_model_normalized(model_path)\n if not(\n fstr_type == 'PredictionValuesChange' or\n fstr_type == 'InternalFeatureImportance' and loss_function not in RANKING_LOSSES\n ):\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n return\n\n yatest.common.execute(fstr_cmd)\n\n return local_canonical_file(output_fstr_path)\n\n\ndef make_model_normalized(model_path):\n yatest.common.execute([\n CATBOOST_PATH,\n 'normalize-model',\n '--model-path', model_path,\n '--output-model', model_path,\n '--set-scale', '0.5',\n '--set-bias', '0.125',\n ])\n\n\[email protected]('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])\ndef test_loss_change_fstr(loss_function):\n return do_test_loss_change_fstr(loss_function, normalize=False)\n\n\ndef test_loss_change_fstr_normalized():\n return do_test_loss_change_fstr('QueryRMSE', normalize=True)\n\n\ndef do_test_loss_change_fstr(loss_function, normalize):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n train_fstr_path = yatest.common.test_output_path('t_fstr.tsv')\n\n def add_loss_specific_params(cmd, fstr_mode):\n if loss_function in ['PairLogit', 'PairLogitPairwise']:\n cmd += ('--column-description', data_file('querywise', 'train.cd.no_target'))\n if fstr_mode:\n cmd += ('--input-pairs', data_file('querywise', 'train.pairs'))\n else:\n cmd += ('--learn-pairs', data_file('querywise', 'train.pairs'))\n else:\n cmd += ('--column-description', data_file('querywise', 'train.cd'))\n return cmd\n\n cmd_prefix = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '--learn-set', data_file('querywise', 'train'),\n '--boosting-type', 'Plain',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--fstr-file', train_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n '--model-file', model_path\n )\n cmd = add_loss_specific_params(cmd_prefix, fstr_mode=False)\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd_prefix = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--model-file', model_path,\n '--output-path', output_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n fstr_cmd = add_loss_specific_params(fstr_cmd_prefix, fstr_mode=True)\n 
if normalize:\n make_model_normalized(model_path)\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n return\n\n yatest.common.execute(fstr_cmd)\n\n fit_output = np.loadtxt(train_fstr_path, dtype='float', delimiter='\\t')\n fstr_output = np.loadtxt(output_fstr_path, dtype='float', delimiter='\\t')\n assert(np.allclose(fit_output, fstr_output, rtol=1e-6))\n\n return [local_canonical_file(output_fstr_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('ranking_parameters', [\n {'loss-function': 'PairLogit', 'fstr-type': 'LossFunctionChange'},\n {'loss-function': 'Logloss', 'fstr-type': 'PredictionValuesChange'}\n])\ndef test_fstr_feature_importance_default_value(boosting_type, ranking_parameters):\n model_path = yatest.common.test_output_path('model.bin')\n fstr_path_0 = yatest.common.test_output_path('fstr_0.tsv')\n fstr_path_1 = yatest.common.test_output_path('fstr_1.tsv')\n internal_fstr_path_0 = yatest.common.test_output_path('internal_fstr_0.tsv')\n internal_fstr_path_1 = yatest.common.test_output_path('internal_fstr_1.tsv')\n\n pool = 'adult' if ranking_parameters['loss-function'] == 'Logloss' else 'black_friday'\n pool_path = data_file(pool, 'train_small' if pool == 'adult' else 'train')\n cd_path = data_file(pool, 'train.cd' if pool == 'adult' else 'cd')\n has_header_suffix = ('--has-header',) if pool == 'black_friday' else ()\n\n cmd = (\n '--use-best-model', 'false',\n '--learn-set', pool_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--model-file', model_path,\n '--loss-function', ranking_parameters['loss-function']\n ) + has_header_suffix\n\n if ranking_parameters['loss-function'] == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit(\n 'CPU',\n cmd + ('--fstr-file', fstr_path_0,\n '--fstr-internal-file', internal_fstr_path_0,\n '--fstr-type', 'FeatureImportance')\n )\n execute_catboost_fit(\n 'CPU',\n cmd + ('--fstr-file', fstr_path_1,\n '--fstr-internal-file', internal_fstr_path_1,\n '--fstr-type', ranking_parameters['fstr-type'])\n )\n\n assert filecmp.cmp(fstr_path_0, fstr_path_1)\n assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', pool_path,\n '--column-description', cd_path,\n '--model-file', model_path,\n ) + has_header_suffix\n\n yatest.common.execute(\n fstr_cmd + ('--output-path', fstr_path_1,\n '--fstr-type', 'FeatureImportance')\n )\n yatest.common.execute(\n fstr_cmd + ('--output-path', internal_fstr_path_1,\n '--fstr-type', 'InternalFeatureImportance')\n )\n\n assert filecmp.cmp(fstr_path_0, fstr_path_1)\n assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_loss_change_fstr_without_pairs(boosting_type):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '--learn-set', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '--learning-rate', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--model-file', model_path\n\n )\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--column-description', 
data_file('querywise', 'train.cd'),\n '--model-file', model_path,\n '--output-path', output_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(fstr_cmd)\n\n try:\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd.no_target'),\n '--model-file', model_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(fstr_cmd)\n except:\n return [local_canonical_file(output_fstr_path)]\n\n assert False\n\n\ndef test_loss_change_fstr_on_different_pool_type():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_dsv_fstr_path = yatest.common.test_output_path('fstr.tsv')\n output_quantized_fstr_path = yatest.common.test_output_path('fstr.tsv.quantized')\n train_fstr_path = yatest.common.test_output_path('train_fstr.tsv')\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n\n cd_file = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '--learn-set', get_pool_path('train', True),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '-i', '10',\n '-T', '4',\n '--fstr-file', train_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n '--model-file', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', get_pool_path('train'),\n '--column-description', cd_file,\n '--input-pairs', data_file('querywise', 'train.pairs'),\n '--model-file', output_model_path,\n '--output-path', output_dsv_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(cmd)\n\n cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', get_pool_path('train', True),\n '--input-pairs', data_file('querywise', 'train.pairs'),\n '--model-file', output_model_path,\n '--output-path', output_quantized_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(cmd)\n\n fstr_dsv = np.loadtxt(output_dsv_fstr_path, dtype='float', delimiter='\\t')\n fstr_quantized = np.loadtxt(output_quantized_fstr_path, dtype='float', delimiter='\\t')\n train_fstr = np.loadtxt(train_fstr_path, dtype='float', delimiter='\\t')\n assert(np.allclose(fstr_dsv, fstr_quantized, rtol=1e-6))\n assert(np.allclose(fstr_dsv, train_fstr, rtol=1e-6))\n\n\[email protected]('loss_function', LOSS_FUNCTIONS)\[email protected]('grow_policy', GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_reproducibility(loss_function, grow_policy, dev_score_calc_obj_block_size):\n\n def run_catboost(threads, model_path, eval_path):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '25',\n '-T', str(threads),\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n model_1 = yatest.common.test_output_path('model_1.bin')\n eval_1 = yatest.common.test_output_path('test_1.eval')\n run_catboost(1, model_1, eval_1)\n model_4 = yatest.common.test_output_path('model_4.bin')\n eval_4 = yatest.common.test_output_path('test_4.eval')\n run_catboost(4, model_4, eval_4)\n assert 
filecmp.cmp(eval_1, eval_4)\n\n\nBORDER_TYPES = ['Median', 'GreedyLogSum', 'UniformAndQuantiles', 'MinEntropy', 'MaxLogSum', 'Uniform']\n\n\[email protected]('border_type', BORDER_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_feature_border_types(border_type, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--feature-border-type', border_type,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('depth', [4, 8])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_deep_tree_classification(depth, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--depth', str(depth),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_regularization(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n '--l2-leaf-reg', '5'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\nREG_LOSS_FUNCTIONS = ['RMSE', 'MAE', 'Lq:q=1', 'Lq:q=1.5', 'Lq:q=3', 'Quantile', 'LogLinQuantile', 'Poisson', 'MAPE',\n 'Huber:delta=1.0']\n\n\[email protected]('loss_function', REG_LOSS_FUNCTIONS)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_reg_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n 
'--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_multi_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path\n ]\n execute_catboost_fit('CPU', cmd)\n\n if boosting_type == 'Plain':\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nBORDER_TYPES = ['MinEntropy', 'Median', 'UniformAndQuantiles', 'MaxLogSum', 'GreedyLogSum', 'Uniform']\n\n\[email protected](\n 'border_type',\n BORDER_TYPES,\n ids=lambda border_type: 'border_type=%s' % border_type\n)\[email protected](\n 'border_count',\n [1, 3, 10],\n ids=lambda border_count: 'border_count=%d' % border_count\n)\[email protected](\n 'boosting_type',\n BOOSTING_TYPE,\n ids=lambda boosting_type: 'boosting_type=%s' % boosting_type\n)\ndef test_ctr_target_quantization(border_type, border_count, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '3',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', 'Borders:TargetBorderType=' + border_type,\n '--ctr-target-border-count', str(border_count)\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nCOUNTER_METHODS = ['Full', 'SkipTest']\n\n\[email protected]('counter_calc_method', COUNTER_METHODS)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_counter_calc(counter_calc_method, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n 
output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '60',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--counter-calc-method', counter_calc_method\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nCTR_TYPES = ['Borders', 'Buckets', 'BinarizedTargetMeanValue:TargetBorderCount=10', 'Borders,BinarizedTargetMeanValue:TargetBorderCount=10', 'Buckets,Borders']\n\n\[email protected]('ctr_type', CTR_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_type(ctr_type, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '3',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', ctr_type\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_overfitting_detector_metric(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC:hints=skip_train~false',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_same_metric_skip_different(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path_with_custom_metric = yatest.common.test_output_path('test_error_with_custom_metric.tsv')\n learn_error_path_with_custom_metric = yatest.common.test_output_path('learn_error_with_custom_metric.tsv')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n ]\n\n cmd_without_custom_metric = cmd + [\n '--eval-metric', 
'AUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n ]\n cmd_with_custom_metric = cmd + [\n '--eval-metric', 'AUC:hints=skip_train~true',\n '--custom-metric', 'AUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path_with_custom_metric,\n '--test-err-log', test_error_path_with_custom_metric,\n ]\n\n execute_catboost_fit('CPU', cmd_without_custom_metric)\n execute_catboost_fit('CPU', cmd_with_custom_metric)\n\n assert filecmp.cmp(learn_error_path_with_custom_metric, learn_error_path)\n\n\[email protected]('loss_function', BINCLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss_for_classification(loss_function, boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n custom_metrics = [\n metric for metric in\n [\n 'AUC:hints=skip_train~false',\n 'Logloss',\n 'CrossEntropy',\n 'Accuracy',\n 'Precision',\n 'Recall',\n 'F1',\n 'TotalF1',\n 'MCC',\n 'BalancedAccuracy',\n 'BalancedErrorRate',\n 'Kappa',\n 'WKappa',\n 'BrierScore',\n 'ZeroOneLoss',\n 'HammingLoss',\n 'HingeLoss',\n 'NormalizedGini'\n ]\n if metric != loss_function\n ]\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', ','.join(custom_metrics),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n\n if loss_function == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_loglikelihood_of_prediction(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'LogLikelihoodOfPrediction',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path, diff_tool(1e-7)), local_canonical_file(test_error_path, diff_tool(1e-7))]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss_for_multiclassification(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--custom-metric',\n 
'AUC:hints=skip_train~false;type=OneVsAll,Accuracy,Precision,Recall,F1,TotalF1,MCC,Kappa,WKappa,ZeroOneLoss,HammingLoss,HingeLoss,NormalizedGini',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_calc_prediction_type(boosting_type):\n model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', output_eval_path,\n '--prediction-type', 'Probability'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_calc_no_target(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n fit_output_eval_path = yatest.common.test_output_path('fit_test.eval')\n calc_output_eval_path = yatest.common.test_output_path('calc_test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--counter-calc-method', 'SkipTest',\n '--eval-file', fit_output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('train_notarget.cd'),\n '-m', model_path,\n '--output-path', calc_output_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n assert(compare_evals(fit_output_eval_path, calc_output_eval_path))\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_classification_progress_restore(boosting_type):\n\n def run_catboost(iters, model_path, eval_path, additional_params=None):\n import random\n import shutil\n import string\n letters = string.ascii_lowercase\n train_random_name = ''.join(random.choice(letters) for i in xrange(8))\n shutil.copy(data_file('adult', 'train_small'), train_random_name)\n cmd = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', train_random_name,\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', str(iters),\n '-T', '4',\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n if additional_params:\n cmd += additional_params\n execute_catboost_fit('CPU', cmd)\n\n canon_model_path = yatest.common.test_output_path('canon_model.bin')\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n run_catboost(30, canon_model_path, canon_eval_path)\n model_path = yatest.common.test_output_path('model.bin')\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = yatest.common.test_output_path('test.cbp')\n run_catboost(15, model_path, eval_path, additional_params=['--snapshot-file', 
progress_path])\n run_catboost(30, model_path, eval_path, additional_params=['--snapshot-file', progress_path])\n assert filecmp.cmp(canon_eval_path, eval_path)\n # TODO(kirillovs): make this active when progress_file parameter will be deleted from json params\n # assert filecmp.cmp(canon_model_path, model_path)\n\n\[email protected]('loss_function', CLASSIFICATION_LOSSES)\[email protected]('prediction_type', PREDICTION_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_prediction_type(prediction_type, loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--prediction-type', prediction_type\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_const_feature(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n train_path = yatest.common.test_output_path('train_small')\n test_path = yatest.common.test_output_path('test_small')\n train_dataset = np.loadtxt(data_file('adult', 'train_small'), dtype=str, delimiter='\\t')\n test_dataset = np.loadtxt(data_file('adult', 'test_small'), dtype=str, delimiter='\\t')\n train_dataset[:, 14] = '0'\n test_dataset[:, 14] = '0'\n np.savetxt(train_path, train_dataset, fmt='%s', delimiter='\\t')\n np.savetxt(test_path, test_dataset[:10, :], fmt='%s', delimiter='\\t')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', train_path,\n '-t', test_path,\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nQUANTILE_LOSS_FUNCTIONS = ['Quantile', 'LogLinQuantile']\n\n\[email protected]('loss_function', QUANTILE_LOSS_FUNCTIONS)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_quantile_targets(loss_function, boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function + ':alpha=0.9',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_targets_exact(boosting_type):\n output_model_path = 
yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_weights(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('higgs', 'train_small'),\n '-t', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_weight.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_categorical(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_quantile_exact_distributed():\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MAE',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train.cd',\n other_options=(\n '--leaf-estimation-method', 'Exact',\n '--boost-from-average', 'False'\n )\n )))]\n\n\nCUSTOM_LOSS_FUNCTIONS = ['RMSE,MAE', 'Quantile:alpha=0.9', 'MSLE,MedianAbsoluteError,SMAPE',\n 'NumErrors:greater_than=0.01,NumErrors:greater_than=0.1,NumErrors:greater_than=0.5',\n 'FairLoss:smoothness=0.9']\n\n\[email protected]('custom_loss_function', CUSTOM_LOSS_FUNCTIONS)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss(custom_loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '50',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--custom-metric', custom_loss_function,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n 
execute_catboost_fit('CPU', cmd)\n eps = 0 if 'MSLE' not in custom_loss_function else 1e-9\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool(eps)),\n local_canonical_file(test_error_path, diff_tool=diff_tool(eps))]\n\n\ndef test_train_dir():\n output_model_path = 'model.bin'\n output_eval_path = 'test.eval'\n train_dir_path = 'trainDir'\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '2',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--train-dir', train_dir_path,\n '--fstr-file', 'fstr.tsv',\n '--fstr-internal-file', 'ifstr.tsv'\n )\n execute_catboost_fit('CPU', cmd)\n outputs = ['time_left.tsv', 'learn_error.tsv', 'test_error.tsv', output_model_path, output_eval_path, 'fstr.tsv', 'ifstr.tsv']\n for output in outputs:\n assert os.path.isfile(train_dir_path + '/' + output)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('qwise_loss', ['QueryRMSE', 'RMSE'])\ndef test_train_on_binarized_equal_train_on_float(boosting_type, qwise_loss):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_model_path_binarized = yatest.common.test_output_path('model_binarized.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n borders_file = yatest.common.test_output_path('borders.tsv')\n borders_file_output = borders_file + '.out'\n predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')\n predictions_path_learn_binarized = yatest.common.test_output_path('predictions_learn_binarized.tsv')\n predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')\n predictions_path_test_binarized = yatest.common.test_output_path('predictions_test_binarized.tsv')\n\n learn_file = data_file('querywise', 'train')\n cd_file = data_file('querywise', 'train.cd')\n test_file = data_file('querywise', 'test')\n params = {\"--loss-function\": qwise_loss,\n \"-f\": learn_file,\n \"-t\": test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '100',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--use-best-model': 'false',\n '--output-borders-file': borders_file_output,\n }\n\n params_binarized = dict(params)\n params_binarized['--input-borders-file'] = borders_file_output\n params_binarized['--output-borders-file'] = borders_file\n params_binarized['-m'] = output_model_path_binarized\n\n execute_catboost_fit(task_type='CPU', params=params)\n\n apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)\n apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)\n\n execute_catboost_fit(\n task_type='CPU',\n params=params_binarized,\n )\n\n apply_catboost(output_model_path_binarized, learn_file, cd_file, predictions_path_learn_binarized)\n apply_catboost(output_model_path_binarized, test_file, cd_file, predictions_path_test_binarized)\n\n assert (filecmp.cmp(predictions_path_learn, predictions_path_learn_binarized))\n assert (filecmp.cmp(predictions_path_test, predictions_path_test_binarized))\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(predictions_path_test),\n local_canonical_file(predictions_path_learn),\n 
local_canonical_file(borders_file)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_feature_id_fstr(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train_with_id.cd'),\n '-m', model_path,\n '-o', output_fstr_path,\n )\n yatest.common.execute(fstr_cmd)\n\n return local_canonical_file(output_fstr_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_logloss(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-names', '1,0'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_multiclass(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'test_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path,\n '--class-names', '0.,0.5,1.,0.25,0.75'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_multiclass_last_class_missed(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'test_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path,\n '--class-names', '0.,0.5,0.25,0.75,1.',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_logloss(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = 
yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-weights', '0.5,2'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_multiclass(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-weights', '0.5,2'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_params_from_file(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '6',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--params-file', data_file('params.json')\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_lost_class(boosting_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('cloudness_lost_class', 'train_small'),\n '-t', data_file('cloudness_lost_class', 'test_small'),\n '--column-description', data_file('cloudness_lost_class', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--classes-count', '3',\n '--prediction-type', 'RawFormulaVal,Class',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_with_lost_class(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_lost_class', 'train_small'),\n '-t', data_file('cloudness_lost_class', 'test_small'),\n '--column-description', data_file('cloudness_lost_class', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--classes-count', '3',\n '--class-weights', '0.5,2,2',\n 
'--prediction-type', 'RawFormulaVal,Class',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_one_hot(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n '--one-hot-max-size', '10'\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', calc_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n assert(compare_evals(output_eval_path, calc_eval_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_random_strength(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n '--random-strength', '100'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_only_categorical_features(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult_all_categorical.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef 
test_weight_sampling_per_tree(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTree',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('used_ram_limit', ['1Kb', '4Gb'])\[email protected](\n 'dev_score_calc_obj_block_size',\n ['600', '5000000'],\n ids=['calc_block=600', 'calc_block=5000000']\n)\ndef test_allow_writing_files_and_used_ram_limit(boosting_type, used_ram_limit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--allow-writing-files', 'false',\n '--used-ram-limit', used_ram_limit,\n '--loss-function', 'Logloss',\n '--max-ctr-complexity', '5',\n '--depth', '7',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-w', '0.03',\n '-T', '6',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'ignored_features',\n [True, False],\n ids=['ignored_features=True', 'ignored_features=False']\n)\ndef test_apply_with_permuted_columns(ignored_features):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '-i', '20',\n '-w', '0.03',\n '-T', '6',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if ignored_features:\n cmd += ('--ignore-features', '0:2:5')\n\n execute_catboost_fit('CPU', cmd)\n\n permuted_test_path, permuted_cd_path = permute_dataset_columns(\n data_file('airlines_5K', 'test'),\n data_file('airlines_5K', 'cd'),\n seed=123)\n\n permuted_predict_path = yatest.common.test_output_path('permuted_predict.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', permuted_test_path,\n '--has-header',\n '--column-description', permuted_cd_path,\n '-m', output_model_path,\n '--output-path', permuted_predict_path,\n '--output-columns', 'SampleId,RawFormulaVal,Label'\n )\n yatest.common.execute(calc_cmd)\n assert filecmp.cmp(output_eval_path, permuted_predict_path)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 
'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_subsample_per_tree(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTree',\n '--bootstrap-type', 'Bernoulli',\n '--subsample', '0.5',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_subsample_per_tree_level(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTreeLevel',\n '--bootstrap-type', 'Bernoulli',\n '--subsample', '0.5',\n )\n if grow_policy == 'Lossguide':\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n else:\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_bagging_per_tree_level(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', 
output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--bagging-temperature', '0.5',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_plain(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--boosting-type', 'Plain',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_bootstrap(boosting_type, dev_score_calc_obj_block_size):\n bootstrap_option = {\n 'no': ('--bootstrap-type', 'No',),\n 'bayes': ('--bootstrap-type', 'Bayesian', '--bagging-temperature', '0.0',),\n 'bernoulli': ('--bootstrap-type', 'Bernoulli', '--subsample', '1.0',)\n }\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n )\n for bootstrap in bootstrap_option:\n model_path = yatest.common.test_output_path('model_' + bootstrap + '.bin')\n eval_path = yatest.common.test_output_path('test_' + bootstrap + '.eval')\n execute_catboost_fit('CPU', cmd + ('-m', model_path, '--eval-file', eval_path,) + bootstrap_option[bootstrap])\n\n ref_eval_path = yatest.common.test_output_path('test_no.eval')\n assert(filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bayes.eval')))\n assert(filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bernoulli.eval')))\n\n return [local_canonical_file(ref_eval_path)]\n\n\ndef test_json_logging():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n json_path = yatest.common.test_output_path('catboost_training.json')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--json-log', json_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(remove_time_from_json(json_path))]\n\n\ndef test_json_logging_metric_period():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n json_path = yatest.common.test_output_path('catboost_training.json')\n\n cmd = (\n '--use-best-model', 'false',\n 
'--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--json-log', json_path,\n '--metric-period', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(remove_time_from_json(json_path))]\n\n\ndef test_output_columns_format():\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n # Intentionally skipped: -t ...\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--output-columns', 'SampleId,RawFormulaVal,#2,Label',\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--output-columns', 'SampleId,RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path, formula_predict_path)\n\n\ndef test_eval_period():\n model_path = yatest.common.test_output_path('adult_model.bin')\n\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--eval-period', '2'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(formula_predict_path)\n\n\ndef test_weights_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,Weight,Label',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_baseline_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('train_adult_baseline.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,Baseline,Label',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_baseline_from_file_output():\n output_model_path = 
yatest.common.test_output_path('model.bin')\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', data_file('higgs', 'train_small'),\n '--test-set', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_baseline.cd'),\n '-i', '10',\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_0_path,\n '--output-columns', 'SampleId,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', data_file('higgs', 'train_small'),\n '--test-set', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_weight.cd'),\n '--learn-baseline', data_file('higgs', 'train_baseline'),\n '--test-baseline', data_file('higgs', 'test_baseline'),\n '-i', '10',\n '--ignore-features', '0', # baseline column\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_1_path,\n '--output-columns', 'SampleId,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n compare_evals(eval_0_path, eval_1_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline_from_file(boosting_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path_0 = yatest.common.test_output_path('test_0.eval')\n output_eval_path_1 = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path_0,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--learn-baseline', output_eval_path_0,\n '--test-baseline', output_eval_path_0,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--class-names', '0.,0.25,0.5,0.75',\n '--eval-file', output_eval_path_1,\n )\n execute_catboost_fit('CPU', cmd)\n\n try:\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--learn-baseline', output_eval_path_0,\n '--test-baseline', output_eval_path_0,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--class-names', '0.5,0.25,0.75.,0.',\n '--eval-file', output_eval_path_1,\n )\n execute_catboost_fit('CPU', cmd)\n except:\n return [local_canonical_file(output_eval_path_0), local_canonical_file(output_eval_path_1)]\n\n assert False\n\n\ndef test_baseline_from_file_output_on_quantized_pool():\n output_model_path = 
yatest.common.test_output_path('model.bin')\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n '--test-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n '--column-description', data_file('higgs', 'train_baseline.cd'),\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_0_path,\n )\n execute_catboost_fit('CPU', cmd + ('-i', '10'))\n execute_catboost_fit('CPU', cmd + (\n '-i', '10',\n '--learn-baseline', eval_0_path,\n '--test-baseline', eval_0_path,\n '--eval-file', eval_0_path))\n\n execute_catboost_fit('CPU', cmd + (\n '-i', '20',\n '--eval-file', eval_1_path))\n\n compare_evals(eval_0_path, eval_1_path)\n\n\ndef test_query_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,Label,RawFormulaVal,GroupId',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_subgroup_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.subgroup_id'),\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'GroupId,SubgroupId,SampleId,Label,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_without_cat_features(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-w', '0.1',\n '--one-hot-max-size', '102',\n '--bootstrap-type', 'No',\n '--random-strength', '0',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef make_deterministic_train_cmd(loss_function, pool, train, test, cd, schema='', test_schema='', dev_score_calc_obj_block_size=None, other_options=()):\n pool_path = schema + data_file(pool, train)\n test_path = test_schema + data_file(pool, test)\n cd_path = data_file(pool, cd)\n cmd = (\n '--loss-function', loss_function,\n '-f', pool_path,\n '-t', test_path,\n 
'--column-description', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--random-strength', '0',\n '--has-time',\n '--bootstrap-type', 'No',\n '--boosting-type', 'Plain',\n )\n if dev_score_calc_obj_block_size:\n cmd += ('--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size)\n return cmd + other_options\n\n\ndef run_dist_train(cmd, output_file_switch='--eval-file'):\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n execute_catboost_fit('CPU', cmd + (output_file_switch, eval_0_path,))\n\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n execute_dist_train(cmd + (output_file_switch, eval_1_path,))\n\n eval_0 = np.loadtxt(eval_0_path, dtype='float', delimiter='\\t', skiprows=1)\n eval_1 = np.loadtxt(eval_1_path, dtype='float', delimiter='\\t', skiprows=1)\n assert(np.allclose(eval_0, eval_1, atol=1e-5))\n return eval_1_path\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_with_weights(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_weight.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_with_baseline(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_baseline.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiclass(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiClass',\n pool='cloudness_small',\n train='train_small',\n test='test_small',\n cd='train_float.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiclass_weight(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiClass',\n pool='cloudness_small',\n train='train_small',\n test='test_small',\n cd='train_float_weight.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small_x128_greedylogsum.bin',\n test='test_small',\n cd='train.cd',\n schema='quantized://',\n 
dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum'))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\[email protected]('target', ['PairLogitPairwise', 'QuerySoftMax'])\ndef test_dist_train_quantized_groupid(dev_score_calc_obj_block_size, pairs_file, target):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=target,\n pool='querywise',\n train='train_x128_greedylogsum_aqtaa.bin',\n test='test',\n cd='train.cd.query_id',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--learn-pairs', data_file('querywise', pairs_file)))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized_group_weights(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train.quantized',\n test='test',\n cd='train.cd.query_id',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--learn-group-weights', data_file('querywise', 'train.group_weights')))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized_baseline(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small_x128_greedylogsum.bin',\n test='train_small_x128_greedylogsum.bin',\n cd='train_baseline.cd',\n schema='quantized://',\n test_schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--test-baseline', data_file('higgs', 'test_baseline'),\n '--learn-baseline', data_file('higgs', 'train_baseline')))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_queryrmse(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_subgroup(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--eval-metric', 'PFound')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_pairlogit(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n 
loss_function='PairLogit',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.query_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--learn-pairs', data_file('querywise', 'train.pairs'))\n )))]\n\n\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\ndef test_dist_train_pairlogitpairwise(pairs_file):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='PairLogitPairwise',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd',\n other_options=('--learn-pairs', data_file('querywise', pairs_file))\n )))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_querysoftmax(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QuerySoftMax',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected]('loss_func', ['Logloss', 'RMSE'])\ndef test_dist_train_auc(loss_func):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=loss_func,\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_baseline.cd',\n other_options=('--eval-metric', 'AUC')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected]('loss_func', ['Logloss', 'RMSE'])\ndef test_dist_train_auc_weight(loss_func):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=loss_func,\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_weight.cd',\n other_options=('--eval-metric', 'AUC', '--boost-from-average', '0')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](reason='Boost from average for distributed training')\[email protected]('schema,train', [('quantized://', 'train_small_x128_greedylogsum.bin'), ('', 'train_small')])\ndef test_dist_train_snapshot(schema, train):\n train_cmd = make_deterministic_train_cmd(\n loss_function='RMSE',\n pool='higgs',\n train=train,\n test='test_small',\n schema=schema,\n cd='train.cd')\n\n eval_10_trees_path = yatest.common.test_output_path('10_trees.eval')\n execute_catboost_fit('CPU', train_cmd + ('-i', '10', '--eval-file', eval_10_trees_path,))\n\n snapshot_path = yatest.common.test_output_path('snapshot')\n execute_dist_train(train_cmd + ('-i', '5', '--snapshot-file', snapshot_path,))\n\n eval_5_plus_5_trees_path = yatest.common.test_output_path('5_plus_5_trees.eval')\n execute_dist_train(train_cmd + ('-i', '10', '--eval-file', eval_5_plus_5_trees_path, '--snapshot-file', snapshot_path,))\n\n assert(filecmp.cmp(eval_10_trees_path, eval_5_plus_5_trees_path))\n return [local_canonical_file(eval_5_plus_5_trees_path)]\n\n\ndef test_dist_train_yetirank():\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='YetiRank',\n pool='querywise',\n train='repeat_same_query_8_times',\n test='repeat_same_query_8_times',\n cd='train.cd'\n ), output_file_switch='--test-err-log'))]\n\n\ndef test_no_target():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n pairs_path = yatest.common.test_output_path('pairs')\n\n np.savetxt(train_path, [[0], [1], [2], [3], [4]], delimiter='\\t', fmt='%.4f')\n np.savetxt(cd_path, [('0', 'Num')], delimiter='\\t', fmt='%s')\n np.savetxt(pairs_path, [[0, 1], 
[0, 2], [0, 3], [2, 4]], delimiter='\\t', fmt='%i')\n\n cmd = (\n '-f', train_path,\n '--cd', cd_path,\n '--learn-pairs', pairs_path\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('loss_function', ALL_LOSSES)\ndef test_const_target(loss_function):\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n np.savetxt(\n train_path,\n [[0, 0, 0],\n [0, 0, 1],\n [0, 0, 2],\n [0, 0, 3],\n [0, 0, 4]],\n delimiter='\\t',\n fmt='%.4f'\n )\n np.savetxt(cd_path, [('0', 'Target'), ('1', 'GroupId')], delimiter='\\t', fmt='%s')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '--cd', cd_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_negative_weights():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n open(cd_path, 'wt').write('0\\tNum\\n1\\tWeight\\n2\\tTarget\\n')\n np.savetxt(train_path, [\n [0, 1, 2],\n [1, -1, 1]], delimiter='\\t', fmt='%.4f')\n cmd = ('-f', train_path,\n '--cd', cd_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_zero_learning_rate():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n open(cd_path, 'wt').write(\n '0\\tNum\\n'\n '1\\tNum\\n'\n '2\\tTarget\\n')\n np.savetxt(train_path, [\n [0, 1, 2],\n [1, 1, 1]], delimiter='\\t', fmt='%.4f')\n cmd = ('-f', train_path,\n '--cd', cd_path,\n '--learning-rate', '0.0',\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function, additional_train_params=()):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', metric_period\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Logloss', 'F1', 'Accuracy', 'PFound', 'TotalF1', 'MCC', 'PairAccuracy'])\ndef test_eval_metrics(metric, metric_period):\n if metric == 'PFound':\n train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'QueryRMSE'\n elif metric == 'PairAccuracy':\n # note: pairs are autogenerated\n train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'PairLogitPairwise'\n else:\n train, test, 
cd, loss_function = data_file('adult', 'train_small'), data_file('adult', 'test_small'), data_file('adult', 'train.cd'), 'Logloss'\n\n return do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function)\n\n\ndef test_eval_metrics_with_target_border():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult_not_binarized', 'train_small'),\n test=data_file('adult_not_binarized', 'test_small'),\n cd=data_file('adult_not_binarized', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4')\n )\n\n\ndef test_eval_metrics_with_class_weights():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult', 'train_small'),\n test=data_file('adult', 'test_small'),\n cd=data_file('adult', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\ndef test_eval_metrics_with_target_border_and_class_weights():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult_not_binarized', 'train_small'),\n test=data_file('adult_not_binarized', 'test_small'),\n cd=data_file('adult_not_binarized', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\[email protected]('metrics', ['AUC', 'AUC,Precision'])\ndef test_eval_metrics_with_binarized_target(metrics):\n train = data_file('adult', 'train_small')\n test = data_file('adult', 'test_small')\n cd = data_file('adult', 'train.cd')\n loss_function = 'Logloss'\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', loss_function,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--target-border', '0.25',\n '--custom-metric', metrics,\n )\n execute_catboost_fit('CPU', cmd)\n\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metrics,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2:], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)\n assert np.all(first_metrics == second_metrics)\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['MultiClass', 'MultiClassOneVsAll', 'F1', 'Accuracy', 'TotalF1', 'MCC', 'Precision', 'Recall'])\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('dataset', ['cloudness_small', 'cloudness_lost_class'])\ndef test_eval_metrics_multiclass(metric, loss_function, dataset, metric_period):\n if metric in MULTICLASS_LOSSES and metric != loss_function:\n # MultiClass and MultiClassOneVsAll are incompatible\n return\n\n train, test, cd = data_file(dataset, 'train_small'), data_file(dataset, 'test_small'), data_file(dataset, 'train.cd')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--custom-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n 
'-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--classes-count', '3',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n start_index = 1 if metric == loss_function else 2\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, start_index:], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)\n assert np.all(first_metrics == second_metrics)\n return [local_canonical_file(eval_path)]\n\n\ndef test_eval_metrics_class_names():\n labels = ['a', 'b', 'c', 'd']\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'MultiClass',\n '--custom-metric', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--class-names', ','.join(labels),\n )\n execute_catboost_fit('CPU', cmd)\n\n eval_cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--save-stats'\n )\n execute_catboost_fit('CPU', cmd)\n yatest.common.execute(eval_cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Accuracy', 'AUC:type=Ranking'])\ndef test_eval_metrics_with_baseline(metric_period, metric):\n train = data_file('adult_weight', 'train_weight')\n test = data_file('adult_weight', 'test_weight')\n cd = data_file('train_adult_baseline.cd')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 
'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Accuracy'])\ndef test_eval_metrics_multiclass_with_baseline(metric_period, metric):\n labels = [0, 1, 2, 3]\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n\n cmd = (\n '--loss-function', 'MultiClass',\n '--eval-metric', metric,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--classes-count', '4',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n return [local_canonical_file(eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_leaf_count_limit(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--ctr-leaf-count-limit', '10',\n '-i', '30',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('loss_function', ['RMSE', 'Logloss', 'CrossEntropy'])\ndef test_boost_from_average(boosting_type, grow_policy, loss_function):\n output_model_path = 
yatest.common.test_output_path('model.bin')\n output_calc_eval_path = yatest.common.test_output_path('test_calc.eval')\n output_eval_path_with_avg = yatest.common.test_output_path('test_avg.eval')\n output_eval_path_with_baseline = yatest.common.test_output_path('test_baseline.eval')\n baselined_train = yatest.common.test_output_path('baselined_train')\n baselined_test = yatest.common.test_output_path('baselined_test')\n baselined_cd = yatest.common.test_output_path('baselined.cd')\n\n train_path = data_file('adult', 'train_small')\n test_path = data_file('adult', 'test_small')\n original_cd = data_file('adult', 'train.cd')\n\n # use float32 because we use float in C++\n sum_target = np.float32(0)\n obj_count = np.float32(0)\n with open(train_path) as train_f:\n for line in train_f:\n obj_count += 1\n sum_target += np.float32(line.split()[1])\n\n mean_target = sum_target / obj_count\n if loss_function in ['Logloss', 'CrossEntropy']:\n mean_target = -np.log(1 / mean_target - 1)\n mean_target_str = str(mean_target)\n\n def append_baseline_to_pool(source, target):\n with open(source) as source_f, open(target, 'w') as target_f:\n for line in source_f:\n target_f.write(line.rstrip('\\n') + '\\t' + mean_target_str + '\\n')\n\n append_baseline_to_pool(train_path, baselined_train)\n append_baseline_to_pool(test_path, baselined_test)\n\n with open(baselined_cd, 'w') as cd_output, open(original_cd) as cd_input:\n for line in cd_input:\n cd_output.write(line)\n cd_output.write('18\\tBaseline\\n')\n\n base_cmd = (\n '--loss-function', loss_function,\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '30',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n\n execute_catboost_fit('CPU', base_cmd + (\n '-f', baselined_train,\n '-t', baselined_test,\n '--boost-from-average', '0',\n '--column-description', baselined_cd,\n '--eval-file', output_eval_path_with_baseline,\n ))\n execute_catboost_fit('CPU', base_cmd + (\n '-f', train_path,\n '-t', test_path,\n '--boost-from-average', '1',\n '--column-description', original_cd,\n '--eval-file', output_eval_path_with_avg,\n ))\n yatest.common.execute((\n CATBOOST_PATH, 'calc',\n '--cd', original_cd,\n '--input-path', test_path,\n '-m', output_model_path,\n '-T', '1',\n '--output-path', output_calc_eval_path,\n ))\n\n assert compare_fit_evals_with_precision(output_eval_path_with_avg, output_eval_path_with_baseline)\n assert compare_evals(output_eval_path_with_avg, output_calc_eval_path)\n return [local_canonical_file(output_eval_path_with_avg)]\n\n\[email protected]('eval_period', ['1', '2'])\ndef test_eval_non_additive_metric(eval_period):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'AUC:hints=skip_train~false',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '-o', output_eval_path,\n '--eval-period', eval_period,\n '--block-size', '10'\n )\n yatest.common.execute(cmd)\n\n output_eval_in_parts = yatest.common.test_output_path('eval_in_parts.eval')\n cmd = (\n 
CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'AUC:hints=skip_train~false',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '-o', output_eval_in_parts,\n '--eval-period', eval_period,\n '--calc-on-parts',\n '--block-size', '10'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.loadtxt(output_eval_path, skiprows=1)\n second_metrics = np.loadtxt(output_eval_in_parts, skiprows=1)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('max_ctr_complexity', [1, 2])\ndef test_eval_eq_calc(boosting_type, grow_policy, max_ctr_complexity):\n one_hot_max_size = 2\n cd_path = yatest.common.test_output_path('cd.txt')\n train_path = yatest.common.test_output_path('train.txt')\n test_path = yatest.common.test_output_path('test.txt')\n model_path = yatest.common.test_output_path('model.bin')\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n np.savetxt(cd_path, [['0', 'Target'],\n ['1', 'Categ'],\n ['2', 'Categ']\n ], fmt='%s', delimiter='\\t')\n np.savetxt(train_path, [['1', 'A', 'X'],\n ['1', 'B', 'Y'],\n ['1', 'C', 'Y'],\n ['0', 'A', 'Z'],\n ['0', 'B', 'Z'],\n ], fmt='%s', delimiter='\\t')\n np.savetxt(test_path, [['1', 'A', 'Y'],\n ['1', 'D', 'U'],\n ['1', 'D', 'U']\n ], fmt='%s', delimiter='\\t')\n cmd_fit = ('--loss-function', 'Logloss',\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--cd', cd_path,\n '-f', train_path,\n '-t', test_path,\n '-m', model_path,\n '--eval-file', test_eval_path,\n '-i', '5',\n '-T', '1',\n '--max-ctr-complexity', str(max_ctr_complexity),\n '--one-hot-max-size', str(one_hot_max_size),\n )\n cmd_calc = (CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_path,\n '-m', model_path,\n '-T', '1',\n '--output-path', calc_eval_path,\n )\n execute_catboost_fit('CPU', cmd_fit)\n yatest.common.execute(cmd_calc)\n assert(compare_evals(test_eval_path, calc_eval_path))\n\n\ndef do_test_object_importances(pool, loss_function, additional_train_params):\n output_model_path = yatest.common.test_output_path('model.bin')\n object_importances_path = yatest.common.test_output_path('object_importances.tsv')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-i', '10',\n '--boosting-type', 'Plain',\n '-T', '4',\n '-m', output_model_path,\n '--use-best-model', 'false'\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'ostr',\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path,\n '-o', object_importances_path,\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(object_importances_path)]\n\n\[email protected]('loss_function', ['RMSE', 'Logloss', 'Poisson'])\[email protected]('leaf_estimation_iteration', ['1', '2'])\ndef test_object_importances(loss_function, leaf_estimation_iteration):\n additional_train_params = (\n '--leaf-estimation-method', 'Gradient',\n '--leaf-estimation-iterations', leaf_estimation_iteration\n )\n return do_test_object_importances(\n pool='adult',\n loss_function=loss_function,\n 
additional_train_params=additional_train_params\n )\n\n\ndef test_object_importances_with_target_border():\n return do_test_object_importances(\n pool='adult_not_binarized',\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4')\n )\n\n\ndef test_object_importances_with_class_weights():\n return do_test_object_importances(\n pool='adult',\n loss_function='Logloss',\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\ndef test_object_importances_with_target_border_and_class_weights():\n return do_test_object_importances(\n pool='adult_not_binarized',\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\n# Create `num_tests` test files from `test_input_path`.\ndef split_test_to(num_tests, test_input_path):\n test_input_lines = open(test_input_path).readlines()\n test_paths = [yatest.common.test_output_path('test{}'.format(i)) for i in range(num_tests)]\n for testno in range(num_tests):\n test_path = test_paths[testno]\n test_lines = test_input_lines[testno::num_tests]\n open(test_path, 'wt').write(''.join(test_lines))\n return test_paths\n\n\n# Create a few shuffles from list of test files, for use with `-t` option.\ndef create_test_shuffles(test_paths, seed=20181219, prng=None):\n if prng is None:\n prng = np.random.RandomState(seed=seed)\n num_tests = len(test_paths)\n num_shuffles = num_tests # if num_tests < 3 else num_tests * (num_tests - 1)\n test_shuffles = set()\n while len(test_shuffles) < num_shuffles:\n test_shuffles.add(tuple(prng.permutation(test_paths)))\n return [','.join(shuffle) for shuffle in test_shuffles]\n\n\ndef fit_calc_cksum(fit_stem, calc_stem, test_shuffles):\n import hashlib\n last_cksum = None\n for i, shuffle in enumerate(test_shuffles):\n model_path = yatest.common.test_output_path('model{}.bin'.format(i))\n eval_path = yatest.common.test_output_path('eval{}.txt'.format(i))\n execute_catboost_fit('CPU', fit_stem + (\n '-t', shuffle,\n '-m', model_path,\n ))\n yatest.common.execute(calc_stem + (\n '-m', model_path,\n '--output-path', eval_path,\n ))\n cksum = hashlib.md5(open(eval_path).read()).hexdigest()\n if last_cksum is None:\n last_cksum = cksum\n continue\n assert(last_cksum == cksum)\n\n\[email protected]('num_tests', [3, 4])\[email protected]('boosting_type', ['Plain', 'Ordered'])\ndef test_multiple_eval_sets_order_independent(boosting_type, num_tests):\n train_path = data_file('adult', 'train_small')\n cd_path = data_file('adult', 'train.cd')\n test_input_path = data_file('adult', 'test_small')\n fit_stem = (\n '--loss-function', 'RMSE',\n '-f', train_path,\n '--cd', cd_path,\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n calc_stem = (\n CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_input_path,\n '-T', '4',\n )\n # We use a few shuffles of tests and check equivalence of resulting models\n prng = np.random.RandomState(seed=20181219)\n test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)\n fit_calc_cksum(fit_stem, calc_stem, test_shuffles)\n\n\[email protected]('num_tests', [3, 4])\[email protected]('boosting_type', ['Plain', 'Ordered'])\ndef test_multiple_eval_sets_querywise_order_independent(boosting_type, num_tests):\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n fit_stem = (\n '--loss-function', 'QueryRMSE',\n '-f', train_path,\n 
'--cd', cd_path,\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n calc_stem = (CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_input_path,\n '-T', '4',\n )\n # We use a few shuffles of tests and check equivalence of resulting models\n prng = np.random.RandomState(seed=20181219)\n test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)\n fit_calc_cksum(fit_stem, calc_stem, test_shuffles)\n\n\ndef test_multiple_eval_sets_no_empty():\n train_path = data_file('adult', 'train_small')\n cd_path = data_file('adult', 'train.cd')\n test_input_path = data_file('adult', 'test_small')\n fit_stem = ('--loss-function', 'RMSE',\n '-f', train_path,\n '--cd', cd_path,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n test0_path = yatest.common.test_output_path('test0.txt')\n open(test0_path, 'wt').write('')\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', fit_stem + (\n '-t', ','.join((test_input_path, test0_path))\n ))\n\n\[email protected]('loss_function', ['RMSE', 'QueryRMSE'])\ndef test_multiple_eval_sets(loss_function):\n num_tests = 5\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n eval_path = yatest.common.test_output_path('test.eval')\n test_paths = list(reversed(split_test_to(num_tests, test_input_path)))\n cmd = ('--loss-function', loss_function,\n '-f', train_path,\n '-t', ','.join(test_paths),\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(eval_path)]\n\n\ndef test_multiple_eval_sets_err_log():\n num_tests = 3\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n test_err_log_path = yatest.common.test_output_path('test-err.log')\n json_log_path = yatest.common.test_output_path('json.log')\n test_paths = reversed(split_test_to(num_tests, test_input_path))\n cmd = ('--loss-function', 'RMSE',\n '-f', train_path,\n '-t', ','.join(test_paths),\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--test-err-log', test_err_log_path,\n '--json-log', json_log_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(test_err_log_path),\n local_canonical_file(remove_time_from_json(json_log_path))]\n\n\n# Cast<float>(CityHash('Quvena')) is QNaN\n# Cast<float>(CityHash('Sineco')) is SNaN\[email protected]('cat_value', ['Normal', 'Quvena', 'Sineco'])\ndef test_const_cat_feature(cat_value):\n\n def make_a_set(nrows, value, seed=20181219, prng=None):\n if prng is None:\n prng = np.random.RandomState(seed=seed)\n label = prng.randint(0, nrows, [nrows, 1])\n feature = np.full([nrows, 1], value, dtype='|S{}'.format(len(value)))\n return np.concatenate([label, feature], axis=1)\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Categ']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=20181219)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = 
yatest.common.test_output_path('eval.txt')\n\n cmd = ('--loss-function', 'RMSE',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_model_metadata():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '2',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-w', '0.1',\n '--set-metadata-from-freeargs',\n 'A', 'A',\n 'BBB', 'BBB',\n 'CCC', 'A'\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'metadata', 'set',\n '-m', output_model_path,\n '--key', 'CCC',\n '--value', 'CCC'\n )\n yatest.common.execute(calc_cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'metadata', 'set',\n '-m', output_model_path,\n '--key', 'CCC',\n '--value', 'CCC'\n )\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(output_model_path)\n\n assert 'A' == py_catboost.get_metadata()['A']\n assert 'BBB' == py_catboost.get_metadata()['BBB']\n assert 'CCC' == py_catboost.get_metadata()['CCC']\n\n\ndef test_fit_multiclass_with_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '--class-names', ','.join(labels),\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '--use-best-model', 'false',\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', eval_path\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n return [local_canonical_file(eval_path)]\n\n\ndef test_extract_multiclass_labels_from_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '--class-names', ','.join(labels),\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n 
'--column-description', cd_path,\n '-T', '4',\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', 'RawFormulaVal,Class',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['a', 'b', 'c', 'd']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n assert json.loads(py_catboost.get_metadata()['params'])['data_processing_options']['class_names'] == ['a', 'b', 'c', 'd']\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('loss_function', ['MultiClass', 'MultiClassOneVsAll', 'Logloss', 'RMSE'])\ndef test_save_class_labels_from_data(loss_function):\n labels = [10000000, 7, 0, 9999]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n if loss_function == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit('CPU', cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n if loss_function in MULTICLASS_LOSSES:\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n elif loss_function == 'Logloss':\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n else:\n assert 'class_params' not in py_catboost.get_metadata()\n\n\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_apply_multiclass_labels_from_data(prediction_type):\n labels = [10000000, 7, 0, 9999]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = 
yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', prediction_type,\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n if prediction_type in ['Probability', 'RawFormulaVal']:\n with open(eval_path, \"rt\") as f:\n for line in f:\n assert line[:-1] == 'SampleId\\t{}:Class=0.0\\t{}:Class=7.0\\t{}:Class=9999.0\\t{}:Class=10000000.0' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n break\n else: # Class\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if not i:\n assert line[:-1] == 'SampleId\\tClass'\n else:\n assert float(line[:-1].split()[1]) in labels\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_save_and_apply_multiclass_labels_from_classes_count(loss_function, prediction_type):\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, [0, 1, 2, 3], prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', loss_function,\n '--classes-count', '4',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [1, 2]\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 4\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', prediction_type\n )\n\n yatest.common.execute(calc_cmd)\n\n if prediction_type == 'RawFormulaVal':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 
'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert float(line[:-1].split()[1]) == float('-inf') and float(line[:-1].split()[4]) == float('-inf') # fictitious approxes must be negative infinity\n\n if prediction_type == 'Probability':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert (abs(float(line[:-1].split()[1])) < 1e-307\n and abs(float(line[:-1].split()[4])) < 1e-307) # fictitious probabilities must be virtually zero\n\n if prediction_type == 'Class':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\tClass'\n else:\n assert float(line[:-1].split()[1]) in [1, 2] # probability of 0,3 classes appearance must be zero\n\n return [local_canonical_file(eval_path)]\n\n\ndef test_set_class_names_implicitly():\n INPUT_CLASS_LABELS = ['a', 'bc', '7.', '8.0', '19.2']\n SAVED_CLASS_LABELS = ['19.2', '7.', '8.0', 'a', 'bc']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', 'RawFormulaVal,Class',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3, 4]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == SAVED_CLASS_LABELS\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n yatest.common.execute(calc_cmd)\n\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if not i:\n assert line[:-1] == 'SampleId\\t{}:Class=19.2\\t{}:Class=7.\\t{}:Class=8.0\\t{}:Class=a\\t{}:Class=bc\\tClass' \\\n .format(*(['RawFormulaVal'] * 5))\n else:\n label = line[:-1].split()[-1]\n assert label in SAVED_CLASS_LABELS\n\n return [local_canonical_file(eval_path)]\n\n\nCANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH = data_file('', 'multiclass_model.bin')\n\n\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_multiclass_model_backward_compatibility(prediction_type):\n model = catboost.CatBoost()\n model.load_model(CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH)\n\n assert 'class_params' not 
in model.get_metadata()\n\n pool = catboost.Pool(data_file('cloudness_small', 'train_small'),\n column_description=data_file('cloudness_small', 'train.cd'))\n model.predict(data=pool, prediction_type='Class')\n model.eval_metrics(data=pool, metrics=['Accuracy'])\n\n output_path = yatest.common.test_output_path('out.txt')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'train_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH,\n '--prediction-type', prediction_type,\n '--output-path', output_path,\n )\n\n yatest.common.execute(calc_cmd)\n return [local_canonical_file(output_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('use_best_model', ['true', 'false'])\ndef test_learning_rate_auto_set(boosting_type, use_best_model):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', use_best_model,\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--od-type', 'Iter',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_paths_with_dsv_scheme():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', 'dsv://' + data_file('querywise', 'train'),\n '-t', 'dsv://' + data_file('querywise', 'test'),\n '--column-description', 'dsv://' + data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_skip_train():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n json_log_path = yatest.common.test_output_path('json_log.json')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'AverageGain:top=2;hints=skip_train~true',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--json-log', json_log_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(remove_time_from_json(json_log_path))]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_group_weight(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n def run_catboost(train_path, test_path, cd_path, eval_path):\n cmd = (\n '--loss-function', 'YetiRank',\n '-f', data_file('querywise', train_path),\n '-t', data_file('querywise', test_path),\n 
'--column-description', data_file('querywise', cd_path),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n output_eval_path_first = yatest.common.test_output_path('test_first.eval')\n output_eval_path_second = yatest.common.test_output_path('test_second.eval')\n run_catboost('train', 'test', 'train.cd', output_eval_path_first)\n run_catboost('train.const_group_weight', 'test.const_group_weight', 'train.cd.group_weight', output_eval_path_second)\n assert filecmp.cmp(output_eval_path_first, output_eval_path_second)\n\n run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('loss_function', ['QueryRMSE', 'RMSE'])\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_group_weight_and_object_weight(boosting_type, grow_policy, loss_function, dev_score_calc_obj_block_size):\n\n def run_catboost(train_path, test_path, cd_path, eval_path):\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', train_path),\n '-t', data_file('querywise', test_path),\n '--column-description', data_file('querywise', cd_path),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n output_eval_path_first = yatest.common.test_output_path('test_first.eval')\n output_eval_path_second = yatest.common.test_output_path('test_second.eval')\n run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path_first)\n run_catboost('train', 'test', 'train.cd.weight', output_eval_path_second)\n assert filecmp.cmp(output_eval_path_first, output_eval_path_second)\n\n\ndef test_snapshot_without_random_seed():\n\n def run_catboost(iters, eval_path, additional_params=None):\n cmd = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', str(iters),\n '-T', '4',\n '--use-best-model', 'False',\n '--eval-file', eval_path,\n ]\n if additional_params:\n cmd += additional_params\n tmpfile = 'test_data_dumps'\n with open(tmpfile, 'w') as f:\n execute_catboost_fit('CPU', cmd, stdout=f)\n with open(tmpfile, 'r') as output:\n line_count = sum(1 for line in output)\n return line_count\n\n model_path = yatest.common.test_output_path('model.bin')\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = yatest.common.test_output_path('test.cbp')\n additional_params = ['--snapshot-file', progress_path, '-m', model_path]\n\n first_line_count = run_catboost(15, eval_path, additional_params=additional_params)\n second_line_count = run_catboost(30, eval_path, additional_params=additional_params)\n third_line_count = run_catboost(45, eval_path, additional_params=additional_params)\n assert first_line_count == second_line_count == third_line_count\n\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n cb_model = catboost.CatBoost()\n cb_model.load_model(model_path)\n random_seed = cb_model.random_seed_\n run_catboost(45, canon_eval_path, 
additional_params=['-r', str(random_seed)])\n assert filecmp.cmp(canon_eval_path, eval_path)\n\n\ndef test_snapshot_with_interval():\n\n def run_with_timeout(cmd, timeout):\n try:\n execute_catboost_fit('CPU', cmd, timeout=timeout)\n except ExecutionTimeoutError:\n return True\n return False\n\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-T', '4',\n ]\n\n measure_time_iters = 100\n exec_time = timeit.timeit(lambda: execute_catboost_fit('CPU', cmd + ['-i', str(measure_time_iters)]), number=1)\n\n SNAPSHOT_INTERVAL = 1\n TIMEOUT = 5\n TOTAL_TIME = 25\n iters = int(TOTAL_TIME / (exec_time / measure_time_iters))\n\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n canon_params = cmd + ['--eval-file', canon_eval_path, '-i', str(iters)]\n execute_catboost_fit('CPU', canon_params)\n\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = yatest.common.test_output_path('test.cbp')\n model_path = yatest.common.test_output_path('model.bin')\n params = cmd + ['--snapshot-file', progress_path,\n '--snapshot-interval', str(SNAPSHOT_INTERVAL),\n '-m', model_path,\n '--eval-file', eval_path,\n '-i', str(iters)]\n\n was_timeout = False\n while run_with_timeout(params, TIMEOUT):\n was_timeout = True\n assert was_timeout\n assert filecmp.cmp(canon_eval_path, eval_path)\n\n\ndef test_snapshot_with_different_params():\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-T', '4',\n '-i', '10',\n '--snapshot-file', 'snapshot.cbp'\n ]\n\n cmd_1 = cmd + ['--eval-metric', 'Logloss']\n cmd_2 = cmd + ['--eval-metric', 'Accuracy']\n execute_catboost_fit('CPU', cmd_1)\n try:\n execute_catboost_fit('CPU', cmd_2)\n except ExecutionError:\n return\n\n assert False\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querysoftmax(boosting_type, grow_policy, leaf_estimation_method, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QuerySoftMax',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--leaf-estimation-method', leaf_estimation_method,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_shap_verbose():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n output_log = yatest.common.test_output_path('log')\n cmd_fit = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n 
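    # Fit a 250-iteration Logloss model first, then run fstr with --fstr-type ShapValues and --verbose 12 and check how many progress lines are written to the log.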
execute_catboost_fit('CPU', cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '12',\n '--fstr-type', 'ShapValues',\n '-T', '4',\n '-m', output_model_path,\n ]\n with open(output_log, 'w') as log:\n yatest.common.execute(cmd_shap, stdout=log)\n with open(output_log, 'r') as log:\n line_count = sum(1 for line in log)\n assert line_count == 5\n\n\ndef test_shap_approximate():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n cmd_fit = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n execute_catboost_fit('CPU', cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '0',\n '--fstr-type', 'ShapValues',\n '--shap-calc-type', 'Approximate',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_shap)\n\n return [local_canonical_file(output_values_path)]\n\n\ndef test_shap_exact():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n cmd_fit = [\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '0',\n '--fstr-type', 'ShapValues',\n '--shap-calc-type', 'Exact',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_shap)\n\n return [local_canonical_file(output_values_path)]\n\n\[email protected]('bagging_temperature', ['0', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querywise_bayesian_bootstrap(bagging_temperature, sampling_unit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--bootstrap-type', 'Bayesian',\n '--sampling-unit', sampling_unit,\n '--bagging-temperature', bagging_temperature,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('subsample', ['0.5', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querywise_bernoulli_bootstrap(subsample, sampling_unit, 
dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--bootstrap-type', 'Bernoulli',\n '--sampling-unit', sampling_unit,\n '--subsample', subsample,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nLOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING = ['YetiRankPairwise', 'PairLogitPairwise']\n\n\[email protected]('bagging_temperature', ['0', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected]('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairwise_bayesian_bootstrap(bagging_temperature, sampling_unit, loss_function, dev_score_calc_obj_block_size):\n if loss_function == 'YetiRankPairwise' and sampling_unit == 'Group' and bagging_temperature == '1':\n return pytest.xfail(reason='MLTOOLS-1801')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--bootstrap-type', 'Bayesian',\n '--sampling-unit', sampling_unit,\n '--bagging-temperature', bagging_temperature,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('subsample', ['0.5', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected]('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairwise_bernoulli_bootstrap(subsample, sampling_unit, loss_function, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--bootstrap-type', 'Bernoulli',\n '--sampling-unit', sampling_unit,\n '--subsample', subsample,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd, env=dict(MKL_CBWR='SSE4_2'))\n eps = 0 if yatest.common.context.sanitize is None else 0.1\n\n return [local_canonical_file(output_eval_path, 
diff_tool=diff_tool(eps))]\n\n\[email protected]('loss_function', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'QueryRMSE'])\[email protected]('metric', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'AUC', 'PFound'])\ndef test_bad_metrics_combination(loss_function, metric):\n BAD_PAIRS = {\n 'Logloss': ['RMSE', 'MultiClass'],\n 'RMSE': ['Logloss', 'MultiClass'],\n 'MultiClass': ['Logloss', 'RMSE', 'QuerySoftMax', 'PFound'],\n 'QuerySoftMax': ['RMSE', 'MultiClass', 'QueryRMSE'],\n 'QueryRMSE': ['Logloss', 'MultiClass', 'QuerySoftMax'],\n 'YetiRank': ['Logloss', 'RMSE', 'MultiClass']\n }\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'QueryId']], fmt='%s', delimiter='\\t')\n\n data = np.array([[0, 1, 0, 1, 0], [0, 0, 1, 1, 2], [1, 2, 3, 4, 5]]).T\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, data, fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, data, fmt='%s', delimiter='\\t')\n\n cmd = (\n '--loss-function', loss_function,\n '--custom-metric', metric,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '4',\n '-T', '4',\n )\n\n try:\n execute_catboost_fit('CPU', cmd)\n except Exception:\n assert metric in BAD_PAIRS[loss_function]\n return\n\n assert metric not in BAD_PAIRS[loss_function]\n\n\[email protected]('metric', [('good', ',AUC,'), ('bad', ',')])\ndef test_extra_commas(metric):\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', metric[1]\n )\n if metric[0] == 'good':\n execute_catboost_fit('CPU', cmd)\n if metric[0] == 'bad':\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef execute_fit_for_test_quantized_pool(loss_function, pool_path, test_path, cd_path, eval_path,\n border_count=128, other_options=()):\n model_path = yatest.common.test_output_path('model.bin')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', pool_path,\n '-t', test_path,\n '--cd', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-x', str(border_count),\n '--feature-border-type', 'GreedyLogSum',\n '-m', model_path,\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd + other_options)\n\n\ndef test_quantized_pool():\n test_path = data_file('higgs', 'test_small')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path=data_file('higgs', 'train_small'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_ignored_features():\n test_path = data_file('higgs', 'test_small')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path=data_file('higgs', 'train_small'),\n test_path=test_path,\n 
cd_path=data_file('higgs', 'train.cd'),\n eval_path=tsv_eval_path,\n other_options=('-I', '5',)\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=quantized_eval_path,\n other_options=('-I', '5',)\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_groupid():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_ignored_during_quantization():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path,\n other_options=('-I', '18-36',)\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa_ignore_18_36.bin'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_quantized_test():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),\n test_path='quantized://' + data_file('querywise', 'test_borders_from_train_aqtaa.bin'),\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_with_large_grid():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path,\n border_count=1024\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n 
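        # Same training on the pre-quantized x1024 train/test pools; the resulting eval file must match the raw-dsv run above.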
loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train.quantized_x1024'),\n test_path='quantized://' + data_file('querywise', 'test.quantized_x1024'),\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_learn_without_header_eval_with_header():\n train_path = yatest.common.test_output_path('airlines_without_header')\n with open(data_file('airlines_5K', 'train'), 'r') as with_header_file:\n with open(train_path, 'w') as without_header_file:\n without_header_file.writelines(with_header_file.readlines()[1:])\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cmd_fit = (\n '--loss-function', 'Logloss',\n '-f', train_path,\n '--cd', data_file('airlines_5K', 'cd'),\n '-i', '10',\n '-m', model_path\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('airlines_5K', 'test'),\n '--cd', data_file('airlines_5K', 'cd'),\n '-m', model_path,\n '--has-header'\n )\n yatest.common.execute(cmd_calc)\n\n\ndef test_group_weights_file():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n\n def run_catboost(eval_path, cd_file, is_additional_query_weights):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', cd_file),\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n ]\n if is_additional_query_weights:\n cmd += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(first_eval_path, 'train.cd', True)\n run_catboost(second_eval_path, 'train.cd.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\ndef test_group_weights_file_quantized():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n\n def run_catboost(eval_path, train, test, is_additional_query_weights):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', 'quantized://' + data_file('querywise', train),\n '-t', 'quantized://' + data_file('querywise', test),\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n ]\n if is_additional_query_weights:\n cmd += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(first_eval_path, 'train.quantized', 'test.quantized', True)\n run_catboost(second_eval_path, 'train.quantized.group_weight', 'test.quantized.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\ndef test_mode_roc():\n eval_path = yatest.common.test_output_path('eval.tsv')\n output_roc_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--counter-calc-method', 'SkipTest',\n '--eval-file', eval_path,\n '--use-best-model', 
'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n roc_cmd = (\n CATBOOST_PATH,\n 'roc',\n '--eval-file', eval_path,\n '--output-path', output_roc_path\n )\n yatest.common.execute(roc_cmd)\n\n return local_canonical_file(output_roc_path)\n\n\[email protected]('pool', ['adult', 'higgs', 'adult_nan'])\ndef test_convert_model_to_json(pool):\n output_model_path = yatest.common.test_output_path('model')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '-m', output_model_path,\n '--nan-mode', 'Max' if pool == 'adult_nan' else 'Forbidden',\n '--model-format', 'CatboostBinary,Json'\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path_bin = yatest.common.test_output_path('predict_test_bin.eval')\n formula_predict_path_json = yatest.common.test_output_path('predict_test_json.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path + '.json',\n '--model-format', 'Json',\n '--output-path', formula_predict_path_json\n )\n yatest.common.execute(calc_cmd)\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path + '.bin',\n '--output-path', formula_predict_path_bin\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path_bin))\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path_json))\n\n\nLOSS_FUNCTIONS_NO_MAPE = ['RMSE', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson']\n\n\[email protected]('loss_function', LOSS_FUNCTIONS_NO_MAPE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantized_adult_pool(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n quantized_train_file = 'quantized://' + data_file('quantized_adult', 'train.qbin')\n quantized_test_file = 'quantized://' + data_file('quantized_adult', 'test.qbin')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', quantized_train_file,\n '-t', quantized_test_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n\n execute_catboost_fit('CPU', cmd)\n cd_file = data_file('quantized_adult', 'pool.cd')\n test_file = data_file('quantized_adult', 'test_small.tsv')\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantized_with_one_thread(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n quantized_train_file = 'quantized://' + data_file('querywise', 'train.quantized')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', quantized_train_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '1',\n '-m', output_model_path,\n '--target-border', '0.5',\n )\n print(cmd)\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_eval_result_on_different_pool_type():\n output_eval_path = 
yatest.common.test_output_path('test.eval')\n output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')\n\n def run_catboost(train, test, eval_path):\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--border-count', '128',\n '-f', train,\n '-t', test,\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--target-border', '0.5',\n '--eval-file', eval_path,\n )\n\n execute_catboost_fit('CPU', cmd)\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n\n run_catboost(get_pool_path('train'), get_pool_path('test'), output_eval_path)\n run_catboost(get_pool_path('train', True), get_pool_path('test', True), output_quantized_eval_path)\n\n assert filecmp.cmp(output_eval_path, output_quantized_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_apply_on_different_pool_type():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n cd_file = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', get_pool_path('train', True),\n '--test-set', get_pool_path('test', True),\n '--column-description', cd_file,\n '-i', '10',\n '-T', '4',\n '--target-border', '0.5',\n '--model-file', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', get_pool_path('test'),\n '--column-description', cd_file,\n '--model-file', output_model_path,\n '--output-path', output_eval_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', get_pool_path('test', True),\n '--model-file', output_model_path,\n '--output-path', output_quantized_eval_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n assert filecmp.cmp(output_eval_path, output_quantized_eval_path)\n\n\ndef test_apply_output_column_by_idx():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n learn = data_file('black_friday', 'train')\n test = data_file('black_friday', 'test')\n cd = data_file('black_friday', 'cd')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '--learn-set', learn,\n '--test-set', test,\n '--column-description', cd,\n '-i', '10',\n '-T', '4',\n '--model-file', output_model_path,\n '--has-header'\n )\n execute_catboost_fit('CPU', cmd)\n\n column_names = [\n 'User_ID',\n 'Product_ID',\n 'Gender',\n 'Age',\n 'Occupation',\n 'City_Category',\n 'Stay_In_Current_City_Years',\n 'Marital_Status',\n 'Product_Category_1',\n 'Product_Category_2',\n 'Product_Category_3',\n 'Purchase'\n ]\n output_columns = ','.join(['#{}:{}'.format(idx, name) for idx, name in enumerate(column_names)])\n output_columns = 'RawFormulaVal,' + output_columns\n\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', test,\n '--column-description', cd,\n '--model-file', output_model_path,\n '--output-path', output_eval_path,\n '--output-columns', output_columns,\n '--has-header'\n )\n yatest.common.execute(cmd)\n\n with 
open(output_eval_path, 'r') as f:\n eval_lines = f.readlines()\n with open(test, 'r') as f:\n test_lines = f.readlines()\n\n assert len(eval_lines) == len(test_lines)\n for i in range(len(eval_lines)):\n eval_line = eval_lines[i].split('\\t')[1:] # skip RawFormulaVal\n test_line = test_lines[i].split('\\t')\n\n for eval_column, test_column in zip(eval_line, test_line):\n assert eval_column == test_column\n\n\[email protected](\n 'dataset_name,loss_function,has_pairs,has_group_weights',\n [\n ('adult_small_broken_features', 'Logloss', False, False),\n ('querywise_broken_pairs', 'RMSE', True, False),\n ('querywise_broken_group_weights', 'RMSE', False, True),\n ]\n)\ndef test_broken_dsv_format(dataset_name, loss_function, has_pairs, has_group_weights):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n # iterations and threads are specified just to finish fast if test is xpass\n cmd = (\n '--loss-function', loss_function,\n '--learn-set', data_file('broken_format', dataset_name, 'train'),\n '--test-set', data_file('broken_format', dataset_name, 'test'),\n '--column-description', data_file('broken_format', dataset_name, 'train.cd'),\n '-i', '1',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if has_pairs:\n cmd += (\n '--learn-pairs', data_file('broken_format', dataset_name, 'train.pairs'),\n '--test-pairs', data_file('broken_format', dataset_name, 'test.pairs'),\n )\n if has_group_weights:\n cmd += (\n '--learn-group-weights', data_file('broken_format', dataset_name, 'train.group_weights'),\n '--test-group-weights', data_file('broken_format', dataset_name, 'test.group_weights'),\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]_fixtures('compressed_data')\[email protected](\n 'loss_function,eval_metric,boosting_type',\n [\n ('QueryRMSE', 'NDCG', 'Plain'),\n ('QueryRMSE', 'NDCG', 'Ordered'),\n # Boosting type 'Ordered' is not supported for YetiRankPairwise and PairLogitPairwise\n ('YetiRankPairwise', 'NDCG', 'Plain'),\n ('PairLogit:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),\n ('PairLogitPairwise:max_pairs=30', 'NDCG', 'Plain'),\n ('PairLogitPairwise:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),\n ],\n ids=[\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Ordered',\n 'loss_function=YetiRankPairwise,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=PairLogit:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain',\n 'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain'\n ]\n)\ndef test_groupwise_with_cat_features(compressed_data, loss_function, eval_metric, boosting_type):\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', os.path.join(compressed_data.name, 'mslr_web1k', 'train'),\n '-t', os.path.join(compressed_data.name, 'mslr_web1k', 'test'),\n '--column-description', os.path.join(compressed_data.name, 'mslr_web1k', 'cd.with_cat_features'),\n '--boosting-type', boosting_type,\n '-i', '100',\n '-T', '8',\n '--eval-metric', eval_metric,\n '--metric-period', '100',\n '--use-best-model', 'false',\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return 
[local_canonical_file(test_error_path, diff_tool=diff_tool(1e-5))]\n\n\ndef test_gradient_walker():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n '--max-ctr-complexity', '4',\n '--leaf-estimation-iterations', '10',\n '--leaf-estimation-backtracking', 'AnyImprovement',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\n# training with pairwise scoring with categorical features on CPU does not yet support one-hot features\n# so they are disabled by default, explicit non-default specification should be an error\[email protected](\n 'loss_function', ['YetiRankPairwise', 'PairLogitPairwise'],\n ids=['loss_function=YetiRankPairwise', 'loss_function=PairLogitPairwise']\n)\ndef test_groupwise_with_bad_one_hot_max_size(loss_function):\n cmd = (\n '--loss-function', loss_function,\n '--has-header',\n '-f', data_file('black_friday', 'train'),\n '-t', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '--boosting-type', 'Plain',\n '-i', '10',\n '-T', '4',\n '--eval-metric', 'NDCG',\n '--one_hot_max_size', '10'\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_load_quantized_pool_with_double_baseline():\n # Dataset with 3 random columns, first column is Target, seconds columns is Num, third column\n # is Baseline.\n #\n # There are only 10 rows in dataset.\n cmd = (\n '-f', 'quantized://' + data_file('quantized_with_baseline', 'dataset.qbin'),\n '-i', '10')\n\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_write_predictions_to_streams():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n calc_output_eval_path_redirected = yatest.common.test_output_path('calc_test.eval')\n\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--eval-file', output_eval_path,\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-m', output_model_path\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', 'stream://stdout',\n )\n with open(calc_output_eval_path_redirected, 'w') as catboost_stdout:\n yatest.common.execute(calc_cmd, stdout=catboost_stdout)\n\n assert compare_evals(output_eval_path, calc_output_eval_path_redirected)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', 'stream://stderr'\n )\n with open(calc_output_eval_path_redirected, 'w') as catboost_stderr:\n yatest.common.execute(calc_cmd, stderr=catboost_stderr)\n\n assert compare_evals(output_eval_path, calc_output_eval_path_redirected)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_mvs_bootstrap(boosting_type):\n def run_catboost(eval_path, mvs_sample_rate):\n cmd = [\n '--use-best-model', 'false',\n '--allow-writing-files', 'false',\n '--loss-function', 'Logloss',\n '--max-ctr-complexity', '5',\n '-f', data_file('airlines_5K', 
'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '--boosting-type', boosting_type,\n '--bootstrap-type', 'MVS',\n '--subsample', mvs_sample_rate,\n '-i', '50',\n '-w', '0.03',\n '-T', '6',\n '-r', '0',\n '--leaf-estimation-iterations', '10',\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n ref_eval_path = yatest.common.test_output_path('test.eval')\n run_catboost(ref_eval_path, '0.5')\n\n for sample_rate in ('0.1', '0.9'):\n eval_path = yatest.common.test_output_path('test_{}.eval'.format(sample_rate))\n run_catboost(eval_path, sample_rate)\n assert (filecmp.cmp(ref_eval_path, eval_path) is False)\n\n return [local_canonical_file(ref_eval_path)]\n\n\ndef test_simple_ctr():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n simple_ctr = ','.join((\n 'Borders:TargetBorderCount=15',\n 'Buckets:TargetBorderCount=15',\n 'Borders:TargetBorderType=MinEntropy',\n 'Counter:CtrBorderCount=20',\n ))\n execute_catboost_fit('CPU', (\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--simple-ctr', simple_ctr,\n ))\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_output_options():\n output_options_path = 'training_options.json'\n train_dir = 'catboost_info'\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--train-dir', train_dir,\n '--training-options-file', output_options_path,\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(os.path.join(train_dir, output_options_path))\n\n\ndef test_target_border():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--target-border', '0.3'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_monotonic_constraint():\n train_pool = catboost.Pool(\n data_file('higgs', 'train_small'),\n column_description=data_file('higgs', 'train.cd')\n )\n test_pool = catboost.Pool(\n data_file('higgs', 'test_small'),\n column_description=data_file('higgs', 'train.cd')\n )\n monotone_constraints = [0, 0, 1, -1, 0, 0, 1, 0, -1, 1, 1, -1, 0, 1, 0, 0, -1, 1, 1, -1, 0, 0, 0, 0, 0, -1, 0, -1]\n model = catboost.CatBoostRegressor(\n n_estimators=100,\n learning_rate=0.2,\n monotone_constraints=monotone_constraints,\n verbose=False\n ).fit(train_pool, eval_set=test_pool)\n\n dummy_data = np.zeros((1, test_pool.num_col()))\n dummy_target = np.zeros(len(dummy_data))\n feature_stats = model.calc_feature_statistics(dummy_data, dummy_target, plot=False)\n for feature_index, feature_name in enumerate(model.feature_names_):\n monotonicity = monotone_constraints[feature_index]\n if monotonicity == 0:\n continue\n feature_borders = feature_stats[feature_name]['borders']\n if len(feature_borders) == 0:\n continue\n mid_values = 
(feature_borders[:-1] + feature_borders[1:]) / 2\n min_value = feature_borders[0] - 1\n max_value = feature_borders[-1] + 1\n feature_values = np.array([min_value] + list(mid_values) + [max_value])\n for obj in test_pool.get_features():\n obj_variations = np.zeros((len(feature_values), test_pool.num_col()))\n obj_variations[:] = obj.reshape((1, -1))\n obj_variations[:, feature_index] = feature_values\n model_predicts = model.predict(obj_variations)\n prediction_deltas = model_predicts[1:] - model_predicts[:-1]\n assert np.all(prediction_deltas * monotonicity >= 0)\n\n\ndef test_different_formats_of_monotone_constraints():\n eval_path = yatest.common.test_output_path('eval.tsv')\n eval_path_with_monotone1 = yatest.common.test_output_path('eval_monotone1.tsv')\n eval_path_with_monotone2 = yatest.common.test_output_path('eval_monotone2.tsv')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train_with_id.cd'),\n '-i', '20'\n ]\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path])\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone1, '--monotone-constraints', '(0,0,0,1,0,-1)'])\n assert not filecmp.cmp(eval_path_with_monotone1, eval_path)\n\n for constraints in ['3:1,5:-1', 'F0:1,F1:-1']:\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--monotone-constraints', constraints])\n assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)\n\n params_file = yatest.common.test_output_path(\"params.json\")\n for constraints in ['3:1,5:-1', 'F0:1,F1:-1', [0, 0, 0, 1, 0, -1], {3: 1, 5: -1}, {'F0': 1, 'F1': -1}]:\n json.dump({'monotone_constraints': constraints}, open(params_file, 'w'))\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--params-file', params_file])\n assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)\n\n\nclass TestModelWithoutParams(object):\n\n @pytest.fixture(\n params=[\n ('cut-info', 'RMSE'),\n ('cut-params', 'RMSE'),\n ('cut-info', 'QueryRMSE'),\n ('cut-params', 'QueryRMSE'),\n ],\n ids=lambda param: '-'.join(param),\n )\n def model_etc(self, request):\n cut, loss = request.param\n model_json = yatest.common.test_output_path('model.json')\n learn_set = data_file('querywise', 'train')\n test_set = data_file('querywise', 'test')\n cd = data_file('querywise', 'train.cd')\n cmd = (\n '--loss-function', loss,\n '--learn-set', learn_set,\n '--test-set', test_set,\n '--column-description', cd,\n '--iterations', '10',\n '--model-file', model_json,\n '--model-format', 'Json',\n '--use-best-model', 'false'\n )\n execute_catboost_fit('CPU', cmd)\n model = json.load(open(model_json))\n if cut == 'cut-info':\n model.pop('model_info')\n if cut == 'cut-params':\n model['model_info'].pop('params')\n json.dump(model, open(model_json, 'wt'))\n return model_json, learn_set, test_set, cd\n\n def test_ostr(self, model_etc):\n model_json, train_set, test_set, cd = model_etc\n ostr_result = yatest.common.test_output_path('result.txt')\n ostr_cmd = (\n CATBOOST_PATH, 'ostr',\n '--learn-set', train_set,\n '--test-set', test_set,\n '--column-description', cd,\n '--model-file', model_json,\n '--model-format', 'Json',\n '--output-path', ostr_result,\n )\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(ostr_cmd)\n\n @pytest.mark.parametrize('should_fail,fstr_type', [\n (False, 'FeatureImportance'),\n (False, 'PredictionValuesChange'),\n (True, 
'LossFunctionChange'),\n (False, 'ShapValues'),\n ])\n def test_fstr(self, model_etc, fstr_type, should_fail):\n model_json, train_set, _, cd = model_etc\n fstr_result = yatest.common.test_output_path('result.txt')\n fstr_cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', train_set,\n '--column-description', cd,\n '--model-file', model_json,\n '--model-format', 'Json',\n '--output-path', fstr_result,\n '--fstr-type', fstr_type,\n )\n if should_fail:\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n else:\n yatest.common.execute(fstr_cmd)\n\n\ndef test_equal_feature_names():\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd.equal_names'),\n ))\n\n\ndef enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=False):\n if eval_mode == 'OneVsOthers':\n baseline = 'Baseline_set_{set_idx}_fold_{fold_idx}'\n else:\n baseline = 'Baseline_fold_{fold_idx}'\n if not only_baseline:\n testing = 'Testing_set_{set_idx}_fold_{fold_idx}'\n dirs = []\n for set_idx in range(set_count):\n for fold_idx in range(offset, offset + fold_count):\n fold = baseline.format(fold_idx=fold_idx, set_idx=set_idx)\n if fold not in dirs:\n dirs += [fold]\n if not only_baseline:\n fold = testing.format(fold_idx=fold_idx, set_idx=set_idx)\n dirs += [fold]\n return dirs\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('features_to_eval', ['0-6', '0-6;7-13'], ids=['one_set', 'two_sets'])\[email protected]('offset', [0, 2])\ndef test_eval_feature(eval_mode, features_to_eval, offset):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('higgs', 'train_small'),\n '--cd', data_file('higgs', 'train.cd'),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Object',\n '--fold-size', '20',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('offset', [0, 2])\ndef test_eval_feature_empty_feature_set(offset):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n eval_mode = 'OneVsNone'\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('higgs', 'train_small'),\n '--cd', data_file('higgs', 'train.cd'),\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n 
'--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Object',\n '--fold-size', '20',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = 1\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=True):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('fold_size_unit', ['Object', 'Group'])\ndef test_eval_feature_timesplit(eval_mode, fold_size_unit):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n features_to_eval = '2-5;10-15'\n offset = 2\n fold_size = 500\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', fold_size_unit,\n '--fold-size', str(fold_size),\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n '--learn-timestamps', data_file('querywise', 'train.timestamps'),\n '--timesplit-quantile', '0.75'\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('features_to_eval', ['2-5', '2-5;10-15'], ids=['one_set', 'two_sets'])\[email protected]('offset', [0, 2])\ndef test_eval_feature_snapshot(eval_mode, features_to_eval, offset):\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n fold_count = 2\n snapshot_interval = 1\n\n def make_cmd(summary, train_dir):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '200',\n '-T', '4',\n '-w', '0.1',\n '--boost-from-average', 'False',\n '--permutations', '1',\n '--snapshot-interval', str(snapshot_interval),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '--feature-eval-output-file', summary,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Group',\n '--fold-size', '40',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n reference_summary = yatest.common.test_output_path('reference_feature.eval')\n reference_dir = 
yatest.common.test_output_path('reference')\n yatest.common.execute(make_cmd(summary=reference_summary, train_dir=reference_dir))\n\n snapshot_summary = yatest.common.test_output_path('snapshot_feature.eval')\n snapshot_dir = yatest.common.test_output_path('snapshot')\n snapshot = yatest.common.test_output_path('eval_feature.snapshot')\n eval_with_snapshot_cmd = make_cmd(summary=snapshot_summary, train_dir=snapshot_dir) + ('--snapshot-file', snapshot,)\n\n def stop_after_timeout(cmd, timeout):\n try:\n yatest.common.execute(cmd, timeout=timeout)\n except ExecutionTimeoutError:\n pass\n\n resume_from_snapshot_count = 15\n for idx in range(resume_from_snapshot_count):\n timeout = 0.5 if idx % 2 == 0 else snapshot_interval + 0.1\n stop_after_timeout(cmd=eval_with_snapshot_cmd, timeout=timeout)\n yatest.common.execute(['rm', '-rf', snapshot_dir])\n yatest.common.execute(eval_with_snapshot_cmd)\n\n assert filecmp.cmp(reference_summary, snapshot_summary)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n assert filecmp.cmp(pj(reference_dir, output_dir, test_err_log), pj(snapshot_dir, output_dir, test_err_log))\n assert filecmp.cmp(pj(reference_dir, output_dir, fstr_file), pj(snapshot_dir, output_dir, fstr_file))\n\n\ndef test_eval_feature_snapshot_wrong_options():\n summary = yatest.common.test_output_path('eval_feature_summary')\n snapshot = yatest.common.test_output_path('eval_feature_snapshot')\n\n def make_cmd(fold_size):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '600',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--fold-size', str(fold_size),\n '--snapshot-file', snapshot\n )\n\n def stop_after_timeout(cmd, timeout):\n try:\n yatest.common.execute(cmd, timeout=timeout)\n except ExecutionTimeoutError:\n pass\n\n stop_after_timeout(cmd=make_cmd(fold_size=40), timeout=3)\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd(fold_size=20))\n\n\ndef test_eval_feature_parse_timestamps():\n summary = yatest.common.test_output_path('eval_feature_summary')\n\n def make_cmd(timestamps_file):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '600',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--fold-size', '40',\n '--learn-timestamps', data_file('querywise', timestamps_file),\n '--timesplit-quantile', '0.75'\n )\n\n yatest.common.execute(make_cmd('train.timestamps'))\n\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd('train.group_weights'))\n\n\ndef test_eval_feature_relative_fold_size():\n summary = yatest.common.test_output_path('eval_feature_summary')\n\n def make_cmd():\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 
'train.cd'),\n '-i', '100',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--relative-fold-size', '0.1',\n )\n\n yatest.common.execute(make_cmd())\n\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd() + ('--fold-size', '40',))\n\n\nTEST_METRIC_DESCRIPTION_METRICS_LIST = ['Logloss', 'Precision', 'AUC']\[email protected]('dataset_has_weights', [True, False], ids=['dataset_has_weights=True', 'dataset_has_weights=False'])\[email protected]('eval_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,\n ids=['eval_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])\[email protected]('eval_metric_use_weights', [True, False, None],\n ids=['eval_weights=' + str(mode) for mode in [True, False, None]])\[email protected]('custom_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,\n ids=['custom_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])\[email protected]('custom_metric_use_weights', [True, False, None],\n ids=['custom_weights=' + str(mode) for mode in [True, False, None]])\ndef test_metric_description(dataset_has_weights, eval_metric_loss, eval_metric_use_weights, custom_metric_loss, custom_metric_use_weights):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n if dataset_has_weights:\n train_pool_filename = data_file('adult_weight', 'train_weight')\n test_pool_filename = data_file('adult_weight', 'test_weight')\n pool_cd_filename = data_file('adult_weight', 'train.cd')\n else:\n train_pool_filename = data_file('adult', 'train_small')\n test_pool_filename = data_file('adult', 'test_small')\n pool_cd_filename = data_file('adult', 'train.cd')\n\n eval_metric = eval_metric_loss\n if eval_metric == 'AUC':\n eval_metric += ':hints=skip_train~false'\n if eval_metric_use_weights is not None:\n eval_metric += ';' if eval_metric_loss == 'AUC' else ':'\n eval_metric += 'use_weights=' + str(eval_metric_use_weights)\n\n custom_metric = custom_metric_loss\n if custom_metric == 'AUC':\n custom_metric += ':hints=skip_train~false'\n if custom_metric_use_weights is not None:\n custom_metric += ';' if custom_metric_loss == 'AUC' else ':'\n custom_metric += 'use_weights=' + str(custom_metric_use_weights)\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', train_pool_filename,\n '-t', test_pool_filename,\n '--cd', pool_cd_filename,\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-metric', eval_metric,\n '--custom-metric', custom_metric,\n )\n should_fail = not dataset_has_weights and (eval_metric_use_weights is not None or custom_metric_use_weights is not None)\n try:\n execute_catboost_fit('CPU', cmd)\n except ExecutionError:\n assert should_fail\n return\n for filename in [learn_error_path, test_error_path]:\n with open(filename, 'r') as f:\n metrics_descriptions = f.readline().split('\\t')[1:] # without 'iter' column\n metrics_descriptions[-1] = metrics_descriptions[-1][:-1] # remove '\\n' symbol\n unique_metrics_descriptions = set([s.lower() for s in metrics_descriptions])\n assert len(metrics_descriptions) == len(unique_metrics_descriptions)\n expected_objective_metric_description = 'Logloss'\n\n if dataset_has_weights:\n expected_eval_metric_description = \\\n eval_metric_loss if 
eval_metric_use_weights is None else eval_metric_loss + ':use_weights=' + str(eval_metric_use_weights)\n\n if custom_metric_loss == 'AUC':\n expected_custom_metrics_descriptions = \\\n ['AUC' if custom_metric_use_weights is None else 'AUC:use_weights=' + str(custom_metric_use_weights)]\n else:\n expected_custom_metrics_descriptions = (\n [custom_metric_loss + ':use_weights=False', custom_metric_loss + ':use_weights=True']\n if custom_metric_use_weights is None\n else [custom_metric_loss + ':use_weights=' + str(custom_metric_use_weights)])\n else:\n expected_eval_metric_description = eval_metric_loss\n expected_custom_metrics_descriptions = [custom_metric_loss]\n assert unique_metrics_descriptions == set(s.lower() for s in [expected_objective_metric_description] + [expected_eval_metric_description] + expected_custom_metrics_descriptions)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_leafwise_scoring():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--learn-err-log', learn_error_path\n ]\n execute_catboost_fit('CPU', cmd)\n learn_errors_log = open(learn_error_path).read()\n execute_catboost_fit('CPU', cmd + ['--dev-leafwise-scoring'])\n new_learn_errors_log = open(learn_error_path).read()\n assert new_learn_errors_log == learn_errors_log\n\n\ndef test_group_features():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_predictions_path = yatest.common.test_output_path('test_predictions.tsv')\n model_path = yatest.common.test_output_path('model.bin')\n fit_cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '-m', model_path,\n '--learn-err-log', learn_error_path\n ]\n execute_catboost_fit('CPU', fit_cmd)\n calc_cmd = [\n CATBOOST_PATH,\n 'calc',\n '-m', model_path,\n '--input-path', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '--output-path', test_predictions_path,\n '--output-columns', 'Probability'\n ]\n yatest.common.execute(calc_cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_predictions_path)]\n\n\ndef test_model_sum():\n model_path = yatest.common.test_output_path('model.bin')\n model_eval = yatest.common.test_output_path('model_eval.txt')\n execute_catboost_fit('CPU', [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '10',\n '-m', model_path,\n '-t', data_file('adult', 'test_small'),\n '--eval-file', model_eval,\n '--output-columns', 'SampleId,RawFormulaVal',\n ])\n\n sum_path = yatest.common.test_output_path('sum.bin')\n yatest.common.execute([\n CATBOOST_PATH,\n 'model-sum',\n '--model-with-weight', '{}={}'.format(model_path, 0.75),\n '--model-with-weight', '{}={}'.format(model_path, 0.25),\n '--output-path', sum_path,\n ])\n\n sum_eval = yatest.common.test_output_path('sum_eval.txt')\n yatest.common.execute([\n CATBOOST_PATH,\n 'calc',\n '-m', sum_path,\n '--input-path', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '--output-path', sum_eval,\n ])\n yatest.common.execute(get_limited_precision_dsv_diff_tool(0) + [model_eval, sum_eval])\n\n\ndef test_external_feature_names():\n fstr_cd_with_id_path = 
yatest.common.test_output_path('fstr_cd_with_id.tsv')\n fstr_cd_without_id_path = yatest.common.test_output_path('fstr_cd_without_id.tsv')\n\n for cd_has_feature_names in [False, True]:\n if cd_has_feature_names:\n cd_file = data_file('adult', 'train_with_id.cd')\n fstr_path = fstr_cd_with_id_path\n else:\n cd_file = data_file('adult', 'train.cd')\n fstr_path = fstr_cd_without_id_path\n\n cmd = (\n '--loss-function', 'Logloss',\n '--target-border', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', cd_file,\n '-i', '10',\n '-T', '4',\n '--feature-names-path', data_file('adult', 'feature_names'),\n '--fstr-type', 'FeatureImportance',\n '--fstr-file', fstr_path\n )\n execute_catboost_fit('CPU', cmd)\n\n assert filecmp.cmp(fstr_cd_with_id_path, fstr_cd_without_id_path)\n\n return [local_canonical_file(fstr_cd_with_id_path)]\n\n\ndef test_diffusion_temperature():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--langevin', 'True',\n '--diffusion-temperature', '1000',\n '--eval-file', output_eval_path\n ]\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])\ndef test_model_shrink_correct(config):\n mode, rate, lr = config\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--eval-file', output_eval_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--learning-rate', str(lr)\n ]\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('config', [('Constant', 20, 0.1), ('Constant', 10, 0.1), ('Decreasing', 2, 0.1)])\ndef test_model_shrink_incorrect(config):\n mode, rate, lr = config\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--eval-file', output_eval_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--learning-rate', str(lr)\n ]\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('average', ['Macro', 'Micro', 'Weighted'])\ndef test_total_f1_params(average):\n return do_test_eval_metrics(\n metric='TotalF1:average=' + average,\n metric_period='1',\n train=data_file('cloudness_small', 'train_small'),\n test=data_file('cloudness_small', 'test_small'),\n cd=data_file('cloudness_small', 'train.cd'),\n loss_function='MultiClass'\n )\n\n\ndef test_tweedie():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n cmd = (\n '--loss-function', 'Tweedie:variance_power=1.5',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '-i', '100',\n '--learning-rate', '0.5',\n '--learn-err-log', learn_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email 
protected]('separator_type', SEPARATOR_TYPES)\[email protected]('feature_estimators', TEXT_FEATURE_ESTIMATORS)\ndef test_fit_binclass_with_text_features(boosting_type, separator_type, feature_estimators):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('separator_type', SEPARATOR_TYPES)\[email protected]('feature_estimators', TEXT_FEATURE_ESTIMATORS)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_fit_multiclass_with_text_features(separator_type, feature_estimators, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n 
'--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('grow_policy', GROW_POLICIES)\ndef test_shrink_model_with_text_features(grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n loss_function = 'MultiClass'\n feature_estimators = 'BoW,NaiveBayes,BM25'\n\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--column-description', cd_file,\n '--text-processing', json.dumps(text_processing),\n '--grow-policy', grow_policy,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'true',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\nDICTIONARIES_OPTIONS = [\n {\n \"Simple\": \"token_level_type=Word:occurrence_lower_bound=50\"\n },\n {\n \"UniGramOccur5\": \"occurrence_lower_bound=5:token_level_type=Letter\",\n \"BiGramOccur2\": \"occurrence_lower_bound=2:gram_order=2:token_level_type=Letter\",\n \"WordDictOccur1\": \"occurrence_lower_bound=1:token_level_type=Word\",\n \"WordDictOccur2\": \"occurrence_lower_bound=2:token_level_type=Word\",\n \"WordDictOccur3\": \"occurrence_lower_bound=3:token_level_type=Word\"\n },\n {\n \"Unigram\": \"gram_order=1:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Bigram\": \"gram_order=2:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Trigram\": \"gram_order=3:token_level_type=Letter:occurrence_lower_bound=50\"\n },\n {\n \"Letter\": \"token_level_type=Letter:occurrence_lower_bound=50\",\n \"Word\": \"token_level_type=Word:occurrence_lower_bound=50\"\n }\n]\n\n\[email protected]('dictionaries', DICTIONARIES_OPTIONS)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_text_processing_options(dictionaries, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n 
learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n dictionaries = ','.join([key + ':' + value for key, value in dictionaries.items()])\n feature_estimators = 'BM25,BoW,NaiveBayes'\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--column-description', cd_file,\n '--dictionaries', dictionaries,\n '--feature-calcers', feature_estimators,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_fit_with_per_feature_text_options(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n text_processing = {\n 'tokenizers': [\n {'tokenizer_id': 'Space', 'delimiter': ' '},\n {'tokenizer_id': 'Comma', 'delimiter': ','},\n ],\n 'dictionaries': [\n {'dictionary_id': 'Word', 'token_level_type': 'Word', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Bigram', 'token_level_type': 'Word', 'gram_order': '2', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Trigram', 'token_level_type': 'Letter', 'gram_order': '3', 'occurrence_lower_bound': '50'},\n ],\n 'feature_processing': {\n '0': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n '1': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes', 'BM25']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Trigram'], 'feature_calcers': ['BoW', 'BM25']},\n ],\n '2': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word', 'Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n }\n }\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, 
calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n" ]
[ [ "numpy.dot", "numpy.hstack", "numpy.log", "numpy.random.random", "numpy.allclose", "numpy.random.seed", "numpy.arange", "numpy.all", "numpy.concatenate", "numpy.random.randn", "numpy.mean", "numpy.float32", "numpy.savetxt", "numpy.array", "numpy.random.RandomState", "numpy.loadtxt" ] ]
lyalcorn/LUCI
[ "ed9dde4286ca80694f53a3a50e1da2073a92ff76" ]
[ "LUCI/LuciFitParameters.py" ]
[ "\"\"\"\nIn this file we have the functions used to calculate the velocity, the velocity dispersion (broadening),\nand the flux as well as their uncertainties.\n\"\"\"\nimport math\nimport numpy as np\nfrom scipy import special as sps\n\n# Define Constants #\nSPEED_OF_LIGHT = 299792 # km/s\nFWHM_COEFF = 2.*math.sqrt(2. * math.log(2.))\n\n\ndef calculate_vel(ind, lines, fit_sol, line_dict):\n \"\"\"\n Calculate velocity.\n\n .. math::\n v = SPEED_OF_LIGHT*(l\\_calc - l\\_rest)/l\\_calc\n\n Where :math:`l\\_calc = 1e7/fit\\_vel` and :math:`l\\_rest` is the rest wavelength of the line.\n :math:`fit\\_vel` is the shifted position of the line in units of cm-1.\n\n Args:\n ind: Index of line in lines\n lines:Lines to be fit (e.x. ['Halpha'])\n fit_sol: Solution from fitting algorithm\n line_dict: Dictionary of Line Names and their wavelengths in nm\n Return:\n Velocity of the Halpha line in units of km/s\n \"\"\"\n line_name = lines[ind]\n l_calc = 1e7 / fit_sol[3*ind+1]\n l_shift = (l_calc - line_dict[line_name]) / line_dict[line_name]\n v = SPEED_OF_LIGHT * l_shift\n return v\n\n\ndef calculate_vel_err(ind, lines, fit_sol, line_dict, uncertainties):\n \"\"\"\n Calculate velocity error\n\n We simply take the difference between the velocities with and without the uncertainty.\n\n Args:\n ind: Index of line in lines\n lines:Lines to be fit (e.x. ['Halpha'])\n fit_sol: Solution from fitting algorithm\n line_dict: Dictionary of Line Names and their wavelengths in nm\n uncertaintes: Uncertainties from fitting algoritm\n Return:\n Velocity of the Halpha line in units of km/s\n \"\"\"\n line_name = lines[ind]\n l_calc1 = 1e7 / (fit_sol[3*ind+1])\n l_calc2 = 1e7 / (fit_sol[3*ind+1] + uncertainties[3*ind+1])\n #print()\n #print(l_calc1, l_calc2)\n #print(l_calc1, l_calc2)\n l_shift1 = (l_calc1 - line_dict[line_name]) / line_dict[line_name]\n l_shift2 = (l_calc2 - line_dict[line_name]) / line_dict[line_name]\n #print(l_shift1, l_shift2)\n v1 = SPEED_OF_LIGHT * l_shift1\n v2 = SPEED_OF_LIGHT * l_shift2\n #print(v1, v2)\n return np.abs(v1 - v2)\n\n\ndef calculate_broad(ind, fit_sol, axis_step):\n \"\"\"\n Calculate velocity dispersion\n\n .. math::\n \\sigma = (SPEED_OF_LIGHT*fit\\_\\sigma * axis\\_step)/(fit\\_vel)\n\n where :math:`fit\\_sigma` is the gaussian broadening parameter found in the fit, :math:`axis\\_step` is defined in the HowLuciWorks section,\n and :math:`fit\\_vel` is the shifted position of the line in units of cm-1.\n\n Args:\n ind: Index of line in lines\n fit_sol: Solution from fitting algorithm\n axis_step: Step due to correction factor (see LuciFit.calculate_correction)\n Return:\n Velocity Dispersion of the Halpha line in units of km/s\n \"\"\"\n #broad = (SPEED_OF_LIGHT * fit_sol[3*ind+2] * axis_step) / fit_sol[3*ind+1]\n #return np.abs(broad)/abs(2.*np.sqrt(2. 
* np.log(2.))) # Add FWHM correction\n broad = (SPEED_OF_LIGHT * fit_sol[3*ind+2]) / fit_sol[3*ind+1]\n return np.abs(broad)#/FWHM_COEFF # Add FWHM correction\n\n\ndef calculate_broad_err(ind, fit_sol, axis_step, uncertainties):\n \"\"\"\n Calculate velocity dispersion error\n We simply take the difference between the velocity dispersions with and without the uncertainty.\n\n Args:\n ind: Index of line in lines\n fit_sol: Solution from fitting algorithm\n axis_step: Step due to correction factor (see LuciFit.calculate_correction)\n uncertaintes: Uncertainties from fitting algorithm\n Return:\n Velocity Dispersion of the Halpha line in units of km/s\n \"\"\"\n broad1 = (SPEED_OF_LIGHT * fit_sol[3*ind+2]* axis_step) / fit_sol[3*ind+1]\n \n broad2 = (SPEED_OF_LIGHT * (fit_sol[3*ind+2]+uncertainties[3*ind+2])* axis_step) / (fit_sol[3*ind+1]+uncertainties[3*ind+1])\n \n return np.abs(broad1-broad2)\n\n\ndef calculate_flux(line_amp, line_sigma, model_type, sinc_width):\n \"\"\"\n Calculate flux value given fit of line\n See HowLuciWorks for calculations\n\n\n Args:\n line_amp: Amplitude of the line (un-normalized)\n line_sigma: Sigma of the line fit\n model_type: Fitting function (i.e. 'gaussian', 'sinc', or 'sincgauss')\n sinc_width: Fixed with of the sinc function\n Return:\n Flux of the provided line in units of erg/s/cm-2\n \"\"\"\n flux = 0.0 # Initialize\n if model_type == 'gaussian':\n flux = (1.20671/FWHM_COEFF) * np.sqrt(2 * np.pi) * line_amp * line_sigma\n elif model_type == 'sinc':\n flux = np.pi *np.sqrt(np.pi) * line_amp * line_sigma\n elif model_type == 'sincgauss':\n flux = (1.20671/(np.pi*FWHM_COEFF)) * line_amp * ((np.sqrt(2*np.pi)*line_sigma) / (sps.erf(line_sigma / (np.sqrt(2) * sinc_width))))\n else:\n print(\"ERROR: INCORRECT FIT FUNCTION\")\n return flux\n\n\ndef calculate_flux_err(ind, fit_sol, uncertainties, model_type, sinc_width):\n \"\"\"\n Calculate flux error\n\n We simply take the difference between the fluxes with and without the uncertainty.\n\n\n Args:\n ind: Index of line in lines\n fit_sol: Solution from fitting algorithm\n uncertaintes: Uncertainties from fitting algoritm\n model_type: Fitting function (i.e. 'gaussian', 'sinc', or 'sincgauss')\n sinc_width: Fixed with of the sinc function\n\n Return:\n Error of the provided line in units of ergs/s/cm-2\n \"\"\"\n\n p0 = fit_sol[3*ind]\n p2 = fit_sol[3*ind + 2]\n p0_err = uncertainties[3*ind]\n p2_err = uncertainties[3*ind + 2]\n\n\n if model_type == 'gaussian':\n flux_err = np.sqrt(2*np.pi) * calculate_flux(p0 , p2, model_type, sinc_width) * \\\n np.sqrt( (p0_err / p0 )**2 + (p2_err / p2)**2 )\n\n elif model_type == 'sinc':\n flux_err = np.sqrt(np.pi) * calculate_flux(p0 , p2, model_type, sinc_width) * \\\n np.sqrt( (p0_err / p0 )**2 + (p2_err / p2)**2 )\n\n elif model_type == 'sincgauss':\n erf_func = sps.erf(p2 / (np.sqrt(2)*sinc_width)) #Shortcut for the error function\n flux_err = np.sqrt(2*np.pi) * np.sqrt( (p2*p0_err / erf_func)**2 + \\\n ( p0*p2_err * (erf_func - (np.sqrt(2)*p2*np.exp(-(p2/(np.sqrt(2)*sinc_width))**2)/np.sqrt(np.pi))) / erf_func**2 )**2 )\n\n else:\n print('The fit function you have entered, %s, does not exist!'%model_type)\n print('The program is terminating!')\n exit()\n\n return flux_err\n" ]
[ [ "numpy.sqrt", "numpy.abs" ] ]
trohit/stackedBarChart
[ "8e8c2ee542f87e8ad7fefe648dec712abeaef1fe" ]
[ "graphcsv.py" ]
[ "#!/usr/bin/env python3.7\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 4 10:15:48 2020\n@author: rohit\n\npython3.7 ./graphcsv.py -i /tmp/linuxmem_a.csv --offset 5 --vxdate \"Apr 06 11:04:50\" --vpct 20 --vtitle \"report gen started\"\n\ndate,drname,cpupct,curmem,maxmem,mempct,netrx,nettx\nApr 06 11:00:37,ubuntu_docker,26.52,5.019,28G,17.93,0B,0B\nApr 06 11:01:40,ubuntu_docker,0.22,5.019,28G,17.92,0B,0B\nApr 06 11:02:44,ubuntu_docker,0.20,5.019,28G,17.93,0B,0B\nApr 06 11:03:47,ubuntu_docker,0.22,5.019,28G,17.93,0B,0B\n\n\"\"\"\nimport csv\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\nimport dateutil\nimport sys\nimport os\nimport argparse\n\ndef extract_datetime(dt_str):\n print(\"parsing date:\" + dt_str)\n dt = dateutil.parser.parse(dt_str)\n return dt\n\ndef is_header_in_row(row):\n if row[0].isalpha():\n return True\n\ndef get_ll_by_csv(filename):\n hdr = None\n if not os.path.isfile(filename):\n print(\"Unable to open file \" + str(filename))\n sys.exit(1)\n\n with open(filename) as f:\n reader = csv.reader(f, delimiter=\",\")\n data = list(reader)\n if is_header_in_row(data[0]):\n hdr = data.pop(0)\n #import pdb; pdb.set_trace()\n return hdr, data\n\ndef get_time_and_val_lists(ll, field_offset_to_graph):\n # ll is a list of sub-lists where each sub-list is [dt,val]\n dl = []\n vl = []\n for sl in ll:\n # offset 0 is the date list\n #print(\"sl:\" + str(sl))\n #import pdb; pdb.set_trace()\n dl.append(sl[0])\n vl.append(float(sl[field_offset_to_graph]))\n return (dl, vl)\n\n #disp_chart(date_list, val_list, hdr[0], hdr[field_offset_to_graph], fname, vxdate, vtitle, vpct)\ndef disp_chart(xl, yl, x_label, y_label, title, vx_date=None, vx_label=None, vpct=20):\n import pdb; pdb.set_trace()\n x = [ extract_datetime(i) for i in xl ]\n y = [float(i) for i in yl]\n# import pdb; pdb.set_trace()\n print(\"x:\" + str(x))\n print(\"y:\" + str(y))\n\n # embellishments\n ax=plt.gca()\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.title.set_weight('bold')\n ax.grid('on')\n ax.xaxis.get_label().set_style('italic')\n ax.xaxis.get_label().set_size(10)\n ax.yaxis.get_label().set_style('italic')\n ax.yaxis.get_label().set_size(10)\n ## show bluish tint under graph\n ax.fill_between(x, y)\n plt.tight_layout()\n #plt.minorticks_on()\n plt.xticks(rotation=50)\n\n ymin, ymax = ax.get_ylim()\n\n\n #ax.axvline(\"Apr 06 19:38:10\", color=\"red\", linestyle=\"--\")\n #plt.axvline(extract_datetime(\"Apr 12 16:28:41\"), color='r', linewidth=2.0, linestyle='--')\n #plt.text(extract_datetime(\"Apr 12 16:28:20\"),(ymax-ymin)/4,'16:28:41 rx nw hb slow 4123ms',rotation=90,color='r')\n\n #def disp_chart(xl, yl, x_label, y_label, title, vx_date=None, vx_label=None, vpct=0.2):\n if vx_date is not None:\n plt.axvline(extract_datetime(vx_date), color='r', linewidth=2.0, linestyle='--')\n if vx_label is not None:\n plt.text(extract_datetime(vx_date),(ymax-ymin)*(int(vpct)/100),vx_label, rotation=90,color='r')\n \n # set the basic properties\n #plt.plot(x,y, \"or-\")\n plt.plot(x,y, \"ok-\")\n # beautify the x-labels\n plt.gcf().autofmt_xdate()\n myFmt = md.DateFormatter('%H:%M')\n plt.gca().xaxis.set_major_formatter(myFmt)\n #plt.show()\n print(\"Saving to :\" + title + '.png')\n plt.savefig(title + '.png')\n plt.close()\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser._action_groups.pop()\n required = parser.add_argument_group('required arguments')\n optional = parser.add_argument_group('optional arguments')\n 
required.add_argument('-i', required=True, help=\"<input_csv_file>\")\n #optional.add_argument('-o', help=\"<out_png_file>\")\n #optional.add_argument('--vxdate', metavar='vx timeline')\n optional.add_argument('--offset', help=\"offset in csvfile(2 onwards)\",default=2, type=int)\n optional.add_argument('--vxdate',help=\"date like 'Apr 19 13:23:31'\")\n optional.add_argument('--vpct', metavar=\"<0-100>(in percent)\")\n optional.add_argument('--vtitle')\n return parser.parse_args()\n\n#main\nif __name__ == \"__main__\":\n dd = parse()\n path = dd.i\n field_offset_to_graph = dd.offset\n vpct = dd.vpct\n vxdate = dd.vxdate\n vtitle = dd.vtitle\n #matplotlib.use(\"agg\")\n #import pdb; pdb.set_trace()\n #defaults\n\n #field_offset_to_graph = 2\n #argc = len(sys.argv)\n ##import pdb; pdb.set_trace()\n #if argc > 1:\n # path = sys.argv[1]\n #else:\n # #path = input(\"Please enter a csv file to graph:\\n\")\n # print(\"Usage:\" + sys.argv[0] + \" <file> [offset_starts_from_2]\\n\")\n # sys.exit(1)\n # path = \"data.csv\"\n #if argc > 2:\n # # first field is datetime which is at offset 0\n # field_offset_to_graph = sys.argv[2]\n # print(\"using offset:\" + str(field_offset_to_graph))\n # field_offset_to_graph = int(field_offset_to_graph)\n\n print(os.getcwd())\n hdr, ll = get_ll_by_csv(path)\n fname = os.path.basename(path)\n date_list, val_list = get_time_and_val_lists(ll, field_offset_to_graph)\n #import pdb; pdb.set_trace()\n# =============================================================================\n# import pdb; pdb.set_trace()\n# date_list=[\"Apr 10 09:22:41\", \"Apr 10 09:32:41\", \"Apr 10 09:35:41\"]\n# val_list = [50,20,30]\n# hdr= [\"hdra\", \"hdrb\"]\n# =============================================================================\n #disp_chart(date_list, val_list, hdr[0], hdr[2], fname)\n #disp_chart(date_list, val_list, hdr[0], hdr[field_offset_to_graph], fname)\n#def disp_chart(xl, yl, x_label, y_label, title, vx_date=None, vx_label=None, vpct=0.2):\n disp_chart(date_list, val_list, hdr[0], hdr[field_offset_to_graph], fname, vxdate, vtitle, vpct)\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gcf", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "matplotlib.pyplot.xticks" ] ]
soothing35/cryoEM_annealling
[ "212beaf23fe287d69571582fdbadc736f1abf908" ]
[ "Pymol_scripts/Draw_rotation_axis.py" ]
[ "'''\nModified from:\nhttp://pymolwiki.org/index.php/RotationAxis\n\nAn extra function, auto_draw_axis, was added, to calculate the rotational angles among bunch of structures (line 267)\n'''\n\nfrom __future__ import print_function\n\n__author__ = 'Pablo Guardado Calvo'\n__version__ = '0.1'\n__email__ = 'pablo.guardado (at) gmail.com'\n\n\n\nfrom pymol import cmd, cgo\nimport math\nimport numpy\n\n\ndef transf_matrix(chA, chB):\n '''\n DESCRIPTION\n\n Align two selections/chains, and returns the transformation matrix. I used super to carry out the alignment, likely is possible to use cmd.align and\n is going to be a bit faster, but I think is not going to work well with low-sequence-identity alignments.\n\n '''\n cmd.create('working', chA)\n cmd.super('working', chB)\n T = cmd.get_object_matrix('working')\n global cmW\n cmW = center_of_Mass('working')\n cmd.delete('working')\n return T\n\n\ndef center_of_Mass(selection):\n '''\n DESCRIPTION\n\n Calculates the center of mass of a given selection\n\n '''\n model= cmd.get_model(selection)\n x,y,z=0,0,0\n totmass = 0\n for a in model.atom:\n m = a.get_mass()\n x+= a.coord[0]*m\n y+= a.coord[1]*m\n z+= a.coord[2]*m\n totmass += m\n cM = numpy.array([x/totmass, y/totmass, z/totmass])\n return cM\n\ndef direction_cosines(chA, chB):\n '''\n DESCRIPTION\n\n Calculates the direction cosines of the rotation axis from the transformation matrix.\n\n '''\n t=transf_matrix(chA, chB)\n a1= (t[6]-t[9])/math.sqrt((t[6]-t[9])**2+(t[8]-t[2])**2+(t[1]-t[4])**2)\n b1= (t[8]-t[2])/math.sqrt((t[6]-t[9])**2+(t[8]-t[2])**2+(t[1]-t[4])**2)\n c1= (t[1]-t[4])/math.sqrt((t[6]-t[9])**2+(t[8]-t[2])**2+(t[1]-t[4])**2)\n axis = numpy.array([a1, b1, c1])\n return axis\n\ndef angle_axis(chA, chB):\n '''\n DESCRIPTION\n\n Calculates the rotation angle from the transformation matrix\n\n '''\n t=transf_matrix(chA, chB)\n angle_rad = math.acos((t[0]+t[5]+t[10]-1)/2)\n return angle_rad\n\ndef center_of_rot(chA, chB):\n '''\n DESCRIPTION\n\n Calculates the center of rotation of the axis\n\n '''\n cm_Working=center_of_Mass(chA)\n cm_Reference=cmW\n u=direction_cosines(chA, chB)[0]\n u2=u**2\n v=direction_cosines(chA, chB)[1]\n v2=v**2\n w=direction_cosines(chA, chB)[2]\n w2=w**2\n cos_theta=numpy.cos(angle_axis(chA, chB))\n sin_theta=numpy.sin(angle_axis(chA, chB))\n fx=cm_Working[0]\n fy=cm_Working[1]\n fz=cm_Working[2]\n x=cm_Reference[0]\n y=cm_Reference[1]\n z=cm_Reference[2]\n\n T11 = (v2 + w2)*(1-cos_theta)\n T12 = (w*sin_theta)-((u*v)*(1-cos_theta))\n T13 = -(v*sin_theta)-(u*w*(1-cos_theta))\n T14 =fx-((((u2*x)+((u*v)*y)+((u*w)*z))*(1-cos_theta))+(x*cos_theta)+((-(w*y)+(v*z))*sin_theta))\n T21 = -(w*sin_theta)-((u*v)*(1-cos_theta))\n T22 = (u2 + w2)*(1-cos_theta)\n T23 = (u*sin_theta)-(w*v*(1-cos_theta))\n T24 =fy-((((v*u*x)+(v2*y)+(v*w*z))*(1-cos_theta))+(y*cos_theta)+(((w*x)-(u*z))*sin_theta))\n T31 = (v*sin_theta)-(w*u*(1-cos_theta))\n T32 = -(u*sin_theta)-(w*v*(1-cos_theta))\n T33 = (u2 + v2)*(1-cos_theta)\n T34 =fz-(((((u*x)*w)+((v*y)*w)+(w2*z))*(1-cos_theta))+(z*cos_theta)+((-(v*x)+(u*y))*sin_theta))\n\n term_lig = numpy.array([[T11, T12, T13], [T21, T22, T23], [T31, T32, T33]])\n term_ind = numpy.array([T14, T24, T34])\n\n sol_lstsq = numpy.linalg.lstsq(term_lig, term_ind)\n sol = sol_lstsq[0]\n\n return sol\n\ndef nearest_point_to_axis(chA, chB):\n '''\n DESCRIPTION\n\n Calculates the nearest point of the axis, I use it to create the cgo object.\n\n '''\n cmA=center_of_Mass(chA)\n cmB=cmW\n cmAver=(cmB+cmA)/2\n vector=numpy.array([(cmB[0]-cmA[0]), (cmB[1]-cmA[1]), 
(cmB[2]-cmA[2])])\n moduli_vector=numpy.linalg.norm(vector)\n vector_director=numpy.array([(cmB[0]-cmA[0])/moduli_vector, (cmB[1]-cmA[1])/moduli_vector, (cmB[2]-cmA[2])/moduli_vector])\n axis1= direction_cosines(chA, chB)\n sol=center_of_rot(chA, chB)\n term_lig2=numpy.array([[vector_director[0], vector_director[1], vector_director[2], 0], [1, 0, 0, -axis1[0]], [0, 1, 0, -axis1[1]], [0, 0, 1, -axis1[2]]])\n term_ind2=numpy.array([(cmAver[0]*(vector_director[0]))+(cmAver[1]*(vector_director[1]))+(cmAver[2]*(vector_director[2])), sol[0], sol[1], sol[2]])\n term_j=(cmAver[0]*vector_director[0])+(cmAver[1]*vector_director[1])+(cmAver[2]*vector_director[2])\n suma_vect_director=vector_director+axis1\n term_ji=(cmAver[0]*suma_vect_director[0])+(cmAver[1]*suma_vect_director[1])+(cmAver[2]*suma_vect_director[2])\n if numpy.dot(vector_director, axis1) != 0:\n t = ((-numpy.dot(vector_director, sol))+term_j)/numpy.dot(vector_director, axis1)\n else:\n t = ((-numpy.dot(suma_vect_director, sol))+term_ji)/numpy.dot(suma_vect_director, axis1)\n p = [sol[0]+axis1[0]*t, sol[1]+axis1[1]*t, sol[2]+axis1[2]*t]\n\n return p\n\ndef proyeccion_centroide(selection, chA, chB):\n '''\n DESCRIPTION\n\n Calculates the proyection of the mass center for the working molecule before being aligned with the reference molecule. For representation purpuses.\n\n '''\n axis1=numpy.array([direction_cosines(chA, chB)[0], direction_cosines(chA, chB)[1], direction_cosines(chA, chB)[2]])\n sol=center_of_rot(chA, chB)\n cmSel=center_of_Mass(selection)\n t_cen=numpy.dot(cmSel, axis1)-numpy.dot(sol, axis1)\n proy_cen= [sol[0]+(t_cen*axis1[0]), sol[1]+(t_cen*axis1[1]), sol[2]+(t_cen*axis1[2])]\n return proy_cen\n\ndef proyeccion_centroide_working(chA, chB):\n '''\n DESCRIPTION\n\n Calculates the proyection of the mass center for working molecule after being aligned with the reference molecule. 
For representation purpuses.\n\n '''\n axis1=numpy.array([direction_cosines(chA, chB)[0], direction_cosines(chA, chB)[1], direction_cosines(chA, chB)[2]])\n sol=center_of_rot(chA, chB)\n cmSel=cmW\n t_cen=numpy.dot(cmSel, axis1)-numpy.dot(sol, axis1)\n proy_cen= [sol[0]+(t_cen*axis1[0]), sol[1]+(t_cen*axis1[1]), sol[2]+(t_cen*axis1[2])]\n return proy_cen\n\ndef print_information(T, axis1, angle_degrees, moduli_vector, obj, x1, y1, z1, x2, y2, z2, w, r1, g1, b1, r2, g2, b2, modu_tr=0):\n '''\n DESCRIPTION\n\n Print to basic information to the screen.\n '''\n print(\"#################################################################################################\")\n print(\"Transformation (TTT) matrix\")\n print(\"%8.2f, %8.2f, %8.2f, %8.2f\" %(T[0], T[1], T[2], T[3]))\n print(\"%8.2f, %8.2f, %8.2f, %8.2f\" %(T[4], T[5], T[6], T[7]))\n print(\"%8.2f, %8.2f, %8.2f, %8.2f\" %(T[8], T[9], T[10], T[11]))\n print(\"%8.2f, %8.2f, %8.2f, %8.2f\" %(T[12], T[13], T[14], T[15]))\n print(\".................................................................................................\")\n print(\"\")\n print(\"The direction cosines of the rotation axis is: %3.2f, %3.2f, %3.2f\" %(axis1[0], axis1[1], axis1[2]))\n print(\"The angle of rotation is %3.2f degrees\" %(angle_degrees))\n print(\"The lenght of the translation vector along the rotation axis is %3.2f Angstroms\" %(modu_tr))\n print(\"The distance between mass centers is %3.2f Angstroms\" %(moduli_vector))\n print(\".................................................................................................\")\n print(\"\")\n print(\"Lines to be used in a pml script to generate the axis\")\n print(\"\")\n print(\"CYLINDER, %3.2f, %3.2f, %3.2f, %3.2f, %3.2f, %3.2f, %3.2f, %3.2f, %3.2f, %3.2f, %3.2f, %3.2f, %3.2f, 0.0\" %(x1, y1, z1, x2, y2, z2, w, r1, g1, b1, r2, g2, b2))\n print(\"cmd.load_cgo(obj, %3.2f)\" %(angle_degrees))\n print(\"\")\n print(\"#################################################################################################\")\n\ndef draw_axis(chA, chB, scale_factor=20, w=0.6, r1=1, g1=1, b1=1, r2=1, g2=0, b2=0):\n T = transf_matrix(chA, chB)\n angle=angle_axis(chA, chB)\n angle_degrees=(angle*180)/math.pi\n axis1=[direction_cosines(chA, chB)[0], direction_cosines(chA, chB)[1], direction_cosines(chA, chB)[2]]\n p = nearest_point_to_axis(chA, chB)\n x1, y1, z1 = p[0] + (3*scale_factor*axis1[0]), p[1] + (3*scale_factor*axis1[1]), p[2] + (3*scale_factor*axis1[2])\n x2, y2, z2 = p[0] - (3*scale_factor*axis1[0]), p[1] - (3*scale_factor*axis1[1]), p[2] - (3*scale_factor*axis1[2])\n obj = [cgo.CYLINDER, x1, y1, z1, x2, y2, z2, w, r1, g1, b1, r2, g2, b2, 0.0]\n cmd.load_cgo(obj, angle_degrees)\n\n cmA=center_of_Mass(chA)\n cmB=cmW\n cmAver=(cmB+cmA)/2\n vector=numpy.array([(cmB[0]-cmA[0]), (cmB[1]-cmA[1]), (cmB[2]-cmA[2])])\n moduli_vector=numpy.linalg.norm(vector)\n vector_director=numpy.array([(cmB[0]-cmA[0])/moduli_vector, (cmB[1]-cmA[1])/moduli_vector, (cmB[2]-cmA[2])/moduli_vector])\n pC_A = proyeccion_centroide(chA, chA, chB)\n pC_B = proyeccion_centroide_working(chA, chB)\n\n\n trans_vector = numpy.array([(pC_B[0]-pC_A[0]), (pC_B[1]-pC_A[1]), (pC_B[2]-pC_A[2])])\n modu_tr = numpy.linalg.norm(trans_vector)\n rota_centroid_rad=numpy.dot(vector_director, axis1)\n rota_centroid = (rota_centroid_rad*180)/math.pi\n rota_centroid_absol_0= numpy.absolute(rota_centroid)\n rota_centroid_absol=round(rota_centroid_absol_0,2)\n\n\n if rota_centroid_absol == 0.00:\n p1 = '_1'\n p2 = '_2'\n p3 = '_3'\n cmd.pseudoatom (pos=[cmA[0], 
cmA[1], cmA[2]], object=p1)\n cmd.pseudoatom (pos=[pC_A[0], pC_A[1], pC_A[2]], object=p2)\n cmd.pseudoatom (pos=[cmB[0], cmB[1], cmB[2]], object=p3)\n cmd.angle(None, p1, p2, p3)\n #print_information(T, axis1, angle_degrees, moduli_vector, obj, x1, y1, z1, x2, y2, z2, w, r1, g1, b1, r2, g2, b2)\n\n if rota_centroid_absol != 0:\n p1 = '_1'\n p2 = '_2'\n p3 = '_3'\n p4 = '_4'\n cmd.pseudoatom (pos=[cmA[0], cmA[1], cmA[2]], object=p1)\n cmd.pseudoatom (pos=[pC_A[0], pC_A[1], pC_A[2]], object=p2)\n cmd.pseudoatom (pos=[pC_B[0], pC_B[1], pC_B[2]], object=p3)\n cmd.pseudoatom (pos=[cmB[0], cmB[1], cmB[2]], object=p4)\n cmd.dihedral(None, p1, p2, p3, p4)\n cmd.distance(None, p2, p3)\n #print_information(T, axis1, angle_degrees, moduli_vector, obj, x1, y1, z1, x2, y2, z2, w, r1, g1, b1, r2, g2, b2, modu_tr)\n\n cmd.create('working', chA)\n cmd.super('working', chB)\n\n return angle_degrees\n\ndef auto_draw_axis():\n for i in range(1,24):\n j = 2*i\n model1 = \"30S-frame1.pdb\"\n model2 = \"30S-frame\" + str(j) + \".pdb\"\n cmd.load(model1)\n cmd.load(model2)\n objectName1 = \"30S-frame1\"\n objectName2 = \"30S-frame\" + str(j)\n angle_degrees = draw_axis(objectName1, objectName2, 60)\n\n print(\"Angle between \" + objectName1 + \" \" + objectName2 + \" is #### \" + str(angle_degrees))\n\n cmd.delete('all')\n" ]
[ [ "numpy.dot", "numpy.absolute", "numpy.linalg.norm", "numpy.linalg.lstsq", "numpy.array" ] ]
Ethan07902050/s3prl
[ "e120e29a59e5b4f6a1e9d7946602070567301938" ]
[ "s3prl/downstream/mosei/expert.py" ]
[ "import os\n\n# import math\nimport torch\n\n# import random\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom .model import Model\nfrom .dataset import MOSEIDataset\n\nimport pandas as pd\n\n# import sys\n\nfrom collections import defaultdict\nimport sklearn\n\nfrom pathlib import Path\n\n\nclass DownstreamExpert(nn.Module):\n \"\"\"\n Used to handle downstream-specific operations\n eg. downstream forward, metric computation, contents to log\n \"\"\"\n\n def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):\n \"\"\"\n Args:\n upstream_dim: int\n Different upstream will give different representation dimension\n You might want to first project them to the same dimension\n\n downstream_expert: dict\n The 'downstream_expert' field specified in your downstream config file\n eg. downstream/downstream/example/config.yaml\n\n **kwargs: dict\n The arguments specified by the argparser in run_downstream.py\n in case you need it.\n \"\"\"\n\n super(DownstreamExpert, self).__init__()\n self.upstream_dim = upstream_dim\n self.datarc = downstream_expert[\"datarc\"]\n self.modelrc = downstream_expert[\"modelrc\"]\n\n # Convert Label File to (filename, label) for each split\n self.train_data, self.dev_data, self.test_data = [], [], []\n df = pd.read_csv(self.datarc[\"label_path\"], encoding=\"latin-1\")\n for row in df.itertuples():\n filename = row.file + \"_\" + str(row.index) + \".wav\"\n # for 2-clas sentiment with negative and non-negative\n # neutral is counted as positive\n if self.datarc[\"num_class\"] == 2:\n label = row.label2a\n # for three-class sentiment with positive, neutral and negative\n elif self.datarc[\"num_class\"] == 3:\n label = (row.label2b + 1) # avoid cuda error: device-side ...\n # for six-class emotion\n elif self.datarc[\"num_class\"] == 6:\n label = row.label6\n # for seven-class sentiment\n elif self.datarc[\"num_class\"] == 7:\n # Avoid CUDA error:\n # device-side assert triggered (due to negative label)\n label = (row.label7 + 3)\n else:\n raise ValueError(\"Unsupported num_class\")\n # split train, dev, test\n if row.split == 0:\n self.train_data.append((filename, label))\n elif row.split == 1:\n self.dev_data.append((filename, label))\n elif row.split == 2:\n self.test_data.append((filename, label))\n\n self.train_dataset = MOSEIDataset(\"train\",\n self.train_data, self.datarc[\"data_dir\"])\n self.dev_dataset = MOSEIDataset(\"dev\",\n self.dev_data, self.datarc[\"data_dir\"])\n self.test_dataset = MOSEIDataset(\"test\", \n self.test_data, self.datarc[\"data_dir\"])\n\n self.connector = nn.Linear(upstream_dim, self.modelrc[\"input_dim\"])\n self.model = Model(output_class_num=self.datarc[\"num_class\"],\n **self.modelrc)\n self.objective = nn.CrossEntropyLoss()\n self.expdir = expdir\n self.logging = os.path.join(self.expdir, \"log.log\")\n self.best = defaultdict(lambda: 0)\n self.answer = []\n\n def _get_train_dataloader(self, dataset, epoch: int):\n from s3prl.utility.data import get_ddp_sampler\n sampler = get_ddp_sampler(dataset, epoch)\n return DataLoader(\n dataset, batch_size=self.datarc['train_batch_size'],\n shuffle=(sampler is None),\n sampler=sampler,\n num_workers=self.datarc['num_workers'],\n collate_fn=dataset.collate_fn\n )\n\n def _get_eval_dataloader(self, dataset):\n return DataLoader(\n dataset,\n batch_size=self.datarc[\"eval_batch_size\"],\n shuffle=False,\n num_workers=self.datarc[\"num_workers\"],\n collate_fn=dataset.collate_fn,\n )\n\n \"\"\"\n 
Datalaoder Specs:\n Each dataloader should output a list in the following format:\n\n [[wav1, wav2, ...], your_other_contents1, your_other_contents2, ...]\n\n where wav1, wav2 ... are in variable length\n each wav is torch.FloatTensor in cpu with:\n 1. dim() == 1\n 2. sample_rate == 16000\n 3. directly loaded by torchaudio without any preprocessing\n \"\"\"\n\n # Interface\n def get_train_dataloader(self, epoch: int):\n return self._get_train_dataloader(self.train_dataset, epoch)\n\n # Interface\n def get_dev_dataloader(self):\n return self._get_eval_dataloader(self.dev_dataset)\n\n # Interface\n def get_test_dataloader(self):\n return self._get_eval_dataloader(self.test_dataset)\n\n def get_dataloader(self, mode, epoch: int=0):\n if mode == 'train':\n return eval(f'self.get_{mode}_dataloader')(epoch)\n return eval(f'self.get_{mode}_dataloader')()\n\n # Interface\n def forward(self, mode, features, labels, records, **kwargs):\n \"\"\"\n This function will be used in both train/dev/test, you can use\n self.training (bool) to control the different behavior for\n training or evaluation (dev/test)\n\n Args:\n mode: str\n 'train' or 'dev' or 'test'\n\n features:\n list of unpadded features [feat1, feat2, ...]\n each feat is in torch.FloatTensor and already\n put in the device assigned by command-line args\n\n records:\n defaultdict(list), by dumping contents into records,\n these contents can be averaged and logged on Tensorboard\n later by self.log_records\n\n Note1. downstream/runner.py will call self.log_records\n 1. every log_step during training\n 2. once after evalute the whole dev/test dataloader\n\n Note2. log_step is defined in your downstream config\n\n Return:\n loss:\n the loss to be optimized, should not be detached\n a single scalar in torch.FloatTensor\n \"\"\"\n features = pad_sequence(features, batch_first=True)\n features = self.connector(features)\n predicted = self.model(features)\n\n utterance_labels = labels\n labels = torch.LongTensor(utterance_labels).to(features.device)\n loss = self.objective(predicted, labels)\n predicted_classid = predicted.max(dim=-1).indices\n\n records[\"acc\"] += (\n predicted_classid == labels).view(-1).cpu().float().tolist()\n # records['filename'] += filenames\n records[\"predicted\"] += predicted_classid.cpu().float().tolist()\n records[\"original\"] += labels.cpu().float().tolist()\n\n if not self.training:\n # some evaluation-only processing, eg. 
decoding\n pass\n\n return loss\n\n # interface\n def log_records(self, mode, records, logger, global_step, **kwargs):\n \"\"\"\n This function will be used in both train/dev/test, you can use\n self.training (bool) to control the different behavior for\n training or evaluation (dev/test)\n\n Args:\n mode: str\n 'train' or 'dev' or 'test'\n\n records:\n defaultdict(list), contents already prepared by self.forward\n\n logger:\n Tensorboard SummaryWriter\n please use f'{prefix}your_content_name' as key name\n to log your customized contents\n\n global_step:\n global_step in runner, which is helpful for Tensorboard logging\n \"\"\"\n prefix = f\"mosei/{mode}-\"\n\n average = torch.FloatTensor(records[\"acc\"]).mean().item()\n f1 = sklearn.metrics.f1_score(\n records[\"original\"], records[\"predicted\"], average=\"macro\"\n )\n\n logger.add_scalar(f\"{prefix}acc\", average, global_step=global_step)\n if mode in [\"dev\", \"test\"]:\n print(f\"{prefix}acc: {average}\")\n message = f\"{mode} at step {global_step}: {average} (acc), {f1} (f1)\\n\"\n save_ckpt = []\n\n if average > self.best[prefix]:\n self.best[prefix] = average\n message = f\"New best on {message}\"\n name = prefix.split(\"/\")[-1].split(\"-\")[0]\n save_ckpt.append(f\"{name}-best.ckpt\")\n\n # only saves the prediction from the best model, not the latest\n if mode in [\"dev\", \"test\"]:\n with open(Path(self.expdir) / f\"{mode}_predict.txt\",\n \"w\") as file:\n line = [f\"{f} \\n\" for f in records[\"predicted\"]]\n file.writelines(line)\n\n # write true label once\n if mode in [\"dev\", \"test\"]:\n with open(Path(self.expdir) / f\"{mode}_truth.txt\", \"w\") as file:\n line = [f\"{f} \\n\" for f in records[\"original\"]]\n file.writelines(line)\n\n with open(self.logging, \"a\") as f:\n f.write(message)\n\n if not self.training:\n # some evaluation-only processing, eg. decoding\n pass\n\n return save_ckpt\n" ]
[ [ "torch.nn.CrossEntropyLoss", "pandas.read_csv", "torch.LongTensor", "torch.nn.utils.rnn.pad_sequence", "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.FloatTensor", "sklearn.metrics.f1_score" ] ]
yashbonde/gpt
[ "11e57a5fddd0eb20e44ebe3a661aa0b144a1730b" ]
[ "tokenizer.py" ]
[ "# @yashbonde x NBX Internal - 28th April 2021\n# This file is based on code by the authors denoted below and has been modified from its original version.\n#\n# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tokenization classes for OpenAI GPT.\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport sys\nimport json\nimport logging\nimport os\nimport regex as re\nfrom io import open\n\nimport torch\n\nfrom functools import lru_cache\nlogger = logging.getLogger(__name__)\n\nVOCAB_NAME = 'vocab.json'\nMERGES_NAME = 'merges.txt'\nSPECIAL_TOKENS_NAME = 'special_tokens.txt'\nhere = os.path.dirname(os.path.abspath(__file__))\nVOCAB_FULL_PATH = os.path.join(here, VOCAB_NAME)\nMERGES_FULL_PATH = os.path.join(here, MERGES_NAME)\n\n\n@lru_cache()\ndef bytes_to_unicode():\n \"\"\"\n Returns list of utf-8 byte and a corresponding list of unicode strings.\n The reversible bpe codes work on unicode strings.\n This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.\n When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n This is a signficant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n And avoids mapping to whitespace/control characters the bpe code barfs on.\n \"\"\"\n _chr = unichr if sys.version_info[0] == 2 else chr\n bs = list(range(ord(\"!\"), ord(\"~\") + 1)) + list(range(ord(\"¡\"), ord(\"¬\") + 1)) + \\\n list(range(ord(\"®\"), ord(\"ÿ\") + 1))\n cs = bs[:]\n n = 0\n for b in range(2**8):\n if b not in bs:\n bs.append(b)\n cs.append(2**8 + n)\n n += 1\n cs = [_chr(n) for n in cs]\n return dict(zip(bs, cs))\n\n\ndef get_pairs(word):\n \"\"\"Return set of symbol pairs in a word.\n Word is represented as tuple of symbols (symbols being variable-length strings).\n \"\"\"\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs\n\n\nclass GPT2Tokenizer(object):\n def __init__(self, vocab_file = VOCAB_FULL_PATH, merges_file = MERGES_FULL_PATH, errors='replace',\n special_tokens=None, max_len=None):\n self.max_len = max_len if max_len is not None else int(1e12)\n self.encoder = json.load(open(vocab_file))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.errors = errors # how to handle errors in decoding\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n bpe_data = open(merges_file, encoding='utf-8').read().split('\\n')[1:-1]\n bpe_merges = [tuple(merge.split()) for merge in bpe_data]\n self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))\n self.cache = {}\n\n # Should haved added re.IGNORECASE so BPE merges can happen for\n # capitalized versions of contractions\n self.pat = re.compile(r\"\"\"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| 
?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+\"\"\")\n\n self.special_tokens = {}\n self.special_tokens_decoder = {}\n self.set_special_tokens(special_tokens)\n\n self.eot_tag = \"<|endoftext|>\"\n self.eot_id = self.encoder[\"<|endoftext|>\"]\n\n\n def __len__(self):\n return len(self.encoder) + len(self.special_tokens)\n\n def set_special_tokens(self, special_tokens):\n \"\"\" Add a list of additional tokens to the encoder.\n The additional tokens are indexed starting from the last index of the\n current vocabulary in the order of the `special_tokens` list.\n \"\"\"\n if not special_tokens:\n self.special_tokens = {}\n self.special_tokens_decoder = {}\n return\n self.special_tokens = dict((tok, len(self.encoder) + i)\n for i, tok in enumerate(special_tokens))\n self.special_tokens_decoder = {v: k for k, v in self.special_tokens.items()}\n logger.info(\"Special tokens {}\".format(self.special_tokens))\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token)\n pairs = get_pairs(word)\n\n if not pairs:\n return token\n\n while True:\n bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except BaseException:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def tokenize(self, text):\n \"\"\" Tokenize a string. \"\"\"\n bpe_tokens = []\n for token in re.findall(self.pat, text):\n if sys.version_info[0] == 2:\n token = ''.join(self.byte_encoder[ord(b)] for b in token)\n else:\n token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))\n bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))\n return bpe_tokens\n\n def convert_tokens_to_ids(self, tokens):\n \"\"\" Converts a sequence of tokens into ids using the vocab. \"\"\"\n ids = []\n if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)):\n if tokens in self.special_tokens:\n return self.special_tokens[tokens]\n else:\n return self.encoder.get(tokens, 0)\n for token in tokens:\n if token in self.special_tokens:\n ids.append(self.special_tokens[token])\n else:\n ids.append(self.encoder.get(token, 0))\n if len(ids) > self.max_len:\n logger.warning(\n \"Token indices sequence length is longer than the specified maximum \"\n \" sequence length for this OpenAI GPT model ({} > {}). 
Running this\"\n \" sequence through the model will result in indexing errors\".format(\n len(ids), self.max_len)\n )\n return ids\n\n def convert_ids_to_tokens(self, ids, skip_special_tokens=False):\n \"\"\"Converts a sequence of ids in BPE tokens using the vocab.\"\"\"\n tokens = []\n for i in ids:\n if i in self.special_tokens_decoder:\n if not skip_special_tokens:\n tokens.append(self.special_tokens_decoder[i])\n else:\n tokens.append(self.decoder[i])\n return tokens\n\n def encode(self, text):\n return self.convert_tokens_to_ids(self.tokenize(text))\n\n def __call__(self, text):\n return torch.Tensor(self.encode(text)).long()\n\n def decode(self, tokens):\n if isinstance(tokens[0], list):\n return [self.decode(t) for t in tokens]\n text = ''.join([self.decoder[token] for token in tokens])\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)\n return text\n\n def save_vocabulary(self, vocab_path):\n \"\"\"Save the tokenizer vocabulary and merge files to a directory.\"\"\"\n if not os.path.isdir(vocab_path):\n logger.error(\"Vocabulary path ({}) should be a directory\".format(vocab_path))\n return\n vocab_file = os.path.join(vocab_path, VOCAB_NAME)\n merge_file = os.path.join(vocab_path, MERGES_NAME)\n special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)\n\n with open(vocab_file, 'w', encoding='utf-8') as f:\n f.write(json.dumps(self.encoder, ensure_ascii=False))\n\n index = 0\n with open(merge_file, \"w\", encoding=\"utf-8\") as writer:\n writer.write(u'#version: 0.2\\n')\n for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\"Saving vocabulary to {}: BPE merge indices are not consecutive.\"\n \" Please check that the tokenizer is not corrupted!\".format(merge_file))\n index = token_index\n writer.write(' '.join(bpe_tokens) + u'\\n')\n index += 1\n\n index = len(self.encoder)\n with open(special_tokens_file, 'w', encoding='utf-8') as writer:\n for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\"Saving special tokens vocabulary to {}: BPE indices are not consecutive.\"\n \" Please check that the tokenizer is not corrupted!\".format(special_tokens_file))\n index = token_index\n writer.write(token + u'\\n')\n index += 1\n\n return vocab_file, merge_file, special_tokens_file\n\nif __name__ == \"__main__\":\n tokenizer = GPT2Tokenizer()\n\n # test dec(enc(x)) == x\n string = \"When a self-driving car kills a pedestrian, prickly Cyber Cell detective Saajan Kundu teams up with his \" \\\n \"estranged partner Laxmi Suri to investigate. But was this an accident?\"\n encoded = tokenizer.encode(string)\n decoded = tokenizer.decode(encoded)\n assert decoded == string\n\n # test if our mods work\n encoded = tokenizer(string)\n encoded = torch.tile(encoded, [3, 1])\n tokenizer.decode(encoded.tolist())\n" ]
[ [ "torch.tile" ] ]
smoorjani/matrix-multiplication
[ "8d378d972f45e9f9c1f4b26457021636ad1f2d9b" ]
[ "benchmarks/random_tensor_benchmark.py" ]
[ "import torch\nimport numpy as np\nimport time\nimport logging\nfrom custom_mm import (\n init_cublas,\n destroy_cublas,\n init_cusparse,\n destroy_cusparse\n)\nfrom matmuls import (\n cublas_matmul,\n cusparse_matmul\n)\n\ninit_cublas()\ninit_cusparse()\n\nLOG = \"./random_tensor_benchmark.log\"\nlogging.basicConfig(filename=LOG, filemode=\"w\", level=logging.DEBUG)\n\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.ERROR)\nlogging.getLogger(\"\").addHandler(console)\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_dataset(num_samples: int = 1000, dim: int = 1024,\n seed: int = None, sparsity: float = 0):\n\n if seed:\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n a = np.random.rand((num_samples * dim * dim))\n b = np.random.rand((num_samples * dim * dim))\n\n if sparsity:\n # nnz = sparsity * num_samples * dim * dim\n indices = a.flatten()\n to_replace = np.random.permutation(\n indices)[:int(indices.size * sparsity)]\n\n a[np.unravel_index(to_replace, a.shape)] = 0\n\n a = a.reshape((num_samples, dim, dim))\n b = b.reshape((num_samples, dim, dim))\n a = torch.tensor(a)\n b = torch.tensor(b)\n\n return (a, b)\n\n\ndef test_kernel(matmul, a, b):\n t_init = time.time()\n assert a.shape[0] == b.shape[0]\n assert len(a.shape) == len(b.shape) == 3\n\n c = matmul(a, b)\n t_final = time.time() - t_init\n\n logger.debug('Execution time for {num_samples} multiplications: {time}\\n'.format(\n num_samples=a.shape[0], time=t_final))\n logger.debug('Average time for one multiplication: {time}\\n'.format(\n time=t_final/a.shape[0]))\n return c\n\n\nnum_samples = 1000\n\ndims = [1024, 4096, 8192, 12288, 16384]\nsparsity_levels = [0, 0.25, 0.5, 0.75, 0.9, 0.99]\n\nfor sparsity in sparsity_levels:\n for dim in dims:\n a, b = generate_dataset(num_samples=num_samples,\n dim=dim, seed=0, sparsity=sparsity)\n logger.debug(\"Testing {} by {} matrices with {} percent sparsity.\\n\".format(\n dim, dim, sparsity*100))\n\n logger.debug(\"Regular Torch Matmul: \\n\")\n test_kernel(torch.matmul, a, b)\n\n logger.debug(\"cuBLAS Matmul: \\n\")\n test_kernel(cublas_matmul, a, b)\n\n logger.debug(\"cuSPARSE Matmul: \\n\")\n _a = a.type(torch.DoubleTensor)\n _b = b.type(torch.DoubleTensor)\n test_kernel(cusparse_matmul, _a, _b)\n\n# # TODO: fix issue with \"RuntimeError: operation does not have an identity.\"\n# print(\"BlockSparse Matmul: \\n\")\n# H, M, N, K = num_samples, dim, dim, dim\n# block = 16\n# layout = torch.randint(0, 2, (H, M//block, N//block))\n# blocksparse_mmul = torch_blocksparse.MatMul(\n# layout, block, 'sdd', trans_a=True, trans_b=False)\n\n# test_kernel(blocksparse_mmul, a, b)\n\ndestroy_cublas()\ndestroy_cusparse()\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.tensor", "numpy.random.permutation", "numpy.random.rand", "numpy.unravel_index" ] ]
pwentrys/tensorflow_tests
[ "b8d897bc1ca02c9f35139cc8090ee3814e349716" ]
[ "tensorflow_self_check.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A script for testing that TensorFlow is installed correctly on Windows.\n\nThe script will attempt to verify your TensorFlow installation, and print\nsuggestions for how to fix your installation.\n\"\"\"\n\nimport ctypes\nimport imp\nimport sys\n\n\ndef main():\n try:\n import tensorflow as tf\n print(\"TensorFlow successfully installed.\")\n if tf.test.is_built_with_cuda():\n print(\"The installed version of TensorFlow includes GPU support.\")\n else:\n print(\"The installed version of TensorFlow does not include GPU support.\")\n sys.exit(0)\n except ImportError:\n print(\"ERROR: Failed to import the TensorFlow module.\")\n\n candidate_explanation = False\n\n python_version = sys.version_info.major, sys.version_info.minor\n print(\"\\n- Python version is %d.%d.\" % python_version)\n if not (python_version == (3, 5) or python_version == (3, 6)):\n candidate_explanation = True\n print(\"- The official distribution of TensorFlow for Windows requires \"\n \"Python version 3.5 or 3.6.\")\n\n try:\n _, pathname, _ = imp.find_module(\"tensorflow\")\n print(\"\\n- TensorFlow is installed at: %s\" % pathname)\n except ImportError:\n candidate_explanation = False\n print(\"\"\"\n- No module named TensorFlow is installed in this Python environment. You may\n install it using the command `pip install tensorflow`.\"\"\")\n\n try:\n msvcp140 = ctypes.WinDLL(\"msvcp140.dll\")\n except OSError:\n candidate_explanation = True\n print(\"\"\"\n- Could not load 'msvcp140.dll'. TensorFlow requires that this DLL be\n installed in a directory that is named in your %PATH% environment\n variable. You may install this DLL by downloading Microsoft Visual\n C++ 2015 Redistributable Update 3 from this URL:\n https://www.microsoft.com/en-us/download/details.aspx?id=53587\"\"\")\n\n try:\n cudart64_80 = ctypes.WinDLL(\"cudart64_80.dll\")\n except OSError:\n candidate_explanation = True\n print(\"\"\"\n- Could not load 'cudart64_80.dll'. The GPU version of TensorFlow\n requires that this DLL be installed in a directory that is named in\n your %PATH% environment variable. Download and install CUDA 8.0 from\n this URL: https://developer.nvidia.com/cuda-toolkit\"\"\")\n\n try:\n nvcuda = ctypes.WinDLL(\"nvcuda.dll\")\n except OSError:\n candidate_explanation = True\n print(\"\"\"\n- Could not load 'nvcuda.dll'. The GPU version of TensorFlow requires that\n this DLL be installed in a directory that is named in your %PATH%\n environment variable. Typically it is installed in 'C:\\Windows\\System32'.\n If it is not present, ensure that you have a CUDA-capable GPU with the\n correct driver installed.\"\"\")\n\n cudnn5_found = False\n try:\n cudnn5 = ctypes.WinDLL(\"cudnn64_5.dll\")\n cudnn5_found = True\n except OSError:\n candidate_explanation = True\n print(\"\"\"\n- Could not load 'cudnn64_5.dll'. 
The GPU version of TensorFlow\n requires that this DLL be installed in a directory that is named in\n your %PATH% environment variable. Note that installing cuDNN is a\n separate step from installing CUDA, and it is often found in a\n different directory from the CUDA DLLs. You may install the\n necessary DLL by downloading cuDNN 5.1 from this URL:\n https://developer.nvidia.com/cudnn\"\"\")\n\n cudnn6_found = False\n try:\n cudnn = ctypes.WinDLL(\"cudnn64_6.dll\")\n cudnn6_found = True\n except OSError:\n candidate_explanation = True\n\n if not cudnn5_found or not cudnn6_found:\n print()\n if not cudnn5_found and not cudnn6_found:\n print(\"- Could not find cuDNN.\")\n elif not cudnn5_found:\n print(\"- Could not find cuDNN 5.1.\")\n else:\n print(\"- Could not find cuDNN 6.\")\n print(\"\"\"\n The GPU version of TensorFlow requires that the correct cuDNN DLL be installed\n in a directory that is named in your %PATH% environment variable. Note that\n installing cuDNN is a separate step from installing CUDA, and it is often\n found in a different directory from the CUDA DLLs. The correct version of\n cuDNN depends on your version of TensorFlow:\n\n * TensorFlow 1.2.1 or earlier requires cuDNN 5.1. ('cudnn64_5.dll')\n * TensorFlow 1.3 or later requires cuDNN 6. ('cudnn64_6.dll')\n\n You may install the necessary DLL by downloading cuDNN from this URL:\n https://developer.nvidia.com/cudnn\"\"\")\n\n if not candidate_explanation:\n print(\"\"\"\n- All required DLLs appear to be present. Please open an issue on the\n TensorFlow GitHub page: https://github.com/tensorflow/tensorflow/issues\"\"\")\n\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.test.is_built_with_cuda" ] ]
scottcha/tsfresh
[ "b3395c12d7e25494bdc297a31f6d1136e76c477e" ]
[ "tests/integrations/test_feature_extraction.py" ]
[ "# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)\n# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016\n\nfrom unittest import TestCase\n\nimport dask.dataframe as dd\nimport pandas as pd\n\nfrom tsfresh.examples.driftbif_simulation import load_driftbif\nfrom tsfresh import extract_relevant_features, extract_features\nfrom tsfresh.feature_extraction import MinimalFCParameters\n\n\nclass FeatureExtractionTestCase(TestCase):\n def setUp(self):\n df, y = load_driftbif(100, 10, classification=True, seed=42)\n\n df['my_id'] = df['id'].astype('str')\n del df[\"id\"]\n\n self.df = df\n\n def test_pandas(self):\n df = self.df\n\n # Test shape and a single entry (to see if it works at all)\n X = extract_features(df, column_id=\"my_id\", column_sort=\"time\", column_kind=\"dimension\", column_value=\"value\",\n default_fc_parameters=MinimalFCParameters())\n self.assertIn(\"1__mean\", X.columns)\n self.assertAlmostEqual(X.loc[\"5\", \"1__mean\"], 5.516e-05, 4)\n self.assertIn(\"11\", X.index)\n self.assertEqual(X.shape, (100, 18))\n\n X = extract_features(df, column_id=\"my_id\", column_sort=\"time\", column_kind=\"dimension\",\n default_fc_parameters=MinimalFCParameters())\n self.assertIn(\"1__mean\", X.columns)\n self.assertAlmostEqual(X.loc[\"5\", \"1__mean\"], 5.516e-05, 4)\n self.assertIn(\"11\", X.index)\n self.assertEqual(X.shape, (100, 18))\n\n X = extract_features(df.drop(columns=[\"dimension\"]), column_id=\"my_id\", column_sort=\"time\",\n default_fc_parameters=MinimalFCParameters())\n self.assertIn(\"value__mean\", X.columns)\n self.assertAlmostEqual(X.loc[\"5\", \"value__mean\"], 5.516e-05, 4)\n self.assertIn(\"11\", X.index)\n self.assertEqual(X.shape, (100, 9))\n\n X = extract_features(df.drop(columns=[\"dimension\", \"time\"]), column_id=\"my_id\",\n default_fc_parameters=MinimalFCParameters())\n self.assertIn(\"value__mean\", X.columns)\n self.assertAlmostEqual(X.loc[\"5\", \"value__mean\"], 5.516e-05, 4)\n self.assertIn(\"11\", X.index)\n self.assertEqual(X.shape, (100, 9))\n\n def test_pandas_no_pivot(self):\n df = self.df\n\n X = extract_features(df, column_id=\"my_id\", column_sort=\"time\",\n column_kind=\"dimension\", column_value=\"value\",\n pivot=False,\n default_fc_parameters=MinimalFCParameters())\n X = pd.DataFrame(X, columns=[\"my_id\", \"variable\", \"value\"])\n self.assertIn(\"1__mean\", X[\"variable\"].values)\n self.assertAlmostEqual(X[(X[\"my_id\"] == \"5\") & (X[\"variable\"] == \"1__mean\")][\"value\"].iloc[0], 5.516e-05, 4)\n self.assertEqual(X.shape, (100*18, 3))\n\n X = extract_features(df, column_id=\"my_id\", column_sort=\"time\",\n column_kind=\"dimension\",\n pivot=False,\n default_fc_parameters=MinimalFCParameters())\n X = pd.DataFrame(X, columns=[\"my_id\", \"variable\", \"value\"])\n self.assertIn(\"1__mean\", X[\"variable\"].values)\n self.assertAlmostEqual(X[(X[\"my_id\"] == \"5\") & (X[\"variable\"] == \"1__mean\")][\"value\"].iloc[0], 5.516e-05, 4)\n self.assertEqual(X.shape, (100*18, 3))\n\n X = extract_features(df.drop(columns=[\"dimension\"]), column_id=\"my_id\",\n column_sort=\"time\",\n pivot=False,\n default_fc_parameters=MinimalFCParameters())\n X = pd.DataFrame(X, columns=[\"my_id\", \"variable\", \"value\"])\n self.assertIn(\"value__mean\", X[\"variable\"].values)\n self.assertAlmostEqual(X[(X[\"my_id\"] == \"5\") & (X[\"variable\"] == \"value__mean\")][\"value\"].iloc[0], 5.516e-05, 4)\n self.assertEqual(X.shape, (100*9, 3))\n\n X = 
extract_features(df.drop(columns=[\"dimension\", \"time\"]), column_id=\"my_id\",\n pivot=False,\n default_fc_parameters=MinimalFCParameters())\n X = pd.DataFrame(X, columns=[\"my_id\", \"variable\", \"value\"])\n self.assertIn(\"value__mean\", X[\"variable\"].values)\n self.assertAlmostEqual(X[(X[\"my_id\"] == \"5\") & (X[\"variable\"] == \"value__mean\")][\"value\"].iloc[0], 5.516e-05, 4)\n self.assertEqual(X.shape, (100*9, 3))\n\n def test_dask(self):\n df = dd.from_pandas(self.df, npartitions=1)\n\n X = extract_features(df, column_id=\"my_id\", column_sort=\"time\",\n column_kind=\"dimension\", column_value=\"value\",\n default_fc_parameters=MinimalFCParameters()).compute()\n self.assertIn(\"1__mean\", X.columns)\n self.assertAlmostEqual(X.loc[\"5\", \"1__mean\"], 5.516e-05, 4)\n self.assertIn(\"11\", X.index)\n self.assertEqual(X.shape, (100, 18))\n\n X = extract_features(df, column_id=\"my_id\", column_sort=\"time\",\n column_kind=\"dimension\",\n default_fc_parameters=MinimalFCParameters()).compute()\n self.assertIn(\"1__mean\", X.columns)\n self.assertAlmostEqual(X.loc[\"5\", \"1__mean\"], 5.516e-05, 4)\n self.assertIn(\"11\", X.index)\n self.assertEqual(X.shape, (100, 18))\n\n X = extract_features(df.drop(columns=[\"dimension\"]), column_id=\"my_id\",\n column_sort=\"time\",\n default_fc_parameters=MinimalFCParameters()).compute()\n self.assertIn(\"value__mean\", X.columns)\n self.assertAlmostEqual(X.loc[\"5\", \"value__mean\"], 5.516e-05, 4)\n self.assertIn(\"11\", X.index)\n self.assertEqual(X.shape, (100, 9))\n\n X = extract_features(df.drop(columns=[\"dimension\", \"time\"]), column_id=\"my_id\",\n default_fc_parameters=MinimalFCParameters()).compute()\n self.assertIn(\"value__mean\", X.columns)\n self.assertAlmostEqual(X.loc[\"5\", \"value__mean\"], 5.516e-05, 4)\n self.assertIn(\"11\", X.index)\n self.assertEqual(X.shape, (100, 9))\n\n def test_dask_no_pivot(self):\n df = dd.from_pandas(self.df, npartitions=1)\n\n X = extract_features(df, column_id=\"my_id\", column_sort=\"time\",\n column_kind=\"dimension\", column_value=\"value\",\n pivot=False,\n default_fc_parameters=MinimalFCParameters()).compute()\n self.assertIn(\"1__mean\", X[\"variable\"].values)\n self.assertAlmostEqual(X[(X[\"my_id\"] == \"5\") & (X[\"variable\"] == \"1__mean\")][\"value\"].iloc[0], 5.516e-05, 4)\n self.assertEqual(X.shape, (100*18, 3))\n\n X = extract_features(df, column_id=\"my_id\", column_sort=\"time\",\n column_kind=\"dimension\",\n pivot=False,\n default_fc_parameters=MinimalFCParameters()).compute()\n self.assertIn(\"1__mean\", X[\"variable\"].values)\n self.assertAlmostEqual(X[(X[\"my_id\"] == \"5\") & (X[\"variable\"] == \"1__mean\")][\"value\"].iloc[0], 5.516e-05, 4)\n self.assertEqual(X.shape, (100*18, 3))\n\n X = extract_features(df.drop(columns=[\"dimension\"]), column_id=\"my_id\",\n column_sort=\"time\",\n pivot=False,\n default_fc_parameters=MinimalFCParameters()).compute()\n self.assertIn(\"value__mean\", X[\"variable\"].values)\n self.assertAlmostEqual(X[(X[\"my_id\"] == \"5\") & (X[\"variable\"] == \"value__mean\")][\"value\"].iloc[0], 5.516e-05, 4)\n self.assertEqual(X.shape, (100*9, 3))\n\n X = extract_features(df.drop(columns=[\"dimension\", \"time\"]), column_id=\"my_id\",\n pivot=False,\n default_fc_parameters=MinimalFCParameters()).compute()\n self.assertIn(\"value__mean\", X[\"variable\"].values)\n self.assertAlmostEqual(X[(X[\"my_id\"] == \"5\") & (X[\"variable\"] == \"value__mean\")][\"value\"].iloc[0], 5.516e-05, 4)\n self.assertEqual(X.shape, (100*9, 3))\n" ]
[ [ "pandas.DataFrame" ] ]
jaisakthism/u-net
[ "daf600930d6637f65beafc8f914e186bfc418316" ]
[ "train.py" ]
[ "from __future__ import print_function\n\nimport cv2\nimport numpy as np\nfrom keras.models import Model\nfrom keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, Dropout\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as K\n\nfrom data import load_train_data, load_test_data\n\nfrom skimage.transform import rotate, resize\nfrom skimage import data\nimport matplotlib.pyplot as plt\nimg_rows = 160\nimg_cols = 224\n\nsmooth = 1.\n\n\ndef dice_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(y_true_f*y_true_f) + K.sum(y_pred_f*y_pred_f) + smooth)\n\n\ndef dice_coef_loss(y_true, y_pred):\n return 1.-dice_coef(y_true, y_pred)\n\n\ndef augmentation(image, imageB, org_width=160,org_height=224, width=190, height=262):\n max_angle=20\n image=cv2.resize(image,(height,width))\n imageB=cv2.resize(imageB,(height,width))\n\n angle=np.random.randint(max_angle)\n if np.random.randint(2):\n angle=-angle\n image=rotate(image,angle,resize=True)\n imageB=rotate(imageB,angle,resize=True)\n\n xstart=np.random.randint(width-org_width)\n ystart=np.random.randint(height-org_height)\n image=image[xstart:xstart+org_width,ystart:ystart+org_height]\n imageB=imageB[xstart:xstart+org_width,ystart:ystart+org_height]\n\n if np.random.randint(2):\n image=cv2.flip(image,1)\n imageB=cv2.flip(imageB,1)\n \n if np.random.randint(2):\n image=cv2.flip(image,0)\n imageB=cv2.flip(imageB,0)\n\n image=cv2.resize(image,(org_height,org_width))\n imageB=cv2.resize(imageB,(org_height,org_width))\n\n return image,imageB\n # print(image.shape)\n # plt.imshow(image)\n # plt.show()\n\ndef get_unet():\n inputs = Input((1, img_rows, img_cols))\n conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)\n conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)\n conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)\n conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)\n conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)\n conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)\n # pool5 = MaxPooling2D(pool_size=(2, 2))(conv5)\n\n # convdeep = Convolution2D(1024, 3, 3, activation='relu', border_mode='same')(pool5)\n # convdeep = Convolution2D(1024, 3, 3, activation='relu', border_mode='same')(convdeep)\n \n # upmid = merge([Convolution2D(512, 2, 2, border_mode='same')(UpSampling2D(size=(2, 2))(convdeep)), conv5], mode='concat', concat_axis=1)\n # convmid = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(upmid)\n # convmid = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(convmid)\n\n up6 = merge([Convolution2D(256, 2, 2,activation='relu', border_mode='same')(UpSampling2D(size=(2, 2))(conv5)), conv4], mode='concat', 
concat_axis=1)\n conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)\n conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)\n\n up7 = merge([Convolution2D(128, 2, 2,activation='relu', border_mode='same')(UpSampling2D(size=(2, 2))(conv6)), conv3], mode='concat', concat_axis=1)\n conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)\n conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)\n\n up8 = merge([Convolution2D(64, 2, 2,activation='relu', border_mode='same')(UpSampling2D(size=(2, 2))(conv7)), conv2], mode='concat', concat_axis=1)\n conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)\n conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)\n\n up9 = merge([Convolution2D(32, 2, 2,activation='relu', border_mode='same')(UpSampling2D(size=(2, 2))(conv8)), conv1], mode='concat', concat_axis=1)\n conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)\n conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)\n\n conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)\n\n model = Model(input=inputs, output=conv10)\n\n model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])\n\n return model\n\n\ndef preprocess(imgs):\n imgs_p = np.ndarray((imgs.shape[0], imgs.shape[1], img_rows, img_cols), dtype=np.float)\n for i in range(imgs.shape[0]):\n imgs_p[i, 0] = cv2.resize(imgs[i, 0], (img_cols, img_rows), interpolation=cv2.INTER_CUBIC)\n return imgs_p\n\n\ndef train_and_predict():\n print('-'*30)\n print('Loading and preprocessing train data...')\n print('-'*30)\n # imgs_train, imgs_mask_train = load_train_data()\n imgs_train=np.load(\"/mnt/data1/yihuihe/mnc/data.npy\")\n imgs_mask_train=np.load(\"/mnt/data1/yihuihe/mnc/mask.npy\")\n imgs_train = imgs_train.astype('float32')\n imgs_mask_train = imgs_mask_train.astype('float32')\n\n # imgs_train = preprocess(imgs_train)\n # imgs_mask_train = preprocess(imgs_mask_train)\n # print(np.histogram(imgs_train))\n # print(np.histogram(imgs_mask_train))\n\n total=imgs_train.shape[0]\n # imgs_train/=255.\n # mean = imgs_train.mean()# (0)[np.newaxis,:] # mean for data centering\n # std = np.std(imgs_train) # std for data normalization\n # imgs_train -= mean\n # imgs_train /= std\n\n # imgs_mask_train /= 255. 
# scale masks to [0, 1]\n\n print('-'*30)\n print('Creating and compiling model...')\n print('-'*30)\n model = get_unet()\n \n # model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss',verbose=1, save_best_only=True)\n\n # print('-'*30)\n # print('Fitting model...')\n # print('-'*30)\n # model.fit(imgs_train, imgs_mask_train, batch_size=32, nb_epoch=20, verbose=1, shuffle=True,callbacks=[model_checkpoint])\n \n # batch_size=32\n # max_iters=10000\n # for i in range(max_iters):\n # data_batch=np.ndarray((batch_size,1,img_rows,img_cols))\n # mask_batch=np.ndarray((batch_size,1,img_rows,img_cols))\n \n # for img in range(batch_size):\n # idx=np.random.randint(total)\n # data_batch[img,0],mask_batch[img,0]=augmentation(imgs_train[idx],imgs_mask_train[idx])\n # # plt.subplot(121)\n # # plt.imshow(data_batch[img,0])\n # # plt.subplot(122)\n # # plt.imshow(mask_batch[img,0])\n # # plt.show()\n # data_batch-=mean\n # data_batch/=std\n # print(np.histogram(data_batch))\n # print(np.histogram(mask_batch))\n\n # model.train_on_batch(data_batch,mask_batch)\n\n print('-'*30)\n print('Loading and preprocessing test data...')\n print('-'*30)\n imgs_test, imgs_id_test = load_test_data()\n imgs_test = preprocess(imgs_test) # TODO: bug\n\n imgs_test = imgs_test.astype('float32')\n imgs_test -= np.load('/mnt/data1/yihuihe/mnc/mean.npy')\n imgs_test /=np.load('/mnt/data1/yihuihe/mnc/std.npy')\n\n print('-'*30)\n print('Loading saved weights...')\n print('-'*30)\n model.load_weights('unet.hdf5')\n\n print('-'*30)\n print('Predicting masks on test data...')\n print('-'*30)\n imgs_mask_test = model.predict(imgs_test, verbose=1)\n np.save('imgs_mask_test.npy', imgs_mask_test)\n\n\nif __name__ == '__main__':\n train_and_predict()\n" ]
[ [ "numpy.load", "numpy.ndarray", "numpy.save", "numpy.random.randint" ] ]
dirangill/Hands-On-Data-Analysis-with-Pandas-2nd-edition
[ "580ee52cf37e5ddee54470aa0526da2a5ca9b310" ]
[ "book_env/Lib/site-packages/ipympl/backend_nbagg.py" ]
[ "\"\"\"Interactive figures in the Jupyter notebook\"\"\"\n\nfrom base64 import b64encode\nimport json\nimport io\n\nfrom IPython.display import display, HTML\n\nfrom ipywidgets import DOMWidget, widget_serialization\nfrom traitlets import (\n Unicode, Bool, CInt, Float, List, Instance, CaselessStrEnum, Enum,\n default\n)\n\nfrom matplotlib import rcParams\nfrom matplotlib.figure import Figure\nfrom matplotlib import is_interactive\nfrom matplotlib.backends.backend_webagg_core import (FigureManagerWebAgg,\n FigureCanvasWebAggCore,\n NavigationToolbar2WebAgg,\n TimerTornado)\nfrom matplotlib.backend_bases import (ShowBase, NavigationToolbar2,\n FigureCanvasBase, cursors)\n\nfrom ._version import js_semver\n\ncursors_str = {\n cursors.HAND: 'pointer',\n cursors.POINTER: 'default',\n cursors.SELECT_REGION: 'crosshair',\n cursors.MOVE: 'move',\n cursors.WAIT: 'wait'\n}\n\n\nclass Show(ShowBase):\n\n def __call__(self, block=None):\n from matplotlib._pylab_helpers import Gcf\n\n managers = Gcf.get_all_fig_managers()\n if not managers:\n return\n\n interactive = is_interactive()\n\n for manager in managers:\n manager.show()\n\n # plt.figure adds an event which puts the figure in focus\n # in the activeQue. Disable this behaviour, as it results in\n # figures being put as the active figure after they have been\n # shown, even in non-interactive mode.\n if hasattr(manager, '_cidgcf'):\n manager.canvas.mpl_disconnect(manager._cidgcf)\n\n if not interactive and manager in Gcf._activeQue:\n Gcf._activeQue.remove(manager)\n\n\nshow = Show()\n\n\ndef draw_if_interactive():\n import matplotlib._pylab_helpers as pylab_helpers\n\n if is_interactive():\n manager = pylab_helpers.Gcf.get_active()\n if manager is not None:\n manager.show()\n\n\ndef connection_info():\n \"\"\"\n Return a string showing the figure and connection status for\n the backend. 
This is intended as a diagnostic tool, and not for general\n use.\n\n \"\"\"\n from matplotlib._pylab_helpers import Gcf\n result = []\n for manager in Gcf.get_all_fig_managers():\n fig = manager.canvas.figure\n result.append('{0} - {1}'.format((fig.get_label() or\n \"Figure {}\".format(manager.num)),\n manager.web_sockets))\n if not is_interactive():\n result.append('Figures pending show: {0}'.format(len(Gcf._activeQue)))\n return '\\n'.join(result)\n\n\nclass Toolbar(DOMWidget, NavigationToolbar2WebAgg):\n\n _model_module = Unicode('jupyter-matplotlib').tag(sync=True)\n _model_module_version = Unicode(js_semver).tag(sync=True)\n _model_name = Unicode('ToolbarModel').tag(sync=True)\n\n _view_module = Unicode('jupyter-matplotlib').tag(sync=True)\n _view_module_version = Unicode(js_semver).tag(sync=True)\n _view_name = Unicode('ToolbarView').tag(sync=True)\n\n toolitems = List().tag(sync=True)\n orientation = Enum(['horizontal', 'vertical'],\n default_value='vertical').tag(sync=True)\n button_style = CaselessStrEnum(\n values=['primary', 'success', 'info', 'warning', 'danger', ''],\n default_value='',\n help=\"\"\"Use a predefined styling for the button.\"\"\").tag(sync=True)\n collapsed = Bool(True).tag(sync=True)\n\n _current_action = Enum(values=['pan', 'zoom', ''],\n default_value='').tag(sync=True)\n\n def __init__(self, canvas, *args, **kwargs):\n DOMWidget.__init__(self, *args, **kwargs)\n NavigationToolbar2WebAgg.__init__(self, canvas, *args, **kwargs)\n\n self.on_msg(self.canvas._handle_message)\n\n def export(self):\n buf = io.BytesIO()\n self.canvas.figure.savefig(buf, format='png', dpi='figure')\n # Figure width in pixels\n pwidth = (self.canvas.figure.get_figwidth() *\n self.canvas.figure.get_dpi())\n # Scale size to match widget on HiPD monitors\n width = pwidth / self.canvas._dpi_ratio\n data = \"<img src='data:image/png;base64,{0}' width={1}/>\"\n data = data.format(b64encode(buf.getvalue()).decode('utf-8'), width)\n display(HTML(data))\n\n @default('toolitems')\n def _default_toolitems(self):\n icons = {\n 'home': 'home',\n 'back': 'arrow-left',\n 'forward': 'arrow-right',\n 'zoom_to_rect': 'square-o',\n 'move': 'arrows',\n 'download': 'floppy-o',\n 'export': 'file-picture-o'\n }\n\n download_item = ('Download', 'Download plot', 'download',\n 'save_figure')\n\n toolitems = (NavigationToolbar2.toolitems + (download_item,))\n\n return [(text, tooltip, icons[icon_name], method_name)\n for text, tooltip, icon_name, method_name\n in toolitems\n if icon_name in icons]\n\n\nclass Canvas(DOMWidget, FigureCanvasWebAggCore):\n\n _model_module = Unicode('jupyter-matplotlib').tag(sync=True)\n _model_module_version = Unicode(js_semver).tag(sync=True)\n _model_name = Unicode('MPLCanvasModel').tag(sync=True)\n\n _view_module = Unicode('jupyter-matplotlib').tag(sync=True)\n _view_module_version = Unicode(js_semver).tag(sync=True)\n _view_name = Unicode('MPLCanvasView').tag(sync=True)\n\n toolbar = Instance(Toolbar,\n allow_none=True).tag(sync=True, **widget_serialization)\n toolbar_visible = Bool(True).tag(sync=True)\n toolbar_position = Enum(['top', 'bottom', 'left', 'right'],\n default_value='left').tag(sync=True)\n\n header_visible = Bool(True).tag(sync=True)\n footer_visible = Bool(True).tag(sync=True)\n\n resizable = Bool(True).tag(sync=True)\n capture_scroll = Bool(False).tag(sync=True)\n\n _width = CInt().tag(sync=True)\n _height = CInt().tag(sync=True)\n\n _figure_label = Unicode('Figure').tag(sync=True)\n _message = Unicode().tag(sync=True)\n _cursor = 
Unicode('pointer').tag(sync=True)\n\n _image_mode = Unicode('full').tag(sync=True)\n\n _rubberband_x = CInt(0).tag(sync=True)\n _rubberband_y = CInt(0).tag(sync=True)\n _rubberband_width = CInt(0).tag(sync=True)\n _rubberband_height = CInt(0).tag(sync=True)\n\n _closed = Bool(True)\n\n # Must declare the superclass private members.\n _png_is_old = Bool()\n _force_full = Bool()\n _current_image_mode = Unicode()\n _dpi_ratio = Float(1.0)\n\n def __init__(self, figure, *args, **kwargs):\n DOMWidget.__init__(self, *args, **kwargs)\n FigureCanvasWebAggCore.__init__(self, figure, *args, **kwargs)\n\n self.on_msg(self._handle_message)\n\n def _handle_message(self, object, content, buffers):\n # Every content has a \"type\".\n if content['type'] == 'closing':\n self._closed = True\n elif content['type'] == 'initialized':\n _, _, w, h = self.figure.bbox.bounds\n self.manager.resize(w, h)\n else:\n self.manager.handle_json(content)\n\n def send_json(self, content):\n # Change in the widget state?\n if content['type'] == 'cursor':\n self._cursor = cursors_str[content['cursor']]\n\n elif content['type'] == 'message':\n self._message = content['message']\n\n elif content['type'] == 'figure_label':\n self._figure_label = content['label']\n\n elif content['type'] == 'resize':\n self._width = content['size'][0]\n self._height = content['size'][1]\n # Send resize message anyway\n self.send({'data': json.dumps(content)})\n\n elif content['type'] == 'image_mode':\n self._image_mode = content['mode']\n\n else:\n # Default: send the message to the front-end\n self.send({'data': json.dumps(content)})\n\n def send_binary(self, data):\n self.send({'data': '{\"type\": \"binary\"}'}, buffers=[data])\n\n def new_timer(self, *args, **kwargs):\n return TimerTornado(*args, **kwargs)\n\n def start_event_loop(self, timeout):\n FigureCanvasBase.start_event_loop_default(self, timeout)\n\n def stop_event_loop(self):\n FigureCanvasBase.stop_event_loop_default(self)\n\n\nclass FigureManager(FigureManagerWebAgg):\n ToolbarCls = Toolbar\n\n def __init__(self, canvas, num):\n FigureManagerWebAgg.__init__(self, canvas, num)\n self.web_sockets = [self.canvas]\n\n def show(self):\n if self.canvas._closed:\n self.canvas._closed = False\n display(self.canvas)\n else:\n self.canvas.draw_idle()\n\n def destroy(self):\n self.canvas.close()\n\n\ndef new_figure_manager(num, *args, **kwargs):\n \"\"\"\n Create a new figure manager instance\n \"\"\"\n figure_class = kwargs.pop('FigureClass', Figure)\n this_fig = figure_class(*args, **kwargs)\n return new_figure_manager_given_figure(num, this_fig)\n\n\ndef new_figure_manager_given_figure(num, figure):\n \"\"\"\n Create a new figure manager instance for the given figure.\n \"\"\"\n from matplotlib._pylab_helpers import Gcf\n\n def closer(event):\n Gcf.destroy(num)\n\n canvas = Canvas(figure)\n if 'nbagg.transparent' in rcParams and rcParams['nbagg.transparent']:\n figure.patch.set_alpha(0)\n manager = FigureManager(canvas, num)\n\n if is_interactive():\n manager.show()\n figure.canvas.draw_idle()\n\n canvas.mpl_connect('close_event', closer)\n\n return manager\n" ]
[ [ "matplotlib.backends.backend_webagg_core.FigureCanvasWebAggCore.__init__", "matplotlib.is_interactive", "matplotlib.backends.backend_webagg_core.FigureManagerWebAgg.__init__", "matplotlib._pylab_helpers.Gcf.get_all_fig_managers", "matplotlib.backend_bases.FigureCanvasBase.start_event_loop_default", "matplotlib._pylab_helpers.Gcf._activeQue.remove", "matplotlib.backends.backend_webagg_core.TimerTornado", "matplotlib.backend_bases.FigureCanvasBase.stop_event_loop_default", "matplotlib.backends.backend_webagg_core.NavigationToolbar2WebAgg.__init__", "matplotlib._pylab_helpers.Gcf.get_active", "matplotlib._pylab_helpers.Gcf.destroy" ] ]
foundations/py-futu-api
[ "b8f9fb1f26f35f99630ca47863f5595b6e635533" ]
[ "futu/trade/open_trade_context.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pandas as pd\nfrom futu.common.open_context_base import OpenContextBase\nfrom futu.trade.trade_query import *\nfrom futu.common.err import *\nfrom futu.common.constant import *\n\nclass OpenTradeContextBase(OpenContextBase):\n \"\"\"Class for set context of HK stock trade\"\"\"\n\n def __init__(self, trd_mkt, host=\"127.0.0.1\", port=11111, is_encrypt=None, security_firm=SecurityFirm.FUTUSECURITIES, trd_category=TrdCategory.NONE, need_general_sec_acc=False):\n self.__trd_mkt = trd_mkt\n self._ctx_unlock = None\n self.__last_acc_list = []\n self.__is_acc_sub_push = False\n self.__security_firm = security_firm\n self.__trd_category = trd_category\n self.__need_general_sec_acc = need_general_sec_acc\n\n # if host != \"127.0.0.1\" and host != \"localhost\" and is_encrypt is None:\n # '''非本地连接必须加密,以免远程攻击'''\n # print(\"{} is not local connection!\".format(host))\n # raise Exception('Non-local connections must be encrypted')\n\n super(OpenTradeContextBase, self).__init__(host, port, is_encrypt=is_encrypt)\n\n def close(self):\n \"\"\"\n to call close old obj before loop create new, otherwise socket will encounter erro 10053 or more!\n \"\"\"\n super(OpenTradeContextBase, self).close()\n\n def on_api_socket_reconnected(self):\n \"\"\"for API socket reconnected\"\"\"\n self.__is_acc_sub_push = False\n self.__last_acc_list = []\n\n ret, msg = RET_OK, ''\n # auto unlock trade\n if self._ctx_unlock is not None:\n password, password_md5 = self._ctx_unlock\n ret, data = self.unlock_trade(password, password_md5)\n logger.debug('auto unlock trade ret={},data={}'.format(ret, data))\n if ret != RET_OK:\n msg = data\n\n # 定阅交易帐号推送\n if ret == RET_OK:\n self.__check_acc_sub_push()\n\n return ret, msg\n\n def get_acc_list(self):\n \"\"\"\n :return: (ret, data)\n \"\"\"\n query_processor = self._get_sync_query_processor(\n GetAccountList.pack_req, GetAccountList.unpack_rsp)\n\n kargs = {\n 'user_id': self.get_login_user_id(),\n 'conn_id': self.get_sync_conn_id(),\n 'trd_category': self.__trd_category,\n 'need_general_sec_acc': self.__need_general_sec_acc\n }\n\n ret_code, msg, acc_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n # 记录当前市场的帐号列表\n self.__last_acc_list = []\n\n for record in acc_list:\n trdMkt_list = record[\"trdMarket_list\"]\n if self.__trd_mkt == TrdMarket.NONE or self.__trd_mkt in trdMkt_list:\n if record['trd_env'] == TrdEnv.SIMULATE or record['security_firm'] == NoneDataValue or record['security_firm'] == self.__security_firm:\n trd_marketauth = []\n for item in record[\"trdmarket_auth\"]:\n trd_marketauth.append(TrdMarket.to_string2(item))\n self.__last_acc_list.append({\n \"trd_env\": record[\"trd_env\"],\n \"acc_id\": record[\"acc_id\"],\n \"acc_type\": record[\"acc_type\"],\n \"card_num\": record[\"card_num\"],\n \"security_firm\": record[\"security_firm\"],\n \"sim_acc_type\": record[\"sim_acc_type\"],\n \"trdmarket_auth\" : trd_marketauth})\n\n col_list = [\"acc_id\", \"trd_env\", \"acc_type\", \"card_num\", \"security_firm\", \"sim_acc_type\", \"trdmarket_auth\"]\n\n acc_table = pd.DataFrame(copy(self.__last_acc_list), columns=col_list)\n\n return RET_OK, acc_table\n\n def unlock_trade(self, password=None, password_md5=None, is_unlock=True):\n \"\"\"\n 交易解锁,安全考虑,所有的交易api,需成功解锁后才可操作\n :param password: 明文密码字符串 (二选一)\n :param password_md5: 密码的md5字符串(二选一)\n :param is_unlock: 解锁 = True, 锁定 = False\n :return:(ret, data) ret == RET_OK时, data为None,如果之前已经解锁过了,data为提示字符串,指示出已经解锁\n ret != RET_OK时, data为错误字符串\n \"\"\"\n\n # 
仅支持真实交易的市场可以解锁,模拟交易不需要解锁\n md5_val = ''\n if is_unlock:\n ret = TRADE.check_mkt_envtype(self.__trd_mkt, TrdEnv.REAL)\n if not ret:\n return RET_OK, Err.NoNeedUnlock.text\n\n if password is None and password_md5 is None:\n return RET_ERROR, 'Missing necessary parameter. One of the two parameters (password and password_md5) is required.'\n\n md5_val = str(password_md5) if password_md5 else md5_transform(str(password))\n\n # 解锁要求先拉一次帐户列表, 目前仅真实环境需要解锁\n ret, msg, acc_id = self._check_acc_id(TrdEnv.REAL, 0)\n if ret != RET_OK:\n return ret, msg\n\n query_processor = self._get_sync_query_processor(\n UnlockTrade.pack_req, UnlockTrade.unpack_rsp)\n\n kargs = {\n 'is_unlock': is_unlock,\n 'password_md5': md5_val,\n 'conn_id': self.get_sync_conn_id(),\n 'security_firm': self.__security_firm\n }\n\n ret_code, msg, _ = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n # reconnected to auto unlock\n if RET_OK == ret_code:\n self._ctx_unlock = (password, password_md5) if is_unlock else None\n\n # 定阅交易帐号推送\n if is_unlock and ret_code == RET_OK:\n self.__check_acc_sub_push()\n\n if msg is not None and len(msg) > 0:\n return RET_OK, msg\n return RET_OK, None\n\n def _async_sub_acc_push(self, acc_id_list):\n \"\"\"\n 异步连接指定要接收送的acc id\n :param acc_id:\n :return:\n \"\"\"\n kargs = {\n 'acc_id_list': acc_id_list,\n 'conn_id': self.get_async_conn_id(),\n }\n ret_code, msg, push_req_str = SubAccPush.pack_req(**kargs)\n if ret_code == RET_OK:\n self._send_async_req(push_req_str)\n\n return RET_OK, None\n\n def on_async_sub_acc_push(self, ret_code, msg):\n self.__is_acc_sub_push = ret_code == RET_OK\n if not self.__is_acc_sub_push:\n logger.error(\"ret={} msg={}\".format(ret_code, msg))\n\n def _check_trd_env(self, trd_env):\n is_enable = TRADE.check_mkt_envtype(self.__trd_mkt, trd_env)\n if not is_enable:\n return RET_ERROR, ERROR_STR_PREFIX + \"the type of environment param is wrong \"\n\n return RET_OK, \"\"\n\n def __check_acc_sub_push(self):\n if self.__is_acc_sub_push:\n return\n\n if len(self.__last_acc_list) == 0:\n ret, _ = self.get_acc_list()\n if ret != RET_OK:\n return\n\n acc_id_list = [x['acc_id'] for x in self.__last_acc_list]\n\n if len(acc_id_list):\n self._async_sub_acc_push(acc_id_list)\n\n def _check_acc_id(self, trd_env, acc_id):\n if acc_id == 0:\n if len(self.__last_acc_list) == 0:\n ret, content = self.get_acc_list()\n if ret != RET_OK:\n return ret, content, acc_id\n acc_id = self._get_default_acc_id(trd_env)\n\n msg = \"\" if acc_id != 0 else ERROR_STR_PREFIX + \"No one available account!\"\n ret = RET_OK if acc_id != 0 else RET_ERROR\n\n return ret, msg, acc_id\n\n def _check_order_status(self, status_filter_list):\n unique_and_normalize_list(status_filter_list)\n for status in status_filter_list:\n if not OrderStatus.if_has_key(status):\n return RET_ERROR, ERROR_STR_PREFIX + \"the type of status_filter_list param is wrong \"\n return RET_OK, \"\",\n\n def _get_default_acc_id(self, trd_env):\n for record in self.__last_acc_list:\n if record['trd_env'] == trd_env:\n return record['acc_id']\n return 0\n\n def _get_default_acc_id(self, trd_env):\n for record in self.__last_acc_list:\n if record['trd_env'] == trd_env:\n return record['acc_id']\n return 0\n\n def _get_acc_id_by_acc_index(self, trd_env, acc_index=0):\n ret, msg = self.get_acc_list()\n if ret != RET_OK:\n return ret, msg, None, []\n acc_table = msg\n env_list = [trd_env]\n acc_table = acc_table[acc_table['trd_env'].isin(env_list)]\n acc_table = acc_table.reset_index(drop=True)\n\n total_acc_num = 
acc_table.shape[0]\n if total_acc_num == 0:\n msg = Err.NoAccForSecurityFirm.text\n return RET_ERROR, msg, acc_index, []\n elif acc_index >= total_acc_num:\n msg = ERROR_STR_PREFIX + \"the index {0} is out of the total amount {1} \".format(acc_index, total_acc_num)\n return RET_ERROR, msg, acc_index, []\n return RET_OK, \"\", acc_table['acc_id'][acc_index], acc_table['trdmarket_auth'][acc_index]\n\n def _check_acc_id_exist(self, trd_env, acc_id):\n ret, msg = self.get_acc_list()\n if ret != RET_OK:\n return ret, msg, acc_id, []\n content = msg\n\n acc_index = content[(content.acc_id == acc_id) & (content.trd_env == trd_env)].index.tolist()\n if len(acc_index):\n return RET_OK, \"\", acc_id, content['trdmarket_auth'][acc_index[0]]\n else:\n return RET_ERROR, ERROR_STR_PREFIX + \"This account is not available account!\", acc_id, []\n\n def _check_acc_id_and_acc_index(self, trd_env, acc_id, acc_index):\n if self.__trd_category == TrdCategory.SECURITY and self.__trd_mkt == TrdMarket.FUTURES:\n return RET_ERROR, 'OpenSecTradeContext can not connect to futures accounts.', None, []\n if acc_id == 0:\n ret, msg, acc_id, auth_list = self._get_acc_id_by_acc_index(trd_env, acc_index)\n if ret != RET_OK:\n return ret, msg, acc_id, []\n else:\n ret, msg, acc_id, auth_list = self._check_acc_id_exist(trd_env, acc_id)\n if ret != RET_OK:\n return ret, msg, acc_id, []\n return RET_OK, \"\", acc_id, auth_list\n\n def accinfo_query(self, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0, refresh_cache=False, currency=Currency.HKD):\n \"\"\"\n :param trd_env:\n :param acc_id:\n :param acc_index:\n :return:\n \"\"\"\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n query_processor = self._get_sync_query_processor(\n AccInfoQuery.pack_req, AccInfoQuery.unpack_rsp)\n\n kargs = {\n 'acc_id': int(acc_id),\n 'trd_env': trd_env,\n 'trd_market': acc_auth_list[0],\n 'conn_id': self.get_sync_conn_id(),\n 'refresh_cache': refresh_cache,\n 'currency': currency\n }\n\n ret_code, msg, accinfo_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = [\n 'power', 'max_power_short', 'net_cash_power', 'total_assets', 'cash', 'market_val', 'long_mv', 'short_mv',\n 'pending_asset', 'interest_charged_amount', 'frozen_cash', 'avl_withdrawal_cash', 'max_withdrawal', 'currency',\n 'available_funds', 'unrealized_pl', 'realized_pl', 'risk_level', 'risk_status', 'initial_margin',\n 'margin_call_margin', 'maintenance_margin', 'hk_cash', 'hk_avl_withdrawal_cash', 'us_cash',\n 'us_avl_withdrawal_cash', 'cn_cash', 'cn_avl_withdrawal_cash', 'jp_cash', 'jp_avl_withdrawal_cash',\n 'sg_cash', 'sg_avl_withdrawal_cash', 'is_pdt', 'pdt_seq', 'beginning_dtbp', 'remaining_dtbp',\n 'dt_call_amount', 'dt_status'\n ]\n accinfo_frame_table = pd.DataFrame(accinfo_list, columns=col_list)\n\n return RET_OK, accinfo_frame_table\n\n def _get_trd_market_from_market(self, qot_market, trd_env, trd_category):\n # if self.__trd_mkt != TrdMarket.NONE:\n # return self.__trd_mkt\n trd_market = 'N/A'\n if trd_category == TrdCategory.FUTURE:\n trd_market = TrdMarket.FUTURES\n else:\n if qot_market == Market.HK:\n trd_market = TrdMarket.HK\n if qot_market == Market.US:\n trd_market = TrdMarket.US\n if qot_market == Market.SH or qot_market == Market.SZ:\n if trd_env == TrdEnv.REAL:\n trd_market = TrdMarket.HKCC\n else:\n trd_market = TrdMarket.CN\n return trd_market\n\n 
def _check_stock_code(self, code):\n stock_code = ''\n if code is not None and code != '':\n ret_code, content = split_stock_str(str(code))\n if ret_code == RET_OK:\n _, stock_code = content\n else:\n stock_code = code\n return RET_OK, \"\", stock_code\n\n def _split_stock_code(self, code):\n stock_str = str(code)\n\n split_loc = stock_str.find(\".\")\n '''do not use the built-in split function in python.\n The built-in function cannot handle some stock strings correctly.\n for instance, US..DJI, where the dot . itself is a part of original code'''\n if 0 <= split_loc < len(stock_str) - 1 and Market.if_has_key(stock_str[0:split_loc]):\n market_str = stock_str[0:split_loc]\n partial_stock_str = stock_str[split_loc + 1:]\n return RET_OK, (market_str, partial_stock_str)\n\n else:\n error_str = ERROR_STR_PREFIX + \"format of %s is wrong. (US.AAPL, HK.00700, SZ.000001)\" % stock_str\n return RET_ERROR, error_str\n\n def position_list_query(self, code='', pl_ratio_min=None,\n pl_ratio_max=None, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0, refresh_cache=False):\n \"\"\"for querying the position list\"\"\"\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, stock_code = self._check_stock_code(code)\n if ret != RET_OK:\n return ret, msg\n\n query_processor = self._get_sync_query_processor(\n PositionListQuery.pack_req, PositionListQuery.unpack_rsp)\n\n kargs = {\n 'code': str(stock_code),\n 'pl_ratio_min': pl_ratio_min,\n 'pl_ratio_max': pl_ratio_max,\n 'trd_mkt': acc_auth_list[0],\n 'trd_env': trd_env,\n 'acc_id': acc_id,\n 'conn_id': self.get_sync_conn_id(),\n 'refresh_cache': refresh_cache\n }\n ret_code, msg, position_list = query_processor(**kargs)\n\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = [\n \"code\", \"stock_name\", \"qty\", \"can_sell_qty\", \"cost_price\",\n \"cost_price_valid\", \"market_val\", \"nominal_price\", \"pl_ratio\",\n \"pl_ratio_valid\", \"pl_val\", \"pl_val_valid\", \"today_buy_qty\",\n \"today_buy_val\", \"today_pl_val\", \"today_trd_val\", \"today_sell_qty\",\n \"today_sell_val\", \"position_side\", \"unrealized_pl\", \"realized_pl\",\n \"currency\",\n ]\n\n position_list_table = pd.DataFrame(position_list, columns=col_list)\n return RET_OK, position_list_table\n\n def order_list_query(self, order_id=\"\", status_filter_list=[], code='', start='', end='',\n trd_env=TrdEnv.REAL, acc_id=0, acc_index=0, refresh_cache=False):\n\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n ret_code, ret_data = self._order_list_query_impl(order_id, status_filter_list,\n code, start, end, trd_env, acc_id,\n refresh_cache, acc_auth_list[0])\n if ret_code != RET_OK:\n return ret_code, ret_data\n\n col_list = [\n \"code\", \"stock_name\", \"trd_side\", \"order_type\", \"order_status\",\n \"order_id\", \"qty\", \"price\", \"create_time\", \"updated_time\",\n \"dealt_qty\", \"dealt_avg_price\", \"last_err_msg\", \"remark\",\n \"time_in_force\", \"fill_outside_rth\", \"aux_price\", \"trail_type\",\n \"trail_value\", \"trail_spread\", \"currency\",\n ]\n order_list = ret_data\n order_list_table = pd.DataFrame(order_list, columns=col_list)\n\n return RET_OK, order_list_table\n\n def _order_list_query_impl(self, order_id, 
status_filter_list, code, start, end, trd_env, acc_id, refresh_cache, trd_mkt):\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n ret, msg , acc_id = self._check_acc_id(trd_env, acc_id)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, stock_code = self._check_stock_code(code)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg = self._check_order_status(status_filter_list)\n if ret != RET_OK:\n return ret, msg\n\n if start:\n ret, data = normalize_date_format(start)\n if ret != RET_OK:\n return ret, data\n start = data\n\n if end:\n ret, data = normalize_date_format(end)\n if ret != RET_OK:\n return ret, data\n end = data\n\n query_processor = self._get_sync_query_processor(\n OrderListQuery.pack_req, OrderListQuery.unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {\n 'order_id': str(order_id),\n 'status_filter_list': status_filter_list,\n 'code': str(stock_code),\n 'start': str(start) if start else \"\",\n 'end': str(end) if end else \"\",\n 'trd_mkt': trd_mkt,\n 'trd_env': trd_env,\n 'acc_id': acc_id,\n 'conn_id': self.get_sync_conn_id(),\n 'refresh_cache': refresh_cache\n }\n ret_code, msg, order_list = query_processor(**kargs)\n\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n return RET_OK, order_list\n\n def place_order(self, price, qty, code, trd_side, order_type=OrderType.NORMAL,\n adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0, remark=None,\n time_in_force=TimeInForce.DAY, fill_outside_rth=False, aux_price = None,\n trail_type = None ,trail_value = None ,trail_spread = None):\n \"\"\"\n place order\n use set_handle(HKTradeOrderHandlerBase) to recv order push !\n \"\"\"\n # ret, msg = self._check_trd_env(trd_env)\n # if ret != RET_OK:\n # return ret, msg\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n ret, content = self._split_stock_code(code)\n if ret != RET_OK:\n return ret, content\n\n if remark is not None:\n if is_str(remark):\n remark_utf8 = remark.encode('utf-8')\n if len(remark_utf8) > 64:\n return RET_ERROR, make_wrong_value_msg_utf8_len_le('remark', 64)\n else:\n return RET_ERROR, make_wrong_type_msg('remark', 'str')\n\n fill_outside_rth = True if fill_outside_rth else False\n\n market_str, stock_code = content\n\n trd_market = self._get_trd_market_from_market(market_str, trd_env, self.__trd_category)\n\n query_processor = self._get_sync_query_processor(\n PlaceOrder.pack_req, PlaceOrder.unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {\n 'trd_side': trd_side,\n 'order_type': order_type,\n 'price': float(price),\n 'qty': float(qty),\n 'code': stock_code,\n 'adjust_limit': float(adjust_limit),\n 'trd_mkt': trd_market,\n 'sec_mkt_str': market_str,\n 'trd_env': trd_env,\n 'acc_id': acc_id,\n 'conn_id': self.get_sync_conn_id(),\n 'remark': remark,\n 'time_in_force': time_in_force,\n 'fill_outside_rth': fill_outside_rth,\n 'aux_price': aux_price ,\n 'trail_type': trail_type ,\n 'trail_value': trail_value ,\n 'trail_spread': trail_spread ,\n }\n\n ret_code, msg, order_id = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n order_item = {'trd_env': trd_env, 'order_id': order_id}\n\n # 保持跟v2.0兼容, 增加必要的订单字段\n for x in range(3):\n ret_code, ret_data = self._order_list_query_impl(order_id=order_id,status_filter_list=[],\n 
code=\"\", start=\"\", end=\"\", trd_env=trd_env, acc_id=acc_id,\n refresh_cache=False, trd_mkt=acc_auth_list[0])\n if ret_code == RET_OK and len(ret_data) > 0:\n order_item = ret_data[0]\n order_item['trd_env'] = trd_env\n break\n\n col_list = [\n \"code\", \"stock_name\", \"trd_side\", \"order_type\", \"order_status\",\n \"order_id\", \"qty\", \"price\", \"create_time\", \"updated_time\",\n \"dealt_qty\", \"dealt_avg_price\", \"last_err_msg\", \"remark\",\n \"time_in_force\", \"fill_outside_rth\", 'aux_price',\n 'trail_type', 'trail_value', 'trail_spread', \"currency\",\n ]\n order_list = [order_item]\n order_table = pd.DataFrame(order_list, columns=col_list)\n\n return RET_OK, order_table\n\n def modify_order(self, modify_order_op, order_id, qty, price,\n adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0,\n aux_price=None, trail_type=None, trail_value=None, trail_spread=None):\n\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n if not order_id:\n return RET_ERROR, ERROR_STR_PREFIX + \"the type of order_id param is wrong \"\n\n if not ModifyOrderOp.if_has_key(modify_order_op):\n return RET_ERROR, ERROR_STR_PREFIX + \"the type of modify_order_op param is wrong \"\n\n if trail_type is not None and not TrailType.if_has_key(trail_type):\n return RET_ERROR, ERROR_STR_PREFIX + \"the type of trail_type param is wrong \"\n\n query_processor = self._get_sync_query_processor(\n ModifyOrder.pack_req, ModifyOrder.unpack_rsp)\n\n kargs = {\n 'modify_order_op': modify_order_op,\n 'order_id': str(order_id),\n 'price': float(price),\n 'qty': float(qty),\n 'adjust_limit': adjust_limit,\n 'trd_mkt': acc_auth_list[0],\n 'trd_env': trd_env,\n 'acc_id': acc_id,\n 'conn_id': self.get_sync_conn_id(),\n 'aux_price': aux_price,\n 'trail_type': trail_type,\n 'trail_value': trail_value,\n 'trail_spread': trail_spread,\n }\n\n ret_code, msg, modify_order_list = query_processor(**kargs)\n\n if ret_code != RET_OK:\n return RET_ERROR,msg\n\n col_list = ['trd_env', 'order_id']\n modify_order_table = pd.DataFrame(modify_order_list, columns=col_list)\n\n return RET_OK, modify_order_table\n\n def cancel_all_order(self, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0, trdmarket=TrdMarket.NONE):\n \"\"\"\n 取消所有的订单\n \"\"\"\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n if trdmarket is not None and not TrdMarket.if_has_key(trdmarket):\n return RET_ERROR, ERROR_STR_PREFIX + \"the type of trdmarket param is wrong \"\n\n query_processor = self._get_sync_query_processor(\n CancelOrder.pack_req, CancelOrder.unpack_rsp)\n\n kargs = {\n 'trd_mkt': acc_auth_list[0],\n 'trd_env': trd_env,\n 'acc_id': acc_id,\n 'conn_id': self.get_sync_conn_id(),\n 'trdmarket': trdmarket,\n }\n\n ret_code, msg, _ = query_processor(**kargs)\n return ret_code, msg\n\n\n def change_order(self, order_id, price, qty, adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0):\n return self.modify_order(ModifyOrderOp.NORMAL, order_id=order_id, qty=qty, price=price,\n adjust_limit=adjust_limit, trd_env=trd_env, acc_id=acc_id)\n\n def deal_list_query(self, code=\"\", trd_env=TrdEnv.REAL, acc_id=0, acc_index=0, refresh_cache=False):\n \"\"\"for querying deal list\"\"\"\n ret, msg = self._check_trd_env(trd_env)\n if ret != 
RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, stock_code = self._check_stock_code(code)\n if ret != RET_OK:\n return ret, msg\n\n query_processor = self._get_sync_query_processor(\n DealListQuery.pack_req, DealListQuery.unpack_rsp)\n\n kargs = {\n 'code': stock_code,\n 'trd_mkt': acc_auth_list[0],\n 'trd_env': trd_env,\n 'acc_id': acc_id,\n 'conn_id': self.get_sync_conn_id(),\n 'refresh_cache': refresh_cache\n }\n ret_code, msg, deal_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = [\n \"code\", \"stock_name\", \"deal_id\", \"order_id\", \"qty\", \"price\",\n \"trd_side\", \"create_time\", \"counter_broker_id\", \"counter_broker_name\", 'status'\n ]\n deal_list_table = pd.DataFrame(deal_list, columns=col_list)\n\n return RET_OK, deal_list_table\n\n def history_order_list_query(self, status_filter_list=[], code='', start='', end='',\n trd_env=TrdEnv.REAL, acc_id=0, acc_index=0):\n\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, stock_code = self._check_stock_code(code)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg = self._check_order_status(status_filter_list)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, start, end = normalize_start_end_date(start, end, 90)\n if ret != RET_OK:\n return ret, msg\n\n query_processor = self._get_sync_query_processor(\n HistoryOrderListQuery.pack_req,\n HistoryOrderListQuery.unpack_rsp)\n\n kargs = {\n 'status_filter_list': status_filter_list,\n 'code': str(stock_code),\n 'start': str(start) if start else \"\",\n 'end': str(end) if end else \"\",\n 'trd_mkt': acc_auth_list[0],\n 'trd_env': trd_env,\n 'acc_id': acc_id,\n 'conn_id': self.get_sync_conn_id()\n }\n ret_code, msg, order_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = [\n \"code\", \"stock_name\", \"trd_side\", \"order_type\", \"order_status\",\n \"order_id\", \"qty\", \"price\", \"create_time\", \"updated_time\",\n \"dealt_qty\", \"dealt_avg_price\", \"last_err_msg\", \"remark\",\n \"time_in_force\", \"fill_outside_rth\", \"aux_price\", \"trail_type\", \"trail_value\",\n \"trail_spread\", \"currency\",\n ]\n order_list_table = pd.DataFrame(order_list, columns=col_list)\n\n return RET_OK, order_list_table\n\n def history_deal_list_query(self, code='', start='', end='', trd_env=TrdEnv.REAL, acc_id=0, acc_index=0):\n\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, stock_code = self._check_stock_code(code)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, start, end = normalize_start_end_date(start, end, 90)\n if ret != RET_OK:\n return ret, msg\n\n query_processor = self._get_sync_query_processor(\n HistoryDealListQuery.pack_req,\n HistoryDealListQuery.unpack_rsp)\n\n kargs = {\n 'code': str(stock_code),\n 'start': str(start) if start else \"\",\n 'end': str(end) if end else \"\",\n 'trd_mkt': acc_auth_list[0],\n 'trd_env': trd_env,\n 'acc_id': acc_id,\n 'conn_id': self.get_sync_conn_id()\n }\n ret_code, msg, deal_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = 
[\n \"code\", \"stock_name\", \"deal_id\", \"order_id\", \"qty\", \"price\",\n \"trd_side\", \"create_time\", \"counter_broker_id\", \"counter_broker_name\", 'status'\n ]\n deal_list_table = pd.DataFrame(deal_list, columns=col_list)\n\n return RET_OK, deal_list_table\n\n def acctradinginfo_query(self, order_type, code, price, order_id=None, adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0):\n \"\"\"\n 查询账户下最大可买卖数量\n :param order_type: 订单类型,参见OrderType\n :param code: 证券代码,例如'HK.00700'\n :param price: 报价,3位精度\n :param order_id: 订单号。如果是新下单,则可以传None。如果是改单则要传单号。\n :param adjust_limit: 调整方向和调整幅度百分比限制,正数代表向上调整,负数代表向下调整,具体值代表调整幅度限制,如:0.015代表向上调整且幅度不超过1.5%;-0.01代表向下调整且幅度不超过1%。默认0表示不调整\n :param trd_env: 交易环境,参见TrdEnv\n :param acc_id: 业务账号,默认0表示第1个\n :param acc_index: int,交易业务子账户ID列表所对应的下标,默认0,表示第1个业务ID\n :return: (ret, data)\n\n ret == RET_OK, data为pd.DataFrame,数据列如下\n\n ret != RET_OK, data为错误信息\n\n ======================= =========== ======================================================================================\n 参数 类型 说明\n ======================= =========== ======================================================================================\n max_cash_buy float 不使用融资,仅自己的现金最大可买整手股数\n max_cash_and_margin_buy float 使用融资,自己的现金 + 融资资金总共的最大可买整手股数\n max_position_sell float 不使用融券(卖空),仅自己的持仓最大可卖整手股数\n max_sell_short float 使用融券(卖空),最大可卖空整手股数,不包括多仓\n max_buy_back float 卖空后,需要买回的最大整手股数。因为卖空后,必须先买回已卖空的股数,还掉股票,才能再继续买多。\n ======================= =========== ======================================================================================\n \"\"\"\n ret, msg = self._check_trd_env(trd_env)\n if ret != RET_OK:\n return ret, msg\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index)\n if ret != RET_OK:\n return ret, msg\n\n ret, content = self._split_stock_code(code)\n if ret != RET_OK:\n return ret, content\n\n market_str, stock_code = content\n\n trd_market = self._get_trd_market_from_market(market_str, trd_env, self.__trd_category)\n\n query_processor = self._get_sync_query_processor(\n AccTradingInfoQuery.pack_req,\n AccTradingInfoQuery.unpack_rsp)\n\n kargs = {\n 'order_type': order_type,\n 'code': str(stock_code),\n 'price': price,\n 'order_id': order_id,\n 'adjust_limit': adjust_limit,\n 'trd_mkt': trd_market,\n 'sec_mkt_str': market_str,\n 'trd_env': trd_env,\n 'acc_id': acc_id,\n 'conn_id': self.get_sync_conn_id()\n }\n\n ret_code, msg, data = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = ['max_cash_buy', 'max_cash_and_margin_buy', 'max_position_sell', 'max_sell_short', 'max_buy_back',\n 'long_required_im', 'short_required_im']\n acctradinginfo_table = pd.DataFrame(data, columns=col_list)\n return RET_OK, acctradinginfo_table\n\n def get_margin_ratio(self, code_list):\n code_list = unique_and_normalize_list(code_list)\n if not code_list:\n error_str = ERROR_STR_PREFIX + \"the type of code param is wrong\"\n return RET_ERROR, error_str\n\n ret, msg, acc_id, acc_auth_list = self._check_acc_id_and_acc_index(TrdEnv.REAL, 0, 0)\n if ret != RET_OK:\n return ret, msg\n\n ret, content = self._split_stock_code(code_list[0])\n if ret != RET_OK:\n return ret, content\n\n market_str, stock_code = content\n\n trd_market = self._get_trd_market_from_market(market_str, TrdEnv.REAL, self.__trd_category)\n\n query_processor = self._get_sync_query_processor(\n MarginRatio.pack_req, MarginRatio.unpack_rsp)\n kargs = {\n \"code_list\": code_list,\n \"conn_id\": self.get_sync_conn_id(),\n \"acc_id\": acc_id,\n 
'trd_mkt': trd_market,\n }\n\n ret_code, msg, margin_ratio_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = [\n \"code\", \"is_long_permit\", \"is_short_permit\", \"short_pool_remain\", \"short_fee_rate\", \"alert_long_ratio\",\n \"alert_short_ratio\", \"im_long_ratio\", \"im_short_ratio\", \"mcm_long_ratio\", 'mcm_short_ratio', \"mm_long_ratio\", 'mm_short_ratio'\n ]\n margin_ratio_table = pd.DataFrame(margin_ratio_list, columns=col_list)\n\n return RET_OK, margin_ratio_table\n\n# 港股交易接口\nclass OpenHKTradeContext(OpenTradeContextBase):\n def __init__(self, host=\"127.0.0.1\", port=11111, is_encrypt=None, security_firm=SecurityFirm.FUTUSECURITIES):\n super(OpenHKTradeContext, self).__init__(TrdMarket.HK, host, port, is_encrypt=is_encrypt, security_firm=security_firm, trd_category=TrdCategory.SECURITY)\n\n\n# 美股交易接口\nclass OpenUSTradeContext(OpenTradeContextBase):\n def __init__(self, host=\"127.0.0.1\", port=11111, is_encrypt=None, security_firm=SecurityFirm.FUTUSECURITIES):\n super(OpenUSTradeContext, self).__init__(TrdMarket.US, host, port, is_encrypt=is_encrypt, security_firm=security_firm, trd_category=TrdCategory.SECURITY)\n\n\n# A股通交易接口\nclass OpenHKCCTradeContext(OpenTradeContextBase):\n def __init__(self, host=\"127.0.0.1\", port=11111, is_encrypt=None, security_firm=SecurityFirm.FUTUSECURITIES):\n super(OpenHKCCTradeContext, self).__init__(TrdMarket.HKCC, host, port, is_encrypt=is_encrypt, security_firm=security_firm, trd_category=TrdCategory.SECURITY)\n\n\n# A股交易接口\nclass OpenCNTradeContext(OpenTradeContextBase):\n def __init__(self, host=\"127.0.0.1\", port=11111, is_encrypt=None, security_firm=SecurityFirm.FUTUSECURITIES):\n super(OpenCNTradeContext, self).__init__(TrdMarket.CN, host, port, is_encrypt=is_encrypt, security_firm=security_firm, trd_category=TrdCategory.SECURITY)\n\n\n# 期货交易接口\nclass OpenFutureTradeContext(OpenTradeContextBase):\n def __init__(self, host=\"127.0.0.1\", port=11111, is_encrypt=None, security_firm=SecurityFirm.FUTUSECURITIES):\n super(OpenFutureTradeContext, self).__init__(TrdMarket.FUTURES, host, port, is_encrypt=is_encrypt, security_firm=security_firm, trd_category=TrdCategory.FUTURE)\n\n\n# 证券市场交易对象\nclass OpenSecTradeContext(OpenTradeContextBase):\n def __init__(self, filter_trdmarket=TrdMarket.HK, host='127.0.0.1', port=11111, is_encrypt=None, security_firm=SecurityFirm.FUTUSECURITIES):\n super(OpenSecTradeContext, self).__init__(filter_trdmarket, host, port, is_encrypt=is_encrypt, security_firm=security_firm, trd_category=TrdCategory.SECURITY, need_general_sec_acc=True)\n" ]
[ [ "pandas.DataFrame" ] ]
Chemformalixer/Machine-Learning-Multiple-Target-Vars
[ "eb42646e265aa594f7bdca1288764ef26a1096c1" ]
[ "CMTVEM_train_validate.py" ]
[ "#####################################################################################################################################################\n#####################################################################################################################################################\n### ###\n### ###\n### Classification on Multiple Target Variables on the Same Dataset ### \n### Automated Preprocessing, Balancing, Training and Validation and in House Hyper-parameter Tuning ###\n### Training and validation file ###\n### Programmed by E. M. (CMTVEM) ###\n### July 2018 ###\n### train_validate: Constructs, trains and validates the model ###\n### Input: preprocessed training dataframe, configuration parameters ###\n### Output: trained models in the form of a dictionary(model_dispatcher) of 9 classification functions along with average Matthews score ###\n#####################################################################################################################################################\n#####################################################################################################################################################\n\n\n#Importing libraries\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier, VotingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, auc, matthews_corrcoef\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\n#importing functions from accompanying CMTVEM .py files\nfrom CMTVEM_auxilliary import output_writer\n\n#suppressing copy warning\npd.options.mode.chained_assignment = None\n\ndef train_validate(df,configparms):\n \n #starting the report, reportCMTVEM will be written on the report txt file at every stage of the program \n reportCMTVEM = list()\n reportCMTVEM.append(\"---------------------------------------------------------------\")\n reportCMTVEM.append(\"performance of the model\")\n #initialize classifier dictionary\n model_dispatcher={}\n\n X = df.drop(configparms['target_name_list'], axis = 1)\n X = X.drop(['formulaA','formulaB'], axis = 1)\n matthews_score_array = []\n f1_score_array = []\n precision_array = []\n recall_array = []\n accuracy_array = []\n n_item = 0\n colors = ['aqua', 'green', 'blue', 'red', 'sienna']\n phases = ['target1','target2', 'target3', 'target4', 'target5']\n linestyles = ['--',':','-.','--','-']\n\n if configparms['scale']=='yes':\n scaler = StandardScaler()\n reportCMTVEM.append(\"train data scaled\")\n #remove categorical variables\n reportCMTVEM.append(scaler.fit(X.drop(configparms['categorical_features_fullnames'], axis = 1)))\n #put data back together\n X=pd.concat([pd.DataFrame(scaler.transform(X.drop(configparms['categorical_features_fullnames'], axis = 1))), X[configparms['categorical_features_fullnames']]], axis=1)\n for item in configparms['target_name_list']:\n if item=='stabilitysum':\n continue\n #if item == 'stability_5th_interval': #this conditional block can be used for testing the code with parm tuner at a faster pace\n # break\n y=df[item]\n [X_train, X_Validation, y_train, 
y_Validation] = train_test_split(X, y, test_size = 0.25)#Splitting train and validation sets\n #balancing because the data is dominated by unstable binary states\n if configparms['balance_sampling']!='n' :\n #kind = type of balancing\n smethod = SMOTE(kind=configparms['balance_sampling'])\n #only train data should be synthetically modified to become balanced, validation data should not be touched\n [X_res, y_res] = smethod.fit_sample(X_train,y_train)\n X_train=pd.DataFrame(X_res)\n y_train=pd.DataFrame(y_res)\n #random forest\n if configparms['learning_model']=='rf':\n classifier = RandomForestClassifier(n_estimators = 300, max_features=15, criterion = 'entropy')\n #support vector machine classifier\n elif configparms['learning_model']=='svc':\n classifier = SVC(kernel='poly', max_iter=100, gamma = 2)\n #k nearest neighbor\n elif configparms['learning_model']=='knn':\n classifier = KNeighborsClassifier(n_neighbors=5)\n #gaussian process\n elif configparms['learning_model']=='gp':\n classifier = GaussianProcessClassifier(1.0 * RBF(1.0), max_iter_predict = 10)\n #multilayer perceptron\n elif configparms['learning_model']=='mlp':\n classifier = MLPClassifier(alpha=1,hidden_layer_sizes=(95,75))\n #ensemble voting\n elif configparms['learning_model']=='ev':\n #ev does not include gaussian process due to heavy computational cost, does not include svc either for limited accuracy\n #n_estimators = 301 is chosen empirically, after 300 not much is gained in accuracy\n classifier1 = RandomForestClassifier(n_estimators = 301, max_features=int(configparms['hyper_parameters'][0]), criterion = 'entropy') \n classifier3 = KNeighborsClassifier(n_neighbors=int(configparms['hyper_parameters'][1]))\n classifier4 = MLPClassifier(alpha=1,hidden_layer_sizes=(95,75))\n classifier = VotingClassifier(estimators=[('rf',classifier1),('knn', classifier3), ('mlp', classifier4)], voting='soft', weights=configparms['hyper_parameters'][2:5],flatten_transform=True)\n \n \n classifier.fit(X_train,y_train.values.ravel())\n model_dispatcher[item] = classifier\n y_pred = classifier.predict(X_Validation)\n cm = confusion_matrix(y_Validation, y_pred)\n matthews_score_array.append(matthews_corrcoef(y_Validation, y_pred))\n f1_score_array.append(f1_score(y_Validation, y_pred))\n precision_array.append(precision_score(y_Validation, y_pred))\n recall_array.append(recall_score(y_Validation, y_pred))\n accuracy_array.append(accuracy_score(y_Validation, y_pred))\n reportCMTVEM.append(\"confusion matrix for \"+item)\n print(\"confusion matrix for \"+item)\n reportCMTVEM.append(cm)\n print(cm)\n \n # making the ROC plot: To make all curves for stable phases appear in a single plot saving the plot occurs after the loop \n if configparms['roc_plot'] != 'none':\n fpr, tpr, _ = metrics.roc_curve(y_Validation, y_pred, pos_label=1)\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, color=colors[n_item], linestyle=linestyles[n_item], label='ROC curve of {0} (area = {1:0.2f})'''.format(phases[n_item], roc_auc))\n n_item=n_item+1\n #saving the roc curve in png format\n if configparms['roc_plot'] != 'none':\n plt.plot([0, 1], [0, 1], color='black', linestyle='-', linewidth=4.0)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristic plot')\n plt.legend(loc=\"lower right\")\n plt.savefig(configparms['roc_plot'])\n\n #recording the performance metrics\n reportCMTVEM.append(\"---------------------------------------------------------------\")\n 
#mattews_score_array mean as the target of maximization of parameter tuning is passed to model dispatcher\n reportCMTVEM.append(\"average Matthews score\")\n model_dispatcher['average_Matthews_score'] = np.mean(matthews_score_array)\n reportCMTVEM.append(model_dispatcher['average_Matthews_score'])\n reportCMTVEM.append(\"average f1 score\")\n reportCMTVEM.append(np.mean(f1_score_array))\n reportCMTVEM.append(\"average precision score\")\n reportCMTVEM.append(np.mean(precision_array))\n reportCMTVEM.append(\"average recall score\")\n reportCMTVEM.append(np.mean(recall_array))\n reportCMTVEM.append(\"average accuracy score\")\n reportCMTVEM.append(np.mean(accuracy_array))\n \n print(\"average Matthews score:\", np.mean(matthews_score_array))\n print(\"average f1 score:\", np.mean(f1_score_array))\n print(\"average precision score\", np.mean(precision_array))\n print(\"average recall score:\", np.mean(recall_array))\n \n #If we were to choose a useful metric, f1_score and Matthews score are the most relevant and statistically useful\n output_writer(reportCMTVEM,configparms)\n #model_dispatcher is a dictionary of all 9 models for 9 target variables\n return(model_dispatcher)\n\n\n" ]
[ [ "sklearn.neural_network.MLPClassifier", "matplotlib.pyplot.legend", "sklearn.metrics.matthews_corrcoef", "sklearn.metrics.confusion_matrix", "pandas.DataFrame", "matplotlib.pyplot.plot", "numpy.mean", "sklearn.metrics.f1_score", "sklearn.ensemble.RandomForestClassifier", "sklearn.ensemble.VotingClassifier", "sklearn.neighbors.KNeighborsClassifier", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.savefig", "sklearn.metrics.roc_curve", "sklearn.svm.SVC", "sklearn.metrics.auc", "sklearn.gaussian_process.kernels.RBF", "sklearn.metrics.recall_score", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "sklearn.metrics.accuracy_score" ] ]
MengLcool/Ac-OCR
[ "370152cc33995f41ee79374b3f5d62e94fea09d3" ]
[ "Recognition/transformer_xl/utils/log_uniform_sampler.py" ]
[ "import torch\r\nfrom torch import nn\r\nimport numpy as np\r\n\r\nclass LogUniformSampler(object):\r\n def __init__(self, range_max, n_sample):\r\n \"\"\"\r\n Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py\r\n `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`\r\n\r\n expected count can be approximated by 1 - (1 - p)^n\r\n and we use a numerically stable version -expm1(num_tries * log1p(-p))\r\n\r\n Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run\r\n \"\"\"\r\n with torch.no_grad():\r\n self.range_max = range_max\r\n log_indices = torch.arange(1., range_max+2., 1.).log_()\r\n self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]\r\n # print('P', self.dist.numpy().tolist()[-30:])\r\n\r\n self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()\r\n\r\n self.n_sample = n_sample\r\n\r\n def sample(self, labels):\r\n \"\"\"\r\n labels: [b1, b2]\r\n Return\r\n true_log_probs: [b1, b2]\r\n samp_log_probs: [n_sample]\r\n neg_samples: [n_sample]\r\n \"\"\"\r\n\r\n # neg_samples = torch.empty(0).long()\r\n n_sample = self.n_sample\r\n n_tries = 2 * n_sample\r\n\r\n with torch.no_grad():\r\n neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()\r\n device = labels.device\r\n neg_samples = neg_samples.to(device)\r\n true_log_probs = self.log_q[labels].to(device)\r\n samp_log_probs = self.log_q[neg_samples].to(device)\r\n return true_log_probs, samp_log_probs, neg_samples\r\n\r\ndef sample_logits(embedding, bias, labels, inputs, sampler):\r\n \"\"\"\r\n embedding: an nn.Embedding layer\r\n bias: [n_vocab]\r\n labels: [b1, b2]\r\n inputs: [b1, b2, n_emb]\r\n sampler: you may use a LogUniformSampler\r\n Return\r\n logits: [b1, b2, 1 + n_sample]\r\n \"\"\"\r\n true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)\r\n n_sample = neg_samples.size(0)\r\n b1, b2 = labels.size(0), labels.size(1)\r\n all_ids = torch.cat([labels.view(-1), neg_samples])\r\n all_w = embedding(all_ids)\r\n true_w = all_w[: -n_sample].view(b1, b2, -1)\r\n sample_w = all_w[- n_sample:].view(n_sample, -1)\r\n\r\n all_b = bias[all_ids]\r\n true_b = all_b[: -n_sample].view(b1, b2)\r\n sample_b = all_b[- n_sample:]\r\n\r\n hit = (labels[:, :, None] == neg_samples).detach()\r\n\r\n true_logits = torch.einsum('ijk,ijk->ij',\r\n [true_w, inputs]) + true_b - true_log_probs\r\n sample_logits = torch.einsum('lk,ijk->ijl',\r\n [sample_w, inputs]) + sample_b - samp_log_probs\r\n sample_logits.masked_fill_(hit, -1e30)\r\n logits = torch.cat([true_logits[:, :, None], sample_logits], -1)\r\n\r\n return logits\r\n\r\n\r\n# class LogUniformSampler(object):\r\n# def __init__(self, range_max, unique=False):\r\n# \"\"\"\r\n# Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py\r\n# `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`\r\n# \"\"\"\r\n# self.range_max = range_max\r\n# log_indices = torch.arange(1., range_max+2., 1.).log_()\r\n# self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]\r\n\r\n# self.unique = unique\r\n\r\n# if self.unique:\r\n# self.exclude_mask = torch.ByteTensor(range_max).fill_(0)\r\n\r\n# def sample(self, n_sample, labels):\r\n# pos_sample, new_labels = labels.unique(return_inverse=True)\r\n# n_pos_sample = pos_sample.size(0)\r\n# n_neg_sample = n_sample - n_pos_sample\r\n\r\n# if self.unique:\r\n# 
self.exclude_mask.index_fill_(0, pos_sample, 1)\r\n# sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)\r\n# self.exclude_mask.index_fill_(0, pos_sample, 0)\r\n# else:\r\n# sample_dist = self.dist\r\n\r\n# neg_sample = torch.multinomial(sample_dist, n_neg_sample)\r\n\r\n# sample = torch.cat([pos_sample, neg_sample])\r\n# sample_prob = self.dist[sample]\r\n\r\n# return new_labels, sample, sample_prob\r\n\r\n\r\nif __name__ == '__main__':\r\n S, B = 3, 4\r\n n_vocab = 10000\r\n n_sample = 5\r\n H = 32\r\n\r\n labels = torch.LongTensor(S, B).random_(0, n_vocab)\r\n\r\n # sampler = LogUniformSampler(n_vocab, unique=False)\r\n # new_labels, sample, sample_prob = sampler.sample(n_sample, labels)\r\n\r\n sampler = LogUniformSampler(n_vocab, unique=True)\r\n # true_probs, samp_probs, neg_samples = sampler.sample(n_sample, labels)\r\n\r\n # print('true_probs', true_probs.numpy().tolist())\r\n # print('samp_probs', samp_probs.numpy().tolist())\r\n # print('neg_samples', neg_samples.numpy().tolist())\r\n\r\n # print('sum', torch.sum(sampler.dist).item())\r\n\r\n # assert torch.all(torch.sort(sample.unique())[0].eq(torch.sort(sample)[0])).item()\r\n\r\n embedding = nn.Embedding(n_vocab, H)\r\n bias = torch.zeros(n_vocab)\r\n inputs = torch.Tensor(S, B, H).normal_()\r\n\r\n logits, out_labels = sample_logits(embedding, bias, labels, inputs, sampler, n_sample)\r\n print('logits', logits.detach().numpy().tolist())\r\n print('logits shape', logits.size())\r\n print('out_labels', out_labels.detach().numpy().tolist())\r\n print('out_labels shape', out_labels.size())\r\n\r\n" ]
[ [ "torch.LongTensor", "torch.Tensor", "torch.zeros", "torch.cat", "torch.einsum", "torch.nn.Embedding", "torch.multinomial", "torch.no_grad", "torch.arange" ] ]
VoidlessVoid7/eLibrary
[ "4158c1e720a15527ce4d337636d143f324806e9e" ]
[ "backend/model.py" ]
[ "import pandas as pd\nimport numpy as np\nimport pandas as pd\nimport numpy as np\nfrom flask import jsonify\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom sklearn.metrics.pairwise import linear_kernel\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom nltk.tokenize import RegexpTokenizer\nimport re\nimport string\nimport random\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\nimport matplotlib.pyplot as plt\n\n\n\ndf = pd.read_csv(\"goodread.csv\")\ndf2 = df.copy()\n\n# Function for removing NonAscii characters\ndef _removeNonAscii(s):\n return \"\".join(i for i in s if ord(i)<128)\n# Function for converting into lower case\ndef make_lower_case(text):\n return text.lower()\n# Function for removing stop words\ndef remove_stop_words(text):\n text = text.split()\n stops = set(stopwords.words(\"english\"))\n text = [w for w in text if not w in stops]\n text = \" \".join(text)\n return text\n# Function for removing punctuation\ndef remove_punctuation(text):\n tokenizer = RegexpTokenizer(r'\\w+')\n text = tokenizer.tokenize(text)\n text = \" \".join(text)\n return text\n#Function for removing the html tags\ndef remove_html(text):\n html_pattern = re.compile('<.*?>')\n return html_pattern.sub(r'', text)\n# Applying all the functions in description and storing as a cleaned_desc\n\ndf2['cleaned_desc'] = df2['Desc'].apply(_removeNonAscii)\ndf2['cleaned_desc'] = df2.cleaned_desc.apply(func = make_lower_case)\ndf2['cleaned_desc'] = df2.cleaned_desc.apply(func = remove_stop_words)\ndf2['cleaned_desc'] = df2.cleaned_desc.apply(func=remove_punctuation)\ndf2['cleaned_desc'] = df2.cleaned_desc.apply(func=remove_html)\n\ndef recommend_title(title, genre):\n data = df2.loc[df2['genre'] == genre] \n data.reset_index(level = 0, inplace = True) \n indices = pd.Series(data.index, index = data['title'])\n tf = TfidfVectorizer(analyzer='word', ngram_range=(2, 2), min_df = 1, stop_words='english')\n tfidf_matrix = tf.fit_transform(data['title'])\n sg = cosine_similarity(tfidf_matrix, tfidf_matrix)\n idx = indices[title]\n sig = list(enumerate(sg[idx]))\n sig = sorted(sig, key=lambda x: x[1], reverse=True)\n sig = sig[1:6]\n movie_indices = [i[0] for i in sig]\n rec = data[['title', 'image_link']].iloc[movie_indices]\n recd = rec.to_dict()\n print(rec)\n\n\ndef recommend(title, genre):\n global rec\n data = df2.loc[df2['genre'] == genre] \n data.reset_index(level = 0, inplace = True) \n indices = pd.Series(data.index, index = data['title'])\n tf = TfidfVectorizer(analyzer='word', ngram_range=(2, 2), min_df = 1, stop_words='english')\n tfidf_matrix = tf.fit_transform(data['cleaned_desc'])\n sg = cosine_similarity(tfidf_matrix, tfidf_matrix)\n idx = indices[title]\n sig = list(enumerate(sg[idx]))\n sig = sorted(sig, key=lambda x: x[1], reverse=True)\n sig = sig[1:6]\n movie_indices = [i[0] for i in sig]\n rec = data[['title', 'image_link']].iloc[movie_indices]\n return rec.to_dict('index')\n\nrecommend(\"Silence\", \"Non-Fiction\")" ]
[ [ "pandas.read_csv", "sklearn.metrics.pairwise.cosine_similarity", "pandas.Series", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
luoshenseeker/AIJack
[ "4e871a5b3beb4b7c976d38060d6956efcebf880d" ]
[ "src/aijack/attack/inversion/gan_attack.py" ]
[ "import copy\n\nimport torch\n\nfrom ..base_attack import BaseAttacker\n\n\nclass GAN_Attack(BaseAttacker):\n \"\"\"GAN based model inversion attack (https://arxiv.org/abs/1702.07464)\n\n Attributes:\n model (torch.nn.Module):\n target_label(int): index of target class\n generator (torch.nn.Module): Generator\n generator_optimizer (torch.optim.Optimizer): optimizer for the generator\n generator_criterion (function): loss function for the generator\n nz (int): dimension of latent space of the generator\n user_id (int): user id\n device (string): device type (cpu or cuda)\n \"\"\"\n\n def __init__(\n self,\n client,\n target_label,\n generator,\n generator_optimizer,\n generator_criterion,\n nz=100,\n device=\"cpu\",\n ):\n \"\"\"Inits the GAN_Attack\n\n Args:\n model (torch.nn.Module):\n target_label(int): index of target class\n generator (torch.nn.Module): Generator\n generator_optimizer (torch.optim.Optimizer): optimizer for the generator\n generator_criterion (function): loss function for the generator\n nz (int): dimension of latent space of the generator\n user_id (int): user id\n device (string): device type (cpu or cuda)\n \"\"\"\n super().__init__(target_model=None)\n self.client = client\n self.target_label = target_label\n self.generator = generator\n self.generator_optimizer = generator_optimizer\n self.generator_criterion = generator_criterion\n self.nz = nz\n self.device = device\n\n self.discriminator = copy.deepcopy(self.client.model)\n self.discriminator.to(self.device)\n\n self.noise = torch.randn(1, self.nz, 1, 1, device=self.device)\n\n def update_generator(self, batch_size=10, epoch=1, log_interval=5):\n \"\"\"Updata the Generator\n\n Args:\n batch_size (int): batch size\n epoch (int): epoch\n log_interval (int): interval of logging\n \"\"\"\n\n for i in range(1, epoch + 1):\n running_error = 0\n self.generator.zero_grad()\n\n noise = torch.randn(batch_size, self.nz, 1, 1, device=self.device)\n fake = self.generator(noise)\n output = self.discriminator(fake)\n\n label = torch.full(\n (batch_size,), self.target_label, dtype=torch.int64, device=self.device\n )\n loss_generator = self.generator_criterion(output, label)\n loss_generator.backward()\n\n self.generator_optimizer.step()\n\n running_error += loss_generator.item()\n\n if log_interval != 0 and i % log_interval == 0:\n print(\n f\"updating generator - epoch {i}: generator loss is {running_error/batch_size}\"\n )\n\n def update_discriminator(self):\n \"\"\"Update the discriminator\"\"\"\n self.discriminator.load_state_dict(self.client.model.state_dict())\n\n def attack(self, n):\n \"\"\"Generate fake images\n\n Args:\n n (int): the number of fake images created by the Generator\n\n Returns:\n fake: generated fake images\n \"\"\"\n noise = torch.randn(n, self.nz, 1, 1, device=self.device)\n with torch.no_grad():\n fake = self.generator(noise)\n return fake\n" ]
[ [ "torch.randn", "torch.no_grad", "torch.full" ] ]
shivangdubey/sympy
[ "bd3ddd4c71d439c8b623f69a02274dd8a8a82198" ]
[ "sympy/physics/quantum/qubit.py" ]
[ "\"\"\"Qubits for quantum computing.\n\nTodo:\n* Finish implementing measurement logic. This should include POVM.\n* Update docstrings.\n* Update tests.\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport math\n\nfrom sympy import Integer, log, Mul, Add, Pow, conjugate\nfrom sympy.core.basic import sympify\nfrom sympy.core.compatibility import SYMPY_INTS\nfrom sympy.matrices import Matrix, zeros\nfrom sympy.printing.pretty.stringpict import prettyForm\n\nfrom sympy.physics.quantum.hilbert import ComplexSpace\nfrom sympy.physics.quantum.state import Ket, Bra, State\n\nfrom sympy.physics.quantum.qexpr import QuantumError\nfrom sympy.physics.quantum.represent import represent\nfrom sympy.physics.quantum.matrixutils import (\n numpy_ndarray, scipy_sparse_matrix\n)\nfrom mpmath.libmp.libintmath import bitcount\n\n__all__ = [\n 'Qubit',\n 'QubitBra',\n 'IntQubit',\n 'IntQubitBra',\n 'qubit_to_matrix',\n 'matrix_to_qubit',\n 'matrix_to_density',\n 'measure_all',\n 'measure_partial',\n 'measure_partial_oneshot',\n 'measure_all_oneshot'\n]\n\n#-----------------------------------------------------------------------------\n# Qubit Classes\n#-----------------------------------------------------------------------------\n\n\nclass QubitState(State):\n \"\"\"Base class for Qubit and QubitBra.\"\"\"\n\n #-------------------------------------------------------------------------\n # Initialization/creation\n #-------------------------------------------------------------------------\n\n @classmethod\n def _eval_args(cls, args):\n # If we are passed a QubitState or subclass, we just take its qubit\n # values directly.\n if len(args) == 1 and isinstance(args[0], QubitState):\n return args[0].qubit_values\n\n # Turn strings into tuple of strings\n if len(args) == 1 and isinstance(args[0], str):\n args = tuple(args[0])\n\n args = sympify(args)\n\n # Validate input (must have 0 or 1 input)\n for element in args:\n if not (element == 1 or element == 0):\n raise ValueError(\n \"Qubit values must be 0 or 1, got: %r\" % element)\n return args\n\n @classmethod\n def _eval_hilbert_space(cls, args):\n return ComplexSpace(2)**len(args)\n\n #-------------------------------------------------------------------------\n # Properties\n #-------------------------------------------------------------------------\n\n @property\n def dimension(self):\n \"\"\"The number of Qubits in the state.\"\"\"\n return len(self.qubit_values)\n\n @property\n def nqubits(self):\n return self.dimension\n\n @property\n def qubit_values(self):\n \"\"\"Returns the values of the qubits as a tuple.\"\"\"\n return self.label\n\n #-------------------------------------------------------------------------\n # Special methods\n #-------------------------------------------------------------------------\n\n def __len__(self):\n return self.dimension\n\n def __getitem__(self, bit):\n return self.qubit_values[int(self.dimension - bit - 1)]\n\n #-------------------------------------------------------------------------\n # Utility methods\n #-------------------------------------------------------------------------\n\n def flip(self, *bits):\n \"\"\"Flip the bit(s) given.\"\"\"\n newargs = list(self.qubit_values)\n for i in bits:\n bit = int(self.dimension - i - 1)\n if newargs[bit] == 1:\n newargs[bit] = 0\n else:\n newargs[bit] = 1\n return self.__class__(*tuple(newargs))\n\n\nclass Qubit(QubitState, Ket):\n \"\"\"A multi-qubit ket in the computational (z) basis.\n\n We use the normal convention that the least significant qubit is on the\n 
right, so ``|00001>`` has a 1 in the least significant qubit.\n\n Parameters\n ==========\n\n values : list, str\n The qubit values as a list of ints ([0,0,0,1,1,]) or a string ('011').\n\n Examples\n ========\n\n Create a qubit in a couple of different ways and look at their attributes:\n\n >>> from sympy.physics.quantum.qubit import Qubit\n >>> Qubit(0,0,0)\n |000>\n >>> q = Qubit('0101')\n >>> q\n |0101>\n\n >>> q.nqubits\n 4\n >>> len(q)\n 4\n >>> q.dimension\n 4\n >>> q.qubit_values\n (0, 1, 0, 1)\n\n We can flip the value of an individual qubit:\n\n >>> q.flip(1)\n |0111>\n\n We can take the dagger of a Qubit to get a bra:\n\n >>> from sympy.physics.quantum.dagger import Dagger\n >>> Dagger(q)\n <0101|\n >>> type(Dagger(q))\n <class 'sympy.physics.quantum.qubit.QubitBra'>\n\n Inner products work as expected:\n\n >>> ip = Dagger(q)*q\n >>> ip\n <0101|0101>\n >>> ip.doit()\n 1\n \"\"\"\n\n @classmethod\n def dual_class(self):\n return QubitBra\n\n def _eval_innerproduct_QubitBra(self, bra, **hints):\n if self.label == bra.label:\n return Integer(1)\n else:\n return Integer(0)\n\n def _represent_default_basis(self, **options):\n return self._represent_ZGate(None, **options)\n\n def _represent_ZGate(self, basis, **options):\n \"\"\"Represent this qubits in the computational basis (ZGate).\n \"\"\"\n _format = options.get('format', 'sympy')\n n = 1\n definite_state = 0\n for it in reversed(self.qubit_values):\n definite_state += n*it\n n = n*2\n result = [0]*(2**self.dimension)\n result[int(definite_state)] = 1\n if _format == 'sympy':\n return Matrix(result)\n elif _format == 'numpy':\n import numpy as np\n return np.matrix(result, dtype='complex').transpose()\n elif _format == 'scipy.sparse':\n from scipy import sparse\n return sparse.csr_matrix(result, dtype='complex').transpose()\n\n def _eval_trace(self, bra, **kwargs):\n indices = kwargs.get('indices', [])\n\n #sort index list to begin trace from most-significant\n #qubit\n sorted_idx = list(indices)\n if len(sorted_idx) == 0:\n sorted_idx = list(range(0, self.nqubits))\n sorted_idx.sort()\n\n #trace out for each of index\n new_mat = self*bra\n for i in range(len(sorted_idx) - 1, -1, -1):\n # start from tracing out from leftmost qubit\n new_mat = self._reduced_density(new_mat, int(sorted_idx[i]))\n\n if (len(sorted_idx) == self.nqubits):\n #in case full trace was requested\n return new_mat[0]\n else:\n return matrix_to_density(new_mat)\n\n def _reduced_density(self, matrix, qubit, **options):\n \"\"\"Compute the reduced density matrix by tracing out one qubit.\n The qubit argument should be of type python int, since it is used\n in bit operations\n \"\"\"\n def find_index_that_is_projected(j, k, qubit):\n bit_mask = 2**qubit - 1\n return ((j >> qubit) << (1 + qubit)) + (j & bit_mask) + (k << qubit)\n\n old_matrix = represent(matrix, **options)\n old_size = old_matrix.cols\n #we expect the old_size to be even\n new_size = old_size//2\n new_matrix = Matrix().zeros(new_size)\n\n for i in range(new_size):\n for j in range(new_size):\n for k in range(2):\n col = find_index_that_is_projected(j, k, qubit)\n row = find_index_that_is_projected(i, k, qubit)\n new_matrix[i, j] += old_matrix[row, col]\n\n return new_matrix\n\n\nclass QubitBra(QubitState, Bra):\n \"\"\"A multi-qubit bra in the computational (z) basis.\n\n We use the normal convention that the least significant qubit is on the\n right, so ``|00001>`` has a 1 in the least significant qubit.\n\n Parameters\n ==========\n\n values : list, str\n The qubit values as a list of ints 
([0,0,0,1,1,]) or a string ('011').\n\n See also\n ========\n\n Qubit: Examples using qubits\n\n \"\"\"\n @classmethod\n def dual_class(self):\n return Qubit\n\n\nclass IntQubitState(QubitState):\n \"\"\"A base class for qubits that work with binary representations.\"\"\"\n\n @classmethod\n def _eval_args(cls, args, nqubits=None):\n # The case of a QubitState instance\n if len(args) == 1 and isinstance(args[0], QubitState):\n return QubitState._eval_args(args)\n # otherwise, args should be integer\n elif not all((isinstance(a, (int, Integer)) for a in args)):\n raise ValueError('values must be integers, got (%s)' % (tuple(type(a) for a in args),))\n # use nqubits if specified\n if nqubits is not None:\n if not isinstance(nqubits, (int, Integer)):\n raise ValueError('nqubits must be an integer, got (%s)' % type(nqubits))\n if len(args) != 1:\n raise ValueError(\n 'too many positional arguments (%s). should be (number, nqubits=n)' % (args,))\n return cls._eval_args_with_nqubits(args[0], nqubits)\n # For a single argument, we construct the binary representation of\n # that integer with the minimal number of bits.\n if len(args) == 1 and args[0] > 1:\n #rvalues is the minimum number of bits needed to express the number\n rvalues = reversed(range(bitcount(abs(args[0]))))\n qubit_values = [(args[0] >> i) & 1 for i in rvalues]\n return QubitState._eval_args(qubit_values)\n # For two numbers, the second number is the number of bits\n # on which it is expressed, so IntQubit(0,5) == |00000>.\n elif len(args) == 2 and args[1] > 1:\n return cls._eval_args_with_nqubits(args[0], args[1])\n else:\n return QubitState._eval_args(args)\n\n @classmethod\n def _eval_args_with_nqubits(cls, number, nqubits):\n need = bitcount(abs(number))\n if nqubits < need:\n raise ValueError(\n 'cannot represent %s with %s bits' % (number, nqubits))\n qubit_values = [(number >> i) & 1 for i in reversed(range(nqubits))]\n return QubitState._eval_args(qubit_values)\n\n def as_int(self):\n \"\"\"Return the numerical value of the qubit.\"\"\"\n number = 0\n n = 1\n for i in reversed(self.qubit_values):\n number += n*i\n n = n << 1\n return number\n\n def _print_label(self, printer, *args):\n return str(self.as_int())\n\n def _print_label_pretty(self, printer, *args):\n label = self._print_label(printer, *args)\n return prettyForm(label)\n\n _print_label_repr = _print_label\n _print_label_latex = _print_label\n\n\nclass IntQubit(IntQubitState, Qubit):\n \"\"\"A qubit ket that store integers as binary numbers in qubit values.\n\n The differences between this class and ``Qubit`` are:\n\n * The form of the constructor.\n * The qubit values are printed as their corresponding integer, rather\n than the raw qubit values. The internal storage format of the qubit\n values in the same as ``Qubit``.\n\n Parameters\n ==========\n\n values : int, tuple\n If a single argument, the integer we want to represent in the qubit\n values. 
This integer will be represented using the fewest possible\n number of qubits.\n If a pair of integers and the second value is more than one, the first\n integer gives the integer to represent in binary form and the second\n integer gives the number of qubits to use.\n List of zeros and ones is also accepted to generate qubit by bit pattern.\n\n nqubits : int\n The integer that represents the number of qubits.\n This number should be passed with keyword ``nqubits=N``.\n You can use this in order to avoid ambiguity of Qubit-style tuple of bits.\n Please see the example below for more details.\n\n Examples\n ========\n\n Create a qubit for the integer 5:\n\n >>> from sympy.physics.quantum.qubit import IntQubit\n >>> from sympy.physics.quantum.qubit import Qubit\n >>> q = IntQubit(5)\n >>> q\n |5>\n\n We can also create an ``IntQubit`` by passing a ``Qubit`` instance.\n\n >>> q = IntQubit(Qubit('101'))\n >>> q\n |5>\n >>> q.as_int()\n 5\n >>> q.nqubits\n 3\n >>> q.qubit_values\n (1, 0, 1)\n\n We can go back to the regular qubit form.\n\n >>> Qubit(q)\n |101>\n\n Please note that ``IntQubit`` also accepts a ``Qubit``-style list of bits.\n So, the code below yields qubits 3, not a single bit ``1``.\n\n >>> IntQubit(1, 1)\n |3>\n\n To avoid ambiguity, use ``nqubits`` parameter.\n Use of this keyword is recommended especially when you provide the values by variables.\n\n >>> IntQubit(1, nqubits=1)\n |1>\n >>> a = 1\n >>> IntQubit(a, nqubits=1)\n |1>\n \"\"\"\n @classmethod\n def dual_class(self):\n return IntQubitBra\n\n def _eval_innerproduct_IntQubitBra(self, bra, **hints):\n return Qubit._eval_innerproduct_QubitBra(self, bra)\n\nclass IntQubitBra(IntQubitState, QubitBra):\n \"\"\"A qubit bra that store integers as binary numbers in qubit values.\"\"\"\n\n @classmethod\n def dual_class(self):\n return IntQubit\n\n\n#-----------------------------------------------------------------------------\n# Qubit <---> Matrix conversion functions\n#-----------------------------------------------------------------------------\n\n\ndef matrix_to_qubit(matrix):\n \"\"\"Convert from the matrix repr. to a sum of Qubit objects.\n\n Parameters\n ----------\n matrix : Matrix, numpy.matrix, scipy.sparse\n The matrix to build the Qubit representation of. 
This works with\n sympy matrices, numpy matrices and scipy.sparse sparse matrices.\n\n Examples\n ========\n\n Represent a state and then go back to its qubit form:\n\n >>> from sympy.physics.quantum.qubit import matrix_to_qubit, Qubit\n >>> from sympy.physics.quantum.represent import represent\n >>> q = Qubit('01')\n >>> matrix_to_qubit(represent(q))\n |01>\n \"\"\"\n # Determine the format based on the type of the input matrix\n format = 'sympy'\n if isinstance(matrix, numpy_ndarray):\n format = 'numpy'\n if isinstance(matrix, scipy_sparse_matrix):\n format = 'scipy.sparse'\n\n # Make sure it is of correct dimensions for a Qubit-matrix representation.\n # This logic should work with sympy, numpy or scipy.sparse matrices.\n if matrix.shape[0] == 1:\n mlistlen = matrix.shape[1]\n nqubits = log(mlistlen, 2)\n ket = False\n cls = QubitBra\n elif matrix.shape[1] == 1:\n mlistlen = matrix.shape[0]\n nqubits = log(mlistlen, 2)\n ket = True\n cls = Qubit\n else:\n raise QuantumError(\n 'Matrix must be a row/column vector, got %r' % matrix\n )\n if not isinstance(nqubits, Integer):\n raise QuantumError('Matrix must be a row/column vector of size '\n '2**nqubits, got: %r' % matrix)\n # Go through each item in matrix, if element is non-zero, make it into a\n # Qubit item times the element.\n result = 0\n for i in range(mlistlen):\n if ket:\n element = matrix[i, 0]\n else:\n element = matrix[0, i]\n if format == 'numpy' or format == 'scipy.sparse':\n element = complex(element)\n if element != 0.0:\n # Form Qubit array; 0 in bit-locations where i is 0, 1 in\n # bit-locations where i is 1\n qubit_array = [int(i & (1 << x) != 0) for x in range(nqubits)]\n qubit_array.reverse()\n result = result + element*cls(*qubit_array)\n\n # If sympy simplified by pulling out a constant coefficient, undo that.\n if isinstance(result, (Mul, Add, Pow)):\n result = result.expand()\n\n return result\n\n\ndef matrix_to_density(mat):\n \"\"\"\n Works by finding the eigenvectors and eigenvalues of the matrix.\n We know we can decompose rho by doing:\n sum(EigenVal*|Eigenvect><Eigenvect|)\n \"\"\"\n from sympy.physics.quantum.density import Density\n eigen = mat.eigenvects()\n args = [[matrix_to_qubit(Matrix(\n [vector, ])), x[0]] for x in eigen for vector in x[2] if x[0] != 0]\n if (len(args) == 0):\n return 0\n else:\n return Density(*args)\n\n\ndef qubit_to_matrix(qubit, format='sympy'):\n \"\"\"Converts an Add/Mul of Qubit objects into it's matrix representation\n\n This function is the inverse of ``matrix_to_qubit`` and is a shorthand\n for ``represent(qubit)``.\n \"\"\"\n return represent(qubit, format=format)\n\n\n#-----------------------------------------------------------------------------\n# Measurement\n#-----------------------------------------------------------------------------\n\n\ndef measure_all(qubit, format='sympy', normalize=True):\n \"\"\"Perform an ensemble measurement of all qubits.\n\n Parameters\n ==========\n\n qubit : Qubit, Add\n The qubit to measure. This can be any Qubit or a linear combination\n of them.\n format : str\n The format of the intermediate matrices to use. Possible values are\n ('sympy','numpy','scipy.sparse'). 
Currently only 'sympy' is\n implemented.\n\n Returns\n =======\n\n result : list\n A list that consists of primitive states and their probabilities.\n\n Examples\n ========\n\n >>> from sympy.physics.quantum.qubit import Qubit, measure_all\n >>> from sympy.physics.quantum.gate import H\n >>> from sympy.physics.quantum.qapply import qapply\n\n >>> c = H(0)*H(1)*Qubit('00')\n >>> c\n H(0)*H(1)*|00>\n >>> q = qapply(c)\n >>> measure_all(q)\n [(|00>, 1/4), (|01>, 1/4), (|10>, 1/4), (|11>, 1/4)]\n \"\"\"\n m = qubit_to_matrix(qubit, format)\n\n if format == 'sympy':\n results = []\n\n if normalize:\n m = m.normalized()\n\n size = max(m.shape) # Max of shape to account for bra or ket\n nqubits = int(math.log(size)/math.log(2))\n for i in range(size):\n if m[i] != 0.0:\n results.append(\n (Qubit(IntQubit(i, nqubits=nqubits)), m[i]*conjugate(m[i]))\n )\n return results\n else:\n raise NotImplementedError(\n \"This function can't handle non-sympy matrix formats yet\"\n )\n\n\ndef measure_partial(qubit, bits, format='sympy', normalize=True):\n \"\"\"Perform a partial ensemble measure on the specified qubits.\n\n Parameters\n ==========\n\n qubits : Qubit\n The qubit to measure. This can be any Qubit or a linear combination\n of them.\n bits : tuple\n The qubits to measure.\n format : str\n The format of the intermediate matrices to use. Possible values are\n ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is\n implemented.\n\n Returns\n =======\n\n result : list\n A list that consists of primitive states and their probabilities.\n\n Examples\n ========\n\n >>> from sympy.physics.quantum.qubit import Qubit, measure_partial\n >>> from sympy.physics.quantum.gate import H\n >>> from sympy.physics.quantum.qapply import qapply\n\n >>> c = H(0)*H(1)*Qubit('00')\n >>> c\n H(0)*H(1)*|00>\n >>> q = qapply(c)\n >>> measure_partial(q, (0,))\n [(sqrt(2)*|00>/2 + sqrt(2)*|10>/2, 1/2), (sqrt(2)*|01>/2 + sqrt(2)*|11>/2, 1/2)]\n \"\"\"\n m = qubit_to_matrix(qubit, format)\n\n if isinstance(bits, (SYMPY_INTS, Integer)):\n bits = (int(bits),)\n\n if format == 'sympy':\n if normalize:\n m = m.normalized()\n\n possible_outcomes = _get_possible_outcomes(m, bits)\n\n # Form output from function.\n output = []\n for outcome in possible_outcomes:\n # Calculate probability of finding the specified bits with\n # given values.\n prob_of_outcome = 0\n prob_of_outcome += (outcome.H*outcome)[0]\n\n # If the output has a chance, append it to output with found\n # probability.\n if prob_of_outcome != 0:\n if normalize:\n next_matrix = matrix_to_qubit(outcome.normalized())\n else:\n next_matrix = matrix_to_qubit(outcome)\n\n output.append((\n next_matrix,\n prob_of_outcome\n ))\n\n return output\n else:\n raise NotImplementedError(\n \"This function can't handle non-sympy matrix formats yet\"\n )\n\n\ndef measure_partial_oneshot(qubit, bits, format='sympy'):\n \"\"\"Perform a partial oneshot measurement on the specified qubits.\n\n A oneshot measurement is equivalent to performing a measurement on a\n quantum system. This type of measurement does not return the probabilities\n like an ensemble measurement does, but rather returns *one* of the\n possible resulting states. The exact state that is returned is determined\n by picking a state randomly according to the ensemble probabilities.\n\n Parameters\n ----------\n qubits : Qubit\n The qubit to measure. This can be any Qubit or a linear combination\n of them.\n bits : tuple\n The qubits to measure.\n format : str\n The format of the intermediate matrices to use. 
Possible values are\n ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is\n implemented.\n\n Returns\n -------\n result : Qubit\n The qubit that the system collapsed to upon measurement.\n \"\"\"\n import random\n m = qubit_to_matrix(qubit, format)\n\n if format == 'sympy':\n m = m.normalized()\n possible_outcomes = _get_possible_outcomes(m, bits)\n\n # Form output from function\n random_number = random.random()\n total_prob = 0\n for outcome in possible_outcomes:\n # Calculate probability of finding the specified bits\n # with given values\n total_prob += (outcome.H*outcome)[0]\n if total_prob >= random_number:\n return matrix_to_qubit(outcome.normalized())\n else:\n raise NotImplementedError(\n \"This function can't handle non-sympy matrix formats yet\"\n )\n\n\ndef _get_possible_outcomes(m, bits):\n \"\"\"Get the possible states that can be produced in a measurement.\n\n Parameters\n ----------\n m : Matrix\n The matrix representing the state of the system.\n bits : tuple, list\n Which bits will be measured.\n\n Returns\n -------\n result : list\n The list of possible states which can occur given this measurement.\n These are un-normalized so we can derive the probability of finding\n this state by taking the inner product with itself\n \"\"\"\n\n # This is filled with loads of dirty binary tricks...You have been warned\n\n size = max(m.shape) # Max of shape to account for bra or ket\n nqubits = int(math.log(size, 2) + .1) # Number of qubits possible\n\n # Make the output states and put in output_matrices, nothing in them now.\n # Each state will represent a possible outcome of the measurement\n # Thus, output_matrices[0] is the matrix which we get when all measured\n # bits return 0. and output_matrices[1] is the matrix for only the 0th\n # bit being true\n output_matrices = []\n for i in range(1 << len(bits)):\n output_matrices.append(zeros(2**nqubits, 1))\n\n # Bitmasks will help sort how to determine possible outcomes.\n # When the bit mask is and-ed with a matrix-index,\n # it will determine which state that index belongs to\n bit_masks = []\n for bit in bits:\n bit_masks.append(1 << bit)\n\n # Make possible outcome states\n for i in range(2**nqubits):\n trueness = 0 # This tells us to which output_matrix this value belongs\n # Find trueness\n for j in range(len(bit_masks)):\n if i & bit_masks[j]:\n trueness += j + 1\n # Put the value in the correct output matrix\n output_matrices[trueness][i] = m[i]\n return output_matrices\n\n\ndef measure_all_oneshot(qubit, format='sympy'):\n \"\"\"Perform a oneshot ensemble measurement on all qubits.\n\n A oneshot measurement is equivalent to performing a measurement on a\n quantum system. This type of measurement does not return the probabilities\n like an ensemble measurement does, but rather returns *one* of the\n possible resulting states. The exact state that is returned is determined\n by picking a state randomly according to the ensemble probabilities.\n\n Parameters\n ----------\n qubits : Qubit\n The qubit to measure. This can be any Qubit or a linear combination\n of them.\n format : str\n The format of the intermediate matrices to use. Possible values are\n ('sympy','numpy','scipy.sparse'). 
Currently only 'sympy' is\n implemented.\n\n Returns\n -------\n result : Qubit\n The qubit that the system collapsed to upon measurement.\n \"\"\"\n import random\n m = qubit_to_matrix(qubit)\n\n if format == 'sympy':\n m = m.normalized()\n random_number = random.random()\n total = 0\n result = 0\n for i in m:\n total += i*i.conjugate()\n if total > random_number:\n break\n result += 1\n return Qubit(IntQubit(result, int(math.log(max(m.shape), 2) + .1)))\n else:\n raise NotImplementedError(\n \"This function can't handle non-sympy matrix formats yet\"\n )\n" ]
[ [ "numpy.matrix", "scipy.sparse.csr_matrix" ] ]
kintatta/d3rl
[ "0674c4898927a53f36c5c875d8f217337f22d364" ]
[ "examples/data_augmentation/vector.py" ]
[ "from d3rlpy.datasets import get_pybullet\nfrom d3rlpy.algos import CQL\nfrom d3rlpy.metrics.scorer import evaluate_on_environment\nfrom d3rlpy.metrics.scorer import td_error_scorer\nfrom d3rlpy.metrics.scorer import discounted_sum_of_advantage_scorer\nfrom d3rlpy.metrics.scorer import average_value_estimation_scorer\nfrom sklearn.model_selection import train_test_split\n\ndataset, env = get_pybullet('hopper-bullet-mixed-v0')\n\ntrain_episodes, test_episodes = train_test_split(dataset, test_size=0.2)\n\ncql = CQL(n_epochs=100,\n augmentation=['single_amplitude_scaling'],\n use_gpu=True)\n\ncql.fit(train_episodes,\n eval_episodes=test_episodes,\n scorers={\n 'environment': evaluate_on_environment(env),\n 'td_error': td_error_scorer,\n 'discounted_advantage': discounted_sum_of_advantage_scorer,\n 'value_scale': average_value_estimation_scorer\n })\n" ]
[ [ "sklearn.model_selection.train_test_split" ] ]
mrluin/ESFNet-Pytorch
[ "a2c166a91281e96f953398cf953f446ff6337a14" ]
[ "models/SegNet.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom collections import OrderedDict\r\n\r\nclass SegNet(nn.Module):\r\n # modified\r\n def __init__(self,config):\r\n super(SegNet, self).__init__()\r\n\r\n self.config = config\r\n self.name = 'SegNet'\r\n batchNorm_momentum = 0.1\r\n\r\n self.conv11 = nn.Conv2d(3, 64, kernel_size=3, padding=1)\r\n self.bn11 = nn.BatchNorm2d(64, momentum= batchNorm_momentum)\r\n self.conv12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)\r\n self.bn12 = nn.BatchNorm2d(64, momentum= batchNorm_momentum)\r\n\r\n self.conv21 = nn.Conv2d(64, 128, kernel_size=3, padding=1)\r\n self.bn21 = nn.BatchNorm2d(128, momentum= batchNorm_momentum)\r\n self.conv22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\r\n self.bn22 = nn.BatchNorm2d(128, momentum= batchNorm_momentum)\r\n\r\n self.conv31 = nn.Conv2d(128, 256, kernel_size=3, padding=1)\r\n self.bn31 = nn.BatchNorm2d(256, momentum= batchNorm_momentum)\r\n self.conv32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\r\n self.bn32 = nn.BatchNorm2d(256, momentum= batchNorm_momentum)\r\n self.conv33 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\r\n self.bn33 = nn.BatchNorm2d(256, momentum= batchNorm_momentum)\r\n\r\n self.conv41 = nn.Conv2d(256, 512, kernel_size=3, padding=1)\r\n self.bn41 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n self.conv42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn42 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n self.conv43 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn43 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n\r\n self.conv51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn51 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n self.conv52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn52 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n self.conv53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn53 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n\r\n self.conv53d = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn53d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n self.conv52d = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn52d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n self.conv51d = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn51d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n\r\n self.conv43d = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn43d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n self.conv42d = nn.Conv2d(512, 512, kernel_size=3, padding=1)\r\n self.bn42d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)\r\n self.conv41d = nn.Conv2d(512, 256, kernel_size=3, padding=1)\r\n self.bn41d = nn.BatchNorm2d(256, momentum= batchNorm_momentum)\r\n\r\n self.conv33d = nn.Conv2d(256, 256, kernel_size=3, padding=1)\r\n self.bn33d = nn.BatchNorm2d(256, momentum= batchNorm_momentum)\r\n self.conv32d = nn.Conv2d(256, 256, kernel_size=3, padding=1)\r\n self.bn32d = nn.BatchNorm2d(256, momentum= batchNorm_momentum)\r\n self.conv31d = nn.Conv2d(256, 128, kernel_size=3, padding=1)\r\n self.bn31d = nn.BatchNorm2d(128, momentum= batchNorm_momentum)\r\n\r\n self.conv22d = nn.Conv2d(128, 128, kernel_size=3, padding=1)\r\n self.bn22d = nn.BatchNorm2d(128, momentum= batchNorm_momentum)\r\n self.conv21d = nn.Conv2d(128, 64, kernel_size=3, padding=1)\r\n self.bn21d = nn.BatchNorm2d(64, momentum= batchNorm_momentum)\r\n\r\n self.conv12d = nn.Conv2d(64, 64, kernel_size=3, padding=1)\r\n 
self.bn12d = nn.BatchNorm2d(64, momentum= batchNorm_momentum)\r\n self.conv11d = nn.Conv2d(64, self.config.nb_classes, kernel_size=3, padding=1)\r\n\r\n\r\n def forward(self, x):\r\n\r\n # Stage 1\r\n x11 = F.relu(self.bn11(self.conv11(x)))\r\n x12 = F.relu(self.bn12(self.conv12(x11)))\r\n x1p, id1 = F.max_pool2d(x12,kernel_size=2, stride=2,return_indices=True)\r\n\r\n # Stage 2\r\n x21 = F.relu(self.bn21(self.conv21(x1p)))\r\n x22 = F.relu(self.bn22(self.conv22(x21)))\r\n x2p, id2 = F.max_pool2d(x22,kernel_size=2, stride=2,return_indices=True)\r\n\r\n # Stage 3\r\n x31 = F.relu(self.bn31(self.conv31(x2p)))\r\n x32 = F.relu(self.bn32(self.conv32(x31)))\r\n x33 = F.relu(self.bn33(self.conv33(x32)))\r\n x3p, id3 = F.max_pool2d(x33,kernel_size=2, stride=2,return_indices=True)\r\n\r\n # Stage 4\r\n x41 = F.relu(self.bn41(self.conv41(x3p)))\r\n x42 = F.relu(self.bn42(self.conv42(x41)))\r\n x43 = F.relu(self.bn43(self.conv43(x42)))\r\n x4p, id4 = F.max_pool2d(x43,kernel_size=2, stride=2,return_indices=True)\r\n\r\n # Stage 5\r\n x51 = F.relu(self.bn51(self.conv51(x4p)))\r\n x52 = F.relu(self.bn52(self.conv52(x51)))\r\n x53 = F.relu(self.bn53(self.conv53(x52)))\r\n x5p, id5 = F.max_pool2d(x53,kernel_size=2, stride=2,return_indices=True)\r\n\r\n\r\n # Stage 5d\r\n x5d = F.max_unpool2d(x5p, id5, kernel_size=2, stride=2)\r\n x53d = F.relu(self.bn53d(self.conv53d(x5d)))\r\n x52d = F.relu(self.bn52d(self.conv52d(x53d)))\r\n x51d = F.relu(self.bn51d(self.conv51d(x52d)))\r\n\r\n # Stage 4d\r\n x4d = F.max_unpool2d(x51d, id4, kernel_size=2, stride=2)\r\n x43d = F.relu(self.bn43d(self.conv43d(x4d)))\r\n x42d = F.relu(self.bn42d(self.conv42d(x43d)))\r\n x41d = F.relu(self.bn41d(self.conv41d(x42d)))\r\n\r\n # Stage 3d\r\n x3d = F.max_unpool2d(x41d, id3, kernel_size=2, stride=2)\r\n x33d = F.relu(self.bn33d(self.conv33d(x3d)))\r\n x32d = F.relu(self.bn32d(self.conv32d(x33d)))\r\n x31d = F.relu(self.bn31d(self.conv31d(x32d)))\r\n\r\n # Stage 2d\r\n x2d = F.max_unpool2d(x31d, id2, kernel_size=2, stride=2)\r\n x22d = F.relu(self.bn22d(self.conv22d(x2d)))\r\n x21d = F.relu(self.bn21d(self.conv21d(x22d)))\r\n\r\n # Stage 1d\r\n x1d = F.max_unpool2d(x21d, id1, kernel_size=2, stride=2)\r\n x12d = F.relu(self.bn12d(self.conv12d(x1d)))\r\n x11d = self.conv11d(x12d)\r\n\r\n return x11d\r\n\r\n def load_from_segnet(self, model_path):\r\n s_dict = self.state_dict()# create a copy of the state dict\r\n th = torch.load(model_path).state_dict() # load the weigths\r\n # for name in th:\r\n # s_dict[corresp_name[name]] = th[name]\r\n self.load_state_dict(th)" ]
[ [ "torch.load", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.functional.max_pool2d", "torch.nn.functional.max_unpool2d" ] ]
jkkummerfeld/coref-ee
[ "594075470e9331ee1c40d611854b7546c735f668" ]
[ "ps.py" ]
[ "#!/usr/bin/env python\n\nimport os\n\nimport tensorflow as tf\nimport util\n\nif __name__ == \"__main__\":\n args = util.get_args()\n config = util.initialize_from_env(args.experiment, args.logdir)\n report_frequency = config[\"report_frequency\"]\n cluster_config = util.get_cluster_config()\n util.set_gpus()\n cluster = tf.train.ClusterSpec(cluster_config[\"addresses\"])\n server = tf.train.Server(cluster, job_name=\"ps\", task_index=0)\n server.join()\n" ]
[ [ "tensorflow.train.Server", "tensorflow.train.ClusterSpec" ] ]
Usama0121/PronouncUR
[ "3583a8498ae9a9f87bcdbf2b2ce45b2ad61e6c8b" ]
[ "g2p.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import saver_pb2\n\nimport data_utils\nimport seq2seq_model\n\nfrom six.moves import xrange, input\nfrom six import text_type\n\nclass G2PModel(object):\n \"\"\"Grapheme-to-Phoneme translation model class.\n\n Constructor parameters (for training mode only):\n train_lines: Train dictionary;\n valid_lines: Development dictionary;\n test_lines: Test dictionary.\n\n Attributes:\n gr_vocab: Grapheme vocabulary;\n ph_vocab: Phoneme vocabulary;\n train_set: Training buckets: words and sounds are mapped to ids;\n valid_set: Validation buckets: words and sounds are mapped to ids;\n session: Tensorflow session;\n model: Tensorflow Seq2Seq model for G2PModel object.\n train: Train method.\n interactive: Interactive decode method;\n evaluate: Word-Error-Rate counting method;\n decode: Decode file method.\n \"\"\"\n # We use a number of buckets and pad to the closest one for efficiency.\n # See seq2seq_model.Seq2SeqModel for details of how they work.\n _BUCKETS = [(5, 10), (10, 15), (40, 50)]\n\n def __init__(self, model_dir):\n \"\"\"Initialize model directory.\"\"\"\n self.model_dir = model_dir\n\n def load_decode_model(self):\n \"\"\"Load G2P model and initialize or load parameters in session.\"\"\"\n if not os.path.exists(os.path.join(self.model_dir, 'checkpoint')):\n raise RuntimeError(\"Model not found in %s\" % self.model_dir)\n\n self.batch_size = 1 # We decode one word at a time.\n #Load model parameters.\n num_layers, size = data_utils.load_params(self.model_dir)\n # Load vocabularies\n print(\"Loading vocabularies from %s\" % self.model_dir)\n self.gr_vocab = data_utils.load_vocabulary(os.path.join(self.model_dir,\n \"vocab.grapheme\"))\n self.ph_vocab = data_utils.load_vocabulary(os.path.join(self.model_dir,\n \"vocab.phoneme\"))\n\n self.rev_ph_vocab =\\\n data_utils.load_vocabulary(os.path.join(self.model_dir, \"vocab.phoneme\"),\n reverse=True)\n\n self.session = tf.Session()\n\n # Restore model.\n print(\"Creating %d layers of %d units.\" % (num_layers, size))\n self.model = seq2seq_model.Seq2SeqModel(len(self.gr_vocab),\n len(self.ph_vocab), self._BUCKETS,\n size, num_layers, 0,\n self.batch_size, 0, 0,\n forward_only=True)\n self.model.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)\n # Check for saved models and restore them.\n print(\"Reading model parameters from %s\" % self.model_dir)\n self.model.saver.restore(self.session, os.path.join(self.model_dir,\n \"model\"))\n\n\n def __put_into_buckets(self, source, target):\n \"\"\"Put data from source and target into buckets.\n\n Args:\n source: data with ids for graphemes;\n target: data with ids for phonemes;\n it must be aligned with the source data: n-th line contains the desired\n output for n-th line from the source.\n\n Returns:\n data_set: a list of length len(_BUCKETS); data_set[n] contains a list of\n (source, target) pairs read from the provided data that fit\n into the n-th bucket, i.e., such that len(source) < _BUCKETS[n][0] and\n len(target) < _BUCKETS[n][1]; source and target are lists of ids.\n \"\"\"\n\n # By default unk to unk\n data_set = [[[[4], [4]]] for _ in self._BUCKETS]\n\n for source_ids, target_ids in zip(source, target):\n target_ids.append(data_utils.EOS_ID)\n for bucket_id, (source_size, target_size) in enumerate(self._BUCKETS):\n if len(source_ids) < source_size 
and len(target_ids) < target_size:\n data_set[bucket_id].append([source_ids, target_ids])\n break\n return data_set\n\n\n def prepare_data(self, train_path, valid_path, test_path):\n \"\"\"Prepare train/validation/test sets. Create or load vocabularies.\"\"\"\n # Prepare data.\n print(\"Preparing G2P data\")\n train_gr_ids, train_ph_ids, valid_gr_ids, valid_ph_ids, self.gr_vocab,\\\n self.ph_vocab, self.test_lines =\\\n data_utils.prepare_g2p_data(self.model_dir, train_path, valid_path,\n test_path)\n # Read data into buckets and compute their sizes.\n print (\"Reading development and training data.\")\n self.valid_set = self.__put_into_buckets(valid_gr_ids, valid_ph_ids)\n self.train_set = self.__put_into_buckets(train_gr_ids, train_ph_ids)\n\n self.rev_ph_vocab = dict([(x, y) for (y, x) in enumerate(self.ph_vocab)])\n\n\n def __prepare_model(self, params):\n \"\"\"Prepare G2P model for training.\"\"\"\n\n self.params = params\n\n self.session = tf.Session()\n\n # Prepare model.\n print(\"Creating model with parameters:\")\n print(params)\n self.model = seq2seq_model.Seq2SeqModel(len(self.gr_vocab),\n len(self.ph_vocab), self._BUCKETS,\n self.params.size,\n self.params.num_layers,\n self.params.max_gradient_norm,\n self.params.batch_size,\n self.params.learning_rate,\n self.params.lr_decay_factor,\n forward_only=False,\n optimizer=self.params.optimizer)\n self.model.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)\n\n\n def load_train_model(self, params):\n \"\"\"Load G2P model for continuing train.\"\"\"\n # Check for saved model.\n if not os.path.exists(os.path.join(self.model_dir, 'checkpoint')):\n raise RuntimeError(\"Model not found in %s\" % self.model_dir)\n\n # Load model parameters.\n params.num_layers, params.size = data_utils.load_params(self.model_dir)\n\n # Prepare data and G2P Model.\n self.__prepare_model(params)\n\n # Restore model.\n print(\"Reading model parameters from %s\" % self.model_dir)\n self.model.saver.restore(self.session, os.path.join(self.model_dir,\n \"model\"))\n\n\n def create_train_model(self, params):\n \"\"\"Create G2P model for train from scratch.\"\"\"\n # Save model parameters.\n data_utils.save_params(params.num_layers, params.size, self.model_dir)\n\n # Prepare data and G2P Model\n self.__prepare_model(params)\n\n print(\"Created model with fresh parameters.\")\n self.session.run(tf.global_variables_initializer())\n\n\n def train(self):\n \"\"\"Train a gr->ph translation model using G2P data.\"\"\"\n\n train_bucket_sizes = [len(self.train_set[b])\n for b in xrange(len(self._BUCKETS))]\n train_total_size = float(sum(train_bucket_sizes))\n # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use\n # to select a bucket. 
Length of [scale[i], scale[i+1]] is proportional to\n # the size if i-th training bucket, as used later.\n train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size\n for i in xrange(len(train_bucket_sizes))]\n\n # This is the training loop.\n step_time, train_loss = 0.0, 0.0\n current_step, num_iter_wo_improve = 0, 0\n prev_train_losses, prev_valid_losses = [], []\n num_iter_cover_train = int(sum(train_bucket_sizes) /\n self.params.batch_size /\n self.params.steps_per_checkpoint)\n while (self.params.max_steps == 0\n or self.model.global_step.eval(self.session)\n <= self.params.max_steps):\n # Get a batch and make a step.\n start_time = time.time()\n step_loss = self.__calc_step_loss(train_buckets_scale)\n step_time += (time.time() - start_time) / self.params.steps_per_checkpoint\n train_loss += step_loss / self.params.steps_per_checkpoint\n current_step += 1\n\n # Once in a while, we save checkpoint, print statistics, and run evals.\n if current_step % self.params.steps_per_checkpoint == 0:\n # Print statistics for the previous steps.\n train_ppx = math.exp(train_loss) if train_loss < 300 else float('inf')\n print (\"global step %d learning rate %.4f step-time %.2f perplexity \"\n \"%.2f\" % (self.model.global_step.eval(self.session),\n self.model.learning_rate.eval(self.session),\n step_time, train_ppx))\n eval_loss = self.__calc_eval_loss()\n eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')\n print(\" eval: perplexity %.2f\" % (eval_ppx))\n # Decrease learning rate if no improvement was seen on train set\n # over last 3 times.\n if (len(prev_train_losses) > 2\n and train_loss > max(prev_train_losses[-3:])):\n self.session.run(self.model.learning_rate_decay_op)\n\n if (len(prev_valid_losses) > 0\n and eval_loss <= min(prev_valid_losses)):\n # Save checkpoint and zero timer and loss.\n self.model.saver.save(self.session,\n os.path.join(self.model_dir, \"model\"),\n write_meta_graph=False)\n\n if (len(prev_valid_losses) > 0\n and eval_loss >= min(prev_valid_losses)):\n num_iter_wo_improve += 1\n else:\n num_iter_wo_improve = 0\n\n if num_iter_wo_improve > num_iter_cover_train * 2:\n print(\"No improvement over last %d times. Training will stop after %d\"\n \"iterations if no improvement was seen.\"\n % (num_iter_wo_improve,\n num_iter_cover_train - num_iter_wo_improve))\n\n # Stop train if no improvement was seen on validation set\n # over last 3 epochs.\n if num_iter_wo_improve > num_iter_cover_train * 3:\n break\n\n prev_train_losses.append(train_loss)\n prev_valid_losses.append(eval_loss)\n step_time, train_loss = 0.0, 0.0\n\n print('Training done.')\n with tf.Graph().as_default():\n g2p_model_eval = G2PModel(self.model_dir)\n g2p_model_eval.load_decode_model()\n g2p_model_eval.evaluate(self.test_lines)\n\n\n def __calc_step_loss(self, train_buckets_scale):\n \"\"\"Choose a bucket according to data distribution. 
We pick a random number\n in [0, 1] and use the corresponding interval in train_buckets_scale.\n \"\"\"\n random_number_01 = np.random.random_sample()\n bucket_id = min([i for i in xrange(len(train_buckets_scale))\n if train_buckets_scale[i] > random_number_01])\n\n # Get a batch and make a step.\n encoder_inputs, decoder_inputs, target_weights = self.model.get_batch(\n self.train_set, bucket_id)\n _, step_loss, _ = self.model.step(self.session, encoder_inputs,\n decoder_inputs, target_weights,\n bucket_id, False)\n return step_loss\n\n\n def __calc_eval_loss(self):\n \"\"\"Run evals on development set and print their perplexity.\n \"\"\"\n eval_loss, num_iter_total = 0.0, 0.0\n for bucket_id in xrange(len(self._BUCKETS)):\n num_iter_cover_valid = int(math.ceil(len(self.valid_set[bucket_id])/\n self.params.batch_size))\n num_iter_total += num_iter_cover_valid\n for batch_id in xrange(num_iter_cover_valid):\n encoder_inputs, decoder_inputs, target_weights =\\\n self.model.get_eval_set_batch(self.valid_set, bucket_id,\n batch_id * self.params.batch_size)\n _, eval_batch_loss, _ = self.model.step(self.session, encoder_inputs,\n decoder_inputs, target_weights,\n bucket_id, True)\n eval_loss += eval_batch_loss\n eval_loss = eval_loss/num_iter_total if num_iter_total > 0 else float('inf')\n return eval_loss\n\n\n def decode_word(self, word):\n \"\"\"Decode input word to sequence of phonemes.\n\n Args:\n word: input word;\n\n Returns:\n phonemes: decoded phoneme sequence for input word;\n \"\"\"\n # Check if all graphemes attended in vocabulary\n gr_absent = [gr for gr in word if gr not in self.gr_vocab]\n if gr_absent:\n print(\"Symbols '%s' are not in vocabulary\" % \"','\".join(gr_absent).encode('utf-8'))\n return \"\"\n\n # Get token-ids for the input word.\n token_ids = [self.gr_vocab.get(s, data_utils.UNK_ID) for s in word]\n # Which bucket does it belong to?\n bucket_id = min([b for b in xrange(len(self._BUCKETS))\n if self._BUCKETS[b][0] > len(token_ids)])\n # Get a 1-element batch to feed the word to the model.\n encoder_inputs, decoder_inputs, target_weights = self.model.get_batch(\n {bucket_id: [(token_ids, [])]}, bucket_id)\n # Get output logits for the word.\n _, _, output_logits = self.model.step(self.session, encoder_inputs,\n decoder_inputs, target_weights,\n bucket_id, True)\n # This is a greedy decoder - outputs are just argmaxes of output_logits.\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]\n # If there is an EOS symbol in outputs, cut them at that point.\n if data_utils.EOS_ID in outputs:\n outputs = outputs[:outputs.index(data_utils.EOS_ID)]\n # Phoneme sequence corresponding to outputs.\n return \" \".join([self.rev_ph_vocab[output] for output in outputs])\n\n\n def interactive(self):\n \"\"\"Decode word from standard input.\n \"\"\"\n while True:\n try:\n word = input(\"> \")\n if not issubclass(type(word), text_type):\n word = text_type(word, encoding='utf-8', errors='replace')\n except EOFError:\n break\n if not word:\n break\n print(self.decode_word(word))\n\n\n def calc_error(self, dictionary):\n \"\"\"Calculate a number of prediction errors.\n \"\"\"\n errors = 0\n for word, pronunciations in dictionary.items():\n hyp = self.decode_word(word)\n if hyp not in pronunciations:\n errors += 1\n return errors\n\n\n def evaluate(self, test_lines):\n \"\"\"Calculate and print out word error rate (WER) and Accuracy\n on test sample.\n\n Args:\n test_lines: List of test dictionary. 
Each element of list must be String\n containing word and its pronounciation (e.g., \"word W ER D\");\n \"\"\"\n test_dic = data_utils.collect_pronunciations(test_lines)\n\n if len(test_dic) < 1:\n print(\"Test dictionary is empty\")\n return\n\n print('Beginning calculation word error rate (WER) on test sample.')\n errors = self.calc_error(test_dic)\n\n print(\"Words: %d\" % len(test_dic))\n print(\"Errors: %d\" % errors)\n print(\"WER: %.3f\" % (float(errors)/len(test_dic)))\n print(\"Accuracy: %.3f\" % float(1-(errors/len(test_dic))))\n\n\n def decode(self, decode_lines, output_file=None):\n \"\"\"Decode words from file.\n\n Returns:\n if [--output output_file] pointed out, write decoded word sequences in\n this file. Otherwise, print decoded words in standard output.\n \"\"\"\n phoneme_lines = []\n\n # Decode from input file.\n if output_file:\n for word in decode_lines:\n word = word.strip()\n phonemes = self.decode_word(word)\n output_file.write(word)\n output_file.write(' ')\n output_file.write(phonemes)\n output_file.write('\\n')\n phoneme_lines.append(phonemes)\n output_file.close()\n else:\n for word in decode_lines:\n word = word.strip()\n phonemes = self.decode_word(word)\n phoneme_lines.append(phonemes)\n return phoneme_lines\n" ]
[ [ "tensorflow.Graph", "tensorflow.global_variables", "numpy.random.random_sample", "tensorflow.global_variables_initializer", "numpy.argmax", "tensorflow.Session" ] ]
eanemo/MedicalZooPytorch
[ "4996894818fad230532d42dfd590e914c36a0c53" ]
[ "lib/visual3D_temp/BaseWriter.py" ]
[ "import os\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport lib.utils as utils\n\ndict_class_names = {\"iseg2017\": [\"Air\", \"CSF\", \"GM\", \"WM\"],\n \"iseg2019\": [\"Air\", \"CSF\", \"GM\", \"WM\"],\n \"mrbrains4\": [\"Air\", \"CSF\", \"GM\", \"WM\"],\n \"mrbrains9\": [\"Background\", \"Cort.GM\", \"BS\", \"WM\", \"WML\", \"CSF\",\n \"Ventr.\", \"Cerebellum\", \"stem\"],\n \"brats2018\": [\"Background\", \"NCR/NET\", \"ED\", \"ET\"],\n \"brats2019\": [\"Background\", \"NCR\", \"ED\", \"NET\", \"ET\"],\n \"brats2020\": [\"Background\", \"NCR/NET\", \"ED\", \"ET\"],\n \"covid_seg\": [\"c1\", \"c2\", \"c3\"],\n \"miccai2019\": [\"c1\", \"c2\", \"c3\", \"c4\", \"c5\", \"c6\", \"c7\"],\n \"mandible_seg\" : [\"Background\", \"Mandible\"]\n }\n\n\nclass TensorboardWriter():\n\n def __init__(self, args):\n\n name_model = args.log_dir + args.model + \"_\" + args.dataset_name + \"_\" + utils.datestr()\n self.writer = SummaryWriter(log_dir=args.log_dir + name_model, comment=name_model)\n\n utils.make_dirs(args.save)\n self.csv_train, self.csv_val = self.create_stats_files(args.save)\n self.dataset_name = args.dataset_name\n self.classes = args.classes\n self.label_names = dict_class_names[args.dataset_name]\n\n self.data = self.create_data_structure()\n\n def create_data_structure(self, ):\n data = {\"train\": dict((label, 0.0) for label in self.label_names),\n \"val\": dict((label, 0.0) for label in self.label_names)}\n data['train']['loss'] = 0.0\n data['val']['loss'] = 0.0\n data['train']['count'] = 1.0\n data['val']['count'] = 1.0\n data['train']['dsc'] = 0.0\n data['val']['dsc'] = 0.0\n return data\n\n def display_terminal(self, iter, epoch, mode='train', summary=False):\n \"\"\"\n\n :param iter: iteration or partial epoch\n :param epoch: epoch of training\n :param loss: any loss numpy\n :param mode: train or val ( for training and validation)\n :param summary: to print total statistics at the end of epoch\n \"\"\"\n if summary:\n info_print = \"\\nSummary {} Epoch {:2d}: Loss:{:.4f} \\t DSC:{:.4f} \".format(mode, epoch,\n self.data[mode]['loss'] /\n self.data[mode]['count'],\n self.data[mode]['dsc'] /\n self.data[mode]['count'])\n\n for i in range(len(self.label_names)):\n info_print += \"\\t{} : {:.4f}\".format(self.label_names[i],\n self.data[mode][self.label_names[i]] / self.data[mode]['count'])\n\n print(info_print)\n else:\n\n info_print = \"\\nEpoch: {:.2f} Loss:{:.4f} \\t DSC:{:.4f}\".format(iter, self.data[mode]['loss'] /\n self.data[mode]['count'],\n self.data[mode]['dsc'] /\n self.data[mode]['count'])\n\n for i in range(len(self.label_names)):\n info_print += \"\\t{}:{:.4f}\".format(self.label_names[i],\n self.data[mode][self.label_names[i]] / self.data[mode]['count'])\n print(info_print)\n\n def create_stats_files(self, path):\n train_f = open(os.path.join(path, 'train.csv'), 'w')\n val_f = open(os.path.join(path, 'val.csv'), 'w')\n return train_f, val_f\n\n def reset(self, mode):\n self.data[mode]['dsc'] = 0.0\n self.data[mode]['loss'] = 0.0\n self.data[mode]['count'] = 1\n for i in range(len(self.label_names)):\n self.data[mode][self.label_names[i]] = 0.0\n\n def update_scores(self, iter, loss, channel_score, mode, writer_step):\n \"\"\"\n :param iter: iteration or partial epoch\n :param loss: any loss torch.tensor.item()\n :param channel_score: per channel score or dice coef\n :param mode: train or val ( for training and validation)\n :param writer_step: tensorboard writer step\n \"\"\"\n # WARNING ASSUMING THAT CHANNELS IN SAME ORDER AS 
DICTIONARY\n\n dice_coeff = np.mean(channel_score) * 100\n\n num_channels = len(channel_score)\n self.data[mode]['dsc'] += dice_coeff\n self.data[mode]['loss'] += loss\n self.data[mode]['count'] = iter + 1\n\n for i in range(num_channels):\n self.data[mode][self.label_names[i]] += channel_score[i]\n if self.writer is not None:\n self.writer.add_scalar(mode + '/' + self.label_names[i], channel_score[i], global_step=writer_step)\n\n def write_end_of_epoch(self, epoch):\n\n self.writer.add_scalars('DSC/', {'train': self.data['train']['dsc'] / self.data['train']['count'],\n 'val': self.data['val']['dsc'] / self.data['val']['count'],\n }, epoch)\n self.writer.add_scalars('Loss/', {'train': self.data['train']['loss'] / self.data['train']['count'],\n 'val': self.data['val']['loss'] / self.data['val']['count'],\n }, epoch)\n for i in range(len(self.label_names)):\n self.writer.add_scalars(self.label_names[i],\n {'train': self.data['train'][self.label_names[i]] / self.data['train']['count'],\n 'val': self.data['val'][self.label_names[i]] / self.data['train']['count'],\n }, epoch)\n\n train_csv_line = 'Epoch:{:2d} Loss:{:.4f} DSC:{:.4f}'.format(epoch,\n self.data['train']['loss'] / self.data['train'][\n 'count'],\n self.data['train']['dsc'] / self.data['train'][\n 'count'])\n val_csv_line = 'Epoch:{:2d} Loss:{:.4f} DSC:{:.4f}'.format(epoch,\n self.data['val']['loss'] / self.data['val'][\n 'count'],\n self.data['val']['dsc'] / self.data['val'][\n 'count'])\n self.csv_train.write(train_csv_line + '\\n')\n self.csv_val.write(val_csv_line + '\\n')\n" ]
[ [ "numpy.mean", "torch.utils.tensorboard.SummaryWriter" ] ]
patrickywu/PartisanAssociations
[ "f28f3afefb75e87b6dd2909def5031a40f03b40b" ]
[ "partisanassociations/utils.py" ]
[ "import pandas as pd\nimport string\nimport gensim\nfrom gensim.parsing.preprocessing import preprocess_string\nfrom gensim.models.doc2vec import TaggedDocument\n\ndef data_reader(data, id_var, description_var, drop_missing=True):\n id = data[id_var].values\n descriptions = data[description_var].values\n data_frame = {'id': id, 'description': descriptions}\n df = pd.DataFrame(data_frame)\n if drop_missing:\n df = df.dropna()\n df.reset_index(inplace=True, drop=True)\n return df\n\ndef preprocess_text(df, description_var, filters=None, printable_only=True, remove_empty_list_observations=True):\n tokens = []\n printable = set(string.printable)\n if filters is None:\n for i in range(len(df[description_var])):\n if printable_only:\n desc = ''.join(filter(lambda x: x in printable, str(df[description_var][i])))\n else:\n desc = str(df[description_var][i])\n tokens.append(preprocess_string(desc))\n else:\n for i in range(len(df[description_var])):\n if printable_only:\n desc = ''.join(filter(lambda x: x in printable, str(df[description_var][i])))\n else:\n desc = str(df[description_var][i])\n tokens.append(preprocess_string(desc, filters=filters))\n df['tokens'] = tokens\n if remove_empty_list_observations:\n df = df[~df.tokens.str.len().eq(0)]\n df.reset_index(inplace=True, drop=True)\n return df\n\n# Assumes df has a variable named \"tokens\" and \"id\"\ndef TweetDocument(df):\n descriptions = [TaggedDocument(words=df.tokens[i], tags=[str(df.id[i])]) for i in range(len(df.tokens))]\n return descriptions\n" ]
[ [ "pandas.DataFrame" ] ]
MariosKef/RULe
[ "4cf4563d1abcf350de9958a0de26d2d0126c819d" ]
[ "mipego_multi/mipego.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 6 15:05:01 2017\n\n@author: wangronin\n@email: [email protected]\n\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n\n# import pdb\nimport dill, functools, itertools, copyreg, logging\nimport numpy as np\n\nimport GPUtil as gp\n\nimport queue\nimport threading\nimport time\nimport copy\n\nimport json # to save and load data\n\n\nfrom joblib import Parallel, delayed\nfrom scipy.optimize import fmin_l_bfgs_b\nfrom sklearn.metrics import r2_score\n\nfrom .InfillCriteria import EI, PI, MGFI, HVI, MONTECARLO\nfrom .optimizer import mies\nfrom .utils import proportional_selection\n\nfrom .Bi_Objective import * # added the Bi_Objective code by Christiaan Lamers\n\n# TODO: remove the usage of pandas here change it to customized np.ndarray\n# TODO: finalize the logging system\nclass Solution(np.ndarray):\n def __new__(\n cls, x, fitness=None, n_eval=0, index=None, var_name=None, loss=None, time=None\n ):\n obj = np.asarray(x, dtype=\"object\").view(cls)\n obj.fitness = fitness\n obj.loss = loss # CHRIS: added loss and time here for bi-objective\n obj.time = time\n obj.n_eval = n_eval\n obj.index = index\n obj.var_name = var_name\n return obj\n\n def __array_finalize__(self, obj):\n if obj is None:\n return\n # Needed for array slicing\n self.fitness = getattr(obj, \"fitness\", None)\n self.loss = getattr(obj, \"loss\", None) # added loss and time here\n self.time = getattr(obj, \"time\", None)\n self.n_eval = getattr(obj, \"n_eval\", None)\n self.index = getattr(obj, \"index\", None)\n self.var_name = getattr(obj, \"var_name\", None)\n\n def to_dict(self):\n if self.var_name is None:\n return\n return {k: self[i] for i, k in enumerate(self.var_name)}\n\n def __str__(self):\n return self.to_dict()\n\n\nclass mipego(object):\n \"\"\"\n Generic Bayesian optimization algorithm\n \"\"\"\n\n def __init__(\n self,\n search_space,\n obj_func,\n surrogate,\n second_surrogate=None,\n ftarget=None,\n minimize=True,\n noisy=False,\n max_eval=None,\n infill=\"MGFI\",\n t0=2,\n tf=1e-1,\n schedule=None,\n n_init_sample=None,\n n_point=1,\n n_job=1,\n backend=\"multiprocessing\",\n n_restart=None,\n max_infill_eval=None,\n wait_iter=3,\n optimizer=\"MIES\",\n log_file=None,\n data_file=None,\n verbose=False,\n random_seed=None,\n available_gpus=[],\n bi_objective=False,\n ref_time=3000.0,\n ref_loss=3.0,\n hvi_alpha=0.1,\n ignore_gpu=[],\n **obj_func_params,\n ):\n \"\"\"\n parameter\n ---------\n search_space : instance of SearchSpace type\n obj_func : callable,\n the objective function to optimize\n surrogate: surrogate model, currently support either GPR or random forest\n second_surrogate: second surrogate model for bi-objective optimization.\n minimize : bool,\n minimize or maximize\n noisy : bool,\n is the objective stochastic or not?\n max_eval : int,\n maximal number of evaluations on the objective function\n infill: string\n infill criterion used in optimization\n For bi-objective this should be set to HVI (hyper-volume indicator)\n Other options are MGFI, IE, and PI\n <TODO> add sanity check for this.\n t0=2, tf=1e-1, schedule=None\n Temperature parameters for the MGFI infill criterion.\n n_init_sample : int,\n the size of inital Design of Experiment (DoE),\n default: 20 * dim\n n_point : int,\n the number of candidate solutions proposed using infill-criteria,\n default : 1\n n_job : int,\n the number of jobs scheduled for parallelizing the evaluation.\n Only Effective when n_point > 1\n backend : str,\n the 
parallelization backend, supporting: 'multiprocessing', 'MPI', 'SPARC'\n optimizer: str,\n the optimization algorithm for infill-criteria,\n supported options: 'MIES' (Mixed-Integer Evolution Strategy),\n 'BFGS' (quasi-Newtion for GPR)\n available_gpus: array:\n one dimensional array of GPU numbers to use for running on GPUs in parallel. Defaults to no gpus.\n bi_objective: boolean:\n False means one objective optimization (one surrogate), True: two-objective problem (two surrogates)\n ref_time: float\n reference value for the second objective (only used in bi-objective optimization)\n ref_loss: float\n reference value for the first objective (only used in bi-objective optimization)\n hvi_alpha\n allows variable lower confidence interval, only used in bi-objective optimization\n ignore_gpus: list\n list of GPU's that should not be used\n\n obj_func_params: dictionary\n additional variables to be passed to the objective function\n\n \"\"\"\n self.verbose = verbose\n self.log_file = log_file\n self.data_file = data_file\n self._space = search_space\n self.var_names = self._space.var_name.tolist()\n self.obj_func = obj_func\n self.noisy = noisy\n self.time_surrogate = second_surrogate\n self.loss_surrogate = surrogate\n self.async_time_surrogates = {}\n self.async_loss_surrogates = {}\n self.all_time_r2 = []\n self.all_loss_r2 = []\n self.n_point = n_point\n self.n_jobs = n_job\n self.available_gpus = available_gpus\n self._parallel_backend = backend\n self.ftarget = ftarget\n self.infill = infill\n self.minimize = minimize\n self.max_iter = np.inf\n self.dim = len(self._space)\n self._best = min if self.minimize else max\n self.ignore_gpu = ignore_gpu\n self.surr_time_fit_hist = []\n self.surr_time_mies_hist = []\n self.surr_loss_fit_hist = []\n self.surr_loss_mies_hist = []\n self.time_between_gpu_hist = (\n []\n ) # time in gpuworker() that a network is not trained on a gpu\n self.obj_func_params = obj_func_params\n\n self.bi = bi_objective\n self.hvi_alpha = hvi_alpha # allows variable lower confidence interval\n\n self.r_index = self._space.id_C # index of continuous variable\n self.i_index = self._space.id_O # index of integer variable\n self.d_index = self._space.id_N # index of categorical variable\n\n self.param_type = self._space.var_type\n self.N_r = len(self.r_index)\n self.N_i = len(self.i_index)\n self.N_d = len(self.d_index)\n\n # parameter: objective evaluation\n # TODO: for noisy objective function, maybe increase the initial evaluations\n self.init_n_eval = 1\n self.max_eval = int(max_eval) if max_eval else np.inf\n self.n_left = (\n int(max_eval) if max_eval else np.inf\n ) # counts number of iterations left\n self.n_init_sample = (\n self.dim * 20 if n_init_sample is None else int(n_init_sample)\n )\n self_eval_hist = [] # TODO remove this and make it work\n self.eval_hist_time = [] # added time and loss history\n self.eval_hist_loss = []\n self.eval_hist_id = []\n self.iter_count = 0\n self.eval_count = 0\n self.ref_time = ref_time\n self.ref_loss = ref_loss\n # self.max_iter = self.max_eval - self.n_init_sample # Marios\n\n # setting up cooling schedule\n if self.infill == \"MGFI\":\n self.t0 = t0\n self.tf = tf\n self.t = t0\n self.schedule = schedule\n\n # TODO: find a nicer way to integrate this part\n # cooling down to 1e-1\n max_iter = self.max_eval - self.n_init_sample\n if self.schedule == \"exp\": # exponential\n self.alpha = (self.tf / t0) ** (1.0 / max_iter)\n elif self.schedule == \"linear\":\n self.eta = (t0 - self.tf) / max_iter # linear\n elif self.schedule 
== \"log\":\n self.c = self.tf * np.log(max_iter + 1) # logarithmic\n elif self.schedule == \"self-adaptive\":\n raise NotImplementedError\n\n # paramter: acquisition function optimziation\n mask = np.nonzero(self._space.C_mask | self._space.O_mask)[0]\n self._bounds = np.array(\n [self._space.bounds[i] for i in mask]\n ) # bounds for continuous and integer variable\n # self._levels = list(self._space.levels.values())\n self._levels = np.array(\n [self._space.bounds[i] for i in self._space.id_N]\n ) # levels for discrete variable\n self._optimizer = optimizer\n # TODO: set this number smaller when using L-BFGS and larger for MIES\n self._max_eval = (\n int(5e2 * self.dim) if max_infill_eval is None else max_infill_eval\n )\n self._random_start = int(5 * self.dim) if n_restart is None else n_restart\n self._wait_iter = int(\n wait_iter\n ) # maximal restarts when optimal value does not change\n\n # Intensify: the number of potential configuations compared against the current best\n # self.mu = int(np.ceil(self.n_init_sample / 3))\n self.mu = 3\n\n # stop criteria\n self.stop_dict = {}\n self.hist_f = []\n self._check_params()\n\n # set the random seed\n self.random_seed = random_seed\n if self.random_seed:\n np.random.seed(self.random_seed)\n\n self._get_logger(self.log_file)\n\n # allows for pickling the objective function\n copyreg.pickle(self._eval_one, dill.pickles)\n copyreg.pickle(self.obj_func, dill.pickles)\n\n # paralellize gpus\n self.init_gpus = True\n self.evaluation_queue = queue.Queue()\n\n def _get_logger(self, logfile):\n \"\"\"\n When logfile is None, no records are written\n \"\"\"\n self.logger = logging.getLogger(self.__class__.__name__)\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n \"- %(asctime)s [%(levelname)s] -- \" \"[- %(process)d - %(name)s] %(message)s\"\n )\n\n # create console handler and set level to warning\n ch = logging.StreamHandler()\n ch.setLevel(logging.WARNING)\n ch.setFormatter(formatter)\n self.logger.addHandler(ch)\n\n # create file handler and set level to debug\n if logfile is not None:\n fh = logging.FileHandler(logfile)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n self.logger.addHandler(fh)\n\n def _compare(self, f1, f2):\n \"\"\"\n Test if perf1 is better than perf2\n \"\"\"\n if self.minimize:\n return f1 < f2\n else:\n return f2 > f2\n\n def _remove_duplicate(self, data):\n \"\"\"\n check for the duplicated solutions, as it is not allowed\n for noiseless objective functions\n \"\"\"\n ans = []\n X = np.array([s.tolist() for s in self.data], dtype=\"object\")\n for i, x in enumerate(data):\n CON = np.all(\n np.isclose(\n np.asarray(X[:, self.r_index], dtype=\"float\"),\n np.asarray(x[self.r_index], dtype=\"float\"),\n ),\n axis=1,\n )\n INT = np.all(X[:, self.i_index] == x[self.i_index], axis=1)\n CAT = np.all(X[:, self.d_index] == x[self.d_index], axis=1)\n if not any(CON & INT & CAT):\n ans.append(x)\n return ans\n\n def _eval_gpu(self, x, gpu=6, runs=1): # changed gpu=0 to gpu=1\n \"\"\"\n evaluate one solution\n \"\"\"\n\n # TODO: sometimes the obj_func take a dictionary as input...\n time_, loss_, n_eval = x.time, x.loss, x.n_eval\n # try:\n # ans = [self.obj_func(x.tolist()) for i in range(runs)]\n # except:\n # ans = [self.obj_func(x.to_dict(), gpu_no=gpu) for i in range(runs)]\n gpu_patch = gpu\n try_count = 0\n while try_count < 5:\n ans = self.obj_func(x.to_dict(), gpu_no=gpu_patch, **self.obj_func_params)\n self.logger.info(\n f\"ans for config {x.to_dict()} on gpu {gpu_patch} is: {ans} \"\n 
)\n\n time_ans, loss_ans, success = ans[0], ans[1], ans[2]\n try_count += 1\n if success:\n break\n else:\n try_gpu_count = 0\n while try_gpu_count < 5:\n try_gpu_count += 1\n print(\n \"gpu \"\n + str(gpu_patch)\n + \" failed to give answer, searching for new gpu\"\n )\n available_gpus_patch = gp.getAvailable(limit=20)\n for i in range(len(self.ignore_gpu)):\n try:\n available_gpus_patch.remove(self.ignore_gpu[i])\n except:\n pass\n if len(available_gpus_patch) > 0:\n gpu_patch = np.random.choice(available_gpus_patch)\n break\n else:\n print(\"no gpus available, waiting 60 seconds\")\n time.sleep(60)\n\n time_loc = np.sum(time_ans)\n loss_loc = np.sum(loss_ans)\n\n x.n_eval += runs\n x.time = (\n time_loc / runs if time_ is None else (time_ * n_eval + time_loc) / x.n_eval\n )\n x.loss = (\n loss_loc / runs if loss_ is None else (loss_ * n_eval + loss_loc) / x.n_eval\n )\n\n self.eval_hist_loss += [loss_ans] # added time and loss history\n self.eval_hist_time += [time_ans]\n self.eval_hist_id += [x.index] * runs\n\n return x, runs, time_loc, loss_loc, [x.index] * runs\n\n def _eval_one(self, x, runs=1):\n \"\"\"\n evaluate one solution\n \"\"\"\n # TODO: sometimes the obj_func take a dictionary as input...\n time_ = x.time\n loss_ = x.loss\n n_eval = x.n_eval\n\n ans = self.obj_func(x.to_dict(), **self.obj_func_params)\n time_ans, loss_ans, success = ans[0], ans[1], ans[2]\n\n time = np.sum(time_ans)\n loss = np.sum(loss_ans)\n\n x.n_eval += runs\n x.time = time / runs if time_ is None else (time_ * n_eval + time) / x.n_eval\n x.loss = loss / runs if loss_ is None else (loss_ * n_eval + loss) / x.n_eval\n\n self.eval_hist_loss += [x.loss]\n self.eval_hist_time += [x.time]\n self.eval_hist_id += [x.index] * runs\n\n return x, runs, time, loss, [x.index] * runs\n\n def evaluate(self, data, runs=1):\n \"\"\"Evaluate the candidate points and update evaluation info in the dataframe\"\"\"\n if isinstance(data, Solution):\n self._eval_one(data)\n\n elif isinstance(data, list):\n if self.n_jobs > 1:\n if (\n self._parallel_backend == \"multiprocessing\"\n ): # parallel execution using joblib\n res = Parallel(n_jobs=self.n_jobs, verbose=False)(\n delayed(self._eval_one, check_pickle=False)(x) for x in data\n )\n x, runs, hist_time, hist_loss, hist_id = zip(*res)\n self.eval_count += sum(runs)\n self.eval_hist_time += list(itertools.chain(*hist_time))\n self.eval_hist_loss += list(itertools.chain(*hist_loss))\n self.eval_hist_id += list(itertools.chain(*hist_id))\n for i, k in enumerate(data):\n data[i] = x[i].copy()\n elif self._parallel_backend == \"MPI\": # parallel execution using MPI\n # TODO: to use InstanceRunner here\n pass\n elif (\n self._parallel_backend == \"Spark\"\n ): # parallel execution using Spark\n pass\n else:\n for x in data:\n self._eval_one(x)\n self.eval_count += 1\n\n def fit_and_assess(self, time_surrogate=None, loss_surrogate=None):\n while True:\n try:\n X = np.atleast_2d([s.tolist() for s in self.data])\n time_fitness = np.array([s.time for s in self.data])\n\n # TODO: is normalization really a good idea here? 
can be removed, or save scaling factor and give to s-metric (min and max)\n # normalization the response for numerical stability\n # e.g., for MGF-based acquisition function\n # _time_min, _time_max = np.min(time_fitness), np.max(time_fitness)\n # time_fitness_scaled = (time_fitness - _time_min) / (_time_max - _time_min) #Xin Guo improvement\n\n if (\n len(time_fitness) == 1\n ): # for the case n_init_sample=1 #Xin Guo improvement\n time_fitness_scaled = time_fitness\n else:\n time_min, time_max = np.min(time_fitness), np.max(time_fitness)\n if not time_min == time_max: # for the case of flat fitness\n time_fitness_scaled = (time_fitness - time_min) / (\n time_max - time_min\n )\n else:\n time_fitness_scaled = time_fitness\n\n # fit the time surrogate model\n if time_surrogate is None:\n start_timer = time.time()\n self.time_surrogate.fit(X, time_fitness)\n stop_timer = time.time()\n self.surr_time_fit_hist.append(stop_timer - start_timer)\n self.time_is_update = True\n start_timer = time.time()\n time_fitness_hat = self.time_surrogate.predict(X)\n stop_timer = time.time()\n self.surr_time_mies_hist.append(stop_timer - start_timer)\n else:\n start_timer = time.time()\n time_surrogate.fit(X, time_fitness)\n stop_timer = time.time()\n self.surr_time_fit_hist.append(stop_timer - start_timer)\n self.time_is_update = True\n start_timer = time.time()\n time_fitness_hat = time_surrogate.predict(X)\n stop_timer = time.time()\n self.surr_time_mies_hist.append(stop_timer - start_timer)\n\n loss_fitness = np.array([s.loss for s in self.data])\n\n # normalization the response for numerical stability\n # e.g., for MGF-based acquisition function\n # _loss_min, _loss_max = np.min(loss_fitness), np.max(loss_fitness) #Xin Guo improvement\n # loss_fitness_scaled = (loss_fitness - _loss_min) / (_loss_max - _loss_min)\n\n if (\n len(loss_fitness) == 1\n ): # for the case n_init_sample=1 #Xin Guo improvement\n loss_fitness_scaled = loss_fitness\n else:\n loss_min, loss_max = np.min(loss_fitness), np.max(loss_fitness)\n if not loss_min == loss_max: # for the case of flat fitness\n loss_fitness_scaled = (loss_fitness - loss_min) / (\n loss_max - loss_min\n )\n else:\n loss_fitness_scaled = loss_fitness\n\n # fit the loss surrogate model\n if loss_surrogate is None:\n start_timer = time.time()\n self.loss_surrogate.fit(X, loss_fitness)\n stop_timer = time.time()\n self.surr_loss_fit_hist.append(stop_timer - start_timer)\n self.loss_is_update = True\n start_timer = time.time()\n loss_fitness_hat = self.loss_surrogate.predict(X)\n stop_timer = time.time()\n self.surr_loss_mies_hist.append(stop_timer - start_timer)\n else:\n start_timer = time.time()\n loss_surrogate.fit(X, loss_fitness)\n stop_timer = time.time()\n self.surr_loss_fit_hist.append(stop_timer - start_timer)\n self.loss_is_update = True\n start_timer = time.time()\n loss_fitness_hat = loss_surrogate.predict(X)\n stop_timer = time.time()\n self.surr_loss_mies_hist.append(stop_timer - start_timer)\n\n # TODO use s-metric to calculate fitness? this is just for logging, optimization (searching for candidate) takes place before this step, so what does surrogate.predict do? 
the fitting part is useful though\n # fitness_hat = surrogate.predict(X)\n # TODO, maybe it's usefull to cast time and loss variables to sms-ego fitness here\n\n time_r2 = r2_score(time_fitness, time_fitness_hat)\n loss_r2 = r2_score(loss_fitness, loss_fitness_hat)\n self.all_time_r2.append(time_r2)\n self.all_loss_r2.append(loss_r2)\n break\n except Exception as e:\n print(\"Error fitting model, retrying...\")\n print(X)\n print(time_fitness)\n print(loss_fitness)\n print(e)\n time.sleep(15)\n # TODO: in case r2 is really poor, re-fit the model or transform the input?\n # consider the performance metric transformation in SMAC\n self.logger.info(\"Surrogate model time_r2: {}\".format(time_r2))\n self.logger.info(\"Surrogate model loss_r2: {}\".format(loss_r2))\n return time_r2, loss_r2\n\n def select_candidate(self):\n self.is_update = False\n X, infill_value = self.arg_max_acquisition(\n plugin=None,\n time_surrogate=self.time_surrogate,\n loss_surrogate=self.loss_surrogate,\n data=self.data,\n n_left=self.n_left,\n max_iter=self.max_iter,\n )\n\n if self.n_point > 1:\n X = [\n Solution(x, index=len(self.data) + i, var_name=self.var_names)\n for i, x in enumerate(X)\n ]\n else:\n X = [Solution(X, index=len(self.data), var_name=self.var_names)]\n\n X = self._remove_duplicate(X)\n # if the number of new design sites obtained is less than required,\n # draw the remaining ones randomly\n if len(X) < self.n_point:\n self.logger.warn(\n \"iteration {}: duplicated solution found \"\n \"by optimization! New points is taken from random \"\n \"design\".format(self.iter_count)\n )\n N = self.n_point - len(X)\n if N > 1:\n s = self._space.sampling(N=N, method=\"LHS\")\n else: # To generate a single sample, only uniform sampling is feasible\n s = self._space.sampling(N=1, method=\"uniform\")\n X += [\n Solution(x, index=len(self.data) + i, var_name=self.var_names)\n for i, x in enumerate(s)\n ]\n\n candidates_id = [x.index for x in X]\n # for noisy fitness: perform a proportional selection from the evaluated ones\n if self.noisy:\n # after evaluate run S-metric on all solutions to determine fitness\n for i in range(len(self.data)): # this is a bottleneck\n other_solutions = copy.deepcopy(self.data)\n del other_solutions[i]\n self.data[i].fitness = s_metric(\n self.data[i],\n other_solutions,\n self.n_left,\n self.max_iter,\n ref_time=self.ref_time,\n ref_loss=self.ref_loss,\n )\n id_, fitness = zip(\n [\n (i, d.fitness)\n for i, d in enumerate(self.data)\n if i != self.incumbent_id\n ]\n )\n __ = proportional_selection(\n fitness, self.mu, self.minimize, replacement=False\n )\n candidates_id.append(id_[__])\n\n # TODO: postpone the evaluate to intensify...\n self.evaluate(X, runs=self.init_n_eval)\n print(\"n_left\")\n print(self.n_left)\n self.data += X\n # after evaluate run S-metric on all solutions to determine fitness\n for i in range(len(self.data)): # this is a bottleneck\n other_solutions = copy.deepcopy(self.data)\n del other_solutions[i]\n self.data[i].fitness = s_metric(\n self.data[i],\n other_solutions,\n self.n_left,\n self.max_iter,\n ref_time=self.ref_time,\n ref_loss=self.ref_loss,\n )\n\n return candidates_id\n\n def intensify(self, candidates_ids):\n \"\"\"\n intensification procedure for noisy observations (from SMAC)\n \"\"\"\n # TODO: verify the implementation here\n maxR = 20 # maximal number of the evaluations on the incumbent\n for i, ID in enumerate(candidates_ids):\n r, extra_run = 1, 1\n conf = self.data.loc[i]\n self.evaluate(conf, 1)\n print(conf.to_frame().T)\n\n if 
conf.n_eval > self.incumbent_id.n_eval:\n self.incumbent_id = self.evaluate(self.incumbent_id, 1)\n extra_run = 0\n\n while True:\n if self._compare(self.incumbent_id.perf, conf.perf):\n self.incumbent_id = self.evaluate(\n self.incumbent_id,\n min(extra_run, maxR - self.incumbent_id.n_eval),\n )\n print(self.incumbent_id.to_frame().T)\n break\n if conf.n_eval > self.incumbent_id.n_eval:\n self.incumbent_id = conf\n if self.verbose:\n print(\n \"[DEBUG] iteration %d -- new incumbent selected:\"\n % self.iter_count\n )\n print(\"[DEBUG] {}\".format(self.incumbent_id))\n print(\n \"[DEBUG] with performance: {}\".format(\n self.incumbent_id.perf\n )\n )\n print()\n break\n\n r = min(2 * r, self.incumbent_id.n_eval - conf.n_eval)\n self.data.loc[i] = self.evaluate(conf, r)\n print(self.conf.to_frame().T)\n extra_run += r\n\n def _initialize(self):\n \"\"\"Generate the initial data set (DOE) and construct the surrogate model\"\"\"\n self.logger.info(\n \"selected time_surrogate model: {}\".format(self.time_surrogate.__class__)\n )\n self.logger.info(\n \"selected loss_surrogate model: {}\".format(self.loss_surrogate.__class__)\n )\n self.logger.info(\"building the initial design of experiemnts...\")\n\n samples = self._space.sampling(self.n_init_sample)\n self.data = [\n Solution(s, index=k, var_name=self.var_names) for k, s in enumerate(samples)\n ]\n self.evaluate(self.data, runs=self.init_n_eval)\n\n # after evaluate run S-metric on all solutions to determine fitness\n for i in range(len(self.data)): # this is a bottleneck\n other_solutions = copy.deepcopy(self.data)\n del other_solutions[i]\n self.data[i].fitness = s_metric(\n self.data[i],\n other_solutions,\n self.n_left,\n self.max_iter,\n ref_time=self.ref_time,\n ref_loss=self.ref_loss,\n )\n\n # set the initial incumbent\n fitness = np.array([s.fitness for s in self.data])\n\n self.incumbent_id = np.nonzero(fitness == self._best(fitness))[0][0]\n self.fit_and_assess()\n\n def gpuworker(self, q, gpu_no):\n \"GPU worker function\"\n\n self.async_time_surrogates[gpu_no] = copy.deepcopy(self.time_surrogate)\n self.async_loss_surrogates[gpu_no] = copy.deepcopy(self.loss_surrogate)\n while True:\n start_timer_1 = time.time()\n self.logger.info(\"GPU no. {} is waiting for task\".format(gpu_no))\n # print(\"Queue size before q.get()= \" + str(q.qsize()))\n confs_ = q.get()\n # print(\"Queue size after q.get()= \" + str(q.qsize()))\n time.sleep(gpu_no)\n\n self.logger.info(\"Evaluating:\")\n self.logger.info(confs_.to_dict())\n stop_timer_1 = time.time()\n\n confs_ = self._eval_gpu(confs_, gpu_no)[\n 0\n ] # will write the result to confs_\n\n start_timer_2 = time.time()\n self.n_left -= 1\n if self.n_left < 0:\n self.n_left = 0\n self.eval_count += 1 # instead of self.iter_count\n\n if self.data is None:\n self.data = [confs_]\n else:\n self.data += [confs_]\n\n for i in range(len(self.data)): # this is a bottleneck\n other_solutions = copy.deepcopy(self.data)\n del other_solutions[i]\n self.data[i].fitness = s_metric(\n self.data[i],\n other_solutions,\n self.n_left,\n self.max_iter,\n ref_time=self.ref_time,\n ref_loss=self.ref_loss,\n )\n\n q.task_done()\n\n # print \"GPU no. 
{} is waiting for task on thread {}\".format(gpu_no, gpu_no)\n if not self.check_stop():\n self.logger.info(\"Data size is {}\".format(len(self.data)))\n if len(self.data) >= self.n_init_sample:\n self.fit_and_assess(\n time_surrogate=self.async_time_surrogates[gpu_no],\n loss_surrogate=self.async_loss_surrogates[gpu_no],\n )\n while True:\n try:\n X, infill_value = self.arg_max_acquisition(\n plugin=None,\n time_surrogate=self.async_time_surrogates[gpu_no],\n loss_surrogate=self.async_loss_surrogates[gpu_no],\n data=self.data,\n n_left=self.n_left,\n ) # two surrogates are needed\n confs_ = Solution(\n X,\n index=len(self.data) + q.qsize(),\n var_name=self.var_names,\n )\n break\n except Exception as e:\n print(e)\n print(\n \"Error selecting candidate, retrying in 60 seconds...\"\n )\n time.sleep(60)\n q.put(confs_)\n else:\n samples = self._space.sampling(1)\n confs_ = Solution(\n samples[0],\n index=len(self.data) + q.qsize(),\n var_name=self.var_names,\n )\n # confs_ = self._to_dataframe(self._space.sampling(1))\n if q.empty():\n q.put(confs_)\n\n else:\n break\n stop_timer_2 = time.time()\n self.time_between_gpu_hist.append(\n (stop_timer_1 - start_timer_1) + (stop_timer_2 - start_timer_2)\n )\n\n print(\"Finished thread {}\".format(gpu_no))\n\n def save_data(self, filename):\n conf_array = []\n fit_array = []\n time_array = []\n loss_array = []\n n_eval_array = []\n index_array = []\n name_array = []\n\n for i in range(len(self.data)):\n conf_array.append(self.data[i].to_dict())\n fit_array.append(self.data[i].fitness)\n time_array.append(self.data[i].time)\n loss_array.append(self.data[i].loss)\n n_eval_array.append(self.data[i].n_eval)\n index_array.append(self.data[i].index)\n name_array.append(self.data[i].var_name)\n data_array = [\n conf_array,\n fit_array,\n time_array,\n loss_array,\n n_eval_array,\n index_array,\n name_array,\n self.all_time_r2,\n self.all_loss_r2,\n self.surr_time_fit_hist,\n self.surr_time_mies_hist,\n self.surr_loss_fit_hist,\n self.surr_loss_mies_hist,\n self.time_between_gpu_hist,\n ]\n\n with open(filename + \".json\", \"w\") as outfile:\n json.dump(data_array, outfile)\n return\n\n def step(self):\n if not hasattr(self, \"data\"):\n self._initialize()\n\n ids = self.select_candidate()\n if self.noisy:\n self.incumbent_id = self.intensify(ids)\n else:\n fitness = np.array([s.fitness for s in self.data])\n self.incumbent_id = np.nonzero(fitness == self._best(fitness))[0][0]\n\n self.incumbent = self.data[self.incumbent_id]\n\n # model re-training\n # TODO: test more control rules on model refitting\n # if self.eval_count % 2 == 0:\n # self.fit_and_assess()\n self.fit_and_assess()\n self.n_left -= 1\n if self.n_left < 0:\n self.n_left = 0\n self.iter_count += 1\n self.hist_f.append(self.incumbent.fitness)\n\n self.logger.info(\"iteration {}, current incumbent is:\".format(self.iter_count))\n self.logger.info(self.incumbent.to_dict())\n\n # save the iterative data configuration to csv\n self.incumbent.to_csv(self.data_file, header=False, index=False, mode=\"a\")\n return self.incumbent, self.incumbent.fitness\n\n def run(self, restart=False):\n if len(self.available_gpus) > 0:\n\n if self.n_jobs > len(self.available_gpus):\n print(\"Not enough GPUs available for n_jobs\")\n return 1, 1 # changed \"1\" to \"1,1\". 
This avoids an error message\n\n # self.n_point = 1 #set n_point to 1 because we only do one evaluation at a time (async)# n_point is set to 1 at initialisation\n # initialize\n self.logger.info(\n \"selected time_surrogate model: {}\".format(\n self.time_surrogate.__class__\n )\n )\n self.logger.info(\n \"selected loss_surrogate model: {}\".format(\n self.loss_surrogate.__class__\n )\n )\n self.logger.info(\"building the initial design of experiments...\")\n\n if not restart:\n samples = self._space.sampling(self.n_init_sample)\n datasamples = [\n Solution(s, index=k, var_name=self.var_names)\n for k, s in enumerate(samples)\n ]\n self.data = None\n\n for i in range(self.n_init_sample):\n self.evaluation_queue.put(datasamples[i])\n\n self.iter_count += (\n self.n_init_sample\n ) # because initial samples are in queue, counters count them as normal samples, so this needs to be coutered\n self.n_left -= self.n_init_sample\n else:\n for i in range(self.n_jobs):\n self.evaluation_queue.put(self.data[i - self.n_jobs])\n for i in range(self.n_jobs):\n del self.data[-1]\n del self.all_time_r2[-1]\n del self.all_loss_r2[-1]\n del self.surr_time_fit_hist[-1]\n del self.surr_time_mies_hist[-1]\n del self.surr_loss_fit_hist[-1]\n del self.surr_loss_mies_hist[-1]\n del self.time_between_gpu_hist[-1]\n self.n_left -= self.n_jobs\n self.iter_count += self.n_jobs\n self.eval_count += self.n_jobs\n\n thread_dict = {}\n # launch threads for all GPUs\n for i in range(self.n_jobs):\n t = threading.Thread(\n target=self.gpuworker,\n args=(\n self.evaluation_queue,\n self.available_gpus[i],\n ),\n )\n t.setDaemon = True\n thread_dict[i] = t\n t.start()\n\n # wait for queue to be empty and all threads to finish\n self.evaluation_queue.join()\n threads = [thread_dict[a] for a in thread_dict]\n for thread in threads:\n thread.join()\n\n print(\"\\n\\n All threads should now be done. 
Finishing program...\\n\\n\")\n\n self.stop_dict[\"n_eval\"] = self.eval_count\n self.stop_dict[\"n_iter\"] = self.iter_count\n\n return # self.incumbent, self.stop_dict #self.incumbent does not exist anymore\n\n else:\n\n while not self.check_stop():\n self.step()\n\n self.stop_dict[\"n_eval\"] = self.eval_count\n self.stop_dict[\"n_iter\"] = self.iter_count\n return # self.incumbent, self.stop_dict #self.incumbent does not exist anymore\n\n def check_stop(self):\n # TODO: add more stop criteria\n # unify the design purpose of stop_dict\n if self.iter_count >= self.max_iter:\n self.stop_dict[\"max_iter\"] = True\n\n if self.eval_count >= self.max_eval:\n self.stop_dict[\"max_eval\"] = True\n\n if (\n self.ftarget is not None\n and hasattr(self, \"incumbent\")\n and self._compare(self.incumbent.perf, self.ftarget)\n ):\n self.stop_dict[\"ftarget\"] = True\n return len(self.stop_dict)\n\n def _acquisition(\n self,\n plugin=None,\n dx=False,\n time_surrogate=None,\n loss_surrogate=None,\n data=None,\n n_left=None,\n max_iter=None,\n ):\n if plugin is None:\n # plugin = np.min(self.data.perf) if self.minimize else -np.max(self.data.perf)\n # Note that performance are normalized when building the surrogate\n plugin = 0 if self.minimize else -1\n # here two surrogate functions are needed\n if time_surrogate is None:\n time_surrogate = self.time_surrogate\n if loss_surrogate is None:\n loss_surrogate = self.loss_surrogate\n if data is None:\n data = self.data\n if n_left is None:\n n_left = self.n_left\n if max_iter is None:\n max_iter = self.max_iter\n\n if self.n_point == 1: # sequential mode\n if self.infill == \"HVI\":\n acquisition_func = HVI(\n time_model=time_surrogate,\n loss_model=loss_surrogate,\n plugin=plugin,\n minimize=self.minimize,\n solutions=data,\n n_left=n_left,\n max_iter=max_iter,\n sol=Solution,\n ref_time=self.ref_time,\n ref_loss=self.ref_loss,\n alpha=self.hvi_alpha,\n )\n elif self.infill == \"MC\":\n acquisition_func = MONTECARLO(\n model=time_surrogate, plugin=plugin, minimize=self.minimize\n )\n else:\n print(\n \"Error, only HVI and MC infill criterium work for this implementation\"\n )\n else:\n print(\"Error, n_point should be 1 for this implementation\")\n\n return functools.partial(acquisition_func, dx=dx)\n\n def _annealling(self):\n if self.schedule == \"exp\":\n self.t *= self.alpha\n elif self.schedule == \"linear\":\n self.t -= self.eta\n elif self.schedule == \"log\":\n # TODO: verify this\n self.t = self.c / np.log(self.iter_count + 1 + 1)\n\n def arg_max_acquisition(\n self,\n plugin=None,\n time_surrogate=None,\n loss_surrogate=None,\n data=None,\n n_left=None,\n max_iter=None,\n ):\n \"\"\"\n Global Optimization on the acqusition function\n \"\"\"\n if self.verbose:\n self.logger.info(\"acquisition function optimziation...\")\n\n dx = True if self._optimizer == \"BFGS\" else False\n # two surrogate functions must be passed\n obj_func = [\n self._acquisition(\n plugin,\n dx=dx,\n time_surrogate=time_surrogate,\n loss_surrogate=loss_surrogate,\n n_left=n_left,\n max_iter=max_iter,\n )\n for i in range(self.n_point)\n ]\n\n if self.n_point == 1:\n candidates, values = self._argmax_multistart(obj_func[0])\n else:\n # parallelization using joblib\n res = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(\n delayed(self._argmax_multistart, check_pickle=False)(func)\n for func in obj_func\n )\n candidates, values = list(zip(*res))\n return candidates, values\n\n def _argmax_multistart(self, obj_func):\n # keep the list of optima in each restart for future 
usage\n xopt, fopt = [], []\n eval_budget = self._max_eval\n best = -np.inf\n wait_count = 0\n\n for iteration in range(self._random_start):\n x0 = self._space.sampling(N=1, method=\"uniform\")[0]\n\n # TODO: add IPOP-CMA-ES here for testing\n # TODO: when the surrogate is GP, implement a GA-BFGS hybrid algorithm\n if self._optimizer == \"BFGS\":\n if self.N_d + self.N_i != 0:\n raise ValueError(\"BFGS is not supported with mixed variable types.\")\n # TODO: find out why: somehow this local lambda function can be pickled...\n # for minimization\n func = lambda x: tuple(map(lambda x: -1.0 * x, obj_func(x)))\n xopt_, fopt_, stop_dict = fmin_l_bfgs_b(\n func,\n x0,\n pgtol=1e-8,\n factr=1e6,\n bounds=self._bounds,\n maxfun=eval_budget,\n )\n xopt_ = xopt_.flatten().tolist()\n fopt_ = -np.asscalar(fopt_)\n\n if stop_dict[\"warnflag\"] != 0 and self.verbose:\n self.logger.warn(\n \"L-BFGS-B terminated abnormally with the \"\n \" state: %s\" % stop_dict\n )\n\n elif self._optimizer == \"MIES\":\n # here send to MIES optimizer that uses s-metric as obj_func\n opt = mies(\n self._space,\n obj_func,\n max_eval=eval_budget,\n minimize=False,\n verbose=False,\n plus_selection=False,\n )\n xopt_, fopt_, stop_dict = opt.optimize()\n\n if fopt_ > best:\n best = fopt_\n wait_count = 0\n if self.verbose:\n self.logger.info(\n \"restart : {} - funcalls : {} - Fopt : {}\".format(\n iteration + 1, stop_dict[\"funcalls\"], fopt_\n )\n )\n else:\n wait_count += 1\n\n eval_budget -= stop_dict[\"funcalls\"]\n xopt.append(xopt_)\n fopt.append(fopt_)\n\n if eval_budget <= 0 or wait_count >= self._wait_iter:\n break\n # maximization: sort the optima in descending order\n idx = np.argsort(fopt)[::-1]\n return xopt[idx[0]], fopt[idx[0]]\n\n def _check_params(self):\n assert hasattr(self.obj_func, \"__call__\")\n\n if np.isinf(self.max_eval) and np.isinf(self.max_iter):\n raise ValueError(\"max_eval and max_iter cannot be both infinite\")\n" ]
[ [ "numpy.log", "sklearn.metrics.r2_score", "numpy.isinf", "numpy.random.seed", "numpy.nonzero", "numpy.asarray", "scipy.optimize.fmin_l_bfgs_b", "numpy.asscalar", "numpy.random.choice", "numpy.min", "numpy.all", "numpy.max", "numpy.argsort", "numpy.array", "numpy.sum" ] ]
sebastianffx/stainlib
[ "6d79f165fc69e7599d14310c6f4d26e3d0d01543" ]
[ "utils/stain_utils.py" ]
[ "import numpy as np\nimport cv2 as cv\nimport spams\nimport copy\nfrom stainlib.utils.excepts import TissueMaskException\nfrom abc import ABC, abstractmethod\n\nclass ABCStainExtractor(ABC):\n\n @staticmethod\n @abstractmethod\n def get_stain_matrix(I):\n \"\"\"\n Estimate the stain matrix given an image.\n :param I:\n :return:\n \"\"\"\n\nclass ABCTissueLocator(ABC):\n @staticmethod\n @abstractmethod\n def get_tissue_mask(I):\n \"\"\"\n Get a boolean tissue mask.\n :param I:\n :return:\n \"\"\"\n\nclass LuminosityThresholdTissueLocator(ABCTissueLocator):\n\n @staticmethod\n def get_tissue_mask(I, luminosity_threshold=0.8):\n \"\"\"\n Get a binary mask where true denotes pixels with a luminosity less than the specified threshold.\n Typically we use to identify tissue in the image and exclude the bright white background.\n :param I: RGB uint 8 image.\n :param luminosity_threshold: Luminosity threshold.\n :return: Binary mask.\n \"\"\"\n assert is_uint8_image(I), \"Image should be RGB uint8.\"\n I_LAB = cv.cvtColor(I, cv.COLOR_RGB2LAB)\n L = I_LAB[:, :, 0] / 255.0 # Convert to range [0,1].\n mask = L < luminosity_threshold\n\n # Check it's not empty\n if mask.sum() == 0:\n raise TissueMaskException(\"Empty tissue mask computed\")\n return mask\n\nclass LuminosityStandardizer(object):\n\n @staticmethod\n def standardize(I, percentile=95):\n \"\"\"\n Transform image I to standard brightness.\n Modifies the luminosity channel such that a fixed percentile is saturated.\n :param I: Image uint8 RGB.\n :param percentile: Percentile for luminosity saturation. At least (100 - percentile)% of pixels should be fully luminous (white).\n :return: Image uint8 RGB with standardized brightness.\n \"\"\"\n assert is_uint8_image(I), \"Image should be RGB uint8.\"\n I_LAB = cv.cvtColor(I, cv.COLOR_RGB2LAB)\n L_float = I_LAB[:, :, 0].astype(float)\n p = np.percentile(L_float, percentile)\n I_LAB[:, :, 0] = np.clip(255 * L_float / p, 0, 255).astype(np.uint8)\n I = cv.cvtColor(I_LAB, cv.COLOR_LAB2RGB)\n return I\n\ndef get_concentrations(I, stain_matrix, regularizer=0.01):\n \"\"\"\n Estimate concentration matrix given an image and stain matrix.\n :param I:\n :param stain_matrix:\n :param regularizer:\n :return:\n \"\"\"\n OD = convert_RGB_to_OD(I).reshape((-1, 3))\n return spams.lasso(X=OD.T, D=stain_matrix.T, mode=2, lambda1=regularizer, pos=True).toarray().T\n\ndef get_sign(x):\n \"\"\"\n Returns the sign of x.\n :param x: A scalar x.\n :return: The sign of x.\n \"\"\"\n if x > 0:\n return +1\n elif x < 0:\n return -1\n elif x == 0:\n return 0\n\ndef normalize_matrix_rows(A):\n \"\"\"\n Normalize the rows of an array.\n :param A: An array.\n :return: Array with rows normalized.\n \"\"\"\n return A / np.linalg.norm(A, axis=1)[:, None]\n\ndef convert_RGB_to_OD(I):\n \"\"\"\n Convert from RGB to optical density (OD_RGB) space.\n RGB = 255 * exp(-1*OD_RGB).\n :param I: Image RGB uint8.\n :return: Optical denisty RGB image.\n \"\"\"\n mask = (I == 0)\n I_masked = copy.deepcopy(I)\n I_masked[mask] = 1 \n #I[mask] = 1\n return np.maximum(-1 * np.log(I_masked / 255), 1e-6)\n\ndef convert_OD_to_RGB(OD):\n\n \"\"\"\n Convert from optical density (OD_RGB) to RGB.\n RGB = 255 * exp(-1*OD_RGB)\n :param OD: Optical denisty RGB image.\n :return: Image RGB uint8.\n \"\"\"\n assert OD.min() >= 0, \"Negative optical density.\"\n OD = np.maximum(OD, 1e-6)\n return (255 * np.exp(-1 * OD)).astype(np.uint8)\n\ndef is_image(I):\n \"\"\"\n Is I an image.\n \"\"\"\n if not isinstance(I, np.ndarray):\n return False\n if not I.ndim == 
3:\n return False\n return True\n\ndef is_uint8_image(I):\n \"\"\"\n Is I a uint8 image.\n \"\"\"\n if not is_image(I):\n return False\n if I.dtype != np.uint8:\n return False\n return True\n\ndef lab_split(I):\n \"\"\"\n Convert from RGB uint8 to LAB and split into channels\n :param I: uint8\n :return:\n \"\"\"\n I = cv.cvtColor(I, cv.COLOR_RGB2LAB)\n I = I.astype(np.float32)\n I1, I2, I3 = cv.split(I)\n I1 /= 2.55\n I2 -= 128.0\n I3 -= 128.0\n return I1, I2, I3\n\ndef merge_back(I1, I2, I3):\n \"\"\"\n Take seperate LAB channels and merge back to give RGB uint8\n :param I1:\n :param I2:\n :param I3:\n :return:\n \"\"\"\n I1 *= 2.55\n I2 += 128.0\n I3 += 128.0\n I = np.clip(cv.merge((I1, I2, I3)), 0, 255).astype(np.uint8)\n return cv.cvtColor(I, cv.COLOR_LAB2RGB)\n\ndef get_mean_std(I):\n \"\"\"\n Get mean and standard deviation of each channel\n :param I: uint8\n :return:\n \"\"\"\n I1, I2, I3 = lab_split(I)\n m1, sd1 = cv.meanStdDev(I1)\n m2, sd2 = cv.meanStdDev(I2)\n m3, sd3 = cv.meanStdDev(I3)\n means = m1, m2, m3\n stds = sd1, sd2, sd3\n return means, stds\n\ndef standardize_brightness(I):\n \"\"\"\n :param I:\n :return: Image pixel values divided by the 90th percentile\n \"\"\"\n p = np.percentile(I, 90)\n return np.clip(I * 255.0 / p, 0, 255).astype(np.uint8)\n" ]
[ [ "numpy.log", "numpy.maximum", "numpy.clip", "numpy.linalg.norm", "numpy.percentile", "numpy.exp" ] ]
rotmanmi/hp-vae-gan
[ "70c67c8ad9ff1f8c48a2bf7f883cd1f2cfd0c043" ]
[ "utils/images.py" ]
[ "import torch\nimport torch.nn.functional as F\nimport math\n\n__all__ = ['interpolate', 'interpolate_3D', 'adjust_scales2image', 'generate_noise', 'get_scales_by_index',\n 'get_fps_td_by_index', 'get_fps_by_index', 'upscale', 'upscale_2d']\n\n\ndef interpolate(input, size=None, scale_factor=None, interpolation='bilinear'):\n if input.dim() == 5:\n b, c, t, h0, w0 = input.shape\n img = input.permute(0, 2, 1, 3, 4).flatten(0, 1) # (B+T)CHW\n scaled = F.interpolate(img, size=size, scale_factor=scale_factor, mode=interpolation, align_corners=True)\n _, _, h1, w1 = scaled.shape\n scaled = scaled.reshape(b, t, c, h1, w1).permute(0, 2, 1, 3, 4)\n else:\n scaled = F.interpolate(input, size=size, scale_factor=scale_factor, mode=interpolation, align_corners=True)\n\n return scaled\n\n\ndef interpolate_3D(input, size=None, scale_factor=None, interpolation='trilinear'):\n assert input.dim() == 5, \"input must be 5D\"\n scaled = F.interpolate(input, size=size, scale_factor=scale_factor, mode=interpolation, align_corners=True)\n\n return scaled\n\n\ndef adjust_scales2image(size, opt):\n opt.num_scales = math.ceil((math.log(math.pow(opt.min_size / size, 1), opt.scale_factor_init))) + 1\n scale2stop = math.ceil(math.log(min([opt.max_size, size]) / size, opt.scale_factor_init))\n opt.stop_scale = opt.num_scales - scale2stop\n opt.scale1 = min(opt.max_size / size, 1)\n opt.scale_factor = math.pow(opt.min_size / size, 1 / opt.stop_scale)\n scale2stop = math.ceil(math.log(min([opt.max_size, size]) / size, opt.scale_factor_init))\n opt.stop_scale = opt.num_scales - scale2stop\n\n\ndef generate_noise(ref=None, size=None, type='normal', emb_size=None, device=None):\n # Initiate noise without batch size\n if ref is not None:\n noise = torch.zeros_like(ref)\n elif size is not None:\n noise = torch.zeros(*size).to(device)\n else:\n raise Exception(\"ref or size must be applied\")\n\n if type == 'normal':\n return noise.normal_(0, 1)\n elif type == 'benoulli':\n return noise.bernoulli_(0.5)\n\n if type == 'int':\n assert (emb_size is not None) and (size is not None) and (device is not None)\n return torch.randint(0, emb_size, size=size, device=device)\n\n return noise.uniform_(0, 1) # Default == Uniform\n\n\ndef get_scales_by_index(index, scale_factor, stop_scale, img_size):\n scale = math.pow(scale_factor, stop_scale - index)\n s_size = math.ceil(scale * img_size)\n\n return s_size\n\n\ndef get_fps_by_index(index, opt):\n # Linear fps interpolation by divisors\n fps_index = int((index / opt.stop_scale_time) * (len(opt.sampling_rates) - 1))\n\n return opt.org_fps / opt.sampling_rates[fps_index], fps_index\n\n\ndef get_fps_td_by_index(index, opt):\n fps, fps_index = get_fps_by_index(index, opt)\n\n every = opt.sampling_rates[fps_index]\n time_depth = opt.fps_lcm // every + 1\n\n return fps, time_depth, fps_index\n\n\ndef upscale(video, index, opt):\n assert index > 0\n\n next_shape = get_scales_by_index(index, opt.scale_factor, opt.stop_scale, opt.img_size)\n next_fps, next_td, _ = get_fps_td_by_index(index, opt)\n next_shape = [next_td, int(next_shape * opt.ar), next_shape]\n\n # Video interpolation\n vid_up = interpolate_3D(video, size=next_shape)\n\n return vid_up\n\n\ndef upscale_2d(image, index, opt):\n assert index > 0\n\n next_shape = get_scales_by_index(index, opt.scale_factor, opt.stop_scale, opt.img_size)\n next_shape = [int(next_shape * opt.ar), next_shape]\n\n # Video interpolation\n img_up = interpolate(image, size=next_shape)\n\n return img_up\n" ]
[ [ "torch.zeros_like", "torch.randint", "torch.nn.functional.interpolate", "torch.zeros" ] ]
balabalabalalaba/testb
[ "fd2a8262bba94c561be49d69120401f0d7259fdf" ]
[ "bicks/bicsearch.py" ]
[ "from bicks.eigenkpar import find_eigen_kpar_in_an_area, find_eigen_kpar\nfrom bicks.photoniccrystalbandprojection import find_band_projection\nfrom bicks.photoniccrystalbandprojection import mini_frequncy\nfrom bicks.field import FieldsWithCTIRInArea, FieldsWithCTIRMix\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as ani\nimport numpy as np\nimport time\n\nclass FindBICs:\n \"\"\"find BICs in q-k0 space with single polarization.?\n \n Attributes\n ----------\n BIC_qs: list[list[float]]\n each item contains BICs' qs for corresponding thickness.\n BIC_k0s: list[list[float]]\n each item contains BICs' k0s for corresponding thickness.\n BIC_hs: list[float]\n the thickness of PhC slab where BICs exist.\n \"\"\"\n def __init__(self, phcs, num, mode=\"E\", Nq=250):\n \"\"\"Initialize the class, create the gridding.\n \n Parameters\n ----------\n phcs: PhotonicCrystalSlab\n the Photonic Crystal Slab which is a kind of class.\n num: EssentialNumber\n \n mode: {\"E\", \"H\",}, optional\n considered mode \n Nq: int, optional\n number which we divided half of the Brillouin into\n \"\"\"\n self.phcs = phcs\n self.num = num\n self.mode = mode\n if type(Nq) != int:\n raise ValueError(\"\"\"Nq should be int.\n \"\"\")\n if mode.lower() == 'h' or mode.lower() == 'e':\n deltaq = 0.5/Nq\n k0_floor, k0_ceiling, dataq, band_proj = \\\n find_band_projection(phcs, num, mode=mode, Nq=Nq)\n self.k0_floor = k0_floor\n self.k0_ceiling = k0_ceiling\n self.dataq = dataq\n datak0 = band_proj[\"k0a\"]\n kpara_real_range_origin = band_proj[\"real\"]\n kpara_imag_range_origin = band_proj[\"imag\"]\n print(\"=============\")\n print(\"Initializing:\")\n start = time.time()\n #gridding\n real_k_parallel, imag_k_parallel, qk0 = [], [], []\n flagenum = len(dataq)//50\n for i in range(len(dataq)):\n qa = i * deltaq + dataq[0]\n # this is the area in which Bloch waves' kz(real and image) will be found\n kpara_real_range = []\n kpara_imag_range = []\n # this is the range of considered frequency \n k0_range = []\n \n for j in range(len(datak0)):\n datak0j = datak0[j]\n if k0_floor[i] <= datak0j <= k0_ceiling[i]:\n k0_range.append(datak0j)\n kpara_real_range.append(\n kpara_real_range_origin[j])\n kpara_imag_range.append(\n kpara_imag_range_origin[j])\n # compute the data \n for k in range(len(k0_range)):\n k0a = k0_range[k]\n kpara_real_extreme = kpara_real_range[k]\n kpara_imag_extreme = kpara_imag_range[k]\n tem_real_k_parallel, tem_imag_k_parallel = \\\n find_eigen_kpar_in_an_area(phcs, qa*2*np.pi,\n k0a*2*np.pi, num,\n kpara_real_extreme,\n kpara_imag_extreme,\n mode=mode)\n real_k_parallel.append(tem_real_k_parallel)\n imag_k_parallel.append(tem_imag_k_parallel)\n qk0.append([qa, k0a])\n if i%flagenum == 0:\n iky = int(i/len(dataq)*50)+1\n aii = \"*\" * iky\n bii = \".\" * (50 - iky)\n cii = iky / 50 * 100\n dur = time.time() - start\n print(\"\\r{:^3.0f}%[{}->{}]{:.2f}s\".format(cii,aii,bii,dur),\n end = \"\") \n print(\"\\n\" + \"Initialization accomplished.\")\n self.qk0 = qk0\n self.real_k_parallel = real_k_parallel\n self.imag_k_parallel = imag_k_parallel\n else:\n raise ValueError(\"\"\"mode should only be 'E' or 'H'\n \"\"\")\n \n \n def find_kpar(self, mode, Nq):\n phcs = self.phcs\n num = self.num\n deltaq = 0.5/Nq\n k0_floor, k0_ceiling, dataq, band_proj = \\\n find_band_projection(phcs, num, mode=mode, Nq=Nq)\n datak0 = band_proj[\"k0a\"]\n kpara_real_range_origin = band_proj[\"real\"]\n kpara_imag_range_origin = band_proj[\"imag\"]\n \n #gridding\n real_k_parallel, imag_k_parallel, qk0 = [], 
[], []\n \n for i in range(len(dataq)):\n qa = i * deltaq + dataq[0]\n # this is the area in which Bloch waves' kz(real and image) will be found\n kpara_real_range = []\n kpara_imag_range = []\n # this is the range of considered frequency \n k0_range = []\n \n for j in range(len(datak0)):\n datak0j = datak0[j]\n if k0_floor[i] <= datak0j <= k0_ceiling[i]:\n k0_range.append(datak0j)\n kpara_real_range.append(\n kpara_real_range_origin[j])\n kpara_imag_range.append(\n kpara_imag_range_origin[j])\n # compute the data \n for k in range(len(k0_range)):\n k0a = k0_range[k]\n kpara_real_extreme = kpara_real_range[k]\n kpara_imag_extreme = kpara_imag_range[k]\n tem_real_k_parallel, tem_imag_k_parallel = \\\n find_eigen_kpar_in_an_area(phcs, qa*2*np.pi,\n k0a*2*np.pi, num,\n kpara_real_extreme,\n kpara_imag_extreme,\n mode=mode)\n real_k_parallel.append(tem_real_k_parallel)\n imag_k_parallel.append(tem_imag_k_parallel)\n qk0.append([qa, k0a]) \n return qk0, real_k_parallel, imag_k_parallel\n \n \n def getcoeffs(self):\n \"\"\"get the ratio of coefficients of two Bloch waves in opposite\n direction.\n \"\"\"\n qk0 = self.qk0\n phcs, num = self.phcs, self.num\n real_k_parallel, imag_k_parallel = \\\n self.real_k_parallel, self.imag_k_parallel\n kya = 0\n odd_coefs, even_coefs, kzas = [], [], []\n print(\"=============\")\n print(\"Computing:\")\n start = time.time()\n flagenum = len(qk0)//50\n for i in range(len(qk0)):\n qa, k0a = qk0[i]\n temfield = FieldsWithCTIRInArea(phcs, num,\n k0a*2*np.pi,\n qa*2*np.pi, kya,\n real_k_parallel[i],\n imag_k_parallel[i],\n mode=self.mode)\n odd_coefs.append(temfield.odd_coefs_inside)\n even_coefs.append(temfield.even_coefs_inside)\n kzas.append(temfield.realkzs)\n if i%flagenum == 0:\n iky = int(i/len(qk0)*50)+1\n aii = \"*\" * iky\n bii = \".\" * (50 - iky)\n cii = iky / 50 * 100\n dur = time.time() - start\n print(\"\\r{:^3.0f}%[{}->{}]{:.2f}s\".format(cii,aii,bii,dur),\n end = \"\") \n self.odd_coefs = np.array(odd_coefs)\n self.even_coefs = np.array(even_coefs)\n self.kzas = np.array(kzas)\n print(\"\\n\" + \"Computation accomplished.\")\n \n \n def run(self, hstart, hend, Nh=20, limit=0.999):\n \"\"\"search BICs by varying thickness of PhC slab.\n \n Parameters\n ----------\n hstart: float\n start searching in this thickness\n hend: float\n end searching in this thickness\n Nh: int, optional\n number of searching thickness\n limit:float\n the precision of judging if a point in q-k0 space is a BIC\n \"\"\"\n qk0 = self.qk0\n num = self.num\n odd_coefs = self.odd_coefs\n even_coefs = self.even_coefs\n kzas = self.kzas\n n_real = num.real\n\n def find_bic(h):\n \"\"\"\n This is a function to find bics in PhCS\n \n Parameters\n ----------\n h: int\n the thickness of PhCS\n \n Returns\n -------\n list[float]:\n the BICs' q\n list[float]:\n the BICs' k0\n \"\"\"\n test = []\n odd_coefs_boundry = np.real(odd_coefs *\n np.exp(-1j * h * kzas)).tolist()\n \n even_coefs_boundry = np.real(even_coefs *\n np.exp(-1j * h * kzas)).tolist()\n\n for i in range(len(qk0)):\n neflag = n_real\n noflag = n_real\n sum_odd_real = 0\n sum_even_real = 0\n for j in range(n_real):\n oddreal = odd_coefs_boundry[i][j]\n evenreal = even_coefs_boundry[i][j]\n if (-2+limit<oddreal<-limit):\n noflag = noflag - 1\n if (2-limit>evenreal>limit):\n neflag = neflag - 1\n sum_even_real = sum_even_real + evenreal\n sum_odd_real = sum_odd_real + oddreal\n dataq = qk0[i][0]\n datak0 = qk0[i][1]\n if neflag == 0:\n test.append([dataq, datak0, sum_even_real])\n if noflag == 0:\n test.append([dataq, 
datak0, -sum_odd_real])\n \n bicregion = [[test[0]]]\n flag = 1\n limitdelta = 0.02\n \n for i in range(len(test) - 1):\n for j in range(len(bicregion)):\n if (abs(bicregion[j][-1][0] - test[i+1][0])<limitdelta\n and \\\n abs(bicregion[j][-1][1] - test[i+1][1])<limitdelta):\n bicregion[j].append(test[i+1])\n flag = 0\n break\n flag = 1\n \n if flag:\n bicregion.append([test[i+1]])\n \n bic_q = []\n bic_k0 = []\n for onebicregion in bicregion:\n onebic = [onebicregion[0][0],\n onebicregion[0][1],\n onebicregion[0][2]]\n for j in range(len(onebicregion)-1):\n if onebicregion[j+1][-1]>onebic[-1]:\n onebic = [onebicregion[j+1][0],\n onebicregion[j+1][1],\n onebicregion[j+1][2]]\n bic_q.append(onebic[0])\n bic_k0.append(onebic[1])\n return bic_q, bic_k0\n \n rangeh = np.linspace(hstart, hend, Nh)\n print(\"=============\")\n print(\"Searching:\")\n start = time.time()\n bic_qs, bic_k0s, bic_hs = [], [], []\n ikk=0\n flagenum = Nh//50\n nbics=0\n for h in rangeh:\n ikk = ikk+1\n if Nh >= 50:\n if ikk%flagenum == 0:\n iky = int(ikk/Nh*50)\n aii = \"*\" * iky\n bii = \".\" * (50 - iky)\n cii = iky / 50 * 100\n dur = time.time() - start\n print(\"\\r{:^3.0f}%[{}->{}]{:.2f}s\".format(cii,aii,bii,dur),\n end = \"\") \n else:\n iky = int(ikk/Nh*50)\n aii = \"*\" * iky\n bii = \".\" * (50 - iky)\n cii = iky / 50 * 100\n dur = time.time() - start\n print(\"\\r{:^3.0f}%[{}->{}]{:.2f}s\".format(cii,aii,bii,dur),\n end = \"\") \n try:\n bic_q, bic_k0 = find_bic(h)\n if bic_q:\n bic_qs.append(bic_q)\n bic_k0s.append(bic_k0)\n bic_hs.append(h)\n nbics = len(bic_q) + nbics\n except:\n pass\n print(\"\\n\" + \"Search accomplished.\")\n print(\"Number of BICs found: \", nbics)\n self.bic_qs = bic_qs\n self.bic_k0s = bic_k0s\n self.bic_hs = bic_hs\n \n\n def showbic(self,i=0):\n h = self.bic_hs\n bic_q = self.bic_qs\n bic_k0 = self.bic_k0s\n dataq = self.dataq\n k0_ceiling = self.k0_ceiling\n k0_floor = self.k0_floor\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('$k_x(2\\pi/a)$')\n ax.set_ylabel('$\\omega(2\\pi c/a)$')\n if h == []:\n h = 0\n else:\n h = h[i]\n ax.set_title(\"BICs in $k_x-\\omega$ space($h=%.3fa, k_y=0$)\"%h)\n ax.plot(dataq, k0_ceiling, 'b', ls=':')\n ax.plot(dataq, k0_floor, 'black', ls='--')\n ax.fill_between(dataq, k0_ceiling, k0_floor,\n color='C1', alpha=0.3,\n interpolate=True,\n label=\"Searching range\")\n if bic_k0:\n ax.scatter(bic_q[i], bic_k0[i], marker='*',\n s=100, c=\"red\", edgecolors=\"black\", \n label=\"BIC\")\n plt.legend(markerscale=1)\n plt.show()\n \n \n def dynamicplot(self, save=False):\n bic_h = self.bic_hs\n bic_q = self.bic_qs\n bic_k0 = self.bic_k0s\n dataq = self.dataq\n k0_ceiling = self.k0_ceiling\n k0_floor = self.k0_floor\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('$k_x(2\\pi/a)$')\n ax.set_ylabel('$\\omega(2\\pi c/a)$')\n ax.set_title(\"BICs in $k_x-\\omega$ space($k_y=0$)\")\n \n h_template = '$h$ = %.3f $a$'\n h_text = ax.text(0, np.min(k0_floor) + 0.02, '', fontsize=14,\n bbox=dict(boxstyle=\"round4\", fc=\"maroon\", alpha=0.3))\n ax.plot(dataq, k0_ceiling, 'b', ls=':')\n ax.plot(dataq, k0_floor, 'black', ls='--')\n ax.fill_between(dataq, k0_ceiling, k0_floor,\n color='lightskyblue', alpha=0.4,\n interpolate=True,\n label=\"Searching range\")\n \n bics, = ax.plot([], [], 'o', marker='*',\n markersize=10, color = 'red',\n animated=True, label='BICs')\n \n def update(i):\n try:\n x = bic_q[i]\n y = bic_k0[i]\n except BaseException:\n x, y = [], []\n bics.set_data(x, y)\n h_text.set_text(h_template % bic_h[i])\n 
return bics, h_text\n \n \n anim = ani.FuncAnimation(\n fig,\n update,\n frames=np.arange(\n 0,\n len(bic_h),\n 1),\n interval=400,\n blit=True)\n plt.legend()\n plt.tight_layout()\n \n if save:\n anim.save(save, writer=\"imagemagick\")\n \n \nclass FindBICsMix:\n \"\"\"find BICs in ky-k0 space with mix polarization.\n \n Attributes\n ----------\n BIC_kys: list[float]\n each item contains BICs' qs for corresponding thickness.\n BIC_k0s: list[float]\n each item contains BICs' k0s for corresponding thickness.\n \"\"\"\n def __init__(self, phcs, num, qa, k0range=0.5*2*np.pi, Nk0=200):\n \"\"\"Initialize the class, create the gridding.\n \n Parameters\n ----------\n phcs: PhotonicCrystalSlab\n\n num: EssentialNumber\n \n qa: float\n the Bloch wave number, in unit 1/a\n k0range: float, option\n the length of the range of k0, in unit 1/a\n Nk0: int, optional\n the number which we divided the k0range\n \"\"\"\n self.phcs = phcs\n self.num = num\n self.qa = qa\n self.k0range = k0range\n self.Nk0 = Nk0\n \n delta = k0range/Nk0\n mink0 = mini_frequncy(phcs, num, qa, 0)\n maxk0 = mink0 + k0range\n kpe = []\n kph = []\n k0set = np.linspace(mink0, maxk0, num=Nk0)\n nik0 = 0\n start = time.time()\n print(\"=================\")\n print(\"Initializing:\")\n flagenum = Nk0//50\n for k0s in k0set: \n nik0 = nik0 + 1\n singe_kpe = find_eigen_kpar(phcs, k0s, qa, num.modes)\n singe_kph = find_eigen_kpar(phcs, k0s, qa, num.modes, mode=\"H\")\n kpe.append(singe_kpe)\n kph.append(singe_kph)\n \n if nik0%flagenum == 0:\n iky = int(nik0/Nk0*50)\n aii = \"*\" * iky\n bii = \".\" * (50 - iky)\n cii = iky / 50 * 100\n dur = time.time() - start\n print(\"\\r{:^3.0f}%[{}->{}]{:.2f}s\".format(cii,aii,bii,dur),\n end = \"\") \n \n print(\"\\n\" + \"Initialization accomplished.\")\n \n \n maxky = np.sqrt(maxk0**2 - qa**2)\n\n def lightline(i, ky):\n \"\"\"light line\n \"\"\"\n if i%2:\n value_k0 = np.sqrt((qa/(2*np.pi) + (i - 1) / 2)**2 + ky**2)\n else:\n value_k0 = np.sqrt((i / 2 - qa/(2*np.pi))**2 + ky**2)\n return value_k0\n \n \n def num_large(array, value):\n \"\"\"the number of items in the array which large than some value.\n \"\"\"\n total = 0\n for item in array:\n if item>value:\n total = total + 1\n return total\n \n kyk0 = []\n k_para_e = []\n k_para_h = []\n print(\"=================\")\n start = time.time()\n print(\"Meshing:\")\n kylist = np.arange(0, maxky, delta)\n \n flagenum = len(kylist)//50\n niky = 0\n for ky in kylist:\n niky = niky + 1\n kys = ky/(2*np.pi)\n k0f = lightline(1, kys)*2*np.pi\n k0c = lightline(2, kys)*2*np.pi\n for i in range(len(k0set)): \n k0 = k0set[i]\n kpei = kpe[i][0]\n kphi = kph[i][0]\n if k0>k0f and k0<k0c:\n if num_large(kpei, ky)==2 and num_large(kphi, ky)==2:\n kyk0.append([ky, k0])\n #if kpei[0]<ky and kphi[0]<ky:\n lenthe = len(kpei)\n lenthh = len(kphi)\n if lenthe>2 and lenthh>2:\n imkpe = kpe[i][1]*1\n imkph = kph[i][1]*1\n imkpe.extend(kpei[0:lenthe-2])\n imkph.extend(kphi[0:lenthh-2])\n imkpe = imkpe[0:num.imag]\n imkph = imkph[0:num.imag]\n \n kppe = [kpei[lenthe-2:], imkpe]\n kpph = [kphi[lenthh-2:], imkph]\n \n k_para_e.append(kppe)\n k_para_h.append(kpph)\n else:\n k_para_e.append(kpe[i])\n k_para_h.append(kph[i])\n \n if niky%flagenum == 0:\n iky = int(niky/len(kylist)*50)+1\n aii = \"*\" * iky\n bii = \".\" * (50 - iky)\n cii = iky / 50 * 100\n dur = time.time() - start\n print(\"\\r{:^3.0f}%[{}->{}]{:.2f}s\".format(cii,aii,bii,dur),\n end = \"\") \n \n print(\"\\n\" + \"Mesh accomplished.\")\n \n \n self.kyk0 = kyk0\n self.k_para_e = k_para_e\n self.k_para_h = 
k_para_h\n \n \n def run(self, limit=0.999):\n \"\"\"get the BICs.\n \n Parameters\n ----------\n limit:float\n the precision of judging if a point in q-k0 space is a BIC\n \n \"\"\"\n kyk0 = self.kyk0\n k_para_e = self.k_para_e\n k_para_h = self.k_para_h\n phcs = self.phcs\n num = self.num\n n_real = num.real\n h = phcs.h\n even_coefs = []\n odd_coefs = []\n dataky = []\n datak01 = []\n datak02 = []\n print(\"=================\")\n print(\"Searching:\")\n start = time.time()\n flagenum = len(kyk0)//50\n \n ky = kyk0[0][0]\n k0 = kyk0[0][1]\n for i in range(len(kyk0)):\n ky = kyk0[i][0]\n k0 = kyk0[i][1]\n if i:\n if kyk0[i][0] == kyk0[i-1][0]:\n pass\n else:\n datak01.append(k0/(2*np.pi))\n datak02.append(kyk0[i-1][1]/(2*np.pi))\n dataky.append(ky/(2*np.pi))\n else:\n dataky.append(ky/(2*np.pi))\n datak01.append(k0/(2*np.pi))\n \n kparae = k_para_e[i]\n kparah = k_para_h[i]\n f1 = FieldsWithCTIRMix(phcs, num, k0, 0, ky, kparae, kparah)\n even_coefs_inside = f1.even_coefs_inside\n odd_coefs_inside = f1.odd_coefs_inside\n even_coefs.append(even_coefs_inside)\n odd_coefs.append(odd_coefs_inside)\n \n if i%flagenum == 0:\n iky = int(i/len(kyk0)*50) + 1\n aii = \"*\" * iky\n bii = \".\" * (50 - iky)\n cii = iky / 50 * 100\n dur = time.time() - start\n print(\"\\r{:^3.0f}%[{}->{}]{:.2f}s\".format(cii,aii,bii,dur),\n end = \"\") \n datak02.append(k0/(2*np.pi))\n print(\"\\n\"+\"Search accomplished.\")\n \n def find_bic(h):\n \"\"\"\n This is a function to find bics in PhCS\n \n Parameters\n ----------\n h: int\n the thickness of PhCS\n \n Returns\n -------\n list[float]:\n the BICs' ky\n list[float]:\n the BICs' k0\n \"\"\"\n test = []\n odd_coefs_boundry = np.real(odd_coefs).tolist()\n even_coefs_boundry = np.real(even_coefs).tolist()\n\n for i in range(len(kyk0)):\n neflag = n_real\n noflag = n_real\n sum_odd_real = 0\n sum_even_real = 0\n for j in range(n_real):\n oddreal = odd_coefs_boundry[i][j]\n evenreal = even_coefs_boundry[i][j]\n if (-2+limit<evenreal<-limit):\n neflag = neflag - 1\n if (2-limit>oddreal>limit):\n noflag = noflag - 1\n sum_even_real = sum_even_real + evenreal\n sum_odd_real = sum_odd_real + oddreal\n dataky = kyk0[i][0]\n datak0 = kyk0[i][1]\n if neflag == 0:\n test.append([dataky, datak0, -sum_even_real])\n if noflag == 0:\n test.append([dataky, datak0, sum_odd_real])\n \n bicregion = [[test[0]]]\n flag = 1\n limitdelta = 0.02 * (2*np.pi)\n \n for i in range(len(test) - 1):\n for j in range(len(bicregion)):\n if (abs(bicregion[j][-1][0] - test[i+1][0])<limitdelta\n and \n abs(bicregion[j][-1][1] - test[i+1][1])<limitdelta):\n bicregion[j].append(test[i+1])\n flag = 0\n break\n flag = 1\n \n if flag:\n bicregion.append([test[i+1]])\n \n bic_ky = []\n bic_k0 = []\n for onebicregion in bicregion:\n onebic = [onebicregion[0][0],\n onebicregion[0][1],\n onebicregion[0][2]]\n for j in range(len(onebicregion)-1):\n if onebicregion[j+1][-1]>onebic[-1]:\n onebic = [onebicregion[j+1][0],\n onebicregion[j+1][1],\n onebicregion[j+1][2]]\n bic_ky.append(onebic[0]/(2*np.pi))\n bic_k0.append(onebic[1]/(2*np.pi))\n return bic_ky, bic_k0\n try:\n bic_ky, bic_k0 = find_bic(h)\n except:\n bic_ky, bic_k0 = [], []\n if bic_ky:\n self.bic_kys = bic_ky\n self.bic_k0s = bic_k0\n else:\n self.bic_kys = []\n self.bic_k0s = []\n print(\"Number of BICs found: \", len(bic_k0))\n \n dataky = np.array(dataky)\n datak01 = np.array(datak01)\n datak02 = np.array(datak02)\n self.dataky = dataky\n self.datak01 = datak01\n self.datak02 = datak02\n \n self.showbic()\n \n def showbic(self):\n phcs = 
self.phcs\n qa = self.qa\n h = phcs.h\n bic_ky = self.bic_kys\n bic_k0 = self.bic_k0s\n dataky = self.dataky\n datak01 = self.datak01\n datak02 = self.datak02\n fig1 = plt.figure()\n ax = fig1.add_subplot(111)\n ax.set_xlabel('$k_y(2\\pi/a)$')\n ax.set_ylabel('$\\omega(2\\pi c/a)$')\n ax.set_title(\"BICs in $k_y-\\omega$ space($h=\"+\\\n str(round(h,3))+\\\n \"a, q=\"+str(qa)+\"$)\")\n ax.plot(dataky, datak01, 'b', ls=':')\n ax.plot(dataky, datak02, 'black', ls='--')\n ax.fill_between(dataky, datak01, datak02,\n color='C1', alpha=0.3,\n interpolate=True,\n label=\"Searching range\")\n \"\"\"\n ax.fill_between(dataky, datak01, datak02,\n where=(datak01 < datak02),\n color='C1', alpha=0.3, interpolate=True)\n \"\"\"\n ax.scatter(bic_ky, bic_k0, marker='*',\n s=100, c=\"red\", edgecolors=\"black\", \n label=\"BIC\")\n plt.legend(markerscale=1)\n plt.show()\n \n \n \n \n \n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "numpy.sqrt", "numpy.linspace", "numpy.min", "numpy.arange", "numpy.real", "numpy.exp", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
talperetz/pyds
[ "1a46d83c1ee71056484ab9e78bc19da6c2163583", "1a46d83c1ee71056484ab9e78bc19da6c2163583" ]
[ "tests/resources/datasets/save_datasets.py", "pyds/features_engineering.py" ]
[ "\"\"\" \n:author: Tal Peretz\n:date: 11/11/2016\n:TL;DR: this module purpose is generating datasets for pyds tests\n\"\"\"\n\nimport os\n\nimport pandas as pd\nimport sklearn.datasets\n\nsave_attribute_to_file_extension = {'to_excel': 'xls', 'to_html': 'html', 'to_json': 'json', 'to_pickle': 'pickle',\n 'to_stata': 'stata', 'to_sql': 'sql', 'to_csv': 'csv', }\nDATASETS_PATH = os.path.abspath(\"\")\n\ndatasets = (\n sklearn.datasets.load_boston(),\n sklearn.datasets.fetch_california_housing())\n\n\ndef save_datasets(datasets_collection):\n for i, dataset in enumerate(datasets_collection):\n dataset_name = dataset['DESCR'].split('\\n')[0]\n # build path variable, check if exists, if not create it\n path = DATASETS_PATH + '/' + dataset_name + '/'\n file_name = 'train.%s' % tuple(save_attribute_to_file_extension.values())[i]\n if not os.path.exists(path):\n os.makedirs(path)\n\n # build the dataframe in the form of data columns and target variable in one DataFrame\n df = pd.concat([pd.DataFrame(data=dataset['data'], columns=dataset['feature_names']),\n pd.Series(data=dataset['target'], name='target')], axis=1)\n\n # save the resulting DataFrame in a format from save_attribute_to_file_extension\n getattr(df, tuple(save_attribute_to_file_extension.keys())[i])(path + file_name)\n\n\nif __name__ == '__main__':\n save_datasets(datasets)\n", "\"\"\" \n:Authors: Tal Peretz\n:Date: 10/14/2016\n:TL;DR: this module is responsible for the transformation and creation and selection of features\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import RandomizedLasso, RandomizedLogisticRegression\nfrom sklearn.preprocessing import FunctionTransformer, MinMaxScaler\n\nfrom pyds import constants\n\n\ndef create_features(X):\n \"\"\"\n given an encoded and scaled pandas DataFrame returns dataframe with new features columns based on\n polynomial features combination, log transformation and one hot encoding\n :param y: [pandas Series] target column\n :param X: encoded and scaled pandas DataFrame\n :param pipeline_results: class: 'PipelineResults'\n :return: dataframe with new features columns based on polynomial features combination, log transformation\n and one hot encoding\n links: `feature engineering - getting good at it <http://machinelearningmastery.com/\n discover-feature-engineering-how-to-engineer-features-and-how-to-get-good-at-it/>`_\n `Quora - feature engineering <https://www.quora.com/What-are-some-best-practices-in-Feature-Engineering>`_\n \"\"\"\n assert (isinstance(X, pd.DataFrame)) and (not X.empty), 'X should be a valid pandas DataFrame'\n numerical_cols = X.select_dtypes(include=[np.number]).columns\n X_num = X.loc[:, numerical_cols].copy()\n created_features = set()\n log_features = None\n if len(X_num.columns) > 0:\n log_transformer = FunctionTransformer(func=np.log)\n log_features = pd.DataFrame(data=log_transformer.fit_transform(X_num),\n columns=('log_%s' % col_name for col_name in numerical_cols),\n index=X_num.index)\n # replace inf values with largest non-inf value * NEG_INF_REPRESENTATION\n replacements = {\n -np.inf: constants.NEG_INF_REPRESENTATION * abs(\n max([log_features.replace([np.inf, -np.inf, np.nan], 0).as_matrix().min(),\n log_features.replace([np.inf, -np.inf, np.nan], 0).as_matrix().max()],\n key=abs)), np.nan: constants.NEG_INF_REPRESENTATION * abs(\n max([log_features.replace([np.inf, -np.inf, np.nan], 0).as_matrix().min(),\n log_features.replace([np.inf, -np.inf, np.nan], 0).as_matrix().max()],\n key=abs))}\n column_to_replacements = 
{col: replacements for col in log_features.columns.tolist()}\n log_features = log_features.replace(column_to_replacements)\n created_features.update(log_features.columns.tolist())\n return pd.concat([df for df in [X, log_features] if df is not None],\n axis=1), created_features\n\n\ndef _get_col_name_to_rank(ranks, names, order=1):\n scaler = MinMaxScaler()\n ranks = scaler.fit_transform(order * np.array([ranks]).T).T[0]\n # ranks = map(lambda x: round(x, 2), ranks)\n return dict(zip(names, ranks))\n\n\ndef select_features(X, y):\n \"\"\"\n given a pandas DataFrame and the column name of target variable returns dataframe after dropping irrelevant features\n according to RandomizedLasso and RandomForestRegressor feature selection\n :param y: [pandas Series] target column\n :param X: [pandas DataFrame] predictor columns\n :return: dataframe without meaningless features according to RandomizedLasso and RandomForestRegressor\n feature selection, dropped columns\n links: `Quora - feature selection <https://www.quora.com/How-do-I-perform-feature-selection>`_\n `selecting good features\n <http://blog.datadive.net/selecting-good-features-part-iv-stability-selection-rfe-and-everything-side-by-side/>`_\n \"\"\"\n assert (isinstance(X, pd.DataFrame)) and (not X.empty), 'X should be a valid pandas DataFrame'\n assert (isinstance(y, pd.Series)) and (not y.empty), 'y should be a valid pandas Series'\n reduced_df = X.copy()\n names = X.columns.tolist()\n\n # ranking\n rs_model = RandomizedLogisticRegression() if y.dtype.type == pd.types.dtypes.CategoricalDtypeType else RandomizedLasso()\n rs_model.fit(X, y)\n col_name_to_rank = _get_col_name_to_rank(np.abs(rs_model.scores_), names)\n\n # dropping irrelevant features\n columns_to_drop = [name for name, rank in col_name_to_rank.items() if rank == 0]\n if columns_to_drop:\n reduced_df = X.drop(columns_to_drop, axis=1)\n return reduced_df, columns_to_drop\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ], [ "sklearn.linear_model.RandomizedLasso", "pandas.concat", "numpy.abs", "sklearn.preprocessing.FunctionTransformer", "sklearn.linear_model.RandomizedLogisticRegression", "numpy.array", "sklearn.preprocessing.MinMaxScaler" ] ]
4pisky/radio-optical-transients-plot
[ "2ecc3bba79f6c87b84635a32596fa3c6e9ccf65d" ]
[ "radio_optical_transients_plot/ro_main.py" ]
[ "\"\"\"Radio Optical plot main classes.\n\nThis script contains the main plotting classes for the radio optical plots.\n\nThis file can be imported as a module and contains the following\nclasses:\n\n * RadioOpticalData: Object handling the data of the plot.\n * RadioOpticalPlot: Plotting non-track plots and histograms.\n * RadioOpticalTrackPlot: Plotting for track plots.\n\"\"\"\n\nimport matplotlib.font_manager as fm\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport matplotlib.markers as plt_markers\nimport numpy as np\nimport pandas as pd\nimport pkg_resources\nimport warnings\n\nfrom astropy.cosmology import FlatLambdaCDM\nfrom collections import Counter\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Polygon, Ellipse, Arc\nfrom matplotlib.transforms import ScaledTranslation\nfrom pathlib import Path\nfrom pprint import pprint\nfrom typing import List, Optional, Tuple\nfrom radio_optical_transients_plot.ro_utils import (\n ConvertToABMag_pd,\n OpticaltomJy_pd,\n OpticaltomJy,\n mJytoOptical,\n kcorrect,\n ConvertToABMag,\n stellar_dist\n)\nfrom radio_optical_transients_plot.ro_plotting import (\n plotlines,\n cleanlines,\n extinction_arrow,\n plotboxpoints,\n addtracklabels\n)\n\n\ndef docstring_inherit(parent):\n \"\"\"Function to pass attributes to inherited class.\n\n Args:\n parent (class): The parent class.\n\n Returns:\n obj: The inherit object.\n \"\"\"\n def inherit(obj):\n \"\"\"Sorts and inherits the attributes section.\n\n Args:\n obj: The object.\n\n Returns:\n obj: The object.\n \"\"\"\n spaces = \" \"\n if not str(obj.__doc__).__contains__(\"Attributes:\"):\n obj.__doc__ += \"\\n\" + spaces + \"Attributes:\\n\"\n obj.__doc__ = str(obj.__doc__).rstrip() + \"\\n\"\n to_loop = (\n parent\n .__doc__\n .split(\"Attributes:\\n\")[-1]\n .lstrip()\n .split(\"\\n\")\n )\n for attribute in to_loop:\n obj.__doc__ += spaces * 2 + str(attribute).lstrip().rstrip() + \"\\n\"\n\n return obj\n\n return inherit\n\n\nclass RadioOpticalData(object):\n \"\"\"The representation of the radio-optical data object as found in\n Stewart et al. 2018.\n\n Attributes:\n base_data_file (Path): The path of the data file containing the\n master (base) data.\n exclude_stellar_types (List[str]): The types to exclude from the\n stellar types.\n ab_list (List[str]): The names of objects that have the optical\n magnitudes in the AB system. 
All those not listed are converted\n to AB.\n group_agn (bool): True when the 'group_agn' option is used.\n group_stellar (bool): True when the 'group_stellar' option is used.\n master_df (pd.DataFrame): The dataframe containing the master data.\n basis (List[str]): The types that form the basis plot.\n transients_data_file (Path): The path of the transient data file\n containing the master (base) data.\n transients_data_df (pd.DataFrame): The dataframe containing the\n transient data.\n transients (List[str]): The types that make up the transient objects.\n \"\"\"\n def __init__(\n self,\n base_data_file: Optional[str] = None,\n extra_exclude_stellar_types: Optional[List[str]] = None,\n extra_ab_list: Optional[List[str]] = None,\n group_agn: bool = False,\n group_stellar: bool = False,\n transients_file: Optional[str] = None\n ) -> None:\n \"\"\"\n Init function.\n\n Args:\n base_data_file: The file containing the tab-separated master data.\n If 'None' is entered then the packaged latest master table will\n be used.\n extra_exclude_stellar_types: Extra stellar types to add to the\n stellar exclude list.\n extra_ab_list: Extra names to add to the AB list.\n group_agn: When 'True' the quasars are grouped together under\n the type 'Quasars'.\n group_stellar: When 'True' the stellar sources are all grouped\n under the type 'Stellar'.\n transients_file: Path to the transients file to load. Also accepts\n the names of the packaged transient files\n 'transient_master_table_04072013.txt' and 'Stripe82_QSOs.txt'.\n\n Returns:\n None.\n \"\"\"\n super(RadioOpticalData, self).__init__()\n # If None grab the packaged master data file.\n if base_data_file is None:\n self.base_data_file = Path(pkg_resources.resource_filename(\n __name__, \"./data/Master_Table_27042018.tsv\"\n ))\n else:\n self.base_data_file = Path(base_data_file)\n if not self.base_data_file.exists():\n raise IOError(\n f'The file {self.base_data_file.resolve()} cannot be'\n ' found!'\n )\n\n # The next two are hardcoded due to the master data requiring these\n # filters.\n self.exclude_stellar_types = [\n 'XRB',\n 'Xray source',\n 'RadioGalaxy',\n 'QSO',\n 'X-rayNova',\n \"Planetary Nebula\"\n ]\n\n if extra_exclude_stellar_types is not None:\n self.exclude_stellar_types += extra_exclude_stellar_types\n\n self.ab_list = [\n \"GRB 010921\",\n \"GRB 051221A\",\n \"GRB 080319B\",\n \"GRB 081203B\",\n \"SN 2008D\"\n ]\n\n if extra_ab_list is not None:\n self.ab_list += extra_ab_list\n\n self.group_agn = group_agn\n self.group_stellar = group_stellar\n\n self.base_data_df = self.read_data()\n\n self.master_df = self.base_data_df.copy()\n\n self.basis = self.base_data_df[\"Type\"].unique().tolist()\n\n if transients_file is not None:\n self.transients_data_file = Path(transients_file)\n if not self.transients_data_file.exists():\n # Try looking in the data dir\n self.transients_data_file = Path(\n pkg_resources.resource_filename(\n __name__, f\"./data/{self.transients_data_file.name}\"\n )\n )\n if not self.transients_data_file.exists():\n raise IOError(\n f'The file {self.transients_data_file.resolve()}'\n ' cannot be found in the specified location or in the'\n ' packaged data files.'\n )\n if (self.transients_data_file.name ==\n \"transient_master_table_04072013.txt\"):\n first_use_I = True\n else:\n first_use_I = False\n self.transients_data_df = self.read_data(\n transients=True, first_use_I=first_use_I\n )\n self.transients = self.transients_data_df[\"Type\"].unique().tolist()\n\n self.master_df = 
self.master_df.append(self.transients_data_df)\n else:\n self.transients = []\n\n\n def read_data(\n self, transients: bool = False, first_use_I: bool = False\n ) -> pd.DataFrame:\n \"\"\"Function to read in the master and transient data from their\n respective files.\n\n Tab-separated files are expected.\n\n The first_use_I argument is used as a cheat method to load the file\n 'transient_master_table_04072013.txt' as this is the only instance\n where I band magnitudes are used.\n\n Args:\n transients: Set as 'True' when the data file being read is not\n the base data file.\n first_use_I: Set to `True` when reading the file\n 'transient_master_table_04072013.txt'.\n\n Returns:\n The DataFrame containing the data.\n \"\"\"\n if transients:\n data_file = self.transients_data_file\n else:\n data_file = self.base_data_file\n base_data = pd.read_csv(data_file, comment=\"#\", sep=\"\\t\")\n base_data = self.master_table_analysis(\n base_data, transients=transients, first_use_I=first_use_I\n )\n\n return base_data\n\n def master_table_analysis(\n self,\n df: pd.DataFrame,\n transients: bool = False,\n first_use_I: bool = False\n ) -> pd.DataFrame:\n \"\"\"Analyses the data and performs conversions to magnitudes.\n\n Column names expected are:\n - 'F Radio / mJy'\n - 'V' or 'R' (or 'I')\n - 'Type'\n - 'Name'\n\n The first_use_I argument is used as a cheat method to load the file\n 'transient_master_table_04072013.txt' as this is the only instance\n where I band magnitudes are used.\n\n Args:\n df: DataFrame containing the raw data.\n transients: Set as 'True' when the data file being read is not\n the base data file.\n first_use_I: Set to `True` when reading the file\n 'transient_master_table_04072013.txt'.\n\n Returns:\n The DataFrame with new columns calculated:\n - 'radio'\n - 'optical_mag_used_band'\n - 'optical_mag_used_value'\n - 'optical_mag_used_value_processed'\n - 'ratio'\n - 'optical_in_mJy'\n \"\"\"\n # First we get rid of any objects that have no R or V measurement\n # Cheating as the FIRST transients we use I\n if not first_use_I:\n mask = ((df[\"V\"] == 0.) 
& (df[\"R\"] == 0.))\n df = df[~mask].reset_index(drop=True)\n df = df[df[\"F Radio / mJy\"] != 0.].reset_index(drop=True)\n df[\"radio\"] = df[\"F Radio / mJy\"]\n # The old structure used a large dictionary, but we don't need that\n # with pandas, do some intermediate processing.\n # First get which band we're using for each source, preference of\n # R -> V.\n # Get mask of which ones are V\n if not first_use_I:\n mask = df[\"R\"] == 0.\n df[\"optical_mag_used_value\"] = np.where(mask, df[\"V\"], df[\"R\"])\n df[\"optical_mag_used_band\"] = np.where(mask, \"V\", \"R\")\n else:\n df[\"optical_mag_used_value\"] = df[\"I\"]\n df[\"optical_mag_used_band\"] = \"I\"\n\n # Next step is to convert the magnitudes to the AB system if needed\n to_convert_mask = ~(\n (df[\"Type\"].str.contains(\"OPTICAL Sel.\")) |\n (df[\"Name\"].isin(self.ab_list)) |\n (df['optical_mag_used_band'] == \"I\")\n )\n\n df[\"optical_mag_used_value_processed\"] = df[\"optical_mag_used_value\"]\n\n converted_values = df.loc[to_convert_mask, [\n \"optical_mag_used_value\",\n \"optical_mag_used_band\"\n ]].apply(ConvertToABMag_pd, axis=1)\n\n df.loc[to_convert_mask, \"optical_mag_used_value_processed\"] = (\n converted_values\n )\n\n df[\"optical_in_mJy\"] = df[[\n \"optical_mag_used_value_processed\",\n \"optical_mag_used_band\"\n ]].apply(OpticaltomJy_pd, axis=1)\n\n df[\"ratio\"] = df[\"radio\"] / df[\"optical_in_mJy\"]\n\n if not transients:\n if self.group_agn:\n agn_mask = (\n (df[\"Type\"]==\"Quasar (OPTICAL Sel.)\") |\n (df[\"Type\"] == \"Quasar (RADIO Sel.)\")\n )\n df[\"Type\"] = np.where(agn_mask, \"Quasar\", df[\"Type\"])\n\n df = self._sort_stellar_sources(\n df, group_stellar = self.group_stellar,\n stellar_exclude_list = self.exclude_stellar_types\n )\n\n return df\n\n def _sort_stellar_sources(\n self,\n df: pd.DataFrame,\n group_stellar: bool = True,\n stellar_exclude_list: List[str] = []\n ) -> pd.DataFrame:\n \"\"\"The stellar sources are grouped together and excluded.\n\n Args:\n df: The DataFrame containing the data.\n group_stellar: Indicates whether stellar sources should be grouped\n together.\n stellar_exclude_list: Types of sources to exclude from the stellar\n sources.\n\n Returns:\n The DataFrame with the stellar sources grouped and excluded.\n \"\"\"\n new_main_types = {\n \"CV\": \"CV\",\n \"Magnetic CV\": \"CV\",\n \"X\": \"X-ray binary\",\n \"RSCVn\": \"Stellar: RS CVn\",\n \"Algol\": \"Stellar: Algol\",\n \"SymbioticStar\": 'Stellar: Symbiotic',\n \"VariableStar\": 'Stellar: Variable Star',\n \"T Tauri\": \"Stellar: YSO\",\n \"YSO\": \"Stellar: YSO\",\n \"P\": \"Stellar: PMS\",\n \"Star\": \"Stellar: Star\"\n }\n\n stellar_rows = df[df[\"Type\"] == \"Stellar\"]\n\n rows_to_drop = stellar_rows[\n stellar_rows[\"Subtype\"].isin(stellar_exclude_list)\n ]\n\n df = df.drop(rows_to_drop.index).reset_index(drop=True)\n\n stellar_rows = df[df[\"Type\"] == \"Stellar\"]\n\n #By group stellar I mean have them all as one category 'Stellar'\n if not group_stellar:\n # TODO: Remove loop\n for i, row in stellar_rows.iterrows():\n if row[\"Subtype\"] in new_main_types:\n df.at[i, \"Type\"] = new_main_types[row[\"Subtype\"]]\n else:\n df.at[i, \"Type\"] = \"Stellar: Other\"\n\n return df\n\n def calc_averages(self) -> pd.DataFrame:\n \"\"\"Calculates the averages of the data.\n\n Returns:\n DataFrame containing the mean and median information.\n \"\"\"\n averages = self.base_data_df.groupby(\"Type\").agg({\n 'radio': ['mean', 'median'],\n 'optical_in_mJy': ['mean', 'median']\n })\n\n 
averages['optical_in_Mag', 'mean'] = (\n averages['optical_in_mJy', 'mean'].apply(mJytoOptical, args=(\"R\"))\n )\n averages['optical_in_Mag', 'median'] = (\n averages['optical_in_mJy', 'median'].apply(\n mJytoOptical, args=(\"R\")\n )\n )\n\n return averages\n\n def _load_stellar_distances(self) -> pd.DataFrame:\n \"\"\"Loads the stellar distances data.\n\n Returns:\n The DataFrame containing the stellar distances information.\n It contains the columns 'object' and 'distance'. 'Object' is\n related to 'Name' in the main data.\n \"\"\"\n stellar_distance_file = Path(pkg_resources.resource_filename(\n __name__, \"./data/stellar_distances.csv\"\n ))\n s_dist = pd.read_csv(stellar_distance_file)\n\n return s_dist\n\n def _load_qso_redshifts(self) -> pd.DataFrame:\n \"\"\"Loads the quasar redshift data.\n\n Returns:\n The DataFrame containing the quasar redshift information.\n It contains the columns 'name' and 'z'.\n \"\"\"\n qso_redshift_file = Path(pkg_resources.resource_filename(\n __name__, \"./data/all_qso_redshifts.txt\"\n ))\n redshifts = pd.read_csv(\n qso_redshift_file,\n comment='#',\n sep=\";\",\n names=['ra', 'dec', 'name', 'z'],\n usecols=['name', 'z'],\n dtype={'name':str, 'z':str},\n na_values=['',]\n )\n redshifts['z'] = redshifts['z'].str.strip()\n redshifts = redshifts.loc[redshifts['z'] != '']\n redshifts['z'] = redshifts['z'].astype(float)\n\n return redshifts\n\n\n@docstring_inherit(RadioOpticalData)\nclass RadioOpticalPlot(RadioOpticalData):\n \"\"\"The main plotting class for the Radio Optical plot.\n\n Attributes:\n markers (Dict[str, str]): What matplotlib markers to use for each\n class.\n mark_colors (Dict[str, str]): What matplotlib colors to use for each\n class.\n current_fig (plt.figure): The current figure that has been\n generated.\n hide_legend (bool): Whether the main legend should be hidden.\n \"\"\"\n def __init__(\n self,\n base_data_file: Optional[str] = None,\n extra_exclude_stellar_types: Optional[List[str]] = None,\n extra_ab_list: Optional[List[str]] = None,\n group_agn: bool = False,\n group_stellar: bool = False,\n transients_file: Optional[str] = None\n ) -> None:\n \"\"\"Init function.\n\n Args:\n base_data_file: The file containing the tab-separated master data.\n If 'None' is entered then the packaged latest master table\n will be used.\n extra_exclude_stellar_types: Extra stellar types to add to the\n stellar exclude list.\n extra_ab_list: Extra names to add to the AB list.\n group_agn: When 'True' the quasars are grouped together under\n the type 'Quasars'.\n group_stellar: When 'True' the stellar sources are all grouped\n under the type 'Stellar'.\n transients_file: Path to the transients file to load. 
Also accepts\n the names of the packaged transient files\n 'transient_master_table_04072013.txt' and 'Stripe82_QSOs.txt'.\n\n Returns:\n None\n \"\"\"\n # Could do kwargs below but clearer for users to see if they are\n # explicitly passed.\n super().__init__(\n base_data_file,\n extra_exclude_stellar_types=extra_exclude_stellar_types,\n extra_ab_list=extra_ab_list,\n group_agn=group_agn,\n group_stellar=group_stellar,\n transients_file=transients_file\n )\n\n self.markers = {\n \"Quasar\": '.',\n \"Quasar (OPTICAL Sel.)\": '.',\n \"Quasar (RADIO Sel.)\": '.',\n \"Quasar (Stripe82)\": 'o',\n \"FIRST (SDSS Gal.)\": '.',\n \"FIRST (SDSS Star)\": '.',\n \"Variable Quasar\": '.',\n \"Variable Quasar (Galaxy)\": '.',\n \"GRB\": 's',\n \"SN\": '^',\n \"Magnetar\": 'd',\n \"CV\": 'v',\n \"RRAT\": 'd',\n \"Pulsar\": 'd',\n \"X-ray binary\": 'o',\n \"Stellar: Other\": \"D\",\n \"Stellar\": '>',\n \"Stellar: Star\": '>',\n \"Stellar: RS CVn\": 's',\n \"Stellar: Algol\": (4,1,0),\n \"Stellar: Symbiotic\": 'd',\n \"Stellar: YSO\": 'o',\n \"Stellar: PMS\": \">\",\n \"Stellar: Variable Star\": \"^\",\n \"PSR J1012+5307 (Variable)\": (4,1,45),\n \"SDSS-QSO (Variable)\": (4,1,0),\n \"Unclassified (Variable)\": (4,1,0),\n \"Star (Variable)\": (4,1,0),\n \"SDSS-GAL (Transient)\": '*',\n \"Unclassified (Transient)\": (10,1,0),\n \"GRB140907A (Transient)\": (4,1,0)\n }\n\n self.mark_colors={\n \"Quasar\":'#377eb8',\n \"Quasar (OPTICAL Sel.)\":'#377eb8',\n \"Quasar (RADIO Sel.)\":'#e41a1c',\n \"Quasar (Stripe82)\":\"#9acd32\",\n \"FIRST (SDSS Gal.)\":'#9ACD32',\n \"FIRST (SDSS Star)\":'#4B0082',\n \"Variable Quasar\":'b',\n \"Variable Quasar (Galaxy)\":'y',\n \"GRB\":'#ff7f00',\n \"SN\":'#984ea3',\n \"Magnetar\":'#FF4500',\n \"CV\":'c',\n \"RRAT\":'#ffff33',\n \"Pulsar\":'#f781bf',\n \"X-ray binary\":'#4daf4a',\n \"Stellar: Other\":'#FFD700',\n \"Stellar: RS CVn\":'#D2691E',\n \"Stellar: Algol\": 'm',\n \"Stellar\":'#DAA520',\n \"Stellar: Star\":'#DAA520',\n \"Stellar: Symbiotic\":'#008B8B',\n \"Stellar: YSO\":'#F08080',\n \"Stellar: PMS\":'#CD853F',\n \"Stellar: Variable Star\":\"#1e90ff\",\n \"PSR J1012+5307 (Variable)\":'#00FFFF',\n \"SDSS-QSO (Variable)\":'#377eb8',\n \"Unclassified (Variable)\":'#e41a1c',\n \"Star (Variable)\":'#DAA520',\n \"SDSS-GAL (Transient)\":'#984ea3',\n \"Unclassified (Transient)\":'#4daf4a',\n \"GRB140907A (Transient)\":'#FF00FF'\n }\n\n self.current_fig = None\n\n def generate_plot(\n self,\n exclude_type: Optional[str] = None,\n exclude_name: Optional[str] = None,\n qso_box: bool = False,\n push_agn: bool = False,\n push_stellar_dist: float = None,\n meerkat: bool = False,\n ska_lsst: bool = False,\n background: bool = False,\n color_list: Optional[str] = None,\n highlight_list: Optional[str] = None,\n schematic: bool = False,\n schematic_cover: bool = False,\n hide_line_labels: bool = False,\n hide_diag_line_labels: bool = False,\n show_complete: bool = False,\n hide_arrow: bool = False,\n square: bool = False,\n hide_main_legend: bool = False,\n zoom_plot: Optional[str] = None,\n legend_size: int = 18,\n ) -> plt.figure:\n \"\"\"The main function to generate a radio optical plot.\n\n Args:\n exclude_type: A string of comma separated types to exclude from\n plotting. E.g. 'SN,GRB'.\n exclude_name: A string of comma separated types to exclude from\n plotting. E.g. 
'SN 2004dj,GRB 030329'.\n qso_box: Set to 'True' to show a shaded region that represents\n the area quasars will extend to.\n push_agn: Set to 'True' to show how quasars will appear when\n pushed back by sqrt(10)x and 10x.\n push_stellar_dist: Show how the stellar sources will appear when\n pushed back by a provided distance (units is parsec).\n meerkat: Show the MeerKAT and MeerLICHT sensitivity limit lines.\n ska_lsst: Show the SKA and LSST sensitivity limit lines.\n background: Set to 'True' to show the basis plot as a light grey\n color instead of full color. Useful for track plots or\n transient overlays.\n color_list: A string list of types to show in color. Enter as a\n comma separated string, e.g. 'SN,GRB'.\n highlight_list: Used with 'background'. Highlights the types in a\n darker shade of gray. Enter as a comma separated string,\n e.g. 'SN,GRB'.\n schematic: Show opaque overlay ellipses of type clouds. Note, do\n not use with 'group_agn'.\n schematic_cover: Show transparent overlay ellipses of type clouds.\n hide_line_labels: Set to 'True' to hide the labels on the SDSS and\n FIST survey limit lines.\n hide_diag_line_labels: Set to 'True' to hide the labels on the\n distance lines.\n show_complete: When 'True' only those sources in the 'complete'\n region of the plot are shown in color.\n hide_arrow: Hide the extinction arrow.\n square: Set the plot shape to be square instead of rectangle.\n hide_main_legend: Hide the main legend of the plot.\n zoom_plot: Zoom to area of the plot, enter as 'x1,y1,x2,y2'.\n legend_size: The size of the legend with in square mode.\n\n Returns:\n The resulting figure.\n \"\"\"\n if push_agn and not self.group_agn:\n raise ValueError(\n \"Data must be initialised with 'group_agn=True\"\n \" to use the 'push_agn' option!\"\n )\n #change distance settings\n if push_stellar_dist is not None:\n change_stellar_distanes = True\n else:\n change_stellar_distanes = False\n\n #Creates exclude lists\n if exclude_name is not None:\n xname = exclude_name.split(',')\n else:\n xname = []\n\n if exclude_type is not None:\n xtype = exclude_type.split(',')\n else:\n xtype = []\n\n # Sort out color list\n if color_list is None:\n color_list = []\n else:\n color_list = color_list.split(\",\")\n\n # Sort out highlight list\n if highlight_list is None:\n highlight_list = []\n else:\n highlight_list = highlight_list.split(\",\")\n\n # Set the figure size\n if square:\n fig = plt.figure(1, figsize=(16.,11.25))\n title_font_size = 30\n self.prop = fm.FontProperties(size=legend_size)\n label_size = 30\n ticksize = 25\n else:\n fig = plt.figure(1, figsize=(20.,11.25))\n title_font_size = 20\n self.prop = fm.FontProperties(size=14)\n label_size = 'xx-large'\n ticksize = 20\n\n self.hide_legend = hide_main_legend\n\n # Create the axis\n ax1 = fig.add_subplot(111)\n\n #This sets up the top axis showing the magnitude\n ax6 = ax1.twiny()\n ax6.set_xscale('log')\n ax6.set_xlim([0.00001, 10000000])\n\n # Set the ticks for ax6\n magrange = range(0, 30, 2)\n ticks = [OpticaltomJy(thismag, \"R\") for thismag in magrange]\n ticklabels = magrange\n ax6.set_xticks(ticks)\n ax6.set_xticklabels(ticklabels)\n\n # Add title for ax6\n ax1.text(\n 0.5, 1.06, 'Optical AB magnitude (mag)',\n horizontalalignment='center',\n fontsize=title_font_size,\n transform = ax1.transAxes\n )\n\n # Set up the diagonal distance lines\n linesmagrange = range(-25, 34, 2)\n lineticks = [OpticaltomJy(thismag, \"R\") for thismag in linesmagrange]\n # Manually set the locations\n ratio_wanted=[\n 1e9, 1e8, 1e7, 1e6, 
1e5, 1e4, 1e3, 1e2, 1e1, 1, 1e-1,\n 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10\n ]\n optical_range = lineticks\n optical_range.reverse()\n\n for i in ratio_wanted:\n lineopt, linerad, theratio = plotlines(optical_range[0], i)\n lineopt, linerad = cleanlines(lineopt, linerad)\n if len(lineopt)==0:\n continue\n ax1.plot(\n lineopt, linerad, linestyle='--', color='gray', marker=None,\n linewidth=1.5, zorder=1, alpha=0.6\n )\n\n if not np.any([hide_line_labels, hide_diag_line_labels]):\n if theratio == 1e7:\n ax1.text(\n OpticaltomJy(\n mJytoOptical(lineopt[-1], \"R\",) + 3.4, \"R\"\n ),\n linerad[-1] - 2.1e5,\n \"{0:.0e}\".format(theratio),\n rotation=40, alpha=0.6\n )\n elif 1e3 <= theratio <= 1e9:\n ax1.text(\n OpticaltomJy(\n mJytoOptical(lineopt[-1], \"R\",) + 2.5, \"R\"\n ),\n linerad[-1] - 1.9e5,\n \"{0:.0e}\".format(theratio),\n rotation=40, alpha=0.6\n )\n elif theratio == 1:\n ax1.text(\n OpticaltomJy(\n mJytoOptical(lineopt[0], \"R\",) - 0.8, \"R\"\n ),\n linerad[0] + 0.35e-2,\n \"{0:.0e}\".format(theratio),\n rotation=40, alpha=0.6\n )\n elif 1e-10 < theratio < 1e3:\n ax1.text(\n OpticaltomJy(\n mJytoOptical(lineopt[0], \"R\",) - 0.6, \"R\"\n ),\n linerad[0] + 0.4e-2,\n \"{0:.0e}\".format(theratio),\n rotation=40, alpha=0.6\n )\n\n # Add the QSO box if requested\n if qso_box:\n SDSSLimit = OpticaltomJy(22.2, \"R\")\n agnx1 = 0.04# SDSSLimit\n agnx2 = 0.8\n agny1 = 1000.\n agny2 = 0.03\n agny3 = 1.\n agnx3, _ = plotboxpoints(agnx1, agnx2, agny1, agny3)\n ax1.add_patch(\n Polygon([\n [agnx1, agny1],\n [agnx2,agny1],\n [agnx2,agny2],\n [SDSSLimit,agny2],\n [agnx3,1.]\n ], closed=True, facecolor='#377eb8', edgecolor='#377eb8',\n label=\"Likely Quasar Region\",\n alpha=0.4, zorder=0)\n )\n ax1.add_patch(\n Polygon([\n [agnx1, agny1],\n [agnx2,agny1],\n [agnx2,agny2],\n [SDSSLimit,agny2],\n [agnx3,1.]\n ], closed=True, facecolor='None', edgecolor=\"#377eb8\",\n alpha=0.4, zorder=0, hatch=\"\\\\\")\n )\n\n # zorder counter\n num = 3\n\n combined_list = self.basis + self.transients\n\n # Now start looping through per type to plot\n for i in combined_list:\n if i in xtype:\n continue\n\n if i in self.basis:\n if i == \"Stellar\":\n size_toplot = 50\n else:\n size_toplot = 90\n else:\n if \"(Variable)\" in i:\n size_toplot = 300\n elif \"(Transient)\" in i:\n size_toplot = 600\n else:\n size_toplot = 100\n\n # Get the selection\n data_selection = (\n self.master_df.loc[self.master_df[\"Type\"] == i]\n )\n\n # Filter out not wanted objects.\n if len(xname) > 0:\n data_selection = (\n data_selection[~data_selection[\"Name\"].isin(xname)]\n )\n\n try:\n # Obtain marker style and color\n mark_color = self.mark_colors[i]\n marker = self.markers[i]\n except:\n mark_color = None\n marker = '*'\n\n\n if i in self.basis:\n # Change some label headings\n if i == 'SN':\n thislabel = \"Supernova\"\n elif i == 'Pulsar':\n thislabel = \"Radio Pulsar\"\n else:\n thislabel = i\n\n if background and i not in color_list:\n num = 3\n if i in highlight_list:\n thiscolor = 'darkgrey'\n else:\n thiscolor = 'lightgrey'\n ax1.scatter(\n data_selection[\"optical_in_mJy\"],\n data_selection[\"radio\"],\n s=size_toplot,\n color=thiscolor,\n marker=marker,\n label=thislabel,\n zorder=num\n )\n\n elif not schematic and not show_complete:\n ax1.scatter(\n data_selection[\"optical_in_mJy\"],\n data_selection[\"radio\"],\n s=size_toplot,\n color=mark_color,\n marker=marker,\n label=thislabel,\n zorder=num\n )\n\n elif show_complete:\n color_ones_radio = []\n color_ones_optical = []\n bg_ones_radio = []\n 
bg_ones_optical = []\n o_limit = OpticaltomJy(22.2, \"R\")\n r_limit = 1.0\n for p, val in enumerate(data_selection[\"optical_in_mJy\"]):\n thisradio = data_selection[\"radio\"].iloc[p]\n if (thisradio > r_limit) and (val > o_limit):\n color_ones_optical.append(val)\n color_ones_radio.append(thisradio)\n else:\n bg_ones_optical.append(val)\n bg_ones_radio.append(thisradio)\n if len(color_ones_radio)>0:\n ax1.scatter(\n color_ones_optical,\n color_ones_radio,\n s=size_toplot,\n color=mark_color,\n marker=marker,\n label=thislabel,\n zorder=num\n )\n ax1.scatter(\n bg_ones_optical,\n bg_ones_radio,\n s=size_toplot,\n color='lightgrey',\n marker=marker,\n zorder=num\n )\n else:\n ax1.scatter(\n bg_ones_optical,\n bg_ones_radio,\n s=size_toplot,\n color='lightgrey',\n marker=marker,\n label=thislabel,\n zorder=num\n )\n\n if schematic:\n toplot = [\n \"Quasar (OPTICAL Sel.)\", 'Stellar', \"GRB\",\n \"SN\", \"Pulsar\", \"X-ray binary\"\n ]\n heights = {\n \"Quasar (OPTICAL Sel.)\": 4.5, 'Stellar': 3,\n \"GRB\": 1.8, \"SN\": 1.5, \"Pulsar\": 3,\n \"X-ray binary\": 4.5\n }\n widths = {\n \"Quasar (OPTICAL Sel.)\": 3.2, 'Stellar': 6,\n \"GRB\": 3.3, \"SN\": 2.75, \"Pulsar\": 1.8,\n \"X-ray binary\": 1.25\n }\n positions = {\n \"Quasar (OPTICAL Sel.)\": (0.2,1), 'Stellar': (0.5,0),\n \"GRB\": (-0.3,0), \"SN\": (0.2,0), \"Pulsar\": (-0.5,0.2),\n \"X-ray binary\": (-0.5,0)\n }\n angles = {\n \"Quasar (OPTICAL Sel.)\": -20, 'Stellar': 0,\n \"GRB\": 10, \"SN\": 0, \"Pulsar\": 0, \"X-ray binary\": -40\n }\n\n textlabels = {\n \"Quasar (OPTICAL Sel.)\": \"Quasars\",\n 'Stellar': \"Stellar & CVs\",\n \"GRB\": \"GRBs\",\n \"SN\": \"SNe\",\n \"Pulsar\": \" Radio\\nPulsars\",\n \"X-ray binary\": \"XRBs\"\n }\n\n if i in toplot:\n circ_offset = ScaledTranslation(\n np.median(data_selection[\"optical_in_mJy\"]),\n np.median(data_selection[\"radio\"]),\n ax1.transScale\n )\n\n circ_tform = (\n circ_offset + ax1.transLimits + ax1.transAxes\n )\n\n thisopticalmedian = np.median(\n data_selection[\"optical_in_mJy\"]\n )\n thisradiomedian = np.median(\n data_selection[\"radio\"]\n )\n thisopticalstd = np.std(\n data_selection[\"optical_in_mJy\"]\n )\n thisradiostd = np.std(data_selection[\"radio\"])\n optmax = max(data_selection[\"optical_in_mJy\"])\n radmax = max(data_selection[\"radio\"])\n optmin = min(data_selection[\"optical_in_mJy\"])\n radmin = min(data_selection[\"radio\"])\n e1 = Ellipse(\n xy=positions[i], width=widths[i],\n height=heights[i], transform=circ_tform,\n alpha=0.5, zorder=20, angle=angles[i],\n color=mark_color\n )\n ax1.add_artist(e1)\n\n if i == \"X-ray binary\":\n ax1.text(\n positions[i][0]-0.3,\n positions[i][1],\n textlabels[i],\n transform=circ_tform,\n zorder=21,\n weight='bold',\n size=24\n )\n else:\n ax1.text(\n positions[i][0] - 0.7,\n positions[i][1],\n textlabels[i],\n transform=circ_tform,\n zorder=21,\n weight='bold',\n size=24\n )\n\n if schematic_cover:\n toplot = [\n \"Quasar\", 'Stellar', \"GRB\", \"SN\",\n \"Pulsar\", \"X-ray binary\"\n ]\n heights = {\n \"Quasar\": 4.9, 'Stellar': 3.8, \"GRB\": 1.8,\n \"SN\": 1.5, \"Pulsar\": 3, \"X-ray binary\": 6.5\n }\n widths = {\n \"Quasar\": 3.2, 'Stellar': 6.5, \"GRB\": 3.6,\n \"SN\": 2.75, \"Pulsar\": 1.8, \"X-ray binary\": 2.5\n }\n positions = {\n \"Quasar\": (0.2,1), 'Stellar': (0.2,0.1),\n \"GRB\": (-0.1,0), \"SN\": (0.2,0), \"Pulsar\": (-0.5,0.2),\n \"X-ray binary\": (0.2,0.3)\n }\n angles = {\n \"Quasar\": -20, 'Stellar': 0, \"GRB\": 10, \"SN\": 0,\n \"Pulsar\": 0, \"X-ray binary\": -45\n }\n textlabels={\n \"Quasar\": 
\"Quasars\", 'Stellar': \"Stellar & CVs\",\n \"GRB\": \"GRBs\", \"SN\": \"SNe\",\n \"Pulsar\": \" Radio\\nPulsars\",\n \"X-ray binary\": \"XRBs\"\n }\n if i in toplot:\n circ_offset = ScaledTranslation(\n np.median(data_selection[\"optical_in_mJy\"]),\n np.median(data_selection[\"radio\"]),\n ax1.transScale\n )\n\n circ_tform = (\n circ_offset + ax1.transLimits + ax1.transAxes\n )\n\n thisopticalmedian = np.median(\n data_selection[\"optical_in_mJy\"]\n )\n thisradiomedian = np.median(data_selection[\"radio\"])\n thisopticalstd = np.std(\n data_selection[\"optical_in_mJy\"]\n )\n thisradiostd = np.std(data_selection[\"radio\"])\n optmax = max(data_selection[\"optical_in_mJy\"])\n radmax = max(data_selection[\"radio\"])\n optmin = min(data_selection[\"optical_in_mJy\"])\n radmin = min(data_selection[\"radio\"])\n\n if i==\"Quasar\":\n\n e1 = Arc(\n (0.2,1), height=4.9, width=3.4,\n transform=circ_tform, zorder=20,\n theta1=324.9, theta2=244,\n hatch = '..........', angle=-20, alpha=0.2\n )\n e1.set_color(mark_color)\n\n else:\n e1 = Ellipse(\n xy=positions[i], width=widths[i],\n height=heights[i], transform=circ_tform,\n alpha=0.2, zorder=50, angle=angles[i],\n color=mark_color\n )\n ax1.add_artist(e1)\n\n if i == \"X-ray binary\":\n ax1.text(\n positions[i][0] - 0.3,\n positions[i][1],\n textlabels[i],\n transform=circ_tform,\n zorder=21,\n weight='bold',\n size=24\n )\n elif i == \"SN\":\n ax1.text(\n positions[i][0] - 0.4,\n positions[i][1] - 0.2,\n textlabels[i],\n transform=circ_tform,\n zorder=21,\n weight='bold',\n size=24\n )\n else:\n ax1.text(\n positions[i][0] - 0.7,\n positions[i][1],\n textlabels[i],\n transform=circ_tform,\n zorder=21,\n weight='bold',\n size=24\n )\n else:\n if i == \"FIRST (SDSS Gal.)\" or i==\"FIRST (SDSS Star)\":\n ax1.scatter(\n data_selection[\"optical_in_mJy\"],\n data_selection[\"radio\"],\n s=120,\n color=mark_color,\n marker=marker,\n label=i,\n zorder=2\n )\n num -= 1\n\n elif i == \"GRB140907A (Transient)\":\n ax1.scatter(\n data_selection[\"optical_in_mJy\"],\n data_selection[\"radio\"],\n s=size_toplot,\n color=mark_color,\n marker=marker,\n label=i,\n zorder=num,\n lw=2\n )\n ax1.arrow(\n data_selection[\"optical_in_mJy\"].iloc[0],\n data_selection[\"radio\"].iloc[0],\n 0, -0.05, fc=mark_color,\n ec=mark_color,head_width=0.0045,\n head_length=0.015, alpha=0.8\n )\n else:\n ax1.scatter(\n data_selection[\"optical_in_mJy\"],\n data_selection[\"radio\"],\n s=size_toplot,\n color=mark_color,\n marker=marker,\n label=i,\n zorder=num,\n lw=2\n )\n num += 1\n # End of loop\n\n ax1.set_xlabel('Optical flux density (mJy)', size=label_size)\n ax1.set_ylabel('Radio flux density (mJy)', size=label_size)\n\n if not hide_arrow:\n extinction_arrow(\n ax1,\n OpticaltomJy(4., \"R\"),\n 0.03,\n -(OpticaltomJy(4., \"R\") - OpticaltomJy(9., \"R\")),\n 0,\n background,\n self.group_agn\n )\n\n ax1.tick_params(axis='both', which='major', labelsize=ticksize)\n ax6.tick_params(\n axis='both', which='major', labelsize=ticksize,\n zorder=20, direction='inout', length=6\n )\n\n # Hard code the limits\n ax1.set_xlim([0.00001,10000000])\n ax1.set_ylim([0.005,100000])\n\n zoomy2 = 2000 # used later for label placement\n\n if zoom_plot is not None:\n zoomx1, zoomy1, zoomx2, zoomy2 = zoom_plot.split(\",\")\n ax1.set_xlim([float(zoomx1), float(zoomx2)])\n ax6.set_xlim([float(zoomx1), float(zoomx2)])\n ax1.set_ylim([float(zoomy1), float(zoomy2)])\n\n # Draw the limit lines\n # TODO: Move these limits\n SDSSLimit = OpticaltomJy(22.2, \"R\")\n SDSSLimit_bright = OpticaltomJy(14.0, 
\"R\")\n Meerlicht_limit = OpticaltomJy(22.3, \"R\")\n MASTER_limit = OpticaltomJy(18.0, \"R\")\n LSST_limit = OpticaltomJy(24.0, \"R\")\n\n ax1.axvline(\n SDSSLimit, linestyle='--', color='k', linewidth=2.,\n zorder=1, alpha=0.6\n )\n ax1.axvline(\n SDSSLimit_bright, linestyle='--', color='k',\n linewidth=2., zorder=1, alpha=0.6\n )\n if not hide_line_labels:\n ax1.text(\n SDSSLimit_bright + (SDSSLimit_bright * 0.1),\n 30000 * (float(zoomy2) / 400000),\n \"SDSS Saturation Limit\",\n rotation='vertical',\n weight='bold',\n size=16\n )\n ax1.text(\n SDSSLimit - (SDSSLimit * 0.35),\n 40000 * (float(zoomy2) / 400000),\n \"SDSS 95% Completeness\",\n rotation='vertical',\n weight='bold',\n size=16\n )\n ax1.text(0.000012, 1-0.4, \"FIRST Limit\", weight='bold', size=16)\n if show_complete:\n first_red_line = Line2D(\n [SDSSLimit, 1e7],\n [1., 1.],\n color='red',\n linewidth=4,\n zorder=20\n )\n optical_line = Line2D(\n [SDSSLimit, SDSSLimit],\n [1., 5e5],\n color='red',\n linewidth=4,\n zorder=20\n )\n ax1.add_artist(first_red_line)\n ax1.add_artist(optical_line)\n\n ax1.axhline(\n 1., linestyle='--', color='k', linewidth=2., zorder=1, alpha=0.6,\n )\n\n if meerkat:\n ax1.axvline(\n Meerlicht_limit,\n linestyle='--',\n color='#4daf4a',\n label=\"MeerLICHT Limit (5 min, 22.3 mag)\",\n linewidth=4.,\n zorder=30\n )\n ax1.axhline(\n 23.5e-3,\n linestyle='--',\n color='#1e90ff',\n label=r\"5${\\rm \\sigma}$ MeerKAT Limit (1 h; 23.5 $\\mu$Jy)\",\n linewidth=4.,\n zorder=30\n )\n\n if ska_lsst:\n ax1.axvline(\n LSST_limit,\n linestyle='-',\n color='b',\n label=\"LSST Limit\",\n linewidth=2.,\n zorder=1\n )\n ax1.axhline(\n 2.1e-3,\n linestyle='-',\n color='k',\n label=\"SKA-1 Mid Limit\",\n linewidth=2.,\n zorder=1\n )\n ax1.set_ylim([0.0001, 1600000])\n\n ax1.set_xscale('log')\n ax1.set_yscale('log')\n\n if push_agn:\n #Load in redshifts:\n redshifts = self._load_qso_redshifts()\n\n dmods = [np.sqrt(10.), 10.]\n\n labels = {\n 0: \"Quasars (Distance x 3.16)\",\n 1: \"Quasars (Distance x 10)\",\n }\n\n colors = {\n 0: \"orange\",\n 1: \"lightblue\",\n }\n\n data_selection = self.master_df[\n self.master_df['Type'] == 'Quasar'\n ].copy()\n\n data_selection = data_selection.loc[\n data_selection['Name'].isin(redshifts['name'])\n ]\n\n data_selection = data_selection.merge(\n redshifts[['name', 'z']], left_on='Name', right_on='name'\n )\n\n cosmo = FlatLambdaCDM(H0=70, Om0=0.3)\n\n for i, d in enumerate(dmods):\n result = data_selection.apply(\n kcorrect, axis=1, args=(d, i, cosmo)\n )\n\n ax1.scatter(\n result[f\"optical_dmod_{i}\"],\n result[f\"radio_dmod_{i}\"],\n s=90,\n color=colors[i],\n marker=self.markers[\"Quasar\"],\n label=labels[i],\n zorder=15 + i\n )\n\n if change_stellar_distanes:\n s_dist = self._load_stellar_distances()\n data_selection = self.master_df[\n (self.master_df['Type'].str.contains('Stellar')) &\n (self.master_df['Name'].isin(s_dist['object']))\n ]\n data_selection = data_selection.merge(\n s_dist, left_on='Name', right_on='object'\n )\n data_selection = data_selection[\n ['optical_in_mJy', 'radio', 'distance']\n ].apply(stellar_dist, axis=1, args=(push_stellar_dist,))\n\n ax1.scatter(\n data_selection['optical_pushed'],\n data_selection['radio_pushed'],\n s=50,\n color=\"#00BFFF\",\n marker=self.markers[\"Stellar\"],\n label=f\"Stellar ({push_stellar_dist:.0f} pc)\",\n zorder=16\n )\n\n if not schematic:\n handles, labels = ax1.get_legend_handles_labels()\n if (len(self.transients) != 0 or\n push_agn or change_stellar_distanes):\n l2 = ax1.legend(\n 
handles[len(self.basis):],\n labels[len(self.basis):],\n loc=2,\n prop=self.prop,\n scatterpoints=1,\n markerscale=0.8\n )\n elif meerkat or ska_lsst:\n l2 = ax1.legend(\n handles[:-len(self.basis):],\n labels[:-len(self.basis)],\n loc=2,\n prop=self.prop,\n scatterpoints=1,\n markerscale=0.8\n )\n if qso_box:\n loca = 2\n else:\n loca = 1\n if not self.hide_legend:\n if meerkat or ska_lsst:\n l1 = ax1.legend(\n handles[-len(self.basis):],\n labels[-len(self.basis):],\n loc=loca,\n prop=self.prop,\n scatterpoints=1\n ).set_zorder(30)\n else:\n l1 = ax1.legend(\n handles[:len(self.basis)],\n labels[:len(self.basis)],\n loc=loca,\n prop=self.prop,\n scatterpoints=1\n ).set_zorder(30)\n if (len(self.transients)!=0 or push_agn or change_stellar_distanes\n or meerkat or ska_lsst):\n l2.set_zorder(30)\n ax1.add_artist(l2)\n\n self.current_fig = fig\n\n return fig\n\n def add_datapoint(\n self,\n name: str,\n o_mag: float,\n r_flux: float,\n o_band: str = 'R',\n ab: bool = True,\n marker='o',\n color=\"tab:blue\",\n markersize: float = 100,\n **kwargs\n ) -> plt.figure:\n \"\"\"Add a datapoint to the current figure.\n\n If not figure has been generated then a figure will be generated.\n Options for the plot can be declared using **kwargs.\n\n Args:\n name: The name of the object to add.\n o_mag: The optical magnitude.\n r_flux: The radio flux (mJy).\n o_band: The optical band of the magnitude.\n ab: Set to 'False' if the object magnitude is not in the AB system.\n marker (plt_markers): The matplotlib marker to use.\n color (plt_color): The matplotlib color to use.\n markersize: The size of the markers.\n **kwargs: Keyword arguments passed to 'generate_plot'.\n\n Returns:\n The current figure.\n \"\"\"\n if self.current_fig is None:\n self.current_fig = self.generate_plot(**kwargs)\n\n ax1 = self.current_fig.get_axes()[0]\n\n if not ab:\n o_mag = ConvertToABMag(o_mag, o_band)\n\n o_flux = OpticaltomJy(o_mag, o_band)\n\n ax1.scatter(\n o_flux,\n r_flux,\n s=markersize,\n color=color,\n marker=marker,\n label=name,\n zorder=17\n )\n\n self.add_second_legend(ax1)\n\n return self.current_fig\n\n def add_datapoints_from_df(\n self,\n df: pd.DataFrame,\n ab_to_convert: List[str] = [],\n # Unsure about type hints for markers and colors?\n markers = [],\n colors = [],\n markersize: int = 100,\n group_by_type: bool = False,\n **kwargs\n ) -> plt.figure:\n \"\"\"Add datapoints from a DataFrame to the current figure.\n\n Columns expected are:\n - 'Type'\n - 'Name'\n - 'radio'\n - 'optical_in_mJy'\n - 'R' or 'V'\n\n If not figure has been generated then a figure will be generated.\n Options for the plot can be declared using **kwargs.\n\n Args:\n df: The DataFrame containing the data points to add.\n ab_to_convert: The list of names that require converting to\n the AB system.\n markers: List of markers to use per type or per name. Make sure\n the number of markers match the number of unique names or\n types.\n colors: List of colors to use per type or per name. 
Make sure\n the number of colors match the number of unique names or\n types.\n markersize: The size to use for the markers.\n group_by_type: Group the datapoints by type rather than plotting\n with individual name labels.\n **kwargs: Keyword arguments passed to 'generate_plot'.\n\n Returns:\n The resulting current figure.\n \"\"\"\n if self.current_fig is None:\n self.current_fig = self.generate_plot(**kwargs)\n\n for i in df['Name'].unique():\n if i not in ab_to_convert:\n self.ab_list.append(i)\n\n # easier for users to put in radio\n df = df.rename(columns={'radio': \"F Radio / mJy\"})\n\n df = self.master_table_analysis(df, transients=True)\n\n if group_by_type:\n unique = df['Type'].unique()\n unique_col = 'Type'\n else:\n unique = df['Name'].unique()\n unique_col = 'Name'\n\n if unique.shape[0] > len(markers):\n raise ValueError(\n 'Number of markers is less than the number of unique entries.'\n )\n\n if unique.shape[0] > len(colors):\n raise ValueError(\n 'Number of colors is less than the number of unique entries.'\n )\n\n ax1 = self.current_fig.get_axes()[0]\n\n for i, val in enumerate(unique):\n\n data_selection = df.loc[df[unique_col] == val]\n\n ax1.scatter(\n data_selection['optical_in_mJy'],\n data_selection['radio'],\n s=markersize,\n color=colors[i],\n marker=markers[i],\n label=val,\n zorder=17\n )\n\n self.add_second_legend(ax1)\n\n return self.current_fig\n\n def add_second_legend(self, ax1: plt.Axes) -> None:\n \"\"\"\n Function to add the second legend to the plot when required.\n\n Args:\n ax1: The axis to add the legend to.\n\n Returns:\n None.\n \"\"\"\n handles, labels = ax1.get_legend_handles_labels()\n l2 = ax1.legend(\n handles[len(self.basis):],\n labels[len(self.basis):],\n loc=2,\n prop=self.prop,\n scatterpoints=1,\n markerscale=0.8\n )\n\n if not self.hide_legend:\n l1 = ax1.legend(\n handles[:len(self.basis)],\n labels[:len(self.basis)],\n loc=1,\n prop=self.prop,\n scatterpoints=1\n ).set_zorder(30)\n\n\n l2.set_zorder(30)\n ax1.add_artist(l2)\n\n def add_text(\n self,\n text: str,\n x: float,\n y: float,\n **kwargs\n ) -> Tuple[plt.figure, plt.text]:\n \"\"\"Adds text to the current figure.\n\n Args:\n text: The text string to add to the plot.\n x: The x coordinate of the text.\n y: The y coordinate of the text.\n **kwargs: Keyword arguments passed to 'plt.text'.\n \"\"\"\n if self.current_fig is None:\n raise ValueError(\n 'A plot must be generated before adding text!'\n )\n\n ax1 = self.current_fig.get_axes()[0]\n\n t = ax1.text(x, y, text, **kwargs)\n\n return self.current_fig, t\n\n def ratio_histogram(self) -> plt.figure:\n \"\"\"Plot the ratio histogram.\n\n Returns:\n Histogram plot of the optical / radio ratios.\n \"\"\"\n fig_hist = plt.figure(figsize=(6,15))\n subplots = {}\n basis_order = [\n \"Stellar\", \"CV\", \"SN\", \"X-ray binary\", \"GRB\",\n \"Quasar (OPTICAL Sel.)\", \"Quasar (RADIO Sel.)\", \"Pulsar\"\n ]\n legend_loc = {\n \"Stellar\":1, \"CV\":1, \"SN\":1, \"X-ray binary\":2, \"GRB\":2,\n \"Quasar (OPTICAL Sel.)\":2, \"Quasar (RADIO Sel.)\":2, \"Pulsar\":2\n }\n for i, val in enumerate(basis_order):\n label = \"Radio Pulsar\" if val == \"Pulsar\" else val\n subplots[val] = fig_hist.add_subplot(len(self.basis), 1, i+1)\n data_selection = self.base_data_df[\n self.base_data_df['Type'] == val\n ]\n subplots[val].hist(\n data_selection[\"ratio\"],\n bins=np.logspace(-8, 6, 15),\n color=self.mark_colors[val],\n label=label,\n edgecolor=\"none\"\n )\n\n subplots[val].set_xscale('log')\n subplots[val].legend(loc=legend_loc[val])\n 
subplots[val].grid(True)\n\n for label in subplots[val].xaxis.get_ticklabels()[::2]:\n label.set_visible(False)\n for label in subplots[val].yaxis.get_ticklabels()[::2]:\n label.set_visible(False)\n if val == \"Pulsar\":\n subplots[val].set_xlabel(\n r\"log $F_{\\mathrm{r}}/F_{\\mathrm{o}}$\", size=\"x-large\"\n )\n if val == \"X-ray binary\":\n subplots[val].set_ylabel(\"#\", size=\"x-large\")\n\n fig_hist.tight_layout()\n\n return fig_hist\n\n def frequency_histogram(self) -> plt.figure:\n \"\"\"Plot the frequency histogram.\n\n Returns:\n Histogram plot of the radio frequencies used.\n \"\"\"\n fig_freq = plt.figure(figsize=(9,6))\n ax = fig_freq.add_subplot(111)\n\n toplot=[\n \"CV\", \"Pulsar\", \"X-ray binary\", \"SN\", \"GRB\", \"Stellar\",\n \"Quasar (RADIO Sel.)\", \"Quasar (OPTICAL Sel.)\"\n ]\n toplot.reverse()\n\n labels=[\n \"CV\", \"Radio Pulsar\", \"X-ray binary\", \"SN\", \"GRB\",\n \"Stellar\", \"Quasar (RADIO Sel.)\", \"Quasar (OPTICAL Sel.)\"\n ]\n labels.reverse()\n\n histcolors=[self.mark_colors[i] for i in toplot]\n\n arraystoplot = []\n for i in toplot:\n filter_data = self.base_data_df[self.base_data_df[\"Type\"] == i]\n arraystoplot.append(\n filter_data['F Radio Freq GHz'].to_numpy()\n )\n\n ax.hist(\n arraystoplot,\n bins=range(1,11,1),\n alpha=1.0,\n label=labels,\n color=histcolors,\n edgecolor=\"None\"\n )\n\n ax.grid(True)\n ax.set_yscale('log')\n ax.set_ylim([0.1,1e5])\n\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(\"large\")\n\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(\"large\")\n\n ax.set_xlabel(\"Frequency (GHz)\", size=\"x-large\")\n ax.set_ylabel(\"#\", size=\"x-large\")\n ax.legend(prop={'size':12})\n\n return fig_freq\n\n def band_histogram(self) -> plt.figure:\n \"\"\"Plot the optical band magnitudes.\n\n Returns:\n Histogram plot of the optical bands.\n \"\"\"\n fig_band = plt.figure(figsize=(9,6))\n ax = fig_band.add_subplot(111)\n\n toplot=[\n \"CV\", \"Pulsar\", \"X-ray binary\", \"SN\", \"GRB\", \"Stellar\",\n \"Quasar (RADIO Sel.)\", \"Quasar (OPTICAL Sel.)\"\n ]\n toplot.reverse()\n\n labels=[\n \"CV\", \"Radio Pulsar\", \"X-ray binary\", \"SN\", \"GRB\",\n \"Stellar\", \"Quasar (RADIO Sel.)\", \"Quasar (OPTICAL Sel.)\"\n ]\n labels.reverse()\n\n histcolors = [self.mark_colors[i] for i in toplot]\n\n arraystoplot = []\n for i in toplot:\n filter_data = self.base_data_df[self.base_data_df[\"Type\"] == i]\n arraystoplot.append(\n filter_data['optical_mag_used_band'].to_numpy()\n )\n\n ax.hist(\n arraystoplot,\n bins=[-0.5, 0.5, 1.5],\n histtype='bar',\n alpha=1.0,\n label=labels,\n color=histcolors,\n edgecolor=\"None\"\n )\n\n ax.grid(True)\n ax.set_yscale('log')\n ax.set_xlabel(\"Optical Band\", size=\"x-large\")\n\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(\"large\")\n\n ax.set_ylabel(\"#\", size=\"x-large\")\n ax.set_ylim([0.1,1e5])\n ax.axvline(0.5, linestyle=\"--\", color='k')\n ax.invert_xaxis()\n ax.legend(loc=2, prop={'size': 9})\n\n return fig_band\n\n def qso_z_histogram(self) -> plt.figure:\n \"\"\"Plot the quasar redshift histogram.\n\n Returns:\n Histogram plot of the quasar redshift histogram.\n \"\"\"\n redshifts = self._load_qso_redshifts()\n\n objectstoplot=[\"Quasar (OPTICAL Sel.)\", \"Quasar (RADIO Sel.)\"]\n\n fig_qso_z = plt.figure(figsize=(9,6))\n ax = fig_qso_z.add_subplot(111)\n\n data_selection = self.base_data_df[\n self.base_data_df['Type'] == \"Quasar (RADIO Sel.)\"\n ].copy()\n data_selection['Name'] = data_selection['Name'].str.replace('PKS 
', '')\n\n PKS_z = redshifts[\n redshifts['name'].isin(data_selection['Name'])\n ]['z'].to_numpy()\n\n data_selection = self.base_data_df.loc[\n self.base_data_df['Type'] == \"Quasar (OPTICAL Sel.)\"\n ]\n\n sdss_r9_mask = data_selection['Subtype'] == \"SDSS_DR9\"\n sdss_r9_data = data_selection[sdss_r9_mask]\n SDSS_r9_z = redshifts[\n redshifts['name'].isin(sdss_r9_data['Name'])\n ]['z'].to_numpy()\n\n sdss_r7_data = data_selection[~sdss_r9_mask]\n SDSS_r7_z = redshifts[\n redshifts['name'].isin(sdss_r7_data['Name'])\n ]['z'].to_numpy()\n\n histcolors=[\n self.mark_colors[\"Quasar (OPTICAL Sel.)\"],\n self.mark_colors[\"CV\"],\n self.mark_colors[\"Quasar (RADIO Sel.)\"]\n ]\n\n toplot=[SDSS_r7_z, SDSS_r9_z, PKS_z]\n labels=[\n \"Quasar (OPTICAL Sel.) SDSS DR7\", \"Quasar (OPTICAL Sel.) SDSS DR9\",\n \"Quasar (RADIO Sel.) PKS\"\n ]\n\n ax.hist(\n toplot,\n bins=np.arange(0.0,7.5,0.5),\n histtype='bar',\n alpha=1.0,\n label=labels,\n edgecolor=\"None\",\n color=histcolors,\n rwidth=0.95\n )\n\n ax.set_ylabel(\"#\", size=\"x-large\")\n ax.set_xlabel(r\"$z$\", size=\"x-large\")\n ax.grid(True)\n ax.set_yscale('log')\n ax.legend(prop={'size':12})\n ax.set_ylim([0.06, 3000])\n\n return fig_qso_z\n\n def grb_z_histogram(self) -> plt.figure:\n \"\"\"Plot the GRB redshift histogram.\n\n Returns:\n Histogram plot of the GRB redshifts.\n \"\"\"\n grb_redshift_file = Path(pkg_resources.resource_filename(\n __name__, \"./data/GRB_redshifts.txt\"\n ))\n\n grb_redshifts = pd.read_csv(\n grb_redshift_file,\n names=['name', 'z'],\n dtype={'name': str, 'z': float}\n )\n\n data_selection = self.base_data_df.loc[\n self.base_data_df['Type'] == \"GRB\"\n ]\n\n usedgrbzeds = grb_redshifts[\n grb_redshifts['name'].isin(data_selection['Name'])\n ]['z'].to_numpy()\n\n fig_grb_z = plt.figure(figsize=(9,6))\n ax = fig_grb_z.add_subplot(111)\n ax.hist(\n usedgrbzeds,\n bins=np.arange(0.0,5.5,0.5),\n stacked=True,\n histtype='bar',\n alpha=1.0,\n label=\"GRBs\",\n edgecolor=\"None\",\n color=self.mark_colors[\"GRB\"],\n rwidth=0.95\n )\n\n ax.set_ylabel(\"#\", size=\"x-large\")\n ax.set_xlabel(r\"$z$\", size=\"x-large\")\n ax.grid(True)\n ax.legend(prop={'size':12})\n\n return fig_grb_z\n\n def stellar_distance_histogram(self) -> plt.figure:\n \"\"\"Plot the stellar distance histogram.\n\n Returns:\n Histogram plot of the stellar distances.\n \"\"\"\n if not self.group_stellar:\n raise ValueError(\n \"RadioOpticalPlot must be initialised with\"\n \" 'group_stellar=True' to create the stellar distance\"\n \" histogram.\"\n )\n\n s_dist = self._load_stellar_distances()\n s_dist['distance_conv'] = s_dist['distance'] / 1.e3\n\n data_selection = self.base_data_df.loc[\n self.base_data_df['Type'] == \"Stellar\"\n ]\n\n s_distances = s_dist[\n s_dist['object'].isin(data_selection['Name'])\n ]['distance_conv'].to_numpy()\n\n fig_stellar_dist = plt.figure(figsize=(9,6))\n ax = fig_stellar_dist.add_subplot(111)\n ax.hist(\n s_distances,\n bins=np.arange(0.,17.,1.0),\n stacked=True,\n histtype='bar',\n alpha=1.0,\n label=\"Stellar sources\",\n edgecolor=\"None\",\n color=self.mark_colors[\"Stellar\"],\n rwidth=0.95\n )\n\n ax.set_ylabel(\"#\", size=\"x-large\")\n ax.set_xlabel(\"Distance (kpc)\", size=\"x-large\")\n ax.grid(True)\n ax.set_yscale('log')\n ax.legend(prop={'size': 12})\n ax.set_ylim([0.8, 1000])\n\n return fig_stellar_dist\n\n def clear_current_fig(self) -> None:\n \"\"\"Resets the current figure.\n\n Returns:\n None.\n \"\"\"\n self.current_fig = None\n\n\n@docstring_inherit(RadioOpticalPlot)\nclass 
RadioOpticalTrackPlot(RadioOpticalPlot):\n \"\"\"The main plotting class for the Radio Optical plot with tracks.\n\n Attributes:\n packaged_tracks (Dict[str, str]): The names of objects with tracks\n available to plot.\n trackcolors (Dict[str, str]): What matplotlib colors to use for each\n class.\n trackmarkers (Dict[str, str]): What matplotlib markers to use for each\n class.\n \"\"\"\n def __init__(\n self,\n base_data_file: Optional[str] = None,\n extra_exclude_stellar_types: Optional[List[str]] = None,\n extra_ab_list: Optional[List[str]] = None,\n group_agn: bool = False,\n group_stellar: bool = False,\n transients_file: Optional[str] = None\n ) -> None:\n \"\"\"Init function.\n\n Args:\n base_data_file: The file containing the tab-separated master data.\n If 'None' is entered then the packaged latest master table\n will be used.\n extra_exclude_stellar_types: Extra stellar types to add to the\n stellar exclude list.\n extra_ab_list: Extra names to add to the AB list.\n group_agn: When 'True' the quasars are grouped together under\n the type 'Quasars'.\n group_stellar: When 'True' the stellar sources are all grouped\n under the type 'Stellar'.\n transients_file: Path to the transients file to load. Also accepts\n the names of the packaged transient files\n 'transient_master_table_04072013.txt' and 'Stripe82_QSOs.txt'.\n\n Returns:\n None.\n \"\"\"\n super().__init__(\n base_data_file, group_agn=group_agn, group_stellar=group_stellar,\n transients_file=transients_file\n )\n\n self.packaged_tracks = {\n \"V404 Cyg\": \"XRB\",\n \"GX 339-4\": \"XRB\",\n \"GRB 970508\": \"GRB\",\n \"GRB 991208\": \"GRB\",\n \"GRB 000301C\": \"GRB\",\n \"GRB 030329\": \"GRB\",\n \"GRB 050820A\": \"GRB\",\n \"GRB 060218\": \"GRB\",\n \"GRB 070125\": \"GRB\",\n \"GRB 100418A\": \"GRB\",\n \"BL Lacertae\": \"Quasar\",\n \"BL Lacertae (3 month)\": \"Quasar\",\n \"3C 454.3\": \"Quasar\",\n \"SN 1990B\": \"SN\",\n \"SN 1993J\": \"SN\",\n \"SN 1994I\": \"SN\",\n \"SN 1998bw\": \"SN\",\n \"SN 2002ap\": \"SN\",\n \"SN 2004dj\": \"SN\",\n \"SN 2004dk\": \"SN\",\n \"SN 2004et\": \"SN\",\n \"SN 2004gq\": \"SN\",\n \"SN 2007bg\": \"SN\",\n \"SN 2007gr\": \"SN\",\n \"SN 2007uy\": \"SN\",\n \"SN 2008D\": \"SN\",\n \"SN 2008ax\": \"SN\",\n \"SN 2009bb\": \"SN\",\n # \"SN 2011dh\": \"GRB\",\n \"GRO J0422+32\": \"XRB\",\n \"GRO J1655-40\": \"XRB\",\n \"GS 1124-684\": \"XRB\",\n # \"GS 1354-64\": \"GRB\",\n \"XTE J1550-564\": \"XRB\",\n \"XTE J1859+226\": \"XRB\",\n \"XTE J0421+560\": \"XRB\",\n \"RS Oph\": \"CV\",\n \"SS Cyg\": \"CV\",\n \"T Pyx\": \"CV\",\n \"V1500 Cyg\": \"CV\",\n \"V1974 Cyg\": \"CV\",\n }\n\n # Some attributes to manage the packaged tracks.\n self._cvs = [\n \"SS Cyg\", \"RS Oph\", \"T Pyx\", \"V1500 Cyg\", \"V1974 Cyg\"\n ]\n self._quasars = [\"BL Lacertae\", \"BL Lacertae (3 month)\", \"3C 454.3\"]\n # self._r_band_tracks = [\n # \"GRB 030329\", \"GRB 100418A\", \"SN 1993J\", \"SN 1994I\", \"SN 1998bw\"\n # ]\n self._track_ab_list = [\n \"GRB 010921\", \"GRB 051221A\", \"GRB 080319B\",\n \"GRB 081203B\", \"SN 2008D\"\n ]\n\n def list_packaged_tracks(self) -> None:\n \"\"\"List the source tracks available to load.\n\n Returns:\n None.\n \"\"\"\n pprint(self.packaged_tracks)\n\n def load_track_data(self, label: str) -> pd.DataFrame:\n \"\"\"Load the track data from the packaged files.\n\n Args:\n label: The name of the source to load.\n\n Returns:\n The DataFrame containing the track data.\n \"\"\"\n filenames = {\n \"RS Oph\": \"CV_RSOph_6.0_V_45days.txt\",\n \"SS Cyg\": 
\"CV_SSCyg_8.6_V_15days.txt\",\n \"T Pyx\": \"CV_TPyx_5_V_445days.txt\",\n \"V1500 Cyg\": \"CV_V1500Cyg_8.1_V_373days.txt\",\n \"V1974 Cyg\": \"CV_V1974Cyg_5.0_V_862days.txt\",\n \"GRB 000301C\": \"GRB_000301C_8.46_R_43days.txt\",\n \"GRB 030329\": \"GRB_030329_8.64_R_65days.txt\",\n \"GRB 050820A\": \"GRB_050820A_8.46_R_25days.txt\",\n \"GRB 060218\": \"GRB_060218_8.46_R_23days.txt\",\n \"GRB 070125\": \"GRB_070125_8.46_R_18days.txt\",\n \"GRB 100418A\": \"GRB_100418A_8.46_R_31days.txt\",\n \"GRB 970508\": \"GRB_970508_8.46_R_82days.txt\",\n \"GRB 991208\": \"GRB_991208_8.46_R_37days.txt\",\n \"3C 454.3\": \"Quasar_3C454_8GHz.txt\",\n \"BL Lacertae (3 month)\": \"Quasar_BLLac_5GHz-3month.txt\",\n \"BL Lacertae\": \"Quasar_BLLac_5GHz.txt\",\n \"SN 1990B\": \"SN_1990B_5.0_V_105days.txt\",\n \"SN 1993J\": \"SN_1993J_8.3_R_305days.txt\",\n \"SN 1994I\": \"SN_1994I_8.3_R_127days.txt\",\n \"SN 1998bw\": \"SN_1998bw_8.64_R_77days.txt\",\n \"SN 2002ap\": \"SN_2002ap_1.43_R_17days.txt\",\n \"SN 2004dj\": \"SN_2004dj_5.0_V_110days.txt\",\n \"SN 2004dk\": \"SN_2004dk_8.5_R_41days.txt\",\n \"SN 2004et\": \"SN_2004et_5.0_R_57days.txt\",\n \"SN 2004gq\": \"SN_2004gq_8.5_R_35days.txt\",\n \"SN 2007bg\": \"SN_2007bg_8.46_R_68days.txt\",\n \"SN 2007gr\": \"SN_2007gr_4.9_R_92days.txt\",\n \"SN 2007uy\": \"SN_2007uy_8.4_R_56days.txt\",\n \"SN 2008D\": \"SN_2008D_4.8_R_115days.txt\",\n \"SN 2008ax\": \"SN_2008ax_8.46_V_46days.txt\",\n \"SN 2009bb\": \"SN_2009bb_8.46_V_44days.txt\",\n \"GRO J0422+32\": \"XRB_GROJ0422+32_5.0_V_155days.txt\",\n \"GS 1124-684\": \"XRB_GS1124-684_4.7_R_13days.txt\",\n \"GX 339-4\": \"XRB_GX339_4_9GHz_80days.txt\",\n \"GRO J1655-40\": \"XRB_J1655402005_4.86_V_45days.txt\",\n \"V404 Cyg\": \"XRB_V404_8_3GHz_35days.txt\",\n \"XTE J0421+560\": \"XRB_XTEJ0421+560_8.0_R_17days.txt\",\n \"XTE J1550-564\": \"XRB_XTEJ1550-564_4.8_V_9days.txt\",\n \"XTE J1859+226\": \"XRB_XTEJ1859+226_2.25_R_74days.txt\",\n }\n\n try:\n file_name = filenames[label]\n except KeyError:\n raise KeyError(f'No packaged file found for object {label}!')\n\n track_file = Path(pkg_resources.resource_filename(\n __name__, f\"./data/dynamic_tracks/{file_name}\"\n ))\n\n track_df = pd.read_csv(track_file, comment=\"#\", sep=\"\\t\")\n\n return track_df, track_file\n\n def _process_track_df(self, data_df: pd.DataFrame, t: str) -> pd.DataFrame:\n \"\"\"Process the track data contained in the dataframe.\n\n Args:\n data_df: The DataFrame containing the track data.\n t: Source label (name).\n\n Returns:\n Processed track DataFrame.\n \"\"\"\n if (np.any(data_df['R'].isna()) or np.any(data_df['R'] == 0.)):\n data_df['optical_mag_used_band'] = 'V'\n data_df['optical_mag_used_value'] = data_df['V']\n else:\n data_df['optical_mag_used_band'] = 'R'\n data_df['optical_mag_used_value'] = data_df['R']\n\n if t not in self._track_ab_list:\n data_df['optical_mag_used_value_processed'] = (\n data_df[[\n 'optical_mag_used_band',\n 'optical_mag_used_value'\n ]].apply(ConvertToABMag_pd, axis=1)\n )\n else:\n data_df['optical_mag_used_value_processed'] = (\n data_df['optical_mag_used_value']\n )\n\n data_df[\"optical_in_mJy\"] = data_df[[\n \"optical_mag_used_value_processed\",\n \"optical_mag_used_band\"\n ]].apply(OpticaltomJy_pd, axis=1)\n\n return data_df\n\n def generate_track_plot(\n self,\n group_tracks: bool = False,\n only_tracks: Optional[List[str]] = None,\n exclude_tracks: Optional[List[str]] = None,\n only_types: Optional[List[str]] = None,\n summary_style: bool = False,\n start_end_only: bool = False,\n empty: bool = 
False,\n **kwargs\n ) -> plt.figure:\n \"\"\"The main function to generate a Radio Optical plot with a track.\n\n Args:\n group_tracks: Set to 'True' to group the tracks into classes,\n instead of individual sources.\n only_tracks: List of sources to plot - no others will be plot.\n exclude_tracks: List of sources to exclude from plotting. Can not\n be used long with 'only_tracks'.\n only_types: List of types to plot - no sources from types outside\n of those listed will be plotted.\n summary_style: If 'True' the plot is returned in summary style\n where start and ends won't be circled.\n start_end_only: Only plot the start and end points of the tracks.\n empty: Generate an empty plot - useful to plot custom tracks.\n **kwargs: Keyword arguments passed to generate_plot.\n\n Returns:\n The resulting figure.\n \"\"\"\n if only_tracks is not None and exclude_tracks is not None:\n warnings.warn(\n \"Both only_tracks and exclude_tracks options have been\"\n \" used. Ignoring 'exclude_tracks'.\"\n )\n\n exclude_tracks = None\n\n if only_tracks is None and exclude_tracks is None:\n tracks_to_plot = self.packaged_tracks\n\n elif only_tracks is not None:\n tracks_to_plot = [\n i for i in self.packaged_tracks if i in only_tracks\n ]\n\n else:\n tracks_to_plot = [\n i for i in self.packaged_tracks if i not in exclude_tracks\n ]\n\n plot_all = summary_style\n\n if group_tracks and not summary_style:\n warnings.warn(\n 'Summary style automatically selected when using group_tracks.'\n )\n plot_all = True\n\n if only_types is not None:\n for i in only_types:\n if i not in ['GRB', 'XRB', 'SN', 'CV', 'Quasar']:\n raise ValueError(\n f'{i} is not a valid class.'\n )\n\n tracks_to_plot2 = []\n for i in tracks_to_plot:\n if self.packaged_tracks[i] in only_types:\n tracks_to_plot2.append(i)\n\n tracks_to_plot = tracks_to_plot2\n\n if empty:\n tracks_to_plot = []\n\n\n if group_tracks:\n self.trackcolors = {\n \"SN\": self.mark_colors[\"SN\"],\n \"GRB\": self.mark_colors[\"GRB\"],\n \"XRB\": self.mark_colors[\"X-ray binary\"],\n \"CV\": self.mark_colors[\"CV\"],\n \"BL Lacertae\": \"#87CEEB\",\n \"BL Lacertae (3 month)\": \"b\",\n \"3C 454.3\": self.mark_colors[\"Quasar (RADIO Sel.)\"],\n \"Quasar\": self.mark_colors[\"Quasar (RADIO Sel.)\"],\n }\n\n self.trackmarkers = {\n \"SN\": self.markers[\"SN\"],\n \"GRB\": self.markers[\"GRB\"],\n \"XRB\": self.markers[\"X-ray binary\"],\n \"CV\": self.markers[\"CV\"],\n \"BL Lacertae\": \"o\",\n \"BL Lacertae (3 month)\": \"o\",\n \"3C 454.3\": \"o\",\n \"Quasar\": self.markers[\"Quasar (RADIO Sel.)\"]\n }\n\n else:\n self.trackcolors = {\n \"V404 Cyg\": self.mark_colors[\"X-ray binary\"],\n \"GX 339-4\": self.mark_colors[\"GRB\"],\n \"GRB 970508\": \"#6495ed\",\n \"GRB 991208\": \"#20b2aa\",\n \"GRB 000301C\": \"#dda0dd\",\n \"GRB 030329\": self.mark_colors[\"GRB\"],\n \"GRB 050820A\": \"#3cb371\",\n \"GRB 060218\": \"#daa520\",\n \"GRB 070125\": \"#f08080\",\n \"GRB 100418A\": self.mark_colors[\"Quasar (RADIO Sel.)\"],\n \"BL Lacertae\": \"#87CEEB\",\n \"BL Lacertae (3 month)\": \"b\",\n \"3C 454.3\": self.mark_colors[\"Quasar (RADIO Sel.)\"],\n \"SN 1990B\": \"#6495ed\",\n \"SN 1993J\": \"#20b2aa\",\n \"SN 1994I\": \"#dda0dd\",\n \"SN 1998bw\": self.mark_colors[\"GRB\"],\n \"SN 2002ap\": \"#3cb371\",\n \"SN 2004dj\": \"#daa520\",\n \"SN 2004dk\": \"#f08080\",\n \"SN 2004et\": self.mark_colors[\"Quasar (RADIO Sel.)\"],\n \"SN 2004gq\": \"#ba55d3\",\n \"SN 2007bg\": \"#87CEEB\",\n \"SN 2007gr\": '#ff8c00',\n \"SN 2007uy\": '#ee82ee',\n \"SN 2008D\": 'g',\n \"SN 
2008ax\": '#4682b4',\n \"SN 2009bb\": '#9370d8',\n # \"SN 2011dh\": '#9acd32',\n \"GRO J0422+32\": \"#6495ed\",\n \"GRO J1655-40\": \"#20b2aa\",\n \"GS 1124-684\": \"#dda0dd\",\n \"GS 1354-64\": self.mark_colors[\"GRB\"],\n \"XTE J1550-564\": \"#3cb371\",\n \"XTE J1859+226\": \"#daa520\",\n \"XTE J0421+560\": \"#f08080\",\n \"RS Oph\": \"#6495ed\",\n \"SS Cyg\": \"#20b2aa\",\n \"T Pyx\": \"#dda0dd\",\n \"V1500 Cyg\": self.mark_colors[\"GRB\"],\n \"V1974 Cyg\": \"#3cb371\"\n }\n\n self.trackmarkers = {\n \"V404 Cyg\": 'd',\n \"GX 339-4\": 'p',\n \"GRB 970508\": \"s\",\n \"GRB 991208\": \"o\",\n \"GRB 000301C\": \"v\",\n \"GRB 030329\": 'D',\n \"GRB 050820A\": \"^\",\n \"GRB 060218\": \"h\",\n \"GRB 070125\": \">\",\n \"GRB 100418A\": \"H\",\n \"SN 1990B\": \"s\",\n \"SN 1993J\": \"o\",\n \"SN 1994I\": 'v',\n \"SN 1998bw\": 'D',\n \"SN 2002ap\": \"^\",\n \"SN 2004dj\": \"h\",\n \"SN 2004dk\": \">\",\n \"SN 2004et\": \"H\",\n \"SN 2004gq\": \"<\",\n \"SN 2007bg\": \"d\",\n \"SN 2007gr\": (4,1,0),\n \"SN 2007uy\": (4,1,45),\n \"SN 2008D\": (6,1,0),\n \"SN 2008ax\": (6,1,90),\n \"SN 2009bb\": (8,1,0),\n \"SN 2011dh\": (12,1,0),\n \"BL Lacertae\": \"o\",\n \"BL Lacertae (3 month)\": \"o\",\n \"3C 454.3\": \"o\",\n \"GRO J0422+32\": \"s\",\n \"GRO J1655-40\": \"o\",\n \"GS 1124-684\": \"v\",\n \"GS 1354-64\": \"D\",\n \"XTE J1550-564\": \"^\",\n \"XTE J1859+226\": \"h\",\n \"XTE J0421+560\": \">\",\n \"RS Oph\": \"s\",\n \"SS Cyg\": \"o\",\n \"T Pyx\": \"v\",\n \"V1500 Cyg\": 'D',\n \"V1974 Cyg\": \"^\"\n }\n\n lightcurve_num = Counter()\n\n # allt_s = []\n fluxbins = {}\n lightcurves = {}\n alltracks_already_done = []\n\n # Generate background plot\n track_fig = self.generate_plot(**kwargs)\n\n ax1 = track_fig.get_axes()[0]\n\n\n for t in tracks_to_plot:\n data_df, file_name = self.load_track_data(t)\n\n # Skip 3 month if plotting all\n if plot_all and \"(3 month)\" in t:\n continue\n\n days = file_name.name.split(\"_\")[-1].split(\".\")[0].split(\"days\")[0]\n\n data_df[\"radio\"] = data_df[\"RadioFlux\"]\n\n if t == \"GRB 050820A\":\n data_df['radio'] = data_df['radio'] / 1.e3\n\n self._process_track_df(data_df, t)\n\n if \"(3 month)\" in t:\n zo = 16\n else:\n zo = 15\n\n if group_tracks:\n if \"GRB\" in t:\n t = \"GRB\"\n elif \"SN\" in t:\n t = \"SN\"\n elif t in self._cvs:\n t = \"CV\"\n elif (\"BL Lacertae\" in t or t == \"3C 454.3\"):\n t = \"Quasar\"\n else:\n t = \"XRB\"\n\n lightcurve_num[t] += 1\n\n if start_end_only:\n size = 600\n else:\n size = 150\n else:\n size = 600\n\n if t in self.trackcolors:\n if not start_end_only:\n ax1.plot(\n data_df['optical_in_mJy'],\n data_df['radio'],\n color=self.trackcolors[t],\n lw=3.0,\n linestyle=\"--\",\n zorder=zo\n )\n if not plot_all:\n ax1.scatter(\n data_df['optical_in_mJy'],\n data_df['radio'],\n marker=self.trackmarkers[t],\n color=self.trackcolors[t],\n s=size,\n zorder=zo,\n label=t+\" ({0} days)\".format(days)\n )\n elif t not in alltracks_already_done:\n if start_end_only:\n ax1.scatter(\n [\n data_df['optical_in_mJy'].to_numpy()[0],\n data_df['optical_in_mJy'].to_numpy()[-1]\n ],\n [\n data_df['radio'].to_numpy()[0],\n data_df['radio'].to_numpy()[-1]\n ],\n marker=self.trackmarkers[t],\n color=self.trackcolors[t],\n s=size,\n zorder=zo,\n label=t\n )\n else:\n ax1.scatter(\n data_df['optical_in_mJy'],\n data_df['radio'],\n marker=self.trackmarkers[t],\n color=self.trackcolors[t],\n s=size,\n zorder=zo,\n label=t\n )\n alltracks_already_done.append(t)\n else:\n if start_end_only:\n ax1.scatter(\n [\n 
data_df['optical_in_mJy'].to_numpy()[0],\n data_df['optical_in_mJy'].to_numpy()[-1]\n ],\n [\n data_df['radio'].to_numpy()[0],\n data_df['radio'].to_numpy()[-1]\n ],\n marker=self.trackmarkers[t],\n color=self.trackcolors[t],\n s=size,\n zorder=zo\n )\n else:\n ax1.scatter(\n data_df['optical_in_mJy'],\n data_df['radio'],\n marker=self.trackmarkers[t],\n color=self.trackcolors[t],\n s=size,\n zorder=zo\n )\n else:\n ax1.plot(\n data_df['optical_in_mJy'],\n data_df['radio'],\n lw=1.5,\n linestyle=\"--\",\n zorder=zo\n )\n ax1.scatter(\n data_df['optical_in_mJy'],\n data_df['radio'],\n s=size,\n zorder=zo,\n label=t+\" ({0} days)\".format(days)\n )\n\n if t == \"GRB 991208\":\n data_df['Date'] = data_df['Date'] + 2\n\n if \"BL Lacertae\" not in t and \"3C 454.3\" not in t:\n addtracklabels(\n data_df['optical_in_mJy'].to_numpy(),\n data_df['radio'].to_numpy(),\n ax1,\n data_df['Date'].to_numpy(),\n t.split(\" \")[0],\n plot_all,\n t,\n self._cvs,\n start_end_only,\n lightcurve_num[t],\n t\n )\n\n lightcurves[t] = {\n \"time\":data_df['Name'],\n \"radio\":data_df['radio'],\n \"optical\":data_df['optical_in_mJy']\n }\n\n\n self.add_second_legend(ax1)\n\n self.current_fig = track_fig\n\n return self.current_fig\n\n\n def add_custom_track(\n self,\n df: pd.DataFrame,\n name: str,\n marker = \"o\",\n color = 'tab:blue',\n markersize: int = 150,\n ab: bool = True,\n **kwargs\n ) -> plt.figure:\n \"\"\"Add a custom track from a DataFrame to the current figure.\n\n The required columns in the DataFrame are:\n - 'Name'\n - 'Date'\n - 'R' or 'V'\n - 'radio'\n\n If not figure has been generated then a figure will be generated.\n Options for the plot can be declared using **kwargs.\n\n Args:\n df: The DataFrame containing the custom track data.\n name: The name of the object. Will be used as the label.\n marker (maker): The marker to use for the track.\n color (color): The colour to use for the track.\n markersize: The size of the marker\n ab: Set to 'False' if the magnitude is not in the AB system.\n **kwargs: Keyword arguments passed to generate plot there is no\n current plot.\n\n Returns:\n The resulting custom track figure.\n \"\"\"\n\n if self.current_fig is None:\n self.current_fig = self.generate_plot(**kwargs)\n\n if ab:\n self._track_ab_list.append(name)\n\n data_df = self._process_track_df(df, name)\n\n days = data_df['Date'].iloc[-1] - data_df['Date'].iloc[0]\n\n ax1 = self.current_fig.get_axes()[0]\n\n ax1.plot(\n data_df['optical_in_mJy'],\n data_df['radio'],\n color=color,\n lw=3.0,\n linestyle=\"--\",\n zorder=17\n )\n\n ax1.scatter(\n data_df['optical_in_mJy'],\n data_df['radio'],\n marker=marker,\n color=color,\n s=markersize,\n zorder=17,\n label=name+\" ({0} days)\".format(days)\n )\n\n addtracklabels(\n data_df['optical_in_mJy'].to_numpy(),\n data_df['radio'].to_numpy(),\n ax1,\n data_df['Date'].to_numpy(),\n name.split(\" \")[0],\n False,\n name,\n self._cvs,\n False,\n 1,\n name,\n zorder=18\n )\n\n self.add_second_legend(ax1)\n\n return self.current_fig\n" ]
[ [ "matplotlib.patches.Ellipse", "pandas.read_csv", "matplotlib.patches.Arc", "matplotlib.patches.Polygon", "numpy.sqrt", "numpy.logspace", "numpy.arange", "numpy.median", "matplotlib.lines.Line2D", "matplotlib.font_manager.FontProperties", "numpy.std", "numpy.any", "numpy.where", "matplotlib.pyplot.figure" ] ]
chart21/BARFED
[ "803baafc9a0379cdd94f337be8fe39c890098624" ]
[ "MNIST/mnist_byz_attack.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.utils.data import TensorDataset\nfrom torch.utils.data import DataLoader\nimport numpy as np\nfrom fl_utils import distribute_data as dd\nfrom fl_utils import train_nodes as tn\nfrom fl_utils import construct_models as cm\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(\"device: \", device)\n\nnumber_of_samples = 100 # number of participants\n\nis_noniid = True\nif is_noniid:\n n = 2\n min_n_each_node = 2\nelse:\n n = 10\n min_n_each_node = 10\n\nis_cnn = False\nis_organized = True\nhostile_node_percentage = 0.20 #malicious participant ratio\nbyzantine_mean =0\nbyzantine_std =1\n\niteration_num = 200 # number of communication rounds\nlearning_rate = 0.01\n\nweight_decay = 0.0001\nnumEpoch = 10\nbatch_size = 32\nmomentum = 0.9\n\nseed = 1\nuse_seed = 23\nhostility_seed = 88\nconverters_seed = 121\nbyzantine_seed =25\nfactor = 1.5\n\ntrain_amount = 4000\nvalid_amount = 900\ntest_amount = 890\nprint_amount = 3\n\n\nx_train, y_train, x_valid, y_valid, x_test, y_test = dd.load_mnist_data()\nx_test, y_test = dd.get_equal_size_test_data_from_each_label(x_test, y_test, min_amount=test_amount)\n\nx_train, y_train, x_valid, y_valid, x_test, y_test = map(torch.tensor,\n (x_train, y_train, x_valid, y_valid, x_test, y_test))\n\n##train\nlabel_dict_train = dd.split_and_shuffle_labels(y_data=y_train, seed=seed, amount=train_amount)\nnode_label_info_train, total_label_occurences_train, amount_info_table_train = dd.get_info_for_distribute_non_iid_with_different_n_and_amount(\n number_of_samples=number_of_samples, n=n, amount=train_amount, seed=use_seed, min_n_each_node=min_n_each_node)\n\nx_train_dict, y_train_dict = dd.distribute_mnist_data_to_participants(label_dict=label_dict_train,\n amount=train_amount,\n number_of_samples=number_of_samples,\n n=n, x_data=x_train,\n y_data=y_train,\n node_label_info=node_label_info_train,\n amount_info_table=amount_info_table_train,\n x_name=\"x_train\",\n y_name=\"y_train\",\n is_cnn=is_cnn)\n\n## test\nlabel_dict_test = dd.split_and_shuffle_labels(y_data=y_test, seed=seed, amount=test_amount)\nnode_label_info_test, total_label_occurences_test, amount_info_table_test = dd.get_info_for_distribute_non_iid_with_different_n_and_amount(\n number_of_samples=number_of_samples,\n n=n, amount=test_amount, seed=use_seed, min_n_each_node=min_n_each_node)\nx_test_dict, y_test_dict = dd.distribute_mnist_data_to_participants(label_dict=label_dict_test,\n amount=test_amount,\n number_of_samples=number_of_samples,\n n=n, x_data=x_test,\n y_data=y_test,\n node_label_info=node_label_info_test,\n amount_info_table=amount_info_table_test,\n x_name=\"x_test\",\n y_name=\"y_test\", is_cnn=is_cnn)\n\nif is_cnn:\n reshape_size = int(np.sqrt(x_train.shape[1]))\n x_train = x_train.view(-1, 1, reshape_size, reshape_size)\n x_valid = x_valid.view(-1, 1, reshape_size, reshape_size)\n x_test = x_test.view(-1, 1, reshape_size, reshape_size)\n\ntrain_ds = TensorDataset(x_train, y_train)\ntrain_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)\n\ntest_ds = TensorDataset(x_test, y_test)\ntest_dl = DataLoader(test_ds, batch_size=batch_size * 2)\n\nif is_cnn:\n main_model = cm.Netcnn()\nelse:\n main_model = cm.Net2nn()\n\nmain_model = main_model.to(device)\ncm.weights_init(main_model)\n\nmain_optimizer = torch.optim.SGD(main_model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)\nmain_criterion = nn.CrossEntropyLoss()\nmodel_dict, optimizer_dict, criterion_dict = 
tn.create_model_optimizer_criterion_dict_for_mnist(number_of_samples, learning_rate,\n momentum, device, is_cnn)\n\n\ntest_accuracies_of_each_iteration = np.array([], dtype=float)\n\nbyzantine_node_list = dd.get_byzantine_node_list(hostile_node_percentage, number_of_samples, hostility_seed)\nnp.random.seed(byzantine_seed)\nbyzantine_seeds_array = np.random.choice(5000, size=iteration_num, replace=False)\n\nfor iteration in range(iteration_num):\n\n model_dict = tn.send_main_model_to_nodes_and_update_model_dict(main_model, model_dict,\n number_of_samples)\n\n\n if is_organized:\n iteration_byzantine_seed = byzantine_seeds_array[iteration]\n else:\n iteration_byzantine_seed =None\n\n tn.start_train_end_node_process_byzantine(number_of_samples, x_train_dict, y_train_dict, x_test_dict, y_test_dict,\n batch_size, model_dict, criterion_dict, optimizer_dict,\n numEpoch, byzantine_node_list, byzantine_mean, byzantine_std,\n device, iteration_byzantine_seed)\n\n main_model = tn.set_averaged_weights_as_main_model_weights_and_update_main_model(main_model, model_dict, device)\n test_loss, test_accuracy = tn.validation(main_model, test_dl, main_criterion, device)\n\n test_accuracies_of_each_iteration = np.append(test_accuracies_of_each_iteration, test_accuracy)\n print(\"Iteration\", str(iteration + 1), \": main_model accuracy on all test data: {:7.4f}\".format(test_accuracy))\n" ]
[ [ "torch.nn.CrossEntropyLoss", "numpy.sqrt", "numpy.random.seed", "numpy.random.choice", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "numpy.append", "torch.cuda.is_available", "numpy.array" ] ]
mohi7solanki/pywonderland
[ "2b9d61a8414d4cfa92d34325e5e2b9b5d501abca" ]
[ "src/polytopes/models.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nClasses for building models of 3D/4D polytopes\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nSee the doc: \"https://neozhaoliang.github.io/polytopes/\"\n\n\"\"\"\nfrom itertools import combinations\nimport numpy as np\nimport helpers\nfrom todd_coxeter import CosetTable\n\n\nclass BasePolytope(object):\n\n \"\"\"\n Base class for building polyhedron and polychoron using\n Wythoff's construction.\n \"\"\"\n\n def __init__(self, coxeter_diagram, init_dist, extra_relations=()):\n \"\"\"\n parameters\n ----------\n :coxeter_diagram: Coxeter diagram for this polytope.\n :init_dist: distances between the initial vertex and the mirrors.\n\n :extra_relations: a presentation of a star polytope can be obtained by\n imposing more relations on the generators. For example \"(ρ0ρ1ρ2ρ1)^n = 1\"\n for some integer n, where n is the number of sides of a hole.\n See Coxeter's article\n\n \"Regular skew polyhedra in three and four dimensions,\n and their topological analogues\"\n\n \"\"\"\n # Coxeter matrix of the symmetry group\n self.coxeter_matrix = helpers.get_coxeter_matrix(coxeter_diagram)\n self.mirrors = helpers.get_mirrors(coxeter_diagram)\n # reflection transformations about the mirrors\n self.reflections = tuple(helpers.reflection_matrix(v) for v in self.mirrors)\n # the initial vertex\n self.init_v = helpers.get_init_point(self.mirrors, init_dist)\n # a mirror is active if and only if the initial vertex is off it\n self.active = tuple(bool(x) for x in init_dist)\n\n # generators of the symmetry group\n self.symmetry_gens = tuple(range(len(self.coxeter_matrix)))\n # relations between the generators\n self.symmetry_rels = tuple((i, j) * self.coxeter_matrix[i][j]\n for i, j in combinations(self.symmetry_gens, 2))\n\n self.symmetry_rels += tuple(extra_relations)\n\n # to be calculated later\n self.vtable = None\n self.num_vertices = None\n self.vertex_coords = []\n\n self.num_edges = None\n self.edge_indices = []\n self.edge_coords = []\n\n self.num_faces = None\n self.face_indices = []\n self.face_coords = []\n\n def build_geometry(self):\n self.get_vertices()\n self.get_edges()\n self.get_faces()\n\n def get_vertices(self):\n \"\"\"\n This method computes the following data that will be needed later:\n 1. a coset table for the symmetry group.\n 2. a complete list of word representations of the symmetry group.\n 3. coordinates of the vertices.\n \"\"\"\n # generators of the stabilizing subgroup that fixes the initial vertex.\n vgens = [(i,) for i, active in enumerate(self.active) if not active]\n self.vtable = CosetTable(self.symmetry_gens, self.symmetry_rels, vgens)\n self.vtable.run()\n self.vwords = self.vtable.get_words() # get word representations of the vertices\n self.num_vertices = len(self.vwords)\n # use words in the symmetry group to transform the initial vertex to other vertices.\n self.vertex_coords = tuple(self.transform(self.init_v, w) for w in self.vwords)\n\n def get_edges(self):\n \"\"\"\n This method computes the indices and coordinates of all edges.\n\n 1. if the initial vertex lies on the i-th mirror then the reflection\n about this mirror fixes v0 and there are no edges of type i.\n\n 2. 
else v0 and its virtual image v1 about this mirror generates a base\n edge e, the stabilizing subgroup of e is generated by a single word (i,),\n again we use Todd-Coxeter's procedure to get a complete list of word\n representations for the edges of type i and use them to transform e to other edges.\n \"\"\"\n for i, active in enumerate(self.active):\n if active: # if there are edges of type i\n egens = [(i,)] # generators of the stabilizing subgroup that fixes the base edge.\n etable = CosetTable(self.symmetry_gens, self.symmetry_rels, egens)\n etable.run()\n words = etable.get_words() # get word representations of the edges\n elist = []\n for word in words:\n v1 = self.move(0, word)\n v2 = self.move(0, (i,) + word)\n # avoid duplicates\n if (v1, v2) not in elist and (v2, v1) not in elist:\n elist.append((v1, v2))\n\n self.edge_indices.append(elist)\n self.edge_coords.append([(self.vertex_coords[x], self.vertex_coords[y])\n for x, y in elist])\n self.num_edges = sum(len(elist) for elist in self.edge_indices)\n\n def get_faces(self):\n \"\"\"\n This method computes the indices and coordinates of all faces.\n\n The composition of the i-th and the j-th reflection is a rotation\n which fixes a base face f. The stabilizing group of f is generated\n by [(i,), (j,)].\n \"\"\"\n for i, j in combinations(self.symmetry_gens, 2):\n f0 = []\n m = self.coxeter_matrix[i][j]\n fgens = [(i,), (j,)]\n if self.active[i] and self.active[j]:\n for k in range(m):\n # rotate the base edge m times to get the base f,\n # where m = self.coxeter_matrix[i][j]\n f0.append(self.move(0, (i, j) * k))\n f0.append(self.move(0, (j,) + (i, j) * k))\n elif (self.active[i] or self.active[j]) and m > 2:\n for k in range(m):\n f0.append(self.move(0, (i, j) * k))\n else:\n continue\n\n ftable = CosetTable(self.symmetry_gens, self.symmetry_rels, fgens)\n ftable.run()\n words = ftable.get_words()\n flist = []\n for word in words:\n f = tuple(self.move(v, word) for v in f0)\n if not helpers.check_duplicate_face(f, flist):\n flist.append(f)\n\n self.face_indices.append(flist)\n self.face_coords.append([tuple(self.vertex_coords[x] for x in face)\n for face in flist])\n\n self.num_faces = sum(len(flist) for flist in self.face_indices)\n\n def transform(self, vector, word):\n \"\"\"Transform a vector by a word in the symmetry group.\n \"\"\"\n for w in word:\n vector = np.dot(vector, self.reflections[w])\n return vector\n\n def move(self, vertex, word):\n \"\"\"Transform a vertex by a word in the symmetry group.\n Return the index of the resulting vertex.\n \"\"\"\n for w in word:\n vertex = self.vtable[vertex][w]\n return vertex\n\n def get_latex_format(self, symbol=r\"\\rho\", cols=3, snub=False):\n \"\"\"Return the words of the vertices in latex format.\n `cols` is the number of columns of the output latex array.\n \"\"\"\n def to_latex(word):\n if not word:\n return \"e\"\n else:\n if snub:\n return \"\".join(symbol + \"_{{{}}}\".format(i // 2) for i in word)\n else:\n return \"\".join(symbol + \"_{{{}}}\".format(i) for i in word)\n\n latex = \"\"\n for i, word in enumerate(self.vwords):\n if i > 0 and i % cols == 0:\n latex += r\"\\\\\"\n latex += to_latex(word)\n if i % cols != cols - 1:\n latex += \"&\"\n\n return r\"\\begin{{array}}{{{}}}{}\\end{{array}}\".format(\"l\" * cols, latex)\n\n\nclass Polyhedra(BasePolytope):\n \"\"\"\n Base class for 3d polyhedron.\n \"\"\"\n\n def __init__(self, coxeter_diagram, init_dist, extra_relations=()):\n if not len(coxeter_diagram) == len(init_dist) == 3:\n raise ValueError(\"Length error: 
the inputs must all have length 3\")\n super().__init__(coxeter_diagram, init_dist, extra_relations)\n\n\nclass Snub(Polyhedra):\n \"\"\"\n A snub polyhedra is generated by the subgroup that consists of only\n rotations in the full symmetry group. This subgroup has presentation\n\n <r, s | r^p = s^q = (rs)^2 = 1>\n\n where r = ρ0ρ1, s = ρ1ρ2 are two rotations.\n Again we solve all words in this subgroup and then use them to\n transform the initial vertex to get all vertices.\n \"\"\"\n\n def __init__(self, coxeter_diagram, init_dist=(1.0, 1.0, 1.0)):\n super().__init__(coxeter_diagram, init_dist, extra_relations=())\n # the representaion is not in the form of a Coxeter group,\n # we must overwrite the relations.\n self.symmetry_gens = (0, 1, 2, 3)\n self.symmetry_rels = ((0,) * self.coxeter_matrix[0][1],\n (2,) * self.coxeter_matrix[1][2],\n (0, 2) * self.coxeter_matrix[0][2],\n (0, 1), (2, 3))\n # order of the generator rotations\n self.rotations = {(0,): self.coxeter_matrix[0][1],\n (2,): self.coxeter_matrix[1][2],\n (0, 2): self.coxeter_matrix[0][2]}\n\n def get_vertices(self):\n self.vtable = CosetTable(self.symmetry_gens, self.symmetry_rels, coxeter=False)\n self.vtable.run()\n self.vwords = self.vtable.get_words()\n self.num_vertices = len(self.vwords)\n self.vertex_coords = tuple(self.transform(self.init_v, w) for w in self.vwords)\n\n def get_edges(self):\n for rot in self.rotations:\n elist = []\n e0 = (0, self.move(0, rot))\n for word in self.vwords:\n e = tuple(self.move(v, word) for v in e0)\n if e not in elist and e[::-1] not in elist:\n elist.append(e)\n\n self.edge_indices.append(elist)\n self.edge_coords.append([(self.vertex_coords[i], self.vertex_coords[j])\n for i, j in elist])\n self.num_edges = sum(len(elist) for elist in self.edge_indices)\n\n def get_faces(self):\n orbits = (tuple(self.move(0, (0,) * k) for k in range(self.rotations[(0,)])),\n tuple(self.move(0, (2,) * k) for k in range(self.rotations[(2,)])),\n (0, self.move(0, (2,)), self.move(0, (0, 2))))\n for f0 in orbits:\n flist = []\n for word in self.vwords:\n f = tuple(self.move(v, word) for v in f0)\n if not helpers.check_duplicate_face(f, flist):\n flist.append(f)\n\n self.face_indices.append(flist)\n self.face_coords.append([tuple(self.vertex_coords[v] for v in face)\n for face in flist])\n\n self.num_faces = sum([len(flist) for flist in self.face_indices])\n\n def transform(self, vertex, word):\n for g in word:\n if g == 0:\n vertex = np.dot(vertex, self.reflections[0])\n vertex = np.dot(vertex, self.reflections[1])\n else:\n vertex = np.dot(vertex, self.reflections[1])\n vertex = np.dot(vertex, self.reflections[2])\n return vertex\n\n\nclass Polychora(BasePolytope):\n \"\"\"\n Base class for 4d polychoron.\n \"\"\"\n\n def __init__(self, coxeter_diagram, init_dist, extra_relations=()):\n if not (len(coxeter_diagram) == 6 and len(init_dist) == 4):\n raise ValueError(\"Length error: the input coxeter_diagram must have length 6 and init_dist has length 4\")\n super().__init__(coxeter_diagram, init_dist, extra_relations)\n\n\nclass Snub24Cell(Polychora):\n \"\"\"The snub 24-cell can be constructed from snub demitesseract [3^(1,1,1)]+,\n the procedure is similar with snub polyhedron above.\n Its symmetric group is generated by three rotations {r, s, t}, a presentation\n is\n G = <r, s, t | r^3 = s^3 = t^3 = (rs)^2 = (rt)^2 = (s^-1 t)^2 = 1>\n\n where r = ρ0ρ1, s = ρ1ρ2, t = ρ1ρ3.\n \"\"\"\n\n def __init__(self):\n coxeter_diagram = (3, 2, 2, 3, 3, 2)\n coxeter_matrix = 
helpers.make_symmetry_matrix(coxeter_diagram)\n mirrors = helpers.get_mirrors(coxeter_diagram)\n super().__init__(coxeter_matrix, mirrors, (1, 1, 1, 1), extra_relations=())\n self.symmetry_gens = tuple(range(6))\n self.symmetry_rels = ((0,) * 3, (2,) * 3, (4,) * 3,\n (0, 2) * 2, (0, 4) * 2, (3, 4) * 2,\n (0, 1), (2, 3), (4, 5))\n self.rotations = ((0,), (2,), (4,), (0, 2), (0, 4), (3, 4))\n\n def get_vertices(self):\n self.vtable = CosetTable(self.symmetry_gens, self.symmetry_rels, coxeter=False)\n self.vtable.run()\n self.vwords = self.vtable.get_words()\n self.num_vertices = len(self.vwords)\n self.vertex_coords = tuple(self.transform(self.init_v, w) for w in self.vwords)\n\n def get_edges(self):\n for rot in self.rotations:\n elist = []\n e0 = (0, self.move(0, rot))\n for word in self.vwords:\n e = tuple(self.move(v, word) for v in e0)\n if e not in elist and e[::-1] not in elist:\n elist.append(e)\n\n self.edge_indices.append(elist)\n self.edge_coords.append([(self.vertex_coords[i], self.vertex_coords[j])\n for i, j in elist])\n self.num_edges = sum(len(elist) for elist in self.edge_indices)\n\n def get_faces(self):\n orbits = (tuple(self.move(0, (0,) * k) for k in range(3)),\n tuple(self.move(0, (2,) * k) for k in range(3)),\n tuple(self.move(0, (4,) * k) for k in range(3)),\n (0, self.move(0, (2,)), self.move(0, (0, 2))),\n (0, self.move(0, (4,)), self.move(0, (0, 4))),\n (0, self.move(0, (2,)), self.move(0, (5, 2))),\n (0, self.move(0, (0, 2)), self.move(0, (5, 2))))\n for f0 in orbits:\n flist = []\n for word in self.vwords:\n f = tuple(self.move(v, word) for v in f0)\n if not helpers.check_duplicate_face(f, flist):\n flist.append(f)\n\n self.face_indices.append(flist)\n self.face_coords.append([tuple(self.vertex_coords[v] for v in face)\n for face in flist])\n\n self.num_faces = sum([len(flist) for flist in self.face_indices])\n\n def transform(self, vertex, word):\n for g in word:\n if g == 0:\n vertex = np.dot(vertex, self.reflections[0])\n vertex = np.dot(vertex, self.reflections[1])\n elif g == 2:\n vertex = np.dot(vertex, self.reflections[1])\n vertex = np.dot(vertex, self.reflections[2])\n else:\n vertex = np.dot(vertex, self.reflections[1])\n vertex = np.dot(vertex, self.reflections[3])\n return vertex\n\n\nclass Catalan3D(object):\n \"\"\"Construct the dual 3d Catalan solid from a given uniform polytope.\n The computation of edges and faces of this dual shape are all done\n with integer arithmetic so floating comparison is avoided.\n \"\"\"\n def __init__(self, P):\n \"\"\"`P`: a 3d polyhedra.\n \"\"\"\n if len(P.coxeter_matrix) != 3:\n raise ValueError(\"A 3d polyhedra is expected\")\n self.P = P\n\n self.num_vertices = None\n self.vertex_coords = [] # [[v1, v2, ...], [v_k, ...]]\n self.vertex_coords_flatten = [] # [v1, v2, ..., vk, ...]\n\n self.num_edges = None\n self.edge_indices = []\n\n self.num_faces = None\n self.face_indices = []\n\n def build_geometry(self):\n self.P.build_geometry()\n self.num_vertices = self.P.num_faces\n self.num_edges = self.P.num_edges\n self.num_faces = self.P.num_vertices\n self.get_vertices()\n self.get_edges()\n self.get_faces()\n\n def get_vertices(self):\n \"\"\"The vertices in the Catalan solid are in one-to-one correspondence\n with P's faces.\n \"\"\"\n for flist in self.P.face_coords:\n vlist = []\n for face in flist:\n # for each face of P, compute the normal of P\n normal = helpers.get_face_normal(face)\n # compute the dot of the vertices with the normal\n inn = sum([np.dot(normal, p) for p in face]) / len(face)\n # divide the 
reciprocal, this is the vertex of the dual solid\n v = normal / inn\n vlist.append(v)\n self.vertex_coords_flatten.append(v)\n\n self.vertex_coords.append(vlist)\n\n def get_edges(self):\n \"\"\"Two face centers are connected by an edge if and only if\n their faces are adjacent in P.\n \"\"\"\n P_faces_flatten = [face for flist in self.P.face_indices for face in flist]\n for elist_P in self.P.edge_indices:\n elist = []\n for eP in elist_P:\n e = helpers.find_face_by_edge(eP, P_faces_flatten)\n if e is not None:\n elist.append(e)\n self.edge_indices.append(elist)\n\n def get_faces(self):\n \"\"\"A set of face centers form a face in the Catalan solid if and\n only if their faces surround a common vertex in P.\n \"\"\"\n P_faces_flatten = [face for flist in self.P.face_indices for face in flist]\n for v in range(self.P.num_vertices):\n # for each vertex of v of P, find P' faces that surround v, their indices\n # are stored in f.\n f = []\n for i, face in enumerate(P_faces_flatten):\n if v in face:\n f.append(i)\n # the faces in f may not be in the right order,\n # rearrange them so that f0 and f1 are adjacent, f1 and f2 are adjacent, ... etc.\n nsides = len(f)\n v0 = f[0]\n new_face = [v0]\n visited = set([v0])\n while len(new_face) < nsides:\n v = new_face[-1]\n for u in f:\n if u not in visited and helpers.has_common_edge(P_faces_flatten[v], P_faces_flatten[u]):\n new_face.append(u)\n visited.add(u)\n break\n\n self.face_indices.append(tuple(new_face))\n" ]
[ [ "numpy.dot" ] ]
d-murashkin/sentinel1_routines
[ "bf521dc96193420d88be0c1510ca03481018f3e2" ]
[ "download.py" ]
[ "\"\"\"\nScript for searching and downloading data from sentinel satellites.\ndhusget.sh script from the Copernicus web-page is used.\nSingle scenes can be downloaded from https://datapool.asf.alaska.edu\n\"\"\"\n\n__author__ = 'Dmitrii Murashkin'\n__email__ = '[email protected]'\n\nimport os\nimport subprocess\nimport datetime\nimport shutil\nimport stat\n\nimport pandas as pd\n\nfrom .scene_management import get_scene_folder\nfrom .scene_management import get_date_folder\n\n\ndef set_dir(dir_path):\n \"\"\" Set folder. Create it if it does not exist. \"\"\"\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n return dir_path\n\n\ndef create_list_of_products(llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, start_date, end_date, login, password, lock_folder='', return_path=False):\n \"\"\" Return list of products that fit the specified rectangle and sensing date.\n \"\"\"\n \"\"\" Ensure that time variables are of the datetime.date type. \"\"\"\n if not ((type(start_date) == datetime.date) or (type(start_date) == datetime.datetime)) and ((type(end_date) == datetime.date) or (type(end_date) == datetime.date)):\n print('start_date and end_date are expected to be of the datetime.date type.')\n return False\n\n list_of_products = []\n page = 1\n while True:\n subprocess.call(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dhusget.sh') + ' -u {7} -p {8} -L {9} -m Sentinel-1 -c {0},{1}:{2},{3} -T GRD -F \"*_GRDM_*\" -S {5}T00:00:00.000Z -E {6}T00:00:00.000Z -l 100 -P {4}'.format(llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, page, start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d'), login, password, lock_folder), shell=True)\n try:\n df = pd.read_csv('products-list.csv', header=None, names=['name', 'address'])\n except:\n break\n if df.empty:\n break\n list_of_products.append(df)\n page += 1\n result = pd.concat(list_of_products)\n if return_path:\n return [{'name': name, 'address': address} for name, address in zip(result['name'].tolist(), result['address'].tolist())]\n return result['name'].tolist()\n\n\ndef download_products(fld, llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, start_date, end_date, login, password, lock_folder='', n=2):\n \"\"\" Deprecated.\n Ensure that time variables are of the datetime.date type. \"\"\"\n if not ((type(start_date) == datetime.date) or (type(start_date) == datetime.datetime)) and ((type(end_date) == datetime.date) or (type(end_date) == datetime.date)):\n print('start_date and end_date are expected to be of the datetime.date or the datetime.datetime type.')\n return False\n \n if not lock_folder:\n lock_folder = './dhusget_lock/'\n print('lock folder: {0}'.format(lock_folder))\n \n dhusget = os.path.join(fld, 'dhusget.sh')\n shutil.copyfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dhusget.sh'), dhusget)\n st = os.stat(dhusget)\n try:\n os.chmod(dhusget, st.st_mode | stat.S_IEXEC)\n except:\n \"If Marcus owns the files, it's fine. 
Otherwise there is a problem with chmod.\"\n pass\n current_fld = os.getcwd()\n os.chdir(fld)\n if os.path.exists(os.path.join(fld, 'PRODUCT')):\n for item in os.listdir(os.path.join(fld, 'PRODUCT')):\n pth = os.path.join(fld, 'PRODUCT', item)\n if os.stat(pth).st_size == 0:\n os.remove(pth)\n page = 1\n while True:\n subprocess.call('./dhusget.sh -u {7} -p {8} -L {9} -m Sentinel-1 -c {0},{1}:{2},{3} -T GRD -F \"*_GRDM_*\" -S {5}T00:00:00.000Z -E {6}T00:00:00.000Z -l 100 -P {4} -n {10} -o product -D'.format(llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, page, start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d'), login, password, lock_folder, n), shell=True)\n try:\n df = pd.read_csv('products-list.csv', header=None, names=['name', 'address'])\n except:\n break\n if df.empty:\n break\n page += 1\n os.chdir(current_fld)\n print(\"Download complete.\")\n return True\n\n\ndef download_single_scene(scene_name, root_folder=False, output_folder='./'):\n scene_name = scene_name.split('.')[0]\n '''\n if root_folder:\n try:\n root_folder = os.environ['S1PATH']\n download_path = get_scene_folder(scene_name, root_folder)\n except:\n download_path = output_folder\n else:\n download_path = output_folder\n '''\n if not root_folder:\n download_path = output_folder\n else:\n if root_folder is True:\n try:\n root_folder = os.environ['S1PATH']\n except:\n print('Could not read $S1PATH environment variable.')\n return False\n download_path = get_scene_folder(scene_name, root_folder)\n \n try:\n asf_credentials = os.environ['ASF_CREDENTIALS']\n with open(asf_credentials) as f:\n username = f.readline()[:-1]\n passwd = f.readline()[:-1]\n except:\n print('No credentials provided.')\n return False\n\n cwd = os.getcwd()\n os.chdir(download_path)\n subprocess.call('wget -c -q --show-progress --http-user={0} --http-password={1} \"https://datapool.asf.alaska.edu/GRD_MD/S{2}/{3}.zip\"'.format(username, passwd, scene_name[2], scene_name), shell=True)\n os.chdir(cwd)\n return True\n\n\ndef download_single_day(date, root_folder=False, output_folder='./', extra_folder=''):\n if not (type(date) is datetime.datetime) or (type(date) is datetime.date):\n print('input should be a datetime.datetime or datetime.date instance.')\n return False\n \n if not root_folder:\n download_path = output_folder\n else:\n if root_folder is True:\n try:\n root_folder = os.environ['S1PATH']\n except:\n print('Could not read $S1PATH environment variable.')\n return False\n download_path = get_date_folder(date, root_folder, extra_folder=extra_folder)\n \n try:\n asf_credentials = os.environ['ASF_CREDENTIALS']\n with open(asf_credentials) as f:\n username = f.readline()[:-1]\n passwd = f.readline()[:-1]\n except:\n print('No credentials provided.')\n return False\n\n cwd = os.getcwd()\n os.chdir(download_path)\n search_string = 'https://api.daac.asf.alaska.edu/services/search/param?'\n search_string += 'platform=S1'\n search_string += '&beamSwath=EW'\n search_string += '&processingLevel=GRD_MD'\n search_string += '&start={0}'.format(date.strftime('%Y-%m-%dT00:00:00UTC'))\n search_string += '&end={0}'.format(date.strftime('%Y-%m-%dT23:59:59UTC'))\n search_string += '&output=metalink'\n search_string += '&intersectsWith=polygon((-160.6349 60.8024,-156.8663 69.0027,-128.1877 67.8319,-24.5383 80.8176,-36.4015 66.743,-19.1937 64.2656,36.6742 67.6532,64.5098 66.8212,121.018 70.5129,148.6526 69.0332,-160.6349 60.8024))'.replace('(', '%28').replace(')', '%29').replace(',', '%2C').replace(' ', '+')\n# subprocess.call('aria2c 
--http-auth-challenge=true --http-user={0} --http-passwd={1} --continue=true --check-integrity=true --max-tries=0 --max-concurrent-downloads=3 \"{2}\"'.format(username, passwd, search_string), shell=True)\n subprocess.call('aria2c --http-auth-challenge=true --http-user={0} --http-passwd={1} --check-integrity=true --max-tries=0 --max-concurrent-downloads=3 \"{2}\"'.format(username, passwd, search_string), shell=True)\n\n os.chdir(cwd)\n return True\n\n\nif __name__ == \"__main__\":\n pass\n" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
CLAW-Lab/ToM
[ "70770554602e0ef53f3c269e13d9c1b1043b06ae" ]
[ "ibr_game/few_shot_learning_system.py" ]
[ "import os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom fastcore.utils import mapped\n\nfrom inner_loop_optimizers import LSLRGradientDescentLearningRule\n\n\ndef set_torch_seed(seed):\n \"\"\"\n Sets the pytorch seeds for current experiment run\n :param seed: The seed (int)\n :return: A random number generator to use\n \"\"\"\n rng = np.random.RandomState(seed=seed)\n torch_seed = rng.randint(0, 999999)\n torch.manual_seed(seed=torch_seed)\n\n return rng\n\n\n\nclass MAMLFewShotClassifier(nn.Module):\n def __init__(self, classifier_class, args, listener_args):\n \"\"\"\n Initializes a MAML few shot learning system\n :param classifier_class: The classifier's class\n :param args: A namedtuple of arguments specifying various hyperparameters.\n :param seed\n :param number_of_training_steps_per_iter:\n :param learnable_per_layer_per_step_inner_loop_learning_rate\n :param total_epochs\n :param min_learning_rate\n :param multi_step_loss_num_epochs\n :param enable_inner_loop_optimizable_bn_params\n :param second_order\n :param first_order_to_second_order_epoch\n :param dataset_name\n :param use_multi_step_loss_optimization\n :param listener_args: Listener arguments\n \"\"\"\n super(MAMLFewShotClassifier, self).__init__()\n self.args = args\n self.classifier_class = classifier_class\n self.batch_size = args.batch_size\n self.device = listener_args.device\n self.current_epoch = 0\n\n self.rng = set_torch_seed(seed=listener_args.seed)\n self.classifier = classifier_class(\n args=listener_args).to(device=self.device)\n self.task_learning_rate = args.init_inner_loop_learning_rate\n\n self.inner_loop_optimizer = LSLRGradientDescentLearningRule(device=self.device,\n init_learning_rate=self.task_learning_rate,\n total_num_inner_loop_steps=self.args.number_of_training_steps_per_iter,\n use_learnable_learning_rates=self.args.learnable_per_layer_per_step_inner_loop_learning_rate)\n self.inner_loop_optimizer.initialise(\n names_weights_dict=self.get_inner_loop_parameter_dict(params=self.classifier.named_parameters(), excluded_params=self.args.excluded_params))\n\n print(\"Inner Loop parameters\")\n for key, value in self.inner_loop_optimizer.named_parameters():\n print(key, value.shape)\n\n self.to(self.device)\n print(\"Outer Loop parameters\")\n for name, param in self.named_parameters():\n if param.requires_grad:\n print(name, param.shape, param.device, param.requires_grad)\n\n self.optimizer = optim.Adam(\n self.trainable_parameters(), lr=args.meta_learning_rate, amsgrad=False)\n self.scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer=self.optimizer, T_max=self.args.total_epochs,\n eta_min=self.args.min_learning_rate)\n\n if torch.cuda.is_available():\n if torch.cuda.device_count() > 1:\n self.to(torch.cuda.current_device())\n self.classifier = nn.DataParallel(module=self.classifier)\n else:\n self.to(torch.cuda.current_device())\n\n self.device = torch.cuda.current_device()\n\n def move_to_cuda(self, x):\n return x.to(device=self.device)\n \n def get_per_step_loss_importance_vector(self):\n \"\"\"\n Generates a tensor of dimensionality (num_inner_loop_steps) indicating the importance of each step's target\n loss towards the optimization loss.\n :return: A tensor to be used to compute the weighted average of the loss, useful for\n the MSL (Multi Step Loss) mechanism.\n \"\"\"\n loss_weights = np.ones(shape=(self.args.number_of_training_steps_per_iter)) * (\n 1.0 / self.args.number_of_training_steps_per_iter)\n 
decay_rate = 1.0 / self.args.number_of_training_steps_per_iter / \\\n self.args.multi_step_loss_num_epochs\n min_value_for_non_final_losses = 0.03 / \\\n self.args.number_of_training_steps_per_iter\n for i in range(len(loss_weights) - 1):\n curr_value = np.maximum(\n loss_weights[i] - (self.current_epoch * decay_rate), min_value_for_non_final_losses)\n loss_weights[i] = curr_value\n\n curr_value = np.minimum(\n loss_weights[-1] + (self.current_epoch *\n (self.args.number_of_training_steps_per_iter - 1) * decay_rate),\n 1.0 - ((self.args.number_of_training_steps_per_iter - 1) * min_value_for_non_final_losses))\n loss_weights[-1] = curr_value\n loss_weights = torch.Tensor(loss_weights).to(device=self.device)\n return loss_weights\n\n def get_inner_loop_parameter_dict(self, params, excluded_params=[]):\n \"\"\"\n Returns a dictionary with the parameters to use for inner loop updates.\n :param params: A dictionary of the network's parameters.\n :return: A dictionary of the parameters to use for the inner loop optimization process.\n \"\"\"\n param_dict = dict()\n for name, param in params:\n if name in excluded_params:\n continue\n if param.requires_grad:\n if self.args.enable_inner_loop_optimizable_bn_params:\n param_dict[name] = param.to(device=self.device)\n else:\n if \"norm_layer\" not in name:\n param_dict[name] = param.to(device=self.device)\n\n return param_dict\n\n def apply_inner_loop_update(self, loss, names_weights_copy, use_second_order, current_step_idx):\n \"\"\"\n Applies an inner loop update given current step's loss, the weights to update, a flag indicating whether to use\n second order derivatives and the current step's index.\n :param loss: Current step's loss with respect to the support set.\n :param names_weights_copy: A dictionary with names to parameters to update.\n :param use_second_order: A boolean flag of whether to use second order derivatives.\n :param current_step_idx: Current step's index.\n :return: A dictionary with the updated weights (name, param)\n \"\"\"\n num_gpus = torch.cuda.device_count()\n if num_gpus > 1:\n self.classifier.module.zero_grad(params=names_weights_copy)\n else:\n self.classifier.zero_grad(params=names_weights_copy)\n\n grads = torch.autograd.grad(loss, names_weights_copy.values(),\n create_graph=use_second_order, allow_unused=True)\n names_grads_copy = dict(zip(names_weights_copy.keys(), grads))\n\n names_weights_copy = {key: value[0]\n for key, value in names_weights_copy.items()}\n\n for key, grad in names_grads_copy.items():\n if grad is None:\n print('Grads not found for inner loop parameter', key)\n names_grads_copy[key] = names_grads_copy[key].sum(dim=0)\n\n names_weights_copy = self.inner_loop_optimizer.update_params(names_weights_dict=names_weights_copy,\n names_grads_wrt_params_dict=names_grads_copy,\n num_step=current_step_idx)\n\n num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1\n names_weights_copy = {\n name.replace('module.', ''): value.unsqueeze(0).repeat(\n [num_devices] + [1 for i in range(len(value.shape))]) for\n name, value in names_weights_copy.items()}\n\n return names_weights_copy\n\n def get_across_task_loss_metrics(self, total_losses, total_accuracies):\n losses = dict()\n\n losses['loss'] = torch.mean(torch.stack(total_losses))\n losses['accuracy'] = np.mean(total_accuracies)\n\n return losses\n\n def forward(self, data_batch, epoch, use_second_order, use_multi_step_loss_optimization, num_steps, training_phase):\n \"\"\"\n Runs a forward outer loop pass on the batch of tasks using the 
MAML/++ framework.\n :param data_batch: A data batch containing the support and target sets.\n :param epoch: Current epoch's index\n :param use_second_order: A boolean saying whether to use second order derivatives.\n :param use_multi_step_loss_optimization: Whether to optimize on the outer loop using just the last step's\n target loss (True) or whether to use multi step loss which improves the stability of the system (False)\n :param num_steps: Number of inner loop steps.\n :param training_phase: Whether this is a training phase (True) or an evaluation phase (False)\n :return: A dictionary with the collected losses of the current outer forward propagation.\n \"\"\"\n x_support_set, x_target_set, y_support_set, y_target_set = data_batch\n\n # _, ncs, _ = y_support_set.shape\n\n # self.num_classes_per_set = ncs\n\n total_losses = []\n total_accuracies = []\n per_task_target_preds = [[] for i in range(len(y_target_set))]\n self.classifier.zero_grad()\n for task_id, (x_support_set_task, y_support_set_task, x_target_set_task, y_target_set_task) in \\\n enumerate(zip(zip(*x_support_set),\n y_support_set,\n zip(*x_target_set),\n y_target_set)):\n task_losses = []\n # task_accuracies = []\n per_step_loss_importance_vectors = self.get_per_step_loss_importance_vector()\n names_weights_copy = self.get_inner_loop_parameter_dict(\n self.classifier.named_parameters(), self.args.excluded_params)\n\n\n num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1\n\n names_weights_copy = {\n name.replace('module.', ''): value.unsqueeze(0).repeat(\n [num_devices] + [1 for i in range(len(value.shape))]) for\n name, value in names_weights_copy.items()}\n\n # _, _, c, h, w = x_target_set_task.shape\n\n # x_support_set_task = x_support_set_task.view(-1, c, h, w)\n # y_support_set_task = y_support_set_task.view(-1)\n # x_target_set_task = x_target_set_task.view(-1, c, h, w)\n # y_target_set_task = y_target_set_task.view(-1)\n\n for num_step in range(num_steps):\n\n support_loss, _ = self.net_forward(x=x_support_set_task,\n y=y_support_set_task,\n weights=names_weights_copy,\n backup_running_statistics=True if (\n num_step == 0) else False,\n training=True, num_step=num_step)\n\n names_weights_copy = self.apply_inner_loop_update(loss=support_loss,\n names_weights_copy=names_weights_copy,\n use_second_order=use_second_order,\n current_step_idx=num_step)\n\n if use_multi_step_loss_optimization and training_phase and epoch < self.args.multi_step_loss_num_epochs:\n target_loss, target_preds = self.net_forward(x=x_target_set_task,\n y=y_target_set_task, weights=names_weights_copy,\n backup_running_statistics=False, training=True,\n num_step=num_step)\n\n task_losses.append(\n per_step_loss_importance_vectors[num_step] * target_loss)\n else:\n if num_step == (self.args.number_of_training_steps_per_iter - 1):\n target_loss, target_preds = self.net_forward(x=x_target_set_task,\n y=y_target_set_task, weights=names_weights_copy,\n backup_running_statistics=False, training=True,\n num_step=num_step)\n task_losses.append(target_loss)\n\n # per_task_target_preds[task_id] = target_preds.detach(\n # ).cpu().numpy()\n per_task_target_preds[task_id] = target_preds.detach()\n _, predicted = torch.max(target_preds.data, 1)\n\n if y_target_set_task.size() != predicted.size():\n y_target_set_task_hard = torch.max(y_target_set_task, dim=-1)[1]\n accuracy = predicted.float().eq(y_target_set_task_hard.data.float()).cpu().float()\n else:\n accuracy = predicted.float().eq(y_target_set_task.data.float()).cpu().float()\n 
task_losses = torch.sum(torch.stack(task_losses))\n total_losses.append(task_losses)\n total_accuracies.extend(accuracy)\n\n if not training_phase:\n self.classifier.restore_backup_stats()\n\n losses = self.get_across_task_loss_metrics(total_losses=total_losses,\n total_accuracies=total_accuracies)\n\n for idx, item in enumerate(per_step_loss_importance_vectors):\n losses['loss_importance_vector_{}'.format(\n idx)] = item.detach().cpu().numpy()\n\n return losses, per_task_target_preds\n\n def net_forward(self, x, y, weights, backup_running_statistics, training, num_step):\n \"\"\"\n A base model forward pass on some data points x. Using the parameters in the weights dictionary. Also requires\n boolean flags indicating whether to reset the running statistics at the end of the run (if at evaluation phase).\n A flag indicating whether this is the training session and an int indicating the current step's number in the\n inner loop.\n :param x: A data batch of shape b, c, h, w\n :param y: A data targets batch of shape b, n_classes\n :param weights: A dictionary containing the weights to pass to the network.\n :param backup_running_statistics: A flag indicating whether to reset the batch norm running statistics to their\n previous values after the run (only for evaluation)\n :param training: A flag indicating whether the current process phase is a training or evaluation.\n :param num_step: An integer indicating the number of the step in the inner loop.\n :return: the crossentropy losses with respect to the given y, the predictions of the base model.\n \"\"\"\n preds = self.classifier.maml_forward(x=x, params=weights,\n training=training,\n backup_running_statistics=backup_running_statistics, num_step=num_step)\n\n # self.classifier.sim(x, weights, y)\n\n if y.size() == preds.size(): # using soft label\n loss = -torch.mean(torch.sum(\n torch.log_softmax(preds, dim=-1)\n * torch.softmax(y, dim=-1),\n dim=-1\n )\n )\n else:\n loss = F.cross_entropy(input=preds, target=y)\n\n return loss, preds\n\n def trainable_parameters(self):\n \"\"\"\n Returns an iterator over the trainable parameters of the model.\n \"\"\"\n for param in self.parameters():\n if param.requires_grad:\n yield param\n\n def train_forward_prop(self, data_batch, epoch):\n \"\"\"\n Runs an outer loop forward prop using the meta-model and base-model.\n :param data_batch: A data batch containing the support set and the target set input, output pairs.\n :param epoch: The index of the currrent epoch.\n :return: A dictionary of losses for the current step.\n \"\"\"\n losses, per_task_target_preds = self.forward(data_batch=data_batch, epoch=epoch,\n use_second_order=self.args.second_order and\n epoch > self.args.first_order_to_second_order_epoch,\n use_multi_step_loss_optimization=self.args.use_multi_step_loss_optimization,\n num_steps=self.args.number_of_training_steps_per_iter,\n training_phase=True)\n return losses, per_task_target_preds\n\n def evaluation_forward_prop(self, data_batch, epoch):\n \"\"\"\n Runs an outer loop evaluation forward prop using the meta-model and base-model.\n :param data_batch: A data batch containing the support set and the target set input, output pairs.\n :param epoch: The index of the currrent epoch.\n :return: A dictionary of losses for the current step.\n \"\"\"\n losses, per_task_target_preds = self.forward(data_batch=data_batch, epoch=epoch, use_second_order=False,\n use_multi_step_loss_optimization=True,\n num_steps=self.args.number_of_evaluation_steps_per_iter,\n training_phase=False)\n\n return 
losses, per_task_target_preds\n\n def meta_update(self, loss):\n \"\"\"\n Applies an outer loop update on the meta-parameters of the model.\n :param loss: The current crossentropy loss.\n \"\"\"\n self.optimizer.zero_grad()\n loss.backward()\n if 'imagenet' in self.args.dataset_name:\n for _, param in self.classifier.named_parameters():\n if param.requires_grad:\n # not sure if this is necessary, more experiments are needed\n param.grad.data.clamp_(-10, 10)\n self.optimizer.step()\n\n def run_train_iter(self, data_batch, epoch):\n \"\"\"\n Runs an outer loop update step on the meta-model's parameters.\n :param data_batch: input data batch containing the support set and target set input, output pairs\n :param epoch: the index of the current epoch\n :return: The losses of the ran iteration.\n \"\"\"\n epoch = int(epoch)\n self.scheduler.step(epoch=epoch)\n if self.current_epoch != epoch:\n self.current_epoch = epoch\n\n if not self.training:\n self.train()\n\n x_support_set, x_target_set, y_support_set, y_target_set = data_batch\n\n x_support_set = mapped(self.move_to_cuda, x_support_set)\n x_target_set = mapped(self.move_to_cuda, x_target_set)\n y_support_set = mapped(self.move_to_cuda, y_support_set)\n y_target_set = mapped(self.move_to_cuda, y_target_set)\n\n data_batch = (x_support_set, x_target_set, y_support_set, y_target_set)\n\n losses, per_task_target_preds = self.train_forward_prop(\n data_batch=data_batch, epoch=epoch)\n\n self.meta_update(loss=losses['loss'])\n losses['learning_rate'] = self.scheduler.get_lr()[0]\n self.optimizer.zero_grad()\n self.zero_grad()\n\n return losses, per_task_target_preds\n\n def run_validation_iter(self, data_batch):\n \"\"\"\n Runs an outer loop evaluation step on the meta-model's parameters.\n :param data_batch: input data batch containing the support set and target set input, output pairs\n :param epoch: the index of the current epoch\n :return: The losses of the ran iteration.\n \"\"\"\n\n if self.training:\n # self.eval()\n pass\n\n x_support_set, x_target_set, y_support_set, y_target_set = data_batch\n\n x_support_set = mapped(self.move_to_cuda, x_support_set)\n x_target_set = mapped(self.move_to_cuda, x_target_set)\n y_support_set = mapped(self.move_to_cuda, y_support_set)\n y_target_set = mapped(self.move_to_cuda, y_target_set)\n\n data_batch = (x_support_set, x_target_set, y_support_set, y_target_set)\n\n losses, per_task_target_preds = self.evaluation_forward_prop(\n data_batch=data_batch, epoch=self.current_epoch)\n\n # losses['loss'].backward() # uncomment if you get the weird memory error\n # self.zero_grad()\n # self.optimizer.zero_grad()\n\n return losses, per_task_target_preds\n\n def save_model(self, model_save_dir, state):\n \"\"\"\n Save the network parameter state and experiment state dictionary.\n :param model_save_dir: The directory to store the state at.\n :param state: The state containing the experiment state and the network. It's in the form of a dictionary\n object.\n \"\"\"\n state['network'] = self.state_dict()\n torch.save(state, f=model_save_dir)\n\n def load_model(self, model_save_dir, model_name, model_idx):\n \"\"\"\n Load checkpoint and return the state dictionary containing the network state params and experiment state.\n :param model_save_dir: The directory from which to load the files.\n :param model_name: The model_name to be loaded from the direcotry.\n :param model_idx: The index of the model (i.e. 
epoch number or 'latest' for the latest saved model of the current\n experiment)\n :return: A dictionary containing the experiment state and the saved model parameters.\n \"\"\"\n filepath = os.path.join(\n model_save_dir, \"{}_{}\".format(model_name, model_idx))\n state = torch.load(filepath)\n state_dict_loaded = state['network']\n self.load_state_dict(state_dict=state_dict_loaded)\n return state\n" ]
[ [ "numpy.minimum", "torch.max", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.load", "numpy.mean", "torch.cuda.is_available", "torch.save", "torch.softmax", "torch.cuda.current_device", "torch.stack", "torch.cuda.device_count", "numpy.random.RandomState", "numpy.maximum", "torch.Tensor", "torch.manual_seed", "torch.nn.functional.cross_entropy", "numpy.ones", "torch.log_softmax", "torch.nn.DataParallel" ] ]
H4ndsomeJohn/CSNet
[ "10838a30be7cad6068a632fc32309ac13f22d339" ]
[ "graph_representation/methods/mesh.py" ]
[ "# import meshio\nimport numpy as np\nimport scipy.spatial\nimport torch\nfrom graph_representation.methods.edge import add_to_edges, sort_Edges\nfrom graph_representation.MRIData import MRIData\n\n\ndef makeMesh(data: MRIData):\n edges_Ek = [[], []]\n edges_Et = [[], []]\n edges_Eb = [[], []]\n\n # for b in [1]:\n for b in range(data.num):\n n_v = [len(data.v_2d[b][_]) for _ in range(data.slice)]\n n_v = np.nonzero(n_v)\n st_idx, ed_slice = n_v[0].min(), n_v[0].max()\n\n for s in range(data.slice):\n # for s in [21]:\n if len(data.v_2d[b][s]) > 0:\n lines = link_Ek(data, b, s)\n edges_Ek = add_to_edges(lines, edges_Ek)\n if s + 1 < data.slice and len(data.v_2d[b][s]) * len(data.v_2d[b][s + 1]) > 0:\n lines = link_Et(data, b, s)\n edges_Et = add_to_edges(lines, edges_Et)\n if s == st_idx or s == ed_slice:\n lines = link_Eb(data, b, s)\n edges_Eb = add_to_edges(lines, edges_Eb)\n\n edges_Ek = sort_Edges(edges_Ek)\n edges_Et = sort_Edges(edges_Et)\n edges_Eb = sort_Edges(edges_Eb)\n edges = torch.cat([edges_Ek, edges_Et, edges_Eb], dim=1)\n faces = make_faces(edges, len(data.v_3d))\n data.mesh_edges = edges\n data.mesh_faces = faces\n data.mesh_vertices = data.v_3d\n return\n\n\ndef link_Ek(data, b, s):\n idx = data.v_idx[b][s]\n # add edge along surface\n lines = [idx[:-1], idx[1:]]\n # make loop\n lines[0].append(idx[-1])\n lines[1].append(idx[0])\n return lines\n\n\ndef link_Et(data, b, s):\n # make edge in adjacency slices\n lines = [[], []]\n\n # small set link to big set\n if len(data.v_2d[b][s]) > len(data.v_2d[b][s + 1]):\n vertex_b = data.v_2d[b][s]\n vertex_s = data.v_2d[b][s + 1]\n idx_b, idx_s = data.v_idx[b][s], data.v_idx[b][s + 1]\n else:\n vertex_b = data.v_2d[b][s + 1]\n vertex_s = data.v_2d[b][s]\n idx_b, idx_s = data.v_idx[b][s + 1], data.v_idx[b][s]\n n_s = len(vertex_s)\n n_b = len(vertex_b)\n # print(b, s, n_b, n_s)\n\n # concat the closest vertex\n mytree1 = scipy.spatial.cKDTree(vertex_b)\n kd_dist, clost_to_s = mytree1.query(vertex_s)\n\n # fix bugs\n min_idx = np.argmin(clost_to_s)\n for _i in range(n_s - 1):\n if clost_to_s[(min_idx + _i + 1) % n_s] < clost_to_s[(min_idx + _i) % n_s]:\n clost_to_s[(min_idx + _i + 1) % n_s] = clost_to_s[(min_idx + _i) % n_s]\n\n for sid, bid in enumerate(clost_to_s):\n # add line from big set to small set, each vertex in small set is connected\n lines[0].append(idx_s[sid])\n lines[1].append(idx_b[bid])\n # not all vertex in big set is connected, they connect ref by their neighbor\n while bid != clost_to_s[(sid + 1) % n_s]:\n bid = (bid + 1) % n_b\n lines[0].append(idx_s[sid])\n lines[1].append(idx_b[bid])\n\n if n_s == 1:\n # print(b, s)\n for bid in range(n_b):\n # add line from 1 point to big set, to ensure each vertex in big set is connected\n lines[0].append(idx_s[0])\n lines[1].append(idx_b[bid])\n\n return lines\n\n\ndef link_Eb(data, b, s):\n idx = data.v_idx[b][s]\n n = len(idx)\n # add plane in the beside\n if n <= 3:\n return [[idx[0]], [idx[0]]]\n lines = [[], []]\n for _idx in range(n):\n lines[0].append(idx[0])\n lines[1].append(idx[_idx])\n return lines\n\n\ndef make_faces(edge, vertex_num):\n u = torch.cat((edge[0], edge[1]))\n v = torch.cat((edge[1], edge[0]))\n\n n = len(u)\n u_list = [[] for _ in range(vertex_num)]\n # print(u.max().item() + 1, \"/\", vertex_num)\n for i in range(n):\n u_list[u[i]].append(v[i].item())\n face = set()\n for p1 in range(vertex_num):\n for p2 in u_list[p1]:\n for p3 in u_list[p2]:\n if p1 in u_list[p3]:\n f = [p1, p2, p3]\n f.sort()\n f = tuple(f)\n face.add(f)\n face = list(face)\n 
# print(\"face num:\", len(face))\n return face\n\n\ndef find_direction(p1, p2, p3):\n if ((p2[0] - p1[0]) * (p3[1] - p1[1]) - (p3[0] - p1[0]) * (p2[1] - p1[1])) != 0:\n return ((p2[0] - p1[0]) * (p3[1] - p1[1]) - (p3[0] - p1[0]) * (p2[1] - p1[1])) > 0\n else:\n print(\"point in a line.\")\n exit()\n\n\n# def saveMesh(data, faces):\n# mesh_name = \"./test.obj\"\n# cells = [(\"triangle\", np.array(faces))]\n# mesh = meshio.Mesh(data.v_3d, cells)\n# meshio.write(mesh_name, mesh)\n" ]
[ [ "numpy.argmin", "numpy.nonzero", "torch.cat" ] ]
singularitatem/singularity
[ "3b7992244a4c152be42c207b15c2023e0da305a5" ]
[ "huggingface/nlp/demo.py" ]
[ "#!/usr/bin/env python3\n\nfrom absl import app, flags, logging\n\nimport torch as th\nimport pytorch_lightning as pl\n\nimport nlp\nimport transformers\n\nflags.DEFINE_boolean('debug', False, '')\nflags.DEFINE_integer('epochs', 10, '')\nflags.DEFINE_float('lr', 1e-2, '')\nflags.DEFINE_float('momentum', .9, '')\nflags.DEFINE_string('bert_model', 'bert-base-uncased', '')\nflags.DEFINE_integer('seq_length', 32, '')\nflags.DEFINE_integer('batch_size', 10, '')\n\nFLAGS = flags.FLAGS\n\n\nclass IMDBSentimentClassifier(pl.LightningModule):\n def __init__(self):\n super().__init__()\n self.model = transformers.BertForSequenceClassification.from_pretrained(FLAGS.bert_model)\n self.loss = th.nn.CrossEntropyLoss(reduction='none')\n\n def prepare_data(self):\n logging.info('Initialize Bert Tokenizer...')\n tokenizer = transformers.BertTokenizer.from_pretrained(FLAGS.bert_model)\n \n def _tokenize(x):\n x['input_ids'] = tokenizer.encode(\n x['text'], \n max_length=FLAGS.seq_length,\n pad_to_max_length=True\n )\n return x\n\n def _prepare_ds(split):\n logging.info(f'Prepare imdb {split} dataset...')\n ds = nlp.load_dataset('imdb', split=f'{split}[:{FLAGS.batch_size if FLAGS.debug else \"5%\"}]')\n ds = ds.map(_tokenize)\n ds.set_format(type='torch', columns=['input_ids', 'label'])\n \n # import IPython; IPython.embed(); exit(1)\n self.train_ds = _prepare_ds('train')\n self.test_ds = _prepare_ds('test')\n\n\n def configure_optimizers(self):\n return th.optim.SGD(\n self.parameters(),\n lr=FLAGS.lr,\n momentum=FLAGS.momentum\n )\n\n def forward(self, input_ids):\n mask = (input_ids != 0).float()\n logits, self.model(input_ids, mask)\n return logits\n\n def training_step(self, batch, batch_idx):\n logits = self.forward(batch['input_ids']) \n loss = self.loss(logits, batch['label']).mean()\n return {'loss': loss, 'log': {'train_loss': loss}}\n \n def validation_step(self, batch, batch_idx):\n logits = self.forward(batch['input_ids'])\n loss = self.loss(logits, batch['label'])\n acc = (logits.argmax(-1) == batch['label']).float()\n return {'loss': loss, 'acc': acc}\n\n def validation_epoch_end(self, outputs):\n loss = th.cat([o['loss'] for o in outputs], 0).mean()\n acc = th.cat([o['acc'] for o in outputs], 0).mean()\n out = {'val_loss': loss, 'val_acc': acc}\n return {**out, 'log': out}\n\n def train_dataloader(self):\n return th.utils.data.DataLoader(\n self.train_ds,\n batch_size=FLAGS.batch_size,\n drop_last=True,\n shuffle=True,\n )\n\n def val_dataloader(self):\n return th.utils.data.DataLoader(\n self.test_ds,\n batch_size=FLAGS.batch_size,\n drop_last=False,\n shuffle=True,\n )\n\n\ndef main(_):\n logging.info(f'here our nlp demo start...debug mode: {\"on\" if FLAGS.debug else \"off\"}')\n model = IMDBSentimentClassifier()\n trainer = pl.Trainer(\n default_root_dir='logs',\n gpus=(1 if th.cuda.is_available() else 0),\n max_epochs=FLAGS.epochs,\n fast_dev_run=FLAGS.debug,\n logger=pl.loggers.TensorBoardLogger('logs/', name='imdb', version=0)\n )\n logging.info('Start training the model...')\n trainer.fit(model)\n\n\nif __name__ == '__main__':\n app.run(main)\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.cat" ] ]
VU-IVM/Toponym-based-Algorithm-for-Grouped-Geoparsing-of-Social-media
[ "563cab3208c1b86837c45ba6ce33d740113c44af" ]
[ "methods/shapefiles.py" ]
[ "#!/usr/bin/python3\nfrom osgeo import ogr\nimport os\nimport numpy as np\nimport matplotlib.path as mpath\nfrom collections import defaultdict as dd\nimport shapefile as pyshp\nimport matplotlib.path\nimport matplotlib.patches as mpatches\n\nfrom methods import function\n\n\ndef open_shape(shapefile):\n return pyshp.Reader(shapefile)\n\n\ndef open_shp(shapefile):\n return pyshp.Reader(shapefile)\n\n\ndef get_records(shapefile, index=False):\n if not isinstance(shapefile, pyshp.Reader):\n shapefile = open_shape(shapefile)\n\n if not index:\n index = 0\n else:\n index = get_field_names(shapefile).index(index)\n\n records = {\n r.record[index]: r.shape\n for r in shapefile.shapeRecords()\n }\n return records\n\n\ndef get_bboxes(records):\n return {\n key: shape.bbox\n for key, shape in records.items()\n }\n\n\ndef create_paths(region):\n return [matplotlib.path.Path(path) for path in [region.points[i:j] for i, j in zip(list(region.parts), list(region.parts)[1:]+[None])]]\n\n\ndef get_field_names(shapefile):\n fields = shapefile.fields[1:]\n field_names = [field[0] for field in fields]\n return field_names\n\n\ndef create_paths_shapefile_dict(shapefile, index):\n paths = {}\n ds = ogr.Open(shapefile)\n lyr = ds.GetLayer()\n for feat in lyr:\n key = feat.GetField(index)\n if key is None:\n continue\n key = key.strip()\n geom = feat.geometry()\n codes = []\n all_x = []\n all_y = []\n for i in range(geom.GetGeometryCount()):\n geometry = geom.GetGeometryRef(i)\n if geometry.GetGeometryName() == 'POLYGON':\n for j in range(geometry.GetGeometryCount()):\n small_geom = geometry.GetGeometryRef(j)\n x = [small_geom.GetX(j) for j in range(small_geom.GetPointCount())]\n y = [small_geom.GetY(j) for j in range(small_geom.GetPointCount())]\n codes += [mpath.Path.MOVETO] + (len(x)-1)*[mpath.Path.LINETO]\n all_x += x\n all_y += y\n else:\n x = [geometry.GetX(j) for j in range(geometry.GetPointCount())]\n y = [geometry.GetY(j) for j in range(geometry.GetPointCount())]\n codes += [mpath.Path.MOVETO] + (len(x)-1)*[mpath.Path.LINETO]\n all_x += x\n all_y += y\n path = mpath.Path(np.column_stack((all_x, all_y)), codes)\n paths[key] = path\n return paths\n\n\ndef create_paths_shapefile(shapefile):\n paths = []\n for r in shapefile.shapeRecords():\n paths.extend(create_paths(r.shape))\n return paths\n\n\ndef create_patches(shapefile, facecolor, alpha):\n patches = []\n for path in create_paths_shapefile(shapefile):\n patch = mpatches.PathPatch(path, facecolor=facecolor, alpha=alpha, lw=2)\n patches.append(patch)\n return patches\n\n\ndef write_WGS84(name):\n epsg = 'GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433]]'\n out_prj = name + '.prj'\n with open(out_prj, 'w') as prj:\n prj.write(epsg)\n\n\ndef points_to_shapefile(points, name, fields):\n w = pyshp.Writer(pyshp.POINT)\n for field in fields:\n w.field(*field)\n\n field_names = [field[0] for field in fields]\n for point in points:\n w.point(point['lon'], point['lat'])\n w.record(*(point[field] for field in field_names))\n\n w.save(name)\n write_WGS84(name)\n\n\ndef shapefile_to_wkt(shapefile, index, force_multipolygon=False):\n shapefile = ogr.Open(shapefile)\n layer = shapefile.GetLayer(0)\n wkt = {}\n for i in range(layer.GetFeatureCount()):\n feature = layer.GetFeature(i)\n field = feature.GetField(index)\n geometry = feature.GetGeometryRef()\n if force_multipolygon and geometry.GetGeometryType() == ogr.wkbPolygon:\n geometry = ogr.ForceToMultiPolygon(geometry)\n 
wkt[field] = geometry.ExportToWkt()\n return wkt\n\n\ndef merge_shapefile_by_id(infile, outfile, layer_name, ID):\n dirname = os.path.dirname(outfile)\n try:\n os.makedirs(dirname)\n except OSError:\n pass\n inshp = ogr.Open(infile)\n inLayer = inshp.GetLayer()\n\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n outshp = driver.CreateDataSource(outfile)\n outLayer = outshp.CreateLayer(layer_name, geom_type=ogr.wkbPolygon)\n\n idField = ogr.FieldDefn(ID, ogr.OFTInteger)\n outLayer.CreateField(idField)\n\n gn_ids = [abs(feature.GetField(ID)) for feature in inLayer]\n duplicate_ids = function.find_duplicates(gn_ids)\n\n duplicate_ids_features = dd(list)\n\n outLayerDefn = outLayer.GetLayerDefn()\n\n for i in range(inLayer.GetFeatureCount()):\n inFeature = inLayer.GetFeature(i)\n outFeature = ogr.Feature(outLayerDefn)\n\n gn_id = abs(inFeature.GetField(ID))\n if gn_id not in duplicate_ids:\n\n outFeature.SetField(ID, gn_id)\n\n geom = inFeature.GetGeometryRef()\n outFeature.SetGeometry(geom)\n\n outLayer.CreateFeature(outFeature)\n outFeature = None\n\n else:\n duplicate_ids_features[gn_id].append(i)\n\n for gn_id, features in duplicate_ids_features.items():\n geom = ogr.Geometry(ogr.wkbPolygon)\n for i in features:\n inFeature = inLayer.GetFeature(i)\n geom = geom.Union(inFeature.GetGeometryRef())\n\n outFeature = ogr.Feature(outLayerDefn)\n outFeature.SetField(ID, gn_id)\n outFeature.SetGeometry(geom)\n outLayer.CreateFeature(outFeature)\n outFeature = None\n\n inshp = None\n outshp = None\n\n\ndef get_unique_records(shp, ID):\n inShp = ogr.Open(shp)\n inLayer = inShp.GetLayer()\n return set(feature.GetField(ID) for feature in inLayer)\n" ]
[ [ "matplotlib.patches.PathPatch", "numpy.column_stack" ] ]
szk9876/rlkit
[ "9d8213884b8bc6722041e23ae4f717c6bc17596c", "9d8213884b8bc6722041e23ae4f717c6bc17596c" ]
[ "rlkit/torch/sac/sac.py", "rlkit/torch/sac/twin_sac.py" ]
[ "from collections import OrderedDict\n\nimport numpy as np\nimport torch.optim as optim\nfrom torch import nn as nn\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.torch.torch_rl_algorithm import TorchRLAlgorithm\nfrom rlkit.torch.sac.policies import MakeDeterministic\n\n\nclass SoftActorCritic(TorchRLAlgorithm):\n def __init__(\n self,\n env,\n policy,\n qf,\n vf,\n\n policy_lr=1e-3,\n qf_lr=1e-3,\n vf_lr=1e-3,\n policy_mean_reg_weight=1e-3,\n policy_std_reg_weight=1e-3,\n policy_pre_activation_weight=0.,\n optimizer_class=optim.Adam,\n\n train_policy_with_reparameterization=True,\n soft_target_tau=1e-2,\n plotter=None,\n render_eval_paths=False,\n eval_deterministic=True,\n\n use_automatic_entropy_tuning=True,\n target_entropy=None,\n target_entropy_multiplier=1.,\n **kwargs\n ):\n if eval_deterministic:\n eval_policy = MakeDeterministic(policy)\n else:\n eval_policy = policy\n super().__init__(\n env=env,\n exploration_policy=policy,\n eval_policy=eval_policy,\n **kwargs\n )\n self.policy = policy\n self.qf = qf\n self.vf = vf\n self.train_policy_with_reparameterization = (\n train_policy_with_reparameterization\n )\n self.soft_target_tau = soft_target_tau\n self.policy_mean_reg_weight = policy_mean_reg_weight\n self.policy_std_reg_weight = policy_std_reg_weight\n self.policy_pre_activation_weight = policy_pre_activation_weight\n self.plotter = plotter\n self.render_eval_paths = render_eval_paths\n self.use_automatic_entropy_tuning = use_automatic_entropy_tuning\n if self.use_automatic_entropy_tuning:\n if target_entropy:\n self.target_entropy = target_entropy\n else:\n self.target_entropy = - self.target_entropy_multiplier * np.prod(self.env.action_space.shape).item() # heuristic value from Tuomas\n self.log_alpha = ptu.zeros(1, requires_grad=True)\n self.alpha_optimizer = optimizer_class(\n [self.log_alpha],\n lr=policy_lr,\n )\n\n self.target_vf = vf.copy()\n self.qf_criterion = nn.MSELoss()\n self.vf_criterion = nn.MSELoss()\n\n self.policy_optimizer = optimizer_class(\n self.policy.parameters(),\n lr=policy_lr,\n )\n self.qf_optimizer = optimizer_class(\n self.qf.parameters(),\n lr=qf_lr,\n )\n self.vf_optimizer = optimizer_class(\n self.vf.parameters(),\n lr=vf_lr,\n )\n\n def _do_training(self):\n batch = self.get_batch()\n rewards = batch['rewards']\n terminals = batch['terminals']\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n\n q_pred = self.qf(obs, actions)\n v_pred = self.vf(obs)\n # Make sure policy accounts for squashing functions like tanh correctly!\n policy_outputs = self.policy(\n obs,\n reparameterize=self.train_policy_with_reparameterization,\n return_log_prob=True,\n )\n new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]\n if self.use_automatic_entropy_tuning:\n \"\"\"\n Alpha Loss\n \"\"\"\n alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()\n self.alpha_optimizer.zero_grad()\n alpha_loss.backward()\n self.alpha_optimizer.step()\n alpha = self.log_alpha.exp()\n else:\n alpha = 1\n alpha_loss = 0\n\n \"\"\"\n QF Loss\n \"\"\"\n target_v_values = self.target_vf(next_obs)\n q_target = rewards + (1. 
- terminals) * self.discount * target_v_values\n qf_loss = self.qf_criterion(q_pred, q_target.detach())\n\n \"\"\"\n VF Loss\n \"\"\"\n q_new_actions = self.qf(obs, new_actions)\n v_target = q_new_actions - alpha*log_pi\n vf_loss = self.vf_criterion(v_pred, v_target.detach())\n\n \"\"\"\n Policy Loss\n \"\"\"\n if self.train_policy_with_reparameterization:\n policy_loss = (alpha*log_pi - q_new_actions).mean()\n else:\n log_policy_target = q_new_actions - v_pred\n policy_loss = (\n log_pi * (alpha*log_pi - log_policy_target).detach()\n ).mean()\n mean_reg_loss = self.policy_mean_reg_weight * (policy_mean**2).mean()\n std_reg_loss = self.policy_std_reg_weight * (policy_log_std**2).mean()\n pre_tanh_value = policy_outputs[-1]\n pre_activation_reg_loss = self.policy_pre_activation_weight * (\n (pre_tanh_value**2).sum(dim=1).mean()\n )\n policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss\n policy_loss = policy_loss + policy_reg_loss\n\n \"\"\"\n Update networks\n \"\"\"\n self.qf_optimizer.zero_grad()\n qf_loss.backward()\n self.qf_optimizer.step()\n\n self.vf_optimizer.zero_grad()\n vf_loss.backward()\n self.vf_optimizer.step()\n\n self.policy_optimizer.zero_grad()\n policy_loss.backward()\n self.policy_optimizer.step()\n\n self._update_target_network()\n\n \"\"\"\n Save some statistics for eval using just one batch.\n \"\"\"\n if self.need_to_update_eval_statistics:\n self.need_to_update_eval_statistics = False\n self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))\n self.eval_statistics['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))\n self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(\n policy_loss\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q Predictions',\n ptu.get_numpy(q_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'V Predictions',\n ptu.get_numpy(v_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Log Pis',\n ptu.get_numpy(log_pi),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy mu',\n ptu.get_numpy(policy_mean),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy log std',\n ptu.get_numpy(policy_log_std),\n ))\n if self.use_automatic_entropy_tuning:\n self.eval_statistics['Alpha'] = alpha.item()\n self.eval_statistics['Alpha Loss'] = alpha_loss.item()\n\n @property\n def networks(self):\n return [\n self.policy,\n self.qf,\n self.vf,\n self.target_vf,\n ]\n\n def _update_target_network(self):\n ptu.soft_update_from_to(self.vf, self.target_vf, self.soft_target_tau)\n\n def get_epoch_snapshot(self, epoch):\n snapshot = super().get_epoch_snapshot(epoch)\n snapshot.update(\n qf=self.qf,\n policy=self.policy,\n vf=self.vf,\n target_vf=self.target_vf,\n )\n return snapshot\n", "import numpy as np\nimport torch\nimport torch.optim as optim\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.torch.sac.policies import MakeDeterministic\nfrom rlkit.torch.torch_rl_algorithm import TorchRLAlgorithm\n\n\nclass TwinSAC(TorchRLAlgorithm):\n \"\"\"\n SAC with the twin architecture from TD3.\n \"\"\"\n def __init__(\n self,\n env,\n policy,\n qf1,\n qf2,\n vf,\n\n policy_lr=1e-3,\n qf_lr=1e-3,\n vf_lr=1e-3,\n policy_mean_reg_weight=1e-3,\n policy_std_reg_weight=1e-3,\n policy_pre_activation_weight=0.,\n optimizer_class=optim.Adam,\n\n train_policy_with_reparameterization=True,\n soft_target_tau=1e-2,\n policy_update_period=1,\n target_update_period=1,\n plotter=None,\n 
render_eval_paths=False,\n eval_deterministic=True,\n\n eval_policy=None,\n exploration_policy=None,\n\n use_automatic_entropy_tuning=True,\n target_entropy=None,\n fixed_entropy=0.1,\n target_entropy_multiplier=1.0,\n **kwargs\n ):\n if eval_policy is None:\n if eval_deterministic:\n eval_policy = MakeDeterministic(policy)\n else:\n eval_policy = policy\n super().__init__(\n env=env,\n exploration_policy=exploration_policy or policy,\n eval_policy=eval_policy,\n **kwargs\n )\n self.policy = policy\n self.qf1 = qf1\n self.qf2 = qf2\n self.vf = vf\n self.soft_target_tau = soft_target_tau\n self.policy_update_period = policy_update_period\n self.target_update_period = target_update_period\n self.policy_mean_reg_weight = policy_mean_reg_weight\n self.policy_std_reg_weight = policy_std_reg_weight\n self.policy_pre_activation_weight = policy_pre_activation_weight\n self.train_policy_with_reparameterization = (\n train_policy_with_reparameterization\n )\n self.target_entropy_multiplier = target_entropy_multiplier\n\n self.use_automatic_entropy_tuning = use_automatic_entropy_tuning\n # import pdb; pdb.set_trace()\n if self.use_automatic_entropy_tuning:\n if target_entropy:\n self.target_entropy = target_entropy\n else:\n self.target_entropy = - self.target_entropy_multiplier * np.prod(self.env.action_space.shape).item() # heuristic value from Tuomas\n self.log_alpha = ptu.zeros(1, requires_grad=True)\n self.alpha_optimizer = optimizer_class(\n [self.log_alpha],\n lr=policy_lr,\n )\n else:\n self.fixed_entropy = fixed_entropy\n\n self.plotter = plotter\n self.render_eval_paths = render_eval_paths\n\n self.target_vf = vf.copy()\n self.qf_criterion = torch.nn.MSELoss()\n self.vf_criterion = torch.nn.MSELoss()\n\n self.policy_optimizer = optimizer_class(\n self.policy.parameters(),\n lr=policy_lr,\n )\n self.qf1_optimizer = optimizer_class(\n self.qf1.parameters(),\n lr=qf_lr,\n )\n self.qf2_optimizer = optimizer_class(\n self.qf2.parameters(),\n lr=qf_lr,\n )\n self.vf_optimizer = optimizer_class(\n self.vf.parameters(),\n lr=vf_lr,\n )\n\n def _do_training(self):\n batch = self.get_batch()\n rewards = batch['rewards']\n terminals = batch['terminals']\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n\n q1_pred = self.qf1(obs, actions)\n q2_pred = self.qf2(obs, actions)\n v_pred = self.vf(obs)\n # Make sure policy accounts for squashing functions like tanh correctly!\n policy_outputs = self.policy(obs,\n reparameterize=self.train_policy_with_reparameterization,\n return_log_prob=True)\n new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]\n\n \"\"\"\n Alpha Loss (if applicable)\n \"\"\"\n if self.use_automatic_entropy_tuning:\n \"\"\"\n Alpha Loss\n \"\"\"\n alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()\n self.alpha_optimizer.zero_grad()\n alpha_loss.backward()\n self.alpha_optimizer.step()\n alpha = self.log_alpha.exp()\n else:\n alpha = self.fixed_entropy\n alpha_loss = 0\n\n \"\"\"\n QF Loss\n \"\"\"\n target_v_values = self.target_vf(next_obs)\n q_target = self.reward_scale * rewards.squeeze_().unsqueeze_(-1) + (1. 
- terminals) * self.discount * target_v_values\n qf1_loss = self.qf_criterion(q1_pred, q_target.detach())\n qf2_loss = self.qf_criterion(q2_pred, q_target.detach())\n\n \"\"\"\n VF Loss\n \"\"\"\n q_new_actions = torch.min(\n self.qf1(obs, new_actions),\n self.qf2(obs, new_actions),\n )\n v_target = q_new_actions - alpha*log_pi\n vf_loss = self.vf_criterion(v_pred, v_target.detach())\n\n \"\"\"\n Update networks\n \"\"\"\n self.qf1_optimizer.zero_grad()\n qf1_loss.backward()\n self.qf1_optimizer.step()\n\n self.qf2_optimizer.zero_grad()\n qf2_loss.backward()\n self.qf2_optimizer.step()\n\n self.vf_optimizer.zero_grad()\n vf_loss.backward()\n self.vf_optimizer.step()\n\n policy_loss = None\n if self._n_train_steps_total % self.policy_update_period == 0:\n \"\"\"\n Policy Loss\n \"\"\"\n if self.train_policy_with_reparameterization:\n policy_loss = (alpha*log_pi - q_new_actions).mean()\n else:\n log_policy_target = q_new_actions - v_pred\n policy_loss = (\n log_pi * (alpha*log_pi - log_policy_target).detach()\n ).mean()\n mean_reg_loss = self.policy_mean_reg_weight * (policy_mean**2).mean()\n std_reg_loss = self.policy_std_reg_weight * (policy_log_std**2).mean()\n pre_tanh_value = policy_outputs[-1]\n pre_activation_reg_loss = self.policy_pre_activation_weight * (\n (pre_tanh_value**2).sum(dim=1).mean()\n )\n policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss\n policy_loss = policy_loss + policy_reg_loss\n\n self.policy_optimizer.zero_grad()\n policy_loss.backward()\n self.policy_optimizer.step()\n\n if self._n_train_steps_total % self.target_update_period == 0:\n ptu.soft_update_from_to(\n self.vf, self.target_vf, self.soft_target_tau\n )\n\n \"\"\"\n Save some statistics for eval using just one batch.\n \"\"\"\n if self.need_to_update_eval_statistics:\n self.need_to_update_eval_statistics = False\n if policy_loss is None:\n if self.train_policy_with_reparameterization:\n policy_loss = (log_pi - q_new_actions).mean()\n else:\n log_policy_target = q_new_actions - v_pred\n policy_loss = (\n log_pi * (log_pi - log_policy_target).detach()\n ).mean()\n\n mean_reg_loss = self.policy_mean_reg_weight * (policy_mean**2).mean()\n std_reg_loss = self.policy_std_reg_weight * (policy_log_std**2).mean()\n pre_tanh_value = policy_outputs[-1]\n pre_activation_reg_loss = self.policy_pre_activation_weight * (\n (pre_tanh_value**2).sum(dim=1).mean()\n )\n policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss\n policy_loss = policy_loss + policy_reg_loss\n\n self.eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))\n self.eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))\n self.eval_statistics['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))\n self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(\n policy_loss\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q1 Predictions',\n ptu.get_numpy(q1_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q2 Predictions',\n ptu.get_numpy(q2_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'V Predictions',\n ptu.get_numpy(v_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Log Pis',\n ptu.get_numpy(log_pi),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy mu',\n ptu.get_numpy(policy_mean),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy log std',\n ptu.get_numpy(policy_log_std),\n ))\n if self.use_automatic_entropy_tuning:\n self.eval_statistics['Alpha'] = alpha.item()\n 
self.eval_statistics['Alpha Loss'] = alpha_loss.item()\n\n @property\n def networks(self):\n return [\n self.policy,\n self.qf1,\n self.qf2,\n self.vf,\n self.target_vf,\n ]\n\n def get_epoch_snapshot(self, epoch):\n snapshot = super().get_epoch_snapshot(epoch)\n snapshot['qf1'] = self.qf1\n snapshot['qf2'] = self.qf2\n snapshot['policy'] = self.policy\n snapshot['vf'] = self.vf\n snapshot['target_vf'] = self.target_vf\n return snapshot\n" ]
[ [ "torch.nn.MSELoss", "numpy.prod" ], [ "torch.nn.MSELoss", "numpy.prod" ] ]
epapoutsellis/CCPi-Dynamic
[ "d108cd7e9652992001a116347941eda7e75b3301" ]
[ "Wrappers/Python/test/test_DataProcessor.py" ]
[ "import sys\r\nimport unittest\r\nimport numpy\r\nfrom ccpi.framework import DataProcessor\r\nfrom ccpi.framework import DataContainer\r\nfrom ccpi.framework import ImageData\r\nfrom ccpi.framework import AcquisitionData\r\nfrom ccpi.framework import ImageGeometry\r\nfrom ccpi.framework import AcquisitionGeometry\r\nfrom timeit import default_timer as timer\r\n\r\nfrom ccpi.framework import AX, CastDataContainer, PixelByPixelDataProcessor\r\n\r\nclass TestDataProcessor(unittest.TestCase):\r\n\r\n def test_DataProcessorChaining(self):\r\n shape = (2,3,4,5)\r\n size = shape[0]\r\n for i in range(1, len(shape)):\r\n size = size * shape[i]\r\n #print(\"a refcount \" , sys.getrefcount(a))\r\n a = numpy.asarray([i for i in range( size )])\r\n a = numpy.reshape(a, shape)\r\n ds = DataContainer(a, False, ['X', 'Y','Z' ,'W'])\r\n c = ds.subset(['Z','W','X'])\r\n arr = c.as_array()\r\n #[ 0 60 1 61 2 62 3 63 4 64 5 65 6 66 7 67 8 68 9 69 10 70 11 71\r\n # 12 72 13 73 14 74 15 75 16 76 17 77 18 78 19 79]\r\n \r\n ax = AX()\r\n ax.scalar = 2\r\n ax.set_input(c)\r\n #ax.apply()\r\n print (\"ax in {0} out {1}\".format(c.as_array().flatten(),\r\n ax.get_output().as_array().flatten()))\r\n numpy.testing.assert_array_equal(ax.get_output().as_array(), arr*2)\r\n \r\n cast = CastDataContainer(dtype=numpy.float32)\r\n cast.set_input(c)\r\n out = cast.get_output()\r\n self.assertTrue(out.as_array().dtype == numpy.float32)\r\n out *= 0 \r\n axm = AX()\r\n axm.scalar = 0.5\r\n axm.set_input(c)\r\n axm.get_output(out)\r\n numpy.testing.assert_array_equal(out.as_array(), arr*0.5)\r\n \r\n # check out in DataSetProcessor\r\n #a = numpy.asarray([i for i in range( size )])\r\n \r\n # create a PixelByPixelDataProcessor\r\n \r\n #define a python function which will take only one input (the pixel value)\r\n pyfunc = lambda x: -x if x > 20 else x\r\n clip = PixelByPixelDataProcessor()\r\n clip.pyfunc = pyfunc \r\n clip.set_input(c) \r\n #clip.apply()\r\n v = clip.get_output().as_array()\r\n \r\n self.assertTrue(v.max() == 19)\r\n self.assertTrue(v.min() == -79)\r\n \r\n print (\"clip in {0} out {1}\".format(c.as_array(), clip.get_output().as_array()))\r\n \r\n #dsp = DataProcessor()\r\n #dsp.set_input(ds)\r\n #dsp.input = a\r\n # pipeline\r\n \r\n chain = AX()\r\n chain.scalar = 0.5\r\n chain.set_input_processor(ax)\r\n print (\"chain in {0} out {1}\".format(ax.get_output().as_array(), chain.get_output().as_array()))\r\n numpy.testing.assert_array_equal(chain.get_output().as_array(), arr)" ]
[ [ "numpy.reshape" ] ]
ebaals/Thesis
[ "988bba5626b01599b6414a6a196ba22371a4fb89" ]
[ "sharppy/sharptab/watch_type.py" ]
[ "from sharppy.sharptab import thermo, utils, interp, params, constants\nimport numpy as np\n\n## Routines implemented in Python by Greg Blumberg - CIMMS and Kelton Halbert (OU SoM)\n## [email protected], [email protected], [email protected], [email protected]\n\ndef wind_chill(prof):\n '''\n Surface Wind Chill Equation\n\n Computes wind chill at the surface data point in the profile object\n using the equation found at:\n\n www.nws.noaa.gov/os/windchill/index.shtml\n\n Parameters\n ----------\n prof : Profile object\n\n Returns\n -------\n wind_chill : wind chill value in (F)\n '''\n # Needs to be tested\n\n sfc_temp = thermo.ctof(prof.tmpc[prof.get_sfc()])\n sfc_wspd = utils.KTS2MPH(prof.wspd[prof.get_sfc()])\n\n wind_chill = 35.74 + (0.6215*sfc_temp) - (35.75*(sfc_wspd**0.16)) + \\\n 0.4275 * (sfc_temp) * (sfc_wspd**0.16)\n return wind_chill\n\ndef init_phase(prof):\n '''\n Inital Precipitation Phase\n Adapted from SHARP code donated by Rich Thompson (SPC)\n\n This function determines the initial phase of any precipitation source in the profile.\n It does this either by finding a source of precipitation by searching for the highest 50 mb \n layer that has a relative humidity greater than 80 percent at the top and the bottom\n of the layer. This layer may be found either in the lowest 5 km of the profile, and if\n an OMEG profile is specified in the profile object, it will search for the layers with\n upward motion.\n\n The precipitation type is determined by using a.) the interpolated temperature in the middle\n of the precipitation source layer and b.) set temperature thresholds to determine the \n precipitation type. The type may be \"Rain\", \"Freezing Rain\", \"ZR/S Mix\", or \"Snow\".\n\n Parameters\n ----------\n prof : Profile object (omega profile optional)\n\n Returns\n -------\n plevel : the pressure level of the precipitation source (mb)\n phase : the phase type of the precipitation (int)\n phase == 0 for \"Rain\"\n phase == 1 for \"Freezing Rain\" or \"ZR/S Mix\"\n phase == 3 for \"Snow\"\n tmp : the temperature at the level that is the precipitation source\n st : a string naming the precipitation type\n\n '''\n # Needs to be tested\n\n plevel = 0\n phase = -1\n\n # First, determine whether Upward VVELS are available. If they are, \n # use them to determine level where precipitation will develop.\n avail = np.ma.where(prof.omeg < .1)[0]\n\n hght_agl = interp.to_agl(prof, prof.hght)\n if len(avail) < 5:\n # No VVELS...must look for saturated level \n # Find the highest near-saturated 50mb layer below 5km agl\n below_5km_idx = np.ma.where((hght_agl < 5000.) &\\\n (hght_agl >= 0))[0]\n\n else:\n # Use the VV to find the source of precip.\n below_5km_idx = np.ma.where((hght_agl < 5000.) 
&\\\n (hght_agl >= 0) &\\\n (prof.omeg <= 0))[0]\n\n # Compute the RH at the top and bottom of 50 mb layers\n rh = thermo.relh(prof.pres, prof.tmpc, prof.dwpc)[below_5km_idx]\n sats = np.ma.where(rh > 80)[0]\n new_pres = prof.pres[below_5km_idx][sats] + 50.\n new_temp = interp.temp(prof, new_pres)\n new_dwpt = interp.dwpt(prof, new_pres)\n rh_plus50 = thermo.relh(new_pres, new_temp, new_dwpt)\n # Find layers where the RH is >80% at the top and bottom\n layers_idx = np.ma.where(rh_plus50 > 80)[0]\n\n if len(layers_idx) == 0:\n # Found no precipitation source layers\n st = \"N/A\"\n return prof.missing, phase, prof.missing, st\n\n # Find the highest layer up via the largest index\n top_most_layer = np.ma.max(layers_idx)\n plevel = new_pres[top_most_layer] - 25.\n\n # Determine the initial precip type based on the temp in the layer\n tmp = interp.temp(prof, plevel)\n if tmp > 0:\n phase = 0\n st = \"Rain\"\n elif tmp <= 0 and tmp > -5:\n phase = 1\n st = \"Freezing Rain\"\n elif tmp <=-5 and tmp > -9:\n phase = 1\n st = \"ZR/S Mix\"\n elif tmp <= -9:\n phase = 3\n st = \"Snow\"\n else:\n st = \"N/A\"\n\n return plevel, phase, tmp, st\n\ndef posneg_temperature(prof, start=-1):\n '''\n Positive/Negative Temperature profile\n Adapted from SHARP code donated by Rich Thompson (SPC)\n\n Description:\n This routine calculates the positive (above 0 C) and negative (below 0 C)\n areas of the temperature profile starting from a specified pressure (start).\n If the specified pressure is not given, this routine calls init_phase()\n to obtain the pressure level the precipitation expected to fall begins at.\n\n This is an routine considers only the temperature profile as opposed to the wet-bulb\n profile.\n\n Parameters\n ----------\n prof : Profile object\n start : the pressure level the precpitation originates from (found by calling init_phase())\n\n Returns\n -------\n pos : the positive area (> 0 C) of the temperature profile in J/kg\n neg : the negative area (< 0 C) of the temperature profile in J/kg\n top : the top of the precipitation layer pressure in mb\n bot : the bottom of the precipitation layer pressure in mb\n\n '''\n # Needs to be tested\n \n # If there is no sounding, don't compute anything\n if utils.QC(interp.temp(prof, 500)) == False and utils.QC(interp.temp(prof, 850)) == False:\n return np.ma.masked, np.ma.masked, np.ma.masked, np.ma.masked\n\n # Find lowest obs in layer\n lower = prof.pres[prof.get_sfc()]\n lptr = prof.get_sfc()\n\n # Find the highest obs in the layer\n if start == -1:\n lvl, phase, st = init_phase(prof)\n if lvl > 0:\n upper = lvl\n else:\n upper = 500.\n else:\n upper = start\n\n # Find the level where the pressure is just greater than the upper pressure\n idxs = np.where(prof.pres > upper)[0]\n if len(idxs) == 0:\n uptr = 0\n else:\n uptr = idxs[-1]\n\n # Start with the top layer\n pe1 = upper;\n h1 = interp.hght(prof, pe1)\n te1 = interp.temp(prof, pe1)\n tp1 = 0\n\n warmlayer = coldlayer = lyre = totp = totn = tote = ptop = pbot = lyrlast = 0\n\n for i in np.arange(uptr, lptr-1, -1):\n pe2 = prof.pres[i]\n h2 = prof.hght[i]\n te2 = interp.temp(prof, pe2)\n tp2 = 0\n tdef1 = (0 - te1) / thermo.ctok(te1);\n tdef2 = (0 - te2) / thermo.ctok(te2);\n lyrlast = lyre;\n lyre = 9.8 * (tdef1 + tdef2) / 2.0 * (h2 - h1);\n\n # Has a warm layer been found yet?\n if te2 > 0:\n if warmlayer == 0:\n warmlayer = 1\n ptop = pe2\n\n # Has a cold layer been found yet?\n if te2 < 0:\n if warmlayer == 1 and coldlayer == 0:\n coldlayer = 1\n pbot = pe2\n\n if warmlayer > 0:\n if lyre > 
0:\n totp += lyre\n else:\n totn += lyre\n tote += lyre\n\n pelast = pe1\n pe1 = pe2\n h1 = h2\n te1 = te2\n tp1 = tp2\n \n if warmlayer == 1 and coldlayer == 1:\n pos = totp\n neg = totn\n top = ptop\n bot = pbot\n else:\n neg = 0\n pos = 0\n bot = 0\n top = 0\n\n return pos, neg, top, bot\n\n\ndef posneg_wetbulb(prof, start=-1):\n '''\n Positive/Negative Wetbulb profile\n Adapted from SHARP code donated by Rich Thompson (SPC)\n\n Description:\n This routine calculates the positive (above 0 C) and negative (below 0 C)\n areas of the wet bulb profile starting from a specified pressure (start).\n If the specified pressure is not given, this routine calls init_phase()\n to obtain the pressure level the precipitation expected to fall begins at.\n\n This is an routine considers the wet-bulb profile instead of the temperature profile\n in case the profile beneath the profile beneath the falling precipitation becomes saturated.\n\n Parameters\n ----------\n prof : Profile object\n start : the pressure level the precpitation originates from (found by calling init_phase())\n\n Returns\n -------\n pos : the positive area (> 0 C) of the wet-bulb profile in J/kg\n neg : the negative area (< 0 C) of the wet-bulb profile in J/kg\n top : the top of the precipitation layer pressure in mb\n bot : the bottom of the precipitation layer pressure in mb\n\n '''\n # Needs to be tested\n\n # If there is no sounding, don't compute anything\n if utils.QC(interp.temp(prof, 500)) == False and utils.QC(interp.temp(prof, 850)) == False:\n return np.ma.masked, np.ma.masked, np.ma.masked, np.ma.masked\n\n # Find lowest obs in layer\n lower = prof.pres[prof.get_sfc()]\n lptr = prof.get_sfc()\n\n # Find the highest obs in the layer\n if start == -1:\n lvl, phase, st = init_phase(prof)\n if lvl > 0:\n upper = lvl\n else:\n upper = 500.\n else:\n upper = start\n\n # Find the level where the pressure is just greater than the upper pressure\n idxs = np.where(prof.pres > upper)[0]\n if len(idxs) == 0:\n uptr = 0\n else:\n uptr = idxs[-1]\n\n # Start with the upper layer\n pe1 = upper;\n h1 = interp.hght(prof, pe1);\n te1 = thermo.wetbulb(pe1, interp.temp(prof, pe1), interp.dwpt(prof, pe1))\n tp1 = 0\n\n warmlayer = coldlayer = lyre = totp = totn = tote = ptop = pbot = lyrlast = 0\n\n for i in np.arange(uptr, lptr-1, -1):\n pe2 = prof.pres[i]\n h2 = prof.hght[i]\n te2 = thermo.wetbulb(pe2, interp.temp(prof, pe2), interp.dwpt(prof, pe2))\n tp2 = 0\n tdef1 = (0 - te1) / thermo.ctok(te1);\n tdef2 = (0 - te2) / thermo.ctok(te2);\n lyrlast = lyre;\n lyre = 9.8 * (tdef1 + tdef2) / 2.0 * (h2 - h1);\n\n # Has a warm layer been found yet?\n if te2 > 0:\n if warmlayer == 0:\n warmlayer = 1\n ptop = pe2\n\n # Has a cold layer been found yet?\n if te2 < 0:\n if warmlayer == 1 and coldlayer == 0:\n coldlayer = 1\n pbot = pe2\n\n if warmlayer > 0:\n if lyre > 0:\n totp += lyre\n else:\n totn += lyre\n tote += lyre\n\n pelast = pe1\n pe1 = pe2\n h1 = h2\n te1 = te2\n tp1 = tp2\n \n if warmlayer == 1 and coldlayer == 1:\n pos = totp\n neg = totn\n top = ptop\n bot = pbot\n else:\n neg = 0\n pos = 0\n bot = 0\n top = 0\n\n return pos, neg, top, bot\n\ndef best_guess_precip(prof, init_phase, init_lvl, init_temp, tpos, tneg):\n '''\n Best Guess Precipitation type\n Adapted from SHARP code donated by Rich Thompson (SPC)\n\n Description:\n This algorithm utilizes the output from the init_phase() and posneg_temperature()\n functions to make a best guess at the preciptation type one would observe\n at the surface given a thermodynamic profile.\n\n 
Precipitation Types Supported:\n - None\n - Rain\n - Snow\n - Sleet and Snow\n - Sleet\n - Freezing Rain/Drizzle\n - Unknown\n\n Parameters\n ----------\n prof : Profile object\n init_phase : the initial phase of the precipitation (int) (see 2nd value returned from init_phase())\n init_lvl : the inital level of the precipitation source (mb) (see 1st value returned from init_phase())\n init_temp : the inital level of the precipitation source (C) (see 3rd value returned from init_phase())\n tpos : the positive area (> 0 C) in the temperature profile (J/kg)\n\n Returns\n -------\n precip_type : a string containing the best guess precipitation type\n '''\n # Needs to be tested\n\n precip_type = None\n\n # Case: No precip\n if init_phase < 0:\n precip_type = \"None.\"\n\n # Case: Always too warm - Rain\n elif init_phase == 0 and tneg >=0 and prof.tmpc[prof.get_sfc()] > 0:\n precip_type = \"Rain.\"\n\n # Case: always too cold\n elif init_phase == 3 and tpos <= 0 and prof.tmpc[prof.get_sfc()] <= 0:\n precip_type = \"Snow.\"\n\n # Case: ZR too warm at sfc - Rain\n elif init_phase == 1 and tpos <= 0 and prof.tmpc[prof.get_sfc()] > 0:\n precip_type = \"Rain.\"\n\n # Case: non-snow init...always too cold - Initphase & sleet\n elif init_phase == 1 and tpos <= 0 and prof.tmpc[prof.get_sfc()] <= 0:\n #print interp.to_agl(prof, interp.hght(prof, init_lvl))\n if interp.to_agl(prof, interp.hght(prof, init_lvl)) >= 3000:\n if init_temp <= -4:\n precip_type = \"Sleet and Snow.\"\n else:\n precip_type = \"Sleet.\"\n else:\n precip_type = \"Freezing Rain/Drizzle.\"\n\n # Case: Snow...but warm at sfc\n elif init_phase == 3 and tpos <= 0 and prof.tmpc[prof.get_sfc()] > 0:\n if prof.tmpc[prof.get_sfc()] > 4:\n precip_type = \"Rain.\"\n else:\n precip_type = \"Snow.\"\n \n # Case: Warm layer.\n elif tpos > 0:\n x1 = tpos\n y1 = -tneg\n y2 = (0.62 * x1) + 60.0\n if y1 > y2:\n precip_type = \"Sleet.\"\n else:\n if prof.tmpc[prof.get_sfc()] <= 0:\n precip_type = \"Freezing Rain.\"\n else:\n precip_type = \"Rain.\"\n else:\n precip_type = \"Unknown.\"\n\n return precip_type\n\ndef precip_type(prof):\n '''\n OLD PROPOSED FUNCTION\n '''\n #\n # This function looks at the current SHARPPY profile (prof)\n # and makes a single guess of the precipitation type associated with\n # that profile.\n #\n # it would be nice to produce probabilites of the preciptation type using\n # different methods, but it's 12 AM now.\n #\n # it would also be nice to have BUFKIT's precpitation intensity and type algorithm\n\n # Step 1: Check for ice in a cloud (is there a cloud with temps of -10 to -18 C?)\n\n # if no ice in cloud, check surface temp\n # if surface temp > 0 C, it's rain\n # if surface temp < 0 C, it's freezing rain\n\n # if there is ice in the cloud, what are the temperatures below it?\n # if the temperature below is less than 0.5 C, it's snow, but ony if T_w <= 0 C\n # otherwise if T_w > 0 C in the lowest 100 meters, and sfc T_w > 33 F, it's rain\n\n # if the temperatures below the ice cloud are between 0.5 to 3 C, there will be melting\n # if T_w or T are <= 0C, it's a mix (if warm layer is near 1 C) or sleet ( if warm layer is near 3 C)\n # if T_w >= 0 C in lowest 100 m and T_w > 33F, it's rain or drizzle\n\n # if the temperatures below the ice cloud are > 3 C, there's total melting\n # if minimum cold layer temp is > -12 C and sfc_T <= 0 C, it's freezing rain\n # if minimum cold layer temp is > -12 C and sfc_T > 0 C, it's rain.\n # if minimum cold layer temp is < -12 C and sfc_T_w < 33 F, it's snow and sleet\n return\n\ndef 
possible_watch(prof):\n '''\n Possible Weather/Hazard/Watch Type\n \n This function generates a list of possible significant weather types\n one can expect given a Profile object. (Currently works only for ConvectiveProfile.)\n\n These possible weather types are computed via fuzzy logic through set thresholds that\n have been found through a.) analyzing ingredients within the profile and b.) combining those ingredients\n with forecasting experience to produce a suggestion of what hazards may exist. Some of the logic is \n based on experience, some of it is based on actual National Weather Service criteria.\n\n This function has not been formally verified and is not meant to be comprehensive nor\n a source of strict guidance for weather forecasters. As always, the raw data is to be \n consulted.\n\n This code base is currently under development.\n\n Wx Categories (ranked in terms of severity):\n - PDS TOR\n - TOR\n - MRGL TOR\n - SVR\n - MRGL SVR\n - FLASH FLOOD\n - BLIZZARD\n - WINTER STORM\n - WIND CHILL\n - FIRE WEATHER\n - EXCESSIVE HEAT\n - FREEZE\n \n Suggestions for severe/tornado thresholds were contributed by Rich Thompson - NOAA Storm Prediction Center\n\n Parameters\n ----------\n prof : ConvectiveProfile object\n\n Returns\n -------\n watch_types : a list of strings containing the weather types in code\n colors : a list of the HEX colors corresponding to each weather type\n '''\n \n watch_types = []\n colors = []\n \n lr1 = params.lapse_rate( prof, 0, 1000, pres=False )\n stp_eff = prof.stp_cin\n stp_fixed = prof.stp_fixed\n srw_4_6km = utils.mag(prof.srw_4_6km[0],prof.srw_4_6km[1])\n sfc_8km_shear = utils.mag(prof.sfc_8km_shear[0],prof.sfc_8km_shear[1])\n right_esrh = prof.right_esrh[0]\n srh1km = prof.srh1km[0]\n if stp_eff >= 3 and stp_fixed >= 3 and srh1km >= 200 and right_esrh >= 200 and srw_4_6km >= 15.0 and \\\n sfc_8km_shear > 45.0 and prof.sfcpcl.lclhght < 1000. and prof.mlpcl.lclhght < 1200 and lr1 >= 5.0 and \\\n prof.mlpcl.bminus > -50 and prof.ebotm == 0:\n watch_types.append(\"PDS TOR\")\n colors.append(constants.MAGENTA)\n elif (stp_eff >= 3 or stp_fixed >= 4) and prof.mlpcl.bminus > -125. and prof.ebotm == 0:\n watch_types.append(\"TOR\")\n colors.append(\"#FF0000\")\n elif (stp_eff >= 1 or stp_fixed >= 1) and (srw_4_6km >= 15.0 or sfc_8km_shear >= 40) and \\\n prof.mlpcl.bminus > -50 and prof.ebotm == 0:\n watch_types.append(\"TOR\")\n colors.append(\"#FF0000\")\n elif (stp_eff >= 1 or stp_fixed >= 1) and ((prof.low_rh + prof.mid_rh)/2. 
>= 60) and lr1 >= 5.0 and \\\n prof.mlpcl.bminus > -50 and prof.ebotm == 0:\n watch_types.append(\"TOR\")\n colors.append(\"#FF0000\")\n elif (stp_eff >= 1 or stp_fixed >= 1) and prof.mlpcl.bminus > -150 and prof.ebotm == 0.:\n watch_types.append(\"MRGL TOR\")\n colors.append(\"#FF0000\")\n elif (stp_eff >= 0.5 and prof.right_esrh >= 150) or (stp_fixed >= 0.5 and srh1km >= 150) and \\\n prof.mlpcl.bminus > -50 and prof.ebotm == 0.:\n watch_types.append(\"MRGL TOR\")\n colors.append(\"#FF0000\")\n\n #SVR LOGIC\n if (stp_fixed >= 1.0 or prof.right_scp >= 4.0 or stp_eff >= 1.0) and prof.mupcl.bminus >= -50:\n colors.append(\"#FFFF00\")\n watch_types.append(\"SVR\")\n elif prof.right_scp >= 2.0 and (prof.ship >= 1.0 or prof.dcape >= 750) and prof.mupcl.bminus >= -50:\n colors.append(\"#FFFF00\")\n watch_types.append(\"SVR\")\n elif prof.sig_severe >= 30000 and prof.mmp >= 0.6 and prof.mupcl.bminus >= -50:\n colors.append(\"#FFFF00\")\n watch_types.append(\"SVR\")\n elif prof.mupcl.bminus >= -75.0 and (prof.wndg >= 0.5 or prof.ship >= 0.5 or prof.right_scp >= 0.5):\n colors.append(\"#0099CC\")\n watch_types.append(\"MRGL SVR\")\n \n # Flash Flood Watch PWV is larger than normal and cloud layer mean wind speeds are slow\n # This is trying to capture the ingredients of moisture and advection speed, but cannot\n # handle precipitation efficiency or vertical motion\n pw_climo_flag = prof.pwv_flag\n pwat = prof.pwat\n upshear = utils.comp2vec(prof.upshear_downshear[0],prof.upshear_downshear[1])\n if pw_climo_flag >= 2 and upshear[1] < 25:\n watch_types.append(\"FLASH FLOOD\")\n colors.append(\"#5FFB17\")\n #elif pwat > 1.3 and upshear[1] < 25:\n # watch_types.append(\"FLASH FLOOD\")\n # colors.append(\"#5FFB17\")\n \n # Blizzard if sfc winds > 35 mph and precip type detects snow \n # Still needs to be tied into the \n sfc_wspd = utils.KTS2MPH(prof.wspd[prof.get_sfc()])\n if sfc_wspd > 35. and prof.tmpc[prof.get_sfc()] <= 0:\n watch_types.append(\"BLIZZARD\")\n colors.append(\"#3366FF\")\n \n # Wind Chill (if wind chill gets below -20 F)\n if wind_chill(prof) < -20.:\n watch_types.append(\"WIND CHILL\")\n colors.append(\"#3366FF\")\n \n # Fire WX (sfc RH < 30% and sfc_wind speed > 15 mph) (needs to be updated to include SPC Fire Wx Indices)\n if sfc_wspd > 15. and thermo.relh(prof.pres[prof.get_sfc()], prof.tmpc[prof.get_sfc()], prof.dwpc[prof.get_sfc()]) < 30. :\n watch_types.append(\"FIRE WEATHER\")\n colors.append(\"#FF9900\")\n \n # Excessive Heat (if Max_temp > 105 F and sfc dewpoint > 75 F)\n if thermo.ctof(prof.dwpc[prof.get_sfc()]) > 75. and thermo.ctof(params.max_temp(prof)) >= 105.:\n watch_types.append(\"EXCESSIVE HEAT\")\n colors.append(\"#CC33CC\")\n \n # Freeze (checks to see if wetbulb is below freezing and temperature isn't and wind speeds are low)\n # Still in testing.\n if thermo.ctof(prof.dwpc[prof.get_sfc()]) <= 32. and thermo.ctof(prof.wetbulb[prof.get_sfc()]) <= 32 and prof.wspd[prof.get_sfc()] < 5.:\n watch_types.append(\"FREEZE\")\n colors.append(\"#3366FF\")\n \n watch_types.append(\"NONE\")\n colors.append(\"#FFCC33\")\n \n return np.asarray(watch_types), np.asarray(colors)\n\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.ma.where", "numpy.ma.max", "numpy.where" ] ]
tmct/statsmodels
[ "305258f5245e76409f6deab24d217ffbbc352ba0" ]
[ "statsmodels/stats/dist_dependence_measures.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\" Distance dependence measure and the dCov test.\n\nImplementation of Székely et al. (2007) calculation of distance\ndependence statistics, including the Distance covariance (dCov) test\nfor independence of random vectors of arbitrary length.\n\nAuthor: Ron Itzikovitch\n\nReferences\n----------\n.. Szekely, G.J., Rizzo, M.L., and Bakirov, N.K. (2007)\n \"Measuring and testing dependence by correlation of distances\".\n Annals of Statistics, Vol. 35 No. 6, pp. 2769-2794.\n\n\"\"\"\nimport numpy as np\nimport warnings\nfrom collections import namedtuple\n\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.stats import norm\n\n\nDistDependStat = namedtuple(\n \"DistDependStat\",\n [\"test_statistic\", \"distance_correlation\",\n \"distance_covariance\", \"dvar_x\", \"dvar_y\", \"S\"],\n)\n\n\ndef distance_covariance_test(x, y, B=None, method=\"auto\"):\n r\"\"\"The Distance Covariance (dCov) test\n\n Apply the Distance Covariance (dCov) test of independence to `x` and `y`.\n This test was introduced in [1]_, and is based on the distance covariance\n statistic. The test is applicable to random vectors of arbitrary length\n (see the notes section for more details).\n\n Parameters\n ----------\n x : array_like, 1-D or 2-D\n If `x` is 1-D than it is assumed to be a vector of observations of a\n single random variable. If `x` is 2-D than the rows should be\n observations and the columns are treated as the components of a\n random vector, i.e., each column represents a different component of\n the random vector `x`.\n y : array_like, 1-D or 2-D\n Same as `x`, but only the number of observation has to match that of\n `x`. If `y` is 2-D note that the number of columns of `y` (i.e., the\n number of components in the random vector) does not need to match\n the number of columns in `x`.\n B : int, optional, default=`None`\n The number of iterations to perform when evaluating the null\n distribution of the test statistic when the `emp` method is\n applied (see below). if `B` is `None` than as in [1]_ we set\n `B` to be ``B = 200 + 5000/n``, where `n` is the number of\n observations.\n method : {'auto', 'emp', 'asym'}, optional, default=auto\n The method by which to obtain the p-value for the test.\n\n - `auto` : Default method. The number of observations will be used to\n determine the method.\n - `emp` : Empirical evaluation of the p-value using permutations of\n the rows of `y` to obtain the null distribution.\n - `asym` : An asymptotic approximation of the distribution of the test\n statistic is used to find the p-value.\n\n Returns\n -------\n test_statistic : float\n The value of the test statistic used in the test.\n pval : float\n The p-value.\n chosen_method : str\n The method that was used to obtain the p-value. Mostly relevant when\n the function is called with `method='auto'`.\n\n Notes\n -----\n The test applies to random vectors of arbitrary dimensions, i.e., `x`\n can be a 1-D vector of observations for a single random variable while\n `y` can be a `k` by `n` 2-D array (where `k > 1`). In other words, it\n is also possible for `x` and `y` to both be 2-D arrays and have the\n same number of rows (observations) while differing in the number of\n columns.\n\n As noted in [1]_ the statistics are sensitive to all types of departures\n from independence, including nonlinear or nonmonotone dependence\n structure.\n\n References\n ----------\n .. [1] Szekely, G.J., Rizzo, M.L., and Bakirov, N.K. 
(2007)\n \"Measuring and testing by correlation of distances\".\n Annals of Statistics, Vol. 35 No. 6, pp. 2769-2794.\n\n Examples\n --------\n >>> from statsmodels.stats.dist_dependence_measures import\n ... distance_covariance_test\n >>> data = np.random.rand(1000, 10)\n >>> x, y = data[:, :3], data[:, 3:]\n >>> x.shape\n (1000, 3)\n >>> y.shape\n (1000, 7)\n >>> distance_covariance_test(x, y)\n (1.0426404792714983, 0.2971148340813543, 'asym')\n # (test_statistic, pval, chosen_method)\n\n \"\"\"\n x, y = _validate_and_tranform_x_and_y(x, y)\n\n n = x.shape[0]\n stats = distance_statistics(x, y)\n\n if method == \"auto\" and n <= 500 or method == \"emp\":\n chosen_method = \"emp\"\n test_statistic, pval = _empirical_pvalue(x, y, B, n, stats)\n\n elif method == \"auto\" and n > 500 or method == \"asym\":\n chosen_method = \"asym\"\n test_statistic, pval = _asymptotic_pvalue(stats)\n\n else:\n raise ValueError(\"Unknown 'method' parameter: {}\".format(method))\n\n # In case we got an extreme p-value (0 or 1) when using the empirical\n # distribution of the test statistic under the null, we fall back\n # to the asymptotic approximation.\n if chosen_method == \"emp\" and pval in [0, 1]:\n msg = (\n \"p-value was {} when using the empirical method. \".format(pval)\n + \"The asymptotic approximation will be used instead\"\n )\n warnings.warn(msg)\n _, pval = _asymptotic_pvalue(stats)\n\n return test_statistic, pval, chosen_method\n\n\ndef _validate_and_tranform_x_and_y(x, y):\n r\"\"\"Ensure `x` and `y` have proper shape and transform/reshape them if\n required.\n\n Parameters\n ----------\n x : array_like, 1-D or 2-D\n If `x` is 1-D than it is assumed to be a vector of observations of a\n single random variable. If `x` is 2-D than the rows should be\n observations and the columns are treated as the components of a\n random vector, i.e., each column represents a different component of\n the random vector `x`.\n y : array_like, 1-D or 2-D\n Same as `x`, but only the number of observation has to match that of\n `x`. If `y` is 2-D note that the number of columns of `y` (i.e., the\n number of components in the random vector) does not need to match\n the number of columns in `x`.\n\n Returns\n -------\n x : array_like, 1-D or 2-D\n y : array_like, 1-D or 2-D\n\n Raises\n ------\n ValueError\n If `x` and `y` have a different number of observations.\n\n \"\"\"\n x = np.asanyarray(x)\n y = np.asanyarray(y)\n\n if x.shape[0] != y.shape[0]:\n raise ValueError(\n \"x and y must have the same number of observations (rows).\"\n )\n\n if len(x.shape) == 1:\n x = x.reshape((x.shape[0], 1))\n\n if len(y.shape) == 1:\n y = y.reshape((y.shape[0], 1))\n\n return x, y\n\n\ndef _empirical_pvalue(x, y, B, n, stats):\n r\"\"\"Calculate the empirical p-value based on permutations of `y`'s rows\n\n Parameters\n ----------\n x : array_like, 1-D or 2-D\n If `x` is 1-D than it is assumed to be a vector of observations of a\n single random variable. If `x` is 2-D than the rows should be\n observations and the columns are treated as the components of a\n random vector, i.e., each column represents a different component of\n the random vector `x`.\n y : array_like, 1-D or 2-D\n Same as `x`, but only the number of observation has to match that of\n `x`. 
If `y` is 2-D note that the number of columns of `y` (i.e., the\n number of components in the random vector) does not need to match\n the number of columns in `x`.\n B : int\n The number of iterations when evaluating the null distribution.\n n : Number of observations found in each of `x` and `y`.\n stats: namedtuple\n The result obtained from calling ``distance_statistics(x, y)``.\n\n Returns\n -------\n test_statistic : float\n The empirical test statistic.\n pval : float\n The empirical p-value.\n\n \"\"\"\n B = int(B) if B else int(np.floor(200 + 5000 / n))\n empirical_dist = _get_test_statistic_distribution(x, y, B)\n pval = 1 - np.searchsorted(\n sorted(empirical_dist), stats.test_statistic\n ) / len(empirical_dist)\n test_statistic = stats.test_statistic\n\n return test_statistic, pval\n\n\ndef _asymptotic_pvalue(stats):\n r\"\"\"Calculate the p-value based on an approximation of the distribution of\n the test statistic under the null.\n\n Parameters\n ----------\n stats: namedtuple\n The result obtained from calling ``distance_statistics(x, y)``.\n\n Returns\n -------\n test_statistic : float\n The test statistic.\n pval : float\n The asymptotic p-value.\n\n \"\"\"\n test_statistic = np.sqrt(stats.test_statistic / stats.S)\n pval = (1 - norm.cdf(test_statistic)) * 2\n\n return test_statistic, pval\n\n\ndef _get_test_statistic_distribution(x, y, B):\n r\"\"\"\n Parameters\n ----------\n x : array_like, 1-D or 2-D\n If `x` is 1-D than it is assumed to be a vector of observations of a\n single random variable. If `x` is 2-D than the rows should be\n observations and the columns are treated as the components of a\n random vector, i.e., each column represents a different component of\n the random vector `x`.\n y : array_like, 1-D or 2-D\n Same as `x`, but only the number of observation has to match that of\n `x`. If `y` is 2-D note that the number of columns of `y` (i.e., the\n number of components in the random vector) does not need to match\n the number of columns in `x`.\n B : int\n The number of iterations to perform when evaluating the null\n distribution.\n\n Returns\n -------\n emp_dist : array_like\n The empirical distribution of the test statistic.\n\n \"\"\"\n y = y.copy()\n emp_dist = np.zeros(B)\n x_dist = squareform(pdist(x, \"euclidean\"))\n\n for i in range(B):\n np.random.shuffle(y)\n emp_dist[i] = distance_statistics(x, y, x_dist=x_dist).test_statistic\n\n return emp_dist\n\n\ndef distance_statistics(x, y, x_dist=None, y_dist=None):\n r\"\"\"Calculate various distance dependence statistics.\n\n Calculate several distance dependence statistics as described in [1]_.\n\n Parameters\n ----------\n x : array_like, 1-D or 2-D\n If `x` is 1-D than it is assumed to be a vector of observations of a\n single random variable. If `x` is 2-D than the rows should be\n observations and the columns are treated as the components of a\n random vector, i.e., each column represents a different component of\n the random vector `x`.\n y : array_like, 1-D or 2-D\n Same as `x`, but only the number of observation has to match that of\n `x`. 
If `y` is 2-D note that the number of columns of `y` (i.e., the\n number of components in the random vector) does not need to match\n the number of columns in `x`.\n x_dist : array_like, 2-D, optional\n A square 2-D array_like object whose values are the euclidean\n distances between `x`'s rows.\n y_dist : array_like, 2-D, optional\n A square 2-D array_like object whose values are the euclidean\n distances between `y`'s rows.\n\n Returns\n -------\n namedtuple\n A named tuple of distance dependence statistics (DistDependStat) with\n the following values:\n\n - test_statistic : float - The \"basic\" test statistic (i.e., the one\n used when the `emp` method is chosen when calling\n ``distance_covariance_test()``\n - distance_correlation : float - The distance correlation\n between `x` and `y`.\n - distance_covariance : float - The distance covariance of\n `x` and `y`.\n - dvar_x : float - The distance variance of `x`.\n - dvar_y : float - The distance variance of `y`.\n - S : float - The mean of the euclidean distances in `x` multiplied\n by those of `y`. Mostly used internally.\n\n References\n ----------\n .. [1] Szekely, G.J., Rizzo, M.L., and Bakirov, N.K. (2007)\n \"Measuring and testing dependence by correlation of distances\".\n Annals of Statistics, Vol. 35 No. 6, pp. 2769-2794.\n\n Examples\n --------\n\n >>> from statsmodels.stats.dist_dependence_measures import\n ... distance_statistics\n >>> distance_statistics(np.random.random(1000), np.random.random(1000))\n DistDependStat(test_statistic=0.07948284320205831,\n distance_correlation=0.04269511890990793,\n distance_covariance=0.008915315092696293,\n dvar_x=0.20719027438266704, dvar_y=0.21044934264957588,\n S=0.10892061635588891)\n\n \"\"\"\n x, y = _validate_and_tranform_x_and_y(x, y)\n\n n = x.shape[0]\n\n a = x_dist if x_dist is not None else squareform(pdist(x, \"euclidean\"))\n b = y_dist if y_dist is not None else squareform(pdist(y, \"euclidean\"))\n\n a_row_means = a.mean(axis=0, keepdims=True)\n b_row_means = b.mean(axis=0, keepdims=True)\n a_col_means = a.mean(axis=1, keepdims=True)\n b_col_means = b.mean(axis=1, keepdims=True)\n a_mean = a.mean()\n b_mean = b.mean()\n\n A = a - a_row_means - a_col_means + a_mean\n B = b - b_row_means - b_col_means + b_mean\n\n S = a_mean * b_mean\n dcov = np.sqrt(np.multiply(A, B).mean())\n dvar_x = np.sqrt(np.multiply(A, A).mean())\n dvar_y = np.sqrt(np.multiply(B, B).mean())\n dcor = dcov / np.sqrt(dvar_x * dvar_y)\n\n test_statistic = n * dcov ** 2\n\n return DistDependStat(\n test_statistic=test_statistic,\n distance_correlation=dcor,\n distance_covariance=dcov,\n dvar_x=dvar_x,\n dvar_y=dvar_y,\n S=S,\n )\n\n\ndef distance_covariance(x, y):\n r\"\"\"Distance covariance.\n\n Calculate the empirical distance covariance as described in [1]_.\n\n Parameters\n ----------\n x : array_like, 1-D or 2-D\n If `x` is 1-D than it is assumed to be a vector of observations of a\n single random variable. If `x` is 2-D than the rows should be\n observations and the columns are treated as the components of a\n random vector, i.e., each column represents a different component of\n the random vector `x`.\n y : array_like, 1-D or 2-D\n Same as `x`, but only the number of observation has to match that of\n `x`. If `y` is 2-D note that the number of columns of `y` (i.e., the\n number of components in the random vector) does not need to match\n the number of columns in `x`.\n\n Returns\n -------\n float\n The empirical distance covariance between `x` and `y`.\n\n References\n ----------\n .. 
[1] Szekely, G.J., Rizzo, M.L., and Bakirov, N.K. (2007)\n \"Measuring and testing dependence by correlation of distances\".\n Annals of Statistics, Vol. 35 No. 6, pp. 2769-2794.\n\n Examples\n --------\n\n >>> from statsmodels.stats.dist_dependence_measures import\n ... distance_covariance\n >>> distance_covariance(np.random.random(1000), np.random.random(1000))\n 0.007575063951951362\n\n \"\"\"\n return distance_statistics(x, y).distance_covariance\n\n\ndef distance_variance(x):\n r\"\"\"Distance variance.\n\n Calculate the empirical distance variance as described in [1]_.\n\n Parameters\n ----------\n x : array_like, 1-D or 2-D\n If `x` is 1-D than it is assumed to be a vector of observations of a\n single random variable. If `x` is 2-D than the rows should be\n observations and the columns are treated as the components of a\n random vector, i.e., each column represents a different component of\n the random vector `x`.\n\n Returns\n -------\n float\n The empirical distance variance of `x`.\n\n References\n ----------\n .. [1] Szekely, G.J., Rizzo, M.L., and Bakirov, N.K. (2007)\n \"Measuring and testing dependence by correlation of distances\".\n Annals of Statistics, Vol. 35 No. 6, pp. 2769-2794.\n\n Examples\n --------\n\n >>> from statsmodels.stats.dist_dependence_measures import\n ... distance_variance\n >>> distance_variance(np.random.random(1000))\n 0.21732609190659702\n\n \"\"\"\n return distance_covariance(x, x)\n\n\ndef distance_correlation(x, y):\n r\"\"\"Distance correlation.\n\n Calculate the empirical distance correlation as described in [1]_.\n This statistic is analogous to product-moment correlation and describes\n the dependence between `x` and `y`, which are random vectors of\n arbitrary length. The statistics' values range between 0 (implies\n independence) and 1 (implies complete dependence).\n\n Parameters\n ----------\n x : array_like, 1-D or 2-D\n If `x` is 1-D than it is assumed to be a vector of observations of a\n single random variable. If `x` is 2-D than the rows should be\n observations and the columns are treated as the components of a\n random vector, i.e., each column represents a different component of\n the random vector `x`.\n y : array_like, 1-D or 2-D\n Same as `x`, but only the number of observation has to match that of\n `x`. If `y` is 2-D note that the number of columns of `y` (i.e., the\n number of components in the random vector) does not need to match\n the number of columns in `x`.\n\n Returns\n -------\n float\n The empirical distance correlation between `x` and `y`.\n\n References\n ----------\n .. [1] Szekely, G.J., Rizzo, M.L., and Bakirov, N.K. (2007)\n \"Measuring and testing dependence by correlation of distances\".\n Annals of Statistics, Vol. 35 No. 6, pp. 2769-2794.\n\n Examples\n --------\n\n >>> from statsmodels.stats.dist_dependence_measures import\n ... distance_correlation\n >>> distance_correlation(np.random.random(1000), np.random.random(1000))\n 0.04060497840149489\n\n \"\"\"\n return distance_statistics(x, y).distance_correlation\n" ]
[ [ "numpy.sqrt", "scipy.stats.norm.cdf", "numpy.multiply", "numpy.random.shuffle", "numpy.asanyarray", "scipy.spatial.distance.pdist", "numpy.floor", "numpy.zeros" ] ]
hsilva664/nstep_airl
[ "c08650d8fddf6a27081d2b63cb8f9eb23d746b6c" ]
[ "airl_state_scripts/no_transfer_scripts/pendulum_irl_state_action.py" ]
[ "import tensorflow as tf\n\nfrom sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom sandbox.rocky.tf.envs.base import TfEnv\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.envs.gym_env import GymEnv\n\n\nfrom inverse_rl.algos.irl_trpo import IRLTRPO\nfrom inverse_rl.models.imitation_learning import AIRLStateAction\nfrom inverse_rl.models.airl_state import *\nfrom inverse_rl.utils.log_utils import rllab_logdir, load_latest_experts\n\nfrom inverse_rl.utils.hyper_sweep import run_sweep_parallel, run_sweep_serial\n\n\ndef main(exp_name=None, fusion=False, visible_gpus='0', discount=0.99):\n env = TfEnv(GymEnv('Pendulum-v0', record_video=False, record_log=False))\n\n gpu_options = tf.GPUOptions(allow_growth=True,visible_device_list=args.visible_gpus)\n tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1, gpu_options=gpu_options)\n\n experts = load_latest_experts('data/pendulum', n=5, visible_gpus=visible_gpus)\n\n irl_model = AIRL(discount=discount, env=env, expert_trajs=experts, state_only=False, fusion=args.fusion, max_itrs=10)\n policy = GaussianMLPPolicy(name='policy', env_spec=env.spec, hidden_sizes=(32, 32))\n algo = IRLTRPO(\n env=env,\n policy=policy,\n irl_model=irl_model,\n n_itr=200,\n batch_size=1000,\n max_path_length=100,\n discount=discount,\n store_paths=True,\n discrim_train_itrs=50,\n irl_model_wt=1.0,\n entropy_weight=0.1, # this should be 1.0 but 0.1 seems to work better\n zero_environment_reward=True,\n baseline=LinearFeatureBaseline(env_spec=env.spec)\n )\n\n with rllab_logdir(algo=algo, dirname='data/pendulum_airl_state_action'):\n with tf.Session(config=tf_config) as sess:\n algo.train(sess)\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--visible_gpus', type=str, default='0')\n parser.add_argument('--fusion', action='store_false')\n args = parser.parse_args()\n params_dict = {\n 'fusion': [args.fusion],\n 'visible_gpus': [args.visible_gpus]\n }\n run_sweep_parallel(main, params_dict, repeat=2)\n" ]
[ [ "tensorflow.ConfigProto", "tensorflow.GPUOptions", "tensorflow.Session" ] ]
kkkris7/License-Plate-Detection---Deep-Learning
[ "31812cb21fc6fb4e8df8ed4427440d4628704e92" ]
[ "detect.py" ]
[ "import argparse\nimport shutil\nimport time\nfrom pathlib import Path\nfrom sys import platform\nimport moviepy.editor as mpe\nimport numpy as np\nimport os\n\nfrom models import *\nfrom utils.datasets import *\nfrom utils.utils import *\nimport logging\n\n\ndef detect(\n cfg,\n weights,\n images,\n output='output', # output folder\n img_size=416,\n conf_thres=0.5,\n nms_thres=0.45,\n save_txt=False,\n save_images=True,\n webcam=False\n):\n device = torch_utils.select_device()\n if os.path.exists(output):\n shutil.rmtree(output) # delete output folder\n os.makedirs(output) # make new output folder\n\n # Initialize model\n model = Darknet(cfg, img_size)\n\n # Load weights\n if weights.endswith('.pt'): # pytorch format\n if weights.endswith('yolov3.pt') and not os.path.exists(weights):\n if (platform == 'darwin') or (platform == 'linux'):\n os.system('wget https://storage.googleapis.com/ultralytics/yolov3.pt -O ' + weights)\n model.load_state_dict(torch.load(weights, map_location='cpu')['model'])\n else: # darknet format\n load_darknet_weights(model, weights)\n\n model.to(device).eval()\n\n # Set Dataloader\n if webcam:\n save_images = False\n dataloader = LoadWebcam(img_size=img_size)\n else:\n dataloader = LoadImages(images, img_size=img_size)\n\n # Get classes and colors\n classes = load_classes(parse_data_cfg('cfg/coco.data')['names'])\n colors = [[random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)] for _ in range(len(classes))]\n \n time_frame = []\n for i, (path, img, im0) in enumerate(dataloader):\n t = time.time()\n if webcam:\n print('webcam frame %g: ' % (i + 1), end='')\n else:\n print('image %g/%g %s: ' % (i + 1, len(dataloader), path), end='')\n save_path = str(Path(output) / Path(path).name)\n\n # Get detections\n img = torch.from_numpy(img).unsqueeze(0).to(device)\n if ONNX_EXPORT:\n torch.onnx.export(model, img, 'weights/model.onnx', verbose=True)\n return\n pred = model(img)\n pred = pred[pred[:, :, 4] > conf_thres] # remove boxes < threshold\n\n if len(pred) > 0:\n # Run NMS on predictions\n try :\n detections = non_max_suppression(pred.unsqueeze(0), conf_thres, nms_thres)[0]\n #print(detections)\n\n # Rescale boxes from 416 to true image size\n detections[:, :4] = scale_coords(img_size, detections[:, :4], im0.shape)\n #print(detections[:, :4])\n\n # Print results to screen\n unique_classes = detections[:, -1].cpu().unique()\n for c in unique_classes:\n n = (detections[:, -1].cpu() == c).sum()\n print('%g %ss' % (n, classes[int(c)]), end=', ')\n\n # Draw bounding boxes and labels of detections\n for x1, y1, x2, y2, conf, cls_conf, cls in detections:\n if save_txt: # Write to file\n with open(save_path + '.txt', 'a') as file:\n file.write('%g %g %g %g %g %g\\n' %\n (x1, y1, x2, y2, cls, cls_conf * conf))\n\n # Add bbox to the image\n label = '%s %.2f' % (classes[int(cls)], conf)\n plot_one_box([x1, y1, x2, y2], im0, label=label, color=colors[int(cls)])\n except:\n print(\"sth wrong\")\n\n dt = time.time() - t\n time_frame.append(dt)\n print('Done. 
(%.3fs)' % dt)\n\n if save_images: # Save generated image with detections\n cv2.imwrite(save_path, im0)\n\n if webcam: # Show live webcam\n #cv2.imshow(weights + ' - %.2f FPS' % (1 / dt), im0)\n cv2.imshow(\"im\",im0)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows() \n\n if save_images and (platform == 'darwin'): # linux/macos\n os.system('open ' + output + ' ' + save_path)\n \n print('The Processing time for frame is (%.3fs)' % float(sum(time_frame)))\n \n return time_frame\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='log.log',level=logging.DEBUG)\n start_time = time.time()\n t_preprocess = time.time()\n video = mpe.VideoFileClip('test2ot.mp4')\n #np_frame = video.get_frame(2) # get the frame at t=2 seconds\n c=0\n t = np.arange(0,32,0.03)\n\n if os.path.exists('test2ot'):\n shutil.rmtree('test2ot') # delete output folder\n os.makedirs('test2ot')\n\n for i in t:\n video.save_frame('./test2ot/'+str(i)+'.jpg', t=i) # save frame at t=2 as JPEG\n \n dt_preprocess = time.time() - t_preprocess\n\n print('The preprocess time is (%.3fs)'% dt_preprocess)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\n parser.add_argument('--weights', type=str, default='weights/best.pt', help='path to weights file')\n parser.add_argument('--images', type=str, default='data/samples', help='path to images')\n parser.add_argument('--img-size', type=int, default=416, help='size of each image dimension')\n parser.add_argument('--conf-thres', type=float, default=0.40, help='object confidence threshold')\n parser.add_argument('--nms-thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')\n opt = parser.parse_args()\n print(opt)\n\n\n\n\n \n\n with torch.no_grad():\n detect(\n opt.cfg,\n opt.weights,\n opt.images,\n img_size=opt.img_size,\n conf_thres=opt.conf_thres,\n nms_thres=opt.nms_thres\n )\n \n t_back2video = time.time()\n import moviepy.editor as mpe\n pic_list = os.listdir('./output')\n pic_list.sort(key=lambda x: float(x.split('.j')[0]))\n pic_lis = []\n for i in pic_list:\n pic_lis.append('./output/'+i)\n clip = mpe.ImageSequenceClip(pic_lis, fps=30)\n clip.write_videofile('./output_video/video_test.mp4', fps=30)\n\n dt_back2video = time.time() - t_back2video\n print('The preprocess time is (%.3fs)'% dt_back2video)\n #total_time = dt_back2video+dt_preprocess+float(sum(time_frame)\n #print(total_time)\n #print(time_frame)\n logging.info('--- %s seconds ---' % (time.time()-start_time))\n\n\n" ]
[ [ "numpy.arange" ] ]
rexarski/ANLY580-final-project
[ "093eeaab7ab9fb382ad695335243e6ded893fb27" ]
[ "model_deploy/prepare/generate_wiki_index.py" ]
[ "import pickle\nfrom time import time\n\nimport datasets\nimport numpy as np\nimport faiss\n\nif __name__ == '__main__':\n st_time = time()\n wiki40b_snippets = datasets.load_dataset('wiki_snippets', name='wiki40b_en_100_0')['train']\n wiki40b_file_name = 'wiki40b.dat'\n wiki40b_passage_reps = np.memmap(wiki40b_file_name, dtype='float32',\n mode='r', shape=(wiki40b_snippets.num_rows, 128))\n\n print('Start to load wiki index from %s' % wiki40b_file_name, time() - st_time)\n quantiser = faiss.IndexFlatL2(128)\n wiki40b_index_flat = faiss.IndexIVFFlat(quantiser, 128, 128, faiss.METRIC_L2)\n\n print('Train wiki index from %s' % wiki40b_file_name, time() - st_time)\n wiki40b_index_flat.train(wiki40b_passage_reps)\n wiki40b_index_flat.add(wiki40b_passage_reps)\n\n print('Save wiki index', time() - st_time)\n with open('../models/wiki40b_index.bin', 'wb') as f:\n pickle.dump(wiki40b_index_flat, f)\n" ]
[ [ "numpy.memmap" ] ]
vishwanath1306/katana
[ "81a4bbe798d9bee887cadd22c7a349bdab292feb" ]
[ "metagraph-plugin/tests/test_algorithms.py" ]
[ "import metagraph as mg\nimport numpy as np\nimport pytest\n\n\n# directed graph\[email protected](autouse=True)\ndef kg_from_nx_di_8_12(networkx_weighted_directed_8_12):\n pg_test_case = mg.translate(networkx_weighted_directed_8_12, mg.wrappers.Graph.KatanaGraph)\n return pg_test_case\n\n\n# undirected graph\[email protected](autouse=True)\ndef kg_from_nx_ud_8_12(networkx_weighted_undirected_8_12):\n pg_test_case = mg.translate(networkx_weighted_undirected_8_12, mg.wrappers.Graph.KatanaGraph)\n return pg_test_case\n\n\ndef test_bfs(networkx_weighted_directed_8_12, kg_from_nx_di_8_12):\n bfs1_nx = mg.algos.traversal.bfs_iter(networkx_weighted_directed_8_12, 0)\n bfs2_nx = mg.algos.traversal.bfs_iter(networkx_weighted_directed_8_12, 2)\n bfs1_kg = mg.algos.traversal.bfs_iter(kg_from_nx_di_8_12, 0, 2 ** 30 - 1)\n bfs2_kg = mg.algos.traversal.bfs_iter(kg_from_nx_di_8_12, 2, 2 ** 30 - 1)\n assert bfs1_kg.tolist() == bfs1_nx.tolist()\n assert bfs2_kg.tolist() == bfs2_nx.tolist()\n assert bfs1_kg.tolist() == [0, 1, 3, 4, 7]\n assert bfs2_kg.tolist() == [2, 4, 5, 6, 7]\n\n\ndef test_sssp_bellman_ford(networkx_weighted_directed_8_12, kg_from_nx_di_8_12):\n src_node = 0\n sssp_nx = mg.algos.traversal.bellman_ford(networkx_weighted_directed_8_12, src_node) # source node is 0\n parents_nx = sssp_nx[0]\n distances_nx = sssp_nx[1]\n assert isinstance(parents_nx, dict)\n assert isinstance(distances_nx, dict)\n assert parents_nx == {0: 0, 1: 0, 3: 0, 4: 3, 7: 4}\n assert distances_nx == {0: 0, 1: 4, 3: 2, 4: 3, 7: 7}\n parents_kg, distances_kg = mg.algos.traversal.bellman_ford(kg_from_nx_di_8_12, src_node)\n assert parents_nx == parents_kg\n assert distances_nx == distances_kg\n\n\ndef test_sssp_dijkstra(networkx_weighted_directed_8_12, kg_from_nx_di_8_12):\n src_node = 1\n sssp_nx = mg.algos.traversal.dijkstra(networkx_weighted_directed_8_12, src_node) # source node is 1\n parents_nx = sssp_nx[0]\n distances_nx = sssp_nx[1]\n assert isinstance(parents_nx, dict)\n assert isinstance(distances_nx, dict)\n assert parents_nx == {1: 1, 3: 1, 4: 3, 7: 4}\n assert distances_nx == {1: 0, 3: 3, 4: 4, 7: 8}\n parents_kg, distances_kg = mg.algos.traversal.dijkstra(kg_from_nx_di_8_12, src_node)\n assert parents_nx == parents_kg\n assert distances_nx == distances_kg\n\n\ndef test_connected_components(networkx_weighted_undirected_8_12, kg_from_nx_ud_8_12):\n cc_nx = mg.algos.clustering.connected_components(networkx_weighted_undirected_8_12)\n cc_kg = mg.algos.clustering.connected_components(kg_from_nx_ud_8_12)\n assert isinstance(cc_kg, dict)\n assert isinstance(cc_kg, dict)\n assert cc_kg == cc_nx\n\n\ndef test_pagerank(networkx_weighted_directed_8_12, kg_from_nx_di_8_12):\n pr_nx = mg.algos.centrality.pagerank(networkx_weighted_directed_8_12)\n pr_kg = mg.algos.centrality.pagerank(kg_from_nx_di_8_12)\n assert isinstance(pr_nx, dict)\n assert isinstance(pr_kg, dict)\n assert pr_nx == pr_kg\n\n\ndef test_betweenness_centrality(networkx_weighted_directed_8_12, kg_from_nx_di_8_12):\n bc_nx = mg.algos.centrality.betweenness(networkx_weighted_directed_8_12)\n bc_kg = mg.algos.centrality.betweenness(kg_from_nx_di_8_12)\n assert isinstance(bc_nx, dict)\n assert isinstance(bc_kg, dict)\n assert bc_nx == bc_kg\n\n\ndef test_triangle_counting(networkx_weighted_undirected_8_12, kg_from_nx_ud_8_12):\n tc_nx = mg.algos.clustering.triangle_count(networkx_weighted_undirected_8_12)\n tc_kg = mg.algos.clustering.triangle_count(kg_from_nx_ud_8_12)\n assert isinstance(tc_nx, int)\n assert isinstance(tc_kg, int)\n assert tc_nx == 
tc_kg\n\n\ndef test_louvain_community_detection(networkx_weighted_undirected_8_12, kg_from_nx_ud_8_12):\n lc_nx = mg.algos.clustering.louvain_community(networkx_weighted_undirected_8_12)\n lc_kg = mg.algos.clustering.louvain_community(kg_from_nx_ud_8_12)\n assert isinstance(lc_nx[0], dict)\n assert isinstance(lc_kg[0], dict)\n assert isinstance(lc_nx[1], float)\n assert isinstance(lc_kg[1], float)\n assert lc_nx[0] == lc_kg[0]\n assert lc_nx[1] == lc_kg[1]\n\n\ndef test_translation_subgraph_extraction(networkx_weighted_directed_8_12, kg_from_nx_di_8_12):\n se_nx = mg.algos.subgraph.extract_subgraph(networkx_weighted_directed_8_12, {0, 2, 3})\n se_kg = mg.algos.subgraph.extract_subgraph(kg_from_nx_di_8_12, {0, 2, 3})\n assert isinstance(se_nx, mg.wrappers.Graph.NetworkXGraph)\n assert isinstance(se_kg, mg.wrappers.Graph.NetworkXGraph)\n assert list(se_nx.value.edges(data=True)) == list(se_kg.value.edges(data=True))\n\n\ndef test_labal_propagation(networkx_weighted_undirected_8_12, kg_from_nx_ud_8_12):\n cd_nx = mg.algos.clustering.label_propagation_community(networkx_weighted_undirected_8_12)\n cd_kg = mg.algos.clustering.label_propagation_community(kg_from_nx_ud_8_12)\n assert isinstance(cd_nx, dict)\n assert isinstance(cd_kg, dict)\n assert cd_nx == cd_kg\n\n\ndef test_jaccard_similarity(networkx_weighted_undirected_8_12, kg_from_nx_ud_8_12):\n compare_node = 0\n prop_name = \"jaccard_prop_with_\" + str(compare_node)\n jcd_nx = mg.algos.traversal.jaccard(networkx_weighted_undirected_8_12, compare_node)\n jcd_kg = mg.algos.traversal.jaccard(kg_from_nx_ud_8_12, compare_node)\n assert isinstance(jcd_nx, np.ndarray)\n assert isinstance(jcd_kg, np.ndarray)\n assert jcd_nx.tolist() == jcd_kg.tolist()\n assert jcd_kg[compare_node] == 1\n\n\ndef test_local_clustering_coefficient(networkx_weighted_undirected_8_12, kg_from_nx_ud_8_12):\n prop_name = \"output_prop\"\n lcc_nx = mg.algos.clustering.local_clustering_coefficient(networkx_weighted_undirected_8_12, prop_name)\n lcc_kg = mg.algos.clustering.local_clustering_coefficient(kg_from_nx_ud_8_12, prop_name)\n assert isinstance(lcc_nx, np.ndarray)\n assert isinstance(lcc_kg, np.ndarray)\n assert lcc_kg.tolist() == lcc_nx.tolist()\n assert lcc_kg[-1] == 0\n assert not np.any(np.isnan(lcc_kg))\n" ]
[ [ "numpy.isnan" ] ]
n1labs/IDentif.AI
[ "dd69e2913c3a8e58e1c09fc0abd9e5c7cac5340a" ]
[ "monotherapy/monotherapy.py" ]
[ "import pandas as pd\r\nfrom openpyxl import load_workbook\r\nimport numpy as np\r\nimport logging\r\n\r\ndef get_raw_data(file_name, sheet_name):\r\n df = pd.read_excel(file_name, sheet_name=sheet_name).astype(float)\r\n\r\n return df\r\n\r\n\r\ndef get_control(df, dmso, extra_str):\r\n if dmso:\r\n if 'tox' in extra_str:\r\n upper_bound = df['DC'].mean()\r\n sd_upper_bound = df['DC'].std()\r\n lower_bound = 0\r\n sd_lower_bound = 0\r\n elif 'eff' in extra_str:\r\n lower_bound = df['DC'].mean()\r\n sd_lower_bound = df['DC'].std()\r\n upper_bound = df['Cells only'].mean()\r\n sd_upper_bound = df['Cells only'].std()\r\n else:\r\n if 'tox' in extra_str:\r\n upper_bound = df['NDC'].mean()\r\n sd_upper_bound = df['NDC'].std()\r\n lower_bound = 0\r\n sd_lower_bound = 0\r\n elif 'eff' in extra_str:\r\n lower_bound = df['NDC'].mean()\r\n sd_lower_bound = df['NDC'].std()\r\n upper_bound = df['Cells only'].mean()\r\n sd_upper_bound = df['Cells only'].std()\r\n\r\n return upper_bound, lower_bound, sd_upper_bound, sd_lower_bound\r\n\r\n\r\ndef calculate_y(dmso, df_eff, df_ver):\r\n\r\n # inhibition (viral plate)\r\n cell_drug_virus = df_eff.iloc[:, 1:4]\r\n e = df_eff.iloc[:, 1:4].mean(skipna=True, axis=1)\r\n sd_e = df_eff.iloc[:, 1:4].std(axis=1)\r\n cell_vehicle, cell_virus, sd_c, sd_v = get_control(df_eff, dmso, 'eff')\r\n df_eff_out = (cell_drug_virus - cell_virus) / (cell_vehicle - cell_virus) * 100\r\n df_eff_out = get_average_stdev(df_eff_out, e, sd_e, cell_vehicle, sd_c, cell_virus, sd_v, 'eff')\r\n\r\n # cytotoxicity plate (drug plate)\r\n cell_drug = df_ver.iloc[:, 1:4]\r\n e = df_ver.iloc[:, 1:4].mean(skipna=True, axis=1)\r\n sd_e = df_ver.iloc[:, 1:4].std(axis=1)\r\n cell_vehicle, cell_virus, sd_c, sd_v = get_control(df_ver, dmso, 'tox')\r\n df_tox_out = (cell_vehicle - cell_drug) / cell_vehicle * 100\r\n df_tox_out = get_average_stdev(df_tox_out, e, sd_e, cell_vehicle, sd_c, cell_virus, sd_v, 'tox')\r\n\r\n return df_eff_out, df_tox_out\r\n\r\ndef get_average_stdev(df, e, sd_e, c, sd_c, v, sd_v, extra_str):\r\n df = df.copy(deep=True)\r\n\r\n df['average'] = df.mean(skipna=True, axis=1)\r\n output = df['average']\r\n if 'eff' in extra_str:\r\n df['sd'] = np.abs(output) * np.sqrt((sd_e / (e - v)) ** 2 +\r\n ((e - c) * sd_v / ((c - v) * (e - v)))** 2 +\r\n (sd_c / (c - v)) ** 2)\r\n elif 'tox' in extra_str:\r\n df['sd'] = np.abs(output) * np.sqrt(((e * sd_c) / (c * (c - e))) ** 2 +\r\n (sd_e / (c - e)) ** 2)\r\n else:\r\n raise ValueError('wrong plate name: ' + extra_str)\r\n\r\n return df\r\n\r\n\r\ndef save_file(file_name, df1, df2, df3, drug_name):\r\n book = None\r\n try:\r\n book = load_workbook(file_name)\r\n except Exception:\r\n logging.debug('Creating new workbook at %s', file_name)\r\n with pd.ExcelWriter(file_name, engine='openpyxl') as writer:\r\n if book is not None:\r\n writer.book = book\r\n\r\n df = pd.concat([df1, df2, df3], axis=1)\r\n df.columns = ['concentration', 'inhib 1', 'inhib 2', 'inhib 3', 'inhib_avg', 'inhib_sd',\r\n 'tox 1', 'tox 2', 'tox 3', 'tox_avg', 'tox_sd']\r\n\r\n df.to_excel(writer, sheet_name=drug_name, index=False)\r\n\r\n writer.save()\r\n writer.close()\r\n\r\n pass\r\n\r\ndef calculate_zprime(df):\r\n avg = df.mean(skipna=True)\r\n sd = df.std()\r\n df = df.append(avg, ignore_index=True)\r\n df = df.append(sd, ignore_index=True)\r\n\r\n pc = df['Cells only'].iloc[-2]\r\n sd_pc = df['Cells only'].iloc[-1]\r\n nc = df['DC'].iloc[-2]\r\n sd_nc = df['DC'].iloc[-1]\r\n\r\n Z = 1 - ((3 * (sd_pc + sd_nc)) / (abs(pc - nc)))\r\n print(\"Z' for this 
experiment = \", round(Z, 3))\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n file_input = 'monotherapy.xlsx'\r\n file_output = 'monotherapy_results.xlsx'\r\n\r\n # get list: if drug was dissolved in DMSO (1), no DMSO (0)\r\n df_dmso = pd.read_excel(file_input, sheet_name='solvent')\r\n cols = ['DC', 'NDC', 'Cells only', 'Virus only']\r\n df_ctrl = pd.DataFrame(columns=cols, index=range(0,24))\r\n\r\n for i, drug_name in enumerate(df_dmso['Drug']):\r\n try:\r\n df_eff = get_raw_data(file_input, 'eff_' + drug_name)\r\n df_tox = get_raw_data(file_input, 'tox_' + drug_name)\r\n\r\n # pool controls to calculate Z'\r\n if df_dmso['Plate'][i]:\r\n df_ctrl = pd.concat([df_ctrl[cols], df_eff[cols][df_eff[cols].notnull()]], axis=0)\r\n\r\n drug_conc = df_tox.iloc[:, 0]\r\n inhibition, cytotoxicity = calculate_y(df_dmso['DMSO'][i], df_eff, df_tox)\r\n\r\n save_file(file_output, drug_conc, inhibition, cytotoxicity, drug_name=drug_name)\r\n except Exception as e:\r\n print('Failed to compile for ' + drug_name + ', ' + str(e))\r\n pass\r\n\r\n df_ctrl.dropna(how='all', axis=0, inplace=True)\r\n df_ctrl.reset_index(inplace=True, drop=True)\r\n\r\n # Calculate Z':\r\n calculate_zprime(df_ctrl)\r\n\r\n" ]
[ [ "pandas.concat", "pandas.read_excel", "numpy.abs", "numpy.sqrt", "pandas.ExcelWriter" ] ]
irinaespejo/excursion
[ "c5a5c6d882b8dd1008fbabf1a3b81eaba382bef6" ]
[ "excursion/utils.py" ]
[ "from scipy.stats import norm\nimport numpy as np\nimport torch\nimport importlib\nimport sys\n\ndef cdf(value):\n return 0.5 * (\n 1 + torch.erf((value - self.loc) * self.scale.reciprocal() / math.sqrt(2))\n )\n\n\ndef load_example(example):\n testcase = None\n if example == \"1Dtoyanalysis\":\n testcase = importlib.import_module(\"excursion.testcases.fast_1D\")\n elif example == \"1D_test\":\n testcase = importlib.import_module(\"excursion.testcases.1D_test\")\n elif example == \"2D_test\":\n testcase = importlib.import_module(\"excursion.testcases.2D_test\")\n elif example == \"3D_test\":\n testcase = importlib.import_module(\"excursion.testcases.3D_test\")\n elif example == \"2Dtoyanalysis\":\n testcase = importlib.import_module(\"excursion.testcases.fast_2D\")\n elif example == \"2Dtoyanalysis_multiple\":\n testcase = importlib.import_module(\"excursion.testcases.fast_2D_multiple\")\n elif example == \"darkhiggs\":\n testcase = importlib.import_module(\"excursion.testcases.darkhiggs\")\n elif example == \"checkmate\":\n testcase = importlib.import_module(\"excursion.testcases.checkmate\")\n elif example == \"3dfoursheets\":\n testcase = importlib.import_module(\"excursion.testcases.toy3d_foursheets\")\n elif example == \"3Dtoyanalysis\":\n testcase = importlib.import_module(\"excursion.testcases.fast_3D\")\n elif example == \"darkhiggs\":\n testcase = importlib.import_module(\"excursion.testcases.darkhiggs\")\n elif example == \"checkmate\":\n testcase = importlib.import_module(\"excursion.testcases.checkmate\")\n elif example.startswith(\"parabola_nD\"):\n testcase = importlib.import_module(\"excursion.testcases.parabola_nD\")\n # elif example == \"parabola_1D\":\n # testcase = importlib.import_module(\"excursion.testcases.parabola_1D\")\n # elif example == \"parabola_2D\":\n # testcase = importlib.import_module(\"excursion.testcases.parabola_2D\")\n # elif example == \"parabola_3D\":\n # testcase = importlib.import_module(\"excursion.testcases.parabola_3D\")\n # elif example == \"parabola_4D\":\n # testcase = importlib.import_module(\"excursion.testcases.parabola_4D\")\n else:\n raise RuntimeError(\"unnkown test case\")\n return testcase\n\n\ndef point_entropy(mu_stds, thresholds):\n thresholds = np.concatenate([[-np.inf], thresholds, [np.inf]])\n\n entropies = []\n for mu, std in mu_stds:\n entropy = 0\n for j in range(len(thresholds) - 1):\n p_within = norm(mu, std).cdf(thresholds[j + 1]) - norm(mu, std).cdf(\n thresholds[j]\n )\n p_within[p_within < 1e-9] = 1e-9\n p_within[p_within > 1 - 1e-9] = 1 - 1e-9\n entropy -= p_within * np.log(p_within)\n entropies.append(entropy)\n return np.mean(np.stack(entropies), axis=0)\n\n\ndef point_entropy_gpytorch(mu_stds, thresholds):\n thresholds = np.concatenate([[-np.inf], thresholds, [np.inf]])\n\n entropies = []\n for obs_pred in mu_stds:\n entropy = 0\n for j in range(len(thresholds) - 1):\n p_within = norm(\n obs_pred.mean.detach().numpy(), obs_pred.stddev.detach().numpy()\n ).cdf(thresholds[j + 1]) - norm(\n obs_pred.mean.detach().numpy(), obs_pred.stddev.detach().numpy()\n ).cdf(\n thresholds[j]\n )\n p_within[p_within < 1e-9] = 1e-9\n p_within[p_within > 1 - 1e-9] = 1 - 1e-9\n entropy -= p_within * np.log(p_within)\n entropies.append(entropy)\n return np.mean(np.stack(entropies), axis=0)\n\n\ndef mesh2points(grid, npoints_tuple):\n ndim = len(npoints_tuple)\n X = np.moveaxis(grid, 0, ndim).reshape(int(np.product(npoints_tuple)), ndim)\n return X\n\n\ndef points2mesh(X, npoints_tuple):\n ndim = len(npoints_tuple)\n grid = 
np.moveaxis(X.reshape(*(npoints_tuple + [ndim,])), ndim, 0)\n return grid\n\n\ndef mgrid(rangedef):\n _rangedef = np.array(rangedef, dtype=\"complex128\")\n slices = [slice(*_r) for _r in _rangedef]\n return np.mgrid[slices]\n\n\ndef values2mesh(values, rangedef, invalid, invalid_value=np.nan):\n grid = mgrid(rangedef)\n allX = mesh2points(grid, rangedef[:, 2])\n allv = np.zeros(len(allX))\n inv = invalid(allX)\n\n if torch.cuda.is_available() and type(values) == torch.Tensor:\n allv[~inv] = values.cpu()\n else:\n allv[~inv] = values\n\n if np.any(inv):\n allv[inv] = invalid_value\n return allv.reshape(*map(int, rangedef[:, 2]))\n\n\ndef h_normal(var):\n return torch.log(var * (2 * np.e * np.pi) ** 0.5)\n\n\ndef normal_pdf(x):\n return 1.0 / (2 * np.pi) ** 0.5 * torch.exp(-0.2 * x ** 2)\n\n\ndef truncated_std_conditional(Y_pred_all, a, b):\n mu_grid = Y_pred_all.mean[1:]\n std_grid = Y_pred_all.variance[1:] ** 0.5\n mu_candidate = Y_pred_all.mean[0]\n std_candidate = Y_pred_all.variance[0] ** 0.5\n rho = Y_pred_all.covariance_matrix[0, 1:] / (std_candidate * std_grid)\n\n # norm needs to be a normal distribution but in python\n normal = torch.distributions.Normal(loc=0, scale=1)\n alpha = (a - mu_grid) / std_grid\n beta = (b - mu_grid) / std_grid\n c = normal.cdf(beta) - normal.cdf(alpha)\n\n # phi(beta) = normal(0,1) at x = beta\n beta_phi_beta = beta * normal_pdf(beta)\n beta_phi_beta[~torch.isfinite(beta_phi_beta)] = 0.0\n alpha_phi_alpha = alpha * normal_pdf(alpha)\n alpha_phi_alpha[~torch.isfinite(alpha_phi_alpha)] = 0.0\n\n # unnormalized\n first_moment = mu_candidate - std_candidate * rho / c * (\n normal_pdf(beta) - normal_pdf(alpha)\n )\n\n second_moment = (\n std_candidate ** 2 * (1 - rho ** 2 / c) * (beta_phi_beta - alpha_phi_alpha)\n - mu_candidate ** 2\n + 2 * mu_candidate * first_moment\n )\n\n return second_moment ** 0.5\n" ]
[ [ "numpy.log", "numpy.product", "numpy.stack", "numpy.concatenate", "torch.exp", "torch.isfinite", "scipy.stats.norm", "torch.log", "numpy.any", "torch.cuda.is_available", "torch.distributions.Normal", "numpy.array", "numpy.moveaxis" ] ]
gnniaaa/2048-api
[ "f8383d5173a40532965636fa1fc7026fdae194f0" ]
[ "game2048/zaixian2048.py" ]
[ "import os\nfrom torch.autograd import Variable\nfrom RNN import RNN1\nimport torch\nimport torch.nn as nn\nbatch_size = 128\nNUM_CLASSES = 4 # 分类数目\nNUM_EPOCHS = 20 # 训练的迭代次数\nimport pandas as pd\nfrom game import Game\nfrom displays import Display\nfrom agents import ExpectiMaxAgent, MyAgent1\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\nimage = []\nlabel = []\n\ndisplay1 = Display()\ndisplay2 = Display()\n\nstop_number = 2048\n\ndata = pd.read_csv('./DATA.csv')\ndata = data.values\nX= data[:,0:16]\nY = data[:,16]\nimage=np.reshape(X,(-1,4,4))\nimage=image.tolist()\nlabel=Y.tolist()\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.01,shuffle=False)\nX_train = torch.FloatTensor(X_train)\nX_test = torch.FloatTensor(X_test)\nY_train = torch.LongTensor(Y_train)\nY_test = torch.LongTensor(Y_test)\n\n\ntraindataset = torch.utils.data.TensorDataset(X_train,Y_train)\ntestdataset = torch.utils.data.TensorDataset(X_test,Y_test)\n\ntrainloader = torch.utils.data.DataLoader(dataset=traindataset,\n batch_size=batch_size,\n shuffle=True,\n)\n\ntestloader = torch.utils.data.DataLoader(dataset=testdataset,\n batch_size=batch_size,\n shuffle=False\n)\n\n\n\nsequence_length = 16 # 序列长度,将图像的每一列作为一个序列\ninput_size = 1 # 输入数据的维度\nhidden_size = 256 # 隐藏层\nnum_layers = 4 # 有多少层\n\nnum_classes = 4\nbatch_size = 128\nNUM_EPOCHS = 6\nlearning_rate = 0.0001\n\nmodel = RNN1(input_size, hidden_size, num_layers, num_classes)\nmodel = model.cuda()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr = 0.001)\ncorrect=0\n\nfor epoch in range(NUM_EPOCHS):\n i = 0\n running_loss = 0\n print('EPOCHS', epoch + 1)\n correct = 0\n for i, (images, labels) in enumerate(trainloader):\n\n images, labels = Variable(images), Variable(labels)\n labels = labels.long()\n optimizer.zero_grad()\n images = images.reshape(-1, 16, 1).cuda()\n output = model(images).reshape(-1, 4).cuda()\n labels = labels.float().reshape(-1).cuda()\n loss = criterion(output, labels.long())\n loss.backward()\n optimizer.step()\n correct += (labels.cpu().numpy() == output.cpu().detach().numpy().argmax(axis=1)).sum()\n if i % 1000 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, i * len(data), len(trainloader.dataset),\n 100. 
* i / len(trainloader), loss.item()))\n print(\"train accuracy: \", correct/float(X_train.shape[0]))\ntorch.save(model.state_dict(),'modelxin1.pth' )\n\n\ncount = 0\n\n\nwhile count < 200:\n\n image = []\n label = []\n count = 0\n\n for k in range(0, 200):\n\n game = Game(4, score_to_win=2048, random=False)\n agent = ExpectiMaxAgent(game, display=display1)\n my_agent = MyAgent1(game, display=display2)\n\n while game.end == False:\n direction1 = agent.step()\n\n board = game.board\n temp = np.amax(board)\n board[board == 0] = 1\n board = np.log2(board).flatten()\n board = torch.FloatTensor(board)\n board = board.reshape((-1, 16, 1))\n board = board.cuda()\n output = model(board).reshape(-1, 4)\n direction2 = output.cpu().detach().numpy().argmax(axis=1)\n\n image.append(board.tolist())\n label.append(direction1)\n game.move(direction2[0])\n\n display1.display(game)\n\n\n if temp == 1024:\n count += 1\n print(count)\n\n if count > 150:\n break\n else:\n image = np.array(image)\n label = np.array(label)\n\n x_train, x_test, y_train, y_test = train_test_split(image, label, test_size=0.1, random_state=30)\n\n x_train = torch.FloatTensor(x_train)\n x_test = torch.FloatTensor(x_test)\n y_train = torch.LongTensor(y_train)\n y_test = torch.LongTensor(y_test)\n\n\n traindataset = torch.utils.data.TensorDataset(x_train, y_train)\n testdataset = torch.utils.data.TensorDataset(x_test, y_test)\n\n trainloader = torch.utils.data.DataLoader(dataset=traindataset,\n batch_size=batch_size,\n shuffle=True,\n )\n testloader = torch.utils.data.DataLoader(dataset=testdataset,\n batch_size=batch_size,\n shuffle=True,\n )\n\n\n correct = 0\n running_loss = 0\n\n\n for i, (images, labels) in enumerate(trainloader):\n epoch=count\n images, labels = Variable(images), Variable(labels)\n labels = labels.long()\n optimizer.zero_grad()\n images = images.reshape(-1, 16, 1).cuda()\n output = model(images).reshape(-1, 4).cuda()\n labels = labels.float().reshape(-1).cuda()\n correct += (labels.cpu().numpy() == output.cpu().detach().numpy().argmax(axis=1)).sum()\n loss = criterion(output, labels.long())\n if i % 1000 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, i * len(data), len(trainloader.dataset),\n 100. * i / len(trainloader), loss.item()))\n running_loss += float(loss)\n loss.backward()\n optimizer.step()\n print(\"train accuracy: \", correct / float(X_train.shape[0]))\n torch.save(model.state_dict(), 'modelxin1.pth')\n" ]
[ [ "torch.LongTensor", "pandas.read_csv", "torch.nn.CrossEntropyLoss", "numpy.amax", "numpy.log2", "numpy.reshape", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "sklearn.model_selection.train_test_split", "torch.FloatTensor", "numpy.array", "torch.autograd.Variable" ] ]
zigaLuksic/glitch-doctor
[ "85ae7bfec15e0a0c564f31669c3d9f04a76aa159" ]
[ "tester.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom scipy.optimize import rosen, differential_evolution\nfrom metamodel import Metamodel\nfrom sklearn import ensemble, tree\nfrom test_models.repressilator import repressilator, repressilator_bounds\n\n# -----------------------------------------------------------------------------\n# These are all the default values of the MetaModel\n# -----------------------------------------------------------------------------\nmetamodel_kwargs = {\"random_seed\": 28537}\n\nmodel_kwargs = {\"dimension\": 1,\n \"function\": lambda x: 0}\n\nsurrogate_kwargs = {\"rebuild_interval\": 100,\n \"predictor\": ensemble.RandomForestRegressor()}\n\nthreshold_kwargs = {\"type\": \"alpha-beta\",\n \"desired_surr_rate\": 0.7,\n \"acceptable_offset\": 0.05,\n \"step\": 0.0001,\n \"alpha\": 42,\n \"beta\": 10}\n\nrelevator_kwargs = {\"rebuild_interval\": 100,\n \"threshold_kwargs\": threshold_kwargs,\n \"fresh_info\": None,\n \"predictor\": ensemble.RandomForestRegressor()}\n\nhistory_kwargs = {\"size\": 500,\n \"use_size\": 200}\n# -----------------------------------------------------------------------------\n\n# Example setup for optimisation\n\n# Set up the optimisation algorithm\nseed = 12\ntestfun = repressilator\nd = 4\nbounds = repressilator_bounds\n\n# Set up the meta model\nmetamodel_kwargs = {\"seed\": seed}\nmodel_kwargs = {\"dimension\": d,\n \"function\": testfun}\nsurrogate_kwargs = {\"predictor\": ensemble.RandomForestRegressor(n_estimators=100)}\nrelevator_kwargs = {\"fresh_info\": 10,\n \"predictor\": ensemble.RandomForestRegressor(n_estimators=100)}\nhistory_kwargs = {\"size\": 400}\n\nmetamodel = Metamodel(metamodel_kwargs, model_kwargs, surrogate_kwargs,\n relevator_kwargs, history_kwargs)\n\n# Wrap function so that it keeps history\npure_history = []\ndef history_fun(x):\n result = testfun(x)\n pure_history.append((result, 1))\n return result\n\n# Also wrap metamodel so that we get history\nmm_history = []\ndef history_mm(x):\n result = metamodel.evaluate(x)\n # Find out whether the model was used\n i = metamodel.history._use_write_index - 1\n model_used = metamodel.history._use_data[i]\n mm_history.append((result, model_used))\n return result\n\n\n# Evaluate pure function\nstart = time.perf_counter()\n\nnp.random.seed(seed)\nresult = differential_evolution(history_fun, bounds, maxiter=100, tol=0.000001)\n\nprint(\"Time spent is {:.3f} s\".format(time.perf_counter() - start))\nprint(result.x, result.fun)\nprint(\"True model evaluations: {}\".format(len(pure_history)))\n\n# Evaluate metamodel\nstart = time.perf_counter()\n\nnp.random.seed(seed)\nresult = differential_evolution(history_mm, bounds, maxiter=300, tol=0.000001)\n\nprint(\"Time spent is {:.3f} s\".format(time.perf_counter() - start))\nprint(result.x, result.fun)\nprint(\"True model evaluations: {}\".format(metamodel.model_evaluations))\n\n\n# Plot metamodel procedure\nmm_history = mm_history[:]\nsurr_y = [v for (v, r) in mm_history if r == 0]\nmod_y = [v for (v, r) in mm_history if r == 1]\nsurr_x = [i for (i, (v, r)) in enumerate(mm_history) if r == 0]\nmod_x = [i for (i, (v, r)) in enumerate(mm_history) if r == 1]\n\nfig = plt.figure()\nax1 = fig.add_subplot(111)\n\nax1.scatter(mod_x, mod_y, s=2, c='b', marker=\".\", label='model')\nax1.scatter(surr_x, surr_y, s=2, c='r', marker=\".\", label='surrogate')\nplt.legend(loc='upper left')\nplt.show()\n\n# Compare best result of function and meta model\n# (number of TRUE MODEL (objective function) evaluations)\nfig = 
plt.figure()\nax1 = fig.add_subplot(111)\n\nmin_mod = [min(pure_history[:i])[0] for i in range(1, len(pure_history))]\nmin_mm = [min(mod_y[:i]) for i in range(1, len(mod_y))]\n\nax1.scatter(range(len(min_mod)), min_mod, s=2, c='b', marker=\".\", label='model')\nax1.scatter(range(len(min_mm)), min_mm, s=2, c='r', marker=\".\", label='MM')\nplt.legend(loc='upper left')\nplt.show()\n\n# Do the comparison on logarithmic scale\nfig = plt.figure()\nax1 = fig.add_subplot(111)\n\nlog_min_mod = [np.log(x) for x in min_mod]\nlog_min_mm = [np.log(x) for x in min_mm]\n\nax1.scatter(range(len(log_min_mod)), log_min_mod, s=2, c='b', marker=\".\", label='model')\nax1.scatter(range(len(log_min_mm)), log_min_mm, s=2, c='r', marker=\".\", label='MM')\nplt.legend(loc='upper left')\nplt.show()" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "matplotlib.pyplot.legend", "numpy.log", "scipy.optimize.differential_evolution", "numpy.random.seed", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
matheushent/clfs-models
[ "e2f95064cc036289c203080e75718964873a5d8a" ]
[ "utils/stacking.py" ]
[ "\"\"\"Core module for stacking related operations\"\"\"\nfrom sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone\nfrom sklearn.model_selection import KFold\n\nimport numpy as np\n\nimport pickle\nimport os\n\nclass StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):\n def __init__(self, base_models, meta_model, n_folds=5):\n\n self.base_models = base_models\n self.meta_model = meta_model\n self.n_folds = n_folds\n \n # fit the data on clones of the original models\n def fit(self, X, y):\n\n self.base_models_ = [list() for x in self.base_models]\n self.meta_model_ = clone(self.meta_model)\n kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)\n \n # train cloned base models then create out-of-fold predictions\n # that are needed to train the cloned meta-model\n out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))\n for i, model in enumerate(self.base_models):\n for train_index, holdout_index in kfold.split(X, y):\n instance = clone(model)\n self.base_models_[i].append(instance)\n instance.fit(X[train_index], y[train_index])\n y_pred = instance.predict(X[holdout_index])\n out_of_fold_predictions[holdout_index, i] = y_pred\n \n # Now train the cloned meta-model using the out-of-fold predictions as new feature\n self.meta_model_.fit(out_of_fold_predictions, y)\n return self\n \n # do the predictions of all base models on the test data and use the averaged predictions as \n # meta-features for the final prediction which is done by the meta-model\n def predict(self, X):\n\n meta_features = np.column_stack([\n np.column_stack([model.predict(X) for model in base_models]).mean(axis=1) for base_models in self.base_models_\n ])\n return self.meta_model_.predict(meta_features)\n\n def save(self, path, base_list, meta_name):\n\n # save base models\n for base_model, base_name in zip(self.base_models_, base_list):\n with open(os.path.join(path, base_name + '.pickle'), 'wb') as f:\n pickle.dump(base_model[0], f)\n\n # save meta model\n with open(os.path.join(path, meta_name + '.pickle'), 'wb') as f:\n pickle.dump(self.meta_model_, f)\n\n def load(self, path, base_list, meta_name):\n\n # load base models\n self.base_models_ = [list() for x in base_list]\n\n for i, base_name in base_list:\n with open(os.path.join(path, base_name + '.pickle'), 'rb') as f:\n self.base_models_[i].append(pickle.load(f))\n\n # load meta model\n with open(os.path.join(path, meta_name + '.pickle'), 'rb') as f:\n self.meta_model_ = pickle.load(f)" ]
[ [ "sklearn.base.clone", "sklearn.model_selection.KFold" ] ]
AutoDiff-Dream-Team/cs107-FinalProject
[ "4c3b0f6945acfe6fd3fe2757858538ec3cceb819" ]
[ "Auto_diff/tests/test_jacobian.py" ]
[ "import numpy as np\nfrom Auto_diff import FD, Jacobian\n\ndef test_function_jacobian():\n x = Jacobian([1, 3, 4])\n fun = np.sin(3*x[0] + 2*x[1] - x[2])\n assert isinstance(fun[0], FD), AssertionError('Not an instance of AD.')\n assert isinstance(fun[0].val, int) or isinstance(fun[0].val, float), AssertionError('Value is not a number.')\n assert isinstance(fun[0].der, int) or isinstance(fun[0].der, float), AssertionError('Derivative is not a number.')\n jacob = FD.get_derivatives(fun)\n values = FD.get_values(fun)\n assert isinstance(jacob[0], np.ndarray), AssertionError(\"get_derivatives method doesn't return numpy ndarray\")\n assert isinstance(values[0], np.ndarray), AssertionError(\"get_values method doesn't return numpy ndarray\")\n\ndef test_multiple_functions_jacobian():\n x = Jacobian([1, 3, 4])\n fun = [np.sin(3*x[0] + 2*x[1] - x[2]), x[0]**x[1]-x[2]]\n assert len(fun) == 2, AssertionError(\"Didn't return correct number of functions.\")\n assert len(fun[0]) == 3, AssertionError(\"Didn't return correct number of variables.\")\n assert isinstance(fun[0][0], FD), AssertionError('Not an instance of AD.')\n assert isinstance(fun[0][0].val, int) or isinstance(fun[0][0].val, float), AssertionError('Value is not a number.')\n assert isinstance(fun[0][0].der, int) or isinstance(fun[0][0].der, float), AssertionError('Derivative is not a number.')\n jacob = FD.get_derivatives(fun)\n values = FD.get_values(fun)\n assert jacob.shape == (2,3), AssertionError(\"Jacobian Matrix doesn't have correct dimensions\")\n assert values.shape == (2,3), AssertionError(\"Values Matrix doesn't have correct dimensions\")\n assert isinstance(jacob[0], np.ndarray), AssertionError(\"get_derivatives method doesn't return numpy ndarray\")\n assert isinstance(values[0], np.ndarray), AssertionError(\"get_values method doesn't return numpy ndarray\")\n\ntest_function_jacobian()\ntest_multiple_functions_jacobian()" ]
[ [ "numpy.sin" ] ]
chilleo/ALPHA
[ "61ac17a08eb4eb778cec9236f4792c97969311a2" ]
[ "raxmlOutputWindows/matplotlibCustomBackend/customFormlayout.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nformlayout\n==========\n\nModule creating Qt form dialogs/layouts to edit various type of parameters\n\n\nformlayout License Agreement (MIT License)\n------------------------------------------\n\nCopyright (c) 2009 Pierre Raybaut\n\nPermission is hereby granted, free of charge, to any person\nobtaining a copy of this software and associated documentation\nfiles (the \"Software\"), to deal in the Software without\nrestriction, including without limitation the rights to use,\ncopy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\nOF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n# History:\n# 1.0.10: added float validator (disable \"Ok\" and \"Apply\" button when not valid)\n# 1.0.7: added support for \"Apply\" button\n# 1.0.6: code cleaning\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\n__version__ = '1.0.10'\n__license__ = __doc__\n\nDEBUG = False\n\nimport copy\nimport datetime\nimport warnings\n\nimport six\n\nfrom matplotlib import colors as mcolors\nfrom matplotlib.backends.qt_compat import QtGui, QtWidgets, QtCore\n\n\nBLACKLIST = set([\"title\", \"label\"])\n\n\nclass ColorButton(QtWidgets.QPushButton):\n \"\"\"\n Color choosing push button\n \"\"\"\n colorChanged = QtCore.Signal(QtGui.QColor)\n\n def __init__(self, parent=None):\n QtWidgets.QPushButton.__init__(self, parent)\n self.setFixedSize(20, 20)\n self.setIconSize(QtCore.QSize(12, 12))\n self.clicked.connect(self.choose_color)\n self._color = QtGui.QColor()\n\n def choose_color(self):\n color = QtWidgets.QColorDialog.getColor(\n self._color, self.parentWidget(), \"\",\n QtWidgets.QColorDialog.ShowAlphaChannel)\n if color.isValid():\n self.set_color(color)\n\n def get_color(self):\n return self._color\n\n @QtCore.Slot(QtGui.QColor)\n def set_color(self, color):\n if color != self._color:\n self._color = color\n self.colorChanged.emit(self._color)\n pixmap = QtGui.QPixmap(self.iconSize())\n pixmap.fill(color)\n self.setIcon(QtGui.QIcon(pixmap))\n\n color = QtCore.Property(QtGui.QColor, get_color, set_color)\n\n\ndef to_qcolor(color):\n \"\"\"Create a QColor from a matplotlib color\"\"\"\n qcolor = QtGui.QColor()\n try:\n rgba = mcolors.to_rgba(color)\n except ValueError:\n warnings.warn('Ignoring invalid color %r' % color)\n return qcolor # return invalid QColor\n qcolor.setRgbF(*rgba)\n return qcolor\n\n\nclass ColorLayout(QtWidgets.QHBoxLayout):\n \"\"\"Color-specialized QLineEdit layout\"\"\"\n def __init__(self, color, parent=None):\n QtWidgets.QHBoxLayout.__init__(self)\n assert isinstance(color, QtGui.QColor)\n self.lineedit = QtWidgets.QLineEdit(\n mcolors.to_hex(color.getRgbF(), keep_alpha=True), parent)\n self.lineedit.editingFinished.connect(self.update_color)\n self.addWidget(self.lineedit)\n self.colorbtn = ColorButton(parent)\n 
self.colorbtn.color = color\n self.colorbtn.colorChanged.connect(self.update_text)\n self.addWidget(self.colorbtn)\n\n def update_color(self):\n color = self.text()\n qcolor = to_qcolor(color)\n self.colorbtn.color = qcolor # defaults to black if not qcolor.isValid()\n\n def update_text(self, color):\n self.lineedit.setText(mcolors.to_hex(color.getRgbF(), keep_alpha=True))\n\n def text(self):\n return self.lineedit.text()\n\n\ndef font_is_installed(font):\n \"\"\"Check if font is installed\"\"\"\n return [fam for fam in QtGui.QFontDatabase().families()\n if six.text_type(fam) == font]\n\n\ndef tuple_to_qfont(tup):\n \"\"\"\n Create a QFont from tuple:\n (family [string], size [int], italic [bool], bold [bool])\n \"\"\"\n if not (isinstance(tup, tuple) and len(tup) == 4\n and font_is_installed(tup[0])\n and isinstance(tup[1], int)\n and isinstance(tup[2], bool)\n and isinstance(tup[3], bool)):\n return None\n font = QtGui.QFont()\n family, size, italic, bold = tup\n font.setFamily(family)\n font.setPointSize(size)\n font.setItalic(italic)\n font.setBold(bold)\n return font\n\n\ndef qfont_to_tuple(font):\n return (six.text_type(font.family()), int(font.pointSize()),\n font.italic(), font.bold())\n\n\nclass FontLayout(QtWidgets.QGridLayout):\n \"\"\"Font selection\"\"\"\n def __init__(self, value, parent=None):\n QtWidgets.QGridLayout.__init__(self)\n font = tuple_to_qfont(value)\n assert font is not None\n\n # Font family\n self.family = QtWidgets.QFontComboBox(parent)\n self.family.setCurrentFont(font)\n self.addWidget(self.family, 0, 0, 1, -1)\n\n # Font size\n self.size = QtWidgets.QComboBox(parent)\n self.size.setEditable(True)\n sizelist = list(range(6, 12)) + list(range(12, 30, 2)) + [36, 48, 72]\n size = font.pointSize()\n if size not in sizelist:\n sizelist.append(size)\n sizelist.sort()\n self.size.addItems([str(s) for s in sizelist])\n self.size.setCurrentIndex(sizelist.index(size))\n self.addWidget(self.size, 1, 0)\n\n # Italic or not\n self.italic = QtWidgets.QCheckBox(self.tr(\"Italic\"), parent)\n self.italic.setChecked(font.italic())\n self.addWidget(self.italic, 1, 1)\n\n # Bold or not\n self.bold = QtWidgets.QCheckBox(self.tr(\"Bold\"), parent)\n self.bold.setChecked(font.bold())\n self.addWidget(self.bold, 1, 2)\n\n def get_font(self):\n font = self.family.currentFont()\n font.setItalic(self.italic.isChecked())\n font.setBold(self.bold.isChecked())\n font.setPointSize(int(self.size.currentText()))\n return qfont_to_tuple(font)\n\n\ndef is_edit_valid(edit):\n text = edit.text()\n state = edit.validator().validate(text, 0)[0]\n\n return state == QtGui.QDoubleValidator.Acceptable\n\n\nclass FormWidget(QtWidgets.QWidget):\n update_buttons = QtCore.Signal()\n def __init__(self, data, comment=\"\", parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n self.data = copy.deepcopy(data)\n self.widgets = []\n self.formlayout = QtWidgets.QFormLayout(self)\n if comment:\n self.formlayout.addRow(QtWidgets.QLabel(comment))\n self.formlayout.addRow(QtWidgets.QLabel(\" \"))\n if DEBUG:\n print(\"\\n\"+(\"*\"*80))\n print(\"DATA:\", self.data)\n print(\"*\"*80)\n print(\"COMMENT:\", comment)\n print(\"*\"*80)\n\n def get_dialog(self):\n \"\"\"Return FormDialog instance\"\"\"\n dialog = self.parent()\n while not isinstance(dialog, QtWidgets.QDialog):\n dialog = dialog.parent()\n return dialog\n\n def setup(self):\n # self.formlayout.setFieldGrowthPolicy(1)\n for label, value in self.data:\n if DEBUG:\n print(\"value:\", value)\n if label is None and value is None:\n # Separator: (None, 
None)\n self.formlayout.addRow(QtWidgets.QLabel(\" \"), QtWidgets.QLabel(\" \"))\n self.widgets.append(None)\n continue\n elif label is None:\n # Comment\n self.formlayout.addRow(QtWidgets.QLabel(value))\n self.widgets.append(None)\n continue\n elif tuple_to_qfont(value) is not None:\n field = FontLayout(value, self)\n elif (label.lower() not in BLACKLIST\n and mcolors.is_color_like(value)):\n field = ColorLayout(to_qcolor(value), self)\n elif isinstance(value, six.string_types):\n field = QtWidgets.QLineEdit(value, self)\n field.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum))\n elif isinstance(value, (list, tuple)):\n if isinstance(value, tuple):\n value = list(value)\n selindex = value.pop(0)\n field = QtWidgets.QComboBox(self)\n if isinstance(value[0], (list, tuple)):\n keys = [key for key, _val in value]\n value = [val for _key, val in value]\n else:\n keys = value\n field.addItems(value)\n if selindex in value:\n selindex = value.index(selindex)\n elif selindex in keys:\n selindex = keys.index(selindex)\n elif not isinstance(selindex, int):\n warnings.warn(\n \"index '%s' is invalid (label: %s, value: %s)\" %\n (selindex, label, value))\n selindex = 0\n field.setCurrentIndex(selindex)\n elif isinstance(value, bool):\n field = QtWidgets.QCheckBox(self)\n if value:\n field.setCheckState(QtCore.Qt.Checked)\n else:\n field.setCheckState(QtCore.Qt.Unchecked)\n elif isinstance(value, float):\n field = QtWidgets.QLineEdit(repr(value), self)\n field.setCursorPosition(0)\n field.setValidator(QtGui.QDoubleValidator(field))\n field.validator().setLocale(QtCore.QLocale(\"C\"))\n dialog = self.get_dialog()\n dialog.register_float_field(field)\n field.textChanged.connect(lambda text: dialog.update_buttons())\n elif isinstance(value, int):\n field = QtWidgets.QSpinBox(self)\n field.setRange(-1e9, 1e9)\n field.setValue(value)\n elif isinstance(value, datetime.datetime):\n field = QtWidgets.QDateTimeEdit(self)\n field.setDateTime(value)\n elif isinstance(value, datetime.date):\n field = QtWidgets.QDateEdit(self)\n field.setDate(value)\n else:\n field = QtWidgets.QLineEdit(repr(value), self)\n self.formlayout.addRow(label, field)\n # print(self.formlayout.fieldGrowthPolicy())\n self.widgets.append(field)\n\n def get(self):\n valuelist = []\n for index, (label, value) in enumerate(self.data):\n field = self.widgets[index]\n if label is None:\n # Separator / Comment\n continue\n elif tuple_to_qfont(value) is not None:\n value = field.get_font()\n elif (isinstance(value, six.string_types)\n or mcolors.is_color_like(value)):\n value = six.text_type(field.text())\n elif isinstance(value, (list, tuple)):\n index = int(field.currentIndex())\n if isinstance(value[0], (list, tuple)):\n value = value[index][0]\n else:\n value = value[index]\n elif isinstance(value, bool):\n value = field.checkState() == QtCore.Qt.Checked\n elif isinstance(value, float):\n value = float(str(field.text()))\n elif isinstance(value, int):\n value = int(field.value())\n elif isinstance(value, datetime.datetime):\n value = field.dateTime().toPyDateTime()\n elif isinstance(value, datetime.date):\n value = field.date().toPyDate()\n else:\n value = eval(str(field.text()))\n valuelist.append(value)\n return valuelist\n\n\nclass FormComboWidget(QtWidgets.QWidget):\n update_buttons = QtCore.Signal()\n\n def __init__(self, datalist, comment=\"\", parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n layout = QtWidgets.QVBoxLayout()\n self.setLayout(layout)\n self.combobox = QtWidgets.QComboBox()\n 
layout.addWidget(self.combobox)\n\n self.stackwidget = QtWidgets.QStackedWidget(self)\n layout.addWidget(self.stackwidget)\n self.combobox.currentIndexChanged.connect(self.stackwidget.setCurrentIndex)\n\n self.widgetlist = []\n for data, title, comment in datalist:\n self.combobox.addItem(title)\n widget = FormWidget(data, comment=comment, parent=self)\n self.stackwidget.addWidget(widget)\n self.widgetlist.append(widget)\n\n def setup(self):\n for widget in self.widgetlist:\n widget.setup()\n\n def get(self):\n return [widget.get() for widget in self.widgetlist]\n\n\nclass FormTabWidget(QtWidgets.QWidget):\n update_buttons = QtCore.Signal()\n\n def __init__(self, datalist, comment=\"\", parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n layout = QtWidgets.QVBoxLayout()\n self.tabwidget = QtWidgets.QTabWidget()\n layout.addWidget(self.tabwidget)\n self.setLayout(layout)\n self.widgetlist = []\n for data, title, comment in datalist:\n if len(data[0]) == 3:\n widget = FormComboWidget(data, comment=comment, parent=self)\n else:\n widget = FormWidget(data, comment=comment, parent=self)\n index = self.tabwidget.addTab(widget, title)\n self.tabwidget.setTabToolTip(index, comment)\n self.widgetlist.append(widget)\n\n def setup(self):\n for widget in self.widgetlist:\n widget.setup()\n\n def get(self):\n return [widget.get() for widget in self.widgetlist]\n\n\nclass FormDialog(QtWidgets.QDialog):\n \"\"\"Form Dialog\"\"\"\n def __init__(self, data, title=\"\", comment=\"\", icon=None, parent=None, apply=None):\n QtWidgets.QDialog.__init__(self, parent)\n\n self.apply_callback = apply\n\n # Form\n if isinstance(data[0][0], (list, tuple)):\n self.formwidget = FormTabWidget(data, comment=comment, parent=self)\n elif len(data[0]) == 3:\n self.formwidget = FormComboWidget(data, comment=comment, parent=self)\n else:\n self.formwidget = FormWidget(data, comment=comment, parent=self)\n layout = QtWidgets.QVBoxLayout()\n layout.addWidget(self.formwidget)\n\n self.float_fields = []\n self.formwidget.setup()\n\n # Button box\n self.bbox = bbox = QtWidgets.QDialogButtonBox(\n QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)\n self.formwidget.update_buttons.connect(self.update_buttons)\n if self.apply_callback is not None:\n apply_btn = bbox.addButton(QtWidgets.QDialogButtonBox.Apply)\n apply_btn.clicked.connect(self.apply)\n\n bbox.accepted.connect(self.accept)\n bbox.rejected.connect(self.reject)\n layout.addWidget(bbox)\n\n self.setLayout(layout)\n\n self.setWindowTitle(title)\n if not isinstance(icon, QtGui.QIcon):\n icon = QtWidgets.QWidget().style().standardIcon(QtWidgets.QStyle.SP_MessageBoxQuestion)\n self.setWindowIcon(icon)\n\n def register_float_field(self, field):\n self.float_fields.append(field)\n\n def update_buttons(self):\n valid = True\n for field in self.float_fields:\n if not is_edit_valid(field):\n valid = False\n for btn_type in (QtWidgets.QDialogButtonBox.Ok,\n QtWidgets.QDialogButtonBox.Apply):\n btn = self.bbox.button(btn_type)\n if btn is not None:\n btn.setEnabled(valid)\n\n def accept(self):\n self.data = self.formwidget.get()\n QtWidgets.QDialog.accept(self)\n\n def reject(self):\n self.data = None\n QtWidgets.QDialog.reject(self)\n\n def apply(self):\n self.apply_callback(self.formwidget.get())\n\n def get(self):\n \"\"\"Return form result\"\"\"\n return self.data\n\n\ndef fedit(data, title=\"\", comment=\"\", icon=None, parent=None, apply=None):\n \"\"\"\n Create form dialog and return result\n (if Cancel button is pressed, return None)\n\n data: 
datalist, datagroup\n title: string\n comment: string\n icon: QIcon instance\n parent: parent QWidget\n apply: apply callback (function)\n\n datalist: list/tuple of (field_name, field_value)\n datagroup: list/tuple of (datalist *or* datagroup, title, comment)\n\n -> one field for each member of a datalist\n -> one tab for each member of a top-level datagroup\n -> one page (of a multipage widget, each page can be selected with a combo\n box) for each member of a datagroup inside a datagroup\n\n Supported types for field_value:\n - int, float, str, unicode, bool\n - colors: in Qt-compatible text form, i.e. in hex format or name (red,...)\n (automatically detected from a string)\n - list/tuple:\n * the first element will be the selected index (or value)\n * the other elements can be couples (key, value) or only values\n \"\"\"\n\n # Create a QApplication instance if no instance currently exists\n # (e.g., if the module is used directly from the interpreter)\n if QtWidgets.QApplication.startingUp():\n _app = QtWidgets.QApplication([])\n dialog = FormDialog(data, title, comment, icon, parent, apply)\n if dialog.exec_():\n return dialog.get()\n\n\nif __name__ == \"__main__\":\n\n # def create_datalist_example():\n # return [('str', 'this is a string'),\n # ('list', [0, '1', '3', '4']),\n # ('list2', ['--', ('none', 'None'), ('--', 'Dashed'),\n # ('-.', 'DashDot'), ('-', 'Solid'),\n # ('steps', 'Steps'), (':', 'Dotted')]),\n # ('float', 1.2),\n # (None, 'Other:'),\n # ('int', 12),\n # ('font', ('Arial', 10, False, True)),\n # ('color', '#123409'),\n # ('bool', True),\n # ('date', datetime.date(2010, 10, 10)),\n # ('datetime', datetime.datetime(2010, 10, 10)),\n # ]\n #\n # def create_datagroup_example():\n # datalist = create_datalist_example()\n # return ((datalist, \"Category 1\", \"Category 1 comment\"),\n # (datalist, \"Category 2\", \"Category 2 comment\"),\n # (datalist, \"Category 3\", \"Category 3 comment\"))\n #\n # #--------- datalist example\n # datalist = create_datalist_example()\n #\n # def apply_test(data):\n # print(\"data:\", data)\n # print(\"result:\", fedit(datalist, title=\"Example\",\n # comment=\"This is just an <b>example</b>.\",\n # apply=apply_test))\n\n # --------- datagroup example\n # datagroup = create_datagroup_example()\n # print(\"result:\", fedit(datagroup, \"Global title\"))\n\n #--------- datagroup inside a datagroup example\n # datalist = create_datalist_example()\n # datagroup = create_datagroup_example()\n # print(\"result:\", fedit(((datagroup, \"Title 1\", \"Tab 1 comment\"),\n # (datalist, \"Title 2\", \"Tab 2 comment\"),\n # (datalist, \"Title 3\", \"Tab 3 comment\")),\n # \"Global title\"))\n\n\n# MY TEST\n\n data = [('str', 'this is a string'),\n ('str', 'this is a string'),\n ('str', 'this is a string'),\n ('list', [0, '1', '3', '4']),\n ('list', [2, '1', '3', '4']),\n ('list2', ['--', ('none', 'None'), ('--', 'Dashed'),\n ('-.', 'DashDot'), ('-', 'Solid'),\n ('steps', 'Steps'), (':', 'Dotted')]),\n ('float', 1.2),\n (None, 'Other:'),\n ('int', 12),\n ('font', ('Arial', 10, False, True)),\n ('color', '#123409'),\n ('bool', True),\n ('date', datetime.date(2010, 10, 10)),\n ('datetime', datetime.datetime(2010, 10, 10)),\n ]\n\n def apply_test(a):\n print(a)\n\n fedit(data, title='henlo', comment='haahha', apply=apply_test)" ]
[ [ "matplotlib.backends.qt_compat.QtCore.Property", "matplotlib.backends.qt_compat.QtWidgets.QApplication", "matplotlib.backends.qt_compat.QtGui.QIcon", "matplotlib.colors.to_rgba", "matplotlib.backends.qt_compat.QtWidgets.QStackedWidget", "matplotlib.backends.qt_compat.QtWidgets.QApplication.startingUp", "matplotlib.backends.qt_compat.QtCore.Signal", "matplotlib.backends.qt_compat.QtWidgets.QFontComboBox", "matplotlib.backends.qt_compat.QtWidgets.QLineEdit", "matplotlib.backends.qt_compat.QtGui.QDoubleValidator", "matplotlib.backends.qt_compat.QtWidgets.QCheckBox", "matplotlib.backends.qt_compat.QtCore.QLocale", "matplotlib.backends.qt_compat.QtGui.QColor", "matplotlib.backends.qt_compat.QtGui.QFontDatabase", "matplotlib.backends.qt_compat.QtWidgets.QGridLayout.__init__", "matplotlib.backends.qt_compat.QtWidgets.QComboBox", "matplotlib.colors.is_color_like", "matplotlib.backends.qt_compat.QtWidgets.QDateEdit", "matplotlib.backends.qt_compat.QtCore.QSize", "matplotlib.backends.qt_compat.QtWidgets.QFormLayout", "matplotlib.backends.qt_compat.QtWidgets.QDialog.reject", "matplotlib.backends.qt_compat.QtWidgets.QSpinBox", "matplotlib.backends.qt_compat.QtWidgets.QLabel", "matplotlib.backends.qt_compat.QtWidgets.QDialogButtonBox", "matplotlib.backends.qt_compat.QtWidgets.QDateTimeEdit", "matplotlib.backends.qt_compat.QtGui.QSizePolicy", "matplotlib.backends.qt_compat.QtWidgets.QVBoxLayout", "matplotlib.backends.qt_compat.QtWidgets.QWidget.__init__", "matplotlib.backends.qt_compat.QtWidgets.QHBoxLayout.__init__", "matplotlib.backends.qt_compat.QtWidgets.QDialog.__init__", "matplotlib.backends.qt_compat.QtWidgets.QWidget", "matplotlib.backends.qt_compat.QtWidgets.QPushButton.__init__", "matplotlib.backends.qt_compat.QtWidgets.QDialog.accept", "matplotlib.backends.qt_compat.QtWidgets.QTabWidget", "matplotlib.backends.qt_compat.QtCore.Slot", "matplotlib.backends.qt_compat.QtGui.QFont" ] ]
sflinter/hypergraph
[ "c3108ee51361d2e4b8ddc7eced1953f1548ce8d8" ]
[ "hypergraph/bandits.py" ]
[ "# Multi-armed bandit-based algorithms\n\nimport numpy as np\nfrom .genetic import GeneticOperators\n\n\nclass HyperBand:\n def __init__(self, config_ranges, loss, max_resources_per_conf, eta=3):\n \"\"\"\n Init the hyper band algorithm\n :param config_ranges: Config ranges eg taken from graph.get_hpopt_config_ranges()\n :param loss: A loss function with params (config, status, resources) which evaluates the configuration given\n the resources allocation. The returned value is the tuple (loss_value, status). The status is an internal value used\n by the function loss to store information during successive calls. The initial status (first execution) is None.\n The status parameter is optional.\n :param max_resources_per_conf: The maximum amount of resources allocated to evaluate a configuration\n :param eta: Param that controls the proportion of configurations discarded in each round of successive halving.\n \"\"\"\n self.config_ranges = dict(config_ranges)\n self.loss = loss\n self.max_resources_per_conf = max_resources_per_conf\n self.eta = eta\n\n def __call__(self):\n \"\"\"\n Execute the algorithm\n :return: The best config\n \"\"\"\n # TODO statistics\n\n f_eta = float(self.eta)\n s_max = int(np.floor(np.log(self.max_resources_per_conf)/np.log(f_eta)))\n budget = (s_max+1.)*self.max_resources_per_conf\n gene = GeneticOperators(self.config_ranges)\n best = (np.inf, None) # the best observed config, the tuple is of the form (loss_value, config)\n\n for s in range(s_max, -1, -1):\n n = int(np.ceil((budget/self.max_resources_per_conf)*np.power(f_eta, s)/(s+1)))\n r = self.max_resources_per_conf*np.power(f_eta, -s)\n # begin SuccessiveHalving with (n, r) inner loop\n configs = gene.create_population(size=n)\n for i in range(s+1):\n assert len(configs) > 0\n n_i = int(np.floor(n*np.power(f_eta, -i)))\n r_i = r*np.power(f_eta, i) # TODO is this supposed to be integer?\n results = [self.loss(config=config, resources=r_i)[0] for config in configs]\n k = int(np.floor(n_i/f_eta))\n if k == 0:\n break\n # take top k performing configurations indexes (based on loss)\n selection = np.argsort(results)[:k]\n\n # update the best observed config\n local_best_loss = results[selection[0]]\n if local_best_loss < best[0]:\n best = (local_best_loss, configs[selection[0]])\n\n configs = [configs[t] for t in selection]\n\n return best[1]\n" ]
[ [ "numpy.argsort", "numpy.log", "numpy.floor", "numpy.power" ] ]
d4l-data4life/BBNOrchestra-for-VQAmed2021
[ "b083c0c9c2be73419ca43536bf185fff379cf98c" ]
[ "lib/core/combiner.py" ]
[ "import numpy as np\nimport torch, math\nfrom core.evaluate import accuracy\n\nclass Combiner:\n def __init__(self, cfg, device):\n self.cfg = cfg\n self.type = cfg.TRAIN.COMBINER.TYPE\n self.device = device\n self.epoch_number = cfg.TRAIN.MAX_EPOCH\n self.func = torch.nn.Softmax(dim=1)\n self.initilize_all_parameters()\n\n def initilize_all_parameters(self):\n self.alpha = 0.2\n if self.epoch_number in [90, 180]:\n self.div_epoch = 100 * (self.epoch_number // 100 + 1)\n else:\n self.div_epoch = self.epoch_number\n\n def reset_epoch(self, epoch):\n self.epoch = epoch\n \n def forward(self, model, criterion, image, label, meta, **kwargs):\n return eval(\"self.{}\".format(self.type))(\n model, criterion, image, label, meta, **kwargs\n )\n\n def default(self, model, criterion, image, label, **kwargs):\n image, label = image.to(self.device), label.to(self.device)\n output = model(image)\n loss = criterion(output, label)\n now_result = torch.argmax(self.func(output), 1)\n now_acc = accuracy(now_result.cpu().numpy(), label.cpu().numpy())[0]\n\n return loss, now_acc\n\n def bbn_mix(self, model, criterion, image, label, meta, **kwargs):\n\n image_a, image_b = image.to(self.device), meta[\"sample_image\"].to(self.device)\n label_a, label_b = label.to(self.device), meta[\"sample_label\"].to(self.device)\n\n feature_a, feature_b = (\n model(image_a, feature_cb=True),\n model(image_b, feature_rb=True),\n )\n\n l = 1 - ((self.epoch - 1) / self.div_epoch) ** 2 # parabolic decay\n #l = 0.5 # fix\n #l = math.cos((self.epoch-1) / self.div_epoch * math.pi /2) # cosine decay\n #l = 1 - (1 - ((self.epoch - 1) / self.div_epoch) ** 2) * 1 # parabolic increment\n #l = 1 - (self.epoch-1) / self.div_epoch # linear decay\n #l = np.random.beta(self.alpha, self.alpha) # beta distribution\n #l = 1 if self.epoch <= 120 else 0 # seperated stage\n\n mixed_feature = 2 * torch.cat((l * feature_a, (1-l) * feature_b), dim=1)\n output = model(mixed_feature, classifier_flag=True)\n loss = l * criterion(output, label_a) + (1 - l) * criterion(output, label_b)\n\n now_result = torch.argmax(self.func(output), 1)\n now_acc = (\n l * accuracy(now_result.cpu().numpy(), label_a.cpu().numpy())[0]\n + (1 - l) * accuracy(now_result.cpu().numpy(), label_b.cpu().numpy())[0]\n )\n\n return loss, now_acc\n\n" ]
[ [ "torch.nn.Softmax", "torch.cat" ] ]
hliang2/ContrlledAE
[ "c2af71d99b0f79d76c6348c08eca9b11d9e3016e" ]
[ "code/gan_train.py" ]
[ "import os\nimport time\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom torch.utils.data import DataLoader\nfrom config import DATASET_PARAMETERS, NETWORKS_PARAMETERS\nfrom parse_dataset import get_dataset\nfrom network import get_network\nfrom utils import Meter, cycle, save_model, test_image, experiment\nfrom loss import *\nfrom dataset import reload_batch_face, reload_batch_voice\n\n# dataset and dataloader\nprint('Parsing your dataset...')\nvoice_list, face_list, id_class_num, voice_dict, face_dict = get_dataset(DATASET_PARAMETERS)\nNETWORKS_PARAMETERS['c']['output_channel'] = id_class_num\n\nprint('Preparing the datasets...')\nvoice_dataset = DATASET_PARAMETERS['voice_dataset'](voice_list,\n DATASET_PARAMETERS['nframe_range'])\nface_dataset = DATASET_PARAMETERS['face_dataset'](face_list)\n\nprint('Preparing the dataloaders...')\ncollate_fn = DATASET_PARAMETERS['collate_fn'](DATASET_PARAMETERS['nframe_range'])\nvoice_loader = DataLoader(voice_dataset, shuffle=True, drop_last=True,\n batch_size=1,\n num_workers=DATASET_PARAMETERS['workers_num'],\n collate_fn=collate_fn)\nface_loader = DataLoader(face_dataset, shuffle=True, drop_last=True,\n batch_size=1,\n num_workers=DATASET_PARAMETERS['workers_num'])\n\nvoice_iterator = iter(cycle(voice_loader))\nface_iterator = iter(cycle(face_loader))\n\n# networks, Fe, Fg, Fd (f+d), Fc (f+c)\nprint('Initializing networks...')\ne_net, e_optimizer = get_network('e', NETWORKS_PARAMETERS, train=False) # voice embedding\n# g_net, g_optimizer = get_network('g', NETWORKS_PARAMETERS, train=True)\nf_net, f_optimizer = get_network('f', NETWORKS_PARAMETERS, train=True)\ng_net, g_optimizer = get_network('u', NETWORKS_PARAMETERS, train=True) # unet\nd_net, d_optimizer = get_network('d', NETWORKS_PARAMETERS, train=True) # discriminator\nc_net, c_optimizer = get_network('c', NETWORKS_PARAMETERS, train=True) # classifier, train=False\n\nd_scheduler = torch.optim.lr_scheduler.StepLR(d_optimizer, step_size=1, gamma=0.96)\ng_scheduler = torch.optim.lr_scheduler.StepLR(g_optimizer, step_size=1, gamma=0.96)\n\nexperiment(e_net, voice_iterator, face_iterator, face_dict, voice_dict)\n# Meters for recording the training status\niteration = Meter('Iter', 'sum', ':5d')\n# data_time = Meter('Data', 'sum', ':4.2f')\n# batch_time = Meter('Time', 'sum', ':4.2f')\n\nmeter_D_real = Meter('D_real', 'avg', ':3.2f')\nmeter_D_fake = Meter('D_fake', 'avg', ':3.2f')\nmeter_D = Meter('D', 'avg', ':3.2f')\nmeter_C_real = Meter('C_real', 'avg', ':3.2f')\nmeter_GD_fake = Meter('G_D_fake', 'avg', ':3.2f')\nmeter_GC_fake = Meter('G_C_fake', 'avg', ':3.2f')\nmeter_G_L2_fake = Meter('G_l2_fake', 'avg', ':3.2f')\nmeter_G = Meter('G', 'avg', ':3.2f')\n\"\"\" \"\"\"\n\ng_net = torch.load('G_addedA.pt')\ng_net.train()\n# d_net = torch.load('FD.pt')\n# d_net.train()\n\nprint('Training models...')\nmin_g_loss = None\nmin_d_loss = None\nmin_c_loss = None\nfor it in range(DATASET_PARAMETERS['num_batches']):\n # data\n start_time = time.time()\n\n voiceB, voiceB_label = next(voice_iterator)\n faceA, faceA_label = next(face_iterator) # real face\n voiceB_label = voiceB_label.repeat(DATASET_PARAMETERS['batch_size'])\n # TODO: since voiceB and faceA in different identities,\n # need to reuse load_voice and load_face to get corresponding faceB and voiceA\n faceB_items = [face_dict[v_label.item()] for v_label in voiceB_label]\n voiceA_items = [voice_dict[f_label.item()] for f_label in faceA_label]\n faceB = reload_batch_face(faceB_items)\n voiceA = 
reload_batch_voice(voiceA_items, DATASET_PARAMETERS['nframe_range'][1])\n # noise = 0.05 * torch.randn(DATASET_PARAMETERS['batch_size'], 64, 1, 1) # shape 4d!\n\n # print(voiceB.shape)\n # torch.Size([64, 64, 438])\n # print(faceA.shape)\n # torch.Size([64, 3, 64, 64])\n # use GPU or not\n if NETWORKS_PARAMETERS['GPU']:\n voiceB, voiceB_label = voiceB.cuda(), voiceB_label.cuda()\n faceA, faceA_label = faceA.cuda(), faceA_label.cuda()\n faceB, voiceA = faceB.cuda(), voiceA.cuda()\n # real_label, fake_label = real_label.cuda(), fake_label.cuda()\n # noise = noise.cuda()\n # data_time.update(time.time() - start_time)\n\n # TODO: scale the input images, notice when inference ??\n # scaled_images = face * 2 - 1\n\n # get voice embeddings\n embedding_B = e_net(voiceB)\n embedding_B = F.normalize(embedding_B).view(embedding_B.size()[0], -1)\n # print(embedding_B.shape) #(64, 64)\n # introduce some permutations to voice --> deprecated\n # embeddings = embeddings + noise\n # embeddings = F.normalize(embeddings)\n\n\n # TODO: introduce some permutations to image !!!\n\n # ============================================\n # TRAIN THE DISCRIMINATOR\n # ============================================\n\n # if it != 1 and it % 10 == 1:\n f_optimizer.zero_grad()\n d_optimizer.zero_grad()\n c_optimizer.zero_grad()\n\n # 0. get generated faces\n scaled_images = faceA * 2 - 1\n fake_faceB = g_net(scaled_images, embedding_B)\n fake_faceB = (fake_faceB + 1) / 2\n # 1. Train with real images\n D_real_A = d_net(f_net(faceA))\n D_real_A_loss = true_D_loss(torch.sigmoid(D_real_A))\n\n # 2. Train with fake images\n D_fake_B = d_net(f_net(fake_faceB).detach())\n # D_fake = d_net(f_net(fake_face.detach())) # TODO: is detach necessary here ???\n D_fake_B_loss = fake_D_loss(torch.sigmoid(D_fake_B))\n\n # 3. Train with identity / gender classification\n real_classification = c_net(f_net(faceA))\n C_real_loss = identity_D_loss(real_classification, faceA_label)\n\n # D_real_loss = F.binary_cross_entropy(torch.sigmoid(D_real), real_label)\n # D_fake_loss = F.binary_cross_entropy(torch.sigmoid(D_fake), fake_label)\n\n # backprop\n D_loss = D_real_A_loss + D_fake_B_loss + C_real_loss\n # update meters\n meter_D_real.update(D_real_A_loss.item())\n meter_D_fake.update(D_fake_B_loss.item())\n meter_C_real.update(C_real_loss.item())\n meter_D.update(D_loss.item())\n\n D_loss.backward()\n\n f_optimizer.step()\n c_optimizer.step()\n d_optimizer.step()\n\n # =========================================\n # TRAIN THE GENERATOR\n # =========================================\n g_optimizer.zero_grad()\n\n # 0. get generated faces\n fake_faceB = g_net(scaled_images, embedding_B)\n fake_faceB = (fake_faceB + 1) / 2\n\n # 0.5 Train with L2 loss with A\n l2lossA = l1_loss_G(fake_faceB, faceA, 1)\n\n # 1. Train with discriminator\n D_fake_B = d_net(f_net(fake_faceB))\n D_B_loss = true_D_loss(torch.sigmoid(D_fake_B))\n\n # 2. Train with classifier\n fake_classfication = c_net(f_net(fake_faceB))\n C_fake_loss = identity_D_loss((fake_classfication), voiceB_label)\n # C_fake_loss = F.nll_loss(F.log_softmax(fake_classfication, 1), voice_label)\n\n # GD_fake_loss = F.binary_cross_entropy(torch.sigmoid(D_fake), real_label)\n # GC_fake_loss = F.nll_loss(F.log_softmax(fake_classfication, 1), voice_label)\n\n # 3. Train with L2 loss\n l2lossB = l1_loss_G(fake_faceB, faceB)\n\n # 4. 
Train with consistency loss\n # TODO: to be tested, after getting embedding_A and ??\n # scaled_fake = fake_faceB * 2 - 1\n # # get voice embeddings\n # embedding_A = e_net(voiceA)\n # embedding_A = F.normalize(embedding_A).view(embedding_A.size()[0], -1)\n # fake_faceA = g_net(fake_faceB, embedding_A)\n # fake_faceA = (fake_faceA + 1) / 2\n # consistency_loss = l1_loss_G(fake_faceA, faceA)\n\n # backprop\n # G_loss = l2lossA + l2lossB\n G_loss = C_fake_loss + l2lossA + l2lossB + D_B_loss\n G_loss.backward()\n meter_GD_fake.update(D_B_loss.item())\n # meter_GC_fake.update(C_fake_loss.item())\n # meter_G_L2_fake.update(l2loss.item() + consistency_loss.item())\n meter_G_L2_fake.update(l2lossB.item())\n meter_G.update(G_loss.item())\n g_optimizer.step()\n\n # batch_time.update(time.time() - start_time)\n\n # print status\n if it % DATASET_PARAMETERS['print_stat_freq'] == 0:\n f_net = torch.load('face.pt')\n c_net = torch.load('C.pt')\n \n cos = test_image(it, f_net, c_net, e_net, g_net, voice_loader, face_loader, 'double_test', face_dict)\n print(iteration, meter_D, meter_G, meter_GD_fake, meter_G_L2_fake, 'cos:', cos)\n # data_time.reset()\n # batch_time.reset()\n meter_G.reset()\n meter_G_L2_fake.reset()\n meter_D.reset()\n # meter_D_real.reset()\n # meter_D_fake.reset()\n # meter_C_real.reset()\n meter_GD_fake.reset()\n # meter_GC_fake.reset()\n\n # snapshot\n # save_model(g_net, NETWORKS_PARAMETERS['u']['model_path'])\n\n # cos = test_image(it, f_net, c_net, e_net, g_net, voice_loader, face_loader, 'final_test', face_dict)\n\n # save model for debugging purpose\n # if min_g_loss is None or G_loss < min_g_loss:\n # min_g_loss = G_loss\n # torch.save(g_net, 'G.pt')\n # if min_d_loss is None or D_loss < min_d_loss:\n # min_d_loss = D_loss\n # torch.save(d_net, 'FD.pt')\n # if min_c_loss is None or C_real_loss < min_c_loss:\n # min_d_loss = D_loss\n # torch.save(c_net, 'C.pt')\n # torch.save(f_net, 'face.pt')\n\n iteration.update(1)" ]
[ [ "torch.nn.functional.normalize", "torch.sigmoid", "torch.load", "torch.utils.data.DataLoader", "torch.optim.lr_scheduler.StepLR" ] ]
zh123-art/SpatialSense
[ "477fd6e458377185cd50e05af26825bd14834ffd" ]
[ "baselines/models/recurrent_phrase_encoder.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass RecurrentPhraseEncoder(nn.Module):\n def __init__(self, word_embedding_dim, feature_dim):\n super().__init__()\n self.feature_dim = feature_dim\n self.gru = nn.GRU(\n input_size=word_embedding_dim,\n hidden_size=feature_dim // 2,\n num_layers=1,\n batch_first=True,\n bidirectional=True,\n )\n\n def forward(self, phrase):\n batchsize = phrase.size(0)\n h0 = torch.zeros(2, batchsize, self.feature_dim // 2)\n if torch.cuda.is_available():\n h0 = h0.cuda()\n output, hn = self.gru(phrase, h0)\n return output[:, -1, :]\n" ]
[ [ "torch.nn.GRU", "torch.cuda.is_available", "torch.zeros" ] ]
ghPRao/pneumonia_detectiion_from_chest_xray
[ "645fa5ac94729ba4d073630688fa340275fd3b8c" ]
[ "src/model_utils.py" ]
[ "import tensorflow\nfrom tensorflow import keras\n\n# Keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D , MaxPool2D , Flatten , Dropout , BatchNormalization\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.preprocessing.image import ImageDataGenerator\n\n#tqdm\nimport sys\nfrom sys import stderr\nimport numpy as np\nimport pandas as pd\nimport random \nimport six\nfrom keras.callbacks import Callback\n\nimport skimage #conda install scikit-image\nimport lime #conda install lime\nimport lime.lime_image as li\nfrom skimage.segmentation import mark_boundaries\n\nfrom sklearn.metrics import classification_report, confusion_matrix\n\n# # Keras-Tuner\n# from kerastuner.tuners import RandomSearch\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nCONV2D_3x3_32 = 'Conv2D3x3_32'\nCONV2D_3x3_64 = 'Conv2D3x3_64'\nCONV2D_3x3_128 = 'Conv2D3x3_128'\nCONV2D_2x2_32 = 'Conv2D2x2_32'\nCONV2D_2x2_64 = 'Conv2D2x2_64'\nCONV2D_2x2_128 = 'Conv2D2x2_128'\n\nBATCH_NORMAL = 'BatchNormal'\nMAXPOOL_2D = 'MaxPool2D'\nDROPOUT_10PERCENT = 'DropOut_10percent'\nDROPOUT_20PERCENT = 'DropOut_20percent'\nFLATTEN_LAYERS = 'Flatten_layers'\nDENSE = 'Dense'\nDENSE_512 = 'Dense512'\nDENSE_128 = 'Dense128'\n\ndef augment_images(images):\n ''' \n Purpose: Data augmentation to prevent overfitting and handling the imbalance in dataset\n Function: augment_images \n Input: array of images\n Output: return output of .fit\n\n '''\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range = 30, # randomly rotate images in the range (degrees, 0 to 180)\n zoom_range = 0.2, # Randomly zoom image \n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip = True, # randomly flip images\n vertical_flip=False) # randomly flip images )\n\n datagen.fit(images)\n return datagen \n\n \nclass cnn_model:\n '''\n Takes data from the Chest X-Ray dataset and processes it for modeling. 
This includes: \n 1) Set default keras deep network parameters\n 2) Sets hyper-parameters \n\n '''\n def __init__(self, X_train, y_train, X_test, y_test, X_val, y_val): \n '''\n Set default parameters for calling CNN keras models\n '''\n self.model = Sequential()\n \n self.X_train = X_train\n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.X_val = X_val\n self.y_val = y_val\n \n self.datagen = augment_images(self.X_train)\n \n self.model_params = {\n 'units': 32,\n 'filter': (3,3),\n 'kernel_size': 3,\n 'activation': 'relu',\n 'padding': 'same',\n 'pool_size': (2,2),\n 'strides': 2,\n 'dense_units': 2,\n 'drop_out_10percent': 0.1,\n 'drop_out_20percent': 0.2,\n 'input_shape': (150, 150, 3),\n 'optimizer': 'adam',\n 'loss': 'binary_cross_entropy',\n 'metrics': ['accuracy', 'Recall'],\n 'output_activation': 'sigmoid', \n 'batch_size':32\n } \n \n def build_model(self, layers):\n\n for layer in layers:\n if layer == CONV2D_3x3_32: \n self.model_params['strides'] = 1\n self.model.add(Conv2D(self.model_params['units'], \n self.model_params['filter'],\n padding = self.model_params['padding'],\n strides = self.model_params['strides'],\n activation = self.model_params['activation'],\n input_shape = self.model_params['input_shape'])) \n if layer == CONV2D_3x3_64: # 64 units, 3x3 filter, 'relu', stride 2\n self.model_params['units'] = 64\n self.model_params['strides'] = 1\n self.model_params['filter'] = (3,3)\n self.model.add(Conv2D(self.model_params['units'], \n self.model_params['filter'],\n padding = self.model_params['padding'],\n strides = self.model_params['strides'],\n activation = self.model_params['activation'])) \n if layer == CONV2D_3x3_128: # 128 units, 3x3 filter, 'relu', stride 2\n self.model_params['units'] = 128\n self.model.add(Conv2D(self.model_params['units'], \n self.model_params['filter'],\n padding = self.model_params['padding'],\n strides = self.model_params['strides'],\n activation = self.model_params['activation'],\n input_shape = self.model_params['input_shape'])) \n if layer == CONV2D_2x2_32: # default 32 units, 3x3 filter, 'relu', stride 2\n self.model_params['units'] = 32\n self.model_params['filter'] = (2,2)\n self.model.add(Conv2D(self.model_params['units'], \n self.model_params['filter'],\n padding = self.model_params['padding'],\n strides = self.model_params['strides'],\n activation = self.model_params['activation'],\n input_shape = self.model_params['input_shape'])) \n if layer == CONV2D_2x2_64: # 64 units, 3x3 filter, 'relu', stride 2\n self.model_params['units'] = 64\n self.model_params['filter'] = (2,2)\n self.model_params['strides'] = 1\n self.model.add(Conv2D(self.model_params['units'], \n self.model_params['filter'],\n padding = self.model_params['padding'],\n strides = self.model_params['strides'],\n activation = self.model_params['activation'],\n input_shape = self.model_params['input_shape'])) \n if layer == CONV2D_2x2_128: # 64 units, 3x3 filter, 'relu', stride 2\n self.model_params['units'] = 128\n elf.model_params['filter'] = (2,2)\n self.model.add(Conv2D(self.model_params['units'], \n self.model_params['filter'],\n padding = self.model_params['padding'],\n strides = self.model_params['strides'],\n activation = self.model_params['activation'],\n input_shape = self.model_params['input_shape'])) \n elif layer == BATCH_NORMAL:\n self.model.add(BatchNormalization())\n elif layer == MAXPOOL_2D:\n self.model_params['strides'] = 2\n self.model_params['pool_size'] = (2,2)\n self.model.add(MaxPool2D( self.model_params['pool_size'],\n 
self.model_params['strides'],\n self.model_params['padding']))\n elif layer == DROPOUT_10PERCENT:\n self.model.add(Dropout(self.model_params['drop_out_10percent']))\n elif layer == FLATTEN_LAYERS:\n self.model.add(Flatten())\n elif layer == DENSE_128:\n self.model.add(Dense(units=128, activation='relu'))\n elif layer == DENSE_512:\n self.model.add(Dense(units=512, activation='relu'))\n else: \n if layer == DROPOUT_20PERCENT:\n self.model.add(Dropout(self.model_params['drop_out_20percent']))\n \n # output layer\n self.model.add(Dense(units=1 , activation=self.model_params['output_activation'])) \n \n self.model.compile(optimizer = \"rmsprop\" , loss = 'binary_crossentropy' , metrics = self.model_params['metrics']) \n self.model.summary()\n return self.model \n \n def fit_report_model (self, rpt_title, BATCH_SIZE=32, EPOCHS=12):\n self.datagen_train = augment_images(self.X_train) \n self.batch_size = BATCH_SIZE\n self.report_title = rpt_title\n self.epochs = EPOCHS\n \n # fit & report the model and return history \n learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience = 2, verbose=0,factor=0.3, min_lr=0.000001)\n self.history = self.model.fit(self.datagen.flow(self.X_train,self.y_train, batch_size = self.batch_size) , epochs = self.epochs, verbose=0,\n validation_data = self.datagen.flow(self.X_val, self.y_val) ,callbacks = [learning_rate_reduction]) \n self.report_model()\n return self.history\n \n def report_model (self):\n epochs = [i for i in range(self.epochs)]\n labels = ['PNEUMONIA', 'NORMAL'] \n history = self.history\n \n print(history.history)\n train_accuracy = history.history['accuracy']\n train_loss = history.history['loss']\n train_recall = history.history['recall']\n\n val_accuracy = history.history['val_accuracy']\n val_loss = history.history['val_loss']\n\n fig , ax = plt.subplots(1,2)\n fig.set_size_inches(20,10)\n\n ax[0].plot(epochs , train_accuracy , 'go-' , label = 'Training Accuracy')\n ax[0].plot(epochs , val_accuracy , 'ro-' , label = 'Validation Accuracy')\n ax[0].set_title('Training & Validation Accuracy ')\n ax[0].legend()\n ax[0].set_xlabel(\"Epochs\")\n ax[0].set_ylabel(\"Accuracy\")\n\n ax[1].plot(epochs , train_recall , 'g-o' , label = 'Training Recall')\n # ax[1].plot(epochs , val_recall , 'r-o' , label = 'Validation Recall')\n ax[1].set_title('Traing Recall')\n ax[1].legend()\n ax[1].set_xlabel(\"Epochs\")\n ax[1].set_ylabel(\"Recall\")\n plt.show()\n fig.savefig('../../visualization/{} model_accuracy_recall.png'.format(self.report_title), dpi=150) \n self.plot_confusion_matrix()\n \n def plot_confusion_matrix(self):\n '''\n Print Confustion Matrix\n '''\n\n labels = ['PNEUMONIA', 'NORMAL'] \n self.predictions = self.model.predict_classes(self.X_test)\n predictions = self.predictions.reshape(1,-1)[0]\n\n print(classification_report(self.y_test, predictions, target_names = ['Pneumonia (Class 0)','Normal (Class 1)']))\n\n #Print Confusion matrix\n cm = confusion_matrix(self.y_test,predictions)\n print( cm)\n\n cm = pd.DataFrame(cm , index = ['0','1'] , columns = ['0','1']) \n plt.figure(figsize = (10,10))\n sns.heatmap(cm,cmap= \"Blues\", linecolor = 'black' , linewidth = 1 , annot = True, fmt='',xticklabels = labels,yticklabels = labels)\n plt.savefig('../../visualization/{} confusion-metrics.png'.format(self.report_title), dpi=150)\n\n self.sample_pneumonia(predictions) \n self.sample_normal(predictions)\n\n def sample_pneumonia(self,predictions):\n '''\n Print sample of Normal X-Rays \n '''\n \n print(\"Sample of Penumonia 
X-rays\")\n correct = np.nonzero(predictions == self.y_test)[0]\n f = plt.figure(figsize=(16,16))\n gs = f.add_gridspec(2,3)\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[0, 0])\n plt.imshow(self.X_test[0], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[0], self.y_test[0]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[0, 1])\n plt.imshow(self.X_test[1], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[1], self.y_test[1]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[0,2])\n plt.imshow(self.X_test[2], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[2], self.y_test[2]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[1, 0]) \n plt.imshow(self.X_test[3], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[3], self.y_test[3]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[1,1])\n plt.imshow(self.X_test[4], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[4], self.y_test[4]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[1,2]) \n plt.imshow(self.X_test[5], cmap=\"gray\", interpolation='none');\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[5], self.y_test[5]))\n f.tight_layout()\n \n \n f.savefig(\"../../visualization/{}Pneuominia_Validation_Images.png\".format(self.report_title))\n \n def sample_normal(self, predictions):\n '''\n Visualize Normal Images\n '''\n incorrect = np.nonzero(predictions != self.y_test)[0]\n f = plt.figure(figsize=(16, 16))\n gs = f.add_gridspec(2,3)\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[0, 1])\n \n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[0, 0])\n plt.imshow(self.X_test[0], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[0], self.y_test[0]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[0, 1])\n plt.imshow(self.X_test[1], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[1], self.y_test[1]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[0,2])\n plt.imshow(self.X_test[2], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[2], self.y_test[2]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[1, 0]) \n plt.imshow(self.X_test[3], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[3], self.y_test[3]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[1,1])\n plt.imshow(self.X_test[4], cmap=\"gray\", interpolation='none')\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[4], self.y_test[4]))\n\n with sns.axes_style(\"darkgrid\"):\n ax = f.add_subplot(gs[1,2]) \n plt.imshow(self.X_test[5], cmap=\"gray\", interpolation='none');\n plt.title(\"Predicted Class {},Actual Class {}\".format(predictions[5], self.y_test[5]))\n \n f.tight_layout()\n f.savefig(\"../../visualization/{} Normal_Validation_Images.png\".format(self.report_title))\n\n def lime_images(self, image, main_title, rows=3, columns=3):\n '''\n This function uses LimeImageExplainer from lime.lime_image to create a visual of the inner workings\n of the image 
processing neural network.\n It does this by separating the image into various regions known as \"superpixels\" and judging performance\n with and without these superpixels on the image.\n Inputs: model (keras sequantial model)\n image (array, tensor) iamge to be analyzed\n *kwargs\n Returns: subplots of image with masks of superpixels \n ''' \n \n figsize=(15,15)\n min_superpixels=1 \n max_superpixels=1+rows*columns \n positive_only=False\n negative_only=False \n hide_rest=False\n axis_off='off'\n subplot_titles=None\n \n # Instantiate image explainer\n explainer = li.LimeImageExplainer()\n \n # instantiate plot to populate with explained images\n fig, ax = plt.subplots(rows, columns, figsize=figsize)\n \n # loop through number of superpixels to be included\n for i in range(min_superpixels, max_superpixels):\n # create index for subplots\n m = (i-min_superpixels) // rows\n n = (i-min_superpixels) % rows\n \n # explain prediciton with lime\n explanation = explainer.explain_instance(image, self.model.predict, top_labels=5, hide_color=0, num_samples=1000)\n temp, mask = explanation.get_image_and_mask(0, num_features=i, positive_only=positive_only, negative_only=negative_only, hide_rest=hide_rest)\n \n # plot results\n ax[m,n].imshow(mark_boundaries(temp/2 + 0.5, mask))\n ax[m,n].axis(axis_off)\n \n if not subplot_titles:\n ax[m,n].set_title(f'# of Superpixels: {i}') \n else:\n ax[m,n].set_title(subplot_titles)\n \n fig.suptitle(main_title)\n plt.savefig(\"../../visualization/{}.png\".format(main_title), dpi=300) \n \n def print_truePred_type1_type2_images(self, dimension=3):\n '''\n Prints a set of LIME Imags from 3 lists: True Predictions, Type_1 Error, and Type_2 Error\n Input: dimension print a nxn images\n Returns: Lime Images of a sample image from each st.\n '''\n true_preds = type_1 = type_2 = []\n \n for i in range(len(self.y_test)):\n if self.y_test[i] == self.predictions[i]:\n true_preds.append(i)\n elif self.y_test[i] == 1:\n type_1.append(i)\n else:\n type_2.append(i)\n # Select images from each classification \n print_true_preds = random.sample(true_preds, dimension+1)\n print_type_1 = random.sample(type_1, dimension+1)\n print_type_2 = random.sample(type_2, dimension+1)\n type(print_true_preds)\n\n \n # Print images of True Predictions\n self.lime_images(self.X_train[print_true_preds[0]], \"Sample_True_Predictions_\"+str(dimension), dimension, dimension)\n self.lime_images(self.X_train[print_type_1[0]], \"Sample_Type1_\"+str(dimension), dimension, dimension)\n self.lime_images(self.X_train[print_type_2[0]], \"Sample_Type2_\"+str(dimension), dimension, dimension)\n\n " ]
[ [ "matplotlib.pyplot.imshow", "numpy.nonzero", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.show", "sklearn.metrics.classification_report", "matplotlib.pyplot.figure" ] ]
dwhalen/holophrasm
[ "0d971428f9879ad3d6c0a781f1a021cff73fb1ce" ]
[ "multitrainer.py" ]
[ "import learning_history\nimport data_utils5 as data_utils\n\nimport random\nimport numpy as np\nimport time\n\nimport os\nimport sys\n\nimport pickle as pickle\nimport matplotlib.pyplot as plt\n\n#from pathos.multiprocessing import ProcessingPool as Pool\nfrom multiprocessing import Pool\nfrom multiprocessing import Process, Queue\n\nimport signal\n\nglobal_trainer = None\n\nclass Trainer(object):\n def __init__(self, config, load=False, load_location=None, save_location='./weights/', model=None, write_every=10,\n draw_plots=True):\n assert model is not None\n if load_location is None: load_location = save_location\n if not os.path.exists(save_location):\n os.makedirs(save_location)\n self.load_location = load_location\n self.save_location = save_location\n self.p = None\n self.model = model\n self.draw_plots = draw_plots\n\n global global_trainer\n global_trainer = self\n\n self.config = config\n self.lm = self.config.lm\n self.training_steps = 0\n self.reset_batch()\n self.write_every = write_every\n self.learning_history = learning_history.LearningHistory(draw_plots=draw_plots)\n\n self.v = model.Variables(self.config)\n if load:\n self.v.load(self.load_location + '/train.weights')\n self.config.load(self.load_location + '/train.parameters')\n self.learning_history.load(load_location + '/train.history')\n else:\n self.reset_log()\n\n self.validation_data_set = self.model.validation_data_set(self.lm)\n self.training_data_set = None # we generate this for each epoch\n self.test_data_set = self.model.test_data_set(self.lm)\n\n def save_session(self, file_name=None):\n if file_name is None: file_name = self.save_location\n start = time.time()\n self.v.save(file_name+'/train.weights')\n self.config.save(file_name+'/train.parameters')\n self.save_learning_history(file_name+'/train.history')\n out_string = 'saved session in '+str(time.time()-start) + 's'\n print(out_string)\n self.add_to_log(out_string)\n\n def load_learning_history(self, load_location):\n # I think that this will automatically open a plot if there\n # was one already\n with open(load_location, 'rb') as handle:\n self.learning_history = pickle.load(handle)\n if not self.learning_history.draw_plots and self.draw_plots:\n self.learning_history.prep_plot(self)\n self.learning_history.draw_plots = self.draw_plots\n\n def save_learning_history(self, file_path):\n with open(file_path, 'wb') as handle:\n pickle.dump(self.learning_history, handle)\n\n def reset_log(self):\n file_location = self.save_location + '/log.txt'\n with open(file_location, \"w\") as log_file:\n log_file.write(\"Starting log\")\n\n def add_to_log(self, string):\n file_location = self.save_location + '/log.txt'\n with open(file_location, \"a\") as log_file:\n log_file.write(string+'\\n')\n\n # this is kind of ugly: it resets the stored accuracy and loss totals\n def reset_batch(self):\n # this is a lot of stuff to remember. 
It's almost impressive...\n self.total_time = np.array([0.0, 0.0, 0.0])\n self.outputs = None\n self.output_counts = None\n self.model_calls = 0\n\n def add_to_history(self, data_type):\n # now that we've finished a batch, we should add it to the history\n #print\n #print 'adding',data_type\n\n if self.output_counts is None:\n #print 'empty outputs'\n return\n for x in self.output_counts: assert x > 0\n\n output_means = [total/count for total,count in zip(self.outputs, self.output_counts)]\n avg_loss = output_means[0]\n acc_list = output_means[1:]\n avg_time = self.total_time / self.model_calls\n\n self.learning_history.append(avg_loss, acc_list, type=data_type)\n out_string = self.batch_string(data_type)\n print(out_string)\n self.add_to_log(out_string)\n\n self.reset_batch()\n\n def batch_string(self,data_type):\n for x in self.output_counts: assert x > 0\n assert self.model_calls > 0\n\n output_means = [total/count for total,count in zip(self.outputs, self.output_counts)]\n avg_loss = output_means[0]\n acc_list = output_means[1:]\n avg_time = self.total_time / self.model_calls\n\n out_string = '\\r{0:10s} loss = {1:6.3f}, outputs ='.format(data_type, avg_loss)\n for x in acc_list:\n out_string+= ' {0:4.1f}'.format(100.0*x)\n out_string += ', model_time ='\n for t in avg_time:\n out_string += ' {0:6.5f},'.format(t)\n out_string += ' at '+time.strftime(\"%Y-%m-%d %H:%M:%S\")\n return out_string\n\n def batch_call(self, i):\n # runs the model and returns a tuple, a dictionary\n # d_list, outputs, output_counts\n data_type = self.current_data_type\n if data_type == 'validation':\n proof_step= self.validation_data_set[i]\n elif data_type == 'training':\n proof_step = self.training_data_set[i]\n elif data_type == 'training':\n proof_step = self.test_data_set[i]\n model = self.model.Model(self.v, self.config, proof_step, train=(data_type=='training') )\n d_list = [v.d for v in self.v.vs]\n return d_list, model.outputs, model.output_counts\n\n\n def steps(self, proof_steps, data_type='training', multiprocessing=None):\n self.current_data_type = data_type\n assert len(proof_steps) > 0\n\n # create the model\n t0 = time.time()\n\n if multiprocessing is not None:\n\n #t1 = time.time()\n if self.reorder:\n reorder(proof_steps, data_type)\n #self.total_time[1]+=time.time()-t1\n # multiprocessing\n t2 = time.time()\n with withPool(multiprocessing) as p:\n self.total_time[1]+=time.time()-t2\n self.p=p.p\n t3 = time.time()\n out = self.p.map(batch_call_multi, proof_steps, chunksize=1)\n self.total_time[2]+=time.time()-t3\n else:\n # normalprocessing\n t3 = time.time()\n out = list(map(batch_call_multi, proof_steps))\n self.total_time[2]+=time.time()-t3\n\n #print [(np.mean(x[0][0])) for x in out]\n\n # now run the minimize operation\n #t4 = time.time()\n if data_type == 'training':\n self.v.optimizer.minimize(d_vars=[sum(x[0][i] for x in out) for i in range(len(self.v.vs))])\n #self.total_time[4]+=time.time()-t4\n\n # update the model counts\n self.total_time[0] += time.time()-t0\n for x in out:\n _, outputs, output_counts = x\n self.model_calls+=1\n\n # update the state information\n if self.outputs is None:\n self.outputs = outputs\n self.output_counts = output_counts\n else:\n for i in range(len(outputs)):\n self.outputs[i]+=1.0*outputs[i]\n self.output_counts[i]+=1.0*output_counts[i]\n\n if self.model_calls % self.write_every == 0:\n sys.stdout.write(self.batch_string(data_type))\n sys.stdout.flush()\n\n def run_epoch(self):\n self.last_save_time = time.time()\n\n epoch_start_time = time.time()\n 
self.training_data_set = self.model.training_data_set(self.lm)\n data_set = self.training_data_set\n self.index = 0\n self.reset_batch()\n\n # make sure that we get rid of all the processes\n if self.p is not None:\n self.p.close()\n self.p.join()\n\n batch = []\n for tindex in range(len(data_set)):\n t = data_set[tindex]\n self.index+=1\n batch.append(tindex)\n if len(batch) == self.batch_size or (len(batch)>0 and tindex==len(data_set)-1):\n # if we've filled out a batch or\n self.steps(batch, data_type='training', multiprocessing=self.multiprocessing)\n batch = []\n\n if (self.index % self.plot_every) == 0:\n self.add_to_history('training')\n self.learning_history.plot()\n self.reset_batch()\n\n if self.save_every is not None and time.time()-self.last_save_time > 60 * self.save_every:\n self.last_save_time = time.time()\n self.save_session()\n\n if self.validate_every is not None and (self.index % self.validate_every == 0):\n self.add_to_history('training')\n self.learning_history.plot()\n self.reset_batch()\n self.validate()\n\n ####\n if self.early_stop and self.index>self.early_stop: return\n\n # add any stragglers to the training\n self.add_to_history('training')\n self.learning_history.plot()\n self.reset_batch()\n\n self.validate()\n\n # save the session every epoch. Possibly this should happen more often.\n # takes ~10 seconds\n if self.save_epoch:\n self.save_session(file_name=self.save_location)\n\n out_string = 'total epoch time {0:11.2f}'.format( time.time()-epoch_start_time )\n print(out_string)\n self.add_to_log(out_string)\n\n\n def validate(self):\n # validation\n batch = []\n data_set = self.validation_data_set\n for tindex in range(len(data_set)):\n t = data_set[tindex]\n batch.append(tindex)\n if len(batch) == self.batch_size or (len(batch)>0 and tindex==len(data_set)-1):\n # if we've filled out a batch or\n self.steps(batch, data_type='validation', multiprocessing=self.multiprocessing)\n batch = []\n val = (1.0*self.outputs[0])/self.output_counts[0]\n self.add_to_history('validation')\n self.learning_history.plot()\n self.reset_batch()\n\n # validation decay\n if val > self.last_val:\n out_string = 'Reducing learning rate '+str(self.config.p.lr)+' -> '+str(self.config.p.lr / self.config.p.lr_reduction)\n print(out_string)\n self.add_to_log(out_string)\n self.config.p.lr /= self.config.p.lr_reduction\n self.v.optimizer.update_learning_rate(self.config.p.lr) # this works, although I don't like it.\n self.last_val = val\n\n def test(self):\n # validation\n self.reset_batch()\n self.early_stop = None\n batch = []\n data_set = self.test_data_set\n print('number of test data,', len(data_set))\n for tindex in range(len(data_set)):\n t = data_set[tindex]\n batch.append(tindex)\n if len(batch) == self.batch_size or (len(batch)>0 and tindex==len(data_set)-1):\n # if we've filled out a batch or\n self.steps(batch, data_type='test', multiprocessing=self.multiprocessing)\n batch = []\n self.add_to_history('test')\n self.learning_history.plot()\n self.reset_batch()\n\n def run_many_epochs(self,lm,\n plot_every=1000, write_every=10, early_stop=None,\n save_every=10000,\n multiprocessing=None, batch_size=50,\n validate_every=None, reorder=True):\n self.reorder = reorder\n self.plot_every = plot_every\n self.write_every = write_every\n self.early_stop = early_stop\n self.save_every = save_every\n self.save_best = True # because saving is not fast\n self.save_epoch = True # because saving is not fast\n self.multiprocessing = multiprocessing\n self.batch_size = batch_size\n 
self.validate_every = validate_every\n\n self.best_val = 10000000\n self.last_val = 10000000\n\n print('running epochs')\n\n global global_trainer\n global_trainer = self\n\n best_val_epoch = 0\n best_val = 10000000\n epoch = -1\n while True:\n epoch += 1\n if self.config.p.max_epochs is not None and epoch >= self.config.p.max_epochs: return\n self.run_epoch()\n out_string = 'finished epoch '+str(epoch)+'\\n'\n print(out_string)\n self.add_to_log(out_string)\n #print 'added'\n if not early_stop is None: return\n val = self.learning_history.validation_loss[-1]\n if val < best_val:\n best_val = val\n best_val_epoch = epoch\n if self.save_best:\n out_string = 'saving best epoch'\n print(out_string)\n self.add_to_log(out_string)\n self.v.save(self.save_location+'/best.weights')\n print('saved best')\n\ndef init_func():\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\nclass withPool:\n def __init__(self, procs):\n self.p = Pool(procs, init_func)\n def __enter__(self):\n return self\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.p.close()\n\ndef proof_step_difficulty(i, data_type):\n sel = global_trainer\n # approximate the difficulty of the proof step\n if data_type == 'validation':\n t=sel.validation_data_set[i]\n elif data_type == 'training':\n t=sel.training_data_set[i]\n elif data_type == 'test':\n t=sel.test_data_set[i]\n return (t.tree.size() + sum(h.tree.size() for h in t.prop.hyps if h.type=='e')\n +sum(h.tree.size() for h in t.context.hyps if h.type=='e'))\n\ndef reorder(proof_steps, data_type):\n decorated = [(-1*proof_step_difficulty(t, data_type) , t) for t in proof_steps]\n decorated.sort()\n proof_steps[:] = [t for _, t in decorated]\n\n\ndef batch_call_multi(i):\n sel = global_trainer\n data_type = sel.current_data_type\n if data_type == 'validation':\n proof_step=sel.validation_data_set[i]\n elif data_type == 'training':\n proof_step = sel.training_data_set[i]\n elif data_type == 'test':\n proof_step = sel.test_data_set[i]\n model = sel.model.Model(sel.v, sel.config, proof_step, train=(data_type=='training') )\n d_list = [1.0*v.d for v in sel.v.vs]\n return d_list, model.outputs, model.output_counts\n" ]
[ [ "numpy.array" ] ]
hysts/pl_gaze_estimation
[ "1123053a57115da5bb8ea9675911e7d9230f3ec9" ]
[ "pl_gaze_estimation/models/utils/utils.py" ]
[ "import torch\nfrom omegaconf import DictConfig\n\nfrom .initializer import create_initializer\n\n\ndef initialize_weight(init_config: DictConfig, model: torch.nn.Module) -> None:\n initializer = create_initializer(init_config)\n model.apply(initializer)\n\n\ndef load_weight(model_config: DictConfig, model: torch.nn.Module) -> None:\n if 'PRETRAINED' not in model_config or not model_config.PRETRAINED.PATH:\n return\n checkpoint = torch.load(model_config.PRETRAINED.PATH, map_location='cpu')\n state_dict = checkpoint[model_config.PRETRAINED.KEY]\n for key, val in list(state_dict.items()):\n remove_prefix = model_config.PRETRAINED.REMOVE_PREFIX\n add_prefix = model_config.PRETRAINED.ADD_PREFIX\n new_key = key\n if remove_prefix:\n new_key = new_key[len(remove_prefix) + 1:]\n if add_prefix:\n new_key = f'{add_prefix}.{new_key}'\n if new_key != key:\n state_dict[new_key] = val\n del state_dict[key]\n model.load_state_dict(state_dict)\n" ]
[ [ "torch.load" ] ]
aps-7bm/tomopy
[ "ccf2cfa2df1aba8987e7a8827ac360d0b5534c55" ]
[ "test/test_tomopy/test_recon/test_algorithm.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# #########################################################################\n# Copyright (c) 2015-2019, UChicago Argonne, LLC. All rights reserved. #\n# #\n# Copyright 2015-2019. UChicago Argonne, LLC. This software was produced #\n# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\n# U.S. Department of Energy. The U.S. Government has rights to use, #\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\n# modified to produce derivative works, such modified software should #\n# be clearly marked, so as not to confuse it with the version available #\n# from ANL. #\n# #\n# Additionally, redistribution and use in source and binary forms, with #\n# or without modification, are permitted provided that the following #\n# conditions are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\n# Laboratory, ANL, the U.S. Government, nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. #\n# #\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. 
#\n# #########################################################################\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\nimport unittest\nfrom ..util import read_file\nfrom tomopy.recon.algorithm import recon\nfrom numpy.testing import assert_allclose\nimport numpy as np\n\n__author__ = \"Doga Gursoy\"\n__copyright__ = \"Copyright (c) 2015, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n\ntry:\n import mkl_fft\n found_mkl = True\nexcept ImportError:\n found_mkl = False\n\ntry:\n import cv2\n found_opencv = True\nexcept ImportError:\n found_opencv = False\n\n\nclass ReconstructionAlgorithmTestCase(unittest.TestCase):\n def setUp(self):\n self.prj = read_file('proj.npy')\n self.ang = read_file('angle.npy').astype('float32')\n\n def test_art(self):\n os.environ[\"TOMOPY_USE_C_ART\"] = \"1\"\n assert_allclose(\n recon(self.prj, self.ang, algorithm='art', num_iter=4),\n read_file('art.npy'), rtol=1e-2)\n\n def test_bart(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='bart', num_iter=4),\n read_file('bart.npy'), rtol=1e-2)\n\n def test_fbp(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='fbp'),\n read_file('fbp.npy'), rtol=1e-2)\n\n @unittest.skipUnless(found_mkl, \"Gridrec requires MKL.\")\n def test_gridrec_custom(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='gridrec', filter_name='none'),\n recon(\n self.prj, self.ang, algorithm='gridrec', filter_name='custom',\n filter_par=np.ones(self.prj.shape[-1], dtype=np.float32)))\n\n @unittest.skipUnless(found_mkl, \"Gridrec requires MKL.\")\n def test_gridrec(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='gridrec', filter_name='none'),\n read_file('gridrec_none.npy'), rtol=1e-2)\n assert_allclose(\n recon(self.prj, self.ang, algorithm='gridrec', filter_name='shepp'),\n read_file('gridrec_shepp.npy'), rtol=1e-2)\n assert_allclose(\n recon(self.prj, self.ang, algorithm='gridrec', filter_name='cosine'),\n read_file('gridrec_cosine.npy'), rtol=1e-2)\n assert_allclose(\n recon(self.prj, self.ang, algorithm='gridrec', filter_name='hann'),\n read_file('gridrec_hann.npy'), rtol=1e-2)\n assert_allclose(\n recon(self.prj, self.ang, algorithm='gridrec', filter_name='hamming'),\n read_file('gridrec_hamming.npy'), rtol=1e-2)\n assert_allclose(\n recon(self.prj, self.ang, algorithm='gridrec', filter_name='ramlak'),\n read_file('gridrec_ramlak.npy'), rtol=1e-2)\n assert_allclose(\n recon(self.prj, self.ang, algorithm='gridrec', filter_name='parzen'),\n read_file('gridrec_parzen.npy'), rtol=1e-2)\n assert_allclose(\n recon(self.prj, self.ang, algorithm='gridrec',\n filter_name='butterworth'),\n read_file('gridrec_butterworth.npy'), rtol=1e-2)\n\n def test_mlem(self):\n result = recon(self.prj, self.ang, algorithm='mlem', num_iter=4)\n assert_allclose(result, read_file('mlem.npy'), rtol=1e-2)\n\n @unittest.skipUnless(found_opencv, \"CPU acceleration requires OpenCV.\")\n def test_mlem_accel(self):\n result = recon(self.prj, self.ang, algorithm='mlem', num_iter=4,\n accelerated=True, device='cpu', ncore=1, pool_size=3)\n assert_allclose(result, read_file('mlem_accel.npy'), rtol=1e-2)\n\n @unittest.skipUnless(\"CUDA_VERSION\" in os.environ, \"CUDA_VERSION not set.\")\n def test_mlem_gpu(self):\n result = recon(self.prj, self.ang, algorithm='mlem', num_iter=4,\n accelerated=True, device='gpu', ncore=1, pool_size=3)\n assert_allclose(result, read_file('mlem_accel_gpu.npy'), rtol=1e-2)\n\n def test_osem(self):\n 
assert_allclose(\n recon(self.prj, self.ang, algorithm='osem', num_iter=4),\n read_file('osem.npy'), rtol=1e-2)\n\n def test_ospml_hybrid(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='ospml_hybrid', num_iter=4),\n read_file('ospml_hybrid.npy'), rtol=1e-2)\n\n def test_ospml_quad(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='ospml_quad', num_iter=4),\n read_file('ospml_quad.npy'), rtol=1e-2)\n\n def test_pml_hybrid(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='pml_hybrid', num_iter=4),\n read_file('pml_hybrid.npy'), rtol=1e-2)\n\n def test_pml_quad(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='pml_quad', num_iter=4),\n read_file('pml_quad.npy'), rtol=1e-2)\n\n def test_sirt(self):\n result = recon(self.prj, self.ang, algorithm='sirt', num_iter=4)\n assert_allclose(result, read_file('sirt.npy'), rtol=1e-2)\n\n @unittest.skipUnless(found_opencv, \"CPU acceleration requires OpenCV.\")\n def test_sirt_accel(self):\n result = recon(self.prj, self.ang, algorithm='sirt', num_iter=4,\n accelerated=True, device='cpu', ncore=1, pool_size=3)\n assert_allclose(result, read_file('sirt_accel.npy'), rtol=1e-2)\n\n @unittest.skipUnless(\"CUDA_VERSION\" in os.environ, \"CUDA_VERSION not set.\")\n def test_sirt_gpu(self):\n result = recon(self.prj, self.ang, algorithm='sirt', num_iter=4,\n accelerated=True, device='gpu', ncore=1, pool_size=3)\n assert_allclose(result, read_file('sirt_accel_gpu.npy'), rtol=1e-2)\n\n def test_tv(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='tv', num_iter=4),\n read_file('tv.npy'), rtol=1e-2)\n\n def test_grad(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='grad', num_iter=4),\n read_file('grad.npy'), rtol=1e-2)\n\n def test_tikh(self):\n assert_allclose(\n recon(self.prj, self.ang, algorithm='tikh', num_iter=4),\n read_file('tikh.npy'), rtol=1e-2)\n" ]
[ [ "numpy.ones" ] ]
craffel/t-zero
[ "11f42bf6880a85ee72d4415e53b5054d42f4aa0b" ]
[ "inference/model_offload.py" ]
[ "# coding=utf-8\n\n\"\"\"\nScript showcasing how to run inference of T0++ on a single GPU using offloading.\nIt relies on Deepspeed (https://github.com/microsoft/DeepSpeed) and the ZeRO-3 offloading implementation.\n\nThe script is adapted from https://huggingface.co/transformers/main_classes/deepspeed.html#non-trainer-deepspeed-integration\n\"\"\"\n\n\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM\nfrom transformers.deepspeed import HfDeepSpeedConfig\nimport deepspeed\nimport os\nimport torch\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\" # To avoid warnings about parallelism in tokenizers\n\nmodel_name = \"bigscience/T0pp\"\n\nds_config = {\n \"fp16\": {\n \"enabled\": False,\n },\n \"zero_optimization\": {\n \"stage\": 3,\n \"offload_param\": {\n \"device\": \"cpu\",\n \"pin_memory\": True\n },\n \"stage3_param_persistence_threshold\": 4e7, # Tune this value depending on the capacity of your GPU. With the current value, the GPU memory will peak at ~24GB.\n },\n \"train_batch_size\": 1,\n}\n\n_ = HfDeepSpeedConfig(ds_config)\nmodel = AutoModelForSeq2SeqLM.from_pretrained(model_name)\ntokenizer = AutoTokenizer.from_pretrained(model_name)\nprint(\"Model and tokenizer loaded\")\n\ninputs = tokenizer.encode(\"Review: this is the best cast iron skillet you will ever buy. Is this review positive or negative?\", return_tensors=\"pt\")\ninputs = inputs.to(\"cuda:0\")\n\ndeepspeed_engine, _, _, _ = deepspeed.initialize(\n model=model,\n config_params=ds_config,\n model_parameters=None,\n optimizer=None,\n lr_scheduler=None\n)\n\ndeepspeed_engine.module.eval()\nwith torch.no_grad():\n outputs = deepspeed_engine.module.generate(inputs)\n\nprint(tokenizer.decode(outputs[0], skip_special_tokens=True))\nprint(\"FINISHED\")\n" ]
[ [ "torch.no_grad" ] ]
suyash/tensorflow
[ "968d511ebf5fe5651752580a13e457498ebab50d" ]
[ "tensorflow/python/compat/compat.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\n\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 7, 11)\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibiltiy, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. 
Must be an\n `int`.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Yields:\n Nothing.\n \"\"\"\n global _FORWARD_COMPATIBILITY_HORIZON\n try:\n old_compat_date = _FORWARD_COMPATIBILITY_HORIZON\n _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)\n yield\n finally:\n _FORWARD_COMPATIBILITY_HORIZON = old_compat_date\n" ]
[ [ "tensorflow.python.util.tf_export.tf_export" ] ]
FullMetalNicky/PULP-Streamer
[ "d26269aaeff28cf9adae7fb49dff9aa2bdebd40d" ]
[ "script/himax_driver.py" ]
[ "#!/usr/bin/env python\n\nimport fcntl\nimport os\n\nimport numpy as np\n\nimport rospy\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image\n# import cv2\n\npage_size = 655536 # 4092 # 8192 # 1024 # 4096\nF_GETPIPE_SZ = 1032 # Linux 2.6.35+\nF_SETPIPE_SZ = 1031 # Linux 2.6.35+\n\n\ndef read_from_pipe(pipein, size):\n # type: (int, int) -> np.array\n # rospy.loginfo(\"read_from_pipe %d %d\", pipein, size)\n remaining_size = size\n data = []\n while(remaining_size > 0):\n # rospy.loginfo(\"Will read %d\", min(remaining_size, page_size))\n output = os.read(pipein, min(remaining_size, page_size))\n remaining_size = remaining_size - len(output)\n # rospy.loginfo(\"RS %d\", remaining_size)\n data.append(output)\n data_str = ''.join(data)\n if (len(data_str) < size):\n rospy.loginfo(\"Error, expecting {} bytes, received {}.\".\n format(size, len(data_str)))\n return None\n data_arr = np.frombuffer(data_str, dtype=np.uint8)\n return data_arr\n\n\ndef main():\n # type: () -> None\n rospy.init_node('himax_driver', anonymous=True)\n width = rospy.get_param('~width')\n height = rospy.get_param('~height')\n pipe = rospy.get_param('~pipe', '/tmp/image_pipe')\n rospy.loginfo(\"Ready to listen for %d x %d images on %s\", width, height, pipe)\n image_pub = rospy.Publisher(\"image_raw\", Image, queue_size=1)\n bridge = CvBridge()\n if not os.path.exists(pipe):\n os.mkfifo(pipe)\n pipein = os.open(pipe, os.O_RDONLY)\n fcntl.fcntl(pipein, F_SETPIPE_SZ, 1000000)\n seq = 0\n while not rospy.is_shutdown():\n data = read_from_pipe(pipein, width * height)\n # rospy.loginfo(\"D\")\n if data is not None:\n cv_image = np.reshape(data, (height, width))\n\n # rospy.loginfo(\"Mean %d\", np.mean(cv_image))\n\n # cv_image = cv_image.astype(np.float)\n # cv_image = cv_image * 1.5\n # cv_image = np.minimum(255, cv_image)\n # cv_image = cv_image.astype(np.uint8)\n # cv_image = cv2.equalizeHist(cv_image)\n msg = bridge.cv2_to_imgmsg(cv_image)\n msg.header.stamp = rospy.Time.now()\n msg.header.seq = seq\n seq += 1\n image_pub.publish(msg)\n rospy.sleep(0)\n os.close(pipein)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.reshape", "numpy.frombuffer" ] ]
tanglef/geomloss
[ "4e09e3bfd376d92f2bb7efdf0854a5b7c756eb0d" ]
[ "geomloss/examples/optimal_transport/plot_interpolation_3D.py" ]
[ "\"\"\"Creating a fancy interpolation video between 3D meshes.\n==============================================================\n\nN.B.: I am currently very busy writing my PhD thesis. Comments will come soon!\n\"\"\"\n\n\n################################################################################\n# Setup\n# ----------------------\n#\n# Standard imports.\n#\n\n\nimport numpy as np\nimport torch\nimport os\n\nuse_cuda = torch.cuda.is_available()\ntensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor\nnumpy = lambda x : x.detach().cpu().numpy()\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom geomloss import SamplesLoss\nfrom pykeops.torch import LazyTensor\n\n\n################################################################################\n# Utility: turn a triangle mesh into a weighted point cloud.\n\ndef to_measure(points, triangles):\n \"\"\"Turns a triangle into a weighted point cloud.\"\"\"\n\n # Our mesh is given as a collection of ABC triangles:\n A, B, C = points[triangles[:, 0]], points[triangles[:, 1]], points[triangles[:, 2]]\n\n # Locations and weights of our Dirac atoms:\n X = (A + B + C) / 3 # centers of the faces\n S = np.sqrt(np.sum(np.cross(B - A, C - A) ** 2, 1)) / 2 # areas of the faces\n\n print(\"File loaded, and encoded as the weighted sum of {:,} atoms in 3D.\".format(len(X)))\n\n # We return a (normalized) vector of weights + a \"list\" of points\n return tensor(S / np.sum(S)), tensor(X)\n\n\n################################################################################\n# Utility: load \".ply\" mesh file.\n# \n\nfrom plyfile import PlyData, PlyElement\n\ndef load_ply_file(fname) :\n \"\"\"Loads a .ply mesh to return a collection of weighted Dirac atoms: one per triangle face.\"\"\"\n\n # Load the data, and read the connectivity information:\n plydata = PlyData.read(fname)\n triangles = np.vstack( plydata['face'].data['vertex_indices'] )\n\n # Normalize the point cloud, as specified by the user:\n points = np.vstack( [ [v[0],v[1],v[2]] for v in plydata['vertex'] ] )\n\n return to_measure(points, triangles)\n \n\n################################################################################\n# Utility: load \".nii\" volume file.\n#\n\nimport SimpleITK as sitk\nimport skimage\n\ndef load_nii_file(fname, threshold=.5):\n \"\"\"Uses the marching cube algorithm to turn a .nii binary mask into a surface weighted point cloud.\"\"\"\n\n mask = sitk.GetArrayFromImage(sitk.ReadImage(fname))\n #mask = skimage.transform.downscale_local_mean(mask, (4,4,4))\n verts, faces, normals, values = skimage.measure.marching_cubes_lewiner(mask, threshold)\n\n return to_measure(verts, faces)\n\n\n################################################################################\n# Synthetic sphere - a typical source measure:\n#\n\ndef create_sphere(n_samples = 1000):\n \"\"\"Creates a uniform sample on the unit sphere.\"\"\"\n n_samples = int(n_samples)\n\n indices = np.arange(0, n_samples, dtype=float) + 0.5\n phi = np.arccos(1 - 2 * indices / n_samples)\n theta = np.pi * (1 + 5**0.5) * indices\n\n x, y, z = np.cos(theta) * np.sin(phi), np.sin(theta) * np.sin(phi), np.cos(phi);\n points = np.vstack( (x, y, z)).T\n weights = np.ones(n_samples) / n_samples\n\n return tensor(weights), tensor(points)\n\n############################################################\n# Simple (slow) display routine:\n#\n\ndef display_cloud(ax, measure, color) :\n\n w_i, x_i = numpy( measure[0] ), numpy( measure[1] )\n\n ax.view_init(elev=110, 
azim=-90)\n #ax.set_aspect('equal')\n\n weights = w_i / w_i.sum()\n ax.scatter( x_i[:,0], x_i[:,1], x_i[:,2], \n s = 25*500 * weights, c = color )\n\n ax.axes.set_xlim3d(left=-1.4, right=1.4) \n ax.axes.set_ylim3d(bottom=-1.4, top=1.4) \n ax.axes.set_zlim3d(bottom=-1.4, top=1.4) \n\n############################################################\n# Save the output as a VTK folder, to be rendered with Paraview:\n\nfolder = \"output/wasserstein_3D/\"\nos.makedirs(os.path.dirname(\"output/wasserstein_3D/\"), exist_ok=True)\n\nfrom pyvtk import PolyData, PointData, CellData, Scalars, VtkData, PointData\n\ndef save_vtk(fname, points, colors):\n \"\"\"N.B.: Paraview is a good VTK viewer, which supports ray-tracing.\"\"\"\n \n structure = PolyData(points = points, vertices = np.arange(len(points)))\n values = PointData( Scalars(colors, name=\"colors\") )\n vtk = VtkData(structure, values)\n\n vtk.tofile( folder + fname, 'binary' ) \n\n#################################################################\n# Data\n# ----------------\n#\n# Shall we work on subsampled data or at full resolution?\n\nfast_demo = False if use_cuda else True\n\nif use_cuda:\n Npoints = 1e4 if fast_demo else 2e5\nelse:\n Npoints = 1e3\n\n##############################################################\n# Create a reference template:\n\ntemplate = create_sphere( Npoints )\n\n#################################################\n# Use color labels to track the particles:\n#\n\nK = 12\ncolors = (K * template[1][:,0]).cos()\ncolors = colors.view(-1).detach().cpu().numpy()\n\n\n#################################################\n# Fetch the data:\n#\n\n\nos.makedirs(os.path.dirname(\"data/\"), exist_ok=True)\nif not os.path.exists(\"data/wasserstein_3D_models/Stanford_dragon_200k.ply\"):\n print(\"Fetching the data... 
\", end='', flush=True)\n import urllib.request\n urllib.request.urlretrieve(\n \"http://www.kernel-operations.io/data/wasserstein_3D_models.zip\", \n \"data/wasserstein_3D_models.zip\")\n\n import shutil\n shutil.unpack_archive('data/wasserstein_3D_models.zip', 'data')\n print(\"Done.\")\n\n\n#############################################################\n# Load the data on the GPU:\n\n\nprint(\"Loading the data:\")\n# N.B.: Since Plyfile is far from being optimized, this may take some time!\ntargets = [ load_ply_file( \"data/wasserstein_3D_models/Stanford_dragon_200k.ply\" ),\n load_ply_file( \"data/wasserstein_3D_models/vertebrae_400k_biol260_sketchfab_CC.ply\" ),\n load_nii_file( \"data/wasserstein_3D_models/brain.nii.gz\" ),\n ]\n\n#################################################################\n# Normalize and subsample everyone, if required:\n\ndef normalize(measure, n = None):\n \"\"\"Reduce a point cloud to at most n points and normalize the weights and point cloud.\"\"\"\n weights, locations = measure\n N = len(weights)\n\n if n is not None and n < N:\n n = int(n)\n indices = torch.randperm(N)\n indices = indices[:n]\n weights, locations = weights[indices], locations[indices]\n\n weights = weights / weights.sum()\n weights, locations = weights.contiguous(), locations.contiguous()\n\n # Center, normalize the point cloud\n mean = (weights.view(-1,1) * locations).sum(dim=0)\n locations -= mean\n std = (weights.view(-1) * (locations**2).sum(dim=1).view(-1)).sum().sqrt()\n locations /= std\n\n return weights, locations\n\n\ntargets = [ normalize(t, n = Npoints ) for t in targets ]\n\n########################################################################\n# Fine tuning:\n\ntemplate = template[0], template[1] / 2 + tensor([.5,0.,0.]) # Smaller sphere, towards the back of the dragon\ntargets[1] = targets[1][0], targets[1][1]@tensor([[0,0,1],[0,1,0],[1,0,0]]) # Turn the vertebra\ntargets[2] = targets[2][0],-targets[2][1] # Flip the brain\n\n#########################################################################\n# Optimal Transport matchings\n# --------------------------------\n#\n# Define our solver:\n\n\nimport time\n\nLoss = SamplesLoss(\"sinkhorn\", p=2, blur=.01, scaling=.5, truncate=1)\n\ndef OT_registration(source, target, name):\n a, x = source # weights, locations\n b, y = target # weights, locations\n\n x.requires_grad = True\n z = x.clone() # Moving point cloud\n\n\n if use_cuda: torch.cuda.synchronize()\n start = time.time()\n\n nits = 4 if fast_demo else 10\n\n for it in range(nits):\n wasserstein_zy = Loss(a, z, b, y)\n [grad_z] = torch.autograd.grad(wasserstein_zy, [z])\n z -= grad_z / a[:,None] # Apply the regularized Brenier map\n \n # save_vtk(f\"matching_{name}_it_{it}.vtk\", numpy(z), colors)\n\n end = time.time()\n print(\"Registered {} in {:.3f}s.\".format(name, end-start))\n\n return z\n\n#################################################################\n# Register the source onto the targets:\n#\n\nmatchings = [ OT_registration(template, target, f\"shape{i+1}\") \n for (i, target) in enumerate(targets) ]\n\n#################################################################\n# Display our matchings:\n\nfor (i, (matching, target)) in enumerate(zip(matchings, targets)):\n\n fig = plt.figure(figsize=(6,6))\n plt.set_cmap(\"hsv\")\n\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n\n display_cloud(ax, (template[0], matching), colors)\n display_cloud(ax, target, 'blue')\n ax.set_title(\"Registered (N={:,}) and target {} (M={:,}) point clouds\".format(len(matching), 
i+1, len(target[0])))\n plt.tight_layout()\n\n\n#################################################################\n# Movie\n# -------------\n#\n# Save them as a collection of VTK files:\n\nFPS = 32 if fast_demo else 32\n\nsource = template[1]\npairs = [ \n (source, source),\n (source, matchings[0]),\n (matchings[0], matchings[0]),\n (matchings[0], matchings[1]),\n (matchings[1], matchings[1]),\n (matchings[1], matchings[2]),\n (matchings[2], matchings[2]),\n (matchings[2], source),\n]\n\nframe = 0\n\nprint(\"Save as a VTK movie...\", end=\"\", flush=True)\nfor (A, B) in pairs:\n A, B = numpy(A), numpy(B)\n for t in np.linspace(0, 1, FPS):\n save_vtk(f\"frame_{frame}.vtk\", (1-t) * A + t * B, colors)\n frame += 1\n\nprint(\"Done.\")\nplt.show()\n" ]
[ [ "torch.cuda.synchronize", "matplotlib.pyplot.tight_layout", "numpy.linspace", "torch.randperm", "numpy.arange", "numpy.arccos", "numpy.cos", "matplotlib.pyplot.set_cmap", "numpy.ones", "numpy.sin", "torch.cuda.is_available", "numpy.cross", "matplotlib.pyplot.show", "torch.autograd.grad", "numpy.sum", "numpy.vstack", "matplotlib.pyplot.figure" ] ]
RafalSkolasinski/simulation-codes-arxiv-1806.01815
[ "fc5c27c6c625cc7f45cf5d7627eaafa3d3df4b9e" ]
[ "codes/shapes.py" ]
[ "# Collection of shape functions used in this projects\n\n\nimport numpy as np\nfrom functools import lru_cache\n\n\n@lru_cache()\ndef define_shape(name, R):\n \"\"\"Define shape of a given type and specified radius.\n\n Parameters\n ----------\n shape : string\n Name of the chosen shape. Options are 'hexagon', 'circle', 'square',\n or 'rectangular'.\n R : int or float\n Radius for the shape, i.e. in case of \"square\" it will mean half of\n the side's length. For \"rectangular\" it must be a tuple.\n\n Returns\n -------\n shape function to be used with ``.fill`` method of Kwant's builders.\n \"\"\"\n if name not in ['hexagon', 'circle', 'square', 'rectangular']:\n raise ValueError('Wrong type of shape: \"{}\".'.format(name))\n\n if name == 'rectangular' and not isinstance(R, tuple):\n raise ValueError('If shape is \"rectangular\" then \"R\" must be a tuple.')\n\n if name == 'hexagon':\n shape = define_hexagon(R)\n\n if name == 'circle':\n shape = define_circle(R)\n\n if name == 'square':\n shape = define_square(R)\n\n if name == 'rectangular':\n shape = define_rectangular(R[0], R[1])\n\n return shape\n\n\ndef define_hexagon(R):\n \"\"\"Return shape function for hexagon.\"\"\"\n def shape(site):\n x1, x2 = np.array(site.pos)[:2]\n\n a0 = 0.5*R\n b0 = np.sin(np.pi/3.0)*R\n\n return (x2 >- b0 and x2 < b0 and\n x2 > -(b0/a0) * (2*a0 - x1) and\n x2 < -(b0/a0) * (x1 - 2*a0) and\n x2 < (b0/a0) * (x1 + 2*a0) and\n x2 > -(b0/a0) * (x1 + 2*a0))\n\n return shape\n\n\ndef define_circle(R):\n \"\"\"Return shape function for circle.\"\"\"\n def shape(site):\n return site.pos[0]**2 + site.pos[1]**2 < R**2\n\n return shape\n\n\ndef define_square(W):\n \"\"\"Return shape function for square.\"\"\"\n def shape(site):\n return np.abs(site.pos[0]) < W and np.abs(site.pos[1]) < W\n\n return shape\n\n\ndef define_rectangular(a, b):\n \"\"\"Return shape function for rectangular.\"\"\"\n def shape(site):\n return np.abs(site.pos[0]) < a and np.abs(site.pos[1]) < b\n\n return shape\n" ]
[ [ "numpy.array", "numpy.abs", "numpy.sin" ] ]
RunzheYang/lingvo
[ "1291e29812f9ee9836f9cacbb05db9ec6b095234" ]
[ "lingvo/core/attention_test.py" ]
[ "# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for attention.\"\"\"\n\nimport math\nfrom absl.testing import parameterized\nimport lingvo.compat as tf\nfrom lingvo.core import attention\nfrom lingvo.core import layers\nfrom lingvo.core import py_utils\nfrom lingvo.core import quant_utils\nfrom lingvo.core import test_utils\nimport numpy as np\n\n\nclass AttentionTest(test_utils.TestCase, parameterized.TestCase):\n \"\"\"Test attention models.\"\"\"\n\n def _CheckStaticShapes(self, atten_vec, atten_prob, target_batch_size,\n source_length, context_dim):\n \"\"\"Static shape must be set correctly for RNN beam search compatibility.\"\"\"\n self.assertIsNotNone(atten_prob.shape.ndims)\n self.assertEqual((target_batch_size, source_length), atten_prob.shape)\n self.assertIsNotNone(atten_vec.shape.ndims)\n self.assertEqual((target_batch_size, context_dim), atten_vec.shape)\n\n def _AdditiveAttentionInputs(self, packed_inputs=False, tgt_bs=6):\n np.random.seed(12345)\n source_vecs = tf.constant(np.random.rand(6, 3, 4), dtype=tf.float32)\n source_contexts = tf.constant(np.random.rand(6, 3, 5), dtype=tf.float32)\n source_padding = tf.transpose(\n tf.constant(\n [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0]],\n dtype=tf.float32))\n source_segment_id = tf.transpose(\n tf.constant(\n [[0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 2, 2], [0, 1, 1, 1, 1, 2]],\n dtype=tf.float32))\n query_vec = tf.constant(np.random.rand(tgt_bs, 7), dtype=tf.float32)\n qsi = [0, 1, 1, 1, 2, 2]\n query_segment_id = tf.constant(qsi[:tgt_bs], dtype=tf.float32)\n\n params = attention.AdditiveAttention.Params()\n params.name = 'atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n params.source_dim = 4\n params.query_dim = 7\n params.hidden_dim = 7\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n params.packed_input = packed_inputs\n tensors = (source_vecs, source_contexts, source_padding, source_segment_id,\n query_vec, query_segment_id)\n return params, tensors\n\n def testAdditiveAttention(self):\n with self.session(use_gpu=True):\n params, tensors = self._AdditiveAttentionInputs()\n source_vecs, source_contexts, source_padding, _, query_vec, _ = tensors\n atten = attention.AdditiveAttention(params)\n self.assertLen(atten.vars.Flatten(), 3)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n # TODO(yonghui): add atten.vars for the variables attention model\n # declares.\n atten_vars = tf.get_collection('AdditiveAttention_vars')\n self.assertLen(atten_vars, 3)\n\n self.evaluate(tf.global_variables_initializer())\n\n 
all_vars = tf.trainable_variables()\n for v in all_vars:\n print(v.eval())\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n\n print(['additive attention prob_out', np.array_repr(prob_out)])\n print(['additive attention atten_vec_out', np.array_repr(atten_vec_out)])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_prob_out = [\n [0.2555742 , 0.24073002, 0. , 0. , 0.25412574,\n 0.24957004],\n [0. , 0.25394136, 0.24764746, 0.25480017, 0. ,\n 0.24361098],\n [0.25094295, 0.2499937 , 0. , 0.24308342, 0. ,\n 0.25597993],\n [0.25559244, 0.24070661, 0. , 0. , 0.25412717,\n 0.24957375],\n [0. , 0.25393167, 0.24765188, 0.25481117, 0. ,\n 0.24360526],\n [0.25113183, 0.24990553, 0. , 0.24246082, 0. ,\n 0.25650182]]\n\n expected_atten_vec_out = [\n [0.49745506, 0.63471669, 0.49220526, 0.5683012 , 0.42753702],\n [0.51502365, 0.56183743, 0.37644109, 0.87425125, 0.46182787],\n [0.57862502, 0.44246522, 0.36931852, 0.41002905, 0.14327194],\n [0.49745634, 0.63471717, 0.49220967, 0.56829125, 0.4275257 ],\n [0.51501834, 0.56183696, 0.37644821, 0.87425053, 0.46182543],\n [0.57893348, 0.44248882, 0.36938411, 0.41006744, 0.14328158]]\n # pylint: enable=bad-whitespace\n # pyformat: enable\n\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def testAdditiveAttentionWithPackedInputs(self):\n with self.session(use_gpu=True):\n params, tensors = self._AdditiveAttentionInputs(packed_inputs=True)\n (source_vecs, source_contexts, source_padding, source_segment_id,\n query_vec, query_segment_id) = tensors\n atten = attention.AdditiveAttention(params)\n self.assertLen(atten.vars.Flatten(), 3)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding, source_segment_id)\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec, query_segment_id=query_segment_id)\n\n # TODO(yonghui): add atten.vars for the variables attention model\n # declares.\n atten_vars = tf.get_collection('AdditiveAttention_vars')\n self.assertLen(atten_vars, 3)\n\n self.evaluate(tf.global_variables_initializer())\n\n all_vars = tf.trainable_variables()\n for v in all_vars:\n print(v.eval())\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n print(['packed additive attention prob_out', np.array_repr(prob_out)])\n print([\n 'packed additive attention atten_vec_out',\n np.array_repr(atten_vec_out)\n ])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_prob_out = [\n [0.51495469, 0.48504525, 0 , 0 , 0 ,\n 0 ],\n [0 , 0 , 0.49288213, 0.50711787, 0 ,\n 0 ],\n [0. , 0.5070073 , 0. , 0.4929927 , 0. ,\n 0 ],\n [0. , 0 , 0. , 0. , 0.50451994,\n 0.49548006],\n [0. , 0. , 0. , 0. , 0. ,\n 1 ],\n [0. , 0. , 0. , 0. , 0. 
,\n 1 ]]\n\n expected_atten_vec_out = [\n [0.35256192, 0.68348885, 0.41128731, 0.48906463, 0.50537711],\n [0.45880911, 0.6068666 , 0.59867024, 0.82797134, 0.33504993],\n [0.54934788, 0.50335771, 0.26117462, 0.32834488, 0.16398546],\n [0.64022166, 0.58665955, 0.571935 , 0.64637613, 0.35084069],\n [0.27927336, 0.06444023, 0.19862361, 0.93168277, 0.85441357],\n [0.95473474, 0.05225335, 0.57947171, 0.48049626, 0.02170898]]\n # pylint: enable=bad-whitespace\n # pyformat: enable\n\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def testAdditiveAttentionDeterministicDropout(self):\n with self.session(use_gpu=True):\n params, tensors = self._AdditiveAttentionInputs()\n source_vecs, source_contexts, source_padding, _, query_vec, _ = tensors\n params.atten_dropout_prob = 0.5\n params.atten_dropout_deterministic = True\n params.random_seed = 78924\n\n atten = attention.AdditiveAttention(params)\n self.assertLen(atten.vars.Flatten(), 3)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n atten_vars = tf.get_collection('AdditiveAttention_vars')\n self.assertLen(atten_vars, 3)\n\n self.evaluate(tf.global_variables_initializer())\n\n all_vars = tf.trainable_variables()\n for v in all_vars:\n print(v.eval())\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n\n print('additive attention prob_out %r' % prob_out)\n print('additive attention atten_vec_out %r' % atten_vec_out)\n\n expected_prob_out = [\n [0.51114839, 0.48146003, 0., 0., 0., 0.],\n [0., 0.50788271, 0., 0.50960034, 0., 0.48722193],\n [0., 0.49998739, 0., 0., 0., 0.51195991],\n [0., 0.48141322, 0., 0., 0.50825435, 0.4991475],\n [0., 0.50786334, 0.49530372, 0., 0., 0.48721054],\n [0., 0.49981108, 0., 0., 0., 0.51300365],\n ]\n\n expected_atten_vec_out = [\n [0.34995595, 0.67843682, 0.40824726, 0.4854497, 0.50164163],\n [0.60576487, 0.80303985, 0.46480939, 1.3962903, 0.79863495],\n [0.90196574, 0.47579059, 0.31802341, 0.34388986, 0.15836108],\n [0.81517166, 0.90433061, 0.72681838, 1.02123988, 0.72982419],\n [0.99326241, 0.83445895, 0.43935478, 1.26866817, 0.71197236],\n [0.90281653, 0.47568679, 0.31862068, 0.34435683, 0.15833181],\n ]\n\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def _testSameBatchSize(self, same_batch_size, packed_inputs=False):\n with self.session(use_gpu=True, graph=tf.Graph()):\n tf.random.set_seed(398847392)\n params, tensors = self._AdditiveAttentionInputs(packed_inputs, tgt_bs=3)\n source_vecs, source_contexts, source_padding, _, query_vec, _ = tensors\n params.same_batch_size = same_batch_size\n\n atten = attention.AdditiveAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n self.assertLen(atten.vars.Flatten(), 3)\n\n self.evaluate(tf.global_variables_initializer())\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n return atten_vec_out, 
prob_out\n\n def testAdditiveAttentionSameBatchSize(self):\n vec0, prob0 = self._testSameBatchSize(False)\n vec1, prob1 = self._testSameBatchSize(True)\n self.assertAllClose(vec0, vec1)\n self.assertAllClose(prob0, prob1)\n\n def testAdditiveAttentionSameBatchSizePackedInputs(self):\n vec0, prob0 = self._testSameBatchSize(False, True)\n vec1, prob1 = self._testSameBatchSize(True, True)\n self.assertAllClose(vec0, vec1)\n self.assertAllClose(prob0, prob1)\n\n def testAdditiveAttentionSmallerHiddenLayer(self):\n with self.session(use_gpu=True):\n np.random.seed(12345)\n source_vecs = tf.constant(np.random.rand(6, 3, 4), dtype=tf.float32)\n source_contexts = tf.constant(np.random.rand(6, 3, 5), dtype=tf.float32)\n source_padding = tf.transpose(\n tf.constant(\n [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0]],\n dtype=tf.float32))\n query_vec = tf.constant(np.random.rand(6, 7), dtype=tf.float32)\n\n params = attention.AdditiveAttention.Params()\n params.name = 'atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n params.source_dim = 4\n params.query_dim = 7\n params.hidden_dim = 5\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n\n atten = attention.AdditiveAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n atten_vars = tf.get_collection('AdditiveAttention_vars')\n self.assertLen(atten_vars, 3)\n\n self.evaluate(tf.global_variables_initializer())\n\n all_vars = tf.trainable_variables()\n for v in all_vars:\n print(v.eval())\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n\n print(['prob_out smaller hidden layer', np.array_repr(prob_out)])\n print(\n ['atten_vec_out smaller hidden layer',\n np.array_repr(atten_vec_out)])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_prob_out = [\n [0.25242305, 0.24356601, 0. , 0. , 0.25346902,\n 0.25054196],\n [0. , 0.25230604, 0.24693871, 0.25406054, 0. ,\n 0.24669473],\n [0.2501823 , 0.24922216, 0. , 0.24693316, 0. ,\n 0.25366238],\n [0.25267059, 0.24300526, 0. , 0. , 0.25369659,\n 0.25062758],\n [0. , 0.25272119, 0.24642748, 0.25435579, 0. ,\n 0.24649554],\n [0.25044653, 0.24924593, 0. , 0.24560687, 0. 
,\n 0.25470066]]\n\n expected_atten_vec_out = [\n [0.49746257, 0.63428223, 0.4914251 , 0.57035601, 0.42964566],\n [0.51383036, 0.55960417, 0.37601081, 0.87443453, 0.46342701],\n [0.57660079, 0.44147781, 0.36953348, 0.41017395, 0.14293665],\n [0.49755943, 0.63429612, 0.49157569, 0.57015073, 0.42933062],\n [0.51371205, 0.55982226, 0.37590009, 0.87454152, 0.4633899 ],\n [0.57732767, 0.44161472, 0.36958888, 0.41019297, 0.14298658]]\n # pylint: enable=bad-whitespace\n # pyformat: enable\n\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def testAdditiveAttentionFp16NoNaN(self):\n with self.session(use_gpu=True):\n np.random.seed(12345)\n source_vecs = tf.constant(np.random.rand(6, 3, 4), dtype=tf.float16)\n source_contexts = tf.constant(np.random.rand(6, 3, 5), dtype=tf.float16)\n source_padding = tf.transpose(\n tf.constant(\n [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0]],\n dtype=tf.float16))\n query_vec = tf.constant(np.random.rand(6, 7), dtype=tf.float16)\n\n params = attention.AdditiveAttention.Params()\n params.dtype = tf.float16\n params.name = 'atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n params.source_dim = 4\n params.query_dim = 7\n params.hidden_dim = 7\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n\n atten = attention.AdditiveAttention(params)\n self.assertLen(atten.vars.Flatten(), 3)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n # TODO(yonghui): add atten.vars for the variables attention model\n # declares.\n atten_vars = tf.get_collection('AdditiveAttention_vars')\n self.assertLen(atten_vars, 3)\n\n self.evaluate(tf.global_variables_initializer())\n\n all_vars = tf.trainable_variables()\n for v in all_vars:\n print(v.eval())\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n print(atten_vec_out.dtype)\n print(prob_out.dtype)\n self.assertTrue(np.all(np.isfinite(atten_vec_out)))\n self.assertTrue(np.all(np.isfinite(prob_out)))\n\n def testAdditiveAttentionVN64bits(self):\n with self.session(use_gpu=True):\n np.random.seed(12345)\n source_vecs = tf.constant(np.random.rand(5, 3, 4), dtype=tf.float64)\n source_contexts = tf.constant(np.random.rand(5, 3, 5), dtype=tf.float64)\n source_padding = tf.transpose(\n tf.constant([[0, 0, 1, 1, 0], [1, 0, 0, 0, 1], [0, 0, 1, 0, 1]],\n dtype=tf.float64))\n query_vec = tf.constant(np.random.rand(6, 7), dtype=tf.float64)\n\n params = attention.AdditiveAttention.Params()\n params.name = 'atten'\n params.dtype = tf.float64\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n params.source_dim = 4\n params.query_dim = 7\n params.hidden_dim = 5\n params.vn.global_vn = True\n params.vn.per_step_vn = True\n params.vn.scale = 1.0\n params.vn.seed = 54321\n\n atten = attention.AdditiveAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n atten_vars = 
tf.get_collection('AdditiveAttention_vars')\n self.assertLen(atten_vars, 3)\n\n self.evaluate(tf.global_variables_initializer())\n\n all_vars = tf.trainable_variables()\n for v in all_vars:\n print(v.eval())\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n\n print('prob_out with vn: %r' % prob_out)\n print('atten_vec_out with vn: %r' % atten_vec_out)\n\n expected_prob_out = [\n [0.34381885, 0.32317923, 0., 0., 0.33300192],\n [0., 0.26230015, 0.49741682, 0.24028302, 0.],\n [0.34746732, 0.32810605, 0., 0.32442663, 0.],\n [0.38146799, 0.3461788, 0., 0., 0.27235321],\n [0., 0.26704709, 0.46667228, 0.26628063, 0.],\n [0.35243919, 0.32645217, 0., 0.32110863, 0.],\n ]\n expected_atten_vec_out = [\n [0.51341882, 0.35943304, 0.47567072, 0.41358617, 0.54388313],\n [0.87185038, 0.36767787, 0.69357746, 0.74208826, 0.12255736],\n [0.78364668, 0.45808038, 0.40468006, 0.6830947, 0.43659402],\n [0.47686953, 0.34565806, 0.51092794, 0.39762747, 0.53235401],\n [0.86682548, 0.36643946, 0.69277134, 0.72939703, 0.1235218],\n [0.78405494, 0.45800471, 0.40414068, 0.68330528, 0.43695764],\n ]\n\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def _DotProductAttention(self, packed_inputs):\n with self.session(use_gpu=True):\n np.random.seed(12345)\n # source_vecs_p, source_contexts_p, source_padding_p, query_vec_p are used\n # for both TensorFlow and numpy computation.\n source_vecs_p = [np.random.rand(3, 4) for _ in range(6)]\n source_vecs = tf.stack(\n [tf.constant(x, dtype=tf.float32) for x in source_vecs_p])\n source_contexts_p = [np.random.rand(3, 5) for _ in range(6)]\n source_contexts = tf.stack(\n [tf.constant(x, dtype=tf.float32) for x in source_contexts_p])\n source_padding_p = [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 0]]\n source_padding = tf.transpose(\n tf.constant(source_padding_p, dtype=tf.float32))\n query_vec_p = np.random.rand(6, 4)\n query_vec = tf.constant(query_vec_p, dtype=tf.float32)\n query_segment_id_p = [0, 1, 1, 1, 2, 2]\n source_segment_id_p = [[0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 2, 2],\n [0, 1, 1, 1, 1, 2]]\n source_segment_id = None\n query_segment_id = None\n if packed_inputs:\n source_segment_id = tf.transpose(\n tf.constant(source_segment_id_p, dtype=tf.float32))\n query_segment_id = tf.constant(query_segment_id_p, dtype=tf.float32)\n params = attention.DotProductAttention.Params()\n params.name = 'dotproduct_atten'\n params.source_dim = 4\n params.query_dim = 4\n params.hidden_dim = 4\n params.packed_input = packed_inputs\n atten = attention.DotProductAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding, source_segment_id)\n self.evaluate(tf.global_variables_initializer())\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec, query_segment_id=query_segment_id)\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n # Use numpy to perform the same computation to generate expected results.\n source_vecs_p = np.array(source_vecs_p)\n # Dot-product part.\n expected_logit = np.array([\n np.dot(source_vecs_p[:, i % 3, :], query_vec_p[i, :])\n for i in range(6)\n ]) / math.sqrt(4)\n elexp = np.exp(expected_logit)\n source_padding_p = np.array(source_padding_p)\n elexp *= (1 - np.tile(source_padding_p, (2, 1)))\n if packed_inputs:\n # Manually constructed packed input mask.\n mask = np.asarray([[1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1],\n [0, 0, 0, 0, 1, 
1], [0, 0, 0, 0, 0, 1]])\n elexp *= mask\n expected_prob_out = elexp / np.expand_dims(np.sum(elexp, axis=1), axis=1)\n expanded_epo = np.expand_dims(expected_prob_out, axis=2)\n source_contexts_p = np.array(source_contexts_p)\n expected_atten_vec_out = np.array([\n np.sum(\n source_contexts_p[:, i % 3, :] * expanded_epo[i, :, :], axis=0)\n for i in range(6)\n ])\n\n print(['additive attention prob_out', np.array_repr(prob_out)])\n print(['additive attention atten_vec_out', np.array_repr(atten_vec_out)])\n\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def testDotProductAttention(self):\n self._DotProductAttention(packed_inputs=False)\n\n def testDotProductAttentionPackedInput(self):\n self._DotProductAttention(packed_inputs=True)\n\n def _MultiHeadedAttentionInputs(self, source_dim=4, dtype=tf.float32):\n np.random.seed(6348575)\n # source_vecs_p, source_contexts_p, source_padding_p, query_vec_p are used\n # for both TensorFlow and numpy computation.\n source_vecs_p = [np.random.rand(3, source_dim) for _ in range(6)]\n source_vecs = tf.stack([tf.constant(x, dtype=dtype) for x in source_vecs_p])\n source_contexts_p = [np.random.rand(3, 6) for _ in range(6)]\n source_contexts = tf.stack(\n [tf.constant(x, dtype=dtype) for x in source_contexts_p])\n source_padding_p = [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 0]]\n source_padding = tf.transpose(tf.constant(source_padding_p, dtype=dtype))\n query_vec_p = np.random.rand(6, 4)\n query_vec = tf.constant(query_vec_p, dtype=dtype)\n query_segment_id_p = [0, 1, 1, 1, 2, 2]\n source_segment_id_p = [[0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 2, 2],\n [0, 1, 1, 1, 1, 2]]\n source_segment_id = tf.transpose(\n tf.constant(source_segment_id_p, dtype=dtype))\n query_segment_id = tf.constant(query_segment_id_p, dtype=dtype)\n return (source_vecs, source_contexts, source_padding, source_padding_p,\n query_vec, source_segment_id, query_segment_id)\n\n def testMultiHeadedAttentionDotProductWithFeedinProbs(self):\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, source_padding, _, _, _,\n _) = self._MultiHeadedAttentionInputs()\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False)\n atten = params.Instantiate()\n packed_src = atten.InitForSourcePacked(atten.theta, source_vecs,\n source_contexts, source_padding)\n self.evaluate(tf.global_variables_initializer())\n atten_probs = tf.constant([[1.0] + [0.0] * 5] * 3 * 2, dtype=tf.float32)\n atten_vec_proj, atten_vec = atten.ComputeContextVectorWithAttenProbs(\n atten.theta, packed_src.source_contexts, atten_probs)\n atten_vec_proj, atten_vec, packed_context = self.evaluate(\n [atten_vec_proj, atten_vec, packed_src.source_contexts])\n self.assertAllClose(\n atten_vec,\n np.reshape(np.transpose(packed_context, (0, 2, 1)), [3, 6, 6])[:, :,\n 0])\n self.assertAllClose([2.5694468, 4.36386967, 3.24537992],\n np.sum(atten_vec_proj, axis=1))\n\n def _testMultiHeadedAttentionExtendCachedSourceVecsHelper(\n self, additive_atten, dtype, fprop_dtype):\n # source_batch:3, target_batch:6. 
Test n = 2 case.\n use_gpu = (dtype == tf.float32 and fprop_dtype == tf.float32)\n with self.session(use_gpu=use_gpu):\n (source_vecs, source_contexts, source_padding, _, query_vec,\n source_seg_id,\n query_seg_id) = self._MultiHeadedAttentionInputs(dtype=fprop_dtype)\n if additive_atten:\n iap = attention.AdditiveAttention.Params()\n iap.name = 'add_atten'\n else:\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n dtype=dtype,\n fprop_dtype=fprop_dtype,\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False,\n packed_input=True)\n atten = params.Instantiate()\n theta = atten.theta\n packed_src1 = atten.InitForSourcePacked(theta, source_vecs,\n source_contexts, source_padding,\n source_seg_id)\n cached_src = py_utils.NestedMap(\n source_vecs=tf.zeros([0, 3, 4], dtype=packed_src1.source_vecs.dtype),\n source_contexts=tf.zeros([0, 3, 6],\n dtype=packed_src1.source_contexts.dtype),\n source_padding=tf.zeros([0, 3, 2],\n dtype=packed_src1.source_padding.dtype),\n source_segment_id=tf.zeros([0, 3, 2],\n dtype=packed_src1.source_segment_id.dtype))\n for i in range(6):\n cached_src = atten.ExtendSourcePacked(theta, source_vecs[i, :, :],\n source_contexts[i, :, :],\n source_padding[i, :],\n source_seg_id[i, :], cached_src)\n packed_src2 = atten.PackCachedSource(cached_src)\n self.evaluate(tf.global_variables_initializer())\n\n atten_vec_1, prob_1, _ = atten.ComputeContextVectorWithSource(\n theta, packed_src1, query_vec, query_segment_id=query_seg_id)\n atten_vec_2, prob_2, _ = atten.ComputeContextVectorWithCachedSource(\n theta, cached_src, query_vec, query_segment_id=query_seg_id)\n\n packed_src1_v, packed_src2_v, cached_src_v = self.evaluate(\n [packed_src1, packed_src2, cached_src])\n tf.logging.info('packed_src1=%s', packed_src1_v)\n tf.logging.info('packed_src2=%s', packed_src2_v)\n tf.logging.info('cached_src=%s', cached_src_v)\n self.assertAllClose(packed_src1_v.source_vecs, packed_src2_v.source_vecs)\n self.assertAllClose(packed_src1_v.source_contexts,\n packed_src2_v.source_contexts)\n self.assertAllClose(packed_src1_v.source_padding,\n packed_src2_v.source_padding)\n self.assertAllClose(packed_src1_v.source_segment_id,\n packed_src2_v.source_segment_id)\n atten_vec1_v, prob1_v, atten_vec2_v, prob2_v = self.evaluate(\n [atten_vec_1, prob_1, atten_vec_2, prob_2])\n self.assertAllClose(prob1_v, prob2_v)\n self.assertAllClose(atten_vec1_v, atten_vec2_v)\n\n def testMultiHeadedAttentionExtendCachedSourceVecsAdditiveFloat32(self):\n self._testMultiHeadedAttentionExtendCachedSourceVecsHelper(\n additive_atten=True, dtype=tf.float32, fprop_dtype=tf.float32)\n\n def testMultiHeadedAttentionExtendCachedSourceVecsAdditiveFloat32Float16(\n self):\n self._testMultiHeadedAttentionExtendCachedSourceVecsHelper(\n additive_atten=True, dtype=tf.float32, fprop_dtype=tf.float16)\n\n def testMultiHeadedAttentionExtendCachedSourceVecsDotFloat32(self):\n self._testMultiHeadedAttentionExtendCachedSourceVecsHelper(\n additive_atten=False, dtype=tf.float32, fprop_dtype=tf.float32)\n\n def testMultiHeadedAttentionExtendCachedSourceVecsDotFloat32Float16(self):\n self._testMultiHeadedAttentionExtendCachedSourceVecsHelper(\n additive_atten=False, dtype=tf.float32, fprop_dtype=tf.float16)\n\n def _testMultiHeadedAttentionExtendCachedSourceVecsNoPaddingsHelper(\n self, additive_attention=False):\n # source_batch:3, target_batch:6. 
Test n = 2 case.\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, _, _, query_vec, _,\n _) = self._MultiHeadedAttentionInputs()\n source_padding = tf.zeros([6, 3])\n if additive_attention:\n iap = attention.AdditiveAttention.Params()\n iap.name = 'add_atten'\n else:\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False)\n atten = params.Instantiate()\n packed_src1 = atten.InitForSourcePacked(atten.theta, source_vecs,\n source_contexts, source_padding)\n cached_src = py_utils.NestedMap(\n source_vecs=tf.zeros([0, 3, 4], dtype=packed_src1.source_vecs.dtype),\n source_contexts=tf.zeros([0, 3, 6],\n dtype=packed_src1.source_contexts.dtype),\n source_padding=None,\n source_seg_id=None)\n for i in range(6):\n cached_src = atten.ExtendSourcePacked(atten.theta, source_vecs[i, :, :],\n source_contexts[i, :, :], None,\n None, cached_src)\n packed_src2 = atten.PackCachedSource(cached_src)\n self.evaluate(tf.global_variables_initializer())\n\n atten_vec_1, prob_1, _ = atten.ComputeContextVectorWithSource(\n atten.theta, packed_src1, query_vec)\n atten_vec_2, prob_2, _ = atten.ComputeContextVectorWithCachedSource(\n atten.theta, cached_src, query_vec)\n\n (source_vec1_v, source_context1_v, source_vec2_v, source_context2_v,\n atten_vec1_v, prob1_v, atten_vec2_v, prob2_v) = self.evaluate([\n packed_src1.source_vecs, packed_src1.source_contexts,\n packed_src2.source_vecs, packed_src2.source_contexts, atten_vec_1,\n prob_1, atten_vec_2, prob_2\n ])\n self.assertAllClose(source_vec1_v, source_vec2_v)\n self.assertAllClose(source_context1_v, source_context2_v)\n self.assertAllClose(atten_vec1_v, atten_vec2_v)\n self.assertAllClose(prob1_v, prob2_v)\n\n def testMultiHeadedDotAttentionExtendCachedSourceVecsNoPaddings(self):\n self._testMultiHeadedAttentionExtendCachedSourceVecsNoPaddingsHelper(False)\n\n def testMultiHeadedAddAttentionExtendCachedSourceVecsNoPaddings(self):\n self._testMultiHeadedAttentionExtendCachedSourceVecsNoPaddingsHelper(True)\n\n def testMultiHeadedAttentionDotProduct(self):\n # source_batch:3, target_batch:6. 
Test n = 2 case.\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, source_padding, source_padding_p,\n query_vec, _, _) = self._MultiHeadedAttentionInputs()\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False)\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n self.evaluate(tf.global_variables_initializer())\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n self.assertAllClose([\n 2.84679317, 2.36924601, 3.54831171, 2.86487937, 2.3537426, 3.54308939\n ], np.sum(atten_vec_out, axis=1))\n print('atten_vec_out', atten_vec_out)\n print('prob_out', prob_out)\n t_batch_size = 6\n s_batch_size = 3\n for i in range(t_batch_size):\n # Test to make sure we didn't mess up indexing.\n s_index = i % s_batch_size\n atten.InitForSourcePacked(atten.theta,\n source_vecs[:, s_index:s_index + 1, :],\n source_contexts[:, s_index:s_index + 1, :],\n source_padding[:, s_index:s_index + 1])\n atten_vec_i, prob_i, _ = atten.ComputeContextVector(\n atten.theta, query_vec[i:i + 1])\n atten_vec_i_out, prob_i_out = self.evaluate([atten_vec_i, prob_i])\n self.assertAllClose(prob_i_out, prob_out[i:i + 1])\n self.assertAllClose(atten_vec_i_out, atten_vec_out[i:i + 1])\n padding_i = source_padding_p[s_index]\n # Check to make sure prob exists only on valid timesteps.\n self.assertEqual(0.0, np.sum(padding_i * prob_i_out))\n\n def testMultiHeadedAttentionDotProductNoPerDimScaleNoBias(self):\n # source_batch:3, target_batch:6. 
Test n = 2 case.\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, source_padding, source_padding_p,\n query_vec, _, _) = self._MultiHeadedAttentionInputs()\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False,\n enable_per_dim_scale=False,\n use_bias=False)\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n self.evaluate(tf.global_variables_initializer())\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n self.assertAllClose([\n 2.84679317, 2.36924601, 3.54831171, 2.86487937, 2.3537426, 3.54308939\n ], np.sum(atten_vec_out, axis=1))\n print('atten_vec_out', atten_vec_out)\n print('prob_out', prob_out)\n t_batch_size = 6\n s_batch_size = 3\n for i in range(t_batch_size):\n # Test to make sure we didn't mess up indexing.\n s_index = i % s_batch_size\n atten.InitForSourcePacked(atten.theta,\n source_vecs[:, s_index:s_index + 1, :],\n source_contexts[:, s_index:s_index + 1, :],\n source_padding[:, s_index:s_index + 1])\n atten_vec_i, prob_i, _ = atten.ComputeContextVector(\n atten.theta, query_vec[i:i + 1])\n atten_vec_i_out, prob_i_out = self.evaluate([atten_vec_i, prob_i])\n self.assertAllClose(prob_i_out, prob_out[i:i + 1])\n self.assertAllClose(atten_vec_i_out, atten_vec_out[i:i + 1])\n padding_i = source_padding_p[s_index]\n # Check to make sure prob exists only on valid timesteps.\n self.assertEqual(0.0, np.sum(padding_i * prob_i_out))\n\n def testMultiHeadedAttentionDotProductPackedInput(self):\n # source_batch:3, target_batch:6. 
Test n = 2 case.\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, source_padding, source_padding_p,\n query_vec, source_seg_id,\n query_seg_id) = self._MultiHeadedAttentionInputs()\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False,\n packed_input=True)\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding, source_seg_id)\n self.evaluate(tf.global_variables_initializer())\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec, query_segment_id=query_seg_id)\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n self.assertAllClose(\n [2.565648, 2.268182, 3.739031, 3.093884, 2.770367, 3.580353],\n np.sum(atten_vec_out, axis=1))\n print('atten_vec_out', atten_vec_out)\n print('prob_out', prob_out)\n t_batch_size = 6\n s_batch_size = 3\n for i in range(t_batch_size):\n # Test to make sure we didn't mess up indexing.\n s_index = i % s_batch_size\n src_seg_id = source_seg_id[:, s_index:s_index + 1]\n atten.InitForSourcePacked(atten.theta,\n source_vecs[:, s_index:s_index + 1, :],\n source_contexts[:, s_index:s_index + 1, :],\n source_padding[:, s_index:s_index + 1],\n src_seg_id)\n qry_seg_id = query_seg_id[i:i + 1]\n atten_vec_i, prob_i, _ = atten.ComputeContextVector(\n atten.theta, query_vec[i:i + 1], query_segment_id=qry_seg_id)\n atten_vec_i_out, prob_i_out = self.evaluate([atten_vec_i, prob_i])\n self.assertAllClose(prob_i_out, prob_out[i:i + 1])\n self.assertAllClose(atten_vec_i_out, atten_vec_out[i:i + 1])\n padding_i = source_padding_p[s_index]\n # Check to make sure prob exists only on valid timesteps.\n self.assertEqual(0.0, np.sum(padding_i * prob_i_out))\n\n def testMultiHeadedAttentionDotProductDeterministicDropout(self):\n # source_batch:3, target_batch:6. 
Test n = 2 case.\n with self.session(use_gpu=True):\n source_vecs, source_contexts, source_padding, _, query_vec, _, _ = (\n self._MultiHeadedAttentionInputs())\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n atten_dropout_prob=0.5,\n atten_dropout_deterministic=True,\n random_seed=7249528,\n use_source_vec_as_attention_value=False)\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n atten_state = atten.ZeroAttentionState(2, 6)\n print('atten_state:', atten_state)\n\n atten_vec, atten_prob, atten_state = atten.ComputeContextVector(\n atten.theta, query_vec, atten_state)\n\n self.evaluate(tf.global_variables_initializer())\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n self.assertAllClose(\n [4.152538, 3.423376, 4.782746, 4.238819, 1.785596, 3.239613],\n np.sum(atten_vec_out, axis=1))\n\n print('atten_vec_out %r' % atten_vec_out)\n print('prob_out %r' % prob_out)\n\n expected_prob_out = [\n [0.235899, 0.26425028, 0., 0., 0.2534047, 0.24644604],\n [0., 0.28664297, 0.2365877, 0.24659133, 0., 0.23017803],\n [0.2340432, 0.26618454, 0., 0.257336, 0., 0.24243627],\n [0.25304407, 0.25804248, 0., 0., 0.24191463, 0.24699883],\n [0., 0.24431466, 0.25021935, 0.24959373, 0., 0.2558723],\n [0.2792741, 0.21746796, 0., 0.25468093, 0., 0.24857706]\n ]\n expected_atten_vec_out = [\n [\n 0.87741864, 0.73626477, 0.90619636, 0.64786565, 0.6392238,\n 0.3455683\n ],\n [0.39735186, 0.6100546, 1.1262057, 0.43140602, 0.29049844, 0.5678591],\n [\n 0.90514874, 1.1781378, 0.82465374, 0.64402366, 0.8315829,\n 0.39919922\n ],\n [\n 0.7801036, 0.92708683, 0.8839339, 0.70208144, 0.56675506,\n 0.37885764\n ],\n [0.1725478, 0.02231066, 0.06681296, 0.5807549, 0.25445306, 0.6887169],\n [0.46632785, 0.9006454, 0.8054941, 0.4738411, 0.30304378, 0.2902605]\n ]\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def testMultiHeadedAttentionMonotonic(self):\n # source_batch:3, target_batch:6. 
Test n = 2 case.\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, source_padding, source_padding_p,\n query_vec, _, _) = self._MultiHeadedAttentionInputs()\n iap = attention.MonotonicAttention.Params()\n iap.name = 'mono_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False)\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n # [batch * 2 heads, time]\n atten_init_state = self._attentionStateWithRandomEmitProbabilities(\n atten, 12, 6)\n print('atten_init_state', atten_init_state)\n self.evaluate(tf.global_variables_initializer())\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec, atten_init_state)\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n self.assertAllClose(\n [1.494033, 1.120422, 1.699309, 1.508609, 1.1329, 1.670303],\n np.sum(atten_vec_out, axis=1))\n print('atten_vec_out', atten_vec_out)\n print('prob_out', prob_out)\n t_batch_size = 6\n s_batch_size = 3\n for i in range(t_batch_size):\n # Test to make sure we didn't mess up indexing.\n s_index = i % s_batch_size\n atten.InitForSourcePacked(atten.theta,\n source_vecs[:, s_index:s_index + 1, :],\n source_contexts[:, s_index:s_index + 1, :],\n source_padding[:, s_index:s_index + 1])\n j = i * 2\n sliced_atten_state = py_utils.NestedMap(\n emit_probs=atten_init_state.emit_probs[j:j + 2])\n atten_vec_i, prob_i, _ = atten.ComputeContextVector(\n atten.theta, query_vec[i:i + 1], sliced_atten_state)\n atten_vec_i_out, prob_i_out = self.evaluate([atten_vec_i, prob_i])\n self.assertAllClose(prob_i_out, prob_out[i:i + 1])\n self.assertAllClose(atten_vec_i_out, atten_vec_out[i:i + 1])\n padding_i = source_padding_p[s_index]\n # Check to make sure prob exists only on valid timesteps.\n self.assertEqual(0.0, np.sum(padding_i * prob_i_out))\n\n def testMultiHeadedAttentionDotProductWithAllProj(self):\n # source_batch:3, target_batch:6. 
Test n = 2 case.\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, source_padding, source_padding_p,\n query_vec, _, _) = self._MultiHeadedAttentionInputs()\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False,\n enable_ctx_pre_proj=True,\n enable_ctx_post_proj=True,\n ctx_post_proj_dim=5,\n context_dim=6)\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n self.evaluate(tf.global_variables_initializer())\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=params.ctx_post_proj_dim)\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n self.assertAllClose([\n 1.356745, 0.65274805, 1.39460433, 1.34961343, 0.63025361, 1.41543126\n ], np.sum(atten_vec_out, axis=1))\n print('atten_vec_out', atten_vec_out)\n print('prob_out', prob_out)\n t_batch_size = 6\n s_batch_size = 3\n for i in range(t_batch_size):\n # Test to make sure we didn't mess up indexing.\n s_index = i % s_batch_size\n atten.InitForSourcePacked(atten.theta,\n source_vecs[:, s_index:s_index + 1, :],\n source_contexts[:, s_index:s_index + 1, :],\n source_padding[:, s_index:s_index + 1])\n atten_vec_i, prob_i, _ = atten.ComputeContextVector(\n atten.theta, query_vec[i:i + 1])\n atten_vec_i_out, prob_i_out = self.evaluate([atten_vec_i, prob_i])\n self.assertAllClose(prob_i_out, prob_out[i:i + 1])\n self.assertAllClose(atten_vec_i_out, atten_vec_out[i:i + 1])\n padding_i = source_padding_p[s_index]\n # Check to make sure prob exists only on valid timesteps.\n self.assertEqual(0.0, np.sum(padding_i * prob_i_out))\n\n def testMultiHeadedAttentionDotProductMultiPostProj(self):\n # Test for multiple attention post-projection.\n # source_batch:3, target_batch:6. 
Test n = 2 case.\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, source_padding, _, query_vec, _,\n _) = self._MultiHeadedAttentionInputs()\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False,\n enable_ctx_pre_proj=True,\n enable_ctx_post_proj=True,\n ctx_post_proj_dim=6,\n context_dim=6,\n num_post_proj=2)\n atten = params.Instantiate()\n packed_src = atten.InitForSourcePacked(atten.theta, source_vecs,\n source_contexts, source_padding)\n # Duplicate atten_idx n=2 times.\n atten_idx = tf.constant([0, 1, 1] * 2, dtype=tf.int32)\n self.evaluate(tf.global_variables_initializer())\n atten_vec, atten_prob, _ = atten.ComputeContextVectorWithSource(\n atten.theta, packed_src, query_vec, atten_idx=atten_idx)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n self.assertAllClose([\n 0.66697717, 0.52266854, 0.7827165, 0.65693897, 0.51808167, 0.82977116\n ], np.sum(atten_vec_out, axis=1))\n print('atten_vec_out', atten_vec_out)\n print('prob_out', prob_out)\n\n def _testMultiHeadedAttentionAdditiveHelper(self,\n source_dim,\n expected_vec,\n packed_input=False):\n # source_batch:3, target_batch:6. Test n = 2 case.\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, source_padding, source_padding_p,\n query_vec, source_seg_id,\n query_seg_id) = self._MultiHeadedAttentionInputs(source_dim)\n if not packed_input:\n source_seg_id = None\n query_seg_id = None\n iap = attention.AdditiveAttention.Params()\n iap.name = 'add_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=source_dim,\n query_dim=4,\n hidden_dim=4,\n num_attention_heads=2,\n inner_atten_params=iap,\n use_source_vec_as_attention_value=False,\n vn=py_utils.VariationalNoiseParams(0.0, False, False),\n packed_input=packed_input)\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding, source_seg_id)\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec, query_segment_id=query_seg_id)\n\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n self.evaluate(tf.global_variables_initializer())\n atten_vec_out, prob_out = self.evaluate([atten_vec, atten_prob])\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n self.assertAllClose(expected_vec, np.sum(atten_vec_out, axis=1))\n print('atten_vec_out', atten_vec_out)\n print('prob_out', prob_out)\n t_batch_size = 6\n s_batch_size = 3\n for i in range(t_batch_size):\n # Test to make sure we didn't mess up indexing.\n s_index = i % s_batch_size\n src_seg_id = None\n if packed_input:\n src_seg_id = source_seg_id[:, s_index:s_index + 1]\n atten.InitForSourcePacked(atten.theta,\n source_vecs[:, s_index:s_index + 1, :],\n source_contexts[:, s_index:s_index + 1, :],\n source_padding[:, s_index:s_index + 1],\n src_seg_id)\n qry_seg_id = None\n if packed_input:\n qry_seg_id = query_seg_id[i:i + 1]\n atten_vec_i, prob_i, _ = atten.ComputeContextVector(\n atten.theta, 
query_vec[i:i + 1], query_segment_id=qry_seg_id)\n atten_vec_i_out, prob_i_out = self.evaluate([atten_vec_i, prob_i])\n self.assertAllClose(prob_i_out, prob_out[i:i + 1])\n self.assertAllClose(atten_vec_i_out, atten_vec_out[i:i + 1])\n padding_i = source_padding_p[s_index]\n # Check to make sure prob exists only on valid timesteps.\n self.assertEqual(0.0, np.sum(padding_i * prob_i_out))\n\n def testMultiHeadedAttentionAttStateSingleProb(self):\n # source_batch:3, target_batch:6. Test n = 2 case.\n with self.session(use_gpu=True):\n (source_vecs, source_contexts, source_padding, _, query_vec, _,\n _) = self._MultiHeadedAttentionInputs()\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False,\n attention_head_prob_index=1)\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n atten_state = atten.ZeroAttentionState(2, 6)\n print('atten_state:', atten_state)\n\n atten_vec, atten_prob, atten_state = atten.ComputeContextVector(\n atten.theta, query_vec, atten_state)\n self.evaluate(tf.global_variables_initializer())\n atten_vec_out, _, atten_state = self.evaluate(\n [atten_vec, atten_prob, atten_state])\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n print('atten_state', atten_state)\n self.assertAllClose([\n 2.84679317, 2.36924601, 3.54831171, 2.86487937, 2.3537426, 3.54308939\n ], np.sum(atten_vec_out, axis=1))\n expected_prob_atten_state = ([\n [0.24530524, 0.24182455, 0, 0, 0.2497975, 0.2630728],\n [0, 0.24692935, 0.23176268, 0.26929834, 0, 0.2520097],\n [0.28280658, 0.23664463, 0, 0.18057014, 0, 0.29997864],\n [0.21294391, 0.2421909, 0, 0, 0.2702513, 0.27461392],\n [0, 0.25139052, 0.24466391, 0.25138932, 0, 0.25255626],\n [0.25900277, 0.2514635, 0, 0.23059677, 0, 0.25893703]\n ])\n self.assertAllClose(expected_prob_atten_state,\n atten_state.selected_attention_head_probs)\n\n def testMultiHeadedAttentionAdditive(self):\n self._testMultiHeadedAttentionAdditiveHelper(\n 4, [2.858081, 2.33295, 3.529434, 2.856466, 2.342262, 3.526487])\n\n def testMultiHeadedAttentionAdditivePackedInput(self):\n self._testMultiHeadedAttentionAdditiveHelper(\n 4, [2.585192, 2.267683, 3.708972, 3.107646, 2.770367, 3.580353],\n packed_input=True)\n\n def testMultiHeadedAttentionAdditiveUnequalDim(self):\n self._testMultiHeadedAttentionAdditiveHelper(\n 14, [3.189594, 2.462574, 2.912001, 3.19924, 2.462459, 2.909231])\n\n def testLocationSensitiveAttention1(self):\n with self.session(use_gpu=True):\n np.random.seed(12345)\n source_vecs = tf.stack([\n tf.constant(np.random.rand(3, 4), dtype=tf.float32) for _ in range(6)\n ])\n source_contexts = tf.stack([\n tf.constant(np.random.rand(3, 5), dtype=tf.float32) for _ in range(6)\n ])\n source_padding = tf.transpose(\n tf.constant(\n [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0]],\n dtype=tf.float32))\n query_vec = tf.constant(np.random.rand(6, 7), dtype=tf.float32)\n\n params = attention.LocationSensitiveAttention.Params()\n params.name = 'loc_atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n params.source_dim = 4\n params.query_dim = 7\n params.hidden_dim = 7\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n params.location_filter_size = 3\n params.location_num_filters = 4\n\n atten = 
attention.LocationSensitiveAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n\n atten_init_state = tf.nn.softmax(\n tf.constant(\n np.random.rand(6, len(params.location_features), 6),\n dtype=tf.float32))\n\n atten_vec, atten_prob, atten_state = atten.ComputeContextVector(\n atten.theta, query_vec, atten_init_state)\n\n atten_vars = tf.get_collection('LocationSensitiveAttention_vars')\n self.assertLen(atten_vars, 5)\n\n self.evaluate(tf.global_variables_initializer())\n\n atten_vec_out, prob_out, atten_init_state_out, atten_state_out = self.evaluate(\n [atten_vec, atten_prob, atten_init_state, atten_state])\n\n self.assertEqual(atten_init_state_out.shape, atten_state_out.shape)\n\n print(['additive attention prob_out', np.array_repr(prob_out)])\n print(['additive attention atten_vec_out', np.array_repr(atten_vec_out)])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_prob_out = [\n [ 0.25557119, 0.2407331 , 0. , 0. , 0.25413439,\n 0.24956135],\n [ 0. , 0.2539435 , 0.24765202, 0.25480285, 0. ,\n 0.24360162],\n [ 0.25094694, 0.25000173, 0. , 0.24308425, 0. ,\n 0.25596702],\n [ 0.25559491, 0.24071115, 0. , 0. , 0.2541317 ,\n 0.24956223],\n [ 0. , 0.25393987, 0.24765508, 0.25481141, 0. ,\n 0.24359357],\n [ 0.25112614, 0.24990462, 0. , 0.24246819, 0. ,\n 0.25650105]]\n expected_atten_vec_out = [\n [ 0.49745601, 0.63471878, 0.49220741, 0.56829882, 0.42753279],\n [ 0.51502693, 0.56184328, 0.37644374, 0.87425017, 0.46182287],\n [ 0.57862061, 0.44247472, 0.36931327, 0.41002682, 0.14327496],\n [ 0.49745524, 0.63471991, 0.49221092, 0.56828701, 0.427522 ],\n [ 0.51502484, 0.5618462 , 0.37644884, 0.87424958, 0.46181911],\n [ 0.57893252, 0.44248456, 0.36938512, 0.4100675 , 0.14328022]]\n # pyformat: enable\n # pylint: enable=bad-whitespace\n\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def testLocationSensitiveAttention2(self):\n with self.session(use_gpu=True):\n np.random.seed(12345)\n source_vecs = tf.stack([\n tf.constant(np.random.rand(3, 4), dtype=tf.float32) for _ in range(6)\n ])\n source_contexts = tf.stack([\n tf.constant(np.random.rand(3, 5), dtype=tf.float32) for _ in range(6)\n ])\n source_padding = tf.transpose(\n tf.constant(\n [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0]],\n dtype=tf.float32))\n query_vec = tf.constant(np.random.rand(6, 7), dtype=tf.float32)\n\n params = attention.LocationSensitiveAttention.Params()\n params.name = 'loc_atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n params.source_dim = 4\n params.query_dim = 7\n params.hidden_dim = 7\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n params.location_filter_size = 3\n params.location_num_filters = 4\n params.location_features = ['PREV_PROBS', 'CUMULATIVE_PROBS']\n\n atten = attention.LocationSensitiveAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n\n atten_init_state = atten.ZeroAttentionState(tf.shape(source_vecs)[0], 6)\n\n (unused_atten_vec, unused_atten_prob,\n atten_state) = atten.ComputeContextVector(atten.theta, query_vec,\n atten_init_state)\n\n atten_vars = tf.get_collection('LocationSensitiveAttention_vars')\n self.assertLen(atten_vars, 5)\n\n self.evaluate(tf.global_variables_initializer())\n\n atten_init_state_out, atten_state_out = self.evaluate(\n [atten_init_state, atten_state])\n\n self.assertEqual(atten_init_state_out.shape, 
atten_state_out.shape)\n\n def _testLocationSensitiveAttentionSameBatchSizeHelper(\n self, same_batch_size, quantized=False):\n with self.session(tf.Graph(), use_gpu=True):\n np.random.seed(12345)\n dtype = tf.float32 if quantized else tf.float64\n source_vecs = tf.constant(np.random.rand(6, 3, 4), dtype=dtype)\n source_contexts = tf.constant(np.random.rand(6, 3, 5), dtype=dtype)\n source_padding = tf.transpose(\n tf.constant(\n [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0]],\n dtype=dtype))\n\n query_vec = tf.constant(np.random.rand(3, 7), dtype=dtype)\n\n params = attention.LocationSensitiveAttention.Params()\n params.dtype = dtype\n params.name = 'loc_atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n params.source_dim = 4\n params.query_dim = 7\n params.hidden_dim = 7\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n params.location_filter_size = 3\n params.location_num_filters = 4\n params.same_batch_size = same_batch_size\n\n if quantized:\n cc_schedule = quant_utils.FakeQuantizationSchedule.Params().Set(\n clip_start_step=0,\n clip_end_step=13000,\n quant_start_step=14000,\n start_cap=8.0,\n end_cap=1.0)\n qdomain = quant_utils.SymmetricScheduledClipQDomain.Params().Set(\n cc_schedule=cc_schedule.Copy())\n params.qdomain.default = qdomain.Copy()\n params.qdomain.atten_context = qdomain.Copy()\n\n atten = attention.LocationSensitiveAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n\n atten_init_state = tf.nn.softmax(\n tf.constant(\n np.random.rand(3, len(params.location_features), 6), dtype=dtype))\n\n atten_vec, atten_prob, atten_state = atten.ComputeContextVector(\n atten.theta, query_vec, atten_init_state)\n\n atten_vars = tf.get_collection('LocationSensitiveAttention_vars')\n self.assertLen(atten_vars, 5)\n\n self.evaluate(tf.global_variables_initializer())\n\n atten_vec_out, prob_out, atten_init_state_out, atten_state_out = self.evaluate(\n [atten_vec, atten_prob, atten_init_state, atten_state])\n\n self.assertEqual(atten_init_state_out.shape, atten_state_out.shape)\n return atten_vec_out, prob_out, atten_init_state_out, atten_state_out\n\n def testLocationSensitiveAttentionSameBatchSize(self):\n (atten_vec_out1, prob_out1, atten_init_state_out1, atten_state_out1) = (\n self._testLocationSensitiveAttentionSameBatchSizeHelper(True))\n (atten_vec_out2, prob_out2, atten_init_state_out2, atten_state_out2) = (\n self._testLocationSensitiveAttentionSameBatchSizeHelper(False))\n self.assertAllClose(atten_vec_out1, atten_vec_out2, rtol=1e-04, atol=1e-04)\n self.assertAllClose(prob_out1, prob_out2, rtol=1e-04, atol=1e-04)\n self.assertAllClose(\n atten_init_state_out1, atten_init_state_out2, rtol=1e-04, atol=1e-04)\n self.assertAllClose(\n atten_state_out1, atten_state_out2, rtol=1e-04, atol=1e-04)\n\n def testLocationSensitiveAttentionQuantized(self):\n (atten_vec_out1, prob_out1, atten_init_state_out1, atten_state_out1) = (\n self._testLocationSensitiveAttentionSameBatchSizeHelper(False, False))\n (atten_vec_out2, prob_out2, atten_init_state_out2, atten_state_out2) = (\n self._testLocationSensitiveAttentionSameBatchSizeHelper(False, True))\n self.assertAllClose(atten_vec_out1, atten_vec_out2, rtol=1e-02, atol=1e-02)\n self.assertAllClose(prob_out1, prob_out2, rtol=1e-02, atol=1e-02)\n self.assertAllClose(\n atten_init_state_out1, atten_init_state_out2, rtol=1e-02, atol=1e-02)\n self.assertAllClose(\n atten_state_out1, atten_state_out2, rtol=1e-02, atol=1e-02)\n\n def 
testLocationSensitiveAttentionQuantizedSameBatch(self):\n (atten_vec_out1, prob_out1, atten_init_state_out1, atten_state_out1) = (\n self._testLocationSensitiveAttentionSameBatchSizeHelper(True, False))\n (atten_vec_out2, prob_out2, atten_init_state_out2, atten_state_out2) = (\n self._testLocationSensitiveAttentionSameBatchSizeHelper(True, True))\n self.assertAllClose(atten_vec_out1, atten_vec_out2, rtol=1e-02, atol=1e-02)\n self.assertAllClose(prob_out1, prob_out2, rtol=1e-02, atol=1e-02)\n self.assertAllClose(\n atten_init_state_out1, atten_init_state_out2, rtol=1e-02, atol=1e-02)\n self.assertAllClose(\n atten_state_out1, atten_state_out2, rtol=1e-02, atol=1e-02)\n\n def _attentionStateWithRandomEmitProbabilities(self,\n atten,\n batch_size,\n time,\n dtype=tf.float32):\n atten_state = atten.ZeroAttentionState(time, batch_size)\n atten_state.emit_probs = tf.nn.softmax(\n tf.constant(np.random.rand(batch_size, time), dtype=dtype))\n return atten_state\n\n def testMonotonicAttention(self):\n with self.session(use_gpu=True):\n np.random.seed(12345)\n batch_size = 3\n source_dim = 4\n context_dim = 5\n time = 6\n query_dim = 7\n source_vecs = tf.constant(\n np.random.rand(time, batch_size, source_dim), dtype=tf.float32)\n source_contexts = tf.constant(\n np.random.rand(time, batch_size, context_dim), dtype=tf.float32)\n source_padding = tf.transpose(\n tf.constant(\n [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0]],\n dtype=tf.float32))\n query_vec = tf.constant(\n np.random.rand(batch_size, query_dim), dtype=tf.float32)\n\n params = attention.MonotonicAttention.Params()\n params.name = 'monotonic_attention'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n params.source_dim = source_dim\n params.query_dim = query_dim\n params.hidden_dim = query_dim\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n\n atten = attention.MonotonicAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n\n atten_init_state = self._attentionStateWithRandomEmitProbabilities(\n atten, batch_size, time)\n atten_vec, atten_prob, atten_state = atten.ComputeContextVector(\n atten.theta, query_vec, atten_init_state)\n\n atten_vars = tf.get_collection('MonotonicAttention_vars')\n self.assertLen(atten_vars, 6)\n\n self.evaluate(tf.global_variables_initializer())\n\n atten_vec_out, prob_out, atten_init_state_out, atten_state_out = self.evaluate(\n [atten_vec, atten_prob, atten_init_state, atten_state])\n\n self.assertEqual(atten_init_state_out.emit_probs.shape,\n atten_state_out.emit_probs.shape)\n\n print(['monotonic attention prob_out', np.array_repr(prob_out)])\n print(['monotonic attention atten_vec_out', np.array_repr(atten_vec_out)])\n\n expected_prob_out = [[\n 0.03654566, 0.05925026, 0., 0., 0.20958641, 0.19560105\n ], [0., 0.09670404, 0.13182665, 0.13221622, 0.,\n 0.18074416], [0.04112773, 0.07072841, 0., 0.13837409, 0., 0.23935230]]\n\n expected_atten_vec_out = [\n [0.2937718, 0.30372939, 0.27034321, 0.31328040, 0.19393572],\n [0.2553753, 0.26388022, 0.20429659, 0.47469878, 0.27512118],\n [0.33394262, 0.1191523, 0.22405925, 0.21366173, 0.03946214]\n ]\n\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def testMonotonicAttentionHard(self):\n with self.session(use_gpu=True):\n batch_size = 3\n source_dim = 4\n context_dim = 5\n time = 6\n query_dim = 10\n source_vecs = tf.constant(\n np.random.randn(time, batch_size, source_dim), dtype=tf.float32)\n 
source_contexts = tf.constant(\n np.random.randn(time, batch_size, context_dim), dtype=tf.float32)\n source_padding = tf.zeros((time, batch_size), dtype=tf.float32)\n query_vec = tf.constant(\n np.random.randn(batch_size, query_dim), dtype=tf.float32)\n\n params = attention.MonotonicAttention.Params()\n params.name = 'monotonic_attention'\n params.params_init = py_utils.WeightInit.Gaussian(0.1)\n params.source_dim = source_dim\n params.query_dim = query_dim\n params.hidden_dim = query_dim\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n params.hard_sigmoid = True\n # To encourage some probabilities to be > 0\n params.hidden_bias_init = 0.\n\n atten = attention.MonotonicAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n\n atten_init_state = atten.ZeroAttentionState(time, batch_size)\n\n _, atten_prob, atten_state = atten.ComputeContextVector(\n atten.theta, query_vec, atten_init_state)\n\n atten_vars = tf.get_collection('MonotonicAttention_vars')\n self.assertLen(atten_vars, 6)\n\n self.evaluate(tf.global_variables_initializer())\n\n prob_out, atten_state_out = self.evaluate([atten_prob, atten_state])\n print(['hard monotonic prob', np.array_repr(prob_out)])\n # Make sure all probabilities are binary\n self.assertTrue(np.all(np.logical_or(prob_out == 0, prob_out == 1)))\n # Make sure either one index was attended or none were\n prob_sum = np.sum(prob_out, 1)\n self.assertTrue(np.all(np.logical_or(prob_sum == 1, prob_sum == 0)))\n\n query_vec = tf.constant(\n np.random.randn(batch_size, query_dim), dtype=tf.float32)\n # Feed state back in\n _, atten_prob, atten_state = atten.ComputeContextVector(\n atten.theta, query_vec, atten_state_out)\n prob_out2 = self.evaluate(atten_prob)\n print(['hard monotonic prob2', np.array_repr(prob_out2)])\n # Get indices of where attention was assigned at each output timestep\n idx1 = np.argmax(prob_out, 1)\n idx2 = np.argmax(prob_out2, 1)\n # Either the index must have increased, or all probs were 0\n self.assertTrue(\n np.all(np.logical_or(idx1 <= idx2,\n np.sum(prob_out2, 1) == 0)))\n\n def testMonotonicAttentionBackProp(self):\n with self.session(use_gpu=True) as sess:\n # Use float64 dtype for numeric checks\n dtype = tf.float64\n tf.random.set_seed(398847392)\n np.random.seed(12345)\n batch_size = 3\n source_dim = 4\n context_dim = 5\n time = 6\n query_dim = 7\n source_vecs = tf.constant(\n np.random.rand(time, batch_size, source_dim), dtype=tf.float64)\n source_contexts = tf.constant(\n np.random.rand(time, batch_size, context_dim), dtype=tf.float64)\n source_padding = tf.zeros((time, batch_size), dtype=tf.float64)\n query_vec = tf.constant(\n np.random.rand(batch_size, query_dim), dtype=tf.float64)\n\n params = attention.MonotonicAttention.Params()\n params.name = 'monotonic_attention'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n params.source_dim = source_dim\n params.query_dim = query_dim\n params.hidden_dim = query_dim\n params.dtype = dtype\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n\n atten = attention.MonotonicAttention(params)\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n\n atten_init_state = self._attentionStateWithRandomEmitProbabilities(\n atten, batch_size, time, dtype=dtype)\n\n atten_vec, _, _ = atten.ComputeContextVector(atten.theta, query_vec,\n atten_init_state)\n\n loss = tf.reduce_sum(atten_vec)\n\n all_vars = tf.trainable_variables()\n self.assertLen(all_vars, 6)\n\n grads = 
tf.gradients(loss, all_vars)\n self.evaluate(tf.global_variables_initializer())\n sym_grads = [sg.eval() for sg in grads]\n num_grads = [\n test_utils.ComputeNumericGradient(sess, loss, v) for v in all_vars\n ]\n\n print(sym_grads)\n print(num_grads)\n\n for sg, ng in zip(sym_grads, num_grads):\n self.assertAllClose(sg, ng, rtol=1e-06, atol=1e-06)\n\n def _testPerStepSourcePaddingHelper(self, atten, depth=6, atten_state=None):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n source_vecs = tf.stack([\n tf.constant(np.random.rand(2, depth), dtype=tf.float32)\n for _ in range(6)\n ])\n source_contexts = tf.stack([\n tf.constant(np.random.rand(2, depth), dtype=tf.float32)\n for _ in range(6)\n ])\n source_padding = tf.transpose(\n tf.constant([[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0]],\n dtype=tf.float32))\n query_vec = tf.constant(np.random.rand(2, depth), dtype=tf.float32)\n query_vec = tf.concat([query_vec, query_vec], 0)\n\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n # No per_step_padding.\n atten_vec1, atten_prob1, _ = atten.ComputeContextVector(\n atten.theta,\n query_vec,\n attention_state=atten_state,\n per_step_source_padding=None)\n per_step_padding = tf.constant(\n [[0.0, 1.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],\n dtype=tf.float32)\n atten_vec2, atten_prob2, _ = atten.ComputeContextVector(\n atten.theta,\n query_vec,\n attention_state=atten_state,\n per_step_source_padding=per_step_padding)\n\n self.evaluate(tf.global_variables_initializer())\n atten_vec1_out, atten_prob1_out = self.evaluate([atten_vec1, atten_prob1])\n atten_vec2_out, atten_prob2_out = self.evaluate([atten_vec2, atten_prob2])\n print('atten_prob1_out', atten_prob1_out)\n print('atten_prob2_out', atten_prob2_out)\n print('atten_vec1_out', atten_vec1_out)\n print('atten_vec2_out', atten_vec2_out)\n self.assertAllClose(atten_prob1_out[:2], atten_prob1_out[2:])\n self.assertAllClose(atten_vec1_out[:2], atten_vec1_out[2:])\n self.assertAllClose(atten_prob1_out[1], atten_prob2_out[1])\n self.assertAllClose(atten_vec1_out[1], atten_vec2_out[1])\n self.assertAllClose(atten_prob1_out[3], atten_prob2_out[3])\n self.assertAllClose(atten_vec1_out[3], atten_vec2_out[3])\n self.assertAllClose(atten_prob2_out[1], atten_prob2_out[3])\n self.assertAllClose(atten_vec2_out[1], atten_vec2_out[3])\n self.assertGreater(\n np.max(np.abs(atten_prob1_out[0] - atten_prob2_out[0])), 0.1)\n self.assertGreater(\n np.max(np.abs(atten_prob1_out[2] - atten_prob2_out[2])), 0.1)\n self.assertGreater(\n np.max(np.abs(atten_prob2_out[0] - atten_prob2_out[2])), 0.1)\n return atten_prob2_out, atten_vec2_out\n\n def testPerStepSourcePaddingAdditiveAttention(self):\n params = attention.AdditiveAttention.Params()\n params.name = 'atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)\n depth = 6\n params.source_dim = depth\n params.query_dim = depth\n params.hidden_dim = depth\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n atten = params.Instantiate()\n prob_out, vec_out = self._testPerStepSourcePaddingHelper(atten, depth)\n print('vec_out', np.array_repr(np.sum(vec_out, 1)))\n self.assertAllClose([2.00084352, 3.2933836, 2.30622029, 3.2933836],\n np.sum(vec_out, 1))\n self.assertAllClose([1.0, 1.0, 1.0, 1.0], np.sum(prob_out, 1))\n\n def testPerStepSourcePaddingDotProductAttention(self):\n params = attention.DotProductAttention.Params()\n params.name = 'atten'\n depth = 6\n 
params.source_dim = depth\n params.query_dim = depth\n params.hidden_dim = depth\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n atten = params.Instantiate()\n prob_out, vec_out = self._testPerStepSourcePaddingHelper(atten, depth)\n print('vec_out', np.array_repr(np.sum(vec_out, 1)))\n self.assertAllClose([2.02671742, 3.38590097, 2.34964013, 3.38590097],\n np.sum(vec_out, 1))\n self.assertAllClose([1.0, 1.0, 1.0, 1.0], np.sum(prob_out, 1))\n\n def testPerStepSourcePaddingMultiHeadedAttention(self):\n params = attention.MultiHeadedAttention.Params()\n params.name = 'atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)\n depth = 6\n params.source_dim = depth\n params.query_dim = depth\n params.hidden_dim = depth\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n atten = params.Instantiate()\n prob_out, vec_out = self._testPerStepSourcePaddingHelper(atten, depth)\n print('vec_out', np.array_repr(np.sum(vec_out, 1)))\n self.assertAllClose([-0.006338, -0.025153, 0.041647, -0.025153],\n np.sum(vec_out, 1))\n self.assertAllClose([1.0, 1.0, 1.0, 1.0], np.sum(prob_out, 1))\n\n def testPerStepSourcePaddingLocationSensitiveAttention(self):\n params = attention.LocationSensitiveAttention.Params()\n params.name = 'atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)\n depth = 6\n params.source_dim = depth\n params.query_dim = depth\n params.hidden_dim = depth\n params.location_filter_size = 3\n params.location_num_filters = 4\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n atten_state = tf.concat(\n [tf.ones([4, 1], tf.float32),\n tf.zeros([4, 5], tf.float32)], 1)\n atten_state = tf.expand_dims(atten_state, 1)\n atten = params.Instantiate()\n prob_out, vec_out = self._testPerStepSourcePaddingHelper(\n atten, depth, atten_state=atten_state)\n print('vec_out', np.array_repr(np.sum(vec_out, 1)))\n self.assertAllClose([2.001103, 3.293414, 2.306448, 3.293414],\n np.sum(vec_out, 1))\n self.assertAllClose([1.0, 1.0, 1.0, 1.0], np.sum(prob_out, 1))\n\n def testPerStepSourcePaddingMonotonicAttention(self):\n params = attention.MonotonicAttention.Params()\n params.name = 'atten'\n params.params_init = py_utils.WeightInit.Gaussian(0.1, 877374)\n depth = 6\n params.source_dim = depth\n params.query_dim = depth\n params.hidden_dim = depth\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n atten = params.Instantiate()\n atten_state = atten.ZeroAttentionState(6, 4)\n atten_state.emit_probs = tf.concat(\n [tf.ones([4, 1], tf.float32),\n tf.zeros([4, 5], tf.float32)], 1)\n prob_out, vec_out = self._testPerStepSourcePaddingHelper(\n atten, depth, atten_state=atten_state)\n print('prob_out', np.array_repr(np.sum(prob_out, 1)))\n print('vec_out', np.array_repr(np.sum(vec_out, 1)))\n\n def testGmmMonotonicAttentionDropout(self):\n p = attention.GmmMonotonicAttention.Params().Set(\n name='gmm_monotonic_attention', atten_dropout_prob=0.5)\n with self.assertRaises(NotImplementedError):\n p.Instantiate()\n\n def testGmmMonotonicAttention(self):\n with self.session(use_gpu=True):\n np.random.seed(12345)\n source_vecs = tf.stack([\n tf.constant(np.random.rand(3, 4), dtype=tf.float32) for _ in range(6)\n ])\n source_contexts = tf.stack([\n tf.constant(np.random.rand(3, 5), dtype=tf.float32) for _ in range(6)\n ])\n source_padding = tf.transpose(\n tf.constant(\n [[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0]],\n dtype=tf.float32))\n query_vec = tf.constant(np.random.rand(6, 7), dtype=tf.float32)\n\n params = 
attention.GmmMonotonicAttention.Params()\n params.name = 'gmm_atten'\n params.params_init = py_utils.WeightInit.Xavier(0.1)\n params.source_dim = 4\n params.query_dim = 7\n params.hidden_dim = 7\n params.num_mixtures = 2\n params.vn.global_vn = False\n params.vn.per_step_vn = False\n\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta, source_vecs, source_contexts,\n source_padding)\n # target_batch=6\n atten_init_state = atten.ZeroAttentionState(tf.shape(source_vecs)[0], 6)\n\n atten_vec, atten_prob, atten_state = atten.ComputeContextVector(\n atten.theta, query_vec, atten_init_state)\n\n self.evaluate(tf.global_variables_initializer())\n\n atten_vec_out, prob_out, atten_init_state_out, atten_state_out = self.evaluate(\n [atten_vec, atten_prob, atten_init_state, atten_state])\n\n self.assertEqual(atten_init_state_out.shape, atten_state_out.shape)\n self.assertEqual(atten_init_state_out.shape, (6, 2, 4))\n\n print(['gmm attention prob_out', np.array_repr(prob_out)])\n print(['gmm attention atten_vec_out', np.array_repr(atten_vec_out)])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_prob_out = [\n [ 2.45764434e-01, 3.97835493e-01, 0., 0., 4.25808690e-03,\n 1.29864624e-04],\n [ 0., 3.98021877e-01, 2.37964690e-01, 5.23146540e-02, 0.,\n 1.29256863e-04],\n [ 2.46294901e-01, 3.97767872e-01, 0., 5.21243662e-02, 0.,\n 1.29372784e-04],\n [ 2.45875627e-01, 3.97635251e-01, 0., 0., 4.27022483e-03,\n 1.30706903e-04],\n [ 0., 3.97709191e-01, 2.37897262e-01, 5.24106659e-02, 0.,\n 1.30714150e-04],\n [ 2.46048093e-01, 3.97871077e-01, 0., 5.21884784e-02, 0.,\n 1.29211781e-04]]\n expected_atten_vec_out = [\n [ 0.23010808, 0.43757612, 0.25150469, 0.3631629 , 0.37140277],\n [ 0.54693544, 0.56182981, 0.21333349, 0.58108622, 0.21566363],\n [ 0.4048025 , 0.53986353, 0.13288836, 0.22497796, 0.17450145],\n [ 0.23008531, 0.4375343 , 0.25150725, 0.36303982, 0.37127423],\n [ 0.54661846, 0.5615437 , 0.21332006, 0.58084518, 0.21558265],\n [ 0.40484226, 0.53978455, 0.13283314, 0.22490481, 0.17447782]]\n # pyformat: enable\n # pylint: enable=bad-whitespace\n\n self.assertAllClose(expected_prob_out, prob_out)\n self.assertAllClose(expected_atten_vec_out, atten_vec_out)\n\n def testMergerLayerMean(self):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n depth = 4\n batch = 5\n n_sources = 3\n p_ctxs = [\n np.random.rand(batch, depth).astype('float32')\n for _ in range(n_sources)\n ]\n ctxs = [tf.constant(ctx, dtype=tf.float32) for ctx in p_ctxs]\n\n p = attention.MergerLayer.Params()\n p.name = 'merger_layer'\n p.merger_op = 'mean'\n p.source_dim = depth\n merger = p.Instantiate()\n\n ctx = merger.FProp(merger.theta, ctxs)\n self.evaluate(tf.global_variables_initializer())\n actual_ctx = self.evaluate([ctx])[0]\n\n expected_ctx = np.mean(p_ctxs, axis=0)\n self.assertEqual(actual_ctx.shape, (batch, depth))\n self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)\n\n def testMergerLayerAdditiveAttention(self):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n depth = 4\n batch = 5\n query_dim = 7\n n_sources = 3\n ctxs = [\n tf.constant(np.random.rand(batch, depth), dtype=tf.float32)\n for _ in range(n_sources)\n ]\n query_vec = tf.constant(\n np.random.rand(batch * 2, query_dim), dtype=tf.float32)\n p = attention.MergerLayer.Params()\n p.name = 'merger_layer'\n p.merger_op = 'atten'\n p.source_dim = depth\n p.query_dim = query_dim\n p.hidden_dim = depth\n merger = p.Instantiate()\n\n ctx = merger.FProp(merger.theta, ctxs, query_vec)\n 
self.evaluate(tf.global_variables_initializer())\n actual_ctx = self.evaluate(ctx)\n\n # pylint: disable=bad-whitespace\n # pyformat: disable\n expected_ctx = [\n [ 0.40796196, 0.50855637, 0.92564321, 0.72608167],\n [ 0.34300309, 0.17305931, 0.64801621, 0.4161588 ],\n [ 0.40570667, 0.28166312, 0.07109687, 0.07077176],\n [ 0.44923055, 0.56033343, 0.70899796, 0.73256713],\n [ 0.56362778, 0.42331296, 0.47032064, 0.76701462],\n [ 0.40873578, 0.50516003, 0.92537481, 0.72435796],\n [ 0.33702248, 0.17404726, 0.65101075, 0.41883218],\n [ 0.40316698, 0.28128177, 0.0709244 , 0.07073996],\n [ 0.44036126, 0.53640223, 0.68623006, 0.75264776],\n [ 0.54324883, 0.42487082, 0.4616943 , 0.77234119]]\n # pyformat: enable\n # pylint: enable=bad-whitespace\n self.assertEqual(actual_ctx.shape, (batch * 2, depth))\n self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)\n\n def testMergerLayerDotProductAttention(self):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n depth = 4\n batch = 5\n n_sources = 3\n ctxs = [\n tf.constant(np.random.rand(batch, depth), dtype=tf.float32)\n for _ in range(n_sources)\n ]\n query_vec = tf.constant(\n np.random.rand(batch * 2, depth), dtype=tf.float32)\n p = attention.MergerLayer.Params()\n p.name = 'merger_layer'\n p.merger_op = 'atten'\n p.source_dim = depth\n p.query_dim = depth\n p.hidden_dim = depth\n p.attention_tpl = attention.DotProductAttention.Params()\n merger = p.Instantiate()\n\n ctx = merger.FProp(merger.theta, ctxs, query_vec)\n self.evaluate(tf.global_variables_initializer())\n actual_ctx = self.evaluate(ctx)\n\n # pylint: disable=bad-whitespace\n # pyformat: disable\n expected_ctx = [\n [ 0.40122974, 0.53032947, 0.92722446, 0.73408204],\n [ 0.37834394, 0.16492322, 0.6284582 , 0.40583336],\n [ 0.43172807, 0.28519249, 0.07334236, 0.07126588],\n [ 0.48187545, 0.56433642, 0.7028234 , 0.77750808],\n [ 0.59640014, 0.46689704, 0.47688526, 0.74523771],\n [ 0.41653261, 0.50926942, 0.92638767, 0.74147904],\n [ 0.34954029, 0.16965927, 0.64286244, 0.41876066],\n [ 0.44629157, 0.28723121, 0.07451884, 0.07151417],\n [ 0.509902 , 0.62019253, 0.75361776, 0.74199384],\n [ 0.56122077, 0.42407531, 0.46921006, 0.76747787]]\n # pyformat: enable\n # pylint: enable=bad-whitespace\n self.assertEqual(actual_ctx.shape, (batch * 2, depth))\n self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)\n\n def testMergerLayerConcat(self):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n depth = 4\n batch = 5\n n_sources = 3\n ctxs = [\n tf.constant(np.random.rand(batch, depth), dtype=tf.float32)\n for _ in range(n_sources)\n ]\n p = attention.MergerLayer.Params()\n p.name = 'merger_layer'\n p.merger_op = 'concat'\n p.source_dim = depth\n merger = p.Instantiate()\n\n ctx = merger.FProp(merger.theta, ctxs)\n self.evaluate(tf.global_variables_initializer())\n actual_ctx = self.evaluate([ctx])[0]\n\n # pylint: disable=bad-whitespace\n # pyformat: disable\n expected_ctx = [\n [ 0.1177848 , 0.94777811, 0.94537693, 0.6216979 , 0.51051533,\n 0.5474115 , 0.93749231, 0.93760508, 0.5904724 , 0.05267439,\n 0.89581013, 0.63010913],\n [ 0.25139269, 0.13851869, 0.65362513, 0.57537138, 0.05093541,\n 0.28593501, 0.84663856, 0.39284077, 0.79584485, 0.07670615,\n 0.40381077, 0.26504567],\n [ 0.1108813 , 0.23381528, 0.05560364, 0.06867393, 0.77289224,\n 0.32918185, 0.10567363, 0.07876136, 0.35448784, 0.28477612,\n 0.05394353, 0.06531866],\n [ 0.82317245, 0.78475511, 0.82936037, 0.99494314, 0.07920805,\n 0.02165302, 0.25108394, 0.92048419, 0.44413447, 
0.81940264,\n 0.98786688, 0.35846332],\n [ 0.86243463, 0.75607926, 0.54042 , 0.58698255, 0.13624814,\n 0.47994047, 0.28561282, 0.87185597, 0.66811442, 0.07942203,\n 0.56781054, 0.83598584]]\n # pyformat: enable\n # pylint: enable=bad-whitespace\n self.assertEqual(actual_ctx.shape, (batch, n_sources * depth))\n self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)\n\n def testMergerLayerConcatPostProj(self):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n depth = 4\n batch = 5\n n_sources = 3\n ctxs = [\n tf.constant(np.random.rand(batch, depth), dtype=tf.float32)\n for _ in range(n_sources)\n ]\n p = attention.MergerLayer.Params()\n p.name = 'merger_layer'\n p.merger_op = 'concat'\n p.source_dim = depth\n # Post projection to a dimensionality of 4.\n p.post_proj = layers.ProjectionLayer.Params().Set(\n name='post_proj',\n batch_norm=False,\n weight_norm=False,\n has_bias=True,\n input_dim=12,\n output_dim=4)\n merger = p.Instantiate()\n ctx = merger.FProp(merger.theta, ctxs)\n self.evaluate(tf.global_variables_initializer())\n actual_ctx = self.evaluate([ctx])[0]\n\n # pylint: disable=bad-whitespace\n # pyformat: disable\n expected_ctx = [\n [0.05845007, 0.14603308, 2.099096 , 0.03618803],\n [0.50603 , 0.1128372 , 1.0714196 , 0.3054366 ],\n [0. , 0.17477296, 0. , 0. ],\n [0.34721488, 0. , 0.9593564 , 0.6714128 ],\n [0.012324 , 0. , 1.3537602 , 0.16794051]]\n\n # pyformat: enable\n # pylint: enable=bad-whitespace\n tf.logging.info(np.array_repr(actual_ctx))\n self.assertEqual(actual_ctx.shape, (batch, depth))\n self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)\n\n def testMergerLayerConcatPreProjections(self):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n depth = 4\n batch = 5\n n_sources = 3\n ctxs = [\n tf.constant(np.random.rand(batch, depth), dtype=tf.float32)\n for _ in range(n_sources)\n ]\n p = attention.MergerLayer.Params()\n # We down project all of the sources to dimensionality 1.\n p.pre_proj_input_dims = [4, 4, 4]\n p.pre_proj_output_dims = [1, 1, 1]\n p.name = 'merger_layer'\n p.merger_op = 'concat'\n p.source_dim = depth\n merger = p.Instantiate()\n\n ctx = merger.FProp(merger.theta, ctxs)\n self.evaluate(tf.global_variables_initializer())\n actual_ctx = self.evaluate([ctx])[0]\n\n # pylint: disable=bad-whitespace\n # pyformat: disable\n expected_ctx = [\n [ 0., 0.72890908, 0. ],\n [ 0.4647972, 0.28266785, 0. ],\n [ 0., 0.74580085, 0.09588336],\n [ 0.46080768, 0., 0.66402191],\n [ 0.19947493, 0.38837075, 0. ],\n ]\n # pyformat: enable\n # pylint: enable=bad-whitespace\n tf.logging.info(np.array_repr(actual_ctx))\n # The final context vector will have shape (5, 3) since each source\n # has dimensionality 1 after the down projection above.\n self.assertEqual(actual_ctx.shape, (batch, n_sources))\n self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)\n\n def testInvalidPreProjections(self):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n depth = 4\n p = attention.MergerLayer.Params()\n # We intentionally set output_dims to be of a different\n # length. 
This should cause a ValueError to be raised\n # during init.\n p.pre_proj_input_dims = [4, 4, 4]\n p.pre_proj_output_dims = [1, 1]\n p.name = 'merger_layer'\n p.merger_op = 'concat'\n p.source_dim = depth\n with self.assertRaisesRegex(\n ValueError, 'Output dims should be the same length as input dims.*'):\n _ = p.Instantiate()\n\n def testMergerLayerWeightedSum(self):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n depth = 4\n batch = 2\n n_sources = 3\n ctxs = [[[1.0, 2.0, 3.0, 4.0], [2.0, 3.0, 4.0, 5.0]],\n [[3.0, 4.0, 5.0, 6.0], [6.0, 7.0, 8.0, 9.0]],\n [[4.0, 5.0, 6.0, 7.0], [7.0, 8.0, 1.0, 2.0]]]\n p = attention.MergerLayer.Params()\n p.name = 'merger_layer'\n p.merger_op = 'weighted_sum'\n p.source_dim = depth\n p.num_sources = n_sources\n merger = p.Instantiate()\n\n ctxs = [tf.expand_dims(i, 2) for i in ctxs]\n ctx = tf.squeeze(merger.FProp(merger.theta, ctxs), 2)\n self.evaluate(tf.global_variables_initializer())\n actual_ctx = self.evaluate(ctx)\n\n # pylint: disable=bad-whitespace\n # pyformat: disable\n expected_ctx = [[ 2.66666675, 3.66666675, 4.66666698, 5.66666698],\n [ 5.0, 6.0, 4.33333349, 5.33333349]]\n # pyformat: enable\n # pylint: enable=bad-whitespace\n self.assertEqual(actual_ctx.shape, (batch, depth))\n self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)\n\n def testMergerLayerGatedAvg(self):\n with self.session(use_gpu=True):\n np.random.seed(505837249)\n depth = 4\n batch = 2\n n_sources = 3\n\n inp_1 = np.asarray([[0.0, 0.0, 0.0, 0.0], [-1.0, -1.0, 1.0, 1.0]],\n dtype=np.float32)\n inp_2 = np.asarray([[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, 1.0, 1.0]],\n dtype=np.float32)\n inp_3 = np.asarray([[-1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, 1.0, 1.0]],\n dtype=np.float32)\n p = attention.MergerLayer.Params()\n p.name = 'merger_layer'\n p.merger_op = 'gated_avg'\n p.source_dim = depth\n p.num_sources = n_sources\n merger = p.Instantiate()\n\n ctx = merger.FProp(merger.theta, [inp_1, inp_2, inp_3])\n self.evaluate(tf.global_variables_initializer())\n actual_ctx = self.evaluate(ctx)\n\n # pylint: disable=bad-whitespace\n # pyformat: disable\n expected_ctx = [\n [ 0.365041, 0.365041, 0.365041, 0.365041],\n [ -1.0, -1.0, 1.0 , 1.0]]\n # pyformat: enable\n # pylint: enable=bad-whitespace\n self.assertEqual(actual_ctx.shape, (batch, depth))\n self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)\n\n def testMultiSourceMultiHeadedAttention(self):\n with self.session(use_gpu=True) as sess:\n (source_vecs, source_contexts, source_padding, source_padding_p,\n query_vec, _, _) = self._MultiHeadedAttentionInputs()\n iap = attention.DotProductAttention.Params()\n iap.name = 'dot_atten'\n mha_params = attention.MultiHeadedAttention.Params().Set(\n name='multihead_atten',\n source_dim=4,\n query_dim=4,\n hidden_dim=4,\n inner_atten_params=iap,\n num_attention_heads=2,\n use_source_vec_as_attention_value=False)\n\n # Single-source attention.\n params = attention.MultiSourceAttention.Params().Set(\n name='one_source_atten',\n source_dim=4,\n query_dim=4,\n source_atten_tpls=[('src_1', mha_params)],\n primary_source_key='src_1')\n atten = params.Instantiate()\n atten.InitForSourcePacked(atten.theta,\n py_utils.NestedMap(src_1=source_vecs),\n py_utils.NestedMap(src_1=source_contexts),\n py_utils.NestedMap(src_1=source_padding))\n tf.global_variables_initializer().run()\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n 
source_length=source_contexts.shape[0],\n context_dim=source_contexts.shape[2])\n\n atten_vec_out, prob_out = sess.run([atten_vec, atten_prob])\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n\n self.assertAllClose(\n [2.8940253, 2.2901258, 3.5679011, 2.894734, 2.2989905, 3.5306041],\n np.sum(atten_vec_out, axis=1))\n print('atten_vec_out', atten_vec_out)\n print('prob_out', prob_out)\n t_batch_size = 6\n s_batch_size = 3\n for i in range(t_batch_size):\n # Test to make sure we didn't mess up indexing.\n s_i = i % s_batch_size\n atten.InitForSourcePacked(\n atten.theta, py_utils.NestedMap(src_1=source_vecs[:,\n s_i:s_i + 1, :]),\n py_utils.NestedMap(src_1=source_contexts[:, s_i:s_i + 1, :]),\n py_utils.NestedMap(src_1=source_padding[:, s_i:s_i + 1]))\n atten_vec_i, prob_i, _ = atten.ComputeContextVector(\n atten.theta, query_vec[i:i + 1])\n atten_vec_i_out, prob_i_out = sess.run([atten_vec_i, prob_i])\n self.assertAllClose(prob_i_out, prob_out[i:i + 1])\n self.assertAllClose(atten_vec_i_out, atten_vec_out[i:i + 1])\n padding_i = source_padding_p[s_i]\n # Check to make sure prob exists only on valid timesteps.\n self.assertEqual(0.0, np.sum(padding_i * prob_i_out))\n\n # Two-source attention.\n atten_merger_p = attention.MergerLayer.Params().Set(\n params_init=py_utils.WeightInit.Uniform(0.04),\n merger_op='concat', # concatenate attention\n pre_proj_input_dims=[6, 6],\n pre_proj_output_dims=[6, 6])\n params = attention.MultiSourceAttention.Params().Set(\n name='two_source_atten',\n source_dim=4,\n query_dim=4,\n source_atten_tpls=[('src_1', mha_params),\n ('src_2',\n mha_params.Copy().Set(name='multihead_atten2'))],\n primary_source_key='src_1',\n atten_merger_tpl=atten_merger_p)\n atten = params.Instantiate()\n\n (source_vecs2, source_contexts2, source_padding2, source_padding_p,\n query_vec, _, _) = self._MultiHeadedAttentionInputs()\n atten.InitForSourcePacked(\n atten.theta,\n py_utils.NestedMap(src_1=source_vecs, src_2=source_vecs2),\n py_utils.NestedMap(src_1=source_contexts, src_2=source_contexts2),\n py_utils.NestedMap(src_1=source_padding, src_2=source_padding2))\n tf.global_variables_initializer().run()\n atten_vec, atten_prob, _ = atten.ComputeContextVector(\n atten.theta, query_vec)\n self._CheckStaticShapes(\n atten_vec,\n atten_prob,\n target_batch_size=query_vec.shape[0],\n source_length=source_contexts.shape[0],\n context_dim=2 * source_contexts.shape[2])\n\n atten_vec_out, prob_out = sess.run([atten_vec, atten_prob])\n print('atten_vec_out', np.sum(atten_vec_out, axis=1))\n\n self.assertAllClose(\n [2.860059, 2.022061, 3.128138, 2.8762774, 2.103229, 3.1187325],\n np.sum(atten_vec_out, axis=1))\n print('atten_vec_out', atten_vec_out)\n print('prob_out', prob_out)\n t_batch_size = 6\n s_batch_size = 3\n for i in range(t_batch_size):\n # Test to make sure we didn't mess up indexing.\n s_i = i % s_batch_size\n atten.InitForSourcePacked(\n atten.theta,\n py_utils.NestedMap(\n src_1=source_vecs[:, s_i:s_i + 1, :],\n src_2=source_vecs2[:, s_i:s_i + 1, :]),\n py_utils.NestedMap(\n src_1=source_contexts[:, s_i:s_i + 1, :],\n src_2=source_contexts2[:, s_i:s_i + 1, :]),\n py_utils.NestedMap(\n src_1=source_padding[:, s_i:s_i + 1],\n src_2=source_padding2[:, s_i:s_i + 1]))\n atten_vec_i, prob_i, _ = atten.ComputeContextVector(\n atten.theta, query_vec[i:i + 1])\n atten_vec_i_out, prob_i_out = sess.run([atten_vec_i, prob_i])\n self.assertAllClose(prob_i_out, prob_out[i:i + 1])\n self.assertAllClose(atten_vec_i_out, atten_vec_out[i:i + 1])\n padding_i = source_padding_p[s_i]\n 
# Check to make sure prob exists only on valid timesteps.\n self.assertEqual(0.0, np.sum(padding_i * prob_i_out))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.dot", "numpy.expand_dims", "numpy.abs", "numpy.random.seed", "numpy.isfinite", "numpy.asarray", "numpy.array_repr", "numpy.tile", "numpy.logical_or", "numpy.argmax", "numpy.mean", "numpy.random.rand", "numpy.random.randn", "numpy.transpose", "numpy.array", "numpy.exp", "numpy.sum" ] ]
AKA-ZSZ/AI_Artistry
[ "64384c8c7fd6f8db89d0fe918bf94bec81f18a7a" ]
[ "histogram_matching.py" ]
[ "#!/usr/bin/env python\n \n'''\nWelcome to the Histogram Matching Program!\n \nGiven a source image and a reference image, this program\nreturns a modified version of the source image that matches\nthe histogram of the reference image.\n \nImage Requirements:\n - Source image must be color.\n - Reference image must be color.\n - The sizes of the source image and reference image do not\n have to be the same.\n - The program supports an optional third image (mask) as\n an argument.\n - When the mask image is provided, it will be rescaled to\n be the same size as the source image, and the resulting\n matched image will be masked by the mask image.\n \nUsage:\n python histogram_matching.py <source_image> <ref_image> [<mask_image>]\n'''\n \n# Python 2/3 compatibility\nfrom __future__ import print_function\n \nimport cv2 # Import the OpenCV library\nimport numpy as np # Import Numpy library\nimport matplotlib.pyplot as plt # Import matplotlib functionality\nimport sys # Enables the passing of arguments\n \n# Project: Histogram Matching Using OpenCV\n# Author: Addison Sears-Collins\n# Date created: 9/27/2019\n# Python version: 3.7\n \n# Define the file name of the images\nSOURCE_IMAGE = \"Starry-Night-canvas-Vincent-van-Gogh-New-1889.jpg\"\nREFERENCE_IMAGE = \"main-image.jpg\"\nMASK_IMAGE = \"mask.jpg\"\nOUTPUT_IMAGE = \"aspens_in_fall_forest_output\"\nOUTPUT_MASKED_IMAGE = \"aspens_in_fall_forest_output_masked.jpg\"\n \ndef calculate_cdf(histogram):\n \"\"\"\n This method calculates the cumulative distribution function\n :param array histogram: The values of the histogram\n :return: normalized_cdf: The normalized cumulative distribution function\n :rtype: array\n \"\"\"\n # Get the cumulative sum of the elements\n cdf = histogram.cumsum()\n \n # Normalize the cdf\n normalized_cdf = cdf / float(cdf.max())\n \n return normalized_cdf\n \ndef calculate_lookup(src_cdf, ref_cdf):\n \"\"\"\n This method creates the lookup table\n :param array src_cdf: The cdf for the source image\n :param array ref_cdf: The cdf for the reference image\n :return: lookup_table: The lookup table\n :rtype: array\n \"\"\"\n lookup_table = np.zeros(256)\n lookup_val = 0\n for src_pixel_val in range(len(src_cdf)):\n lookup_val\n for ref_pixel_val in range(len(ref_cdf)):\n if ref_cdf[ref_pixel_val] >= src_cdf[src_pixel_val]:\n lookup_val = ref_pixel_val\n break\n lookup_table[src_pixel_val] = lookup_val\n\n return lookup_table\n \ndef match_histograms(src_image, ref_image):\n \"\"\"\n This method matches the source image histogram to the\n reference signal\n :param image src_image: The original source image\n :param image ref_image: The reference image\n :return: image_after_matching\n :rtype: image (array)\n \"\"\"\n # Split the images into the different color channels\n # b means blue, g means green and r means red\n src_b, src_g, src_r = cv2.split(src_image)\n ref_b, ref_g, ref_r = cv2.split(ref_image)\n \n\n # Compute the b, g, and r histograms separately\n # The flatten() Numpy method returns a copy of the array c\n # collapsed into one dimension.\n src_hist_blue, bin_0 = np.histogram(src_b.flatten(), 256, [0,256])\n src_hist_green, bin_1 = np.histogram(src_g.flatten(), 256, [0,256])\n src_hist_red, bin_2 = np.histogram(src_r.flatten(), 256, [0,256]) \n ref_hist_blue, bin_3 = np.histogram(ref_b.flatten(), 256, [0,256]) \n ref_hist_green, bin_4 = np.histogram(ref_g.flatten(), 256, [0,256])\n ref_hist_red, bin_5 = np.histogram(ref_r.flatten(), 256, [0,256])\n \n # Compute the normalized cdf for the source and reference 
image\n src_cdf_blue = calculate_cdf(src_hist_blue)\n src_cdf_green = calculate_cdf(src_hist_green)\n src_cdf_red = calculate_cdf(src_hist_red)\n ref_cdf_blue = calculate_cdf(ref_hist_blue)\n ref_cdf_green = calculate_cdf(ref_hist_green)\n ref_cdf_red = calculate_cdf(ref_hist_red)\n \n # Make a separate lookup table for each color\n blue_lookup_table = calculate_lookup(src_cdf_blue, ref_cdf_blue)\n green_lookup_table = calculate_lookup(src_cdf_green, ref_cdf_green)\n red_lookup_table = calculate_lookup(src_cdf_red, ref_cdf_red)\n \n # Use the lookup function to transform the colors of the original\n # source image\n blue_after_transform = cv2.LUT(src_b, blue_lookup_table)\n green_after_transform = cv2.LUT(src_g, green_lookup_table)\n red_after_transform = cv2.LUT(src_r, red_lookup_table)\n \n # Put the image back together\n image_after_matching = cv2.merge([\n blue_after_transform, green_after_transform, red_after_transform])\n image_after_matching = cv2.convertScaleAbs(image_after_matching)\n \n print(src_b)\n \n # export data into csvs\n write_1D_list_to_csv(\"data/src_hist_blue\",src_hist_blue)\n write_1D_list_to_csv(\"data/ref_hist_green\",ref_hist_green)\n write_1D_list_to_csv(\"data/src_cdf_blue\",src_cdf_blue)\n write_1D_list_to_csv(\"data/ref_cdf_blue\",ref_cdf_blue)\n write_1D_list_to_csv(\"data/blue_lookup_table\",blue_lookup_table)\n write_1D_list_to_csv(\"data/green_lookup_table\",green_lookup_table)\n \n # create hists\n # configure and draw the histogram figure of source image\n plt.figure()\n plt.title(\"Color Histogram of Source Image\")\n plt.xlabel(\"Color value\")\n plt.ylabel(\"Pixels Count\")\n plt.xlim([0, 256]) # <- named arguments do not work here\n\n plt.plot(bin_0[0:-1], src_hist_blue,color='blue') # <- or here\n plt.plot(bin_1[0:-1], src_hist_green,color='green') # <- or here\n plt.plot(bin_2[0:-1], src_hist_red,color='red') # <- or here\n plt.show()\n\n # configure and draw the histogram figure of ref image\n plt.figure()\n plt.title(\"Color Histogram of Reference Image\")\n plt.xlabel(\"Color value\")\n plt.ylabel(\"Pixels Count\")\n plt.xlim([0, 256]) # <- named arguments do not work here\n\n plt.plot(bin_3[0:-1], ref_hist_blue,color='blue') # <- or here\n plt.plot(bin_4[0:-1], ref_hist_green,color='green') # <- or here\n plt.plot(bin_5[0:-1], ref_hist_red,color='red') # <- or here\n plt.show()\n\n \n \n return image_after_matching\n\ndef write_1D_list_to_csv(li_header,li):\n np.savetxt(f\"{li_header}.csv\", li, delimiter=\",\", fmt='%s', header=li_header)\n\n\ndef mask_image(image, mask):\n \"\"\"\n This method overlays a mask on top of an image\n :param image image: The color image that you want to mask\n :param image mask: The mask\n :return: masked_image\n :rtype: image (array)\n \"\"\"\n \n # Split the colors into the different color channels\n blue_color, green_color, red_color = cv2.split(image)\n \n # Resize the mask to be the same size as the source image\n resized_mask = cv2.resize(\n mask, (image.shape[1], image.shape[0]), cv2.INTER_NEAREST)\n \n # Normalize the mask\n normalized_resized_mask = resized_mask / float(255)\n \n # Scale the color values\n blue_color = blue_color * normalized_resized_mask\n blue_color = blue_color.astype(int)\n green_color = green_color * normalized_resized_mask\n green_color = green_color.astype(int)\n red_color = red_color * normalized_resized_mask\n red_color = red_color.astype(int)\n \n # Put the image back together again\n merged_image = cv2.merge([blue_color, green_color, red_color])\n masked_image = 
cv2.convertScaleAbs(merged_image)\n return masked_image\n \ndef main():\n \"\"\"\n Main method of the program.\n \"\"\"\n start_the_program = input(\"Press ENTER to perform histogram matching...\") \n \n # A flag to indicate if the mask image was provided or not by the user\n mask_provided = False\n \n # Pull system arguments\n try:\n image_src_name = sys.argv[1]\n image_ref_name = sys.argv[2]\n except:\n image_src_name = SOURCE_IMAGE\n image_ref_name = REFERENCE_IMAGE\n \n try:\n image_mask_name = sys.argv[3]\n mask_provided = True\n except:\n print(\"\\nNote: A mask was not provided.\\n\")\n \n # Load the images and store them into a variable\n image_src = cv2.imread(cv2.samples.findFile(image_src_name))\n image_ref = cv2.imread(cv2.samples.findFile(image_ref_name))\n \n image_mask = None\n if mask_provided:\n image_mask = cv2.imread(cv2.samples.findFile(image_mask_name))\n \n # Check if the images loaded properly\n if image_src is None:\n print('Failed to load source image file:', image_src_name)\n sys.exit(1)\n elif image_ref is None:\n print('Failed to load reference image file:', image_ref_name)\n sys.exit(1)\n else:\n # Do nothing\n pass\n \n # Convert the image mask to grayscale\n if mask_provided:\n image_mask = cv2.cvtColor(image_mask, cv2.COLOR_BGR2GRAY)\n \n # Calculate the matched image\n output_image = match_histograms(image_src, image_ref)\n \n # Mask the matched image\n if mask_provided:\n output_masked = mask_image(output_image, image_mask)\n \n # Save the output images\n cv2.imwrite(\"new_generated.jpg\", output_image)\n if mask_provided:\n cv2.imwrite(OUTPUT_MASKED_IMAGE, output_masked)\n \n ## Display images, used for debugging\n cv2.imshow('Source Image', image_src)\n cv2.imshow('Reference Image', image_ref)\n cv2.imshow('Output Image', output_image)\n if mask_provided:\n cv2.imshow('Mask', image_mask)\n cv2.imshow('Output Image (Masked)', output_masked)\n \n cv2.waitKey(0) # Wait for a keyboard event\n \nif __name__ == '__main__':\n print(__doc__)\n main()\n cv2.destroyAllWindows()" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "numpy.savetxt", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
mesjou/human-frictions
[ "2a5c919039bb29643a3e8dd36c0fa13ce7d93e0e" ]
[ "human_friction/utils/metric_generator.py" ]
[ "import collections\nfrom abc import ABC\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom human_friction.rllib.rllib_env import RllibDiscrete\n\n\nclass MetricGenerator(ABC):\n def __init__(self, env: RllibDiscrete):\n agent_metrics = [\"wage\", \"budget\", \"consumption\", \"labor\", \"reward\"]\n environment_metrics = [\"interest\", \"inflation\", \"unemployment\", \"production\", \"price\"]\n\n self.agent_data = collections.defaultdict(dict)\n for metric in agent_metrics:\n for agent in env.wrapped_env.agents.values():\n self.agent_data[metric][agent.agent_id] = []\n\n self.env_data = {}\n for metric in environment_metrics:\n self.env_data[metric] = []\n\n def analyze(self, env, rews):\n for metric, agent_ids in self.agent_data.items():\n for agent_id, data in agent_ids.items():\n agent = env.wrapped_env.agents[agent_id]\n value = getattr(agent, metric, None)\n if value is None:\n value = rews[agent_id]\n data.append(value)\n\n for metric, data in self.env_data.items():\n value = getattr(env.wrapped_env, metric, None)\n if value is None:\n value = getattr(env.wrapped_env.firm, metric, None)\n data.append(value)\n\n def plot(self):\n df = pd.DataFrame.from_dict(self.env_data, orient=\"index\").transpose()\n df = df.rolling(5, axis=0).mean()\n df.plot()\n plt.show()\n\n for metric, data in self.agent_data.items():\n df = pd.DataFrame.from_dict(data, orient=\"index\").transpose()\n df = df.rolling(5, axis=0).mean()\n df.plot()\n plt.title(metric)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.title", "pandas.DataFrame.from_dict" ] ]
MichelKhalaf/traffic
[ "84e315d84a4ab9d8711414e7c275733e27a089ed" ]
[ "traffic/data/basic/airways.py" ]
[ "from __future__ import annotations\n\nimport logging\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Tuple, Union\n\nimport pandas as pd\nfrom shapely.geometry.base import BaseGeometry\n\nfrom ...core.mixins import GeoDBMixin\nfrom ...core.structure import Navaid, Route\n\n__github_url = \"https://raw.githubusercontent.com/\"\nbase_url = __github_url + \"xoolive/traffic/master/data/navdata\"\n\nBoundsType = Union[BaseGeometry, Tuple[float, float, float, float]]\n\n\nclass Airways(GeoDBMixin):\n \"\"\"\n An ATS route is a specified route designed for channelling the flow of\n traffic as necessary for the provision of air traffic services.\n\n The term “ATS route” is used to mean variously, airway, advisory route,\n controlled or uncontrolled route, arrival or departure route, etc.\n\n An ATS route is defined by route specifications which include an ATS route\n designator, the track to or from significant points (waypoints), distance\n between significant points, reporting requirements and, as determined by the\n appropriate ATS authority, the lowest safe altitude. (ICAO Annex 11 - Air\n Traffic Services)\n\n A (deprecated) database of world ATS routes is available as:\n\n >>> from traffic.data import airways\n\n Any ATS route can be accessed by the bracket notation:\n\n >>> airways['Z50']\n Route('Z50', navaids=['EGOBA', 'SOT', 'BULTI', 'AYE', 'AVMON', ...])\n\n >>> airways.extent(\"Occitanie\")[\"UN869\"]\n Route('UN869', navaids=['XOMBO', 'TIVLI', 'AGN', 'NARAK', 'NASEP', ...])\n\n .. note::\n The following snippet plots the (in)famous `Silk Road Airway (L888)\n <https://flugdienstberater.org/l888>`_ over the Himalaya mountains,\n which requires special qualifications.\n\n .. jupyter-execute::\n\n import matplotlib.pyplot as plt\n\n from traffic.data import airways\n from cartes.crs import Orthographic\n\n with plt.style.context(\"traffic\"):\n\n fig, ax = plt.subplots(\n figsize=(7, 7),\n subplot_kw=dict(projection=Orthographic(95, 30)),\n )\n\n ax.stock_img()\n ax.coastlines()\n\n airways[\"L888\"].plot(\n ax, linewidth=2, linestyle=\"solid\", color=\"crimson\"\n )\n\n for navaid in airways[\"L888\"].navaids:\n navaid.plot(\n ax, s=20, marker=\".\", color=\"crimson\",\n text_kw=dict(fontsize=8)\n )\n\n \"\"\"\n\n cache_dir: Path\n alternatives: dict[str, \"Airways\"] = dict()\n name: str = \"default\"\n\n def __init__(self, data: None | pd.DataFrame = None) -> None:\n self._data = data\n if self.available:\n Airways.alternatives[self.name] = self\n\n def download_data(self) -> None: # coverage: ignore\n from .. 
import session\n\n cache_file = self.cache_dir / \"earth_awy.dat\"\n if cache_file.exists():\n self._data = pd.read_csv(cache_file, sep=\" \", header=None)\n else:\n c = session.get(f\"{base_url}/earth_awy.dat\")\n c.raise_for_status()\n b = BytesIO(c.content)\n self._data = pd.read_csv(b, sep=\" \", header=None)\n\n self._data.columns = [\"route\", \"id\", \"navaid\", \"latitude\", \"longitude\"]\n self._data.to_pickle(self.cache_dir / \"traffic_airways.pkl\")\n\n @property\n def available(self) -> bool:\n return True\n\n @property\n def data(self) -> pd.DataFrame:\n if self._data is not None:\n return self._data\n\n if not (self.cache_dir / \"traffic_airways.pkl\").exists():\n self.download_data()\n else:\n logging.info(\"Loading airways database\")\n self._data = pd.read_pickle(self.cache_dir / \"traffic_airways.pkl\")\n\n if self._data is not None:\n self._data = self._data.rename(\n columns=dict(lat=\"latitude\", lon=\"longitude\")\n )\n\n return self._data\n\n def __getitem__(self, name: str) -> None | Route:\n output = self.data.query(\"route == @name\").sort_values(\"id\")\n if output.shape[0] == 0:\n return None\n return Route(\n name,\n list(\n Navaid(\n x[\"navaid\"],\n \"FIX\",\n x[\"latitude\"],\n x[\"longitude\"],\n 0,\n None,\n None,\n None,\n )\n for _, x in output.iterrows()\n ),\n )\n\n def global_get(self, name: str) -> None | Route:\n \"\"\"Search for a route from all alternative data sources.\"\"\"\n for _key, value in sorted(\n self.alternatives.items(),\n # lowest priority for the default source of information\n key=lambda key: 1 if key[0] == \"default\" else 0,\n ):\n alt = value[name]\n if alt is not None:\n return alt\n return None\n\n def search(self, name: str) -> \"Airways\":\n \"\"\"\n Selects the subset of airways matching name in the route name or in the\n passed navigational beacon.\n\n >>> airways.extent('Switzerland').search(\"Z50\")\n route id navaid latitude longitude\n ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n Z50 7 GERSA 47.04 8.532\n Z50 8 KELIP 46.96 8.762\n Z50 9 SOPER 46.89 8.944\n Z50 10 PELAD 46.6 9.726\n Z50 11 RESIA 46.48 10.04\n\n >>> airways.search(\"NARAK\")\n route id navaid latitude longitude\n ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n N869 88 NARAK 44.3 1.749\n UN859 15 NARAK 44.3 1.749\n UN869 23 NARAK 44.3 1.749\n UT122 15 NARAK 44.3 1.749\n UY155 2 NARAK 44.3 1.749\n UZ365 3 NARAK 44.3 1.749\n \"\"\"\n output = self.__class__(\n self.data.query(\"route == @name.upper() or navaid == @name.upper()\")\n )\n return output\n" ]
[ [ "pandas.read_csv", "pandas.read_pickle" ] ]
KraProgrammer/AdventOfCode2020
[ "937e743bfab228449e6bad2d06267c9c999afd1d" ]
[ "src/day5.py" ]
[ "from math import ceil, floor\n\nimport numpy as np\nfrom aocd.models import Puzzle\n\n\ndef solve_puzzle_one(input_array):\n ids = map(lambda bp: calc_id(bp, 7), input_array)\n print(max(ids))\n\n\ndef calc_id(bp, length_first=7):\n first = bp[0:length_first]\n sec = bp[length_first:]\n first_upper = pow(2, len(first)) - 1\n sec_upper = pow(2, len(sec)) - 1\n row = calc_id_rec(first, 0, first_upper)\n column = calc_id_rec(sec, 0, sec_upper)\n return row * 8 + column\n\n\ndef calc_id_rec(id, l, u):\n if len(id) == 0:\n assert (l == u)\n return l\n update = (u - l) / 2\n if id[0] == 'F' or id[0] == 'L':\n return calc_id_rec(id[1:], l, floor(u - update))\n else:\n return calc_id_rec(id[1:], ceil(l + update), u)\n\n\ndef solve_puzzle_two(input_array):\n ids = map(lambda bp: calc_id(bp, 7), input_array)\n\n sorted_ids = np.array(np.sort(list(ids)))\n missing = np.diff(sorted_ids)\n print(sorted_ids[np.argwhere(missing > 1)[0, 0]] + 1)\n\n\ndef parse_input(data):\n return np.array(data.splitlines())\n\n\ntest_input = \"\"\"\nFBFBBFFRLR\"\"\"\n\nif __name__ == '__main__':\n puzzle = Puzzle(year=2020, day=5)\n if False:\n array = parse_input(test_input)\n else:\n array = parse_input(puzzle.input_data)\n solve_puzzle_one(array)\n solve_puzzle_two(array)\n" ]
[ [ "numpy.diff", "numpy.argwhere" ] ]
gpdsec/HSD
[ "42c8d1c338b09cb7b4f7bb2275253cb207fbce97" ]
[ "data/coco.py" ]
[ "\"\"\"VOC Dataset Classes\n\nOriginal author: Francisco Massa\nhttps://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py\n\nUpdated by: Ellis Brown, Max deGroot\n\"\"\"\n\nimport os\nimport pickle\nimport os.path\nimport sys\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport cv2\nimport numpy as np\nimport json\nimport uuid\n\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom pycocotools import mask as COCOmask\n\n\n# no use\nclass COCOAnnotationTransform(object):\n \"\"\"Transforms a VOC annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n\n Arguments:\n class_to_ind (dict, optional): dictionary lookup of classnames -> indexes\n (default: alphabetic indexing of VOC's 20 classes)\n keep_difficult (bool, optional): keep difficult instances or not\n (default: False)\n height (int): height\n width (int): width\n \"\"\"\n\n def __init__(self):\n pass\n\n def __call__(self, target, width, height):\n \"\"\"\n Arguments:\n target (annotation) : the target annotation to be made usable\n will be not normlized\n Returns:\n a list containing lists of bounding boxes [bbox coords, class name]\n \"\"\"\n\n boxes = target[:, :-1].copy()\n labels = target[:, -1].copy()\n boxes[:, 0::2] /= width\n boxes[:, 1::2] /= height\n b_w = (boxes[:, 2] - boxes[:, 0]) * 1.\n b_h = (boxes[:, 3] - boxes[:, 1]) * 1.\n mask_b = np.minimum(b_w, b_h) > 0.01\n boxes_t = boxes[mask_b]\n labels_t = labels[mask_b].copy()\n\n return boxes_t, labels_t\n\n\nclass COCODetection(data.Dataset):\n \"\"\"VOC Detection Dataset Object\n\n input is image, target is annotation\n\n Arguments:\n root (string): filepath to VOCdevkit folder.\n image_set (string): imageset to use (eg. 
'train', 'val', 'test')\n transform (callable, optional): transformation to perform on the\n input image\n target_transform (callable, optional): transformation to perform on the\n target `annotation`\n (eg: take in caption string, return tensor of word indices)\n dataset_name (string, optional): which dataset to load\n (default: 'VOC2007')\n \"\"\"\n\n def __init__(self, root, image_sets, transform=None, dataset_name='COCO'):\n self.root = root\n self.cache_path = os.path.join(self.root, 'cache')\n self.image_set = image_sets\n self.transform = transform\n self.name = dataset_name\n self.ids = list()\n self.annotations = list()\n self._view_map = {\n 'minival2014': 'val2014', # 5k val2014 subset\n 'valminusminival2014': 'val2014', # val2014 \\setminus minival2014\n 'test-dev2015': 'test2015',\n }\n\n for (year, image_set) in image_sets:\n coco_name = image_set + year\n data_name = (self._view_map[coco_name]\n if coco_name in self._view_map else coco_name)\n annofile = self._get_ann_file(coco_name)\n _COCO = COCO(annofile)\n self._COCO = _COCO\n self.coco_name = coco_name\n cats = _COCO.loadCats(_COCO.getCatIds())\n self._classes = tuple(['__background__'] +\n [c['name'] for c in cats])\n self.num_classes = len(self._classes)\n self._class_to_ind = dict(\n zip(self._classes, range(self.num_classes)))\n self._class_to_coco_cat_id = dict(\n zip([c['name'] for c in cats], _COCO.getCatIds()))\n indexes = _COCO.getImgIds()\n self.image_indexes = indexes\n self.ids.extend([\n self.image_path_from_index(data_name, index)\n for index in indexes\n ])\n if image_set.find('test') != -1:\n print('test set will not load annotations!')\n else:\n self.annotations.extend(\n self._load_coco_annotations(coco_name, indexes, _COCO))\n\n def image_path_from_index(self, name, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n # Example image path for index=119993:\n # images/train2014/COCO_train2014_000000119993.jpg\n file_name = ('COCO_' + name + '_' + str(index).zfill(12) + '.jpg')\n image_path = os.path.join(self.root, 'images', name, file_name)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _get_ann_file(self, name):\n prefix = 'instances' if name.find('test') == -1 \\\n else 'image_info'\n return os.path.join(self.root, 'annotations',\n prefix + '_' + name + '.json')\n\n def _load_coco_annotations(self, coco_name, indexes, _COCO):\n cache_file = os.path.join(self.cache_path, coco_name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = pickle.load(fid)\n print('{} gt roidb loaded from {}'.format(coco_name, cache_file))\n return roidb\n\n gt_roidb = [\n self._annotation_from_index(index, _COCO) for index in indexes\n ]\n with open(cache_file, 'wb') as fid:\n pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n return gt_roidb\n\n def _annotation_from_index(self, index, _COCO):\n \"\"\"\n Loads COCO bounding-box instance annotations. Crowd instances are\n handled by marking their overlaps (with all categories) to -1. 
This\n overlap value means that crowd \"instances\" are excluded from training.\n \"\"\"\n im_ann = _COCO.loadImgs(index)[0]\n width = im_ann['width']\n height = im_ann['height']\n\n annIds = _COCO.getAnnIds(imgIds=index, iscrowd=None)\n objs = _COCO.loadAnns(annIds)\n # Sanitize bboxes -- some are invalid\n valid_objs = []\n for obj in objs:\n x1 = np.max((0, obj['bbox'][0]))\n y1 = np.max((0, obj['bbox'][1]))\n x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))\n y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_bbox'] = [x1, y1, x2, y2]\n valid_objs.append(obj)\n objs = valid_objs\n num_objs = len(objs)\n\n res = np.zeros((num_objs, 5))\n\n # Lookup table to map from COCO category ids to our internal class\n # indices\n coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],\n self._class_to_ind[cls])\n for cls in self._classes[1:]])\n\n for ix, obj in enumerate(objs):\n cls = coco_cat_id_to_class_ind[obj['category_id']]\n res[ix, 0:4] = obj['clean_bbox']\n res[ix, 4] = cls\n\n return res\n\n def __getitem__(self, index):\n img_id = self.ids[index]\n target = self.annotations[index] if self.coco_name.find('test') == -1 else 1\n img = cv2.imread(img_id, cv2.IMREAD_COLOR)\n # img0 = img[:, ::-1, :]\n height, width, _ = img.shape\n img_info = [width, height]\n # if self.target_transform is not None:\n # target = self.target_transform(target)\n\n if self.transform is not None:\n img, target = self.transform(img, target)\n # img0, target = self.transform(img0, target)\n # img = torch.cat([img, img0], dim=0)\n\n return img, target, img_info\n\n def __len__(self):\n return len(self.ids)\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n PIL img\n '''\n img_id = self.ids[index]\n return cv2.imread(img_id, cv2.IMREAD_COLOR)\n\n def pull_tensor(self, index):\n '''Returns the original image at an index in tensor form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n tensorized version of img, squeezed\n '''\n to_tensor = transforms.ToTensor()\n return torch.Tensor(self.pull_image(index)).unsqueeze_(0)\n\n def _print_detection_eval_metrics(self, coco_eval):\n IoU_lo_thresh = 0.5\n IoU_hi_thresh = 0.95\n\n def _get_thr_ind(coco_eval, thr):\n ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &\n (coco_eval.params.iouThrs < thr + 1e-5))[0][0]\n iou_thr = coco_eval.params.iouThrs[ind]\n assert np.isclose(iou_thr, thr)\n return ind\n\n ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)\n ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)\n # precision has dims (iou, recall, cls, area range, max dets)\n # area range index 0: all area ranges\n # max dets index 2: 100 per image\n precision = \\\n coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]\n ap_default = np.mean(precision[precision > -1])\n print('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '\n '~~~~'.format(IoU_lo_thresh, IoU_hi_thresh))\n print('{:.1f}'.format(100 * ap_default))\n for cls_ind, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n # minus 1 because of __background__\n precision = coco_eval.eval['precision'][ind_lo:(\n ind_hi + 1), :, cls_ind - 1, 0, 2]\n ap = 
np.mean(precision[precision > -1])\n print('{:.1f}'.format(100 * ap))\n\n print('~~~~ Summary metrics ~~~~')\n coco_eval.summarize()\n\n def _do_detection_eval(self, res_file, output_dir):\n ann_type = 'bbox'\n coco_dt = self._COCO.loadRes(res_file)\n coco_eval = COCOeval(self._COCO, coco_dt)\n coco_eval.params.useSegm = (ann_type == 'segm')\n coco_eval.evaluate()\n coco_eval.accumulate()\n self._print_detection_eval_metrics(coco_eval)\n eval_file = os.path.join(output_dir, 'detection_results.pkl')\n with open(eval_file, 'wb') as fid:\n pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)\n print('Wrote COCO eval results to: {}'.format(eval_file))\n\n def _coco_results_one_category(self, boxes, cat_id):\n results = []\n for im_ind, index in enumerate(self.image_indexes):\n dets = boxes[im_ind].astype(np.float)\n if dets == []:\n continue\n scores = dets[:, -1]\n xs = dets[:, 0]\n ys = dets[:, 1]\n ws = dets[:, 2] - xs + 1\n hs = dets[:, 3] - ys + 1\n results.extend([{\n 'image_id': index,\n 'category_id': cat_id,\n 'bbox': [xs[k], ys[k], ws[k], hs[k]],\n 'score': scores[k]\n } for k in range(dets.shape[0])])\n return results\n\n def _write_coco_results_file(self, all_boxes, res_file):\n # [{\"image_id\": 42,\n # \"category_id\": 18,\n # \"bbox\": [258.15,41.29,348.26,243.78],\n # \"score\": 0.236}, ...]\n results = []\n for cls_ind, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n print('Collecting {} results ({:d}/{:d})'.format(\n cls, cls_ind, self.num_classes))\n coco_cat_id = self._class_to_coco_cat_id[cls]\n results.extend(\n self._coco_results_one_category(all_boxes[cls_ind],\n coco_cat_id))\n '''\n if cls_ind ==30:\n res_f = res_file+ '_1.json'\n print('Writing results json to {}'.format(res_f))\n with open(res_f, 'w') as fid:\n json.dump(results, fid)\n results = []\n '''\n #res_f2 = res_file+'_2.json'\n print('Writing results json to {}'.format(res_file))\n with open(res_file, 'w') as fid:\n json.dump(results, fid)\n\n def evaluate_detections(self, all_boxes, output_dir):\n res_file = os.path.join(output_dir,\n ('detections_' + self.coco_name + '_results'))\n res_file += '.json'\n self._write_coco_results_file(all_boxes, res_file)\n # Only do evaluation on non-test sets\n if self.coco_name.find('test') == -1:\n self._do_detection_eval(res_file, output_dir)\n # Optionally cleanup results json file\n" ]
[ [ "numpy.minimum", "numpy.max", "numpy.mean", "numpy.zeros", "numpy.where", "numpy.isclose" ] ]
SamuelMarks/recommenders
[ "17d2c6dc9e20a3bcbabff3d4edf0e054128a4a3e" ]
[ "tensorflow_recommenders/models/base.py" ]
[ "# Copyright 2020 The TensorFlow Recommenders Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# lint-as: python3\n\"\"\"Base model.\"\"\"\n\nimport tensorflow as tf\n\n\nclass Model(tf.keras.Model):\n \"\"\"Base model for TFRS models.\n\n Many recommender models are relatively complex, and do not neatly fit into\n supervised or unsupervised paradigms. This base class makes it easy to\n define custom training and test losses for such complex models.\n\n This is done by asking the user to implement the following methods:\n - `__init__` to set up your model. Variable, task, loss, and metric\n initialization should go here.\n - `compute_loss` to define the training loss. The method takes as input the\n raw features passed into the model, and returns a loss tensor for training.\n As part of doing so, it should also update the model's metrics.\n - [Optional] `call` to define how the model computes its predictions. This\n is not always necessary: for example, two-tower retrieval models have two\n well-defined submodels whose `call` methods are normally used directly.\n\n Note that this base class is a thin conveniece wrapper for tf.keras.Model, and\n equivalent functionality can easily be achieved by overriding the `train_step`\n and `test_step` methods of a plain Keras model. Doing so also makes it easy\n to build even more complex training mechanisms, such as the use of\n different optimizers for different variables, or manipulating gradients.\n\n Keras has an excellent tutorial on how to\n do this [here](\n https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit).\n \"\"\"\n\n def compute_loss(self, inputs, training: bool = False) -> tf.Tensor:\n \"\"\"Defines the loss function.\n\n Args:\n inputs: A data structure of tensors: raw inputs to the model. 
These will\n usually contain labels and weights as well as features.\n training: Whether the model is in training mode.\n\n Returns:\n Loss tensor.\n \"\"\"\n\n raise NotImplementedError(\n \"Implementers must implement the `compute_loss` method.\")\n\n def train_step(self, inputs):\n \"\"\"Custom train step using the `compute_loss` method.\"\"\"\n\n with tf.GradientTape() as tape:\n loss = self.compute_loss(inputs, training=True)\n\n # Handle regularization losses as well.\n regularization_loss = sum(self.losses)\n\n total_loss = loss + regularization_loss\n\n gradients = tape.gradient(total_loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n\n metrics = {metric.name: metric.result() for metric in self.metrics}\n metrics[\"loss\"] = loss\n metrics[\"regularization_loss\"] = regularization_loss\n metrics[\"total_loss\"] = total_loss\n\n return metrics\n\n def test_step(self, inputs):\n \"\"\"Custom test step using the `compute_loss` method.\"\"\"\n\n loss = self.compute_loss(inputs, training=False)\n\n # Handle regularization losses as well.\n regularization_loss = sum(self.losses)\n\n total_loss = loss + regularization_loss\n\n metrics = {metric.name: metric.result() for metric in self.metrics}\n metrics[\"loss\"] = loss\n metrics[\"regularization_loss\"] = regularization_loss\n metrics[\"total_loss\"] = total_loss\n\n return metrics\n" ]
[ [ "tensorflow.GradientTape" ] ]
SelmansThesis/stable-baselines3
[ "0146c1d96237b84ce7e5d39022f40e583d7e262b" ]
[ "tests/test_cnn.py" ]
[ "import os\nfrom copy import deepcopy\n\nimport numpy as np\nimport pytest\nimport torch as th\n\nfrom stable_baselines3 import A2C, DQN, PPO, SAC, TD3\nfrom stable_baselines3.common.identity_env import FakeImageEnv\nfrom stable_baselines3.common.utils import zip_strict\n\n\[email protected](\"model_class\", [A2C, PPO, SAC, TD3, DQN])\ndef test_cnn(tmp_path, model_class):\n SAVE_NAME = \"cnn_model.zip\"\n # Fake grayscale with frameskip\n # Atari after preprocessing: 84x84x1, here we are using lower resolution\n # to check that the network handle it automatically\n env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=1, discrete=model_class not in {SAC, TD3})\n if model_class in {A2C, PPO}:\n kwargs = dict(n_steps=100)\n else:\n # Avoid memory error when using replay buffer\n # Reduce the size of the features\n kwargs = dict(buffer_size=250, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)))\n model = model_class(\"CnnPolicy\", env, **kwargs).learn(250)\n\n obs = env.reset()\n\n action, _ = model.predict(obs, deterministic=True)\n\n model.save(tmp_path / SAVE_NAME)\n del model\n\n model = model_class.load(tmp_path / SAVE_NAME)\n\n # Check that the prediction is the same\n assert np.allclose(action, model.predict(obs, deterministic=True)[0])\n\n os.remove(str(tmp_path / SAVE_NAME))\n\n\ndef patch_dqn_names_(model):\n # Small hack to make the test work with DQN\n if isinstance(model, DQN):\n model.critic = model.q_net\n model.critic_target = model.q_net_target\n\n\ndef params_should_match(params, other_params):\n for param, other_param in zip_strict(params, other_params):\n assert th.allclose(param, other_param)\n\n\ndef params_should_differ(params, other_params):\n for param, other_param in zip_strict(params, other_params):\n assert not th.allclose(param, other_param)\n\n\ndef check_td3_feature_extractor_match(model):\n for (key, actor_param), critic_param in zip(model.actor_target.named_parameters(), model.critic_target.parameters()):\n if \"features_extractor\" in key:\n assert th.allclose(actor_param, critic_param), key\n\n\ndef check_td3_feature_extractor_differ(model):\n for (key, actor_param), critic_param in zip(model.actor_target.named_parameters(), model.critic_target.parameters()):\n if \"features_extractor\" in key:\n assert not th.allclose(actor_param, critic_param), key\n\n\[email protected](\"model_class\", [SAC, TD3, DQN])\[email protected](\"share_features_extractor\", [True, False])\ndef test_features_extractor_target_net(model_class, share_features_extractor):\n if model_class == DQN and share_features_extractor:\n pytest.skip()\n\n env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=1, discrete=model_class not in {SAC, TD3})\n # Avoid memory error when using replay buffer\n # Reduce the size of the features\n kwargs = dict(buffer_size=250, learning_starts=100, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)))\n if model_class != DQN:\n kwargs[\"policy_kwargs\"][\"share_features_extractor\"] = share_features_extractor\n\n model = model_class(\"CnnPolicy\", env, seed=0, **kwargs)\n\n patch_dqn_names_(model)\n\n if share_features_extractor:\n # Check that the objects are the same and not just copied\n assert id(model.policy.actor.features_extractor) == id(model.policy.critic.features_extractor)\n if model_class == TD3:\n assert id(model.policy.actor_target.features_extractor) == id(model.policy.critic_target.features_extractor)\n # Actor and critic feature extractor should be the same\n 
td3_features_extractor_check = check_td3_feature_extractor_match\n else:\n # Actor and critic feature extractor should differ same\n td3_features_extractor_check = check_td3_feature_extractor_differ\n # Check that the object differ\n if model_class != DQN:\n assert id(model.policy.actor.features_extractor) != id(model.policy.critic.features_extractor)\n\n if model_class == TD3:\n assert id(model.policy.actor_target.features_extractor) != id(model.policy.critic_target.features_extractor)\n\n # Critic and target should be equal at the begginning of training\n params_should_match(model.critic.parameters(), model.critic_target.parameters())\n\n # TD3 has also a target actor net\n if model_class == TD3:\n params_should_match(model.actor.parameters(), model.actor_target.parameters())\n\n model.learn(200)\n\n # Critic and target should differ\n params_should_differ(model.critic.parameters(), model.critic_target.parameters())\n\n if model_class == TD3:\n params_should_differ(model.actor.parameters(), model.actor_target.parameters())\n td3_features_extractor_check(model)\n\n # Re-initialize and collect some random data (without doing gradient steps,\n # since 10 < learning_starts = 100)\n model = model_class(\"CnnPolicy\", env, seed=0, **kwargs).learn(10)\n\n patch_dqn_names_(model)\n\n original_param = deepcopy(list(model.critic.parameters()))\n original_target_param = deepcopy(list(model.critic_target.parameters()))\n if model_class == TD3:\n original_actor_target_param = deepcopy(list(model.actor_target.parameters()))\n\n # Deactivate copy to target\n model.tau = 0.0\n model.train(gradient_steps=1)\n\n # Target should be the same\n params_should_match(original_target_param, model.critic_target.parameters())\n\n if model_class == TD3:\n params_should_match(original_actor_target_param, model.actor_target.parameters())\n td3_features_extractor_check(model)\n\n # not the same for critic net (updated by gradient descent)\n params_should_differ(original_param, model.critic.parameters())\n\n # Update the reference as it should not change in the next step\n original_param = deepcopy(list(model.critic.parameters()))\n\n if model_class == TD3:\n original_actor_param = deepcopy(list(model.actor.parameters()))\n\n # Deactivate learning rate\n model.lr_schedule = lambda _: 0.0\n # Re-activate polyak update\n model.tau = 0.01\n # Special case for DQN: target net is updated in the `collect_rollout()`\n # not the `train()` method\n if model_class == DQN:\n model.target_update_interval = 1\n model._on_step()\n\n model.train(gradient_steps=1)\n\n # Target should have changed now (due to polyak update)\n params_should_differ(original_target_param, model.critic_target.parameters())\n\n # Critic should be the same\n params_should_match(original_param, model.critic.parameters())\n\n if model_class == TD3:\n params_should_differ(original_actor_target_param, model.actor_target.parameters())\n\n params_should_match(original_actor_param, model.actor.parameters())\n\n td3_features_extractor_check(model)\n" ]
[ [ "torch.allclose" ] ]
commonsense-exception/commonsense-exception
[ "ab83323a2d566f49b6de7b4b06c3c338ceec895f" ]
[ "analysis/jaccard/kbias_analysis.py" ]
[ "import pandas as pd\nimport argparse\nfrom tqdm import tqdm\n\n\"\"\"\nThe goal of this file is to produce jaccard analysis to analyze to what extent the\nk-associative bias of one model coincides with that of another - across the different\nmodel pairs and different Ks\n\"\"\"\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='arguments for analyzing the top-k bias generation')\n parser.add_argument('-top', help=\"Size of intersection of interest\", default=10)\n parser.add_argument('-d', help=\"source data path (bias generation)\",\\\n default=\"../../data/assets/k_analysis/\")\n parser.add_argument('-o', help=\"output path to get the cross jaccard statistics\",\\\n default=\"./assets/\")\n \n # Brown University argument\n parser.add_argument('-dept', help=\"whether we're on the department machine or not\", default=\"True\")\n return parser.parse_args()\n \n# Name of the models that we're dealing with\nMODEL_NAMES = [\"BERT_base\", \"BERT_large\", \"RoBERTa_small\",\\\n \"RoBERTa_large\", \"DistilRoBERTa\", \"DistilBERT\",\\\n \"SqueezeBERT\", \"MobileBERT\", \"Longformer_base\",\\\n \"Longformer_large\", \"ALBERT_base\", \"ALBERT_large\",\\\n \"ALBERT_xlarge\", \"ALBERT_xxlarge\"]\n\n# different values of K\nKs = [1, 3, 5, 8, 10]\n\ndef jaccard_index(input_one, input_two):\n \"\"\"\n Function to get jaccard index between two sets separated by commas\n \n input_one: str, of format \"a,b,c\"\n input_two: str, of format \"a,b,c\"\n \"\"\"\n if pd.isnull(input_one): input_one = \"\"\n if pd.isnull(input_two): input_two = \"\"\n set_one = set(input_one.split(\",\"))\n set_two = set(input_two.split(\",\"))\n return len(set_one.intersection(set_two)) / len(set_one.union(set_two))\n\ndef get_cross_jaccard_index(df, jaccard_outp_path):\n \"\"\"\n Draws a table where each row and column corresponds to a \n \"\"\"\n def record_row(row, col_one_name, col_two_name, mutating_dict):\n val_one, val_two = row[col_one_name], row[col_two_name]\n mutating_dict[\"sum_jaccard\"] += jaccard_index(val_one, val_two)\n mutating_dict[\"rows_recorded\"] += 1\n\n # keeps track of models that we have completed jaccard analysis for, and\n # the results\n models_done = {}\n # for each pair of models\n for m_1 in MODEL_NAMES:\n for m_2 in MODEL_NAMES:\n # if the pair has yet to be computed, and are distinct\n if (m_2, m_1) in models_done or (m_1, m_2) in models_done: continue\n right_column_one, right_column_two = f\"{m_1}_common\", f\"{m_2 }_common\"\n models_dict = {\"sum_jaccard\": 0, \"rows_recorded\": 0}\n # record the jaccard statistics for this pair\n df.apply(lambda x: record_row(x, right_column_one, right_column_two, models_dict), axis=1)\n models_done[(m_1, m_2)] = models_dict\n\n # After we are done, then we will draw up the table\n list_of_rows = []\n for m_1 in MODEL_NAMES:\n new_row = [m_1]\n for m_2 in MODEL_NAMES:\n # get the stats for the pair\n pair = (m_1, m_2)\n if pair not in models_done: pair = (m_2, m_1)\n according_dict = models_done[pair]\n # Get the jaccard statistics\n average_jaccard = according_dict[\"sum_jaccard\"] / according_dict[\"rows_recorded\"]\n new_row.append(average_jaccard)\n \n # after we have considered all the possible other models, append this row to list of rows\n list_of_rows.append(new_row)\n \n # and then we will create a new pandas df\n column_names = [\"models\"]\n column_names.extend(MODEL_NAMES)\n final_df = pd.DataFrame(list_of_rows, columns=column_names)\n # final_df.to_csv(jaccard_outp_path, sep=\"\\t\", index=False)\n return final_df\n 
\n\n\n\n\n\nif __name__ == \"__main__\":\n print(\"-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- JACCARD ANALYSIS -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\")\n args = parse_args()\n for k in tqdm(Ks):\n correct_input_path = args.d + f\"things_k{k}.tsv\"\n correct_output_path = args.o + f\"jaccard_k{k}.tsv\"\n correct_df = pd.read_csv(correct_input_path, sep=\"\\t\")\n print(get_cross_jaccard_index(correct_df, correct_output_path))\n\n\n \n\n" ]
[ [ "pandas.read_csv", "pandas.isnull", "pandas.DataFrame" ] ]
bhky/targetran
[ "ae836949448c383d58bb5b5ce70b3a4547f32102" ]
[ "targetran/np/_np.py" ]
[ "\"\"\"\nAPI for NumPy usage.\n\"\"\"\n\nimport functools\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple\n\nimport numpy as np\n\nfrom targetran._check import (\n _check_shear_input,\n _check_translate_input,\n _check_crop_input,\n _check_input_range,\n)\nfrom targetran._np_functional import (\n _np_convert,\n _np_range,\n _np_cast_to_int,\n _np_round_to_int,\n _np_resize_image,\n _np_boolean_mask,\n _np_logical_and,\n _np_pad_image,\n _np_gather_image,\n)\nfrom targetran._transform import (\n _AffineDependency,\n _affine_transform,\n _flip_left_right,\n _flip_up_down,\n _rotate,\n _shear,\n _translate,\n _get_crop_inputs,\n _get_random_size_fractions,\n _crop,\n _resize,\n _get_flip_left_right_mats,\n _get_flip_up_down_mats,\n _get_rotate_mats,\n _get_shear_mats,\n _get_translate_mats,\n)\nfrom targetran._typing import NDFloatArray, NDIntArray\nfrom targetran.utils import Interpolation\n\n\ndef _np_get_affine_dependency() -> _AffineDependency:\n return _AffineDependency(\n _np_convert, np.shape, np.reshape, np.expand_dims, np.squeeze,\n _np_pad_image, _np_range, _np_cast_to_int, _np_round_to_int,\n np.repeat, np.tile, np.ones_like, np.stack, np.concatenate, np.matmul,\n np.clip, np.floor, np.ceil, _np_gather_image,\n np.copy, np.max, np.min,\n _np_logical_and, _np_boolean_mask\n )\n\n\ndef _np_affine_transform(\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n image_dest_tran_mat: NDFloatArray,\n bboxes_tran_mat: NDFloatArray,\n interpolation: Interpolation\n) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return _affine_transform(\n image, bboxes, labels, image_dest_tran_mat, bboxes_tran_mat,\n interpolation, _np_get_affine_dependency()\n )\n\n\ndef flip_left_right(\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray\n) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return _flip_left_right(\n image, bboxes, labels,\n Interpolation.NEAREST, _np_get_affine_dependency()\n )\n\n\ndef flip_up_down(\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray\n) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return _flip_up_down(\n image, bboxes, labels,\n Interpolation.NEAREST, _np_get_affine_dependency()\n )\n\n\ndef rotate(\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n angle_deg: float,\n interpolation: Interpolation = Interpolation.BILINEAR\n) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return _rotate(\n image, bboxes, labels, _np_convert(angle_deg), np.cos, np.sin,\n interpolation, _np_get_affine_dependency()\n )\n\n\ndef shear(\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n angle_deg: float,\n interpolation: Interpolation = Interpolation.BILINEAR,\n _check_input: bool = True\n) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n if _check_input:\n _check_shear_input(angle_deg)\n return _shear(\n image, bboxes, labels, _np_convert(angle_deg), np.tan,\n interpolation, _np_get_affine_dependency()\n )\n\n\ndef translate(\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n translate_height: int,\n translate_width: int,\n interpolation: Interpolation = Interpolation.BILINEAR,\n _check_input: bool = True\n) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n if _check_input:\n _check_translate_input(image.shape, translate_height, translate_width)\n return _translate(\n image, bboxes, labels,\n _np_convert(translate_height), _np_convert(translate_width),\n interpolation, _np_get_affine_dependency()\n )\n\n\ndef 
_np_get_crop_inputs(\n image_height: int,\n image_width: int,\n height_fraction_range: Tuple[float, float],\n width_fraction_range: Tuple[float, float],\n rand_fn: Callable[..., NDFloatArray]\n) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray, NDFloatArray]:\n return _get_crop_inputs(\n image_height, image_width, height_fraction_range, width_fraction_range,\n rand_fn, _np_convert, _np_round_to_int\n )\n\n\ndef crop(\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n offset_height: int,\n offset_width: int,\n crop_height: int,\n crop_width: int,\n _check_input: bool = True\n) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n if _check_input:\n _check_crop_input(image.shape, offset_height, offset_width)\n return _crop(\n image, bboxes, labels,\n _np_convert(offset_height), _np_convert(offset_width),\n _np_convert(crop_height), _np_convert(crop_width),\n _np_convert, np.shape, np.reshape, np.concatenate,\n _np_logical_and, np.squeeze, np.clip, _np_boolean_mask\n )\n\n\ndef resize(\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n dest_size: Tuple[int, int]\n) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return _resize(\n image, bboxes, labels, dest_size,\n _np_convert, np.shape, np.reshape, _np_resize_image, np.concatenate\n )\n\n\nclass RandomTransform:\n\n def __init__(\n self,\n np_fn: Callable[..., Tuple[NDFloatArray, NDFloatArray, NDFloatArray]],\n probability: float,\n seed: Optional[int],\n name: str,\n is_affine: bool\n ) -> None:\n self._np_fn = np_fn\n self.probability = probability\n self._rng = np.random.default_rng(seed=seed)\n self.name = name\n self.is_affine = is_affine\n\n def _rand_fn(self, shape: Sequence[int] = ()) -> NDFloatArray:\n return self._rng.random(shape)\n\n def _get_mats(\n self,\n image: NDFloatArray,\n rand_fn: Callable[..., NDFloatArray]\n ) -> Tuple[NDFloatArray, NDFloatArray]:\n pass\n\n def __call__(\n self,\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n *args: Any,\n **kwargs: Any\n ) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n # Make sure inputs are in the needed format.\n image = _np_convert(image)\n bboxes = _np_convert(bboxes)\n labels = _np_convert(labels)\n\n if self._rand_fn() < self.probability:\n return self._np_fn(image, bboxes, labels, *args, **kwargs)\n return image, bboxes, labels\n\n\nclass CombineAffine(RandomTransform):\n\n def __init__(\n self,\n transforms: Sequence[RandomTransform],\n num_selected_transforms: Optional[int] = None,\n selected_probabilities: Optional[List[float]] = None,\n interpolation: Interpolation = Interpolation.BILINEAR,\n probability: float = 1.0,\n seed: Optional[int] = None\n ) -> None:\n not_affine_trans = list(filter(lambda t: not t.is_affine, transforms))\n if not_affine_trans:\n raise AssertionError(\n f\"Non-affine transforms cannot be included in CombineAffine: \"\n f\"{[t.name for t in not_affine_trans]}\"\n )\n if num_selected_transforms and selected_probabilities:\n if len(selected_probabilities) != len(transforms):\n raise ValueError(\n \"Number of items in selected_probabilities should be \"\n \"the same as the number of items in transforms.\"\n )\n super().__init__(\n _np_affine_transform, probability, seed, \"CombineAffine\", True\n )\n self._transforms = transforms\n self._num_selected_transforms = num_selected_transforms\n self._selected_probabilities = selected_probabilities\n self._interpolation = interpolation\n self._identity_mat = np.expand_dims(np.array([ # type: ignore\n [1.0, 0.0, 0.0], [0.0, 
1.0, 0.0], [0.0, 0.0, 1.0]\n ]), axis=0)\n\n def _combine_mats(\n self,\n image: NDFloatArray,\n rand_fn: Callable[..., NDFloatArray]\n ) -> Tuple[NDFloatArray, NDFloatArray]:\n image_dest_tran_mats, bboxes_tran_mats, probs = tuple(zip(\n *[(*t._get_mats(image, rand_fn), t.probability)\n for i, t in enumerate(self._transforms)]\n ))\n\n if self._num_selected_transforms:\n indices = self._rng.choice(\n len(self._transforms),\n self._num_selected_transforms,\n replace=False, p=self._selected_probabilities\n ).tolist()\n image_dest_tran_mats = np.take(image_dest_tran_mats, indices, 0)\n bboxes_tran_mats = np.take(bboxes_tran_mats, indices, 0)\n else:\n conditions = np.reshape(rand_fn() < probs, (len(probs), 1, 1))\n image_dest_tran_mats = np.where(\n conditions, image_dest_tran_mats, self._identity_mat\n )\n bboxes_tran_mats = np.where(\n conditions, bboxes_tran_mats, self._identity_mat\n )\n\n image_dest_tran_mat = functools.reduce(\n np.matmul, image_dest_tran_mats\n )\n # Note the reversed order for the bboxes tran matrices.\n bboxes_tran_mat = functools.reduce(\n np.matmul, bboxes_tran_mats[::-1]\n )\n return image_dest_tran_mat, bboxes_tran_mat\n\n def __call__(\n self,\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n *args: Any,\n **kwargs: Any\n ) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n image_dest_tran_mat, bboxes_tran_mat = self._combine_mats(\n image, self._rand_fn\n )\n return super().__call__(\n image, bboxes, labels, image_dest_tran_mat, bboxes_tran_mat,\n self._interpolation\n )\n\n\nclass RandomFlipLeftRight(RandomTransform):\n\n def __init__(\n self,\n probability: float = 0.5,\n seed: Optional[int] = None\n ) -> None:\n super().__init__(\n flip_left_right, probability, seed, \"RandomFlipLeftRight\", True\n )\n\n def _get_mats(\n self,\n image: NDFloatArray,\n rand_fn: Callable[..., NDFloatArray]\n ) -> Tuple[NDFloatArray, NDFloatArray]:\n return _get_flip_left_right_mats(_np_convert)\n\n def __call__(\n self,\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n *args: Any,\n **kwargs: Any\n ) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return super().__call__(image, bboxes, labels)\n\n\nclass RandomFlipUpDown(RandomTransform):\n\n def __init__(\n self,\n probability: float = 0.5,\n seed: Optional[int] = None\n ) -> None:\n super().__init__(\n flip_up_down, probability, seed, \"RandomFlipUpDown\", True\n )\n\n def _get_mats(\n self,\n image: NDFloatArray,\n rand_fn: Callable[..., NDFloatArray]\n ) -> Tuple[NDFloatArray, NDFloatArray]:\n return _get_flip_up_down_mats(_np_convert)\n\n def __call__(\n self,\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n *args: Any,\n **kwargs: Any\n ) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return super().__call__(image, bboxes, labels)\n\n\nclass RandomRotate(RandomTransform):\n\n def __init__(\n self,\n angle_deg_range: Tuple[float, float] = (-15.0, 15.0),\n interpolation: Interpolation = Interpolation.BILINEAR,\n probability: float = 0.9,\n seed: Optional[int] = None\n ) -> None:\n _check_input_range(angle_deg_range, None, \"angle_deg_range\")\n super().__init__(rotate, probability, seed, \"RandomRotate\", True)\n self.angle_deg_range = np.array(angle_deg_range)\n self.interpolation = interpolation\n\n def _get_angle_deg(\n self,\n rand_fn: Callable[..., NDFloatArray]\n ) -> NDFloatArray:\n return ( # type: ignore\n self.angle_deg_range[1] - self.angle_deg_range[0]\n * rand_fn() + self.angle_deg_range[0]\n )\n\n def _get_mats(\n self,\n 
image: NDFloatArray,\n rand_fn: Callable[..., NDFloatArray]\n ) -> Tuple[NDFloatArray, NDFloatArray]:\n return _get_rotate_mats(\n self._get_angle_deg(rand_fn), _np_convert, np.cos, np.sin\n )\n\n def __call__(\n self,\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n *args: Any,\n **kwargs: Any\n ) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return super().__call__(\n image, bboxes, labels, self._get_angle_deg(self._rand_fn),\n self.interpolation\n )\n\n\nclass RandomShear(RandomTransform):\n\n def __init__(\n self,\n angle_deg_range: Tuple[float, float] = (-10.0, 10.0),\n interpolation: Interpolation = Interpolation.BILINEAR,\n probability: float = 0.9,\n seed: Optional[int] = None\n ) -> None:\n _check_input_range(angle_deg_range, (-90.0, 90.0), \"angle_deg_range\")\n super().__init__(shear, probability, seed, \"RandomShear\", True)\n self.angle_deg_range = np.array(angle_deg_range)\n self.interpolation = interpolation\n\n def _get_angle_deg(\n self,\n rand_fn: Callable[..., NDFloatArray]\n ) -> NDFloatArray:\n return ( # type: ignore\n self.angle_deg_range[1] - self.angle_deg_range[0]\n * rand_fn() + self.angle_deg_range[0]\n )\n\n def _get_mats(\n self,\n image: NDFloatArray,\n rand_fn: Callable[..., NDFloatArray]\n ) -> Tuple[NDFloatArray, NDFloatArray]:\n return _get_shear_mats(self._get_angle_deg(rand_fn), _np_convert, np.tan)\n\n def __call__(\n self,\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n *args: Any,\n **kwargs: Any\n ) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return super().__call__(\n image, bboxes, labels, self._get_angle_deg(self._rand_fn),\n self.interpolation, False\n )\n\n\nclass RandomTranslate(RandomTransform):\n\n def __init__(\n self,\n translate_height_fraction_range: Tuple[float, float] = (-0.1, 0.1),\n translate_width_fraction_range: Tuple[float, float] = (-0.1, 0.1),\n interpolation: Interpolation = Interpolation.BILINEAR,\n probability: float = 0.9,\n seed: Optional[int] = None\n ) -> None:\n _check_input_range(\n translate_height_fraction_range, (-1.0, 1.0),\n \"translate_height_fraction_range\"\n )\n _check_input_range(\n translate_width_fraction_range, (-1.0, 1.0),\n \"translate_width_fraction_range\"\n )\n super().__init__(translate, probability, seed, \"RandomTranslate\", True)\n self.translate_height_fraction_range = translate_height_fraction_range\n self.translate_width_fraction_range = translate_width_fraction_range\n self.interpolation = interpolation\n\n def _get_translate_height_and_width(\n self,\n image: NDFloatArray,\n rand_fn: Callable[..., NDFloatArray]\n ) -> Tuple[NDIntArray, NDIntArray]:\n height_fraction, width_fraction = _get_random_size_fractions(\n self.translate_height_fraction_range,\n self.translate_width_fraction_range,\n rand_fn, _np_convert\n )\n translate_height = _np_round_to_int(\n np.shape(image)[0] * height_fraction\n )\n translate_width = _np_round_to_int(\n np.shape(image)[1] * width_fraction\n )\n return translate_height, translate_width\n\n def _get_mats(\n self,\n image: NDFloatArray,\n rand_fn: Callable[..., NDFloatArray]\n ) -> Tuple[NDFloatArray, NDFloatArray]:\n translate_height, translate_width = \\\n self._get_translate_height_and_width(image, rand_fn)\n return _get_translate_mats(\n translate_height, translate_width, _np_convert\n )\n\n def __call__(\n self,\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n *args: Any,\n **kwargs: Any\n ) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n translate_height, 
translate_width = \\\n self._get_translate_height_and_width(image, self._rand_fn)\n return super().__call__(\n image, bboxes, labels, translate_height, translate_width,\n self.interpolation, False\n )\n\n\nclass RandomCrop(RandomTransform):\n\n def __init__(\n self,\n crop_height_fraction_range: Tuple[float, float] = (0.8, 0.9),\n crop_width_fraction_range: Tuple[float, float] = (0.8, 0.9),\n probability: float = 0.9,\n seed: Optional[int] = None\n ) -> None:\n _check_input_range(\n crop_height_fraction_range, (0.0, 1.0), \"crop_height_fraction_range\"\n )\n _check_input_range(\n crop_width_fraction_range, (0.0, 1.0), \"crop_width_fraction_range\"\n )\n super().__init__(crop, probability, seed, \"RandomCrop\", False)\n self.crop_height_fraction_range = crop_height_fraction_range\n self.crop_width_fraction_range = crop_width_fraction_range\n\n def __call__(\n self,\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray,\n *args: Any,\n **kwargs: Any\n ) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n offset_height, offset_width, crop_height, crop_width = \\\n _np_get_crop_inputs(\n np.shape(image)[0], np.shape(image)[1],\n self.crop_height_fraction_range,\n self.crop_width_fraction_range,\n self._rand_fn\n )\n\n return super().__call__(\n image, bboxes, labels,\n offset_height, offset_width, crop_height, crop_width, False\n )\n\n\nclass Resize:\n\n def __init__(self, dest_size: Tuple[int, int]) -> None:\n self.dest_size = dest_size\n self.name = \"Resize\"\n self.is_affine = False\n\n def __call__(\n self,\n image: NDFloatArray,\n bboxes: NDFloatArray,\n labels: NDFloatArray\n ) -> Tuple[NDFloatArray, NDFloatArray, NDFloatArray]:\n return resize(image, bboxes, labels, self.dest_size)\n" ]
[ [ "numpy.take", "numpy.shape", "numpy.array", "numpy.where", "numpy.random.default_rng" ] ]
xaviermouy/ecosound
[ "25d333807e090e737b3ac910c2dcf8e5850b91b4" ]
[ "ecosound/core/metadata.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 6 10:56:38 2020\n\n@author: xavier.mouy\n\"\"\"\n\nimport pandas as pd\nimport os\n\nclass DeploymentInfo():\n \"\"\"\n A class to handle acoustic deployment metadadata .\n\n Object carrying deployment metadata that can be used for example to populate\n metadata fields in Annotation or Measurement objects.\n\n Attributes\n ----------\n data : pandas DataFrame\n DataFranme with deploymnent information.\n\n Methods\n -------\n write_template(filepath)\n Create an empty template csv file with the proper headers.\n read(filepath)\n Populates the DeploymentInfo object with the information from a csv\n file. The csv file must follow the samestructure as the one created by\n the method write_template.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize object with empty .data attribute.\n\n Returns\n -------\n None.\n\n \"\"\"\n self.data =[]\n\n def write_template(self, filepath):\n \"\"\"\n Create a blank deployment file.\n\n Create an empty template csv file with the proper headers. The created\n csv file has only the headers and an operator must fill in all the\n deployment information manually. Once filled in, this file can be used\n by the DeploymentInfo.read method\n\n Parameters\n ----------\n filepath : str\n path and name of the deployment csv file to create.\n\n Returns\n -------\n None. Write a blank csv deployment file that an operator can fill in.\n\n \"\"\"\n if os.path.isfile(filepath):\n raise ValueError('File already exists.')\n metadata = pd.DataFrame({\n 'audio_channel_number': [],\n 'UTC_offset': [],\n 'sampling_frequency': [],\n 'bit_depth': [],\n 'mooring_platform_name': [],\n 'recorder_type': [],\n 'recorder_SN': [],\n 'hydrophone_model': [],\n 'hydrophone_SN': [],\n 'hydrophone_depth': [],\n 'location_name': [],\n 'location_lat': [],\n 'location_lon': [],\n 'location_water_depth': [],\n 'deployment_ID': [],\n 'deployment_date':[],\n 'recovery_date':[],\n })\n metadata.to_csv(filepath,\n sep=',',\n encoding='utf-8',\n header=True,\n index=False,\n )\n\n def read(self, filepath):\n \"\"\"\n Read metadata information from csv file.\n\n Load data from a csv file containing the deployment metadat information\n and populated the data attribute of the DeploymentInfo object. The csv\n file must follow the same headers and data format as the csv file\n template generated by DeploymentInfo.write_template.\n\n Parameters\n ----------\n filepath : str\n Path of the csv file to read.\n\n Returns\n -------\n None. Populates the pandas dataframe in teh .data attribute of the\n DeploymentInfo object.\n\n \"\"\"\n df = pd.read_csv(filepath,\n delimiter=',',\n #header=None,\n skiprows=0,\n na_values=None,\n )\n self.data = df\n return df\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]