| repo_name (string, length 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
|---|---|---|---|---|
| zhooda/learn-ml | ["e35765fdc3b27fd923cff89cc086d5093eeee25b"] | ["src/derivatives.py"] | [
"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef f(x):\n return 2*x**2\n\nx = np.arange(0, 5, 0.001)\ny = f(x)\n\nplt.plot(x, y)\n\ncolors = ['k', 'g', 'r', 'b', 'c']\n\ndef approximate_tangent_line(x, approximate_derivative):\n return (approximate_derivative*x) + b\n\nfor i in range(5):\n p2_delta = 0.0001\n x1 = i\n x2 = x1 + p2_delta\n\n y1 = f(x1)\n y2 = f(x2)\n\n print((x1, y1), (x2, y2))\n approximate_derivative =(y2-y1)/(x2-x1)\n b = y2 - approximate_derivative*x2\n\n to_plot = [x1-0.9, x1, x1+0.9]\n plt.scatter(x1, y1, c=colors[i])\n plt.plot([point for point in to_plot],\n [approximate_tangent_line(point, approximate_derivative) for point in to_plot],\n c=colors[i])\n\n print('approximate derivtive for f(x)', f'where x = {x1} is {approximate_derivative}')\n\nplt.show()"
] | [["matplotlib.pyplot.plot", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.scatter"]] |
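
For readability, here is a short, self-contained sketch of the technique that the `src/derivatives.py` record above implements with the listed matplotlib/numpy calls: a forward-difference approximation of f'(x) and a tangent line drawn through the sampled point. The `tangent_at` helper is illustrative and not part of the original file; the original instead computes the intercept inside the loop via a module-level `b`.

```python
import matplotlib.pyplot as plt
import numpy as np


def f(x):
    return 2 * x**2


def tangent_at(x1, delta=1e-4):
    """Return slope and intercept of an approximate tangent line at x1."""
    slope = (f(x1 + delta) - f(x1)) / delta   # forward-difference derivative
    intercept = f(x1) - slope * x1            # line passes through (x1, f(x1))
    return slope, intercept


x = np.arange(0, 5, 0.001)
plt.plot(x, f(x))

for x1 in range(5):
    slope, intercept = tangent_at(x1)
    xs = np.array([x1 - 0.9, x1, x1 + 0.9])   # short segment around x1
    plt.scatter(x1, f(x1))
    plt.plot(xs, slope * xs + intercept)
    print(f"approximate derivative of f(x) at x = {x1} is {slope}")

plt.show()
```
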
| dumpmemory/Uneven_training_data | ["63350037744b761619d4d8bc7d2122d2bffa2c95"] | ["cartography/classification/run_glue.py"] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFinetuning the library models for sequence classification on GLUE-style tasks\n(BERT, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa); modified for Dataset Cartography.\n\"\"\"\n\nimport _jsonnet\nimport argparse\nimport glob\nimport json\nimport logging\nimport numpy as np\nimport os\nimport random\nimport shutil\nimport torch\nfrom scipy.spatial import distance\nfrom scipy.stats import entropy\nfrom itertools import cycle\n\nimport torch.nn as nn\n#label propagation\nimport word_level_augment\nimport torch.nn.functional as F\nimport mmd_loss\n\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n BertConfig,\n BertTokenizer,\n RobertaConfig,\n RobertaTokenizer,\n get_linear_schedule_with_warmup,\n)\n\nfrom cartography.classification.glue_utils import adapted_glue_compute_metrics as compute_metrics\nfrom cartography.classification.glue_utils import adapted_glue_convert_examples_to_features as convert_examples_to_features\nfrom cartography.classification.glue_utils import glue_output_modes as output_modes\nfrom cartography.classification.glue_utils import glue_processors as processors\nfrom cartography.classification.diagnostics_evaluation import evaluate_by_category\nfrom cartography.classification.models import (\n AdaptedBertForMultipleChoice,\n AdaptedBertForSequenceClassification,\n AdaptedRobertaForMultipleChoice,\n AdaptedRobertaForSequenceClassification\n)\nfrom cartography.classification.multiple_choice_utils import convert_mc_examples_to_features\nfrom cartography.classification.params import Params, save_args_to_file\n\nfrom cartography.selection.selection_utils import log_training_dynamics\nfrom cartography.data_utils_glue import convert_string_to_unique_number\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\nALL_MODELS = sum(\n (\n tuple(conf.pretrained_config_archive_map.keys())\n for conf in (\n BertConfig,\n RobertaConfig,\n )\n ),\n (),\n)\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, AdaptedBertForSequenceClassification, BertTokenizer),\n \"bert_mc\": (BertConfig, AdaptedBertForMultipleChoice, BertTokenizer),\n \"roberta\": (RobertaConfig, AdaptedRobertaForSequenceClassification, RobertaTokenizer),\n \"roberta_mc\": (RobertaConfig, AdaptedRobertaForMultipleChoice, RobertaTokenizer),\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\n\nclass TripleDataset(torch.utils.data.Dataset):\n def __init__(self, 
*datasets):\n self.datasets = datasets\n\n def __getitem__(self, i):\n return tuple(d[i] for d in self.datasets)\n\n def __len__(self):\n return min(len(d) for d in self.datasets)\n\n\n\ndef train(args, train_dataset, model, tokenizer, flag_in_training):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n # train_sampler = RandomSampler(\n # train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset, batch_size=args.train_batch_size, shuffle=True)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (\n len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0\n },\n ]\n\n if flag_in_training =='finetune':\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.finetune_learning_rate, eps=args.adam_epsilon)\n else:\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True,\n )\n\n # Train!\n\n # args.local_rank = -1\n # get_world_size = 1\n # args.train_batch_size = 128\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" local_rank = %d\", args.local_rank)\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_this_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n # set global_step to gobal_step of last saved checkpoint from model path\n global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_this_epoch = global_step % (\n len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(f\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {global_step}\")\n logger.info(f\" Will skip the first {steps_trained_in_this_epoch} steps in the first epoch\")\n\n tr_loss, logging_loss, epoch_loss = 0.0, 0.0, 0.0\n model.zero_grad()\n if flag_in_training =='finetune':\n train_iterator = trange(epochs_trained,\n (int(args.num_train_epochs)*3),\n desc=\"Epoch\",\n disable=args.local_rank not in [-1, 0],\n mininterval=10,\n ncols=100)\n else:\n train_iterator = trange(epochs_trained,\n int(args.num_train_epochs),\n desc=\"Epoch\",\n disable=args.local_rank not in [-1, 0],\n mininterval=10,\n ncols=100)\n\n set_seed(args) # Added here for reproductibility\n best_dev_performance = 0\n best_epoch = epochs_trained\n\n train_acc = 0.0\n total_entropy = 0.\n total_sample_size = 0\n for epoch, _ in enumerate(train_iterator):\n epoch_iterator = tqdm(train_dataloader,\n desc=\"Iteration\",\n disable=args.local_rank not in [-1, 0],\n mininterval=10,\n ncols=100)\n\n train_iterator.set_description(f\"train_epoch: {epoch} train_acc: {train_acc:.4f}\")\n train_ids = None\n train_golds = None\n train_logits = None\n train_losses = None\n\n #label propagation\n # lingyige_loader = None\n for step, batch in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_this_epoch > 0:\n steps_trained_in_this_epoch -= 1\n continue\n #在这个for loop 里面或外面 加data loader 的判断,就是两个data loader load进去\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n entropy=outputs[-1]\n\n if train_logits is None: # Keep track of training dynamics.\n train_ids = batch[4].detach().cpu().numpy()\n train_logits = outputs[1].detach().cpu().numpy()\n train_golds = inputs[\"labels\"].detach().cpu().numpy()\n # train_golds = [l.tolist() for l in train_golds]\n # print('initial_train_gold', train_golds)\n train_losses = loss.detach().cpu().numpy()\n train_entropy = entropy.detach().cpu().numpy()\n print(entropy.size(), \"check entropy size\")\n\n else:\n train_ids = np.append(train_ids, 
batch[4].detach().cpu().numpy())\n train_logits = np.append(train_logits, outputs[1].detach().cpu().numpy(), axis=0)\n\n\n train_golds = np.concatenate((train_golds, inputs[\"labels\"].detach().cpu().numpy()), 0)\n\n train_losses = np.append(train_losses, loss.detach().cpu().numpy())\n train_entropy = np.append(train_entropy, entropy.detach().cpu().numpy())\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if (\n args.local_rank in [-1, 0] and\n args.logging_steps > 0 and\n global_step % args.logging_steps == 0\n ):\n epoch_log = {}\n # Only evaluate when single GPU otherwise metrics may not average well\n if args.local_rank == -1 and args.evaluate_during_training_epoch:\n logger.info(f\"From within the epoch at step {step}\")\n results, _ = evaluate(args, model, tokenizer)\n for key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n epoch_log[eval_key] = value\n\n epoch_log[\"learning_rate\"] = scheduler.get_lr()[0]\n epoch_log[\"loss\"] = (tr_loss - logging_loss) / args.logging_steps\n logging_loss = tr_loss\n\n for key, value in epoch_log.items():\n tb_writer.add_scalar(key, value, global_step)\n logger.info(json.dumps({**epoch_log, **{\"step\": global_step}}))\n\n if (\n args.local_rank in [-1, 0] and\n args.save_steps > 0 and\n global_step % args.save_steps == 0\n ):\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n epoch_iterator.set_description(f\"lr = {scheduler.get_lr()[0]:.8f}, \"\n f\"loss = {(tr_loss-epoch_loss)/(step+1):.4f}\")\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n # mean_entropy = total_entropy / total_sample_size\n mean_entropy= np.sum(train_entropy) / np.where(train_entropy < -(np.ones_like(train_entropy) * 1e-10),\n np.ones_like(train_entropy),\n np.zeros_like(train_entropy)).sum()\n logger.info(f\"*********************************selected_questions*********************************: {mean_entropy:.4f}***\")\n\n\n #### Post epoch eval ####\n # Only evaluate when single GPU otherwise metrics may not average well\n if args.local_rank == -1 and args.evaluate_during_training:\n best_dev_performance, best_epoch = save_model(\n args, model, tokenizer, epoch, best_epoch, 
best_dev_performance)\n\n\n log_training_dynamics(output_dir=args.output_dir,\n epoch=epoch,\n train_ids=list(train_ids),\n train_logits=list(train_logits),\n train_golds=list(train_golds))\n\n\n train_result = compute_metrics(args.task_name, np.argmax(train_logits, axis=1), train_golds)\n\n\n train_acc = train_result[\"acc\"]\n\n epoch_log = {\"epoch\": epoch,\n \"train_acc\": train_acc,\n \"best_dev_performance\": best_dev_performance,\n \"avg_batch_loss\": (tr_loss - epoch_loss) / args.per_gpu_train_batch_size,\n \"learning_rate\": scheduler.get_lr()[0],}\n epoch_loss = tr_loss\n\n logger.info(f\" End of epoch : {epoch}\")\n with open(os.path.join(args.output_dir, f\"eval_metrics_train.json\"), \"a\") as toutfile:\n toutfile.write(json.dumps(epoch_log) + \"\\n\")\n for key, value in epoch_log.items():\n tb_writer.add_scalar(key, value, global_step)\n logger.info(f\" {key}: {value:.6f}\")\n\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n elif args.evaluate_during_training and epoch - best_epoch >= args.patience:\n logger.info(f\"Ran out of patience. Best epoch was {best_epoch}. \"\n f\"Stopping training at epoch {epoch} out of {args.num_train_epochs} epochs.\")\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\n\ndef interleave(x, size):\n s = list(x.shape)\n return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])\n\n\n\n\ndef lp_train(args, train_dataset, single_dataset, single_aug_dataset, model, tokenizer, flag_in_training):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n # train_sampler = RandomSampler(\n # train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset, batch_size=args.train_batch_size, shuffle=True)\n\n if args.label_propagation and args.do_finetune:\n single_aug= TripleDataset(single_dataset, single_aug_dataset)\n\n single_train_dataloader = DataLoader(\n single_aug, batch_size=args.train_batch_size, shuffle=True)\n\n\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.ft_num_train_epochs = args.max_steps // (\n len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.ft_num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0\n },\n ]\n\n if flag_in_training =='finetune':\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.finetune_learning_rate, eps=args.adam_epsilon)\n else:\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n 
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True,\n )\n\n # Train!\n\n # args.local_rank = -1\n # get_world_size = 1\n # args.train_batch_size = 128\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.ft_num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" local_rank = %d\", args.local_rank)\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_this_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n # set global_step to gobal_step of last saved checkpoint from model path\n global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_this_epoch = global_step % (\n len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(f\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {global_step}\")\n logger.info(f\" Will skip the first {steps_trained_in_this_epoch} steps in the first epoch\")\n\n tr_loss, logging_loss, epoch_loss = 0.0, 0.0, 0.0\n model.zero_grad()\n if flag_in_training =='finetune':\n train_iterator = trange(epochs_trained,\n int(args.ft_num_train_epochs),\n desc=\"Epoch\",\n disable=args.local_rank not in [-1, 0],\n mininterval=10,\n ncols=100)\n else:\n train_iterator = trange(epochs_trained,\n int(args.num_train_epochs),\n desc=\"Epoch\",\n disable=args.local_rank not in [-1, 0],\n mininterval=10,\n ncols=100)\n\n set_seed(args) # Added here for reproductibility\n best_dev_performance = 0\n best_epoch = epochs_trained\n\n train_acc = 0.0\n total_entropy = 0.\n total_sample_size = 0\n for epoch, _ in enumerate(train_iterator):\n epoch_iterator = tqdm(train_dataloader,\n desc=\"Iteration\",\n disable=args.local_rank not in [-1, 0],\n mininterval=10,\n ncols=100)\n\n train_iterator.set_description(f\"train_epoch: {epoch} train_acc: {train_acc:.4f}\")\n train_ids = None\n train_golds = None\n train_logits = None\n train_losses = None\n\n #label propagation\n # lingyige_loader = None\n single_iter = 
iter(single_train_dataloader)\n for step, batch in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_this_epoch > 0:\n steps_trained_in_this_epoch -= 1\n continue\n #在这个for loop 里面或外面 加data loader 的判断,就是两个data loader load进去\n\n model.train()\n\n try:\n inputs_u_w, inputs_u_s = single_iter.next()\n except StopIteration:\n single_iter = iter(single_train_dataloader)\n inputs_u_w, inputs_u_s = single_iter.next()\n\n\n\n\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n\n if args.label_propagation and args.do_finetune:\n\n # import pdb\n # pdb.set_trace()\n batch_single = tuple(t.to(args.device) for t in inputs_u_w)\n inputs_single = {\"input_ids\": batch_single[0], \"attention_mask\": batch_single[1], \"labels\": batch_single[3]}\n if args.model_type != \"distilbert\":\n inputs_single[\"token_type_ids\"] = (\n batch_single[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n\n batch_single_aug = tuple(t.to(args.device) for t in inputs_u_s)\n inputs_single_aug = {\"input_ids\": batch_single_aug[0], \"attention_mask\": batch_single_aug[1], \"labels\": batch_single_aug[3]}\n if args.model_type != \"distilbert\":\n inputs_single_aug[\"token_type_ids\"] = (\n batch_single_aug[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n\n\n\n reg_loss=nn.KLDivLoss()(F.log_softmax(model(**inputs_single_aug)[1]), torch.softmax(model(**inputs_single)[1], dim=-1).detach())\n\n\n loss= loss + reg_loss *1.0\n entropy=outputs[-1]\n\n if train_logits is None: # Keep track of training dynamics.\n train_ids = batch[4].detach().cpu().numpy()\n train_logits = outputs[1].detach().cpu().numpy()\n train_golds = inputs[\"labels\"].detach().cpu().numpy()\n # train_golds = [l.tolist() for l in train_golds]\n # print('initial_train_gold', train_golds)\n train_losses = loss.detach().cpu().numpy()\n train_entropy = entropy.detach().cpu().numpy()\n print(entropy.size(), \"check entropy size\")\n\n else:\n train_ids = np.append(train_ids, batch[4].detach().cpu().numpy())\n train_logits = np.append(train_logits, outputs[1].detach().cpu().numpy(), axis=0)\n\n\n train_golds = np.concatenate((train_golds, inputs[\"labels\"].detach().cpu().numpy()), 0)\n\n train_losses = np.append(train_losses, loss.detach().cpu().numpy())\n train_entropy = np.append(train_entropy, entropy.detach().cpu().numpy())\n\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n 
scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if (\n args.local_rank in [-1, 0] and\n args.logging_steps > 0 and\n global_step % args.logging_steps == 0\n ):\n epoch_log = {}\n # Only evaluate when single GPU otherwise metrics may not average well\n if args.local_rank == -1 and args.evaluate_during_training_epoch:\n logger.info(f\"From within the epoch at step {step}\")\n results, _ = evaluate(args, model, tokenizer)\n for key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n epoch_log[eval_key] = value\n\n epoch_log[\"learning_rate\"] = scheduler.get_lr()[0]\n epoch_log[\"loss\"] = (tr_loss - logging_loss) / args.logging_steps\n logging_loss = tr_loss\n\n for key, value in epoch_log.items():\n tb_writer.add_scalar(key, value, global_step)\n logger.info(json.dumps({**epoch_log, **{\"step\": global_step}}))\n\n if (\n args.local_rank in [-1, 0] and\n args.save_steps > 0 and\n global_step % args.save_steps == 0\n ):\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n epoch_iterator.set_description(f\"lr = {scheduler.get_lr()[0]:.8f}, \"\n f\"loss = {(tr_loss-epoch_loss)/(step+1):.4f}\")\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n # mean_entropy = total_entropy / total_sample_size\n mean_entropy= np.sum(train_entropy) / np.where(train_entropy < -(np.ones_like(train_entropy) * 1e-10),\n np.ones_like(train_entropy),\n np.zeros_like(train_entropy)).sum()\n logger.info(f\"*********************************selected_questions*********************************: {mean_entropy:.4f}***\")\n # logger.info(f\"*** Found BEST model, and saved checkpoint. 
\"\n # f\"BEST dev performance : {dev_performance:.4f} ***\")\n\n\n #### Post epoch eval ####\n # Only evaluate when single GPU otherwise metrics may not average well\n if args.local_rank == -1 and args.evaluate_during_training:\n best_dev_performance, best_epoch = save_model(\n args, model, tokenizer, epoch, best_epoch, best_dev_performance)\n\n\n log_training_dynamics(output_dir=args.output_dir,\n epoch=epoch,\n train_ids=list(train_ids),\n train_logits=list(train_logits),\n train_golds=list(train_golds))\n\n\n train_result = compute_metrics(args.task_name, np.argmax(train_logits, axis=1), train_golds)\n\n\n train_acc = train_result[\"acc\"]\n\n epoch_log = {\"epoch\": epoch,\n \"train_acc\": train_acc,\n \"best_dev_performance\": best_dev_performance,\n \"avg_batch_loss\": (tr_loss - epoch_loss) / args.per_gpu_train_batch_size,\n \"learning_rate\": scheduler.get_lr()[0],}\n epoch_loss = tr_loss\n\n logger.info(f\" End of epoch : {epoch}\")\n with open(os.path.join(args.output_dir, f\"eval_metrics_train.json\"), \"a\") as toutfile:\n toutfile.write(json.dumps(epoch_log) + \"\\n\")\n for key, value in epoch_log.items():\n tb_writer.add_scalar(key, value, global_step)\n logger.info(f\" {key}: {value:.6f}\")\n\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n elif args.evaluate_during_training and epoch - best_epoch >= args.patience:\n logger.info(f\"Ran out of patience. Best epoch was {best_epoch}. \"\n f\"Stopping training at epoch {epoch} out of {args.ft_num_train_epochs} epochs.\")\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\n\ndef save_model(args, model, tokenizer, epoch, best_epoch, best_dev_performance):\n results, _ = evaluate(args, model, tokenizer, prefix=\"in_training\")\n # TODO(SS): change hard coding `acc` as the desired metric, might not work for all tasks.\n desired_metric = \"acc\"\n dev_performance = results.get(desired_metric)\n # if dev_performance > best_dev_performance:\n if True:\n best_epoch = epoch\n best_dev_performance = dev_performance\n\n # Save model checkpoint\n # Take care of distributed/parallel training\n model_to_save = (model.module if hasattr(model, \"module\") else model)\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n logger.info(f\"*** Found BEST model, and saved checkpoint. 
\"\n f\"BEST dev performance : {dev_performance:.4f} ***\")\n return best_dev_performance, best_epoch\n\n\n#Entropy\ndef JSD(P, Q):\n M = 0.5 * (P + Q)\n # print('entropy', entropy(P, M), P, M)\n return 0.5 * (entropy(P, M) + entropy(Q, M))\n\n\n#torch Kl_div\n\ndef JSD_2(P, Q):\n P= np.array(P, dtype=float)\n Q= np.array(Q, dtype=float)\n M = 0.5 * (P+Q)\n _jsd = 0.5* ((torch.nn.functional.kl_div(torch.log(torch.from_numpy(M)), torch.from_numpy(P)).numpy() - 0) + (torch.nn.functional.kl_div(torch.log(torch.from_numpy(M)), torch.from_numpy(Q)).numpy() - 0))\n return _jsd\n\ndef evaluate(args, model, tokenizer, prefix=\"\", eval_split=\"dev\"):\n # We do not really need a loop to handle MNLI double evaluation (matched, mis-matched).\n eval_task_names = (args.task_name,)\n eval_outputs_dirs = (args.output_dir,)\n\n results = {}\n all_predictions = {}\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n eval_dataset, pair_id = eval_load_and_cache_examples(\n args, eval_task, tokenizer, evaluate=True, data_split=f\"{eval_split}_{prefix}\")\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu eval\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(f\"***** Running {eval_task} {prefix} evaluation on {eval_split} *****\")\n logger.info(f\" Num examples = {len(eval_dataset)}\")\n logger.info(f\" Batch size = {args.eval_batch_size}\")\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n\n example_ids = []\n gold_labels = []\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\", mininterval=10, ncols=100):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n example_ids += batch[4].tolist()\n gold_labels += batch[3].tolist()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(\n out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n if args.output_mode == \"classification\":\n probs = torch.nn.functional.softmax(torch.Tensor(preds), dim=-1)\n if args.do_temperature:\n probs = torch.nn.functional.softmax(torch.Tensor(preds)/1.75, dim=-1)\n max_confidences = (torch.max(probs, dim=-1)[0]).tolist()\n preds = np.argmax(preds, axis=1) # Max of logit is the same as max of probability.\n elif args.output_mode == \"regression\":\n preds = np.squeeze(preds)\n\n result = compute_metrics(eval_task, preds, out_label_ids)\n # order: [E, N, C]\n results.update(result)\n\n\n\n output_eval_file = os.path.join(\n eval_output_dir, 
f\"eval_metrics_{eval_task}_{eval_split}_{prefix}.json\")\n logger.info(f\"***** {eval_task} {eval_split} results {prefix} *****\")\n for key in sorted(result.keys()):\n logger.info(f\"{eval_task} {eval_split} {prefix} {key} = {result[key]:.4f}\")\n with open(output_eval_file, \"a\") as writer:\n writer.write(json.dumps(results) + \"\\n\")\n\n # predictions\n all_predictions[eval_task] = []\n output_pred_file = os.path.join(\n eval_output_dir, f\"predictions_{eval_task}_{eval_split}_{prefix}.lst\")\n with open(output_pred_file, \"w\") as writer:\n logger.info(f\"***** Write {eval_task} {eval_split} predictions {prefix} *****\")\n for ex_id, pred, gold, max_conf, prob in zip(\n example_ids, preds, gold_labels, max_confidences, probs.tolist()):\n # print(pred, prob, gold);input()\n # print('gold_label', processors[args.task_name]().get_labels()[int(max(gold))])\n record = {\"guid\": ex_id,\n \"label\": processors[args.task_name]().get_labels()[pred],\n \"gold\": processors[args.task_name]().get_labels()[int(np.argmax(gold))],\n \"confidence\": max_conf,\n \"probabilities\": prob}\n all_predictions[eval_task].append(record)\n writer.write(json.dumps(record) + \"\\n\")\n\n # order: [E, N, C]\n\n combined_id = dict()\n for id in pair_id[0]:\n each_id= convert_string_to_unique_number(id)\n combined_id[each_id] = id\n\n ours_file = os.path.join(\n eval_output_dir, f\"ours_{eval_task}_{eval_split}_{prefix}.json\")\n result_dict=dict()\n\n result_dict['ours']=dict()\n js_divergence_list = []\n prediction_entropy_list=[]\n kl_divergence_list = []\n new_js_divergence_list=[]\n new_js_divergence_list_2 = []\n\n\n with open(ours_file, \"w\") as writer:\n logger.info(f\"***** Write ours {eval_task} {eval_split} predictions {prefix} *****\")\n for ex_id, pred, gold, max_conf, prob in zip(\n example_ids, preds, gold_labels, max_confidences, probs.tolist()):\n\n # print(pred, prob, gold);input()\n if ex_id in list(combined_id.keys()):\n ex_idvalue = combined_id[ex_id]\n else:\n ex_idvalue ='000000'\n\n # ex_idvalue =combined_id[ex_id]\n result_dict['ours'][ex_idvalue]= {\"uid\": ex_idvalue,\n \"predicted_probabilities\": prob,\n \"predicted_label\": processors[args.task_name]().get_labels()[pred]}\n\n gold_dist = gold\n\n\n cur_js_divergence = distance.jensenshannon(gold_dist, prob)\n if np.isnan(cur_js_divergence):\n print(\"JS for this example is `nan', we will set JS to 0 for the current example. 
\"\n \"This can be a potential error.\",\n \"Gold distribution:\", gold_dist,\n \"Model distribution:\", prob,)\n # \"UID:\", ex_idvalue)\n cur_js_divergence = 0 # set error to 0.\n else:\n pass\n\n new_cur_js_divergence=JSD(np.array(prob, dtype=float), np.array(gold_dist, dtype=float))\n\n new_cur_js_divergence_2 = JSD_2(prob, gold_dist)\n\n\n js_divergence_list.append(cur_js_divergence)\n new_js_divergence_list.append(new_cur_js_divergence)\n new_js_divergence_list_2.append(new_cur_js_divergence_2)\n\n # cur_kl_divergence = entropy(gold_dist, prob)\n\n prediction_entropy = entropy(prob)\n prediction_entropy_list.append(prediction_entropy)\n\n # print(prob, gold_dist);input()\n cur_kl_divergence = torch.nn.functional.kl_div(torch.log(torch.from_numpy(np.array(prob, dtype=float))), torch.from_numpy(np.array(gold_dist, dtype=float))).numpy() - 0\n kl_divergence_list.append(cur_kl_divergence)\n\n writer.write(json.dumps(result_dict) + \"\\n\")\n\n avg_js_div = np.mean(js_divergence_list)\n new_avg_js_div= np.mean(new_js_divergence_list)\n new_avg_js_div_2 = np.mean(new_js_divergence_list_2)\n avg_kl_div = np.mean(kl_divergence_list)\n avg_entropy=np.mean(prediction_entropy_list)\n\n logger.info(f\"***** JS {eval_task} {eval_split} {prefix} = {avg_js_div:.4f}\")\n logger.info(f\"***** entropy JS {eval_task} {eval_split} {prefix} = {new_avg_js_div:.4f}\")\n logger.info(f\"***** kl JS {eval_task} {eval_split} {prefix} = {new_avg_js_div_2:.4f}\")\n logger.info(f\"***** KL {eval_task} {eval_split} {prefix} = {avg_kl_div:.4f}\")\n logger.info(f\"***** Prediction Entropy {eval_task} {eval_split} {prefix} = {avg_entropy:.4f}\")\n return results, all_predictions\n\n\ndef load_dataset(args, task, eval_split=\"train\"):\n processor = processors[task]()\n if eval_split == \"train\":\n if args.train is None:\n examples = processor.get_train_examples(args.data_dir)\n else:\n examples = processor.get_examples(args.train, \"train\")\n\n elif \"finetune\" in eval_split:\n if args.finetune is None:\n examples = processor.get_finetune_examples(args.data_dir)\n else:\n examples = processor.get_examples(args.finetune, \"finetune\")\n\n elif \"dev\" in eval_split:\n if args.dev is None:\n examples = processor.get_dev_examples(args.data_dir)\n else:\n examples = processor.get_examples(args.dev, \"dev\")\n elif \"test\" in eval_split:\n if args.test is None:\n examples = processor.get_test_examples(args.data_dir)\n else:\n examples = processor.get_examples(args.test, \"test\")\n else:\n raise ValueError(f\"eval_split should be train / dev / test, but was given {eval_split}\")\n\n return examples\n\n\ndef get_winogrande_tensors(features):\n def select_field(features, field):\n return [[choice[field] for choice in feature.choices_features] for feature in features]\n\n # Convert to Tensors and build dataset\n input_ids = torch.tensor(select_field(features, \"input_ids\"), dtype=torch.long)\n input_mask = torch.tensor(select_field(features, \"input_mask\"), dtype=torch.long)\n segment_ids = torch.tensor(select_field(features, \"segment_ids\"), dtype=torch.long)\n label_ids = torch.tensor([f.label for f in features], dtype=torch.long)\n example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)\n\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids, example_ids)\n return dataset\n\n\ndef load_and_cache_examples(args, task, tokenizer, evaluate=False, data_split=\"train\"):\n if args.local_rank not in [-1, 0] and not evaluate:\n # Make sure only the first process in distributed 
training process the dataset,\n # and the others will use the cache\n torch.distributed.barrier()\n\n processor = processors[task]()\n output_mode = output_modes[task]\n\n if not os.path.exists(args.features_cache_dir):\n os.makedirs(args.features_cache_dir)\n cached_features_file = os.path.join(\n args.features_cache_dir,\n \"cached_{}_{}_{}_{}\".format(\n data_split,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n # Load data features from cache or dataset file\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n # original_id = torch.load(cached_id_file)\n\n\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n print('label_list', label_list)\n if task in [\"mnli\", \"mnli-mm\"] and args.model_type in [\"roberta\", \"xlmroberta\"]:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1]\n examples = load_dataset(args, task, data_split)\n original_id = []\n if task == \"winogrande\":\n features = convert_mc_examples_to_features(\n examples,\n label_list,\n args.max_seq_length,\n tokenizer,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.pad_token_id,\n pad_token_segment_id=tokenizer.pad_token_type_id,)\n else:\n features, or_id = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,)\n original_id.append(or_id)\n print('len_1', len(features))\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n # logger.info(\"Saving original_id into cached file %s\", cached_id_file)\n torch.save(features, cached_features_file)\n # torch.save(original_id, cached_id_file)\n\n if args.local_rank == 0 and not evaluate:\n # Make sure only the first process in distributed training\n # process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n if task == \"winogrande\":\n return get_winogrande_tensors(features)\n\n # Convert to Tensors and build dataset\n\n if args.do_train:\n if args.setting =='1':\n\n features = features\n print('setting_1')\n if args.setting == '2':\n features=features\n print('setting_2')\n if args.setting == '3':\n features=features\n print('setting_3')\n if args.setting =='549k_2_1':\n sub_features = features\n new_features= random.sample(sub_features, 544368)\n features = new_features\n if args.setting =='549k_2_2':\n sub_features = features\n new_features= random.sample(sub_features, 544368)\n features = new_features\n if args.setting =='549k_2_3':\n sub_features = features\n new_features= random.sample(sub_features, 544368)\n features = new_features\n if args.setting =='549k_3_1':\n sub_features = features\n new_features= random.sample(sub_features, 539368)\n features = new_features\n\n\n print('len_2', len(features))\n # print('label', [item.label for item in features])\n\n\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = 
torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)\n # print('example_id', all_example_ids)\n if output_mode == \"classification\":\n final_label=[]\n for f in features:\n if type(f.label)==list:\n n_0 = 0\n n_1 = 0\n n_2 = 0\n for i in f.label:\n if i==0:\n n_0=n_0+1\n if i==1:\n n_1=n_1+1\n if i==2:\n n_2=n_2+1\n final_label.append([n_0/10, n_1/10, n_2/10])\n else:\n if f.label == 0:\n label = [1, 0, 0]\n\n if f.label == 1:\n label = [0, 1, 0]\n\n if f.label == 2:\n label = [0, 0, 1]\n\n final_label.append(label)\n all_labels = torch.tensor([item for item in final_label], dtype=torch.float)\n\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)\n return dataset #, original_id\n\ndef finetune_load_and_cache_examples(args, task, tokenizer, evaluate=False, data_split=\"finetune\"):\n if args.local_rank not in [-1, 0] and not evaluate:\n # Make sure only the first process in distributed training process the dataset,\n # and the others will use the cache\n torch.distributed.barrier()\n\n processor = processors[task]()\n output_mode = output_modes[task]\n\n if not os.path.exists(args.features_cache_dir):\n os.makedirs(args.features_cache_dir)\n cached_features_file = os.path.join(\n args.features_cache_dir,\n \"cached_{}_{}_{}_{}\".format(\n data_split,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n # Load data features from cache or dataset file\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n # original_id = torch.load(cached_id_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n print('label_list', label_list)\n if task in [\"mnli\", \"mnli-mm\"] and args.model_type in [\"roberta\", \"xlmroberta\"]:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1]\n examples = load_dataset(args, task, data_split)\n original_id = []\n if task == \"winogrande\":\n features = convert_mc_examples_to_features(\n examples,\n label_list,\n args.max_seq_length,\n tokenizer,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.pad_token_id,\n pad_token_segment_id=tokenizer.pad_token_type_id,)\n else:\n features, or_id = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,)\n original_id.append(or_id)\n print('len_1', len(features))\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n # logger.info(\"Saving original_id into cached file %s\", cached_id_file)\n torch.save(features, cached_features_file)\n # torch.save(original_id, cached_id_file)\n\n if args.local_rank == 0 and not 
evaluate:\n # Make sure only the first process in distributed training\n # process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n if task == \"winogrande\":\n return get_winogrande_tensors(features)\n\n # Convert to Tensors and build dataset\n\n print('finetune_features', len(features))\n\n\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)\n if output_mode == \"classification\":\n final_label=[]\n for f in features:\n if type(f.label)==list:\n n_0 = 0\n n_1 = 0\n n_2 = 0\n for i in f.label:\n if i==0:\n n_0=n_0+1\n if i==1:\n n_1=n_1+1\n if i==2:\n n_2=n_2+1\n final_label.append([n_0/10, n_1/10, n_2/10])\n else:\n if f.label==0:\n label=[1, 0, 0]\n if f.label==1:\n label=[0, 1, 0]\n if f.label==2:\n label=[0, 0, 1]\n final_label.append(label)\n all_labels = torch.tensor([item for item in final_label], dtype=torch.float)\n\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)\n return dataset #, original_id\n\n\ndef Convert(string):\n li = list(string.split(\" \"))\n return li\n\n\n\ndef build_vocab(examples):\n vocab = {}\n def add_to_vocab(word_list):\n for word in word_list:\n if word not in vocab:\n vocab[word] = len(vocab)\n for i in range(len(examples)):\n add_to_vocab(Convert(examples[i].text_a))\n if examples[i].text_b:\n add_to_vocab(Convert(examples[i].text_b))\n return vocab\n\n\ndef lp_finetune_load_and_cache_examples(args, task, tokenizer, label_flag, evaluate=False, data_split=\"train\"):\n if args.local_rank not in [-1, 0] and not evaluate:\n # Make sure only the first process in distributed training process the dataset,\n # and the others will use the cache\n torch.distributed.barrier()\n\n processor = processors[task]()\n output_mode = output_modes[task]\n\n if not os.path.exists(args.features_cache_dir):\n os.makedirs(args.features_cache_dir)\n cached_features_file = os.path.join(\n args.features_cache_dir,\n \"cached_{}_{}_{}_{}\".format(\n data_split,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n # Load data features from cache or dataset file\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n # original_id = torch.load(cached_id_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n print('label_list', label_list)\n if task in [\"mnli\", \"mnli-mm\"] and args.model_type in [\"roberta\", \"xlmroberta\"]:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1]\n\n\n #label propagation\n\n if label_flag=='single_aug_label':\n examples_beg = load_dataset(args, task, data_split)\n\n data_stats = word_level_augment.get_data_stats(examples_beg)\n\n aug_ops = \"tf_idf-0.18\"\n\n word_vocab = build_vocab(examples_beg)\n\n examples_aug = word_level_augment.word_level_augment(\n examples_beg, aug_ops, word_vocab, 
data_stats)\n\n for i in examples_aug:\n listToStr_a = ' '.join([str(elem) for elem in i.text_a])\n listToStr_b = ' '.join([str(elem) for elem in i.text_b])\n i.text_a = listToStr_a\n i.text_b =listToStr_b\n if label_flag =='single_label':\n original_examples = load_dataset(args, task, data_split)\n\n\n # import pdb\n # pdb.set_trace()\n\n\n original_id = []\n if task == \"winogrande\":\n examples =original_examples\n features = convert_mc_examples_to_features(\n examples,\n label_list,\n args.max_seq_length,\n tokenizer,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.pad_token_id,\n pad_token_segment_id=tokenizer.pad_token_type_id,)\n else:\n if label_flag =='single_label':\n examples = original_examples\n # print('single', examples[0])\n features, or_id = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,)\n original_id.append(or_id)\n print('len_1', len(features))\n if label_flag =='single_aug_label':\n examples = examples_aug\n # print('aug', examples[0])\n features, or_id = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0, )\n original_id.append(or_id)\n print('len_1', len(features))\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n # logger.info(\"Saving original_id into cached file %s\", cached_id_file)\n torch.save(features, cached_features_file)\n # torch.save(original_id, cached_id_file)\n\n if args.local_rank == 0 and not evaluate:\n # Make sure only the first process in distributed training\n # process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n if task == \"winogrande\":\n return get_winogrande_tensors(features)\n\n # Convert to Tensors and build dataset\n\n print('finetune_features', len(features))\n\n\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)\n # print('example_id', all_example_ids)\n if output_mode == \"classification\":\n final_label=[]\n for f in features:\n if type(f.label)==list:\n n_0 = 0\n n_1 = 0\n n_2 = 0\n for i in f.label:\n if i==0:\n n_0=n_0+1\n if i==1:\n n_1=n_1+1\n if i==2:\n n_2=n_2+1\n final_label.append([n_0/10, n_1/10, n_2/10])\n else:\n if f.label==0:\n label=[1, 0, 0]\n if f.label==1:\n label=[0, 1, 0]\n if f.label==2:\n label=[0, 0, 1]\n final_label.append(label)\n all_labels = torch.tensor([item for item in final_label], dtype=torch.float)\n # print('final_label', final_label)\n # print('train_all_labels', all_labels)\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, 
all_attention_mask, all_token_type_ids, all_labels, all_example_ids)\n return dataset #, original_id\n\n\ndef eval_load_and_cache_examples(args, task, tokenizer, evaluate=True, data_split=f\"dev_\"\"\"):\n if args.local_rank not in [-1, 0] and not evaluate:\n # Make sure only the first process in distributed training process the dataset,\n # and the others will use the cache\n torch.distributed.barrier()\n\n processor = processors[task]()\n output_mode = output_modes[task]\n\n if not os.path.exists(args.features_cache_dir):\n os.makedirs(args.features_cache_dir)\n cached_features_file = os.path.join(\n args.features_cache_dir,\n \"cached_{}_{}_{}_{}\".format(\n data_split,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n cached_id_file = os.path.join(\n args.features_cache_dir,\n \"id_cached_{}_{}_{}_{}\".format(\n data_split,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n\n cached_eval_data_file = os.path.join(\n args.features_cache_dir,\n \"eval_data_cached_{}_{}_{}_{}\".format(\n data_split,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n\n # Load data features from cache or dataset file\n if os.path.exists(cached_eval_data_file):\n logger.info(\"Loading features from cached file %s\", cached_eval_data_file)\n features = torch.load(cached_eval_data_file)\n original_id = torch.load(cached_id_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n if task in [\"mnli\", \"mnli-mm\"] and args.model_type in [\"roberta\", \"xlmroberta\"]:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1]\n examples = load_dataset(args, task, data_split)\n original_id = []\n if task == \"winogrande\":\n features = convert_mc_examples_to_features(\n examples,\n label_list,\n args.max_seq_length,\n tokenizer,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.pad_token_id,\n pad_token_segment_id=tokenizer.pad_token_type_id,)\n else:\n features, or_id = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,)\n original_id.append(or_id)\n\n\n logger.info(\"***********Create New Feautrs****************************************\")\n print('creating_eval_len_new_features', len(features))\n\n\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n logger.info(\"Saving original_id into cached file %s\", cached_id_file)\n logger.info(\"Saving eval data into cached file %s\", cached_eval_data_file)\n torch.save(features, cached_features_file)\n torch.save(original_id, cached_id_file)\n torch.save(features, cached_eval_data_file)\n\n if args.local_rank == 0 and not evaluate:\n # Make sure only the first process in distributed training\n # process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n if task == \"winogrande\":\n return get_winogrande_tensors(features)\n\n\n print('eval_features', len(features))\n\n all_input_ids = 
torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)\n if output_mode == \"classification\":\n final_label = []\n for f in features:\n if type(f.label) == list:\n n_0 = 0\n n_1 = 0\n n_2 = 0\n for i in f.label:\n if i == 0:\n n_0 = n_0 + 1\n if i == 1:\n n_1 = n_1 + 1\n if i == 2:\n n_2 = n_2 + 1\n final_label.append([n_0 / 10, n_1 / 10, n_2 / 10])\n else:\n if f.label == 0:\n label = [1, 0, 0]\n if f.label == 1:\n label = [0, 1, 0]\n if f.label == 2:\n label = [0, 0, 1]\n final_label.append(label)\n all_labels = torch.tensor([item for item in final_label], dtype=torch.float)\n\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)\n return dataset, original_id\n\n\n\ndef run_transformer(args):\n if (os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir):\n raise ValueError(\n f\"Output directory ({args.output_dir}) already exists and is not empty.\"\n f\" Use --overwrite_output_dir to overcome.\")\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see\n # https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n logger.info(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,)\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,)\n\n # Set seed\n set_seed(args)\n\n # Prepare GLUE task\n args.task_name = args.task_name.lower()\n if args.task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n processor = processors[args.task_name]()\n args.output_mode = output_modes[args.task_name]\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n # Make sure only the first process in distributed training will download model & vocab\n torch.distributed.barrier()\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n 
finetuning_task=args.task_name,\n cache_dir=args.cache_dir if args.cache_dir else None,)\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,)\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,)\n\n if args.local_rank == 0:\n # Make sure only the first process in distributed training will download model & vocab\n torch.distributed.barrier()\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n args.learning_rate = float(args.learning_rate)\n\n if args.setting !='1' and args.do_finetune:\n args.finetune_learning_rate = float(args.finetune_learning_rate)\n if args.do_train:\n # If training for the first time, remove cache. If training from a checkpoint, keep cache.\n if os.path.exists(args.features_cache_dir) and not args.overwrite_output_dir:\n logger.info(f\"Found existing cache for the same seed {args.seed}: \"\n f\"{args.features_cache_dir}...Deleting!\")\n shutil.rmtree(args.features_cache_dir)\n\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n save_args_to_file(args, mode=\"train\")\n\n train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)\n\n print('len_train_dataset', len(train_dataset))\n\n flag_in_training ='train'\n global_step, tr_loss = train(args, train_dataset, model, tokenizer, flag_in_training)\n logger.info(f\" global_step = {global_step}, average loss = {tr_loss:.4f}\")\n\n\n\n #Finetune small dataset\n if args.setting !='1' and args.do_finetune:\n finetune_dataset=finetune_load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)\n flag_in_training = 'finetune'\n if args.label_propagation:\n label_flag='single_label'\n single_dataset = lp_finetune_load_and_cache_examples(args, args.task_name, tokenizer, label_flag, evaluate=False)\n\n label_flag = 'single_aug_label'\n single_aug_dataset=lp_finetune_load_and_cache_examples(args, args.task_name, tokenizer, label_flag, evaluate=False)\n\n global_step, tr_loss = lp_train(args, finetune_dataset, single_dataset, single_aug_dataset,\n model, tokenizer, flag_in_training)\n logger.info(f\" global_step = {global_step}, average loss = {tr_loss:.4f}\")\n else:\n global_step, tr_loss = train(args, finetune_dataset,\n model, tokenizer, flag_in_training)\n logger.info(f\" global_step = {global_step}, average loss = {tr_loss:.4f}\")\n\n\n # Saving best-practices: if you use defaults names for the model,\n # you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n\n if not args.evaluate_during_training:\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n\n # Take care of distributed/parallel training\n model_to_save = (model.module if hasattr(model, \"module\") else model)\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n 
logger.info(\" **** Done with training ****\")\n\n # Evaluation\n eval_splits = []\n if args.do_eval:\n eval_splits.append(\"dev\")\n if args.do_test:\n eval_splits.append(\"test\")\n\n if args.do_test or args.do_eval and args.local_rank in [-1, 0]:\n\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(\n glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n results = {}\n prefix = args.test.split(\"/\")[-1].split(\".tsv\")[0] if args.test else \"\"\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix += checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n for eval_split in eval_splits:\n save_args_to_file(args, mode=eval_split)\n result, predictions = evaluate(args, model, tokenizer, prefix=prefix, eval_split=eval_split)\n result = dict((k + f\"_{global_step}\", v) for k, v in result.items())\n results.update(result)\n\n if args.test and \"diagnostic\" in args.test:\n # For running diagnostics with MNLI, run as SNLI and use hack.\n evaluate_by_category(predictions[args.task_name],\n mnli_hack=True if args.task_name in [\"SNLI\", \"snli\"] and \"mnli\" in args.output_dir else False,\n eval_filename=os.path.join(args.output_dir, f\"eval_metrics_diagnostics.json\"),\n diagnostics_file_carto=args.test)\n logger.info(\" **** Done ****\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--config\",\n \"-c\",\n type=os.path.abspath,\n required=True,\n help=\"Main config file with basic arguments.\")\n parser.add_argument(\"--output_dir\",\n \"-o\",\n type=os.path.abspath,\n required=True,\n help=\"Output directory for model.\")\n parser.add_argument(\"--do_train\",\n action=\"store_true\",\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n action=\"store_true\",\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_test\",\n action=\"store_true\",\n help=\"Whether to run eval on the (OOD) test set.\")\n parser.add_argument(\"--do_finetune\",\n action=\"store_true\",\n help=\"Whether to finetune.\")\n\n parser.add_argument(\"--label_propagation\",\n action=\"store_true\",\n help=\"Whether to label propagation.\")\n\n parser.add_argument('--ft_num_train_epochs', type=float, help=\"finetuning epochs\")\n\n # parser.add_argument(\"--model_name_or_path\",\n # type=os.path.abspath,\n # required=True,\n # help=\"Model Chekpoints\")\n\n parser.add_argument(\"--do_temperature\",\n action=\"store_true\",\n help=\"Whether to temperature scaling.\")\n\n parser.add_argument(\"--do_train_label_smooth\",\n action=\"store_true\",\n help=\"Whether to do train label smoothing.\")\n\n parser.add_argument(\"--overwrite_output_dir\",\n action=\"store_true\",\n help=\"Whether to overwrite the previous output.\")\n parser.add_argument(\"--overwrite_cache\",\n action=\"store_true\",\n help=\"Whether to overwrite the previous dqta cache.\")\n\n parser.add_argument(\"--use_existing_eval_data\",\n action=\"store_true\",\n help=\"Whether to use the existing eval data to eval.\")\n\n parser.add_argument('--setting', type=str, 
help=\"Different setting\")\n\n parser.add_argument(\"--test\",\n type=os.path.abspath,\n help=\"OOD test set.\")\n\n # TODO(SS): Automatically map tasks to OOD test sets.\n\n args_from_cli = parser.parse_args()\n\n other_args = json.loads(_jsonnet.evaluate_file(args_from_cli.config))\n other_args.update(**vars(args_from_cli))\n args = Params(MODEL_CLASSES, ALL_MODELS, processors, other_args)\n run_transformer(args)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"scipy.stats.entropy.size",
"torch.max",
"torch.load",
"numpy.squeeze",
"torch.utils.data.DataLoader",
"numpy.mean",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"numpy.zeros_like",
"torch.save",
"numpy.ones_like",
"torch.distributed.init_process_group",
"torch.utils.data.TensorDataset",
"torch.from_numpy",
"torch.distributed.barrier",
"torch.tensor",
"scipy.spatial.distance.jensenshannon",
"numpy.argmax",
"numpy.isnan",
"torch.cuda.device_count",
"numpy.array",
"torch.distributed.get_world_size",
"numpy.sum",
"torch.nn.parallel.DistributedDataParallel",
"scipy.stats.entropy.detach",
"torch.nn.KLDivLoss",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.Tensor",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"scipy.stats.entropy",
"torch.nn.DataParallel"
]
] |
ajd98/shap
|
[
"922fa0fe9f198011edd875289fc68b906ff9f2b8"
] |
[
"shap/explainers/kernel.py"
] |
[
"from iml.common import convert_to_instance, convert_to_model, match_instance_to_data, match_model_to_data, convert_to_instance_with_index\nfrom iml.explanations import AdditiveExplanation\nfrom iml.links import convert_to_link, IdentityLink\nfrom iml.datatypes import convert_to_data, DenseData\nfrom scipy.special import binom\nimport numpy as np\nimport pandas as pd\nimport logging\nimport copy\nimport itertools\nfrom sklearn.linear_model import LassoLarsIC, Lasso\nfrom sklearn.cluster import KMeans\nfrom tqdm import tqdm\n\nlog = logging.getLogger('shap')\n\n\ndef kmeans(X, k, round_values=True):\n \"\"\" Summarize a dataset with k mean samples weighted by the number of data points they\n each represent.\n\n Parameters\n ----------\n X : numpy.array or pandas.DataFrame\n Matrix of data samples to summarize (# samples x # features)\n\n k : int\n Number of means to use for approximation.\n\n round_values : bool\n For all i, round the ith dimension of each mean sample to match the nearest value\n from X[:,i]. This ensures discrete features always get a valid value.\n\n Returns\n -------\n DenseData object.\n \"\"\"\n\n group_names = [str(i) for i in range(X.shape[1])]\n if str(type(X)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n group_names = X.columns\n X = X.values\n kmeans = KMeans(n_clusters=k, random_state=0).fit(X)\n\n if round_values:\n for i in range(k):\n for j in range(X.shape[1]):\n ind = np.argmin(np.abs(X[:,j] - kmeans.cluster_centers_[i,j]))\n kmeans.cluster_centers_[i,j] = X[ind,j]\n\n return DenseData(kmeans.cluster_centers_, group_names, None, 1.0*np.bincount(kmeans.labels_))\n\n\nclass KernelExplainer(object):\n \"\"\"Uses the Kernel SHAP method to explain the output of any function.\n\n Kernel SHAP is a method that uses a special weighted linear regression\n to compute the importance of each feature. The computed importance values\n are Shapley values from game theory and also coefficents from a local linear\n regression.\n\n\n Parameters\n ----------\n model : function or iml.Model\n User supplied function that takes a matrix of samples (# samples x # features) and\n computes a the output of the model for those samples. The output can be a vector\n (# samples) or a matrix (# samples x # model outputs).\n\n data : numpy.array or pandas.DataFrame or iml.DenseData\n The background dataset to use for integrating out features. To determine the impact\n of a feature, that feature is set to \"missing\" and the change in the model output\n is observed. Since most models aren't designed to handle arbitrary missing data at test\n time, we simulate \"missing\" by replacing the feature with the values it takes in the\n background dataset. So if the background dataset is a simple sample of all zeros, then\n we would approximate a feature being missing by setting it to zero. For small problems\n this background datset can be the whole training set, but for larger problems consider\n using a single reference value or using the kmeans function to summarize the dataset.\n\n link : \"identity\" or \"logit\"\n A generalized linear model link to connect the feature importance values to the model\n output. 
Since the feature importance values, phi, sum up to the model output, it often makes\n sense to connect them to the ouput with a link function where link(outout) = sum(phi).\n If the model output is a probability then the LogitLink link function makes the feature\n importance values have log-odds units.\n \"\"\"\n\n def __init__(self, model, data, link=IdentityLink(), **kwargs):\n\n # convert incoming inputs to standardized iml objects\n self.link = convert_to_link(link)\n self.model = convert_to_model(model)\n self.keep_index = kwargs.get(\"keep_index\", False)\n self.data = convert_to_data(data, keep_index=self.keep_index)\n match_model_to_data(self.model, self.data)\n\n # enforce our current input type limitations\n assert isinstance(self.data, DenseData), \"Shap explainer only supports the DenseData input currently.\"\n assert not self.data.transposed, \"Shap explainer does not support transposed DenseData currently.\"\n\n # warn users about large background data sets\n if len(self.data.weights) > 100:\n log.warning(\"Using \" + str(len(self.data.weights)) + \" background data samples could cause \" +\n \"slower run times. Consider using shap.kmeans(data, K) to summarize the background \" +\n \"as K weighted samples.\")\n\n # init our parameters\n self.N = self.data.data.shape[0]\n self.P = self.data.data.shape[1]\n self.linkfv = np.vectorize(self.link.f)\n self.nsamplesAdded = 0\n self.nsamplesRun = 0\n\n # find E_x[f(x)]\n if self.keep_index:\n model_null = self.model.f(self.data.convert_to_df())\n else:\n model_null = self.model.f(self.data.data)\n if isinstance(model_null, (pd.DataFrame, pd.Series)):\n model_null = model_null.values\n self.fnull = np.sum((model_null.T * self.data.weights).T, 0)\n\n # see if we have a vector output\n self.vector_out = True\n if len(self.fnull.shape) == 0:\n self.vector_out = False\n self.fnull = np.array([self.fnull])\n self.D = 1\n else:\n self.D = self.fnull.shape[0]\n\n def shap_values(self, X, **kwargs):\n \"\"\" Estimate the SHAP values for a set of samples.\n\n Parameters\n ----------\n X : numpy.array or pandas.DataFrame\n A matrix of samples (# samples x # features) on which to explain the model's output.\n\n nsamples : \"auto\" or int\n Number of times to re-evaluate the model when explaining each prediction. More samples\n lead to lower variance estimates of the SHAP values.\n\n l1_reg : \"auto\" or float\n The l1 regularization to use for feature selection (the estimation procedure is based on\n a debiased lasso). Set this to zero to remove the feature selection step before estimation.\n\n Returns\n -------\n For a models with a single output this returns a matrix of SHAP values\n (# samples x # features + 1). The last column is the base value of the model, which is\n the expected value of the model applied to the background dataset. This causes each row to\n sum to the model output for that sample. 
For models with vector outputs this returns a list\n of such matrices, one for each output.\n \"\"\"\n\n # convert dataframes\n if str(type(X)).endswith(\"pandas.core.series.Series'>\"):\n X = X.values\n elif str(type(X)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if self.keep_index:\n index_value = X.index.values\n index_name = X.index.name\n column_name = list(X.columns)\n X = X.values\n\n assert str(type(X)).endswith(\"'numpy.ndarray'>\"), \"Unknown instance type: \" + str(type(X))\n assert len(X.shape) == 1 or len(X.shape) == 2, \"Instance must have 1 or 2 dimensions!\"\n\n # single instance\n if len(X.shape) == 1:\n data = X.reshape((1, X.shape[0]))\n if self.keep_index:\n data = convert_to_instance_with_index(data, column_name, index_name, index_value)\n explanation = self.explain(data, **kwargs)\n\n # vector-output\n s = explanation.effects.shape\n if len(s) == 2:\n outs = [np.zeros(s[0] + 1) for j in range(s[1])]\n for j in range(s[1]):\n outs[j][:-1] = explanation.effects[:, j]\n outs[j][-1] = explanation.base_value[j]\n return outs\n\n # single-output\n else:\n out = np.zeros(s[0] + 1)\n out[:-1] = explanation.effects\n out[-1] = explanation.base_value\n return out\n\n # explain the whole dataset\n elif len(X.shape) == 2:\n explanations = []\n for i in tqdm(range(X.shape[0]), disable=kwargs.get(\"silent\", False)):\n data = X[i:i + 1, :]\n if self.keep_index:\n data = convert_to_instance_with_index(data, column_name, index_value[i:i + 1], index_name)\n explanations.append(self.explain(data, **kwargs))\n\n # vector-output\n s = explanations[0].effects.shape\n if len(s) == 2:\n outs = [np.zeros((X.shape[0], s[0] + 1)) for j in range(s[1])]\n for i in range(X.shape[0]):\n for j in range(s[1]):\n outs[j][i, :-1] = explanations[i].effects[:, j]\n outs[j][i, -1] = explanations[i].base_value[j]\n return outs\n\n # single-output\n else:\n out = np.zeros((X.shape[0], s[0] + 1))\n for i in range(X.shape[0]):\n out[i, :-1] = explanations[i].effects\n out[i, -1] = explanations[i].base_value\n return out\n\n def explain(self, incoming_instance, **kwargs):\n # convert incoming input to a standardized iml object\n instance = convert_to_instance(incoming_instance)\n match_instance_to_data(instance, self.data)\n\n # find the feature groups we will test. 
If a feature does not change from its\n # current value then we know it doesn't impact the model\n self.varyingInds = self.varying_groups(instance.x)\n self.varyingFeatureGroups = [self.data.groups[i] for i in self.varyingInds]\n self.M = len(self.varyingFeatureGroups)\n\n # find f(x)\n if self.keep_index:\n model_out = self.model.f(instance.convert_to_df())\n else:\n model_out = self.model.f(instance.x)\n if isinstance(model_out, (pd.DataFrame, pd.Series)):\n model_out = model_out.values[0]\n self.fx = model_out[0]\n\n if not self.vector_out:\n self.fx = np.array([self.fx])\n\n # if no features vary then there no feature has an effect\n if self.M == 0:\n phi = np.zeros((len(self.data.groups), self.D))\n phi_var = np.zeros((len(self.data.groups), self.D))\n\n # if only one feature varies then it has all the effect\n elif self.M == 1:\n phi = np.zeros((len(self.data.groups), self.D))\n phi_var = np.zeros((len(self.data.groups), self.D))\n diff = self.link.f(self.fx) - self.link.f(self.fnull)\n for d in range(self.D):\n phi[self.varyingInds[0],d] = diff[d]\n\n # if more than one feature varies then we have to do real work\n else:\n self.l1_reg = kwargs.get(\"l1_reg\", \"auto\")\n\n # pick a reasonable number of samples if the user didn't specify how many they wanted\n self.nsamples = kwargs.get(\"nsamples\", \"auto\")\n if self.nsamples == \"auto\":\n self.nsamples = 2 * self.M + 2**11\n\n # if we have enough samples to enumerate all subsets then ignore the unneeded samples\n self.max_samples = 2 ** 30\n if self.M <= 30:\n self.max_samples = 2 ** self.M - 2\n if self.nsamples > self.max_samples:\n self.nsamples = self.max_samples\n\n # reserve space for some of our computations\n self.allocate()\n\n # weight the different subset sizes\n num_subset_sizes = np.int(np.ceil((self.M - 1) / 2.0))\n num_paired_subset_sizes = np.int(np.floor((self.M - 1) / 2.0))\n weight_vector = np.array([(self.M - 1.0) / (i * (self.M - i)) for i in range(1, num_subset_sizes + 1)])\n weight_vector[:num_paired_subset_sizes] *= 2\n weight_vector /= np.sum(weight_vector)\n log.debug(\"weight_vector = {0}\".format(weight_vector))\n log.debug(\"num_subset_sizes = {0}\".format(num_subset_sizes))\n log.debug(\"num_paired_subset_sizes = {0}\".format(num_paired_subset_sizes))\n log.debug(\"M = {0}\".format(self.M))\n\n # fill out all the subset sizes we can completely enumerate\n # given nsamples*remaining_weight_vector[subset_size]\n num_full_subsets = 0\n num_samples_left = self.nsamples\n group_inds = np.arange(self.M, dtype='int64')\n mask = np.zeros(self.M)\n remaining_weight_vector = copy.copy(weight_vector)\n for subset_size in range(1, num_subset_sizes + 1):\n\n # determine how many subsets (and their complements) are of the current size\n nsubsets = binom(self.M, subset_size)\n if subset_size <= num_paired_subset_sizes: nsubsets *= 2\n log.debug(\"subset_size = {0}\".format(subset_size))\n log.debug(\"nsubsets = {0}\".format(nsubsets))\n log.debug(\"self.nsamples*weight_vector[subset_size-1] = {0}\".format(\n num_samples_left * remaining_weight_vector[subset_size - 1]))\n log.debug(\"self.nsamples*weight_vector[subset_size-1/nsubsets = {0}\".format(\n num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets))\n\n # see if we have enough samples to enumerate all subsets of this size\n if num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets >= 1.0 - 1e-8:\n num_full_subsets += 1\n num_samples_left -= nsubsets\n\n # rescale what's left of the remaining weight vector to sum to 1\n if 
remaining_weight_vector[subset_size - 1] < 1.0:\n remaining_weight_vector /= (1 - remaining_weight_vector[subset_size - 1])\n\n # add all the samples of the current subset size\n w = weight_vector[subset_size - 1] / binom(self.M, subset_size)\n if subset_size <= num_paired_subset_sizes: w /= 2.0\n for inds in itertools.combinations(group_inds, subset_size):\n mask[:] = 0.0\n mask[np.array(inds, dtype='int64')] = 1.0\n self.addsample(instance.x, mask, w)\n if subset_size <= num_paired_subset_sizes:\n mask[:] = np.abs(mask - 1)\n self.addsample(instance.x, mask, w)\n else:\n break\n log.info(\"num_full_subsets = {0}\".format(num_full_subsets))\n\n # add random samples from what is left of the subset space\n samples_left = self.nsamples - self.nsamplesAdded\n log.debug(\"samples_left = {0}\".format(samples_left))\n if num_full_subsets != num_subset_sizes:\n weight_left = np.sum(weight_vector[num_full_subsets:])\n rand_sample_weight = weight_left / samples_left\n log.info(\"weight_left = {0}\".format(weight_left))\n log.info(\"rand_sample_weight = {0}\".format(rand_sample_weight))\n remaining_weight_vector = weight_vector[num_full_subsets:]\n remaining_weight_vector /= np.sum(remaining_weight_vector)\n log.info(\"remaining_weight_vector = {0}\".format(remaining_weight_vector))\n log.info(\"num_paired_subset_sizes = {0}\".format(num_paired_subset_sizes))\n ind_set = np.arange(len(remaining_weight_vector))\n while samples_left > 0:\n mask[:] = 0.0\n np.random.shuffle(group_inds)\n ind = np.random.choice(ind_set, 1, p=remaining_weight_vector)[0]\n mask[group_inds[:ind + num_full_subsets + 1]] = 1.0\n samples_left -= 1\n self.addsample(instance.x, mask, rand_sample_weight)\n\n # add the compliment sample\n if samples_left > 0:\n mask -= 1.0\n mask[:] = np.abs(mask)\n self.addsample(instance.x, mask, rand_sample_weight)\n samples_left -= 1\n\n # execute the model on the synthetic samples we have created\n self.run()\n\n # solve then expand the feature importance (Shapley value) vector to contain the non-varying features\n phi = np.zeros((len(self.data.groups), self.D))\n phi_var = np.zeros((len(self.data.groups), self.D))\n for d in range(self.D):\n vphi, vphi_var = self.solve(self.nsamples / self.max_samples, d)\n phi[self.varyingInds, d] = vphi\n phi_var[self.varyingInds, d] = vphi_var\n\n if not self.vector_out:\n phi = np.squeeze(phi, axis=1)\n phi_var = np.squeeze(phi_var, axis=1)\n\n # return the Shapley values along with variances of the estimates\n # note that if features were eliminated by l1 regression their\n # variance will be 0, even though they are not perfectly known\n return AdditiveExplanation(\n self.link.f(self.fnull if self.vector_out else self.fnull[0]),\n self.link.f(self.fx if self.vector_out else self.fx[0]),\n phi, phi_var, instance, self.link, self.model, self.data\n )\n\n def varying_groups(self, x):\n varying = np.zeros(len(self.data.groups))\n for i in range(0, len(self.data.groups)):\n inds = self.data.groups[i]\n num_matches = sum(np.abs(x[0, inds] - self.data.data[:, inds]) < 1e-7, 0)\n varying[i] = sum(num_matches != len(inds) * self.data.data.shape[0])\n return np.nonzero(varying)[0]\n\n def allocate(self):\n self.synth_data = np.zeros((self.nsamples * self.N, self.P))\n self.maskMatrix = np.zeros((self.nsamples, self.M))\n self.kernelWeights = np.zeros(self.nsamples)\n self.y = np.zeros((self.nsamples * self.N, self.D))\n self.ey = np.zeros((self.nsamples, self.D))\n self.lastMask = np.zeros(self.nsamples)\n self.nsamplesAdded = 0\n self.nsamplesRun = 0\n if 
self.keep_index:\n self.synth_data_index = [None] * (self.nsamples * self.N)\n\n def addsample(self, x, m, w):\n offset = self.nsamplesAdded * self.N\n for i in range(self.N):\n if self.keep_index:\n self.synth_data_index[offset+i] = self.data.index_value[i]\n for j in range(self.M):\n for k in self.varyingFeatureGroups[j]:\n if m[j] == 1.0:\n self.synth_data[offset + i, k] = x[0, k]\n else:\n self.synth_data[offset + i, k] = self.data.data[i, k]\n\n self.maskMatrix[self.nsamplesAdded, :] = m\n self.kernelWeights[self.nsamplesAdded] = w\n self.nsamplesAdded += 1\n\n def run(self):\n num_to_run = self.nsamplesAdded * self.N - self.nsamplesRun * self.N\n data = self.synth_data[self.nsamplesRun*self.N:self.nsamplesAdded*self.N,:]\n if self.keep_index:\n index = self.synth_data_index[self.nsamplesRun*self.N:self.nsamplesAdded*self.N]\n index = pd.DataFrame(index, columns=[self.data.index_name])\n data = pd.DataFrame(data, columns=self.data.group_names)\n data = pd.concat([index, data], axis=1).set_index(self.data.index_name)\n modelOut = self.model.f(data)\n if isinstance(modelOut, (pd.DataFrame, pd.Series)):\n modelOut = modelOut.values\n # if len(modelOut.shape) > 1:\n # raise ValueError(\"The supplied model function should output a vector not a matrix!\")\n self.y[self.nsamplesRun * self.N:self.nsamplesAdded * self.N, :] = np.reshape(modelOut, (num_to_run, self.D))\n\n # find the expected value of each output\n for i in range(self.nsamplesRun, self.nsamplesAdded):\n eyVal = np.zeros(self.D)\n for j in range(0, self.N):\n eyVal += self.y[i * self.N + j, :] * self.data.weights[j]\n\n self.ey[i, :] = eyVal\n self.nsamplesRun += 1\n\n def solve(self, fraction_evaluated, dim):\n eyAdj = self.linkfv(self.ey[:, dim]) - self.link.f(self.fnull[dim])\n s = np.sum(self.maskMatrix, 1)\n\n # do feature selection if we have not well enumerated the space\n nonzero_inds = np.arange(self.M)\n log.debug(\"fraction_evaluated = {0}\".format(fraction_evaluated))\n if (self.l1_reg not in [\"auto\", False, 0]) or (fraction_evaluated < 0.2 and self.l1_reg == \"auto\"):\n w_aug = np.hstack((self.kernelWeights * (self.M - s), self.kernelWeights * s))\n log.info(\"np.sum(w_aug) = {0}\".format(np.sum(w_aug)))\n log.info(\"np.sum(self.kernelWeights) = {0}\".format(np.sum(self.kernelWeights)))\n w_sqrt_aug = np.sqrt(w_aug)\n eyAdj_aug = np.hstack((eyAdj, eyAdj - (self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim]))))\n eyAdj_aug *= w_sqrt_aug\n mask_aug = np.transpose(w_sqrt_aug * np.transpose(np.vstack((self.maskMatrix, self.maskMatrix - 1))))\n var_norms = np.array([np.linalg.norm(mask_aug[:, i]) for i in range(mask_aug.shape[1])])\n\n if self.l1_reg == \"auto\":\n model = LassoLarsIC(criterion=\"aic\")\n elif self.l1_reg == \"bic\" or self.l1_reg == \"aic\":\n model = LassoLarsIC(criterion=self.l1_reg)\n else:\n model = Lasso(alpha=self.l1_reg)\n\n model.fit(mask_aug, eyAdj_aug)\n nonzero_inds = np.nonzero(model.coef_)[0]\n\n if len(nonzero_inds) == 0:\n return np.zeros(self.M), np.ones(self.M)\n\n # eliminate one variable with the constraint that all features sum to the output\n eyAdj2 = eyAdj - self.maskMatrix[:, nonzero_inds[-1]] * (\n self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim]))\n etmp = np.transpose(np.transpose(self.maskMatrix[:, nonzero_inds[:-1]]) - self.maskMatrix[:, nonzero_inds[-1]])\n log.debug(\"etmp[:4,:] {0}\".format(etmp[:4, :]))\n\n # solve a weighted least squares equation to estimate phi\n tmp = np.transpose(np.transpose(etmp) * np.transpose(self.kernelWeights))\n tmp2 = 
np.linalg.inv(np.dot(np.transpose(tmp), etmp))\n w = np.dot(tmp2, np.dot(np.transpose(tmp), eyAdj2))\n log.debug(\"np.sum(w) = {0}\".format(np.sum(w)))\n log.debug(\"self.link(self.fx) - self.link(self.fnull) = {0}\".format(\n self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim])))\n log.debug(\"self.fx = {0}\".format(self.fx[dim]))\n log.debug(\"self.link(self.fx) = {0}\".format(self.link.f(self.fx[dim])))\n log.debug(\"self.fnull = {0}\".format(self.fnull[dim]))\n log.debug(\"self.link(self.fnull) = {0}\".format(self.link.f(self.fnull[dim])))\n phi = np.zeros(self.M)\n phi[nonzero_inds[:-1]] = w\n phi[nonzero_inds[-1]] = (self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim])) - sum(w)\n log.info(\"phi = {0}\".format(phi))\n\n # clean up any rounding errors\n for i in range(self.M):\n if np.abs(phi[i]) < 1e-10:\n phi[i] = 0\n\n return phi, np.ones(len(phi))\n"
] |
[
[
"numpy.sqrt",
"sklearn.cluster.KMeans",
"numpy.squeeze",
"pandas.DataFrame",
"numpy.hstack",
"numpy.reshape",
"numpy.arange",
"sklearn.linear_model.Lasso",
"numpy.ceil",
"numpy.zeros",
"pandas.concat",
"numpy.nonzero",
"numpy.random.choice",
"scipy.special.binom",
"numpy.floor",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.linalg.norm",
"numpy.random.shuffle",
"numpy.ones",
"sklearn.linear_model.LassoLarsIC",
"numpy.vectorize",
"numpy.bincount",
"numpy.vstack"
]
] |
brendanjmeade/dmd_gps
|
[
"bddc401e7a2e8d0fb44fa830d58197f1319b11c9"
] |
[
"dmd_japan.py"
] |
[
"import json\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pydmd as dmd\n\nplt.close(\"all\")\n\n\nwith open('GeonetTimeSeries.json') as json_file: \n data = json.load(json_file)\n\nn_stations = len(data)\nstation_idx = 1\nstation_data = data[station_idx]\n\n# Loop over all time series and extract starts and stops\nall_start_seconds = np.zeros(n_stations)\nall_n_days = np.zeros(n_stations)\nfor i in range(0, len(data)):\n station_current = data[i]\n all_start_seconds[i] = station_current[\"start\"]\n all_n_days[i] = len(station_current[\"lon\"])\n\nall_start_days = np.round((all_start_seconds - all_start_seconds.min()) / 86400).astype(int)\nall_end_days = (all_start_days + all_n_days).astype(int)\n\nlon_mat = np.zeros((n_stations, all_end_days.max().astype(int)))\nlat_mat = np.zeros((n_stations, all_end_days.max().astype(int)))\n\n\nfor i in range(0, len(data)):\n lon_mat[i, all_start_days[i] : all_end_days[i]] = data[i][\"lon\"]\n lat_mat[i, all_start_days[i] : all_end_days[i]] = data[i][\"lat\"]\n\n# Subtract out mean of each time series?\n# FIll in missing data with linear interpolation?\n# lon_mat[lon_mat == 0] = np.nan\n# lat_mat[lat_mat == 0] = np.nan\n\nplt.matshow(lon_mat[600:800, 3000:4000])\n# plt.matshow(lat_mat[600:800, 3000:4000])\n\n\n\n# Do I have to find a maximium sized submatrix with no nans?\n# https://www.geeksforgeeks.org/maximum-size-sub-matrix-with-all-1s-in-a-binary-matrix/\n\n# # Try dynamic mode decomposition\n# dmdout = dmd.DMD(svd_rank=10, tlsq_rank=10, exact=True, opt=True)\ndmdout = dmd.DMD(svd_rank=1)\ndmdout.fit(lon_mat[600:800, 3000:4000])\n\n\n# Plot decomposition\nplt.matshow(dmdout.reconstructed_data.real)\nplt.show(block=False)\n"
] |
[
[
"matplotlib.pyplot.matshow",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.close"
]
] |
zhfeing/deep-learning-lib-PyTorch
|
[
"f96e3a71ae2dbeb44696725ec127ff8f37d4c6e9"
] |
[
"cv_lib/classification/models/cifar_large_resnet.py"
] |
[
"import torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\n__all__ = [\r\n \"ResNet_CL\",\r\n \"resnet18_cl\",\r\n \"resnet34_cl\",\r\n \"resnet50_cl\",\r\n \"resnet101_cl\",\r\n \"resnet152_cl\"\r\n]\r\n\r\n\r\nclass BasicBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, in_planes, planes, stride=1, is_last=False):\r\n super(BasicBlock, self).__init__()\r\n self.is_last = is_last\r\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n\r\n self.shortcut = nn.Sequential()\r\n if stride != 1 or in_planes != self.expansion * planes:\r\n self.shortcut = nn.Sequential(\r\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(self.expansion * planes)\r\n )\r\n\r\n def forward(self, x):\r\n out = F.relu(self.bn1(self.conv1(x)))\r\n out = self.bn2(self.conv2(out))\r\n out += self.shortcut(x)\r\n preact = out\r\n out = F.relu(out)\r\n if self.is_last:\r\n return out, preact\r\n else:\r\n return out\r\n\r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 4\r\n\r\n def __init__(self, in_planes, planes, stride=1, is_last=False):\r\n super(Bottleneck, self).__init__()\r\n self.is_last = is_last\r\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\r\n\r\n self.shortcut = nn.Sequential()\r\n if stride != 1 or in_planes != self.expansion * planes:\r\n self.shortcut = nn.Sequential(\r\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(self.expansion * planes)\r\n )\r\n\r\n def forward(self, x):\r\n out = F.relu(self.bn1(self.conv1(x)))\r\n out = F.relu(self.bn2(self.conv2(out)))\r\n out = self.bn3(self.conv3(out))\r\n out += self.shortcut(x)\r\n preact = out\r\n out = F.relu(out)\r\n if self.is_last:\r\n return out, preact\r\n else:\r\n return out\r\n\r\n\r\nclass ResNet_CL(nn.Module):\r\n \"\"\"\r\n Resnet for cifar dataset (large version).\r\n\r\n @ Different from PyTorch version `in ()`:\r\n 1. First conv layer has kernel size of 3 (7) and stride 1 (2)\r\n 2. 
Using non-inplace relu for feature extracting\r\n \"\"\"\r\n def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False):\r\n super(ResNet_CL, self).__init__()\r\n self.in_planes = 64\r\n\r\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(64)\r\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\r\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\r\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\r\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\r\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\r\n self.linear = nn.Linear(512 * block.expansion, num_classes)\r\n\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\r\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)\r\n\r\n # Zero-initialize the last BN in each residual branch,\r\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\r\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\r\n if zero_init_residual:\r\n for m in self.modules():\r\n if isinstance(m, Bottleneck):\r\n nn.init.constant_(m.bn3.weight, 0)\r\n elif isinstance(m, BasicBlock):\r\n nn.init.constant_(m.bn2.weight, 0)\r\n\r\n def get_feat_modules(self):\r\n feat_m = nn.ModuleList([])\r\n feat_m.append(self.conv1)\r\n feat_m.append(self.bn1)\r\n feat_m.append(self.layer1)\r\n feat_m.append(self.layer2)\r\n feat_m.append(self.layer3)\r\n feat_m.append(self.layer4)\r\n return feat_m\r\n\r\n def get_bn_before_relu(self):\r\n if isinstance(self.layer1[0], Bottleneck):\r\n bn1 = self.layer1[-1].bn3\r\n bn2 = self.layer2[-1].bn3\r\n bn3 = self.layer3[-1].bn3\r\n bn4 = self.layer4[-1].bn3\r\n elif isinstance(self.layer1[0], BasicBlock):\r\n bn1 = self.layer1[-1].bn2\r\n bn2 = self.layer2[-1].bn2\r\n bn3 = self.layer3[-1].bn2\r\n bn4 = self.layer4[-1].bn2\r\n else:\r\n raise NotImplementedError(\"ResNet unknown block error !!!\")\r\n\r\n return [bn1, bn2, bn3, bn4]\r\n\r\n def _make_layer(self, block, planes, num_blocks, stride):\r\n strides = [stride] + [1] * (num_blocks - 1)\r\n layers = []\r\n for i in range(num_blocks):\r\n stride = strides[i]\r\n layers.append(block(self.in_planes, planes, stride, i == num_blocks - 1))\r\n self.in_planes = planes * block.expansion\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x, is_feat=False, preact=False):\r\n out = F.relu(self.bn1(self.conv1(x)))\r\n f0 = out\r\n out, f1_pre = self.layer1(out)\r\n f1 = out\r\n out, f2_pre = self.layer2(out)\r\n f2 = out\r\n out, f3_pre = self.layer3(out)\r\n f3 = out\r\n out, f4_pre = self.layer4(out)\r\n f4 = out\r\n out = self.avgpool(out)\r\n out = out.view(out.size(0), -1)\r\n f5 = out\r\n out = self.linear(out)\r\n if is_feat:\r\n if preact:\r\n return [[f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], out]\r\n else:\r\n return [f0, f1, f2, f3, f4, f5], out\r\n else:\r\n return out\r\n\r\n\r\ndef resnet18_cl(**kwargs):\r\n return ResNet_CL(BasicBlock, [2, 2, 2, 2], **kwargs)\r\n\r\n\r\ndef resnet34_cl(**kwargs):\r\n return ResNet_CL(BasicBlock, [3, 4, 6, 3], **kwargs)\r\n\r\n\r\ndef resnet50_cl(**kwargs):\r\n return ResNet_CL(Bottleneck, [3, 4, 6, 3], **kwargs)\r\n\r\n\r\ndef resnet101_cl(**kwargs):\r\n return ResNet_CL(Bottleneck, [3, 4, 23, 3], **kwargs)\r\n\r\n\r\ndef resnet152_cl(**kwargs):\r\n return 
ResNet_CL(Bottleneck, [3, 8, 36, 3], **kwargs)\r\n\r\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_"
]
] |
photoszzt/pytorch
|
[
"f2857883c4c148ced4f920431b38532fe8081b73"
] |
[
"test/distributed/test_c10d_gloo.py"
] |
[
"import copy\nimport logging\nimport math\nimport operator\nimport os\nimport random\nimport sys\nimport tempfile\nimport unittest\nfrom functools import reduce\nfrom itertools import groupby\n\nimport torch\nimport torch.distributed as c10d\n\nif not c10d.is_available():\n print(\"c10d not available, skipping tests\", file=sys.stderr)\n sys.exit(0)\n\nimport torch.distributed as dist\nimport torch.nn.functional as F\nimport torch.testing._internal.common_utils as common\nfrom torch import nn\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.testing._internal.common_distributed import (\n MultiProcessTestCase,\n requires_gloo,\n skip_if_lt_x_gpu,\n simple_sparse_reduce_tests,\n skip_if_win32,\n create_device,\n verify_ddp_error_logged,\n skip_if_rocm,\n)\nfrom torch.testing._internal.common_utils import (\n TestCase,\n run_tests,\n retry_on_connect_failures,\n TEST_WITH_TSAN,\n)\nimport test_c10d_common\nfrom test_c10d_common import (\n LOOPBACK,\n gpus_for_rank,\n Task,\n ModuleForDdpCommHook,\n SparseGradientModule,\n)\n\n\ndef simple_reduce_tests(rank, world_size):\n tests = [\n (\n c10d.ReduceOp.SUM,\n torch.tensor([rank + 1.0]),\n torch.tensor([float(world_size * (world_size + 1) / 2)]),\n ),\n (\n c10d.ReduceOp.PRODUCT,\n torch.tensor([rank + 1.0]),\n torch.tensor([float(math.factorial(world_size))]),\n ),\n (\n c10d.ReduceOp.MIN,\n torch.tensor([rank + 1.0]),\n torch.tensor([1.0]),\n ),\n (\n c10d.ReduceOp.MAX,\n torch.tensor([rank + 1.0]),\n torch.tensor([world_size]),\n ),\n ]\n\n # Generate tests for BAND.\n # The bit that is set changes in every iteration to check\n # that the output changes accordingly.\n for i in range(4):\n vin = rank | (1 << i)\n vout = 1 << i\n tests.append(\n (\n c10d.ReduceOp.BAND,\n torch.tensor([vin], dtype=torch.int32),\n torch.tensor([vout], dtype=torch.int32),\n ),\n )\n\n # Generate tests for BOR.\n # These emulate a larger world size per iteration by having every\n # rank contribute multiple values that are pre-OR'ed.\n for i in range(1, 5):\n vin = reduce(operator.or_, [rank * i + j for j in range(i)])\n vout = reduce(operator.or_, range(world_size * i))\n tests.append(\n (\n c10d.ReduceOp.BOR,\n torch.tensor([vin], dtype=torch.int32),\n torch.tensor([vout], dtype=torch.int32),\n ),\n )\n\n # Generate tests for XOR.\n # These emulate a larger world size per iteration by having every\n # rank contribute multiple values that are pre-XOR'ed.\n for i in range(1, 5):\n vin = reduce(operator.xor, [rank * i + j for j in range(i)])\n vout = reduce(operator.xor, range(world_size * i))\n tests.append(\n (\n c10d.ReduceOp.BXOR,\n torch.tensor([vin], dtype=torch.int32),\n torch.tensor([vout], dtype=torch.int32),\n ),\n )\n\n return tests\n\n\ndef simple_coalesced_reduce_tests(rank, world_size):\n return [\n (\n c10d.ReduceOp.SUM,\n [torch.tensor([rank + 1]), torch.tensor([(rank + 1) ** 2])],\n [\n torch.tensor([float(world_size * (world_size + 1) / 2)]),\n torch.tensor(\n [float(world_size * (world_size + 1) * (2 * world_size + 1) / 6)]\n ),\n ],\n ),\n (\n c10d.ReduceOp.PRODUCT,\n [torch.tensor([rank + 1.0]), torch.tensor([rank + 2.0])],\n [\n torch.tensor([float(math.factorial(world_size))]),\n torch.tensor([float(math.factorial(world_size + 1))]),\n ],\n ),\n (\n c10d.ReduceOp.MIN,\n [torch.tensor([rank + x]) for x in [0.0, 1.0]],\n [torch.tensor([0.0]), torch.tensor([1.0])],\n ),\n (\n c10d.ReduceOp.MAX,\n [torch.tensor([rank + x]) for x in [1.0, 2.0]],\n [torch.tensor([world_size]), torch.tensor([world_size + 1.0])],\n ),\n ]\n\n\ndef 
simple_multi_input_reduce_tests(rank, world_size):\n return [\n (\n c10d.ReduceOp.SUM,\n [torch.tensor([2 * rank + 0.0]), torch.tensor([2 * rank + 1.0])],\n torch.tensor([float(world_size * (2 * world_size - 1))]),\n ),\n (\n c10d.ReduceOp.PRODUCT,\n [torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],\n torch.tensor([float(math.factorial(2 * world_size))]),\n ),\n (\n c10d.ReduceOp.MIN,\n [torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],\n torch.tensor([1.0]),\n ),\n (\n c10d.ReduceOp.MAX,\n [torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],\n torch.tensor([2 * world_size]),\n ),\n ]\n\n\nclass RendezvousEnvTest(TestCase):\n @requires_gloo()\n @retry_on_connect_failures\n def test_logging_init(self):\n os.environ[\"WORLD_SIZE\"] = \"1\"\n os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n os.environ[\"MASTER_PORT\"] = str(common.find_free_port())\n os.environ[\"RANK\"] = \"0\"\n\n previous_handlers = logging.root.handlers\n\n c10d.init_process_group(backend=\"gloo\", init_method=\"env://\")\n\n current_handlers = logging.root.handlers\n self.assertEqual(len(previous_handlers), len(current_handlers))\n for current, previous in zip(current_handlers, previous_handlers):\n self.assertEqual(current, previous)\n\n c10d.destroy_process_group()\n\n\nclass TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):\n @requires_gloo()\n @retry_on_connect_failures\n def test_default_store_timeout_gloo(self):\n self._test_default_store_timeout(\"gloo\")\n\n@requires_gloo()\[email protected](\n TEST_WITH_TSAN,\n \"TSAN is not fork-safe since we're forking in a multi-threaded environment\",\n)\nclass ProcessGroupGlooTest(MultiProcessTestCase):\n def setUp(self):\n super(ProcessGroupGlooTest, self).setUp()\n\n # For Windows platform, Python does not support fork, change it to spawn here.\n if sys.platform == \"win32\":\n self._spawn_processes()\n else:\n self._fork_processes()\n\n def opts(self, threads=2):\n opts = c10d.ProcessGroupGloo._Options()\n opts._timeout = 5.0\n opts._devices = [create_device(interface=LOOPBACK)]\n opts._threads = threads\n return opts\n\n def test_multi_device_constructor(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n opts = c10d.ProcessGroupGloo._Options()\n opts._timeout = 5.0\n opts._devices = [\n create_device(interface=LOOPBACK),\n create_device(interface=LOOPBACK),\n ]\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, opts)\n\n # Execute 2x the number of operations to ensure we use every device.\n for fut in [pg.allreduce(torch.ones(i + 1)).get_future() for i in range(4)]:\n fut.wait()\n\n def test_empty_tensors(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n xs = [torch.FloatTensor([])]\n fut = pg.broadcast(xs).get_future()\n fut.wait()\n output = fut.value()\n self.assertEqual(0, output[0].numel())\n self.assertEqualIgnoreType(xs[0], output[0])\n\n def test_broadcast_checks(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n t1 = torch.zeros([1], dtype=torch.float32)\n t2 = torch.zeros([1], dtype=torch.float64)\n t3 = torch.zeros([2], dtype=torch.float32)\n\n with self.assertRaisesRegex(ValueError, \"invalid root rank\"):\n opts = c10d.BroadcastOptions()\n opts.rootRank = -1\n opts.rootTensor = 0\n pg.broadcast([t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid root rank\"):\n opts = 
c10d.BroadcastOptions()\n opts.rootRank = self.world_size\n opts.rootTensor = 0\n pg.broadcast([t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid root tensor\"):\n opts = c10d.BroadcastOptions()\n opts.rootRank = self.rank\n opts.rootTensor = -1\n pg.broadcast([t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid root tensor\"):\n opts = c10d.BroadcastOptions()\n opts.rootRank = self.rank\n opts.rootTensor = 1\n pg.broadcast([t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid root tensor\"):\n opts = c10d.BroadcastOptions()\n opts.rootRank = self.rank\n opts.rootTensor = 0\n pg.broadcast([], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor type\"):\n opts = c10d.BroadcastOptions()\n opts.rootRank = self.rank\n opts.rootTensor = 0\n pg.broadcast([t1, t2], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor size\"):\n opts = c10d.BroadcastOptions()\n opts.rootRank = self.rank\n opts.rootTensor = 0\n pg.broadcast([t1, t3], opts)\n\n def _test_broadcast_basics(self, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n def broadcast(xs, rootRank, rootTensor):\n opts = c10d.BroadcastOptions()\n opts.rootRank = rootRank\n opts.rootTensor = rootTensor\n fut = pg.broadcast(xs, opts).get_future()\n fut.wait()\n return fut.value()\n\n # Every rank is root once\n for i in range(self.world_size):\n # Run with 1 input tensor\n x = fn(torch.tensor([self.rank]))\n output = broadcast([x], i, 0)\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(torch.tensor([i]), output[0])\n\n # Run with 2 input tensors\n num = 2\n for j in range(num):\n xs = [\n fn(torch.tensor([self.rank * num + 0.0])),\n fn(torch.tensor([self.rank * num + 1.0])),\n ]\n\n output = broadcast(xs, i, j)\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(torch.tensor([i * num + j]), output[0])\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(torch.tensor([i * num + j]), output[1])\n\n # Test overloaded convenience function\n x = torch.tensor([self.rank + 1.0])\n fut = pg.broadcast(x, root=0).get_future()\n fut.wait()\n result = fut.value()\n self.assertEqual(torch.tensor([1.0]), result[0])\n\n def test_broadcast_basics(self):\n self._test_broadcast_basics(lambda t: t.clone())\n\n @skip_if_lt_x_gpu(2)\n def test_broadcast_basics_cuda(self):\n self._test_broadcast_basics(lambda t: t.clone().cuda())\n\n def _test_broadcast_stress(self, inputs):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, self.opts(threads=8)\n )\n work_handles = [\n pg.broadcast(inputs[i], root=(i % self.world_size))\n for i in range(len(inputs))\n ]\n for i, work_handle in enumerate(work_handles):\n work_handle.wait()\n self.assertEqual(\n torch.tensor([(i * self.world_size) + (i % self.world_size)]),\n inputs[i],\n msg=(\"Mismatch in iteration %d\" % i),\n )\n\n def test_broadcast_stress(self):\n inputs = [torch.tensor([i * self.world_size + self.rank]) for i in range(1000)]\n self._test_broadcast_stress(inputs)\n\n @skip_if_lt_x_gpu(2)\n def test_broadcast_stress_cuda(self):\n inputs = [\n torch.tensor([i * self.world_size + self.rank]).cuda() for i in range(1000)\n ]\n self._test_broadcast_stress(inputs)\n\n def test_allreduce_checks(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n t1 = torch.zeros([1], dtype=torch.float32)\n t2 = torch.zeros([1], dtype=torch.float64)\n t3 = torch.zeros([2], dtype=torch.float32)\n\n with self.assertRaisesRegex(ValueError, \"requires non-empty tensor list\"):\n opts = c10d.AllreduceOptions()\n pg.allreduce([], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor type\"):\n opts = c10d.AllreduceOptions()\n pg.allreduce([t1, t2], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor size\"):\n opts = c10d.AllreduceOptions()\n pg.allreduce([t1, t3], opts)\n\n def _test_allreduce_basics(self, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n # Single input tests\n tests = simple_reduce_tests(self.rank, self.world_size)\n for (op, input, expected) in tests:\n opts = c10d.AllreduceOptions()\n opts.reduceOp = op\n tensor = fn(input)\n fut = pg.allreduce([tensor], opts).get_future()\n fut.wait()\n result = fut.value()\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(expected, result[0])\n\n # Multi input tests\n tests = simple_multi_input_reduce_tests(self.rank, self.world_size)\n for (op, inputs, output) in tests:\n opts = c10d.AllreduceOptions()\n opts.reduceOp = op\n tensors = [fn(input) for input in inputs]\n fut = pg.allreduce(tensors, opts).get_future()\n fut.wait()\n result = fut.value()\n for tensor in result:\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(output, tensor)\n\n # Test overloaded convenience function (defaults to using sum)\n x = fn(torch.tensor([self.rank + 1.0]))\n fut = pg.allreduce(x).get_future()\n fut.wait()\n result = fut.value()\n self.assertEqual(\n torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]), result[0]\n )\n\n def test_allreduce_basics(self):\n self._test_allreduce_basics(lambda t: t.clone())\n\n @skip_if_lt_x_gpu(2)\n def test_allreduce_basics_cuda(self):\n self._test_allreduce_basics(lambda t: t.clone().cuda())\n\n # _using_work_api tests are to make sure we still properly support work API.\n # This should go away as we deprecate it.\n def _test_allreduce_basics_using_work_api(self, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n # Single input tests\n tests = simple_reduce_tests(self.rank, self.world_size)\n for (op, input, expected) in tests:\n opts = c10d.AllreduceOptions()\n opts.reduceOp = op\n tensor = fn(input)\n work = pg.allreduce([tensor], opts)\n work.wait()\n result = work.result()\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(expected, result[0])\n\n # Multi input tests\n tests = simple_multi_input_reduce_tests(self.rank, self.world_size)\n for (op, inputs, output) in tests:\n opts = c10d.AllreduceOptions()\n opts.reduceOp = op\n tensors = [fn(input) for input in inputs]\n work = pg.allreduce(tensors, opts)\n work.wait()\n result = work.result()\n for tensor in result:\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(output, tensor)\n\n # Test overloaded convenience function (defaults to using sum)\n x = fn(torch.tensor([self.rank + 1.0]))\n work = pg.allreduce(x)\n work.wait()\n result = work.result()\n self.assertEqual(\n torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]), result[0]\n )\n\n def test_allreduce_basics_using_work_api(self):\n self._test_allreduce_basics_using_work_api(lambda t: t.clone())\n\n @skip_if_lt_x_gpu(2)\n def test_allreduce_basics_cuda_using_work_api(self):\n self._test_allreduce_basics_using_work_api(lambda t: t.clone().cuda())\n\n def _test_allreduce_stress(self, inputs):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, self.opts(threads=8)\n )\n future_handles = [pg.allreduce(inputs[i]).get_future() for i in range(len(inputs))]\n for i, future_handle in enumerate(future_handles):\n future_handle.wait()\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(\n torch.tensor(\n [\n (i * self.world_size)\n + (self.world_size * (self.world_size - 1) / 2)\n ]\n ),\n future_handle.value()[0],\n msg=(\"Mismatch in iteration %d\" % i),\n )\n\n def test_allreduce_stress(self):\n inputs = [torch.tensor([i + self.rank]) for i in range(1000)]\n self._test_allreduce_stress(inputs)\n\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_allreduce_stress_cuda(self):\n inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]\n self._test_allreduce_stress(inputs)\n\n def test_allreduce_coalesced_checks(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n t1 = torch.zeros(1, dtype=torch.float32)\n t2 = torch.zeros(1, dtype=torch.float64)\n t3 = torch.sparse_coo_tensor([[0]], [1], size=(1,))\n\n with self.assertRaisesRegex(ValueError, \"requires non-empty tensor list\"):\n opts = c10d.AllreduceCoalescedOptions()\n pg.allreduce_coalesced([], opts)\n\n with self.assertRaisesRegex(ValueError, \"tensors must all have the same type\"):\n opts = c10d.AllreduceCoalescedOptions()\n pg.allreduce_coalesced([t1, t2], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor layout at index\"):\n opts = c10d.AllreduceCoalescedOptions()\n pg.allreduce_coalesced([t1, t3], opts)\n\n with self.assertRaisesRegex(ValueError, \"unsupported layout\"):\n opts = c10d.AllreduceCoalescedOptions()\n pg.allreduce_coalesced([t3, t3.clone()], opts)\n\n @skip_if_lt_x_gpu(1)\n def test_allreduce_coalesced_checks_cuda(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n t1 = torch.zeros(1, dtype=torch.float32)\n\n with self.assertRaisesRegex(ValueError, \"unsupported device type\"):\n opts = c10d.AllreduceCoalescedOptions()\n pg.allreduce_coalesced([t1.cuda(), t1.cuda()], opts)\n\n def _test_allreduce_coalesced_basics(self, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n test_cases = simple_coalesced_reduce_tests(self.rank, self.world_size)\n for op, inputs, outputs in test_cases:\n opts = c10d.AllreduceCoalescedOptions()\n opts.reduceOp = op\n tensors = [fn(x) for x in inputs]\n fut = pg.allreduce_coalesced(tensors, opts).get_future()\n fut.wait()\n result = fut.value()\n for result_tensor, expected in zip(result, outputs):\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(result_tensor, expected)\n\n def test_allreduce_coalesced_basics(self):\n self._test_allreduce_coalesced_basics(lambda t: t.clone())\n\n def _test_allreduce_coalesced_stress(self, inputs):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, self.opts(threads=8)\n )\n future_handles = [pg.allreduce_coalesced(input).get_future() for input in inputs]\n for i, future_handle in enumerate(future_handles):\n future_handle.wait()\n result = future_handle.value()\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(\n 2\n * [\n torch.tensor(\n [\n (i * self.world_size)\n + (self.world_size * (self.world_size - 1) / 2)\n ]\n )\n ],\n result,\n msg=\"Mismatch in interation {}\".format(i),\n )\n\n def test_allreduce_coalesced_stress(self):\n inputs = [2 * [torch.tensor([i + self.rank])] for i in range(1000)]\n self._test_allreduce_coalesced_stress(inputs)\n\n def test_sparse_allreduce_checks(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n t1 = torch.zeros([1])\n t2 = torch.sparse_coo_tensor([[0]], [1], size=(2,))\n t3 = torch.sparse_coo_tensor([[0]], [1], size=(4,))\n\n with self.assertRaisesRegex(ValueError, \"requires non-empty tensor list\"):\n opts = c10d.AllreduceOptions()\n pg.allreduce([], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor layout\"):\n opts = c10d.AllreduceOptions()\n pg.allreduce([t1, t2], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor size\"):\n opts = c10d.AllreduceOptions()\n pg.allreduce([t2, t3], opts)\n\n # Sparse allreduce only works with c10d.ReduceOp.SUM.\n for op in [c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX]:\n with self.assertRaisesRegex(ValueError, \"unsupported reduction operation\"):\n opts = c10d.AllreduceOptions()\n opts.reduceOp = op\n pg.allreduce([t3], opts)\n\n def _test_sparse_allreduce_basics(self, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n for num_inputs_per_rank in [1, 2]:\n tests = simple_sparse_reduce_tests(\n self.rank, self.world_size, num_inputs=num_inputs_per_rank\n )\n for (inputs, outputs) in tests:\n tensors = [fn(input) for input in inputs]\n fut = pg.allreduce(tensors).get_future()\n fut.wait()\n result = fut.value()\n self.assertEqual(tensors, outputs)\n self.assertEqual(result, outputs)\n\n @unittest.skip(\"intermittent failures on Windows, in CI\")\n def test_sparse_allreduce_basics(self):\n self._test_sparse_allreduce_basics(lambda t: t)\n\n @skip_if_lt_x_gpu(2)\n def test_sparse_allreduce_basics_cuda(self):\n self._test_sparse_allreduce_basics(lambda t: t.clone().cuda())\n\n def test_scatter_checks(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n t1 = torch.zeros([1], dtype=torch.float32)\n t2 = torch.zeros([1], dtype=torch.float64)\n t3 = torch.zeros([2], dtype=torch.float32)\n\n with self.assertRaisesRegex(ValueError, \"invalid root rank\"):\n opts = c10d.ScatterOptions()\n opts.rootRank = -1\n pg.scatter([t1], [], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid root rank\"):\n opts = c10d.ScatterOptions()\n opts.rootRank = self.world_size\n pg.scatter([t1], [], opts)\n\n with self.assertRaisesRegex(\n ValueError, \"requires a single-element output tensor list\"\n ):\n opts = c10d.ScatterOptions()\n opts.rootRank = 0\n pg.scatter([], [], opts)\n\n with self.assertRaisesRegex(\n ValueError, \"requires a single-element output tensor list\"\n ):\n opts = c10d.ScatterOptions()\n opts.rootRank = 0\n pg.scatter([t1, t1], [], opts)\n\n with self.assertRaisesRegex(ValueError, \"requires a single-element input list\"):\n opts = c10d.ScatterOptions()\n opts.rootRank = self.rank\n pg.scatter([t1], [], opts)\n\n with self.assertRaisesRegex(ValueError, \"requires a single-element input list\"):\n opts = c10d.ScatterOptions()\n opts.rootRank = 
self.rank\n pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)\n\n desired_list_size = self.world_size\n incorrect_list_size = self.world_size - 1\n err_str = \"Incorrect input list size {}. Input list size should be {}\"\n with self.assertRaisesRegex(\n ValueError, err_str.format(incorrect_list_size, desired_list_size)\n ):\n opts = c10d.ScatterOptions()\n opts.rootRank = self.rank\n pg.scatter([t1], [[t1] * incorrect_list_size], opts)\n\n incorrect_list_size = self.world_size + 1\n with self.assertRaisesRegex(\n ValueError, err_str.format(incorrect_list_size, desired_list_size)\n ):\n opts = c10d.ScatterOptions()\n opts.rootRank = self.rank\n pg.scatter([t1], [[t1] * incorrect_list_size], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor type\"):\n opts = c10d.ScatterOptions()\n opts.rootRank = self.rank\n pg.scatter([t1], [[t2] * self.world_size], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor size\"):\n opts = c10d.ScatterOptions()\n opts.rootRank = self.rank\n pg.scatter([t1], [[t3] * self.world_size], opts)\n\n with self.assertRaisesRegex(ValueError, \"requires empty input on non-root\"):\n opts = c10d.ScatterOptions()\n opts.rootRank = (self.rank + 1) % self.world_size\n pg.scatter([t1], [[t1] * self.world_size], opts)\n\n def _test_scatter_basics(self, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n # Preallocate tensors for input/output\n input = [fn(torch.tensor([self.rank])) for _ in range(self.world_size)]\n outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]\n\n # Take turns being the scatter root and accumulate work items\n futures = []\n for i in range(self.world_size):\n opts = c10d.ScatterOptions()\n opts.rootRank = i\n if i == self.rank:\n futures.append(pg.scatter([outputs[i]], [input], opts).get_future())\n else:\n futures.append(pg.scatter([outputs[i]], [], opts).get_future())\n\n # Wait for work to complete\n for i in range(self.world_size):\n futures[i].wait()\n result = futures[i].value()\n self.assertEqual(torch.tensor([i]), result[0])\n\n def test_scatter_basics(self):\n self._test_scatter_basics(lambda t: t.clone())\n\n @skip_if_lt_x_gpu(2)\n def test_scatter_basics_cuda(self):\n self._test_scatter_basics(lambda t: t.clone().cuda())\n\n def _test_scatter_stress(self, inputs, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, self.opts(threads=8)\n )\n outputs = [\n [fn(torch.tensor([-1])) for _ in range(self.world_size)]\n for _ in range(len(inputs))\n ]\n future_handles = []\n for i in range(len(inputs)):\n for root in range(self.world_size):\n opts = c10d.ScatterOptions()\n opts.rootRank = root\n if root == self.rank:\n fut = pg.scatter(\n [outputs[i][root]], [[fn(e) for e in inputs[i]]], opts\n ).get_future()\n else:\n fut = pg.scatter([outputs[i][root]], [], opts).get_future()\n future_handles.append(fut)\n\n for i, future_handle in enumerate(future_handles):\n future_handle.wait()\n iter = i // self.world_size\n root = i % self.world_size\n result = future_handle.value()\n\n self.assertEqual(\n torch.tensor([iter + root]),\n result[0],\n msg=(\"Mismatch in iteration %d for rank %d\" % (iter, root)),\n )\n\n def test_scatter_stress(self):\n inputs = [\n [torch.tensor([i + self.rank]) for _ in range(self.world_size)]\n for i in range(1000)\n ]\n self._test_scatter_stress(inputs, lambda t: t.clone())\n\n 
@unittest.skip(\"Test is flaky, see https://github.com/pytorch/pytorch/issues/15963\")\n @skip_if_lt_x_gpu(2)\n def test_scatter_stress_cuda(self):\n inputs = [\n [torch.tensor([i + self.rank]) for _ in range(self.world_size)]\n for i in range(1000)\n ]\n self._test_scatter_stress(inputs, lambda t: t.clone().cuda())\n\n def test_gather_checks(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n t1 = torch.zeros([1], dtype=torch.float32)\n t2 = torch.zeros([1], dtype=torch.float64)\n t3 = torch.zeros([2], dtype=torch.float32)\n\n with self.assertRaisesRegex(ValueError, \"invalid root rank\"):\n opts = c10d.GatherOptions()\n opts.rootRank = -1\n pg.gather([], [t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid root rank\"):\n opts = c10d.GatherOptions()\n opts.rootRank = self.world_size\n pg.gather([], [t1], opts)\n\n with self.assertRaisesRegex(\n ValueError, \"requires a single-element input tensor list\"\n ):\n opts = c10d.GatherOptions()\n opts.rootRank = 0\n pg.gather([], [], opts)\n\n with self.assertRaisesRegex(\n ValueError, \"requires a single-element input tensor list\"\n ):\n opts = c10d.GatherOptions()\n opts.rootRank = 0\n pg.gather([], [t1, t1], opts)\n\n with self.assertRaisesRegex(\n ValueError, \"requires a single-element output list\"\n ):\n opts = c10d.GatherOptions()\n opts.rootRank = self.rank\n pg.gather([], [t1], opts)\n\n with self.assertRaisesRegex(\n ValueError, \"requires a single-element output list\"\n ):\n opts = c10d.GatherOptions()\n opts.rootRank = self.rank\n pg.gather([[t1] * self.world_size, [t1] * self.world_size], [t1], opts)\n\n desired_list_size = self.world_size\n incorrect_list_size = self.world_size - 1\n err_str = \"Incorrect output list size {}. 
Output list size should be {}\"\n with self.assertRaisesRegex(\n ValueError, err_str.format(incorrect_list_size, desired_list_size)\n ):\n opts = c10d.GatherOptions()\n opts.rootRank = self.rank\n pg.gather([[t1] * incorrect_list_size], [t1], opts)\n\n incorrect_list_size = self.world_size + 1\n with self.assertRaisesRegex(\n ValueError, err_str.format(incorrect_list_size, desired_list_size)\n ):\n opts = c10d.GatherOptions()\n opts.rootRank = self.rank\n pg.gather([[t1] * incorrect_list_size], [t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor type\"):\n opts = c10d.GatherOptions()\n opts.rootRank = self.rank\n pg.gather([[t2] * self.world_size], [t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor size\"):\n opts = c10d.GatherOptions()\n opts.rootRank = self.rank\n pg.gather([[t3] * self.world_size], [t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"requires empty output on non-root\"):\n opts = c10d.GatherOptions()\n opts.rootRank = (self.rank + 1) % self.world_size\n pg.gather([[t1] * self.world_size], [t1], opts)\n\n def _test_gather_basics(self, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n # Preallocate tensors for input/output\n input = [fn(torch.tensor([self.rank]))]\n outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]\n\n # Take turns being the gather root and accumulate work items\n futures = []\n for i in range(self.world_size):\n opts = c10d.GatherOptions()\n opts.rootRank = i\n if i == self.rank:\n futures.append(pg.gather([outputs], input, opts).get_future())\n else:\n futures.append(pg.gather([], input, opts).get_future())\n\n # Wait for work to complete\n expected = [torch.tensor([rank]) for rank in range(self.world_size)]\n for i in range(self.world_size):\n futures[i].wait()\n result = futures[i].value()\n if i == self.rank:\n self.assertEqual(expected, result)\n\n def test_gather_basics(self):\n self._test_gather_basics(lambda t: t.clone())\n\n @skip_if_lt_x_gpu(2)\n def test_gather_basics_cuda(self):\n self._test_gather_basics(lambda t: t.clone().cuda())\n\n def _test_gather_stress(self, inputs, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, self.opts(threads=8)\n )\n future_handles = []\n outputs = [\n [[fn(torch.tensor([-1])) for _ in range(self.world_size)]]\n for _ in range(len(inputs))\n ]\n expected_outputs = [\n [[torch.tensor([i + j]) for j in range(self.world_size)]]\n for i in range(len(inputs))\n ]\n for i in range(len(inputs)):\n for root in range(self.world_size):\n opts = c10d.GatherOptions()\n opts.rootRank = root\n if root == self.rank:\n fut = pg.gather(outputs[i], [fn(inputs[i])], opts).get_future()\n else:\n fut = pg.gather([], [fn(inputs[i])], opts).get_future()\n future_handles.append(fut)\n\n for i, future_handle in enumerate(future_handles):\n future_handle.wait()\n iter = i // self.world_size\n root = i % self.world_size\n if root == self.rank:\n result = future_handle.value()\n self.assertEqual(\n expected_outputs[iter],\n [result],\n msg=(\"Mismatch in iteration %d for root %d\" % (iter, root)),\n )\n\n def test_gather_stress(self):\n inputs = [torch.tensor([i + self.rank]) for i in range(1000)]\n self._test_gather_stress(inputs, lambda t: t.clone())\n\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_gather_stress_cuda(self):\n inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]\n 
self._test_gather_stress(inputs, lambda t: t.clone().cuda())\n\n def test_allgather_checks(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n t1 = torch.zeros([1], dtype=torch.float32)\n t2 = torch.zeros([1], dtype=torch.float64)\n t3 = torch.zeros([2], dtype=torch.float32)\n\n with self.assertRaisesRegex(ValueError, \"requires non-empty input tensor list\"):\n pg.allgather([], [])\n\n with self.assertRaisesRegex(\n ValueError, \"requires input/output tensor lists to have the same length\"\n ):\n pg.allgather([], [t1])\n\n with self.assertRaisesRegex(\n ValueError, \"requires input/output tensor lists to have the same length\"\n ):\n pg.allgather([[t1] * self.world_size, [t1] * self.world_size], [t1])\n\n with self.assertRaisesRegex(ValueError, \"invalid output tensor list\"):\n pg.allgather([[t1] * (self.world_size - 1)], [t1])\n\n with self.assertRaisesRegex(ValueError, \"invalid output tensor list\"):\n pg.allgather([[t1] * (self.world_size + 1)], [t1])\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor type\"):\n pg.allgather(\n [[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t2]\n )\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor size\"):\n pg.allgather(\n [[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t3]\n )\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor type\"):\n pg.allgather([([t1, t2] * (self.world_size))[: self.world_size]], [t1])\n\n with self.assertRaisesRegex(ValueError, \"invalid tensor size\"):\n pg.allgather([([t1, t3] * (self.world_size))[: self.world_size]], [t1])\n\n def _test_allgather_basics(self, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n # Run with N input tensor per rank\n for n in [1, 2, 3]:\n input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)]\n output = [\n [fn(torch.tensor([-1])) for _ in range(n * self.world_size)]\n for _ in range(n)\n ]\n expected_output = [\n [torch.tensor([i]) for i in range(n * self.world_size)]\n for _ in range(n)\n ]\n fut = pg.allgather(output, input).get_future()\n fut.wait()\n result = fut.value()\n if n == 1:\n result = [result]\n self.assertEqual(expected_output, result)\n\n def test_allgather_basics(self):\n self._test_allgather_basics(lambda t: t.clone())\n\n @skip_if_lt_x_gpu(2)\n def test_allgather_basics_cuda(self):\n self._test_allgather_basics(lambda t: t.clone().cuda())\n\n def _test_allgather_stress(self, inputs, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, self.opts(threads=8)\n )\n future_handles = []\n outputs = [\n [[fn(torch.tensor([-1])) for _ in range(self.world_size)]]\n for _ in range(len(inputs))\n ]\n expected_outputs = [\n [[torch.tensor([i + j]) for j in range(self.world_size)]]\n for i in range(len(inputs))\n ]\n for i in range(len(inputs)):\n fut = pg.allgather(outputs[i], [fn(inputs[i])]).get_future()\n future_handles.append(fut)\n\n for i, future_handle in enumerate(future_handles):\n future_handle.wait()\n result = future_handle.value()\n self.assertEqual(\n expected_outputs[i],\n [result],\n msg=(\"Mismatch in iteration %d\" % i),\n )\n\n def test_allgather_stress(self):\n inputs = [torch.tensor([i + self.rank]) for i in range(1000)]\n self._test_allgather_stress(inputs, lambda t: t.clone())\n\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def 
test_allgather_stress_cuda(self):\n inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]\n self._test_allgather_stress(inputs, lambda t: t.clone().cuda())\n\n def test_allgather_coalesced_checks(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n dummy_input = [torch.zeros([1], dtype=torch.float32)]\n dummy_output_lists = [\n [torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size)\n ]\n\n # One of output tensors does not match input list.\n dummy_output_lists[0] = [torch.zeros([0], dtype=torch.float32)]\n with self.assertRaisesRegex(\n ValueError, \"invalid size of output tensor at index 0\"\n ):\n c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)\n\n # One of output tensors does not match input list.\n dummy_output_lists[0] = [torch.zeros([1], dtype=torch.float64)]\n with self.assertRaisesRegex(ValueError, \"invalid tensor type at index 0\"):\n c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)\n\n # Output lists have too many elements\n dummy_output_lists = [\n [torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size + 1)\n ]\n with self.assertRaisesRegex(\n ValueError, \"output lists should be equal to world size\"\n ):\n c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)\n\n # Output is not a list of lists.\n dummy_output_lists = [torch.zeros([0], dtype=torch.float32)]\n with self.assertRaisesRegex(\n RuntimeError, \"Invalid function argument.*output_tensor_lists\"\n ):\n c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)\n\n def test_reduce_checks(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n t1 = torch.zeros([1], dtype=torch.float32)\n\n with self.assertRaisesRegex(ValueError, \"invalid root rank\"):\n opts = c10d.ReduceOptions()\n opts.rootRank = -1\n opts.rootTensor = 0\n pg.reduce([t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid root rank\"):\n opts = c10d.ReduceOptions()\n opts.rootRank = self.world_size\n opts.rootTensor = 0\n pg.reduce([t1], opts)\n\n with self.assertRaisesRegex(ValueError, \"invalid root tensor\"):\n opts = c10d.ReduceOptions()\n opts.rootRank = self.rank\n opts.rootTensor = 1\n pg.reduce([t1], opts)\n\n with self.assertRaisesRegex(\n ValueError, \"requires a single-element tensor list\"\n ):\n opts = c10d.ReduceOptions()\n opts.rootRank = self.rank\n opts.rootTensor = 0\n pg.reduce([t1, t1], opts)\n\n def _test_reduce_basics(self, fn):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):\n for root in range(self.world_size):\n opts = c10d.ReduceOptions()\n opts.reduceOp = op\n opts.rootRank = root\n tmp = fn(input)\n fut = pg.reduce([tmp], opts).get_future()\n fut.wait()\n result = fut.value()\n if root == self.rank:\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(output, result[0])\n\n def test_reduce_basics(self):\n self._test_reduce_basics(lambda t: t.clone())\n\n @skip_if_lt_x_gpu(2)\n def test_reduce_basics_cuda(self):\n self._test_reduce_basics(lambda t: t.clone().cuda())\n\n def _test_reduce_stress(self, inputs):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, self.opts(threads=8)\n )\n future_handles = []\n outputs = []\n for i in range(len(inputs)):\n for root in range(self.world_size):\n opts = c10d.ReduceOptions()\n opts.rootRank = root\n tmp = inputs[i].clone()\n outputs.append(tmp)\n fut = pg.reduce([tmp], opts).get_future()\n future_handles.append(fut)\n\n for i, future_handle in enumerate(future_handles):\n future_handle.wait()\n result = future_handle.value()\n iter = i // self.world_size\n root = i % self.world_size\n if root == self.rank:\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(\n torch.tensor(\n [\n (iter * self.world_size)\n + (self.world_size * (self.world_size - 1) / 2)\n ]\n ),\n result[0],\n msg=(\"Mismatch in iteration %d with root rank %d\" % (iter, root)),\n )\n\n def test_reduce_stress(self):\n inputs = [torch.tensor([i + self.rank]) for i in range(1000)]\n self._test_reduce_stress(inputs)\n\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_reduce_stress_cuda(self):\n inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]\n self._test_reduce_stress(inputs)\n\n def test_send_recv_all_to_all(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n # Preallocate tensors for input/output\n inputs = [torch.tensor([self.rank]) for _ in range(self.world_size)]\n outputs = [torch.tensor([-1]) for _ in range(self.world_size)]\n\n # Issue sends\n send_work = []\n for i in range(self.world_size):\n if i == self.rank:\n continue\n send_work.append(pg.send([inputs[i]], i, 0))\n\n # Issue recvs\n recv_work = []\n for i in range(self.world_size):\n if i == self.rank:\n continue\n recv_work.append(pg.recv([outputs[i]], i, 0))\n\n # Wait for sends to complete\n for work in send_work:\n work.wait()\n self.assertTrue(work.is_completed())\n\n # Wait for recvs to complete\n for work in recv_work:\n work.wait()\n self.assertTrue(work.is_completed())\n\n # Test that every output other than our own contains the respective rank\n for i in range(self.world_size):\n if i == self.rank:\n continue\n self.assertEqual(torch.tensor([i]), outputs[i])\n\n def test_barrier_implies_wait(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())\n\n # Kick off allreduce operations\n size = (100, 100)\n num = 16\n tensors = [torch.full(size, float(i)) for i in range(num)]\n for tensor in tensors:\n # Note: leak the returned work handle\n pg.allreduce(tensor)\n\n # Barrier should ensure all previous work has completed\n pg.barrier().get_future().wait()\n\n for i, tensor in enumerate(tensors):\n self.assertEqual(torch.full(size, float(i * self.world_size)), tensor)\n\n @skip_if_win32()\n def test_round_robin(self):\n num_process_groups = 2\n store = c10d.FileStore(self.file_name, self.world_size)\n pg = c10d._round_robin_process_groups(\n [\n c10d.ProcessGroupGloo(\n c10d.PrefixStore(str(i), store), self.rank, self.world_size, self.opts()\n )\n for i in range(num_process_groups)\n ]\n )\n\n # Run a few 
collectives so that we have called each process group\n for _ in range(num_process_groups + 1):\n tensor = torch.full([100, 100], float(self.rank))\n pg.broadcast(tensor, root=0).wait()\n self.assertEqual(torch.full([100, 100], 0.0), tensor)\n\n @skip_if_win32()\n def test_round_robin_create_destroy(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n\n def create(num, prefix):\n return c10d._round_robin_process_groups(\n [\n c10d.ProcessGroupGloo(\n c10d.PrefixStore(\"%s/%d\" % (prefix, i), store),\n self.rank,\n self.world_size,\n self.opts()\n )\n for i in range(num)\n ]\n )\n\n # Run create/use/destroy twice\n for i in range(2):\n num_process_groups = 2\n pg = create(num=num_process_groups, prefix=i)\n for _ in range(3):\n tensor = torch.ones([10, 10])\n pg.allreduce(tensor).wait()\n self.assertEqual(torch.full([10, 10], float(self.world_size)), tensor)\n del pg\n\n\[email protected](\n TEST_WITH_TSAN,\n \"TSAN is not fork-safe since we're forking in a multi-threaded environment\",\n)\nclass DistributedDataParallelTest(test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase):\n\n def setUp(self):\n super(DistributedDataParallelTest, self).setUp()\n if sys.platform == \"win32\":\n self._spawn_processes()\n else:\n self._fork_processes()\n\n def _test_gloo_backend(\n self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False\n ):\n store = c10d.FileStore(self.file_name, self.world_size)\n options = c10d.ProcessGroupGloo._Options()\n options._devices = [create_device(interface=LOOPBACK)]\n process_group = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, options\n )\n self._test_ddp_with_process_group(\n process_group, devices, device_ids, multi_device, gradient_as_bucket_view\n )\n\n @requires_gloo()\n def test_gloo_backend_cpu_module(self):\n self._test_gloo_backend([torch.device(\"cpu\")], None)\n\n @requires_gloo()\n def test_gloo_backend_cpu_module_grad_is_view(self):\n self._test_gloo_backend([torch.device(\"cpu\")], None, gradient_as_bucket_view=True)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_gloo_backend_1gpu_module_device_ids_integer_list(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:1]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n self._test_gloo_backend(devices, int_devices)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:1]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n self._test_gloo_backend(devices, devices)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(4)\n def test_gloo_backend_2gpu_module(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:2]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n self._test_gloo_backend(devices, None, multi_device=True)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(8)\n def test_gloo_backend_4gpu_module(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:4]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n self._test_gloo_backend(devices, None, multi_device=True)\n\n def _test_global_local_unused_params_grad(self, gradient_as_bucket_view=False, static_graph=False):\n \"\"\"\n By simulating a multi-task training, this test is to make sure:\n 1) DDP does not touch the grad of globally unused parameters.\n 2) DDP does update the grad of locally unused parameters.\n \"\"\"\n\n class GlobalLocalUnusedParamModule(nn.Module):\n 
def __init__(self):\n super(GlobalLocalUnusedParamModule, self).__init__()\n self.t0 = Task()\n self.t1 = Task()\n self.task_unused = Task()\n\n def task_parameters(self):\n return (self.t0.p, self.t1.p, self.task_unused.p)\n\n def forward(self, x, rank):\n return self.t0(x) if rank == 0 else self.t1(x)\n\n def run_and_verify_grad(model):\n # Run forward\n output = model(8, self.rank)\n\n # The grads of all parameters should be None at this point.\n t0_p, t1_p, task_unused_p = model.module.task_parameters()\n self.assertIsNone(t0_p.grad)\n self.assertIsNone(t1_p.grad)\n self.assertIsNone(task_unused_p.grad)\n\n # Run backward\n output.mean().backward()\n\n # Now locally unused parameter should have grad updated on all ranks.\n # However the globally unused parameter should still have None grad.\n self.assertIsNotNone(t0_p.grad)\n self.assertIsNotNone(t1_p.grad)\n self.assertIsNone(task_unused_p.grad)\n\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n # Test on CPU\n cpu_model = DistributedDataParallel(\n GlobalLocalUnusedParamModule().cpu(),\n process_group=process_group,\n find_unused_parameters=True,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n if static_graph:\n cpu_model._set_static_graph()\n run_and_verify_grad(cpu_model)\n\n # Test on GPU\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n gpu_model = DistributedDataParallel(\n GlobalLocalUnusedParamModule().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n find_unused_parameters=True,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n if static_graph:\n gpu_model._set_static_graph()\n run_and_verify_grad(gpu_model)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_global_local_unused_params_grad(self):\n self._test_global_local_unused_params_grad()\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_global_local_unused_params_grad_with_grad_is_view(self):\n self._test_global_local_unused_params_grad(gradient_as_bucket_view=True)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_global_local_unused_params_grad_with_static_graph(self):\n self._test_global_local_unused_params_grad(static_graph=True)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_find_unused_parameters_when_unused_parameters_empty(self):\n \"\"\"\n An empty unused_parameters array does not imply find_unused_parameters =\n false. 
This test makes sure that DDP allreduces unused parameters\n accordingly where the forward pass in some process uses all parameters.\n This unit test creates a module that uses all parameters in rank = 0, and\n has unused parameters in other ranks.\n \"\"\"\n\n class FindUnusedParamModule(nn.Module):\n def __init__(self):\n super(FindUnusedParamModule, self).__init__()\n self.t0 = Task()\n self.t1 = Task()\n\n def task_parameters(self):\n return (self.t0.p, self.t1.p)\n\n def forward(self, x, rank):\n return self.t1(self.t0(x)) if rank == 0 else self.t1(x)\n\n def run_and_verify_grad(model):\n # Run forward\n output = model(8, self.rank)\n\n # The grads of all parameters should be None at this point.\n [self.assertIsNone(t_p.grad) for t_p in model.module.task_parameters()]\n\n # Run backward\n output.mean().backward()\n\n # Now locally unused parameter should have grad updated on all ranks.\n [self.assertIsNotNone(t_p.grad) for t_p in model.module.task_parameters()]\n\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n # Test on CPU\n cpu_model = DistributedDataParallel(\n FindUnusedParamModule().cpu(),\n process_group=process_group,\n find_unused_parameters=True,\n )\n run_and_verify_grad(cpu_model)\n\n # Test on GPU\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n gpu_model = DistributedDataParallel(\n FindUnusedParamModule().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n find_unused_parameters=True,\n )\n run_and_verify_grad(gpu_model)\n\n @requires_gloo()\n def test_ignored_output(self):\n \"\"\"\n Test that the output of a model can be ignored and that there is no\n implicit requirement that `backward` gets called.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n class IgnoredOutput(nn.Module):\n def __init__(self):\n super(IgnoredOutput, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n return F.softmax(x, dim=1)\n\n model = DistributedDataParallel(\n IgnoredOutput().float(),\n process_group=process_group,\n )\n\n batch_size = 4\n criterion = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.float)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])\n\n # Run a few iterations where we ignore the output.\n for _ in range(4):\n output = model(input)\n del output\n\n # Run a few iterations where we use the output.\n for _ in range(4):\n output = model(input)\n loss = criterion(output, target)\n loss.backward()\n\n @requires_gloo()\n def test_ignored_output_with_unused_parameters(self):\n \"\"\"\n Test that the output of a model can be ignored and that there is no\n implicit requirement that `backward` gets called, if not all model\n parameters participated in computing the model output.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n class IgnoredOutputWithUnusedParameters(nn.Module):\n def __init__(self):\n super(IgnoredOutputWithUnusedParameters, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.fc3 = nn.Linear(4, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x 
= self.relu(self.fc2(x))\n return F.softmax(x, dim=1)\n\n model = DistributedDataParallel(\n IgnoredOutputWithUnusedParameters().float(),\n process_group=process_group,\n find_unused_parameters=True,\n )\n\n batch_size = 4\n criterion = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.float)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])\n\n # Run a few iterations where we ignore the output.\n for _ in range(4):\n output = model(input)\n del output\n\n # Run a few iterations where we use the output.\n for _ in range(4):\n output = model(input)\n loss = criterion(output, target)\n loss.backward()\n\n def _run_and_verify_sparse_gradients(self, vanilla_model, ddp_model):\n mult = 2\n batch_size = mult * self.world_size\n criterion = nn.CrossEntropyLoss()\n input = torch.randint(0, 10, [batch_size, 2])\n target = torch.randint(0, 10, [batch_size])\n\n # Run with entire batch against single process version\n criterion(vanilla_model(input), target).backward()\n\n # Run with partial batch against multi process version\n partial_input = input.split(mult)[self.rank]\n partial_target = target.split(mult)[self.rank]\n criterion(ddp_model(partial_input), partial_target).backward()\n\n # Check that the gradients are sparse and identical\n vanilla_parameter = next(vanilla_model.parameters())\n ddp_parameter = next(ddp_model.parameters())\n self.assertEqual(vanilla_parameter.grad, ddp_parameter.grad)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_save_load_checkpoint(self):\n dist.init_process_group(\n \"gloo\",\n init_method=f\"file://{self.file_name}\",\n world_size=self.world_size,\n rank=self.rank,\n )\n\n class TestModel(nn.Module):\n def __init__(self):\n super(TestModel, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n return F.softmax(x, dim=1)\n\n def train_loop(model, optimizer, iterations):\n for _ in range(iterations):\n optimizer.zero_grad()\n output = model(input)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n\n model_withload = TestModel().float().to(device_id)\n model_withoutload = TestModel().float().to(device_id)\n\n ddp_withload = DistributedDataParallel(\n model_withload,\n device_ids=[device_id],\n )\n ddp_withoutload = DistributedDataParallel(\n model_withoutload,\n device_ids=[device_id],\n )\n\n # ensure that all the three models start with the same set of parameters. 
By default they are randomized on construction\n for p in ddp_withload.parameters():\n with torch.no_grad():\n p.zero_()\n for p in model_withload.parameters():\n with torch.no_grad():\n p.zero_()\n for p in ddp_withoutload.parameters():\n with torch.no_grad():\n p.zero_()\n\n batch_size = 4\n criterion = nn.CrossEntropyLoss()\n\n optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001)\n optimizer_non_ddp_withload = torch.optim.SGD(model_withload.parameters(), lr=0.001)\n optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001)\n\n input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(\n device_id\n )\n\n # run the model for 6 iterations, with a checkpoint in the middle\n train_loop(ddp_withload, optimizer_withload, 3)\n\n # zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict\n checkpoint_path = tempfile.gettempdir() + \"/model.checkpoint\"\n if self.rank == 0:\n torch.save(ddp_withload.state_dict(), checkpoint_path)\n\n dist.barrier()\n map_location = {\"cuda:%d\" % 0: \"cuda:%d\" % self.rank}\n ddp_state_dict = torch.load(checkpoint_path, map_location=map_location)\n\n for model in [ddp_withload, model_withload]:\n for p in ddp_withload.parameters():\n with torch.no_grad():\n p.zero_()\n ddp_withload.load_state_dict(ddp_state_dict)\n # the non-DDP model needs to first remove the prefix of \"module.\" from the DDP state dict\n torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, \"module.\")\n model_withload.load_state_dict(ddp_state_dict)\n\n train_loop(ddp_withload, optimizer_withload, 3)\n train_loop(model_withload, optimizer_non_ddp_withload, 3)\n\n # re-run the model with the same inputs for 6 iterations with no checkpoint\n train_loop(ddp_withoutload, optimizer_withoutload, 6)\n\n for p_withload, p_withoutload, p_non_ddp_withload in zip(\n ddp_withload.parameters(), ddp_withoutload.parameters(), model_withload.parameters()\n ):\n self.assertEqual(p_withload, p_withoutload)\n self.assertEqual(p_non_ddp_withload, p_withoutload)\n\n def _test_sparse_gradients(self, gradient_as_bucket_view=False):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n # Ensure initialized weights and inputs are identical across processes\n torch.manual_seed(1337)\n\n vanilla_model = SparseGradientModule()\n ddp_model = DistributedDataParallel(\n copy.deepcopy(vanilla_model),\n process_group=process_group,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)\n\n @requires_gloo()\n def test_sparse_gradients(self):\n self._test_sparse_gradients()\n\n @requires_gloo()\n def test_sparse_gradients_grad_is_view(self):\n self._test_sparse_gradients(gradient_as_bucket_view=True)\n\n @requires_gloo()\n def test_ddp_comm_hook_future_passing_cpu(self):\n \"\"\"\n This unit test verifies whether the Future object is passed properly.\n The callback function creates a Future object and sets a value to it.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n # Test on CPU\n cpu_model = DistributedDataParallel(\n ModuleForDdpCommHook().cpu(), process_group=process_group\n )\n\n # Register DDP Communication Hook\n cpu_model.register_comm_hook(None, self._simple_hook)\n\n # check 
whether the grads are equal to what then callback returns.\n # without the comm_hook, result would be 0.25 * torch.ones(2, 2).\n self._run_and_verify_hook(cpu_model, 8, 2 * torch.ones(2, 2))\n\n def _gpu_model_with_ddp_comm_hook(\n self, process_group, hook=None, gradient_as_bucket_view=False, state=None\n ):\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n gpu_model = DistributedDataParallel(\n ModuleForDdpCommHook().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n # Register a DDP communication hook if any.\n if hook is not None:\n gpu_model.register_comm_hook(state, hook)\n\n return gpu_model\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_ddp_comm_hook_future_passing_gpu_gloo(self):\n \"\"\"\n This unit test verifies whether the Future object is passed properly using gloo backend.\n The hook callback function creates a Future object and sets a value to it.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n # Get GPU model with simple_hook registered.\n gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)\n\n # check whether the grads are equal to what simple_hook's then callback returns.\n # without the comm_hook, result would be 0.25 * torch.ones(2, 2).\n self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))\n\n @requires_gloo()\n def test_ddp_invalid_comm_hook_init(self):\n \"\"\"\n This unit test makes sure that register_comm_hook properly checks the format\n of hook defined by user. The Python hook must be callable. This test also\n checks whether bucket annotation checked properly if defined.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n model = DistributedDataParallel(\n ModuleForDdpCommHook(), process_group=process_group\n )\n\n with self.assertRaisesRegex(TypeError, \"Communication hook must be callable.\"):\n model.register_comm_hook(state=None, hook=1)\n\n with self.assertRaisesRegex(\n ValueError, \"bucket annotation should be dist.GradBucket.\"\n ):\n def comm_hook(state: object, bucket: int) -> torch.futures.Future:\n return torch.futures.Future()\n\n model.register_comm_hook(state=None, hook=comm_hook)\n\n @requires_gloo()\n def test_ddp_invalid_comm_hook_return_type(self):\n \"\"\"\n This test checks whether return annotation checked properly if defined. 
It also\n checks whether an internal error is thrown if return type is incorrect and user\n hasn't specified any return type annotation.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n model = DistributedDataParallel(\n ModuleForDdpCommHook(), process_group=process_group\n )\n\n expected_err = \"Communication hook: return annotation should be torch.futures.Future or torch._C.Future.\"\n with self.assertRaisesRegex(\n ValueError,\n expected_err,\n ):\n def comm_hook(state: object, bucket: dist.GradBucket) -> int:\n return torch.futures.Future()\n\n model.register_comm_hook(state=None, hook=comm_hook)\n\n verify_ddp_error_logged(model, expected_err)\n\n with self.assertRaisesRegex(\n RuntimeError,\n \"callback must return a torch.futures.Future or torch._C.Future object, but got\",\n ):\n def comm_hook(state: object, bucket: dist.GradBucket):\n return 1\n\n model.register_comm_hook(state=None, hook=comm_hook)\n\n # Run forward\n output = model(8, self.rank)\n\n # Run backward\n output.mean().backward()\n\n @requires_gloo()\n def test_ddp_comm_hook_register_just_once(self):\n \"\"\"\n DDP communication hook can only be registered once. This test validates whether\n the error is thrown properly when register_comm_hook is called more than once.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n model = DistributedDataParallel(\n ModuleForDdpCommHook(), process_group=process_group\n )\n\n def dummy_hook(state, bucket):\n fut = torch.futures.Future()\n fut.set_result([bucket.get_tensor()])\n return fut\n\n model.register_comm_hook(None, dummy_hook)\n\n with self.assertRaisesRegex(\n RuntimeError,\n \"register_comm_hook or register_builtin_comm_hook can only be called once.\",\n ):\n model.register_comm_hook(None, dummy_hook)\n\n @requires_gloo()\n def test_ddp_comm_hook_sparse_gradients(self):\n \"\"\"\n Runs \"test_sparse_gradients\" unit test with DDP communication hook. 
We define a\n simple hook that does allreduce and works with gloo backend for this test.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)\n\n # Ensure initialized weights and inputs are identical across processes\n torch.manual_seed(1337)\n\n vanilla_model = SparseGradientModule()\n ddp_model = DistributedDataParallel(\n copy.deepcopy(vanilla_model),\n process_group=process_group,\n )\n\n def allreduce_hook_gloo(\n state: object, bucket: dist.GradBucket\n ) -> torch.futures.Future:\n def div_by_world_size(fut):\n # Divide the result by 2 * world_size.\n return [t / self.world_size for t in fut.wait()]\n\n # Prepare allreduced grad bucket tensors by running an async work.\n fut = process_group.allreduce([bucket.get_tensor()]).get_future()\n return fut.then(div_by_world_size)\n\n ddp_model.register_comm_hook(None, allreduce_hook_gloo)\n\n self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)\n\n\nclass ReducerModule(nn.Module):\n def __init__(self):\n super(ReducerModule, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.fc3 = nn.Linear(4, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x, use_fc3=True):\n x = self.relu(self.fc1(x)).float()\n x = self.relu(self.fc2(x)).float()\n if use_fc3:\n x = self.fc3(x).float()\n return F.softmax(x, dim=1)\n\n\n@requires_gloo()\nclass ReducerTest(TestCase):\n def setUp(self):\n self.file = tempfile.NamedTemporaryFile(delete=False)\n self.store = c10d.FileStore(self.file.name, 1)\n self.process_group = c10d.ProcessGroupGloo(self.store, 0, 1)\n\n def test_single_dtype_single_bucket(self):\n model = ReducerModule()\n parameters = list(model.parameters())\n buckets = [list(range(len(parameters)))]\n dist.Reducer([parameters], buckets, self.process_group)\n\n def _create_mixed_precision_model(self):\n model = ReducerModule()\n model.float()\n model.fc1.double()\n return model\n\n def test_multi_dtype_single_bucket(self):\n model = self._create_mixed_precision_model()\n\n # Raise if there are multiple types per bucket.\n # In this case we create one bucket for all parameters.\n with self.assertRaises(RuntimeError):\n parameters = [list(model.parameters())]\n buckets = [list(range(len(parameters[0])))]\n dist.Reducer(parameters, buckets, self.process_group)\n\n def test_multi_dtype_multi_bucket(self):\n model = self._create_mixed_precision_model()\n parameters = [list(model.parameters())]\n group_by_dtype = groupby(\n range(len(parameters[0])), key=lambda i: parameters[0][i].dtype\n )\n buckets = [list(indices) for _, indices in group_by_dtype]\n dist.Reducer(parameters, buckets, self.process_group)\n\n def _create_reducer_for_models(self, models, find_unused_parameters=False):\n parameters = [list(model.parameters()) for model in models]\n group_by_dtype = groupby(\n range(len(parameters[0])), key=lambda i: parameters[0][i].dtype\n )\n buckets = [list(indices) for _, indices in group_by_dtype]\n return dist.Reducer(\n parameters,\n buckets,\n self.process_group,\n find_unused_parameters=find_unused_parameters,\n )\n\n def test_reducer_no_multi_replicas(self):\n num_replicas = 2\n models = [self._create_mixed_precision_model() for _ in range(num_replicas)]\n with self.assertRaisesRegex(\n RuntimeError,\n \"Expected exactly one model replica.\",\n ):\n reducer = self._create_reducer_for_models(models)\n\n def test_forward_backward(self):\n batch_size = 10\n model = 
self._create_mixed_precision_model()\n reducer = self._create_reducer_for_models([model])\n reducer.prepare_for_forward()\n loss = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.double)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])\n output = loss(model(input), target)\n reducer.prepare_for_backward(output)\n output.backward()\n\n def test_forward_backward_unused_parameters(self):\n batch_size = 10\n model = self._create_mixed_precision_model()\n reducer = self._create_reducer_for_models([model], find_unused_parameters=True)\n reducer.prepare_for_forward()\n loss = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.double)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])\n output = loss(model(input, use_fc3=False), target)\n\n # Check that the grad of fc3 is not set.\n self.assertEqual(None, model.fc3.weight.grad)\n\n # Compute and accumulate gradients.\n reducer.prepare_for_backward(output)\n output.backward()\n\n # The reducer will have marked the grad of fc3 as ready, because\n # it doesn't show up in the autograd graph of `output`. Since fc3.weight\n # is considered being globally unused, it will be kept untouched as None.\n self.assertEqual(None, model.fc3.weight.grad)\n\n def test_forward_backward_optimizer(self):\n batch_size = 10\n model = self._create_mixed_precision_model()\n reducer = self._create_reducer_for_models([model], find_unused_parameters=True)\n reducer.prepare_for_forward()\n loss = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters())\n for i in range(3):\n input = torch.rand([batch_size, 2], dtype=torch.double)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])\n\n # The `zero_grad` function calls `detach_` and `zero_` on the grad\n # tensors of model parameters. 
If we tried to set the grad tensors\n # to a view of the reducer's bucket tensors, this would blow up.\n optimizer.zero_grad()\n\n # Unused parameter only in the first iteration.\n output = loss(model(input, use_fc3=(i > 0)), target)\n reducer.prepare_for_backward(output)\n output.backward()\n optimizer.step()\n\n\[email protected](\n TEST_WITH_TSAN,\n \"TSAN is not fork-safe since we're forking in a multi-threaded environment\",\n)\nclass CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):\n\n def setUp(self):\n super(CommTest, self).setUp()\n if sys.platform == \"win32\":\n self._spawn_processes()\n else:\n self._fork_processes()\n\n def tearDown(self):\n super(CommTest, self).tearDown()\n try:\n os.remove(self.file_name)\n except OSError:\n pass\n\n def _test_broadcast_coalesced(self, process_group, device, root_rank):\n half = torch.float16\n\n # No support for float16 for CPU tensors\n if device == torch.device(\"cpu\"):\n half = torch.float32\n\n target = torch.arange(60, dtype=half, device=device).chunk(5)\n target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)\n target += torch.arange(60, dtype=half, device=device).chunk(5)\n target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)\n target += torch.arange(60, dtype=half, device=device).chunk(5)\n target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)\n\n # The tensors to pass to broadcast are idential to the target\n # only on the process that is the root of the broadcast.\n if self.rank == root_rank:\n tensors = list(tensor.clone() for tensor in target)\n else:\n tensors = list(torch.zeros_like(tensor) for tensor in target)\n\n if self.rank != root_rank:\n self.assertNotEqual(tensors, target)\n\n c10d._broadcast_coalesced(\n process_group, tensors, buffer_size=256, src=root_rank\n )\n\n if self.rank != root_rank:\n self.assertEqual(tensors, target)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_broadcast_coalesced_gloo_cuda(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n options = c10d.ProcessGroupGloo._Options()\n options._devices = [create_device(interface=LOOPBACK)]\n process_group = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, options\n )\n device = torch.device(\"cuda:%d\" % self.rank)\n ranks = list(range(self.world_size))\n for root_rank in ranks:\n self._test_broadcast_coalesced(process_group, device, root_rank)\n\n @requires_gloo()\n def test_broadcast_coalesced_gloo_cpu(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n options = c10d.ProcessGroupGloo._Options()\n options._devices = [create_device(interface=LOOPBACK)]\n process_group = c10d.ProcessGroupGloo(\n store, self.rank, self.world_size, options\n )\n device = torch.device(\"cpu\")\n ranks = list(range(self.world_size))\n for root_rank in ranks:\n self._test_broadcast_coalesced(process_group, device, root_rank)\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_sequence_num_set_default_pg_gloo(self):\n self._test_sequence_num_set_default_pg(backend=\"gloo\")\n\n @requires_gloo()\n @skip_if_lt_x_gpu(2)\n def test_sequence_num_set_gloo_new_group(self):\n self._test_sequence_num_set_new_group(backend=\"gloo\")\n\n @skip_if_lt_x_gpu(2)\n @requires_gloo()\n def test_sequence_num_incremented_gloo_default(self):\n self._test_sequence_num_incremented_default_group(\"gloo\")\n\n @skip_if_lt_x_gpu(4)\n @requires_gloo()\n def test_sequence_num_incremented_gloo_subgroup(self):\n if self.world_size < 4:\n return unittest.skip(\"Test requires 
world_size of at least 4\")\n self._test_sequence_num_incremented_subgroup(\"gloo\")\n\n @requires_gloo()\n def test_gloo_barrier_device_ids(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n c10d.init_process_group(\n backend=\"gloo\", rank=self.rank, world_size=self.world_size, store=store\n )\n\n with self.assertRaisesRegex(RuntimeError, \"device_ids not supported\"):\n c10d.barrier(device_ids=[self.rank])\n\n\nif __name__ == \"__main__\":\n assert (\n not torch.cuda._initialized\n ), \"test_distributed must not have initialized CUDA context on main process\"\n\n run_tests()\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.randint",
"torch.testing._internal.common_utils.find_free_port",
"torch.zeros",
"torch.load",
"torch.testing._internal.common_distributed.verify_ddp_error_logged",
"torch.distributed.PrefixStore",
"torch.sparse_coo_tensor",
"torch.FloatTensor",
"torch.no_grad",
"torch.device",
"torch.distributed._broadcast_coalesced",
"torch.testing._internal.common_distributed.requires_gloo",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.distributed.init_process_group",
"torch.distributed.ReduceOptions",
"torch.futures.Future",
"torch.distributed.ProcessGroupGloo._Options",
"torch.tensor",
"torch.distributed.FileStore",
"torch.distributed.barrier",
"torch.distributed.BroadcastOptions",
"torch.testing._internal.common_distributed.simple_sparse_reduce_tests",
"torch.distributed.ScatterOptions",
"torch.rand",
"torch.nn.modules.utils.consume_prefix_in_state_dict_if_present",
"torch.arange",
"torch.testing._internal.common_distributed.skip_if_lt_x_gpu",
"torch.distributed.AllreduceOptions",
"torch.distributed.all_gather_coalesced",
"torch.full",
"torch.testing._internal.common_distributed.create_device",
"torch.zeros_like",
"torch.nn.Linear",
"torch.distributed.is_available",
"torch.distributed.AllreduceCoalescedOptions",
"torch.distributed.destroy_process_group",
"torch.testing._internal.common_utils.run_tests",
"torch.nn.parallel.DistributedDataParallel",
"torch.testing._internal.common_distributed.skip_if_win32",
"torch.manual_seed",
"torch.distributed.ProcessGroupGloo",
"torch.nn.ReLU",
"torch.distributed.Reducer",
"torch.distributed.GatherOptions"
]
] |
zhangyingying94/models
|
[
"bf46247b4207698bbeb315d9086eb81662015359"
] |
[
"adversarial_text/data/data_utils.py"
] |
[
"# Copyright 2017 Google, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utilities for generating/preprocessing data for adversarial text models.\"\"\"\n\nimport operator\nimport os\nimport random\nimport re\nimport tensorflow as tf\n\nEOS_TOKEN = '</s>'\n\n# Data filenames\n# Sequence Autoencoder\nALL_SA = 'all_sa.tfrecords'\nTRAIN_SA = 'train_sa.tfrecords'\nTEST_SA = 'test_sa.tfrecords'\n# Language Model\nALL_LM = 'all_lm.tfrecords'\nTRAIN_LM = 'train_lm.tfrecords'\nTEST_LM = 'test_lm.tfrecords'\n# Classification\nTRAIN_CLASS = 'train_classification.tfrecords'\nTEST_CLASS = 'test_classification.tfrecords'\nVALID_CLASS = 'validate_classification.tfrecords'\n# LM with bidirectional LSTM\nTRAIN_REV_LM = 'train_reverse_lm.tfrecords'\nTEST_REV_LM = 'test_reverse_lm.tfrecords'\n# Classification with bidirectional LSTM\nTRAIN_BD_CLASS = 'train_bidir_classification.tfrecords'\nTEST_BD_CLASS = 'test_bidir_classification.tfrecords'\nVALID_BD_CLASS = 'validate_bidir_classification.tfrecords'\n\n\nclass ShufflingTFRecordWriter(object):\n \"\"\"Thin wrapper around TFRecordWriter that shuffles records.\"\"\"\n\n def __init__(self, path):\n self._path = path\n self._records = []\n self._closed = False\n\n def write(self, record):\n assert not self._closed\n self._records.append(record)\n\n def close(self):\n assert not self._closed\n random.shuffle(self._records)\n with tf.python_io.TFRecordWriter(self._path) as f:\n for record in self._records:\n f.write(record)\n self._closed = True\n\n def __enter__(self):\n return self\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n self.close()\n\n\nclass Timestep(object):\n \"\"\"Represents a single timestep in a SequenceWrapper.\"\"\"\n\n def __init__(self, token, label, weight, multivalent_tokens=False):\n \"\"\"Constructs Timestep from empty Features.\"\"\"\n self._token = token\n self._label = label\n self._weight = weight\n self._multivalent_tokens = multivalent_tokens\n self._fill_with_defaults()\n\n @property\n def token(self):\n if self._multivalent_tokens:\n raise TypeError('Timestep may contain multiple values; use `tokens`')\n return self._token.int64_list.value[0]\n\n @property\n def tokens(self):\n return self._token.int64_list.value\n\n @property\n def label(self):\n return self._label.int64_list.value[0]\n\n @property\n def weight(self):\n return self._weight.float_list.value[0]\n\n def set_token(self, token):\n if self._multivalent_tokens:\n raise TypeError('Timestep may contain multiple values; use `add_token`')\n self._token.int64_list.value[0] = token\n return self\n\n def add_token(self, token):\n self._token.int64_list.value.append(token)\n return self\n\n def set_label(self, label):\n self._label.int64_list.value[0] = label\n return self\n\n def set_weight(self, weight):\n self._weight.float_list.value[0] = weight\n return self\n\n def copy_from(self, timestep):\n 
self.set_token(timestep.token).set_label(timestep.label).set_weight(\n timestep.weight)\n return self\n\n def _fill_with_defaults(self):\n if not self._multivalent_tokens:\n self._token.int64_list.value.append(0)\n self._label.int64_list.value.append(0)\n self._weight.float_list.value.append(0.0)\n\n\nclass SequenceWrapper(object):\n \"\"\"Wrapper around tf.SequenceExample.\"\"\"\n\n F_TOKEN_ID = 'token_id'\n F_LABEL = 'label'\n F_WEIGHT = 'weight'\n\n def __init__(self, multivalent_tokens=False):\n self._seq = tf.train.SequenceExample()\n self._flist = self._seq.feature_lists.feature_list\n self._timesteps = []\n self._multivalent_tokens = multivalent_tokens\n\n @property\n def seq(self):\n return self._seq\n\n @property\n def multivalent_tokens(self):\n return self._multivalent_tokens\n\n @property\n def _tokens(self):\n return self._flist[SequenceWrapper.F_TOKEN_ID].feature\n\n @property\n def _labels(self):\n return self._flist[SequenceWrapper.F_LABEL].feature\n\n @property\n def _weights(self):\n return self._flist[SequenceWrapper.F_WEIGHT].feature\n\n def add_timestep(self):\n timestep = Timestep(\n self._tokens.add(),\n self._labels.add(),\n self._weights.add(),\n multivalent_tokens=self._multivalent_tokens)\n self._timesteps.append(timestep)\n return timestep\n\n def __iter__(self):\n for timestep in self._timesteps:\n yield timestep\n\n def __len__(self):\n return len(self._timesteps)\n\n def __getitem__(self, idx):\n return self._timesteps[idx]\n\n\ndef build_reverse_sequence(seq):\n \"\"\"Builds a sequence that is the reverse of the input sequence.\"\"\"\n reverse_seq = SequenceWrapper()\n\n # Copy all but last timestep\n for timestep in reversed(seq[:-1]):\n reverse_seq.add_timestep().copy_from(timestep)\n\n # Copy final timestep\n reverse_seq.add_timestep().copy_from(seq[-1])\n\n return reverse_seq\n\n\ndef build_bidirectional_seq(seq, rev_seq):\n bidir_seq = SequenceWrapper(multivalent_tokens=True)\n for forward_ts, reverse_ts in zip(seq, rev_seq):\n bidir_seq.add_timestep().add_token(forward_ts.token).add_token(\n reverse_ts.token)\n\n return bidir_seq\n\n\ndef build_lm_sequence(seq):\n \"\"\"Builds language model sequence from input sequence.\n\n Args:\n seq: SequenceWrapper.\n\n Returns:\n SequenceWrapper with `seq` tokens copied over to output sequence tokens and\n labels (offset by 1, i.e. predict next token) with weights set to 1.0.\n \"\"\"\n lm_seq = SequenceWrapper()\n for i, timestep in enumerate(seq[:-1]):\n lm_seq.add_timestep().set_token(timestep.token).set_label(\n seq[i + 1].token).set_weight(1.0)\n\n return lm_seq\n\n\ndef build_seq_ae_sequence(seq):\n \"\"\"Builds seq_ae sequence from input sequence.\n\n Args:\n seq: SequenceWrapper.\n\n Returns:\n SequenceWrapper with `seq` inputs copied and concatenated, and with labels\n copied in on the right-hand (i.e. 
decoder) side with weights set to 1.0.\n The new sequence will have length `len(seq) * 2 - 1`, as the last timestep\n of the encoder section and the first step of the decoder section will\n overlap.\n \"\"\"\n seq_ae_seq = SequenceWrapper()\n\n for i in range(len(seq) * 2 - 1):\n ts = seq_ae_seq.add_timestep()\n\n if i < len(seq) - 1:\n # Encoder\n ts.set_token(seq[i].token)\n elif i == len(seq) - 1:\n # Transition step\n ts.set_token(seq[i].token)\n ts.set_label(seq[0].token)\n ts.set_weight(1.0)\n else:\n # Decoder\n ts.set_token(seq[i % len(seq)].token)\n ts.set_label(seq[(i + 1) % len(seq)].token)\n ts.set_weight(1.0)\n\n return seq_ae_seq\n\n\ndef build_labeled_sequence(seq, class_label, label_gain=False):\n \"\"\"Builds labeled sequence from input sequence.\n\n Args:\n seq: SequenceWrapper.\n class_label: bool.\n label_gain: bool. If True, class_label will be put on every timestep and\n weight will increase linearly from 0 to 1.\n\n Returns:\n SequenceWrapper with `seq` copied in and `class_label` added as label to\n final timestep.\n \"\"\"\n label_seq = SequenceWrapper(multivalent_tokens=seq.multivalent_tokens)\n\n # Copy sequence without labels\n seq_len = len(seq)\n final_timestep = None\n for i, timestep in enumerate(seq):\n label_timestep = label_seq.add_timestep()\n if seq.multivalent_tokens:\n for token in timestep.tokens:\n label_timestep.add_token(token)\n else:\n label_timestep.set_token(timestep.token)\n if label_gain:\n label_timestep.set_label(int(class_label))\n weight = 1.0 if seq_len < 2 else float(i) / (seq_len - 1)\n label_timestep.set_weight(weight)\n if i == (seq_len - 1):\n final_timestep = label_timestep\n\n # Edit final timestep to have class label and weight = 1.\n final_timestep.set_label(int(class_label)).set_weight(1.0)\n\n return label_seq\n\n\ndef split_by_punct(segment):\n \"\"\"Splits str segment by punctuation, filters our empties and spaces.\"\"\"\n return [s for s in re.split(r'\\W+', segment) if s and not s.isspace()]\n\n\ndef sort_vocab_by_frequency(vocab_freq_map):\n \"\"\"Sorts vocab_freq_map by count.\n\n Args:\n vocab_freq_map: dict<str term, int count>, vocabulary terms with counts.\n\n Returns:\n list<tuple<str term, int count>> sorted by count, descending.\n \"\"\"\n return sorted(\n vocab_freq_map.items(), key=operator.itemgetter(1), reverse=True)\n\n\ndef write_vocab_and_frequency(ordered_vocab_freqs, output_dir):\n \"\"\"Writes ordered_vocab_freqs into vocab.txt and vocab_freq.txt.\"\"\"\n tf.gfile.MakeDirs(output_dir)\n with open(os.path.join(output_dir, 'vocab.txt'), 'w') as vocab_f:\n with open(os.path.join(output_dir, 'vocab_freq.txt'), 'w') as freq_f:\n for word, freq in ordered_vocab_freqs:\n vocab_f.write('{}\\n'.format(word))\n freq_f.write('{}\\n'.format(freq))\n"
] |
[
[
"tensorflow.gfile.MakeDirs",
"tensorflow.train.SequenceExample",
"tensorflow.python_io.TFRecordWriter"
]
] |
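A minimal runnable sketch of the three TensorFlow calls listed above for data_utils.py (tf.gfile.MakeDirs, tf.train.SequenceExample, tf.python_io.TFRecordWriter), assuming the TensorFlow 1.x API that the source file targets; the output directory and token ids are invented for illustration:

import os
import tensorflow as tf

out_dir = '/tmp/adv_text_demo'   # hypothetical output directory
tf.gfile.MakeDirs(out_dir)       # create the directory tree if it does not exist

seq = tf.train.SequenceExample()                       # empty SequenceExample proto
token_feature = seq.feature_lists.feature_list['token_id']
for token_id in [3, 17, 2]:                            # toy token ids
    token_feature.feature.add().int64_list.value.append(token_id)

with tf.python_io.TFRecordWriter(os.path.join(out_dir, 'demo.tfrecords')) as writer:
    writer.write(seq.SerializeToString())              # serialize the proto into the record file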
mrdon/manim
|
[
"d023c76cdef915226bb9a1e41c20168959a02881"
] |
[
"manim/scene/scene.py"
] |
[
"\"\"\"Basic canvas for animations.\"\"\"\n\n\n__all__ = [\"Scene\"]\n\n\nimport inspect\nimport random\nimport warnings\nimport platform\nimport copy\n\nfrom tqdm import tqdm as ProgressDisplay\nimport numpy as np\n\nfrom .. import config, logger\nfrom ..animation.animation import Animation, Wait\nfrom ..animation.transform import MoveToTarget, ApplyMethod\nfrom ..camera.camera import Camera\nfrom ..constants import *\nfrom ..container import Container\nfrom ..mobject.mobject import Mobject\nfrom ..scene.scene_file_writer import SceneFileWriter\nfrom ..utils.iterables import list_update, list_difference_update\nfrom ..utils.hashing import get_hash_from_play_call, get_hash_from_wait_call\nfrom ..utils.family import extract_mobject_family_members\nfrom ..renderer.cairo_renderer import CairoRenderer\nfrom ..utils.exceptions import EndSceneEarlyException\n\n\nclass Scene(Container):\n \"\"\"A Scene is the canvas of your animation.\n\n All of your own named Scenes will be subclasses of Scene, or other named\n scenes.\n\n Examples\n --------\n Override the construct() method to tell Manim what should go on in the\n Scene.\n\n .. code-block:: python\n\n class MyScene(Scene):\n def construct(self):\n self.play(\n Write(Text(\"Hello World!\"))\n )\n\n Some important variables to note are:\n camera: The camera object to be used for the scene.\n file_writer : The object that writes the animations in the scene to a video file.\n mobjects : The list of mobjects present in the scene.\n foreground_mobjects : List of mobjects explicitly in the foreground.\n random_seed: The seed with which all random operations are done.\n\n \"\"\"\n\n CONFIG = {\n \"camera_class\": Camera,\n \"skip_animations\": False,\n \"always_update_mobjects\": False,\n \"random_seed\": 0,\n }\n\n def __init__(self, renderer=None, **kwargs):\n Container.__init__(self, **kwargs)\n if renderer is None:\n self.renderer = CairoRenderer(camera_class=self.camera_class)\n else:\n self.renderer = renderer\n self.renderer.init(self)\n\n self.mobjects = []\n # TODO, remove need for foreground mobjects\n self.foreground_mobjects = []\n if self.random_seed is not None:\n random.seed(self.random_seed)\n np.random.seed(self.random_seed)\n\n self.setup()\n\n def render(self):\n \"\"\"\n Render this Scene.\n \"\"\"\n self.original_skipping_status = config[\"skip_animations\"]\n try:\n self.construct()\n except EndSceneEarlyException:\n pass\n self.tear_down()\n # We have to reset these settings in case of multiple renders.\n config[\"skip_animations\"] = self.original_skipping_status\n self.renderer.finish(self)\n logger.info(\n f\"Rendered {str(self)}\\nPlayed {self.renderer.num_plays} animations\"\n )\n\n def setup(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are comonly subclassed, and have some common setup\n involved before the construct method is called.\n \"\"\"\n pass\n\n def tear_down(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are comonly subclassed, and have some common method\n to be invoked before the scene ends.\n \"\"\"\n pass\n\n def construct(self):\n \"\"\"\n The primary method for constructing (i.e adding content to)\n the Scene.\n \"\"\"\n pass # To be implemented in subclasses\n\n def __str__(self):\n return self.__class__.__name__\n\n def set_variables_as_attrs(self, *objects, **newly_named_objects):\n \"\"\"\n This method is slightly hacky, making it a little easier\n for certain methods (typically subroutines of construct)\n to share local variables.\n \"\"\"\n 
caller_locals = inspect.currentframe().f_back.f_locals\n for key, value in list(caller_locals.items()):\n for o in objects:\n if value is o:\n setattr(self, key, value)\n for key, value in list(newly_named_objects.items()):\n setattr(self, key, value)\n return self\n\n def get_attrs(self, *keys):\n \"\"\"\n Gets attributes of a scene given the attribute's identifier/name.\n\n Parameters\n ----------\n *keys : str\n Name(s) of the argument(s) to return the attribute of.\n\n Returns\n -------\n list\n List of attributes of the passed identifiers.\n \"\"\"\n return [getattr(self, key) for key in keys]\n\n def update_mobjects(self, dt):\n \"\"\"\n Begins updating all mobjects in the Scene.\n\n Parameters\n ----------\n dt: int or float\n Change in time between updates. Defaults (mostly) to 1/frames_per_second\n \"\"\"\n for mobject in self.mobjects:\n mobject.update(dt)\n\n def should_update_mobjects(self):\n \"\"\"\n Returns True if any mobject in Scene is being updated\n or if the scene has always_update_mobjects set to true.\n\n Returns\n -------\n bool\n \"\"\"\n return self.always_update_mobjects or any(\n [mob.has_time_based_updater() for mob in self.get_mobject_family_members()]\n )\n\n def get_top_level_mobjects(self):\n \"\"\"\n Returns all mobjects which are not submobjects.\n\n Returns\n -------\n list\n List of top level mobjects.\n \"\"\"\n # Return only those which are not in the family\n # of another mobject from the scene\n mobjects = self.get_mobjects()\n families = [m.get_family() for m in mobjects]\n\n def is_top_level(mobject):\n num_families = sum([(mobject in family) for family in families])\n return num_families == 1\n\n return list(filter(is_top_level, mobjects))\n\n def get_mobject_family_members(self):\n \"\"\"\n Returns list of family-members of all mobjects in scene.\n If a Circle() and a VGroup(Rectangle(),Triangle()) were added,\n it returns not only the Circle(), Rectangle() and Triangle(), but\n also the VGroup() object.\n\n Returns\n -------\n list\n List of mobject family members.\n \"\"\"\n return extract_mobject_family_members(\n self.mobjects, use_z_index=self.renderer.camera.use_z_index\n )\n\n def add(self, *mobjects):\n \"\"\"\n Mobjects will be displayed, from background to\n foreground in the order with which they are added.\n\n Parameters\n ---------\n *mobjects : Mobject\n Mobjects to add.\n\n Returns\n -------\n Scene\n The same scene after adding the Mobjects in.\n\n \"\"\"\n mobjects = [*mobjects, *self.foreground_mobjects]\n self.restructure_mobjects(to_remove=mobjects)\n self.mobjects += mobjects\n return self\n\n def add_mobjects_among(self, values):\n \"\"\"\n This is meant mostly for quick prototyping,\n e.g. 
to add all mobjects defined up to a point,\n call self.add_mobjects_among(locals().values())\n \"\"\"\n self.add(*filter(lambda m: isinstance(m, Mobject), values))\n return self\n\n def add_mobjects_from_animations(self, animations):\n\n curr_mobjects = self.get_mobject_family_members()\n for animation in animations:\n # Anything animated that's not already in the\n # scene gets added to the scene\n mob = animation.mobject\n if mob is not None and mob not in curr_mobjects:\n self.add(mob)\n curr_mobjects += mob.get_family()\n\n def remove(self, *mobjects):\n \"\"\"\n Removes mobjects in the passed list of mobjects\n from the scene and the foreground, by removing them\n from \"mobjects\" and \"foreground_mobjects\"\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobjects to remove.\n \"\"\"\n for list_name in \"mobjects\", \"foreground_mobjects\":\n self.restructure_mobjects(mobjects, list_name, False)\n return self\n\n def restructure_mobjects(\n self, to_remove, mobject_list_name=\"mobjects\", extract_families=True\n ):\n \"\"\"\n tl:wr\n If your scene has a Group(), and you removed a mobject from the Group,\n this dissolves the group and puts the rest of the mobjects directly\n in self.mobjects or self.foreground_mobjects.\n\n In cases where the scene contains a group, e.g. Group(m1, m2, m3), but one\n of its submobjects is removed, e.g. scene.remove(m1), the list of mobjects\n will be edited to contain other submobjects, but not m1, e.g. it will now\n insert m2 and m3 to where the group once was.\n\n Parameters\n ----------\n to_remove : Mobject\n The Mobject to remove.\n\n mobject_list_name : str, optional\n The list of mobjects (\"mobjects\", \"foreground_mobjects\" etc) to remove from.\n\n extract_families : bool, optional\n Whether the mobject's families should be recursively extracted.\n\n Returns\n -------\n Scene\n The Scene mobject with restructured Mobjects.\n \"\"\"\n if extract_families:\n to_remove = extract_mobject_family_members(\n to_remove, use_z_index=self.renderer.camera.use_z_index\n )\n _list = getattr(self, mobject_list_name)\n new_list = self.get_restructured_mobject_list(_list, to_remove)\n setattr(self, mobject_list_name, new_list)\n return self\n\n def get_restructured_mobject_list(self, mobjects, to_remove):\n \"\"\"\n Given a list of mobjects and a list of mobjects to be removed, this\n filters out the removable mobjects from the list of mobjects.\n\n Parameters\n ----------\n\n mobjects : list\n The Mobjects to check.\n\n to_remove : list\n The list of mobjects to remove.\n\n Returns\n -------\n list\n The list of mobjects with the mobjects to remove removed.\n \"\"\"\n\n new_mobjects = []\n\n def add_safe_mobjects_from_list(list_to_examine, set_to_remove):\n for mob in list_to_examine:\n if mob in set_to_remove:\n continue\n intersect = set_to_remove.intersection(mob.get_family())\n if intersect:\n add_safe_mobjects_from_list(mob.submobjects, intersect)\n else:\n new_mobjects.append(mob)\n\n add_safe_mobjects_from_list(mobjects, set(to_remove))\n return new_mobjects\n\n # TODO, remove this, and calls to this\n def add_foreground_mobjects(self, *mobjects):\n \"\"\"\n Adds mobjects to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n *mobjects : Mobject\n The Mobjects to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects added.\n \"\"\"\n self.foreground_mobjects = list_update(self.foreground_mobjects, mobjects)\n self.add(*mobjects)\n return self\n\n 
def add_foreground_mobject(self, mobject):\n \"\"\"\n Adds a single mobject to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The Mobject to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject added.\n \"\"\"\n return self.add_foreground_mobjects(mobject)\n\n def remove_foreground_mobjects(self, *to_remove):\n \"\"\"\n Removes mobjects from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n *to_remove : Mobject\n The mobject(s) to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects removed.\n \"\"\"\n self.restructure_mobjects(to_remove, \"foreground_mobjects\")\n return self\n\n def remove_foreground_mobject(self, mobject):\n \"\"\"\n Removes a single mobject from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The mobject to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject removed.\n \"\"\"\n return self.remove_foreground_mobjects(mobject)\n\n def bring_to_front(self, *mobjects):\n \"\"\"\n Adds the passed mobjects to the scene again,\n pushing them to he front of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to bring to the front of the scene.\n\n Returns\n ------\n Scene\n The Scene, with the mobjects brought to the front\n of the scene.\n \"\"\"\n self.add(*mobjects)\n return self\n\n def bring_to_back(self, *mobjects):\n \"\"\"\n Removes the mobject from the scene and\n adds them to the back of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to push to the back of the scene.\n\n Returns\n ------\n Scene\n The Scene, with the mobjects pushed to the back\n of the scene.\n \"\"\"\n self.remove(*mobjects)\n self.mobjects = list(mobjects) + self.mobjects\n return self\n\n def clear(self):\n \"\"\"\n Removes all mobjects present in self.mobjects\n and self.foreground_mobjects from the scene.\n\n Returns\n ------\n Scene\n The Scene, with all of its mobjects in\n self.mobjects and self.foreground_mobjects\n removed.\n \"\"\"\n self.mobjects = []\n self.foreground_mobjects = []\n return self\n\n def get_mobjects(self):\n \"\"\"\n Returns all the mobjects in self.mobjects\n\n Returns\n ------\n list\n The list of self.mobjects .\n \"\"\"\n return list(self.mobjects)\n\n def get_mobject_copies(self):\n \"\"\"\n Returns a copy of all mobjects present in\n self.mobjects .\n\n Returns\n ------\n list\n A list of the copies of all the mobjects\n in self.mobjects\n \"\"\"\n return [m.copy() for m in self.mobjects]\n\n def get_moving_mobjects(self, *animations):\n \"\"\"\n Gets all moving mobjects in the passed animation(s).\n\n Parameters\n ----------\n *animations : Animation\n The animations to check for moving mobjects.\n\n Returns\n ------\n list\n The list of mobjects that could be moving in\n the Animation(s)\n \"\"\"\n # Go through mobjects from start to end, and\n # as soon as there's one that needs updating of\n # some kind per frame, return the list from that\n # point forward.\n animation_mobjects = [anim.mobject for anim in animations]\n mobjects = self.get_mobject_family_members()\n for i, mob in enumerate(mobjects):\n update_possibilities = [\n mob in animation_mobjects,\n len(mob.get_family_updaters()) > 0,\n mob in self.foreground_mobjects,\n ]\n if any(update_possibilities):\n return 
mobjects[i:]\n return []\n\n def get_moving_and_stationary_mobjects(self, animations):\n moving_mobjects = self.get_moving_mobjects(*animations)\n all_mobjects = list_update(self.mobjects, self.foreground_mobjects)\n all_mobject_families = extract_mobject_family_members(\n all_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n only_those_with_points=True,\n )\n moving_mobjects = self.get_moving_mobjects(*animations)\n all_moving_mobject_families = extract_mobject_family_members(\n moving_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n )\n stationary_mobjects = list_difference_update(\n all_mobject_families, all_moving_mobject_families\n )\n return all_moving_mobject_families, stationary_mobjects\n\n def compile_play_args_to_animation_list(self, *args, **kwargs):\n \"\"\"\n Each arg can either be an animation, or a mobject method\n followed by that methods arguments (and potentially follow\n by a dict of kwargs for that method).\n This animation list is built by going through the args list,\n and each animation is simply added, but when a mobject method\n s hit, a MoveToTarget animation is built using the args that\n follow up until either another animation is hit, another method\n is hit, or the args list runs out.\n\n Parameters\n ----------\n *args : Animation or method of a mobject, which is followed by that method's arguments\n\n **kwargs : any named arguments like run_time or lag_ratio.\n\n Returns\n -------\n list : list of animations with the parameters applied to them.\n \"\"\"\n animations = []\n state = {\n \"curr_method\": None,\n \"last_method\": None,\n \"method_args\": [],\n }\n\n def compile_method(state):\n if state[\"curr_method\"] is None:\n return\n mobject = state[\"curr_method\"].__self__\n if state[\"last_method\"] and state[\"last_method\"].__self__ is mobject:\n animations.pop()\n # method should already have target then.\n else:\n mobject.generate_target()\n #\n if len(state[\"method_args\"]) > 0 and isinstance(\n state[\"method_args\"][-1], dict\n ):\n method_kwargs = state[\"method_args\"].pop()\n else:\n method_kwargs = {}\n state[\"curr_method\"].__func__(\n mobject.target, *state[\"method_args\"], **method_kwargs\n )\n animations.append(MoveToTarget(mobject))\n state[\"last_method\"] = state[\"curr_method\"]\n state[\"curr_method\"] = None\n state[\"method_args\"] = []\n\n for arg in args:\n if isinstance(arg, Animation):\n compile_method(state)\n animations.append(arg)\n elif inspect.ismethod(arg):\n compile_method(state)\n state[\"curr_method\"] = arg\n elif state[\"curr_method\"] is not None:\n state[\"method_args\"].append(arg)\n elif isinstance(arg, Mobject):\n raise ValueError(\n \"\"\"\n I think you may have invoked a method\n you meant to pass in as a Scene.play argument\n \"\"\"\n )\n else:\n raise ValueError(\"Invalid play arguments\")\n compile_method(state)\n\n for animation in animations:\n # This is where kwargs to play like run_time and rate_func\n # get applied to all animations\n animation.update_config(**kwargs)\n\n return animations\n\n def get_time_progression(\n self, run_time, n_iterations=None, override_skip_animations=False\n ):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Returns a CommandLine ProgressBar whose ``fill_time``\n is dependent on the ``run_time`` of an animation,\n the iterations to perform in that animation\n and a bool saying whether or not to consider\n the skipped animations.\n\n Parameters\n ----------\n run_time : float\n The 
``run_time`` of the animation.\n\n n_iterations : int, optional\n The number of iterations in the animation.\n\n override_skip_animations : bool, optional\n Whether or not to show skipped animations in the progress bar.\n\n Returns\n -------\n ProgressDisplay\n The CommandLine Progress Bar.\n \"\"\"\n if config[\"skip_animations\"] and not override_skip_animations:\n times = [run_time]\n else:\n step = 1 / self.renderer.camera.frame_rate\n times = np.arange(0, run_time, step)\n time_progression = ProgressDisplay(\n times,\n total=n_iterations,\n leave=config[\"leave_progress_bars\"],\n ascii=True if platform.system() == \"Windows\" else None,\n disable=not config[\"progress_bar\"],\n )\n return time_progression\n\n def get_animation_time_progression(self, animations):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Uses :func:`~.get_time_progression` to obtain a\n CommandLine ProgressBar whose ``fill_time`` is\n dependent on the qualities of the passed Animation,\n\n Parameters\n ----------\n animations : List[:class:`~.Animation`, ...]\n The list of animations to get\n the time progression for.\n\n Returns\n -------\n ProgressDisplay\n The CommandLine Progress Bar.\n \"\"\"\n run_time = self.get_run_time(animations)\n time_progression = self.get_time_progression(run_time)\n time_progression.set_description(\n \"\".join(\n [\n \"Animation {}: \".format(self.renderer.num_plays),\n str(animations[0]),\n (\", etc.\" if len(animations) > 1 else \"\"),\n ]\n )\n )\n return time_progression\n\n def get_wait_time_progression(self, duration, stop_condition):\n \"\"\"\n This method is used internally to obtain the CommandLine\n Progressbar for when self.wait() is called in a scene.\n\n Parameters\n ----------\n duration : int or float\n duration of wait time\n\n stop_condition : function\n The function which determines whether to continue waiting.\n\n Returns\n -------\n ProgressBar\n The CommandLine ProgressBar of the wait time\n\n \"\"\"\n if stop_condition is not None:\n time_progression = self.get_time_progression(\n duration,\n n_iterations=-1, # So it doesn't show % progress\n override_skip_animations=True,\n )\n time_progression.set_description(\n \"Waiting for {}\".format(stop_condition.__name__)\n )\n else:\n time_progression = self.get_time_progression(duration)\n time_progression.set_description(\n \"Waiting {}\".format(self.renderer.num_plays)\n )\n return time_progression\n\n def get_run_time(self, animations):\n \"\"\"\n Gets the total run time for a list of animations.\n\n Parameters\n ----------\n animations : List[:class:`Animation`, ...]\n A list of the animations whose total\n ``run_time`` is to be calculated.\n\n Returns\n -------\n float\n The total ``run_time`` of all of the animations in the list.\n \"\"\"\n\n return np.max([animation.run_time for animation in animations])\n\n def play(self, *args, **kwargs):\n self.renderer.play(self, *args, **kwargs)\n\n def wait(self, duration=DEFAULT_WAIT_TIME, stop_condition=None):\n self.play(Wait(duration=duration, stop_condition=stop_condition))\n\n def wait_until(self, stop_condition, max_time=60):\n \"\"\"\n Like a wrapper for wait().\n You pass a function that determines whether to continue waiting,\n and a max wait time if that is never fulfilled.\n\n Parameters\n ----------\n stop_condition : function\n The function whose boolean return value determines whether to continue waiting\n\n max_time : int or float, optional\n The maximum wait time in seconds, if the 
stop_condition is never fulfilled.\n \"\"\"\n self.wait(max_time, stop_condition=stop_condition)\n\n def play_internal(self, *args, **kwargs):\n \"\"\"\n This method is used to prep the animations for rendering,\n apply the arguments and parameters required to them,\n render them, and write them to the video file.\n\n Parameters\n ----------\n *args : Animation or mobject with mobject method and params\n **kwargs : named parameters affecting what was passed in *args e.g\n run_time, lag_ratio etc.\n \"\"\"\n if len(args) == 0:\n warnings.warn(\"Called Scene.play with no animations\")\n return\n\n animations = self.compile_play_args_to_animation_list(*args, **kwargs)\n if (\n len(animations) == 1\n and isinstance(animations[0], Wait)\n and not self.should_update_mobjects()\n ):\n self.add_static_frames(animations[0].duration)\n return\n\n moving_mobjects = None\n static_mobjects = None\n duration = None\n stop_condition = None\n time_progression = None\n if len(animations) == 1 and isinstance(animations[0], Wait):\n # TODO, be smart about setting a static image\n # the same way Scene.play does\n duration = animations[0].duration\n stop_condition = animations[0].stop_condition\n self.static_image = None\n time_progression = self.get_wait_time_progression(duration, stop_condition)\n else:\n # Paint all non-moving objects onto the screen, so they don't\n # have to be rendered every frame\n (\n moving_mobjects,\n stationary_mobjects,\n ) = self.get_moving_and_stationary_mobjects(animations)\n self.renderer.update_frame(self, mobjects=stationary_mobjects)\n self.static_image = self.renderer.get_frame()\n time_progression = self.get_animation_time_progression(animations)\n\n for animation in animations:\n animation.begin()\n\n last_t = 0\n for t in time_progression:\n dt = t - last_t\n last_t = t\n for animation in animations:\n animation.update_mobjects(dt)\n alpha = t / animation.run_time\n animation.interpolate(alpha)\n self.update_mobjects(dt)\n self.renderer.update_frame(self, moving_mobjects, self.static_image)\n self.renderer.add_frame(self.renderer.get_frame())\n if stop_condition is not None and stop_condition():\n time_progression.close()\n break\n\n for animation in animations:\n animation.finish()\n animation.clean_up_from_scene(self)\n\n def add_static_frames(self, duration):\n self.renderer.update_frame(self)\n dt = 1 / self.renderer.camera.frame_rate\n self.renderer.add_frame(\n self.renderer.get_frame(),\n num_frames=int(duration / dt),\n )\n\n def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):\n \"\"\"\n This method is used to add a sound to the animation.\n\n Parameters\n ----------\n sound_file : str\n The path to the sound file.\n\n time_offset : int,float, optional\n The offset in the sound file after which\n the sound can be played.\n\n gain :\n\n \"\"\"\n if config[\"skip_animations\"]:\n return\n time = self.time + time_offset\n self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs)\n"
] |
[
[
"numpy.max",
"numpy.arange",
"numpy.random.seed"
]
] |
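A minimal sketch of how scene.py uses the NumPy calls listed above (np.random.seed for deterministic scenes, np.arange for per-frame time steps, np.max for the total run time); the run times and frame rate are toy values:

import numpy as np

np.random.seed(0)                               # deterministic random operations, as in Scene.__init__
run_times = [1.5, 2.0, 0.5]                     # toy animation run times
run_time = np.max(run_times)                    # total run time = longest animation
frame_rate = 60.0                               # assumed frame rate
times = np.arange(0, run_time, 1 / frame_rate)  # one time step per rendered frame
print(len(times), times[:3])                    # 120 [0.         0.01666667 0.03333333]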
AutoDash/AutoDash
|
[
"3924795a04159f80ea3b65b2172747babd15f35f"
] |
[
"src/executor/SegmentSplitter.py"
] |
[
"from ..data.VideoItem import VideoItem\nfrom ..data.VideoFile import VideoFile\nfrom .iExecutor import iExecutor\nfrom ..signals.SkipSignal import SkipSignal\nimport re\nimport numpy as np\n\nclass SegmentSplitter(iExecutor):\n def __init__(self, *parents, clip_length='5s', length_threshold='3s'):\n super().__init__(*parents)\n self.clip_len_s = SegmentSplitter.parse_time(clip_length)\n self.len_thresh_s = SegmentSplitter.parse_time(length_threshold)\n \n def split_segment(self, item):\n metadata = iExecutor.get_metadata(item)\n video = VideoFile(item.filepath)\n # First we find the length of BBs\n bbs = metadata.bb_fields.get_bbs_as_arrs()\n collision_locations = metadata.bb_fields.collision_locations\n if len(collision_locations) < 1:\n raise SkipSignal(\"Item has no collision_locations\")\n if len(bbs) == 0:\n raise SkipSignal(\"Item has no bounding boxes\")\n if metadata.start_i is None:\n metadata.start_i = 0\n if metadata.end_i is None:\n metadata.end_i = video.true_length\n dtype = [\n ('frame', np.int),\n ('id', np.int),\n ('class', object),\n ('x1', np.int), \n ('y1', np.int), \n ('x2', np.int), \n ('y2', np.int), \n ('has_collision', np.bool), \n ]\n bbs = np.array(bbs, dtype=dtype)\n collision_locations = np.sort(collision_locations)\n frames = np.unique(bbs['frame'])\n segments = [ ]\n segments += self.create_positives(collision_locations, frames, metadata, video)\n segments += self.create_negatives(segments, collision_locations, frames, metadata, video) \n items = [ ]\n for idx, (begin, end) in enumerate(segments):\n item = metadata.clone()\n item.bb_fields.crop_range(begin, end)\n item.id = metadata.id + f'-{idx}'\n item.start_i = begin + metadata.start_i\n item.end_i = end + metadata.start_i\n items.append(item)\n return items\n\n def create_positives(self, ALs, frames, metadata, video):\n cover = [ ]\n begin = 0\n for al in ALs:\n min_end = video.get_frame_after_time_elapsed(begin + metadata.start_i, self.len_thresh_s * 1000)\n # Check for minimum range\n if al + metadata.start_i < min_end:\n continue\n begin = video.get_frame_after_time_elapsed(metadata.start_i + al, -self.clip_len_s * 1000)\n begin = max(0, begin - metadata.start_i)\n it_begin = np.searchsorted(frames, begin)\n it_end = np.searchsorted(frames, al)\n it_end = min(frames.shape[0] - 1, it_end) # Prevent out of index access for ALs with no BBs\n # Add coverage\n cover.append((frames[it_begin], frames[it_end]))\n begin = frames[it_end]\n return cover\n\n def create_negatives(self, positive_cover, ALs, frames, metadata, video):\n cover = [ ]\n begin = 0\n end = frames[-1]\n for prange in positive_cover + [(end, end)]:\n end, next_begin = prange\n it_begin = np.searchsorted(frames, begin)\n it_end = np.searchsorted(frames, end)\n total_delta = video.get_time_delta(begin + metadata.start_i, end + metadata.start_i) / 1000\n n_covers = int(total_delta / self.clip_len_s)\n begin = next_begin\n if n_covers < 1:\n continue\n delta = (it_end - it_begin) / n_covers\n cover_frames = [ it_begin ]\n for _ in range(0, n_covers):\n next_frame = video.get_frame_after_time_elapsed(cover_frames[-1], self.clip_len_s * 1000)\n cover_frames.append(next_frame)\n cover += [ (int(cover_frames[i]), int(cover_frames[i+1])) \n for i in range(0, n_covers) ]\n return cover\n \n\n def parse_time(time):\n pattern = r'(?:(\\d+)h)?(?:(\\d+)m)?(?:(\\d+)s)?'\n result = re.match(pattern, time)\n if not result:\n raise ValueError(f'Invalid time: {time}. 
Expected digit followed by [smh]')\n hours, minutes, seconds = result.groups()\n time_s = int(hours or 0)\n time_s *= 60\n time_s += int(minutes or 0)\n time_s *= 60\n time_s += int(seconds or 0)\n if time_s <= 0:\n raise ValueError(f'Invalid time: {time}. Expected a non-zero positive value')\n return time_s\n\n \n def run(self, item: VideoItem):\n return map(\n lambda mdi: VideoItem(mdi, filepath=item.filepath),\n self.split_segment(item)\n )\n"
] |
[
[
"numpy.searchsorted",
"numpy.array",
"numpy.sort",
"numpy.unique"
]
] |
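A minimal sketch of the NumPy calls listed above as SegmentSplitter.py uses them (sorting collision locations, deduplicating bounding-box frames, and locating collisions within those frames); the frame numbers are toy values:

import numpy as np

bb_frames = np.array([4, 1, 1, 7, 4, 9])           # frame index of each bounding box
collision_locations = np.sort(np.array([7, 3]))    # collision frames, in ascending order
frames = np.unique(bb_frames)                      # sorted unique annotated frames -> [1 4 7 9]

# Index of the first annotated frame at or after each collision location.
idx = np.searchsorted(frames, collision_locations)
print(frames, idx)                                 # [1 4 7 9] [1 2]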
Joao16am/si
|
[
"813ca373022fc5ee35eac69147b5567275718b46"
] |
[
"src/si/util/im2col.py"
] |
[
"import numpy as np\n\n\ndef calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride):\n if not isinstance(X_shape, tuple):\n raise ValueError(\"`X_shape` must be of type tuple\")\n\n if not isinstance(out_dim, tuple):\n raise ValueError(\"`out_dim` must be of type tuple\")\n\n if not isinstance(kernel_shape, tuple):\n raise ValueError(\"`kernel_shape` must be of type tuple\")\n\n if not isinstance(stride, int):\n raise ValueError(\"`stride` must be of type int\")\n\n fr, fc = kernel_shape\n out_rows, out_cols = out_dim\n n_ex, in_rows, in_cols, in_ch = X_shape\n\n pr = int((stride * (out_rows - 1) + fr - in_rows) / 2)\n pc = int((stride * (out_cols - 1) + fc - in_cols) / 2)\n\n out_rows1 = int(1 + (in_rows + 2 * pr - fr) / stride)\n out_cols1 = int(1 + (in_cols + 2 * pc - fc) / stride)\n\n # add asymmetric padding pixels to right / bottom\n pr1, pr2 = pr, pr\n if out_rows1 == out_rows - 1:\n pr1, pr2 = pr, pr + 1\n elif out_rows1 != out_rows:\n raise AssertionError\n\n pc1, pc2 = pc, pc\n if out_cols1 == out_cols - 1:\n pc1, pc2 = pc, pc + 1\n elif out_cols1 != out_cols:\n raise AssertionError\n\n if any(np.array([pr1, pr2, pc1, pc2]) < 0):\n raise ValueError(\n \"Padding cannot be less than 0. Got: {}\".format((pr1, pr2, pc1, pc2))\n )\n return (pr1, pr2, pc1, pc2)\n\n\ndef pad2D(X, pad, kernel_shape=None, stride=None):\n p = pad\n if isinstance(p, int):\n p = (p, p, p, p)\n\n if isinstance(p, tuple):\n if len(p) == 2:\n p = (p[0], p[0], p[1], p[1])\n\n X_pad = np.pad(\n X,\n pad_width=((0, 0), (p[0], p[1]), (p[2], p[3]), (0, 0)),\n mode=\"constant\",\n constant_values=0,\n )\n\n # compute the correct padding dims for a 'same' convolution\n if p == \"same\" and kernel_shape and stride is not None:\n p = calc_pad_dims_2D(\n X.shape, X.shape[1:3], kernel_shape, stride)\n X_pad, p = pad2D(X, p)\n return X_pad, p\n\n\ndef _im2col_indices(X_shape, fr, fc, p, s):\n pr1, pr2, pc1, pc2 = p\n n_ex, n_in, in_rows, in_cols = X_shape\n\n out_rows = (in_rows + pr1 + pr2 - fr) // s + 1\n out_cols = (in_cols + pc1 + pc2 - fc) // s + 1\n\n\n if any([out_rows <= 0, out_cols <= 0]):\n raise ValueError(\n \"Dimension mismatch during convolution: \"\n \"out_rows = {}, out_cols = {}\".format(out_rows, out_cols)\n )\n\n i0 = np.repeat(np.arange(fr), fc)\n i0 = np.tile(i0, n_in)\n i1 = s * np.repeat(np.arange(out_rows), out_cols)\n j0 = np.tile(np.arange(fc), fr * n_in)\n j1 = s * np.tile(np.arange(out_cols), out_rows)\n\n i = i0.reshape(-1, 1) + i1.reshape(1, -1)\n j = j0.reshape(-1, 1) + j1.reshape(1, -1)\n k = np.repeat(np.arange(n_in), fr * fc).reshape(-1, 1)\n return k, i, j\n\n\ndef im2col(X, W_shape, pad, stride):\n fr, fc, n_in, n_out = W_shape\n s, p = stride, pad\n n_ex, in_rows, in_cols, n_in = X.shape\n\n # zero-pad the input\n X_pad, p = pad2D(X, p, W_shape[:2], stride=s)\n pr1, pr2, pc1, pc2 = p\n\n # shuffle to have channels as the first dim\n X_pad = X_pad.transpose(0, 3, 1, 2)\n\n # get the indices for im2col\n k, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc, p, s)\n\n X_col = X_pad[:, k, i, j]\n X_col = X_col.transpose(1, 2, 0).reshape(fr * fc * n_in, -1)\n return X_col, p\n\n\ndef col2im(X_col, X_shape, W_shape, pad, stride):\n s = stride\n pr1, pr2, pc1, pc2 = pad\n fr, fc, n_in, n_out = W_shape\n n_ex, in_rows, in_cols, n_in = X_shape\n\n X_pad = np.zeros((n_ex, n_in, in_rows + pr1 + pr2, in_cols + pc1 + pc2))\n k, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc, pad, s)\n\n X_col_reshaped = X_col.reshape(n_in * fr * fc, -1, n_ex)\n X_col_reshaped = 
X_col_reshaped.transpose(2, 0, 1)\n\n np.add.at(X_pad, (slice(None), k, i, j), X_col_reshaped)\n\n pr2 = None if pr2 == 0 else -pr2\n pc2 = None if pc2 == 0 else -pc2\n return X_pad[:, :, pr1:pr2, pc1:pc2]\n"
] |
[
[
"numpy.pad",
"numpy.arange",
"numpy.tile",
"numpy.array",
"numpy.zeros"
]
] |
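A minimal sketch of the NumPy calls listed above as im2col.py uses them (zero-padding an NHWC batch and building tiled patch indices); the batch and kernel sizes are toy values:

import numpy as np

X = np.zeros((2, 4, 4, 3))                          # toy NHWC batch
p = (1, 1, 1, 1)                                    # (top, bottom, left, right) padding
if any(np.array(p) < 0):                            # same guard as calc_pad_dims_2D
    raise ValueError("Padding cannot be less than 0.")
X_pad = np.pad(X, ((0, 0), (p[0], p[1]), (p[2], p[3]), (0, 0)),
               mode="constant", constant_values=0)
print(X_pad.shape)                                  # (2, 6, 6, 3)

fr, fc, n_in = 3, 3, X.shape[-1]                    # kernel rows/cols, input channels
i0 = np.tile(np.repeat(np.arange(fr), fc), n_in)    # row offset of each patch entry
j0 = np.tile(np.arange(fc), fr * n_in)              # column offset of each patch entry
print(i0[:6], j0[:6])                               # [0 0 0 1 1 1] [0 1 2 0 1 2]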
SWang848/DCRAC
|
[
"695d83063cf484cd54d7744c8c719fde94c3cde5"
] |
[
"history.py"
] |
[
"import numpy as np\nimport time\nimport cv2\ntry:\n from PIL import Image\nexcept:\n import Image\n\nfrom utils import *\n\n\nclass History():\n \"\"\"\n Manages frame history\n \"\"\"\n\n def __init__(self, length, im_shape, nb_action):\n \"\"\"\n Args:\n length: How many frames should be stored in the history\n im_shape: Target size to crop to, im_shape = (WIDTH,HEIGHT,CHANNEL)\n \"\"\"\n # assert len(im_shape) == 3\n\n if len(im_shape) == 3:\n self.im_shape = im_shape\n self.black_and_white = True if im_shape[2] == 1 else False\n else:\n self.im_shape = im_shape\n self.black_and_white = False\n self.length = length\n\n self.history_o = None\n self.history_a_prev = None\n self.reset()\n\n def reset(self):\n \"\"\"Reset the history of observation and action\n \"\"\"\n self.history_o = np.zeros((self.length, ) + self.im_shape, dtype=np.uint8)\n # self.history_a_prev = np.zeros((self.length, ), dtype=np.int8)\n \n # action '-1' means None\n self.history_a_prev = -np.ones((self.length, ), dtype=np.int8)\n\n def reset_with_raw_frame(self, raw_frame, action_prev=None, fill=False):\n \"\"\"Fill the history with a raw frame\n \"\"\"\n self.reset()\n # ↓ action '-2' means no action, will be translated to all zeros with shape (nb_objective) as one-hot\n action_prev = -2 if action_prev is None else action_prev\n if fill:\n self.add_raw_frame(raw_frame, action_prev)\n return self.fill_with_last_frame()\n else:\n return self.add_raw_frame(raw_frame, action_prev)\n\n def add_raw_frame(self, raw_frame, action_prev, save=False):\n \"\"\"Adds a new frame to the history\n \"\"\"\n self.history_o = np.roll(self.history_o, -1, axis=0)\n self.history_o[-1] = self.process_frame(raw_frame, save=save)\n self.history_a_prev = np.roll(self.history_a_prev, -1, axis=0)\n self.history_a_prev[-1] = action_prev\n\n return self.history_o, self.history_a_prev\n\n def fill_with_last_frame(self):\n \"\"\"\n Fills the state with the latest experienced frame\n \"\"\"\n for i in range(len(self.history_o)-1):\n self.history_o[i] = self.history_o[-1]\n # self.history_a_prev[i] = self.history_a_prev[-1]\n return self.history_o, self.history_a_prev\n\n def process_frame(self, raw_frame, save=False, filename=None):\n \"\"\"Processes a frame by resizing and cropping as necessary and then\n converting to grayscale\n \n Arguments:\n raw_frame {np.array} -- Raw pixels\n \n Keyword Arguments:\n save {bool} -- Whether to save the converted frame to disk (default: {False})\n filename {str} -- Filename to save it to (default: {None})\n \n Returns:\n np.array -- The processed frame\n \"\"\"\n if len(self.im_shape) < 3:\n return raw_frame\n \n if self.black_and_white:\n raw_frame = cv2.cvtColor(raw_frame,cv2.COLOR_RGB2GRAY)\n \n cropped = cv2.resize(raw_frame, dsize=self.im_shape[:2], interpolation=cv2.INTER_AREA)\n cropped = cropped.reshape(self.im_shape)\n \n if save:\n self.save_image(cropped)\n\n return cropped\n \n\n def save_image(self, frame, filename=None):\n if filename is None:\n filename = \"./output/imgs/\"+str(time.time())+\".png\"\n if self.black_and_white:\n frame = frame.reshape(self.im_shape[:2])\n img = Image.fromarray(frame, mode='L')\n img.save(filename)\n else:\n img = Image.fromarray(frame, mode='RGB')\n img.save(filename)"
] |
[
[
"numpy.zeros",
"numpy.roll",
"numpy.ones"
]
] |
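A minimal sketch of the rolling frame/action buffers that history.py maintains with the NumPy calls listed above; the history length, frame shape, and action id are toy values:

import numpy as np

length, im_shape = 4, (84, 84, 1)                             # toy history length and frame shape
history_o = np.zeros((length,) + im_shape, dtype=np.uint8)    # observation history
history_a_prev = -np.ones((length,), dtype=np.int8)           # -1 means "no previous action"

new_frame = np.zeros(im_shape, dtype=np.uint8)                # stand-in for a processed frame
history_o = np.roll(history_o, -1, axis=0)                    # drop the oldest entry, shift left
history_o[-1] = new_frame
history_a_prev = np.roll(history_a_prev, -1, axis=0)
history_a_prev[-1] = 2                                        # toy previous action id
print(history_a_prev)                                         # [-1 -1 -1  2]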
Wendy-Xiao/ext_summ_disco_tree_attn
|
[
"5ff99c8260350c677e140b02521c75ac03d16673"
] |
[
"utils.py"
] |
[
"from collections import Counter\nfrom pathlib import Path\nfrom random import random\nimport rouge_papier_v2\nimport pandas as pd\nimport re\nimport numpy as np\nimport os\nimport json \nimport torch\nimport os\nimport subprocess\n# import matplotlib.pyplot as plt\n\n# Utility functions\ndef get_posweight(inputs_dir):\n\tinputs_dir = Path(inputs_dir)\n\tall_files = [path for path in inputs_dir.glob(\"*.pt\")]\n\ttotal_num=0\n\ttotal_pos=0\n\tfor i in range(10):\n\t\tdata = torch.load(all_files[i])\n\t\tfor d in data:\n\t\t\ttotal_num+=len(d['d_labels'][0])\n\t\t\ttotal_pos+=sum(d['d_labels'][0])\n\tprint('Compute pos weight done! There are %d sentences in total, with %d sentences as positive'%(total_num,total_pos))\n\treturn torch.FloatTensor([(total_num-total_pos)/float(total_pos)])\n\ndef make_file_list(input_dir,file_list_file):\n\tof = open(file_list_file,'r')\n\tfile_list = of.readlines()\n\tof.close()\n\tf_list = [Path(input_dir+'/'+f.strip()+'.json') for f in file_list]\n\treturn f_list\n\ndef get_all_text(train_input_dir):\n\tif isinstance(train_input_dir,list):\n\t\tfile_l = train_input_dir\n\telse:\n\t\ttrain_input = Path(train_input_dir)\n\t\tfile_l = [path for path in train_input.glob(\"*.json\")]\n\tall_tokens = []\n\tfor f in file_l:\n\t\twith f.open() as of:\n\t\t\td = json.load(of)\n\t\ttokens = [t for sent in d['inputs'] for t in (sent['tokens']+['<eos>'])]\n\t\tall_tokens.append(tokens)\n\treturn all_tokens\n\ndef build_word2ind(utt_l, vocabularySize):\n\tword_counter = Counter([word for utt in utt_l for word in utt])\n\tprint('%d words found!'%(len(word_counter)))\n\tvocabulary = [\"<UNK>\"] + [e[0] for e in word_counter.most_common(vocabularySize)]\n\tword2index = {word:index for index,word in enumerate(vocabulary)}\n\tglobal EOS_INDEX\n\tEOS_INDEX = word2index['<eos>']\n\treturn word2index\n\n# Build embedding matrix by importing the pretrained glove\ndef getEmbeddingMatrix(gloveDir, word2index, embedding_dim):\n\t'''Refer to the official baseline model provided by SemEval.'''\n\tembeddingsIndex = {}\n\t# Load the embedding vectors from ther GloVe file\n\twith open(os.path.join(gloveDir, 'glove.6B.300d.txt'), encoding=\"utf8\") as f:\n\t\tfor line in f:\n\t\t\tvalues = line.split()\n\t\t\tword = values[0]\n\t\t\tembeddingVector = np.asarray(values[1:], dtype='float32')\n\t\t\tembeddingsIndex[word] = embeddingVector\n\t# Minimum word index of any word is 1. 
\n\tembeddingMatrix = np.zeros((len(word2index) , embedding_dim))\n\tfor word, i in word2index.items():\n\t\tembeddingVector = embeddingsIndex.get(word)\n\t\tif embeddingVector is not None:\n\t\t\t# words not found in embedding index will be all-zeros.\n\t\t\tembeddingMatrix[i] = embeddingVector\n\t\n\treturn embeddingMatrix\n\n\ndef get_rouge(hyp_pathlist, ref_pathlist, config_path= './config'):\n\tpath_data = []\n\tuttnames = []\n\tfor i in range(len(hyp_pathlist)):\n\t\tpath_data.append([hyp_pathlist[i], [ref_pathlist[i]]])\n\t\tuttnames.append(os.path.splitext(hyp_pathlist[i])[0].split('/')[-1])\n\n\tconfig_text = rouge_papier_v2.util.make_simple_config_text(path_data)\n\tconfig_path = config_path\n\tof = open(config_path,'w')\n\tof.write(config_text)\n\tof.close()\n\tuttnames.append('Average')\n\tdf,avgfs,conf = rouge_papier_v2.compute_rouge(\n\t\tconfig_path, max_ngram=2, lcs=True, \n\t\tremove_stopwords=False,stemmer=True,set_length = False, return_conf=True)\n\tdf['data_ids'] = pd.Series(np.array(uttnames),index =df.index)\n\tavg = df.iloc[-1:].to_dict(\"records\")[0]\n\tc = conf.to_dict(\"records\")\n\t# if lcs:\n\t# print(c)\n\tprint(\"Rouge-1 r score: %f, Rouge-1 p score: %f, Rouge-1 f-score: %f, 95-conf(%f-%f)\"%(\\\n\t\t\tavg['rouge-1-r'],avg['rouge-1-p'],avg['rouge-1-f'],c[0]['lower_conf_f'],c[0]['upper_conf_f']))\n\tprint(\"Rouge-2 r score:%f, Rouge-1 p score: %f, Rouge-2 f-score:%f, 95-conf(%f-%f)\"%(\\\n\t\tavg['rouge-2-r'],avg['rouge-2-p'],avg['rouge-2-f'],c[1]['lower_conf_f'],c[1]['upper_conf_f']))\n\tprint(\"Rouge-L r score:%f, Rouge-1 p score: %f, Rouge-L f-score:%f, 95-conf(%f-%f)\"%(\\\n\t\tavg['rouge-L-r'],avg['rouge-L-p'],avg['rouge-L-f'],c[2]['lower_conf_f'],c[2]['upper_conf_f']))\n\n\treturn avgfs[1],df\n\t\t\n\n\n\nif __name__ == '__main__':\n\t# oracle_path = '/scratch/wenxiao/pubmed/oracle/test/'\n\t# abstract_path = '/scratch/wenxiao/pubmed/human-abstracts/test/'\n\t# lead_path = '/scratch/wenxiao/pubmed/lead/test/'\n\toracle_path = '/ubc/cs/research/nlp/wenxiao/official_code/test_hyp/oracle-bigpatent_a/'\n\tlead_path = '/ubc/cs/research/nlp/wenxiao/official_code/test_hyp/lead-bigpatent_a/'\n\tabstract_path = '/scratch/wenxiao/bigpatent/bigPatentData_splitted/a/human-abstracts/test/'\n\n\td = Path(oracle_path)\n\tuttnames = [str(path.stem) for path in d.glob(\"*.txt\")]\n\tlead_pathlist = []\n\toracle_pathlist = []\n\tref_pathlist = []\n\tfor n in uttnames:\n\t\tlead_pathlist.append(lead_path+n+'.txt')\n\t\toracle_pathlist.append(oracle_path+n+'.txt')\n\t\tref_pathlist.append(abstract_path+n+'.txt')\n\n\tget_meteor(oracle_pathlist,ref_pathlist,'oracle')\n\tget_meteor(lead_pathlist,ref_pathlist,'lead')\n\n"
] |
[
[
"numpy.asarray",
"numpy.array",
"torch.load"
]
] |
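A minimal sketch of the calls listed above as utils.py uses them (parsing a GloVe-style embedding line with np.asarray, wrapping document ids with np.array, loading a preprocessed shard with torch.load); the embedding line and shard contents are invented, and the shard is written to a temporary path first so the snippet stays runnable:

import numpy as np
import torch

line = "the 0.1 -0.2 0.3"                          # toy GloVe-style line
values = line.split()
vec = np.asarray(values[1:], dtype="float32")      # embedding vector for the word
uttnames = np.array(["doc1", "doc2", "Average"])   # per-document ids, as in get_rouge

torch.save([{"d_labels": [[0, 1, 0]]}], "/tmp/shard_0.pt")    # toy preprocessed shard
data = torch.load("/tmp/shard_0.pt")                          # as in get_posweight
print(values[0], vec, uttnames[-1], sum(data[0]["d_labels"][0]))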
mrshu/onnxruntime
|
[
"a7a2a16edddc283b53d7737f897b4bbda5e86209"
] |
[
"orttraining/orttraining/test/python/orttraining_test_ortmodule_poc.py"
] |
[
"import argparse\nimport logging\nimport os\nimport torch\nimport time\nfrom torchvision import datasets, transforms\n\nimport onnxruntime\nfrom onnxruntime.training import ORTModule\n\n\nclass NeuralNet(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(NeuralNet, self).__init__()\n\n self.fc1 = torch.nn.Linear(input_size, hidden_size)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(hidden_size, num_classes)\n\n def forward(self, input1):\n out = self.fc1(input1)\n out = self.relu(out)\n out = self.fc2(out)\n return out\n\n\ndef train(args, model, device, optimizer, loss_fn, train_loader, epoch):\n print('\\n======== Epoch {:} / {:} with batch size {:} ========'.format(epoch+1, args.epochs, args.batch_size))\n model.train()\n # Measure how long the training epoch takes.\n t0 = time.time()\n start_time = t0\n\n # Reset the total loss for this epoch.\n total_loss = 0\n\n for iteration, (data, target) in enumerate(train_loader):\n if iteration == args.train_steps:\n break\n data, target = data.to(device), target.to(device)\n data = data.reshape(data.shape[0], -1)\n\n optimizer.zero_grad()\n probability = model(data)\n\n if args.view_graphs:\n import torchviz\n pytorch_backward_graph = torchviz.make_dot(probability, params=dict(list(model.named_parameters())))\n pytorch_backward_graph.view()\n\n loss = loss_fn(probability, target)\n # Accumulate the training loss over all of the batches so that we can\n # calculate the average loss at the end. `loss` is a Tensor containing a\n # single value; the `.item()` function just returns the Python value\n # from the tensor.\n total_loss += loss.item()\n\n loss.backward()\n optimizer.step()\n\n # Stats\n if iteration % args.log_interval == 0:\n curr_time = time.time()\n elapsed_time = curr_time - start_time\n print('[{:5}/{:5} ({:2.0f}%)]\\tLoss: {:.6f}\\tExecution time: {:.4f}'.format(\n iteration * len(data), len(train_loader.dataset),\n 100. * iteration / len(train_loader), loss, elapsed_time))\n start_time = curr_time\n\n # Calculate the average loss over the training data.\n avg_train_loss = total_loss / len(train_loader)\n\n epoch_time = time.time() - t0\n print(\"\\n Average training loss: {0:.2f}\".format(avg_train_loss))\n print(\" Training epoch took: {:.4f}s\".format(epoch_time))\n return epoch_time\n\n\ndef test(args, model, device, loss_fn, test_loader):\n model.eval()\n\n t0 = time.time()\n\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n data = data.reshape(data.shape[0], -1)\n output = model(data)\n\n # Stats\n test_loss += loss_fn(output, target, False).item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Batch size: {:}, Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n args.test_batch_size, test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\n # Report the final accuracy for this validation run.\n epoch_time = time.time() - t0\n accuracy = float(correct)/len(test_loader.dataset)\n print(\" Accuracy: {0:.2f}\".format(accuracy))\n print(\" Validation took: {:.4f}s\".format(epoch_time))\n return epoch_time, accuracy\n\ndef my_loss(x, target, is_train=True):\n if is_train:\n return torch.nn.CrossEntropyLoss()(x, target)\n else:\n return torch.nn.CrossEntropyLoss(reduction='sum')(x, target)\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--train-steps', type=int, default=-1, metavar='N',\n help='number of steps to train. Set -1 to run through whole dataset (default: -1)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--batch-size', type=int, default=32, metavar='N',\n help='input batch size for training (default: 32)')\n parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',\n help='input batch size for testing (default: 64)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=42, metavar='S',\n help='random seed (default: 42)')\n parser.add_argument('--pytorch-only', action='store_true', default=False,\n help='disables ONNX Runtime training')\n parser.add_argument('--log-interval', type=int, default=300, metavar='N',\n help='how many batches to wait before logging training status (default: 300)')\n parser.add_argument('--view-graphs', action='store_true', default=False,\n help='views forward and backward graphs')\n parser.add_argument('--epochs', type=int, default=5, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='WARNING',\n help='Log level (default: WARNING)')\n parser.add_argument('--data-dir', type=str, default='./mnist',\n help='Path to the mnist data directory')\n\n args = parser.parse_args()\n\n\n # Common setup\n torch.manual_seed(args.seed)\n onnxruntime.set_seed(args.seed)\n\n if not args.no_cuda and torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n ## Data loader\n train_loader = torch.utils.data.DataLoader(datasets.MNIST(args.data_dir, train=True, download=True,\n transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])),\n batch_size=args.batch_size,\n shuffle=True)\n test_loader = None\n if args.test_batch_size > 0:\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data_dir, train=False, transform=transforms.Compose([\n transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),\n batch_size=args.test_batch_size, shuffle=True)\n\n # Model architecture\n model = NeuralNet(input_size=784, hidden_size=500, num_classes=10).to(device)\n if not args.pytorch_only:\n print('Training MNIST on ORTModule....')\n model = ORTModule(model)\n\n # TODO: change it to False to stop saving ONNX models\n model._save_onnx = True\n model._save_onnx_prefix = 'MNIST'\n\n # Set log level\n numeric_level = getattr(logging, args.log_level.upper(), None)\n if not isinstance(numeric_level, int):\n raise ValueError('Invalid log level: %s' % args.log_level)\n logging.basicConfig(level=numeric_level)\n else:\n print('Training MNIST on vanilla PyTorch....')\n optimizer = torch.optim.SGD(model.parameters(), 
lr=args.lr)\n\n # Train loop\n total_training_time, total_test_time, epoch_0_training, validation_accuracy = 0, 0, 0, 0\n for epoch in range(0, args.epochs):\n total_training_time += train(args, model, device, optimizer, my_loss, train_loader, epoch)\n if not args.pytorch_only and epoch == 0:\n epoch_0_training = total_training_time\n if args.test_batch_size > 0:\n test_time, validation_accuracy = test(args, model, device, my_loss, test_loader)\n total_test_time += test_time\n\n assert validation_accuracy > 0.92\n\n print('\\n======== Global stats ========')\n if not args.pytorch_only:\n estimated_export = 0\n if args.epochs > 1:\n estimated_export = epoch_0_training - (total_training_time - epoch_0_training)/(args.epochs-1)\n print(\" Estimated ONNX export took: {:.4f}s\".format(estimated_export))\n else:\n print(\" Estimated ONNX export took: Estimate available when epochs > 1 only\")\n print(\" Accumulated training without export took: {:.4f}s\".format(total_training_time - estimated_export))\n print(\" Accumulated training took: {:.4f}s\".format(total_training_time))\n print(\" Accumulated validation took: {:.4f}s\".format(total_test_time))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.manual_seed",
"torch.nn.Linear",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.ReLU"
]
] |
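A minimal sketch of the PyTorch calls listed above, mirroring the script's NeuralNet (784-500-10 with ReLU) but with a random batch in place of MNIST so nothing is downloaded; the batch size and seed are toy values:

import torch

torch.manual_seed(42)
device = "cuda" if torch.cuda.is_available() else "cpu"

model = torch.nn.Sequential(            # same layer shapes as the script's NeuralNet
    torch.nn.Linear(784, 500),
    torch.nn.ReLU(),
    torch.nn.Linear(500, 10),
).to(device)
loss_fn = torch.nn.CrossEntropyLoss()

data = torch.randn(32, 784, device=device)              # toy batch instead of MNIST
target = torch.randint(0, 10, (32,), device=device)
loss = loss_fn(model(data), target)
loss.backward()                                         # training-style pass

with torch.no_grad():                                   # evaluation-style pass
    pred = model(data).argmax(dim=1, keepdim=True)
    correct = pred.eq(target.view_as(pred)).sum().item()
print(loss.item(), correct)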
haochenucr/scdiff2
|
[
"ebc4149851399b2f15ed5b5874d44764b5f130fb"
] |
[
"scdiff2/prerun.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n# Author: Jun Ding\n# Email: junding (at) cs (dot) cmu (dot) edu\n# Date: June. 29th, 2020\n# \n# This scdiff software suite is desinged to infer the clusters, trajectories, and regulatory\n# networks underlying dynamic biological process (e.g., cell differntiation, disease progression)\n# based on given time-series single-cell expression input data. Please use \"scdiff -h\" for the detailed usage.\n# \n# This scdiff prerun program use scanpy package to learn the initial clusters/trajectories, which will be used as the input \n# to the scdiff2 main program to learn the detailed underlying regulatory networks and refined trajectories. \n#\n# This software is freely avaible for academic uses. \n# For any commerical usage, please contact me at the email address above.\n# All rights reserved.\n# Please don NOT modify the above statement.\n\n\n\n\n# In[1]:\nimport pdb,sys,os\nimport anndata\nimport scanpy as sc\nfrom File import *\nimport pandas as pd\nimport argparse\nimport matplotlib\nmatplotlib.use('Agg')\n\ndef prerun(exFn,outdir,iformat,mindisp,cluRes,skipGeneFilter):\n # # read in tab.txt file and save it to h5file \n if os.path.exists(outdir)==False:\n os.mkdir(outdir)\n\n TabFile(exFn).toH5(\"\\t\",\"%s/%s\"%(outdir,exFn.split(\"/\")[-1]),['index','time','label'])\n\n H5File(\"%s/%s.h5\"%(outdir,exFn)).toSparseAnnData(\"%s/%s.h5ad\"%(outdir,exFn),BLOCK=5000)\n # # Load in h5 file and convert it to anndata\n d1=anndata.read_h5ad(\"%s/%s.h5ad\"%(outdir,exFn))\n\n\n sc.settings.figdir = '%s/figures'%(outdir)\n # # Pre-processing ...\n print(\"pre-processing...\")\n sc.pp.filter_cells(d1,min_genes=200)\n sc.pp.filter_genes(d1,min_cells=3)\n\n if iformat=='raw':\n MTFlag1=d1.var_names.str.upper().str.startswith('MT-')\n MTFlag2=d1.var_names.str.upper().str.startswith('MT.')\n MTFlag=[bool(a+b) for a,b in zip(MTFlag1,MTFlag2)]\n d1.var['mt'] = MTFlag\n # # plot n_genes, total_counts, and mt counts\n sc.pp.calculate_qc_metrics(d1, qc_vars=['mt'], percent_top=None, log1p=False, inplace=True)\n #sc.pl.violin(d1, ['n_genes_by_counts', 'total_counts', 'pct_counts_mt'],jitter=0.4, multi_panel=True, show=False, save=\"_qc.pdf\")\n sc.pl.scatter(d1, x='total_counts', y='pct_counts_mt',show=False, save=\"_mt.pdf\")\n sc.pl.scatter(d1, x='total_counts', y='n_genes_by_counts',show=False, save=\"_n_genes.pdf\")\n d1 = d1[d1.obs.pct_counts_mt < 40, :]\n sc.pp.normalize_total(d1, target_sum=1e4)\n sc.pp.log1p(d1)\n \n # # filtering genes based on dispersion\n if (skipGeneFilter!='Yes') and (skipGeneFilter!='YES'):\n sc.pp.highly_variable_genes(d1, min_mean=0.0125, max_mean=5, min_disp=mindisp)\n sc.pl.highly_variable_genes(d1,show=False, save=\".pdf\")\n d1 = d1[:, d1.var.highly_variable]\n \n # # Removing batch effects\n #sc.pp.regress_out(d1, ['total_counts', 'pct_counts_mt'])\n #sc.pp.scale(d1, max_value=10)\n\n # # Dimension reduction\n sc.tl.pca(d1, svd_solver='arpack')\n\n\n # # Computing the neighborhood graph\n sc.pp.neighbors(d1, n_neighbors=15, n_pcs=50)\n sc.tl.diffmap(d1)\n\n # # clustering... 
\n sc.tl.leiden(d1,resolution=cluRes)\n sc.tl.paga(d1)\n sc.pl.paga(d1,show=False,save=\"_Traj.pdf\")\n sc.tl.umap(d1,init_pos='paga')\n sc.pl.umap(d1,color=['leiden','time'],legend_loc='on data',show=False,save=\"_clustering.pdf\")\n\n\n # # get DE genes for each of the clusters\n sc.tl.rank_genes_groups(d1, 'leiden', method='wilcoxon')\n sc.pl.rank_genes_groups(d1, n_genes=25, sharey=False,show=False, save=\"_global_DE_genes.pdf\")\n\n\n # # \n d1.write_h5ad(\"%s/%s.h5ad\"%(outdir,exFn),compression=9)\n print(\"\\n\\n>>>>------------------------------------------------<<<<\")\n print(\"prerun completed! please run scdiff2 for the second pass\")\n return d1\n\ndef main():\n parser=argparse.ArgumentParser(description=\"scdiff2 pre-run\")\n parser._action_groups.pop()\n required = parser.add_argument_group('required arguments')\n optional = parser.add_argument_group('optional arguments')\n required.add_argument('-i','--input',required=True,help='input single cell RNA-seq expression data')\n required.add_argument('-o','--output',required=True,help='output directory')\n optional.add_argument('-f','--format',required=False, default='raw', help='the format of input expression, either raw/norm (raw: raw read counts, norm: normalized expression')\n optional.add_argument('--mindisp',required=False,default=0.15,help='the dispersion cutoff to filter genes (genes with dipsersion < this cutoff will be filtered')\n optional.add_argument('--cluRes',required=False, default=1, help=\"The resolution parameter for the leiden clustering method\")\n optional.add_argument('--skipGeneFilter', required=False, default=None, help=\"whether to skip the gene filtering (Yes to skip)\")\n args = parser.parse_args()\n exFn=args.input\n outdir=args.output \n iformat=args.format\n mindisp=float(args.mindisp)\n cluRes=float(args.cluRes)\n skipGeneFilter=args.skipGeneFilter\n prerun(exFn,outdir,iformat,mindisp,cluRes,skipGeneFilter)\n\nif __name__==\"__main__\":\n main()\n"
] |
[
[
"matplotlib.use"
]
] |
IQcollaboratory/galpopFM
|
[
"1b30abc1cc2fd1119d0f34a237b0c1112d7afc9d"
] |
[
"galpopfm/dust_infer.py"
] |
[
"'''\n\n\n\n'''\nimport os \nimport sys \nimport h5py \nimport numpy as np \nfrom scipy.stats import chi2 \nnp.seterr(divide='ignore', invalid='ignore')\n# -- abcpmc -- \nimport abcpmc\nfrom abcpmc import mpi_util\n# -- galpopfm --\nfrom . import dustfm as dustFM\nfrom . import measure_obs as measureObs\n\ndat_dir = os.environ['GALPOPFM_DIR']\n\n\ndef distance_metric(x_obs, x_model, method='chi2', x_err=None): \n ''' distance metric between forward model m(theta) and observations\n\n notes\n -----\n * simple L2 norm between the 3D histogram of [Rmag, Balmer, FUV-NUV]\n ''' \n if x_err is None: \n x_err = [1. for _x in x_obs]\n\n if method == 'chi2': # chi-squared\n rho = [np.sum((_obs - _mod)**2/_err**2) \n for _obs, _mod, _err in zip(x_obs, x_model, x_err)]\n elif method == 'L2': # chi-squared\n rho = [np.sum((_obs - _mod)**2) \n for _obs, _mod, _err in zip(x_obs, x_model, x_err)]\n elif method == 'L1': # L1 morm \n rho = [np.sum(np.abs(_obs - _mod))\n for _obs, _mod, _err in zip(x_obs, x_model, x_err)]\n else: \n raise NotImplementedError\n return rho\n\n\ndef sumstat_obs(statistic='2d', return_bins=False): \n ''' summary statistics for SDSS observations is the 3D histgram of \n [M_r, G-R, FUV - NUV]. \n\n notes\n -----\n * 09/22/2020: observation summary statistics updated to Jeremy's SDSS\n catalog (centrals *and* satellites) with NSA absolute magnitudes \n * see `nb/observables.ipynb` to see exactly how the summary statistic is\n calculated. \n '''\n if statistic == '1d': \n r_edges, gr_edges, fn_edges, x_gr, x_fn, _, _ = np.load(os.path.join(dat_dir, 'obs',\n 'tinker.Mr_20.Mr.GR.FUVNUV.npy'), \n allow_pickle=True)\n dgr = gr_edges[1] - gr_edges[0]\n nbar = dgr * np.sum(x_gr)\n x_obs = [nbar, x_gr, x_fn]\n\n elif statistic == '2d': \n r_edges, gr_edges, fn_edges, x_gr, x_fn, _, _ = np.load(os.path.join(dat_dir, 'obs',\n 'tinker.Mr_20.Mr_GR.Mr_FUVNUV.npy'), \n allow_pickle=True) \n dr = r_edges[1] - r_edges[0]\n dgr = gr_edges[1] - gr_edges[0]\n nbar = dr * dgr * np.sum(x_gr),\n x_obs = [nbar, x_gr, x_fn]\n\n elif statistic == '3d': \n r_edges, gr_edges, fn_edges, _x_obs, _ = np.load(os.path.join(dat_dir, 'obs',\n 'tinker.Mr_20.Mr_GR_FUVNUV.npy'), \n allow_pickle=True)\n dr = r_edges[1] - r_edges[0]\n dgr = gr_edges[1] - gr_edges[0]\n dfn = fn_edges[1] - fn_edges[0]\n nbar = dr * dgr * dfn * np.sum(_x_obs)\n x_obs = [nbar, _x_obs]\n \n if return_bins: \n return r_edges, gr_edges, fn_edges, x_obs\n\n return x_obs \n\n\ndef sumstat_model(theta, sed=None, dem='slab_calzetti', f_downsample=1.,\n statistic='2d', noise=True, seed=None, return_datavector=False,\n sfr0_prescription='adhoc'): \n ''' calculate summary statistics for forward model m(theta) \n \n :param theta: \n array of input parameters\n :param sed: \n dictionary with SEDs of **central** galaxies \n :param dem: \n string specifying the dust empirical model\n :param f_downsample: \n if f_downsample > 1., then the SED dictionary is downsampled. \n :param sfr0_prescription: \n prescription for dealing with SFR=0 galaxies \n\n notes\n -----\n * 09/22/2020: simple noise model implemented\n * 4/22/2020: extra_data kwarg added. This is to pass pre-sampled\n observables for SFR = 0 galaxies \n '''\n # don't touch these values! 
they are set to agree with the binning of\n # obersvable\n nbins = [8, 400, 200]\n ranges = [(20, 24), (-5., 20.), (-5, 45.)]\n dRmag = 0.5\n dGR = 0.0625\n dfuvnuv = 0.25\n\n # SFR=0 galaxies \n sfr0 = (sed['logsfr.inst'] == -999) \n if sfr0_prescription == 'adhoc': \n raise ValueError\n #R_mag_sfr0, G_R_sfr0, FUV_NUV_sfr0 = _observable_zeroSFR(\n # sed['wave'], \n # sed['sed_noneb'][sfr0,:])\n elif sfr0_prescription == 'sfrmin': \n logsfr_min = sed['logsfr.inst'][~sfr0].min() # minimum SFR\n print(logsfr_min)\n sed['logsfr.inst'][sfr0] = logsfr_min\n else: \n raise NotImplementedError\n\n sed_dusty = dustFM.Attenuate(\n theta, \n sed['wave'], \n sed['sed_noneb'], \n sed['sed_onlyneb'], \n sed['logmstar'],\n sed['logsfr.inst'],\n dem=dem) \n \n # observational measurements \n F_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='galex_fuv') \n N_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='galex_nuv') \n G_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='g_sdss') \n R_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='r_sdss') \n\n # apply FUV and NUV cut\n uv_cut = (F_mag < -13.5) & (N_mag < -14) \n F_mag = F_mag[uv_cut]\n N_mag = N_mag[uv_cut]\n G_mag = G_mag[uv_cut]\n R_mag = R_mag[uv_cut]\n \n # calculate color \n FUV_NUV = F_mag - N_mag \n G_R = G_mag - R_mag\n \n if sfr0_prescription == 'adhoc':\n # append sampled SFR=0 observables to data vector\n R_mag = np.concatenate([R_mag, R_mag_sfr0]) \n G_R = np.concatenate([G_R, G_R_sfr0]) \n FUV_NUV = np.concatenate([FUV_NUV, FUV_NUV_sfr0]) \n\n n_gal = len(R_mag)\n \n if noise: \n if seed is not None: \n np.random.seed(seed)\n # noise model (simplest model) \n sig_R = chi2.rvs(3, loc=0.02, scale=0.00003, size=n_gal)\n sig_FN = chi2.rvs(2, loc=0.05, scale=0.05, size=n_gal)\n sig_GR = chi2.rvs(3, size=n_gal) * (0.00001 * (R_mag + 20.1) + 0.00005)\\\n + (0.000025 * (R_mag + 20.1) + 0.02835)\n\n R_mag += np.random.normal(size=n_gal) * sig_R\n FUV_NUV += np.random.normal(size=n_gal) * sig_FN\n G_R += np.random.normal(size=n_gal) * sig_GR\n\n data_vector = np.array([-1.*R_mag, G_R, FUV_NUV]).T\n\n if return_datavector: \n return data_vector.T, uv_cut\n\n Nbins, _ = np.histogramdd(data_vector, bins=nbins, range=ranges)\n \n # volume of simulation \n vol = {'simba': 100.**3, 'tng': 75.**3, 'eagle': 67.77**3}[sed['sim']] \n\n x_model = Nbins.astype(float) / vol / dRmag / dGR / dfuvnuv / f_downsample\n nbar = dRmag * dGR * dfuvnuv * np.sum(x_model)\n \n if statistic == '3d': \n return [nbar, x_model]\n elif statistic == '2d': \n x_r_gr = dfuvnuv * np.sum(x_model, axis=2)\n x_r_fn = dGR * np.sum(x_model, axis=1)\n return [nbar, x_r_gr, x_r_fn]\n elif statistic == '1d': \n x_gr = dRmag * np.sum(dfuvnuv * np.sum(x_model, axis=2), axis=0)\n x_fn = dRmag * np.sum(dGR * np.sum(x_model, axis=1), axis=0) \n return [nbar, x_gr, x_fn]\n\n\ndef _observable_zeroSFR(wave, sed): \n ''' for SFR = 0 galaxies, sample G-R and FUV-NUV color directly from G-R\n and FUV-NUV distributions of quiescent SDSS galaxies. This is to remove\n these galaxies from consideration in the inference. \n\n See `nb/sdss_quiescent_sumstat.ipynb` for details. \n\n notes\n -----\n * 09/22/2020: updated the quiescent distributions since the observational\n dataset has been updated.\n * in principle, the G-R and FUV-NUV sampling can done for R bins, but at\n the moment it does not. 
\n * this only runs once so its not optimized in any way \n '''\n ngal = sed.shape[0] \n # read in G-R and FUV-NUV distributions of SDSS quiescent galaxies \n gr_edges, gr_nbins = np.load(os.path.join(dat_dir, 'obs',\n 'tinker.Mr_20.quiescent.G_R_dist.npy'), allow_pickle=True)\n\n fn_edges, fn_nbins = np.load(os.path.join(dat_dir, 'obs',\n 'tinker.Mr_20.quiescent.FUV_NUV_dist.npy'), allow_pickle=True)\n \n # calculate Mr from SEDs \n R_mag = measureObs.AbsMag_sed(wave, sed, band='r_sdss') \n \n # now sample from SDSS distribution using inverse transform sampling \n gr_cdf = np.cumsum(gr_nbins)/np.sum(gr_nbins) # calculate CDFs for both distributions\n fn_cdf = np.cumsum(fn_nbins)/np.sum(fn_nbins) \n\n us = np.random.rand(ngal) \n G_R = np.empty(ngal) \n FUV_NUV = np.empty(ngal)\n for i, u in enumerate(us): \n G_R[i] = 0.5*(gr_edges[:-1] + gr_edges[1:])[np.abs(u - gr_cdf).argmin()]\n FUV_NUV[i] = 0.5*(fn_edges[:-1] + fn_edges[1:])[np.abs(u - fn_cdf).argmin()]\n \n return [R_mag, G_R, FUV_NUV]\n\n\ndef median_alongr(rmag, values, rmin=-20., rmax=-24., nbins=16): \n ''' find the median of specified values as a function of rmag \n '''\n dr = (rmin - rmax)/float(nbins) \n\n medians = [] \n for i in range(nbins-1): \n rbin = (rmag < rmin-dr*i) & (rmag >= rmin-dr*(i+1)) & np.isfinite(values) \n medians.append(np.median(values[rbin])) \n rmid = rmin - dr*(np.arange(nbins-1).astype(int)+0.5)\n\n return rmid, np.array(medians) \n\n\ndef _read_sed(name, seed=0): \n ''' read in sed files \n '''\n if name not in ['simba', 'tng', 'eagle']: raise NotImplementedError\n fhdf5 = os.path.join(dat_dir, 'sed', '%s.hdf5' % name) \n\n f = h5py.File(fhdf5, 'r') \n sed = {} \n sed['wave'] = f['wave'][...] \n sed['sed_neb'] = f['sed_neb'][...]\n sed['sed_noneb'] = f['sed_noneb'][...]\n sed['sed_onlyneb'] = sed['sed_neb'] - sed['sed_noneb'] # only nebular emissoins \n sed['logmstar'] = f['logmstar'][...] \n if 'logsfr.100' in f.keys(): \n sed['logsfr.100'] = f['logsfr.100'][...] \n sed['logsfr.inst'] = f['logsfr.inst'][...]\n sed['censat'] = f['censat'][...] \n f.close() \n \n '''\n # deal with SFR resolution effect by unifromly sampling the SFR \n # over 0 to resolution limit \n if name == 'simba': \n res_sfr = 0.182\n elif name == 'tng': \n res_sfr = 0.005142070183729021 # THIS IS WRONG!!!\n \n np.random.seed(seed)\n isnan = (~np.isfinite(sed['logsfr.100']))\n sed['logsfr.100'][isnan] = np.log10(np.random.uniform(0., res_sfr, size=np.sum(isnan))) \n '''\n if 'logsfr.100' in f.keys(): \n isnan = (~np.isfinite(sed['logsfr.100']))\n sed['logsfr.100'][isnan] = -999.\n isnan = (~np.isfinite(sed['logsfr.inst']))\n sed['logsfr.inst'][isnan] = -999.\n return sed\n\n\ndef writeABC(type, pool, prior=None, abc_dir=None): \n ''' Given abcpmc pool object. 
Writeout specified ABC pool property\n '''\n if abc_dir is None: \n abc_dir = os.path.join(dat_dir, 'abc') \n\n if type == 'init': # initialize\n if not os.path.exists(abc_dir): \n try: \n os.makedirs(abc_dir)\n except OSError: \n pass \n # write specific info of the run \n f = open(os.path.join(abc_dir, 'info.md'), 'w')\n f.write('# '+run+' run specs \\n')\n f.write('N_particles = %i \\n' % pool.N)\n f.write('Distance function = %s \\n' % pool.dist.__name__)\n # prior \n f.write('Top Hat Priors \\n')\n f.write('Prior Min = [%s] \\n' % ','.join([str(prior_obj.min[i]) for i in range(len(prior_obj.min))]))\n f.write('Prior Max = [%s] \\n' % ','.join([str(prior_obj.max[i]) for i in range(len(prior_obj.max))]))\n f.close()\n elif type == 'eps': # threshold writeout \n if pool is None: # write or overwrite threshold writeout\n f = open(os.path.join(abc_dir, 'epsilon.dat'), \"w\")\n else: \n f = open(os.path.join(abc_dir, 'epsilon.dat'), \"a\") # append\n f.write(str(pool.eps)+'\\t'+str(pool.ratio)+'\\n')\n f.close()\n elif type == 'theta': # particle thetas\n np.savetxt(os.path.join(abc_dir, 'theta.t%i.dat' % (pool.t)), pool.thetas) \n elif type == 'w': # particle weights\n np.savetxt(os.path.join(abc_dir, 'w.t%i.dat' % (pool.t)), pool.ws)\n elif type == 'rho': # distance\n np.savetxt(os.path.join(abc_dir, 'rho.t%i.dat' % (pool.t)), pool.dists)\n else: \n raise ValueError\n return None \n\n\ndef plotABC(pool, prior=None, dem='slab_calzetti', abc_dir=None): \n ''' Given abcpmc pool object plot the particles \n '''\n import corner as DFM \n import matplotlib as mpl\n import matplotlib.pyplot as plt \n try: \n # sometimes this formatting fails \n mpl.rcParams['text.usetex'] = True\n mpl.rcParams['font.family'] = 'serif'\n mpl.rcParams['axes.linewidth'] = 1.5\n mpl.rcParams['axes.xmargin'] = 1\n mpl.rcParams['xtick.labelsize'] = 'x-large'\n mpl.rcParams['xtick.major.size'] = 5\n mpl.rcParams['xtick.major.width'] = 1.5\n mpl.rcParams['ytick.labelsize'] = 'x-large'\n mpl.rcParams['ytick.major.size'] = 5\n mpl.rcParams['ytick.major.width'] = 1.5\n mpl.rcParams['legend.frameon'] = False\n except: \n pass \n\n # prior range\n prior_range = [(_min, _max) for _min, _max in zip(prior.min, prior.max)]\n\n # theta labels \n if dem == 'slab_calzetti': \n lbls = [r'$m_{\\tau}$', r'$c_{\\tau}$', r'$f_{\\rm neb}$'] \n elif dem == 'slab_noll_simple': \n lbls = [r'$c_{\\tau}$', r'$c_{\\delta}$'] \n elif dem == 'slab_noll_m': \n lbls = [r'$m_{\\tau}$', r'$c_{\\tau}$', r'$m_\\delta$', r'$c_\\delta$',\n r'$m_E$', r'$c_E$', r'$f_{\\rm neb}$'] \n elif dem == 'slab_noll_msfr': \n lbls = [r'$m_{\\tau,1}$', r'$m_{\\tau,2}$', r'$c_{\\tau}$', \n r'$m_{\\delta,1}$', r'$m_{\\delta,2}$', r'$c_\\delta$',\n r'$m_E$', r'$c_E$', r'$f_{\\rm neb}$'] \n elif dem == 'tnorm_noll_msfr': \n lbls = [r'$m_{\\mu,1}$', r'$m_{\\mu,2}$', r'$c_{\\mu}$', \n r'$m_{\\sigma,1}$', r'$m_{\\sigma,2}$', r'$c_{\\sigma}$', \n r'$m_{\\delta,1}$', r'$m_{\\delta,2}$', r'$c_\\delta$',\n r'$m_E$', r'$c_E$', r'$f_{\\rm neb}$'] \n elif dem == 'slab_noll_msfr_fixbump': \n lbls = [r'$m_{\\tau,1}$', r'$m_{\\tau,2}$', r'$c_{\\tau}$', \n r'$m_{\\delta,1}$', r'$m_{\\delta,2}$', r'$c_\\delta$']#, r'$f_{\\rm neb}$'] \n elif dem == 'tnorm_noll_msfr_fixbump': \n lbls = [r'$m_{\\mu,1}$', r'$m_{\\mu,2}$', r'$c_{\\mu}$', \n r'$m_{\\sigma,1}$', r'$m_{\\sigma,2}$', r'$c_{\\sigma}$', \n r'$m_{\\delta,1}$', r'$m_{\\delta,2}$', r'$c_\\delta$',\n r'$f_{\\rm neb}$'] \n elif dem == 'slab_noll_msfr_kink_fixbump': \n lbls = [r'$m_{\\tau,{\\rm low}~M_*}$', r'$m_{\\tau,{\\rm 
high}~M_*}$', \n r'$m_{\\tau,{\\rm low~SFR}}$', r'$m_{\\tau,{\\rm high~SFR}}$', r'$c_{\\tau}$', \n r'$m_{\\delta,1}$', r'$m_{\\delta,2}$', r'$c_\\delta$',\n r'$f_{\\rm neb}$'] \n elif dem == 'slab_noll_mssfr_fixbump': \n lbls = [r'$m_{\\mu,1}$', r'$m_{\\mu,2}$', r'$c_{\\mu}$', \n r'$m_{\\sigma,1}$', r'$m_{\\sigma,2}$', r'$c_{\\sigma}$', \n r'$m_{\\delta,1}$', r'$m_{\\delta,2}$', r'$c_\\delta$',\n r'$f_{\\rm neb}$'] \n else: \n raise NotImplementedError\n\n if abc_dir is None: \n abc_dir = os.path.join(dat_dir, 'abc') \n \n fig = DFM.corner(\n pool.thetas, \n range=prior_range,\n weights=pool.ws,\n quantiles=[0.16, 0.5, 0.84], \n levels=[0.68, 0.95],\n nbin=20, \n smooth=True, \n labels=lbls, \n label_kwargs={'fontsize': 20}) \n try: \n fig.savefig(os.path.join(abc_dir, 'abc.t%i.png' % pool.t) , bbox_inches='tight') \n except: \n fig.savefig(os.path.join(abc_dir, 'abc.t%i.pdf' % pool.t) , bbox_inches='tight') \n return None \n"
] |
[
[
"scipy.stats.chi2.rvs",
"numpy.abs",
"numpy.isfinite",
"numpy.random.seed",
"numpy.arange",
"numpy.median",
"numpy.cumsum",
"numpy.histogramdd",
"numpy.concatenate",
"numpy.seterr",
"numpy.random.normal",
"numpy.random.rand",
"numpy.array",
"numpy.sum",
"numpy.empty"
]
] |
PedrV/stfX
|
[
"017436cd4ade7f0ea95185d82408697c43ac6ce6"
] |
[
"validation/utils/m1.py"
] |
[
"import unittest\nimport os\nfrom matplotlib import pyplot as plt\nfrom shapely import geometry, affinity\n\nX_COORDINATE = 0\nY_COORDINATE = 1\n\n\ndef extract_x_y(polygon: list) -> (list, list):\n \"\"\"Extract the x and y coordinates as two separate lists\"\"\"\n x_list = []\n y_list = []\n\n for vertex in polygon:\n x_list.append(vertex[X_COORDINATE])\n y_list.append(vertex[Y_COORDINATE])\n\n return (x_list, y_list)\n\n\ndef save_fig(dir: str):\n \"\"\"Save the current plt figure in the given directory under the name: m1.png\"\"\"\n plt.savefig(dir + '/m1.png')\n plt.clf()\n\n\ndef plot_polygons(hull: list, min_hull: list, perceived_poly: list, real_poly: list, dir: str = None):\n \"\"\"Plot the given two polygons, in a single figure, with different colors\"\"\"\n h1_x, h1_y = extract_x_y(hull)\n h2_x, h2_y = extract_x_y(min_hull)\n p1_x, p1_y = extract_x_y(perceived_poly)\n p2_x, p2_y = extract_x_y(real_poly)\n\n # Figure settings\n fig = plt.figure()\n # fig.suptitle('Convex hull area (red) VS real representation area (blue)')\n plt.xlabel('x')\n plt.ylabel('y')\n\n # Plotting hulls\n plt.fill(h1_x, h1_y, color=\"#FF000020\")\n plt.fill(h2_x, h2_y, color=\"#0000FF20\")\n\n # Plotting polygons lines\n plt.plot(p1_x, p1_y, color=\"#FF000060\") # Red perceived poly\n plt.plot(p2_x, p2_y, color=\"#0000FF60\") # Blue real poly\n\n # Plotting polygons points\n for p in perceived_poly:\n plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'o', color=\"#FF0000A0\")\n for p in real_poly:\n plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'x', color=\"#0000FFA0\")\n\n # plt.show()\n if dir is not None:\n save_fig(dir)\n\n\ndef surveyor_formula(polygon: list) -> float:\n \"\"\"Find the area of the given polygon using the surveyor formula\"\"\"\n # Check if first and last points of polygon are equal\n parsed_poly = polygon[0:-1]\\\n if polygon[0] == polygon[len(polygon)-1]\\\n else polygon\n area = 0\n\n for i in range(-1, len(parsed_poly)-1):\n area += parsed_poly[i][X_COORDINATE] * parsed_poly[i+1][Y_COORDINATE] -\\\n parsed_poly[i][Y_COORDINATE] * parsed_poly[i+1][X_COORDINATE]\n\n return abs(area / 2)\n\n\ndef polygon_to_vertices_list(polygon: geometry.Polygon) -> list:\n \"\"\"Extract the polygon vertices as a list\"\"\"\n return list(polygon.exterior.coords)\n\n\ndef apply_transformations(initial_representation: list, events: list) -> float:\n \"\"\"Apply the transformations in the events list to the initial representation\"\"\"\n scale = 1\n rot_angle = 0\n trans_vector = [0, 0]\n\n for item in events:\n for event in item[\"events\"]:\n if event[\"type\"] == \"TRANSLATION\":\n trans_vector[X_COORDINATE] += event[\"trigger\"][\"transformation\"][X_COORDINATE]\n trans_vector[Y_COORDINATE] += event[\"trigger\"][\"transformation\"][Y_COORDINATE]\n\n elif event[\"type\"] == \"ROTATION\":\n rot_angle += event[\"trigger\"][\"transformation\"]\n\n elif event[\"type\"] == \"UNIFORM_SCALE\":\n scale *= event[\"trigger\"][\"transformation\"]\n\n # Apply multiplication\n polygon = geometry.Polygon(initial_representation)\n s_polygon = affinity.scale(polygon,\n xfact=scale,\n yfact=scale,\n origin=(0, 0))\n r_s_polygon = affinity.rotate(s_polygon,\n rot_angle,\n origin=(0, 0))\n t_r_s_polygon = affinity.translate(r_s_polygon,\n xoff=trans_vector[0],\n yoff=trans_vector[1])\n return polygon_to_vertices_list(t_r_s_polygon)\n\n\ndef apply_m1(real_representation: list, perceived_representation: list, dir: str = None) -> float:\n \"\"\"Apply the metric M1 and obtain its result, between 0 and 1\"\"\"\n joint_point_set = 
real_representation + perceived_representation\n\n # Getting necessary hulls\n real_convex_hull = geometry.MultiPoint(real_representation).convex_hull\n perceived_hull = geometry.MultiPoint(perceived_representation).convex_hull\n convex_hull = geometry.MultiPoint(joint_point_set).convex_hull\n\n # Getting vertices of hulls\n real_vertices = polygon_to_vertices_list(real_convex_hull)\n perceived_vertices = polygon_to_vertices_list(perceived_hull)\n joint_vertices = polygon_to_vertices_list(convex_hull)\n\n # Getting the min area\n real_area = surveyor_formula(real_vertices)\n perceived_area = surveyor_formula(perceived_vertices)\n if real_area <= perceived_area:\n min_area = real_area\n min_vertices = real_vertices\n else:\n min_area = perceived_area\n min_vertices = perceived_vertices\n\n plot_polygons(hull=joint_vertices,\n min_hull=min_vertices,\n perceived_poly=perceived_representation,\n real_poly=real_representation,\n dir=dir)\n return min_area / surveyor_formula(joint_vertices)\n\n\nclass TestM1(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(TestM1, self).__init__(*args, **kwargs)\n\n self.representation = [\n [1, 1],\n [1, -1],\n [-1, -1],\n [-1, 1],\n [1, 1]\n ]\n self.transformations = [{\n \"events\": [\n {\"type\": \"TRANSLATION\", \"trigger\": {\"transformation\": [5, 5]}},\n {\"type\": \"ROTATION\", \"trigger\": {\"transformation\": 180}},\n {\"type\": \"UNIFORM_SCALE\", \"trigger\": {\"transformation\": 1.25}}\n ]\n }, {\n \"events\": [\n {\"type\": \"TRANSLATION\", \"trigger\": {\"transformation\": [5, 0]}},\n {\"type\": \"ROTATION\", \"trigger\": {\"transformation\": -90}},\n {\"type\": \"UNIFORM_SCALE\", \"trigger\": {\"transformation\": 1.6}}\n ]\n }]\n self.min_scale = [{\n \"events\": [\n {\"type\": \"UNIFORM_SCALE\", \"trigger\": {\"transformation\": 0.5}}\n ]\n }]\n\n def test_area(self):\n square = [\n [1, 1],\n [1, -1],\n [-1, -1],\n [-1, 1]\n ]\n self.assertEqual(surveyor_formula(square), 4)\n self.assertEqual(surveyor_formula(self.representation), 4)\n\n def test_transformations(self):\n self.assertEqual(apply_transformations(self.representation, self.transformations), [\n (8.0, 7.0),\n (12.0, 7.0),\n (12.0, 3.0),\n (8.0, 3.0),\n (8.0, 7.0),\n ])\n\n def test_M1(self):\n self.assertEqual(apply_m1(self.representation, self.representation), 1)\n self.assertTrue(apply_m1(self.representation,\n apply_transformations(self.representation, self.transformations))\n < 0.1)\n self.assertEqual(apply_m1([\n (8.0, 7.0),\n (12.0, 7.0),\n (12.0, 3.0),\n (8.0, 3.0),\n (8.0, 7.0)],\n apply_transformations(self.representation, self.transformations)),\n 1)\n\n def test_mean_perceived(self):\n self.assertEqual(apply_m1(self.representation,\n apply_transformations(self.representation, self.min_scale)),\n 0.25)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.fill",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
rostyboost/cvxpy
|
[
"95e728b01b6bb442c924812c7eac631019c5cbc6"
] |
[
"cvxpy/tests/test_examples.py"
] |
[
"\"\"\"\nCopyright 2013 Steven Diamond\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import print_function\nimport cvxpy as cvx\nimport cvxpy.interface as intf\nfrom cvxpy.tests.base_test import BaseTest\nfrom cvxpy.reductions.solvers.conic_solvers import ecos_conif\nimport numpy as np\nimport unittest\n\n\nclass TestExamples(BaseTest):\n \"\"\" Unit tests using example problems. \"\"\"\n\n # Find the largest Euclidean ball in the polyhedron.\n def test_chebyshev_center(self):\n # The goal is to find the largest Euclidean ball (i.e. its center and\n # radius) that lies in a polyhedron described by linear inequalities in this\n # fashion: P = {x : a_i'*x <= b_i, i=1,...,m} where x is in R^2\n\n # Generate the input data\n a1 = np.array([2, 1])\n a2 = np.array([2, -1])\n a3 = np.array([-1, 2])\n a4 = np.array([-1, -2])\n b = np.ones(4)\n\n # Create and solve the model\n r = cvx.Variable(name='r')\n x_c = cvx.Variable(2, name='x_c')\n obj = cvx.Maximize(r)\n constraints = [ # TODO have atoms compute values for constants.\n a1.T*x_c + np.linalg.norm(a1)*r <= b[0],\n a2.T*x_c + np.linalg.norm(a2)*r <= b[1],\n a3.T*x_c + np.linalg.norm(a3)*r <= b[2],\n a4.T*x_c + np.linalg.norm(a4)*r <= b[3],\n ]\n\n p = cvx.Problem(obj, constraints)\n result = p.solve()\n self.assertAlmostEqual(result, 0.447214)\n self.assertAlmostEqual(r.value, result)\n self.assertItemsAlmostEqual(x_c.value, [0, 0])\n\n # Test issue with numpy scalars.\n def test_numpy_scalars(self):\n n = 6\n eps = 1e-6\n np.random.seed(10)\n P0 = np.random.randn(n, n)\n eye = np.eye(n)\n P0 = P0.T.dot(P0) + eps * eye\n\n print(P0)\n\n P1 = np.random.randn(n, n)\n P1 = P1.T.dot(P1)\n P2 = np.random.randn(n, n)\n P2 = P2.T.dot(P2)\n P3 = np.random.randn(n, n)\n P3 = P3.T.dot(P3)\n\n q0 = np.random.randn(n, 1)\n q1 = np.random.randn(n, 1)\n q2 = np.random.randn(n, 1)\n q3 = np.random.randn(n, 1)\n\n r0 = np.random.randn(1, 1)\n r1 = np.random.randn(1, 1)\n r2 = np.random.randn(1, 1)\n r3 = np.random.randn(1, 1)\n\n slack = cvx.Variable()\n # Form the problem\n x = cvx.Variable(n)\n objective = cvx.Minimize(0.5*cvx.quad_form(x, P0) + q0.T*x + r0 + slack)\n constraints = [0.5*cvx.quad_form(x, P1) + q1.T*x + r1 <= slack,\n 0.5*cvx.quad_form(x, P2) + q2.T*x + r2 <= slack,\n 0.5*cvx.quad_form(x, P3) + q3.T*x + r3 <= slack,\n ]\n\n # We now find the primal result and compare it to the dual result\n # to check if strong duality holds i.e. 
the duality gap is effectively zero\n p = cvx.Problem(objective, constraints)\n p.solve()\n\n # Note that since our data is random,\n # we may need to run this program multiple times to get a feasible primal\n # When feasible, we can print out the following values\n print(x.value) # solution\n lam1 = constraints[0].dual_value\n lam2 = constraints[1].dual_value\n lam3 = constraints[2].dual_value\n print(type(lam1))\n\n P_lam = P0 + lam1*P1 + lam2*P2 + lam3*P3\n q_lam = q0 + lam1*q1 + lam2*q2 + lam3*q3\n r_lam = r0 + lam1*r1 + lam2*r2 + lam3*r3\n dual_result = -0.5*q_lam.T.dot(P_lam).dot(q_lam) + r_lam\n print(dual_result.shape)\n self.assertEqual(intf.shape(dual_result), (1, 1))\n\n # Tests examples from the README.\n def test_readme_examples(self):\n import numpy\n numpy.random.seed(1)\n # cvx.Problem data.\n m = 30\n n = 20\n A = numpy.random.randn(m, n)\n b = numpy.random.randn(m)\n\n # Construct the problem.\n x = cvx.Variable(n)\n objective = cvx.Minimize(cvx.sum_squares(A*x - b))\n constraints = [0 <= x, x <= 1]\n p = cvx.Problem(objective, constraints)\n\n # The optimal objective is returned by p.solve().\n p.solve()\n # The optimal value for x is stored in x.value.\n print(x.value)\n # The optimal Lagrange multiplier for a constraint\n # is stored in constraint.dual_value.\n print(constraints[0].dual_value)\n\n ####################################################\n\n # Scalar variable.\n a = cvx.Variable()\n\n # Column vector variable of length 5.\n x = cvx.Variable(5)\n\n # Matrix variable with 4 rows and 7 columns.\n A = cvx.Variable((4, 7))\n\n ####################################################\n\n # Positive scalar parameter.\n m = cvx.Parameter(nonneg=True)\n\n # Column vector parameter with unknown sign (by default).\n cvx.Parameter(5)\n\n # Matrix parameter with negative entries.\n G = cvx.Parameter((4, 7), nonpos=True)\n\n # Assigns a constant value to G.\n G.value = -numpy.ones((4, 7))\n\n # Raises an error for assigning a value with invalid sign.\n with self.assertRaises(Exception) as cm:\n G.value = numpy.ones((4, 7))\n self.assertEqual(str(cm.exception), \"Parameter value must be nonpositive.\")\n\n ####################################################\n a = cvx.Variable()\n x = cvx.Variable(5)\n\n # expr is an Expression object after each assignment.\n expr = 2*x\n expr = expr - a\n expr = cvx.sum(expr) + cvx.norm(x, 2)\n\n ####################################################\n\n import numpy as np\n\n # cvx.Problem data.\n n = 10\n m = 5\n A = np.random.randn(n, m)\n b = np.random.randn(n)\n gamma = cvx.Parameter(nonneg=True)\n\n # Construct the problem.\n x = cvx.Variable(m)\n objective = cvx.Minimize(cvx.sum_squares(A*x - b) + gamma*cvx.norm(x, 1))\n p = cvx.Problem(objective)\n\n # Assign a value to gamma and find the optimal x.\n def get_x(gamma_value):\n gamma.value = gamma_value\n p.solve()\n return x.value\n\n gammas = np.logspace(-1, 2, num=2)\n # Serial computation.\n [get_x(value) for value in gammas]\n\n ####################################################\n n = 10\n\n mu = np.random.randn(1, n)\n sigma = np.random.randn(n, n)\n sigma = sigma.T.dot(sigma)\n gamma = cvx.Parameter(nonneg=True)\n gamma.value = 1\n x = cvx.Variable(n)\n\n # Constants:\n # mu is the vector of expected returns.\n # sigma is the covariance matrix.\n # gamma is a cvx.Parameter that trades off risk and return.\n\n # cvx.Variables:\n # x is a vector of stock holdings as fractions of total assets.\n\n expected_return = mu*x\n risk = cvx.quad_form(x, sigma)\n\n objective = 
cvx.Maximize(expected_return - gamma*risk)\n p = cvx.Problem(objective, [cvx.sum(x) == 1])\n p.solve()\n\n # The optimal expected return.\n print(expected_return.value)\n\n # The optimal risk.\n print(risk.value)\n\n ###########################################\n\n N = 50\n M = 40\n n = 10\n data = []\n for i in range(N):\n data += [(1, np.random.normal(loc=1.0, scale=2.0, size=n))]\n for i in range(M):\n data += [(-1, np.random.normal(loc=-1.0, scale=2.0, size=n))]\n\n # Construct problem.\n gamma = cvx.Parameter(nonneg=True)\n gamma.value = 0.1\n # 'a' is a variable constrained to have at most 6 non-zero entries.\n a = cvx.Variable(n) # mi.SparseVar(n, nonzeros=6)\n b = cvx.Variable()\n\n slack = [cvx.pos(1 - label*(sample.T*a - b)) for (label, sample) in data]\n objective = cvx.Minimize(cvx.norm(a, 2) + gamma*sum(slack))\n p = cvx.Problem(objective)\n # Extensions can attach new solve methods to the CVXPY cvx.Problem class.\n # p.solve(method=\"admm\")\n p.solve()\n\n # Count misclassifications.\n errors = 0\n for label, sample in data:\n if label*(sample.T*a - b).value < 0:\n errors += 1\n\n print(\"%s misclassifications\" % errors)\n print(a.value)\n print(b.value)\n\n def test_advanced1(self):\n \"\"\"Code from the advanced tutorial.\n \"\"\"\n # Solving a problem with different solvers.\n x = cvx.Variable(2)\n obj = cvx.Minimize(x[0] + cvx.norm(x, 1))\n constraints = [x >= 2]\n prob = cvx.Problem(obj, constraints)\n\n # Solve with ECOS.\n prob.solve(solver=cvx.ECOS)\n print(\"optimal value with ECOS:\", prob.value)\n self.assertAlmostEqual(prob.value, 6)\n\n # Solve with ECOS_BB.\n prob.solve(solver=cvx.ECOS_BB)\n print(\"optimal value with ECOS_BB:\", prob.value)\n self.assertAlmostEqual(prob.value, 6)\n\n # Solve with CVXOPT.\n if cvx.CVXOPT in cvx.installed_solvers():\n prob.solve(solver=cvx.CVXOPT)\n print(\"optimal value with CVXOPT:\", prob.value)\n self.assertAlmostEqual(prob.value, 6)\n\n # Solve with SCS.\n prob.solve(solver=cvx.SCS)\n print(\"optimal value with SCS:\", prob.value)\n self.assertAlmostEqual(prob.value, 6, places=2)\n\n if cvx.CPLEX in cvx.installed_solvers():\n # Solve with CPLEX.\n prob.solve(solver=cvx.CPLEX)\n print(\"optimal value with CPLEX:\", prob.value)\n self.assertAlmostEqual(prob.value, 6)\n\n if cvx.GLPK in cvx.installed_solvers():\n # Solve with GLPK.\n prob.solve(solver=cvx.GLPK)\n print(\"optimal value with GLPK:\", prob.value)\n self.assertAlmostEqual(prob.value, 6)\n\n # Solve with GLPK_MI.\n prob.solve(solver=cvx.GLPK_MI)\n print(\"optimal value with GLPK_MI:\", prob.value)\n self.assertAlmostEqual(prob.value, 6)\n\n if cvx.GUROBI in cvx.installed_solvers():\n # Solve with Gurobi.\n prob.solve(solver=cvx.GUROBI)\n print(\"optimal value with GUROBI:\", prob.value)\n self.assertAlmostEqual(prob.value, 6)\n\n print(cvx.installed_solvers())\n\n def test_log_det(self):\n # Generate data\n x = np.array([[0.55, 0.0],\n [0.25, 0.35],\n [-0.2, 0.2],\n [-0.25, -0.1],\n [-0.0, -0.3],\n [0.4, -0.2]]).T\n (n, m) = x.shape\n\n # Create and solve the model\n A = cvx.Variable((n, n))\n b = cvx.Variable(n)\n obj = cvx.Maximize(cvx.log_det(A))\n constraints = []\n for i in range(m):\n constraints.append(cvx.norm(A*x[:, i] + b) <= 1)\n p = cvx.Problem(obj, constraints)\n result = p.solve()\n self.assertAlmostEqual(result, 1.9746, places=2)\n\n def test_portfolio_problem(self):\n \"\"\"Test portfolio problem that caused dcp_attr errors.\n \"\"\"\n import numpy as np\n import scipy.sparse as sp\n np.random.seed(5)\n n = 100 # 10000\n m = 10 # 100\n\n F = 
sp.rand(m, n, density=0.01)\n F.data = np.ones(len(F.data))\n D = sp.eye(n).tocoo()\n D.data = np.random.randn(len(D.data))**2\n Z = np.random.randn(m, 1)\n Z = Z.dot(Z.T)\n\n x = cvx.Variable(n)\n y = x.__rmul__(F)\n # DCP attr causes error because not all the curvature\n # matrices are reduced to constants when an atom\n # is scalar.\n cvx.square(cvx.norm(D*x)) + cvx.square(Z*y)\n\n def test_intro(self):\n \"\"\"Test examples from cvxpy.org introduction.\n \"\"\"\n import numpy\n\n # cvx.Problem data.\n m = 30\n n = 20\n numpy.random.seed(1)\n A = numpy.random.randn(m, n)\n b = numpy.random.randn(m)\n\n # Construct the problem.\n x = cvx.Variable(n)\n objective = cvx.Minimize(cvx.sum_squares(A*x - b))\n constraints = [0 <= x, x <= 1]\n prob = cvx.Problem(objective, constraints)\n\n # The optimal objective is returned by p.solve().\n prob.solve()\n # The optimal value for x is stored in x.value.\n print(x.value)\n # The optimal Lagrange multiplier for a constraint\n # is stored in constraint.dual_value.\n print(constraints[0].dual_value)\n\n ########################################\n\n # Create two scalar variables.\n x = cvx.Variable()\n y = cvx.Variable()\n\n # Create two constraints.\n constraints = [x + y == 1,\n x - y >= 1]\n\n # Form objective.\n obj = cvx.Minimize(cvx.square(x - y))\n\n # Form and solve problem.\n prob = cvx.Problem(obj, constraints)\n prob.solve() # Returns the optimal value.\n print(\"status:\", prob.status)\n print(\"optimal value\", prob.value)\n print(\"optimal var\", x.value, y.value)\n\n ########################################\n\n # Create two scalar variables.\n x = cvx.Variable()\n y = cvx.Variable()\n\n # Create two constraints.\n constraints = [x + y == 1,\n x - y >= 1]\n\n # Form objective.\n obj = cvx.Minimize(cvx.square(x - y))\n\n # Form and solve problem.\n prob = cvx.Problem(obj, constraints)\n prob.solve() # Returns the optimal value.\n print(\"status:\", prob.status)\n print(\"optimal value\", prob.value)\n print(\"optimal var\", x.value, y.value)\n\n self.assertEqual(prob.status, cvx.OPTIMAL)\n self.assertAlmostEqual(prob.value, 1.0)\n self.assertAlmostEqual(x.value, 1.0)\n self.assertAlmostEqual(y.value, 0)\n\n ########################################\n\n # Replace the objective.\n prob = cvx.Problem(cvx.Maximize(x + y), prob.constraints)\n print(\"optimal value\", prob.solve())\n\n self.assertAlmostEqual(prob.value, 1.0, places=3)\n\n # Replace the constraint (x + y == 1).\n constraints = prob.constraints\n constraints[0] = (x + y <= 3)\n prob = cvx.Problem(prob.objective, constraints)\n print(\"optimal value\", prob.solve())\n\n self.assertAlmostEqual(prob.value, 3.0, places=2)\n\n ########################################\n\n x = cvx.Variable()\n\n # An infeasible problem.\n prob = cvx.Problem(cvx.Minimize(x), [x >= 1, x <= 0])\n prob.solve()\n print(\"status:\", prob.status)\n print(\"optimal value\", prob.value)\n\n self.assertEqual(prob.status, cvx.INFEASIBLE)\n self.assertAlmostEqual(prob.value, np.inf)\n\n # An unbounded problem.\n prob = cvx.Problem(cvx.Minimize(x))\n prob.solve()\n print(\"status:\", prob.status)\n print(\"optimal value\", prob.value)\n\n self.assertEqual(prob.status, cvx.UNBOUNDED)\n self.assertAlmostEqual(prob.value, -np.inf)\n\n ########################################\n\n # A scalar variable.\n cvx.Variable()\n\n # Column vector variable of length 5.\n x = cvx.Variable(5)\n\n # Matrix variable with 4 rows and 7 columns.\n A = cvx.Variable((4, 7))\n\n ########################################\n import numpy\n\n # 
cvx.Problem data.\n m = 10\n n = 5\n numpy.random.seed(1)\n A = numpy.random.randn(m, n)\n b = numpy.random.randn(m)\n\n # Construct the problem.\n x = cvx.Variable(n)\n objective = cvx.Minimize(cvx.sum_squares(A*x - b))\n constraints = [0 <= x, x <= 1]\n prob = cvx.Problem(objective, constraints)\n\n print(\"Optimal value\", prob.solve())\n print(\"Optimal var\")\n print(x.value) # A numpy matrix.\n\n self.assertAlmostEqual(prob.value, 4.14133859146)\n\n ########################################\n # Positive scalar parameter.\n m = cvx.Parameter(nonneg=True)\n\n # Column vector parameter with unknown sign (by default).\n cvx.Parameter(5)\n\n # Matrix parameter with negative entries.\n G = cvx.Parameter((4, 7), nonpos=True)\n\n # Assigns a constant value to G.\n G.value = -numpy.ones((4, 7))\n ########################################\n\n # Create parameter, then assign value.\n rho = cvx.Parameter(nonneg=True)\n rho.value = 2\n\n # Initialize parameter with a value.\n rho = cvx.Parameter(nonneg=True, value=2)\n\n ########################################\n\n import numpy\n\n # cvx.Problem data.\n n = 15\n m = 10\n numpy.random.seed(1)\n A = numpy.random.randn(n, m)\n b = numpy.random.randn(n)\n # gamma must be positive due to DCP rules.\n gamma = cvx.Parameter(nonneg=True)\n\n # Construct the problem.\n x = cvx.Variable(m)\n error = cvx.sum_squares(A*x - b)\n obj = cvx.Minimize(error + gamma*cvx.norm(x, 1))\n prob = cvx.Problem(obj)\n\n # Construct a trade-off curve of ||Ax-b||^2 vs. ||x||_1\n sq_penalty = []\n l1_penalty = []\n x_values = []\n gamma_vals = numpy.logspace(-4, 6)\n for val in gamma_vals:\n gamma.value = val\n prob.solve()\n # Use expr.value to get the numerical value of\n # an expression in the problem.\n sq_penalty.append(error.value)\n l1_penalty.append(cvx.norm(x, 1).value)\n x_values.append(x.value)\n\n ########################################\n import numpy\n\n X = cvx.Variable((5, 4))\n A = numpy.ones((3, 5))\n\n # Use expr.size to get the dimensions.\n print(\"dimensions of X:\", X.size)\n print(\"dimensions of sum(X):\", cvx.sum(X).size)\n print(\"dimensions of A*X:\", (A*X).size)\n\n # ValueError raised for invalid dimensions.\n try:\n A + X\n except ValueError as e:\n print(e)\n\n def test_inpainting(self):\n \"\"\"Test image in-painting.\n \"\"\"\n import numpy as np\n np.random.seed(1)\n rows, cols = 100, 100\n # Load the images.\n # Convert to arrays.\n Uorig = np.random.randint(0, 255, size=(rows, cols))\n\n rows, cols = Uorig.shape\n # Known is 1 if the pixel is known,\n # 0 if the pixel was corrupted.\n Known = np.zeros((rows, cols))\n for i in range(rows):\n for j in range(cols):\n if np.random.random() > 0.7:\n Known[i, j] = 1\n Ucorr = Known*Uorig\n # Recover the original image using total variation in-painting.\n U = cvx.Variable((rows, cols))\n obj = cvx.Minimize(cvx.tv(U))\n constraints = [cvx.multiply(Known, U) == cvx.multiply(Known, Ucorr)]\n prob = cvx.Problem(obj, constraints)\n prob.solve(solver=cvx.SCS)\n\n def test_advanced2(self):\n \"\"\"Test code from the advanced section of the tutorial.\n \"\"\"\n x = cvx.Variable()\n prob = cvx.Problem(cvx.Minimize(cvx.square(x)), [x == 2])\n # Get ECOS arguments.\n data, chain, inverse = prob.get_problem_data(cvx.ECOS)\n\n # Get ECOS_BB arguments.\n data, chain, inverse = prob.get_problem_data(cvx.ECOS_BB)\n\n # Get CVXOPT arguments.\n if cvx.CVXOPT in cvx.installed_solvers():\n data, chain, inverse = prob.get_problem_data(cvx.CVXOPT)\n\n # Get SCS arguments.\n data, chain, inverse = 
prob.get_problem_data(cvx.SCS)\n\n import ecos\n # Get ECOS arguments.\n data, chain, inverse = prob.get_problem_data(cvx.ECOS)\n # Call ECOS solver.\n solution = ecos.solve(data[\"c\"], data[\"G\"], data[\"h\"],\n ecos_conif.dims_to_solver_dict(data[\"dims\"]),\n data[\"A\"], data[\"b\"])\n # Unpack raw solver output.\n prob.unpack_results(solution, chain, inverse)\n\n def test_log_sum_exp(self):\n \"\"\"Test log_sum_exp function that failed in Github issue.\n \"\"\"\n import numpy as np\n np.random.seed(1)\n m = 5\n n = 2\n X = np.ones((m, n))\n w = cvx.Variable(n)\n\n expr2 = [cvx.log_sum_exp(cvx.hstack([0, X[i, :]*w])) for i in range(m)]\n expr3 = sum(expr2)\n obj = cvx.Minimize(expr3)\n p = cvx.Problem(obj)\n p.solve(solver=cvx.SCS, max_iters=1)\n\n # # Risk return tradeoff curve\n # def test_risk_return_tradeoff(self):\n # from math import sqrt\n # from cvxopt import matrix\n # from cvxopt.blas import dot\n # from cvxopt.solvers import qp, options\n # import scipy\n\n # n = 4\n # S = matrix( [[ 4e-2, 6e-3, -4e-3, 0.0 ],\n # [ 6e-3, 1e-2, 0.0, 0.0 ],\n # [-4e-3, 0.0, 2.5e-3, 0.0 ],\n # [ 0.0, 0.0, 0.0, 0.0 ]] )\n # pbar = matrix([.12, .10, .07, .03])\n\n # N = 100\n # # CVXPY\n # Sroot = numpy.asmatrix(scipy.linalg.sqrtm(S))\n # x = cvx.Variable(n, name='x')\n # mu = cvx.Parameter(name='mu')\n # mu.value = 1 # TODO cvx.Parameter(\"positive\")\n # objective = cvx.Minimize(-pbar*x + mu*quad_over_lin(Sroot*x,1))\n # constraints = [sum(x) == 1, x >= 0]\n # p = cvx.Problem(objective, constraints)\n\n # mus = [ 10**(5.0*t/N-1.0) for t in range(N) ]\n # xs = []\n # for mu_val in mus:\n # mu.value = mu_val\n # p.solve()\n # xs.append(x.value)\n # returns = [ dot(pbar,x) for x in xs ]\n # risks = [ sqrt(dot(x, S*x)) for x in xs ]\n\n # # QP solver\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.random.random",
"numpy.random.seed",
"numpy.logspace",
"scipy.sparse.eye",
"numpy.eye",
"scipy.sparse.rand",
"numpy.linalg.norm",
"numpy.ones",
"numpy.random.normal",
"numpy.random.randn",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
nids2001/UncertainSCI
|
[
"b3105bddc064575477589d7a930c71fa3149ef36"
] |
[
"demos/anisotropic_distribution.py"
] |
[
"# Demonstrates generation of anisotropic distributions. Example is similar to\n# quantiles.py demo.\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom UncertainSCI.distributions import BetaDistribution\nfrom UncertainSCI.model_examples import sine_modulation\nfrom UncertainSCI.indexing import TotalDegreeSet\nfrom UncertainSCI.pce import PolynomialChaosExpansion\n\n# Specifies 1D distribution on [0,1] (alpha=beta=1 ---> uniform)\nalpha = [1., 2., 3.]\nbeta = [3., 2., 1.]\n\ndist = BetaDistribution(alpha, beta)\n\n# Indices setup\norder = 5 # polynomial degree\nindex_set = TotalDegreeSet(dim=dist.dim, order=order)\n\n# # The remainder of this is essentially the same as quantiles.py\n\nprint('This will query the model {0:d} times'.format(index_set.get_indices().shape[0] + 10))\n\n# Initializes a pce object\npce = PolynomialChaosExpansion(index_set, dist)\n\n# Define model\nN = 10 # Number of degrees of freedom of model output\nleft = -1.\nright = 1.\nx = np.linspace(left, right, N)\nmodel = sine_modulation(N=N)\n\n# Compute PCE (runs model)\nlsq_residuals = pce.build_pce_wafp(model)\n\nQ = 6 # Number of quantile bands to plot\n\ndq = 0.5/(Q+1)\nq_lower = np.arange(dq, 0.5-1e-7, dq)[::-1]\nq_upper = np.arange(0.5 + dq, 1.0-1e-7, dq)\n\n# Meh, this triple calling is wasteful\nmedian = pce.quantile(0.5, M=int(1e3))[0, :]\nquantiles_lower = pce.quantile(q_lower, M=int(1e3))\nquantiles_upper = pce.quantile(q_upper, M=int(1e3))\n\n# # Visualization\nM = 50 # Generate MC samples\np_phys = dist.MC_samples(M)\n\noutput = np.zeros([M, N])\n\nfor j in range(M):\n output[j, :] = model(p_phys[j, :])\n\nplt.plot(x, output[:M, :].T, 'k', alpha=0.8, linewidth=0.2)\nplt.plot(x, median, 'b', label='PCE median')\n\nfor ind in range(Q):\n alpha = (Q-ind) * 1/Q - (1/(2*Q))\n plt.fill_between(x, quantiles_lower[ind, :], quantiles_upper[ind, :], interpolate=True, facecolor='red', alpha=alpha)\n\nplt.xlabel('x')\n\nplt.legend(loc='lower right')\n\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros"
]
] |
ZohrehShams/IntegrativeRuleExtractionMethodology
|
[
"fd7b569d11de540ffe94e0cc588e78305e45689e"
] |
[
"code/src/functionality_helpers.py"
] |
[
"import numpy as np\nimport pickle\nimport dnn_re\nfrom evaluate_rules.predict_explain import predict_explain, print_explanation\nfrom evaluate_rules.overlapping_features import features_recurrence_in_explanation\nfrom src import *\nfrom evaluate_rules.predict_explain import predict_explain, print_explanation\nfrom evaluate_rules.overlapping_features import *\nfrom rule_ranking.rank_rules import rank_rule_scores, rank_rule_scores_fav\nfrom rule_ranking.eliminate_rules import eliminate_rules, eliminate_rules_fav_score\nfrom model.generation.helpers.init_dataset_dir import clean_up, clear_file\n\n\n# Extract ruleset from the entire dataset (no fold split) and saves them\ndef validate_rem_d(extract_rules_flag=False):\n if extract_rules_flag:\n X = np.load(N_FOLD_CV_SPLIT_X_data_FP)\n y = np.load(N_FOLD_CV_SPLIT_y_data_FP)\n\n # Extract rules\n nn_accuracy, nn_auc, rules, re_time, re_memory= dnn_re.run_whole_dataset(X, y, model_fp)\n\n for rule in rules:\n print(len(rule.premise))\n\n # Save rules extracted\n print('Saving rules extracted...', end='', flush=True)\n with open(rules_fp, 'wb') as rules_file:\n pickle.dump(rules, rules_file)\n print('done')\n\n # Save rule extraction time and memory usage\n print('Saving results...', end='', flush=True)\n\n# Prints explanation for an instance generated by random sampling;\n# also prints the frequency of features in the explanation\ndef explain_prediction_entire_data(flag=False):\n if flag:\n np.random.seed(110)\n instance = np.random.uniform(0, 1, 1004)\n\n with open(rules_fp, 'rb') as rules_file:\n rules = pickle.load(rules_file)\n\n prediction, explanation = predict_explain(rules, instance)\n print(print_explanation(prediction, explanation))\n print(features_recurrence_in_explanation(explanation))\n\ndef explain_prediction(flag=False):\n if flag:\n np.random.seed(114)\n instance = np.random.uniform(0, 1, 1004)\n fold = np.random.randint(5)\n\n with open(n_fold_rules_fp(fold), 'rb') as rules_file:\n rules = pickle.load(rules_file)\n\n prediction, explanation = predict_explain(rules, instance)\n print(print_explanation(prediction, explanation))\n print(features_recurrence_in_explanation(explanation))\n\n\n# Prints the top 10 recurring features in the entire ruleset,\n# as well as in the ruleset for each class,\n# along with the frequency of operator for each of the top features\ndef compute_top_recurring_features(flag=False):\n if flag:\n with open(rules_fp, 'rb') as rules_file:\n rules = pickle.load(rules_file)\n\n print(features_recurrence(rules, DATA_FP, 10))\n print(features_recurrence_per_class(rules, DATA_FP, 10))\n print(top_features_operator_frequency_recurrence_per_class(rules, DATA_FP, 10))\n\n\n# Prints the top 50 recurring features across the folds,\n# as well as in the ruleset for each class,\n# along with the frequency of operator for each of the top features\ndef compute_top_recurring_features_across_folds(flag=False):\n if flag:\n list_of_rules=[]\n for fold in range(0, N_FOLDS):\n with open(n_fold_rules_fp(fold), 'rb') as rules_file:\n rules = pickle.load(rules_file)\n list_of_rules.append(rules)\n\n print(\"features recurrence across folds:\")\n features_recurrence_across_folds(list_of_rules, DATA_FP, 50)\n print('\\n')\n print(\"features recurrence per class across folds %s\" %(features_recurrence_per_class_across_folds(list_of_rules, DATA_FP, 50)))\n print('\\n')\n print(\"top features operator frequency recurrence per class across folds %s\" %(top_features_operator_frequency_recurrence_per_class_across_folds(list_of_rules, 
DATA_FP, 50)))\n\n\n# Shows the frequency of the favourite features in the ruleset\ndef compute_favourite_features_frequency(rule_path, fav_features, flag=False):\n if flag:\n with open(rule_path, 'rb') as rules_file:\n rules = pickle.load(rules_file)\n fav_freq = fav_features_recurrence(rules, DATA_FP, fav_features)\n return fav_freq\n\n\n\n# Shows the frequency of the favourite features in the ruleset\ndef compute_favourite_features_frequency_across_folds(percentage, fav_features, flag=False):\n if flag:\n list_of_rules = []\n for fold in range(0, N_FOLDS):\n with open(n_fold_rules_fp_remaining(N_FOLD_RULES_REMAINING_DP, fold)(percentage), 'rb') as rules_file:\n rules = pickle.load(rules_file)\n list_of_rules.append(rules)\n fav_freq = fav_features_recurrence_across_folds(list_of_rules, DATA_FP, fav_features)\n return fav_freq\n\n\n# Pick n features at random from the rulset extarcted from the entire dataset\ndef pick_random_features(n, flag=False):\n if flag:\n with open(rules_fp, 'rb') as rules_file:\n rules = pickle.load(rules_file)\n favourite_features = random_features_in_rules(rules, DATA_FP, n)\n return favourite_features\n\n\n# Pick n features at random from the entire dataset\ndef pick_random_features_across_folds(n, flag=False):\n if flag:\n list_of_rules = []\n data_df = pd.read_csv(DATA_FP)\n features_name = list(data_df.columns)\n\n for fold in range(0, N_FOLDS):\n with open(n_fold_rules_fp(fold), 'rb') as rules_file:\n rules = pickle.load(rules_file)\n list_of_rules.append(rules)\n favourite_features = random_features_in_rules_across_folds(list_of_rules, DATA_FP, n)\n return favourite_features\n\n\n\n# Ranks the rules extracted from the entire dataset with the option of factoring in favourite features\n# in the ranking. Based on the raking, lowest rank rules can be eliminated. n is the percentage of rules\n# that will be eliminated. 
n = 0.5 eliminates 50% of the rules.\ndef validate_rem_d_ranking_elimination(rank_rules_flag=False, rule_elimination=False, percentage=0):\n X = np.load(N_FOLD_CV_SPLIT_X_data_FP)\n y = np.load(N_FOLD_CV_SPLIT_y_data_FP)\n\n if rank_rules_flag:\n extracted_rules_file_path = rules_fp\n\n with open(extracted_rules_file_path, 'rb') as rules_file:\n rules = pickle.load(rules_file)\n\n for rule in rules:\n rank_rule_scores(rule, X, y, use_rl=True)\n\n clear_file(extracted_rules_file_path)\n print('Saving rules after scoring...', end='', flush=True)\n with open(extracted_rules_file_path, 'wb') as rules_file:\n pickle.dump(rules, rules_file)\n\n if rule_elimination:\n extracted_rules_file_path = rules_fp\n remaining_rules = eliminate_rules(extracted_rules_file_path, percentage)\n\n # Save remaining rules\n print('Saving remaining rules ...', end='', flush=True)\n with open(rules_fp_remaining(percentage), 'wb') as rules_file:\n pickle.dump(remaining_rules, rules_file)\n print('done')\n\n\ndef validate_rem_d_fav_ranking_elimination(favourite_features=[], rank_rules_fav_flag=False, rule_elimination=False,\n percentage=0):\n if rank_rules_fav_flag:\n extracted_rules_file_path = rules_fp\n\n with open(extracted_rules_file_path, 'rb') as rules_file:\n rules = pickle.load(rules_file)\n\n data_df = pd.read_csv(DATA_FP)\n features_name = list(data_df.columns)\n\n for rule in rules:\n rank_rule_scores_fav(rule, features_name, favourite_features)\n\n clear_file(extracted_rules_file_path)\n print('Saving rules after scoring...', end='', flush=True)\n with open(extracted_rules_file_path, 'wb') as rules_file:\n pickle.dump(rules, rules_file)\n\n if rule_elimination:\n extracted_rules_file_path = rules_fp\n remaining_rules = eliminate_rules_fav_score(extracted_rules_file_path, percentage)\n\n # Save remaining rules\n print('Saving remaining rules ...', end='', flush=True)\n with open(rules_fp_remaining(percentage), 'wb') as rules_file:\n pickle.dump(remaining_rules, rules_file)\n print('done')"
] |
[
[
"numpy.load",
"numpy.random.uniform",
"numpy.random.seed",
"numpy.random.randint"
]
] |
Justin-A/PyTorch-tutorials-kr
|
[
"0d8e407523e5e75de0081becf800b82b37eb912f"
] |
[
"intermediate_source/pruning_tutorial.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n가지치기 기법(Pruning) 튜토리얼\n=====================================\n**저자**: `Michela Paganini <https://github.com/mickypaganini>`_\n**번역** : `안상준 <https://github.com/Justin-A>`_\n\n최첨단 딥러닝 모델들은 굉장히 많은 수의 파라미터값들로 구성되기 때문에, 쉽게 배포되기 어렵습니다.\n이와 반대로, 생물학적 신경망들은 효율적으로 희소하게 연결된 것으로 알려져 있습니다.\n모델의 정확도가 손상되지 않는 범위에서 메모리, 배터리, 하드웨어 소비량을 줄이고, \n기기에 경량화된 모델을 배치하며, 개인이 이용하고 있는 기기에서 프라이버시가 보장되기 위해서는 \n모델에 포함된 파라미터 수를 줄여 압축하는 최적의 기법을 파악하는 것이 중요합니다.\n연구 측면에서는, 가지치기 기법은 굉장히 많은 수의 파라미터값들로 구성된 모델과 \n굉장히 적은 수의 파라미터값들로 구성된 모델 간 학습 역학 차이를 조사하는데 주로 이용되기도 하며,\n하위 신경망 모델과 파라미터값들의 초기화가 운이 좋게 잘 된 케이스를 바탕으로 \n(\"`lottery tickets <https://arxiv.org/abs/1803.03635>`_\") 신경망 구조를 찾는 기술들에 대해 반대 의견을 제시하기도 합니다.\n\n이번 튜토리얼에서는, ``torch.nn.utils.prune`` 을 이용하여 여러분이 설계한 딥러닝 모델에 대해 가지치기 기법을 적용해보는 것을 배워보고, \n심화적으로 여러분의 맞춤형 가지치기 기법을 구현하는 방법에 대해 배워보도록 하겠습니다.\n\n요구사항\n------------\n``\"torch>=1.4\"``\n\n\"\"\"\nimport torch\nfrom torch import nn\nimport torch.nn.utils.prune as prune\nimport torch.nn.functional as F\n\n######################################################################\n# 딥러닝 모델 생성\n# -----------------------\n# 이번 튜토리얼에서는, 얀 르쿤 교수님의 연구진들이 1998년도에 발표한 ``LeNet \n# <http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf>`` 의 모델 구조를 이용합니다.\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass LeNet(nn.Module):\n def __init__(self):\n super(LeNet, self).__init__()\n # 1개 채널 수의 이미지를 입력값으로 이용하여 6개 채널 수의 출력값을 계산하는 방식\n # Convolution 연산을 진행하는 커널(필터)의 크기는 3x3 을 이용\n self.conv1 = nn.Conv2d(1, 6, 3)\n self.conv2 = nn.Conv2d(6, 16, 3)\n self.fc1 = nn.Linear(16 * 5 * 5, 120) # Convolution 연산 결과 5x5 크기의 16 채널 수의 이미지\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, int(x.nelement() / x.shape[0]))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\nmodel = LeNet().to(device=device)\n\n\n######################################################################\n# 모듈 점검\n# -----------------\n# \n# 가지치기 기법이 적용되지 않은 LeNet 모델의 ``conv1`` 층을 점검해봅시다. \n# 여기에는 2개의 파라미터값들인 ``가중치``값과 ``편향``값을이 포함될 것이며, 버퍼는 존재하지 않을 것입니다.\n\nmodule = model.conv1\nprint(list(module.named_parameters()))\n\n######################################################################\nprint(list(module.named_buffers()))\n\n######################################################################\n# 모듈 가지치기 기법 적용 예제\n# -----------------------------------\n# \n# 모듈에 대해 가지치기 기법을 적용하기 위해 (이번 예제에서는, LeNet 모델의 ``conv1`` 층)\n# 첫 번째로는, ``torch.nn.utils.prune`` (또는 ``BasePruningMethod`` 의 서브 클래스로 직접 `구현\n# <torch-nn-utils-prune>`_ )\n# 내 존재하는 가지치기 기법을 선택합니다.\n# 그 후, 해당 모듈 내에서 가지치기 기법을 적용하고자 하는 모듈과 파라미터를 지정합니다.\n# 마지막으로, 가지치기 기법에 적당한 키워드 인자값을 이용하여 가지치기 매개변수를 지정합니다.\n# 이번 예제에서는, ``conv1`` 층의 가중치의 30%값들을 랜덤으로 가지치기 기법을 적용해보겠습니다.\n# 모듈은 함수에 대한 첫 번째 인자값으로 전달되며, ``name`` 은 문자열 식별자를 이용하여 해당 모듈 내 매개변수를 구분합니다.\n# 그리고, ``amount`` 는 가지치기 기법을 적용하기 위한 대상 가중치값들의 백분율 (0과 1사이의 실수값), \n# 혹은 가중치값의 연결의 개수 (음수가 아닌 정수) 를 지정합니다.\n\nprune.random_unstructured(module, name=\"weight\", amount=0.3) \n\n######################################################################\n# 가지치기 기법은 가중치값들을 파라미터값들로부터 제거하고 ``weight_orig`` (즉, 초기 가중치 이름에 \"_orig\"을 붙인) 이라는 \n# 새로운 파라미터값으로 대체하는 것으로 실행됩니다.\n# ``weight_orig`` 은 텐서값에 가지치기 기법이 적용되지 않은 상태를 저장합니다. 
\n# ``bias`` 은 가지치기 기법이 적용되지 않았기 때문에 그대로 남아 있습니다.\nprint(list(module.named_parameters()))\n\n######################################################################\n# 위에서 선택한 가지치기 기법에 의해 생성되는 가지치기 마스크는 초기 파라미터 ``name`` 에 ``weight_mask`` \n# (즉, 초기 가중치 이름에 \"_mask\"를 붙인) 이름의 모듈 버퍼로 저장됩니다.\nprint(list(module.named_buffers()))\n\n######################################################################\n# 수정이 되지 않은 상태에서 순전파를 진행하기 위해서는 ``가중치``값 속성이 존재해야 합니다.\n# ``torch.nn.utils.prune`` 내 구현된 가지치기 기법은 가지치기 기법이 적용된 가중치값들을 이용하여 \n# (기존의 가중치값에 가지치기 기법이 적용된) 순전파를 진행하고, ``weight`` 속성값에 가지치기 기법이 적용된 가중치값들을 저장합니다.\n# 이제 가중치값들은 ``module`` 의 매개변수가 아니라 하나의 속성값으로 취급되는 점을 주의하세요.\nprint(module.weight)\n\n######################################################################\n# 최종적으로, 가지치기 기법은 파이토치의 ``forward_pre_hooks`` 를 이용하여 각 순전파가 진행되기 전에 가지치기 기법이 적용됩니다.\n# 구체적으로, 지금까지 진행한 것 처럼, 모듈이 가지치기 기법이 적용되었을 때, \n# 가지치기 기법이 적용된 각 파라미터값들이 ``forward_pre_hook`` 를 얻게됩니다.\n# 이러한 경우, ``weight`` 이름인 기존 파라미터값에 대해서만 가지치기 기법을 적용하였기 때문에, \n# 훅은 오직 1개만 존재할 것입니다.\nprint(module._forward_pre_hooks)\n\n######################################################################\n# 완결성을 위해, 편향값에 대해서도 가지치기 기법을 적용할 수 있으며, \n# 모듈의 파라미터, 버퍼, 훅, 속성값들이 어떻게 변경되는지 확인할 수 있습니다.\n# 또 다른 가지치기 기법을 적용해보기 위해, ``l1_unstructured`` 가지치기 함수에서 구현된 내용과 같이, \n# L1 Norm 값이 가장 작은 편향값 3개를 가지치기를 시도해봅시다.\nprune.l1_unstructured(module, name=\"bias\", amount=3)\n\n######################################################################\n# 이전에서 실습한 내용을 토대로, 명명된 파라미터값들이 ``weight_orig``, ``bias_orig`` 2개를 모두 포함할 것이라 예상됩니다.\n# 버퍼들은 ``weight_mask``, ``bias_mask`` 2개를 포함할 것입니다.\n# 가지치기 기법이 적용된 2개의 텐서값들은 모듈의 속성값으로 존재할 것이며, 모듈은 2개의 ``forward_pre_hooks`` 을 갖게 될 것입니다.\nprint(list(module.named_parameters()))\n\n######################################################################\nprint(list(module.named_buffers()))\n\n######################################################################\nprint(module.bias)\n\n######################################################################\nprint(module._forward_pre_hooks)\n\n######################################################################\n# 가지치기 기법 반복 적용\n# ------------------------------------\n# \n# 모듈 내 같은 파라미터값에 대해 가지치기 기법이 여러번 적용될 수 있으며, 다양한 가지치기 기법의 조합이 적용된 것과 동일하게 적용될 수 있습니다.\n# 새로운 마스크와 이전의 마스크의 결합은 ``PruningContainer`` 의 ``compute_mask`` 메소드를 통해 처리할 수 있습니다.\n#\n# 예를 들어, 만약 ``module.weight`` 값에 가지치기 기법을 적용하고 싶을 때, 텐서의 0번째 축의 L2 norm값을 기준으로 구조화된 가지치기 기법을 적용합니다.\n# (여기서 0번째 축이란, 합성곱 연산을 통해 계산된 출력값에 대해 각 채널별로 적용된다는 것을 의미합니다.)\n# 이 방식은 ``ln_structured`` 함수와 ``n=2`` 와 ``dim=0`` 의 인자값을 바탕으로 구현될 수 있습니다.\nprune.ln_structured(module, name=\"weight\", amount=0.5, n=2, dim=0)\n\n############################################################################\n# 우리가 확인할 수 있듯이, 이전 마스크의 작용을 유지하면서 채널의 50% (6개 중 3개) 에 해당되는 모든 연결을 0으로 변경합니다.\nprint(module.weight)\n\n############################################################################\n# 이에 해당하는 훅은 ``torch.nn.utils.prune.PruningContainer`` 형태로 존재하며, 가중치에 적용된 가지치기 기법의 이력을 저장합니다.\nfor hook in module._forward_pre_hooks.values():\n if hook._tensor_name == \"weight\": # 가중치에 해당하는 훅을 선택\n break\n\nprint(list(hook)) # 컨테이너 내 가지치기 기법의 이력\n\n######################################################################\n# 가지치기 기법이 적용된 모델의 직렬화\n# ---------------------------------------------\n# 마스크 버퍼들과 가지치기 기법이 적용된 텐서 계산에 사용된 기존의 파라미터를 포함하여 관련된 모든 텐서값들은 \n# 필요한 경우 모델의 ``state_dict`` 에 저장되기 떄문에, 쉽게 직렬화하여 저장할 수 
있다.\nprint(model.state_dict().keys())\n\n\n######################################################################\n# 가지치기 기법의 재-파라미터화 제거\n# -----------------------------------------\n#\n# 가지치기 기법이 적용된 것을 영구적으로 만들기 위해서, 재-파라미터화 관점의 \n# ``weight_orig`` 와 ``weight_mask`` 값을 제거하고, ``forward_pre_hook`` 값을 제거합니다.\n# 제거하기 위해 ``torch.nn.utils.prune`` 내 ``remove`` 함수를 이용할 수 있습니다.\n# 가지치기 기법이 적용되지 않은 것처럼 실행되는 것이 아닌 점을 주의하세요.\n# 이는 단지 가지치기 기법이 적용된 상태에서 가중치 파라미터값을 모델 파라미터값으로 재할당하는 것을 통해 영구적으로 만드는 것일 뿐입니다.\n\n######################################################################\n# 재-파라미터화를 제거하기 전 상태 \nprint(list(module.named_parameters()))\n######################################################################\nprint(list(module.named_buffers()))\n######################################################################\nprint(module.weight)\n\n######################################################################\n# 재-파라미터를 제거한 후 상태 \nprune.remove(module, 'weight')\nprint(list(module.named_parameters()))\n######################################################################\nprint(list(module.named_buffers()))\n\n######################################################################\n# 모델 내 여러 파라미터값들에 대하여 가지치기 기법 적용\n# --------------------------------------\n#\n# 가지치기 기법을 적용하고 싶은 파라미터값들을 지정함으로써, 이번 예제에서 볼 수 있는 것 처럼, \n# 신경망 모델 내 여러 텐서값들에 대해서 쉽게 가지치기 기법을 적용할 수 있습니다.\n\nnew_model = LeNet()\nfor name, module in new_model.named_modules():\n # 모든 2D-conv 층의 20% 연결에 대해 가지치기 기법을 적용\n if isinstance(module, torch.nn.Conv2d):\n prune.l1_unstructured(module, name='weight', amount=0.2)\n # 모든 선형 층의 40% 연결에 대해 가지치기 기법을 적용\n elif isinstance(module, torch.nn.Linear):\n prune.l1_unstructured(module, name='weight', amount=0.4)\n\nprint(dict(new_model.named_buffers()).keys()) # 존재하는 모든 마스크들을 확인\n\n######################################################################\n# 전역 범위에 대한 가지치기 기법 적용\n# ----------------------------------------------\n#\n# 지금까지, \"지역 변수\" 에 대해서만 가지치기 기법을 적용하는 방법을 살펴보았습니다.\n# (즉, 가중치 규모, 활성화 정도, 경사값 등의 각 항목의 통계량을 바탕으로 모델 내 텐서값 하나씩 가지치기 기법을 적용하는 방식)\n# 그러나, 범용적이고 아마 더 강력한 방법은 각 층에서 가장 낮은 20%의 연결을 제거하는것 대신에, 전체 모델에 대해서 가장 낮은 20% 연결을 한번에 제거하는 것입니다.\n# 이것은 각 층에 대해서 가지치기 기법을 적용하는 연결의 백분율값을 다르게 만들 가능성이 있습니다.\n# ``torch.nn.utils.prune`` 내 ``global_unstructured`` 을 이용하여 어떻게 전역 범위에 대한 가지치기 기법을 적용하는지 살펴봅시다.\n\nmodel = LeNet()\n\nparameters_to_prune = (\n (model.conv1, 'weight'),\n (model.conv2, 'weight'),\n (model.fc1, 'weight'),\n (model.fc2, 'weight'),\n (model.fc3, 'weight'),\n)\n\nprune.global_unstructured(\n parameters_to_prune,\n pruning_method=prune.L1Unstructured,\n amount=0.2,\n)\n\n######################################################################\n# 이제 각 층에 존재하는 연결들에 가지치기 기법이 적용된 정도가 20%가 아닌 것을 확인할 수 있습니다.\n# 그러나, 전체 가지치기 적용 범위는 약 20%가 될 것입니다.\nprint(\n \"Sparsity in conv1.weight: {:.2f}%\".format(\n 100. * float(torch.sum(model.conv1.weight == 0))\n / float(model.conv1.weight.nelement())\n )\n)\nprint(\n \"Sparsity in conv2.weight: {:.2f}%\".format(\n 100. * float(torch.sum(model.conv2.weight == 0))\n / float(model.conv2.weight.nelement())\n )\n)\nprint(\n \"Sparsity in fc1.weight: {:.2f}%\".format(\n 100. * float(torch.sum(model.fc1.weight == 0))\n / float(model.fc1.weight.nelement())\n )\n)\nprint(\n \"Sparsity in fc2.weight: {:.2f}%\".format(\n 100. * float(torch.sum(model.fc2.weight == 0))\n / float(model.fc2.weight.nelement())\n )\n)\nprint(\n \"Sparsity in fc3.weight: {:.2f}%\".format(\n 100. 
* float(torch.sum(model.fc3.weight == 0))\n / float(model.fc3.weight.nelement())\n )\n)\nprint(\n \"Global sparsity: {:.2f}%\".format(\n 100. * float(\n torch.sum(model.conv1.weight == 0)\n + torch.sum(model.conv2.weight == 0)\n + torch.sum(model.fc1.weight == 0)\n + torch.sum(model.fc2.weight == 0)\n + torch.sum(model.fc3.weight == 0)\n )\n / float(\n model.conv1.weight.nelement()\n + model.conv2.weight.nelement()\n + model.fc1.weight.nelement()\n + model.fc2.weight.nelement()\n + model.fc3.weight.nelement()\n )\n )\n)\n\n\n######################################################################\n# ``torch.nn.utils.prune`` 에서 확장된 맞춤형 가지치기 기법\n# ------------------------------------------------------------------\n# 맞춤형 가지치기 기법은, 다른 가지치기 기법을 적용하는 것과 같은 방식으로, \n# ``BasePruningMethod`` 의 기본 클래스인 ``nn.utils.prune`` 모듈을 활용하여 구현할 수 있습니다.\n# 기본 클래스는 ``__call__``, ``apply_mask``, ``apply``, ``prune``, ``remove`` 메소드들을 내포하고 있습니다.\n# 특별한 케이스가 아닌 경우, 기본적으로 구성된 메소드들을 재구성할 필요가 없습니다.\n# 그러나, ``__init__`` (구성요소), ``compute_mask`` \n# (가지치기 기법의 논리에 따라 주어진 텐서값에 마스크를 적용하는 방법) 을 고려하여 구성해야 합니다.\n# 게다가, 가지치기 기법을 어떠한 방식으로 적용하는지 명확하게 구성해야 합니다. \n# (지원되는 옵션은 ``global``, ``structured``, ``unstructured`` 입니다.)\n# 이러한 방식은, 가지치기 기법을 반복적으로 적용해야 하는 경우 마스크를 결합하는 방법을 결정하기 위해 필요합니다.\n# 즉, 이미 가지치기 기법이 적용된 모델에 대해서 가지치기 기법을 적용할 때, \n# 기존의 가지치기 기법이 적용되지 않은 파라미터 값에 대해 가지치기 기법이 영향을 미칠 것으로 예상됩니다.\n# ``PRUNING_TYPE``을 지정한다면, 가지치기 기법을 적용하기 위해 파라미터 값을 올바르게 제거하는 \n# ``PruningContainer`` (마스크 가지치기 기법을 반복적으로 적용하는 것을 처리하는)를 가능하게 합니다.\n# 예를 들어, 다른 모든 항목이 존재하는 텐서를 가지치기 기법을 구현하고 싶을 때,\n# (또는, 텐서가 이전에 가지치기 기법에 의해 제거되었거나 남아있는 텐서에 대해)\n# 한 층의 개별 연결에 작용하며 전체 유닛/채널 (``'structured'``), 또는 다른 파라미터 간 \n# (``'global'``) 연결에는 작용하지 않기 때문에 ``PRUNING_TYPE='unstructured'`` 방식으로 진행됩니다.\n\nclass FooBarPruningMethod(prune.BasePruningMethod):\n \"\"\"\n 텐서 내 다른 항목들에 대해 가지치기 기법을 적용\n \"\"\"\n PRUNING_TYPE = 'unstructured'\n\n def compute_mask(self, t, default_mask):\n mask = default_mask.clone()\n mask.view(-1)[::2] = 0 \n return mask\n\n######################################################################\n# ``nn.Module`` 의 매개변수에 적용하기 위해 인스턴스화하고 적용하는 간단한 기능을 구현해봅니다.\ndef foobar_unstructured(module, name):\n \"\"\"\n 텐서 내 다른 모든 항목들을 제거하여 `module` 에서 `name` 이라는 파라미터에 대해 가자치기 기법을 적용\n 다음 내용에 따라 모듈을 수정 (또는 수정된 모듈을 반환):\n 1) 가지치기 기법에 의해 매개변수 `name` 에 적용된 이진 마스크에 해당하는 명명된 버퍼 `name+'_mask'` 를 추가합니다.\n `name` 파라미터는 가지치기 기법이 적용된 것으로 대체되며, 가지치기 기법이 적용되지 않은 \n 기존의 파라미터는 `name+'_orig'` 라는 이름의 새로운 매개변수에 저장됩니다.\n\n 인자값:\n module (nn.Module): 가지치기 기법을 적용해야하는 텐서를 포함하는 모듈\n name (string): 모듈 내 가지치기 기법이 적용될 파라미터의 이름\n\n 반환값:\n module (nn.Module): 입력 모듈에 대해서 가지치기 기법이 적용된 모듈 \n \n 예시:\n >>> m = nn.Linear(3, 4)\n >>> foobar_unstructured(m, name='bias')\n \"\"\"\n FooBarPruningMethod.apply(module, name)\n return module\n\n######################################################################\n# 한번 해봅시다!\nmodel = LeNet()\nfoobar_unstructured(model.fc3, name='bias')\n\nprint(model.fc3.bias_mask)\n"
] |
[
[
"torch.nn.utils.prune.ln_structured",
"torch.nn.utils.prune.remove",
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.Linear",
"torch.nn.utils.prune.l1_unstructured",
"torch.cuda.is_available",
"torch.nn.utils.prune.global_unstructured",
"torch.nn.utils.prune.random_unstructured"
]
] |
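As a compact companion to the pruning tutorial stored in the entry above, the sketch below exercises the same calls listed in its apis column (global_unstructured with L1Unstructured, a sparsity check via torch.sum, and remove) on a tiny throwaway model. It is a minimal sketch assuming torch>=1.4; the two-layer model, its sizes, and the 20% amount are arbitrary choices for illustration and are not part of the tutorial itself.

import torch
import torch.nn as nn
import torch.nn.utils.prune as prune

# A tiny stand-in model; the layer sizes are arbitrary and chosen only for the demo.
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))

# Prune the 20% smallest-magnitude weights across both Linear layers at once
# (global, unstructured, L1-based), rather than 20% per layer.
parameters_to_prune = [(m, "weight") for m in model if isinstance(m, nn.Linear)]
prune.global_unstructured(
    parameters_to_prune,
    pruning_method=prune.L1Unstructured,
    amount=0.2,
)

# Per-layer sparsity can differ, but the overall fraction of zeroed weights is ~20%.
for module, _ in parameters_to_prune:
    zeros = float(torch.sum(module.weight == 0))
    print(f"{module}: {100. * zeros / module.weight.nelement():.1f}% pruned")

# Make the pruning permanent: this drops weight_orig / weight_mask and the forward hook.
for module, name in parameters_to_prune:
    prune.remove(module, name)
print([n for n, _ in model.named_parameters()])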
CWSmith022/yigit-lab
|
[
"8ec1f7d0242d36351ef92bc6698358c9431f4c34"
] |
[
"AGONS/AGONS/test.py"
] |
[
"# %%\n\"\"\"Test how custom functions work with sklearn package.\"\"\"\nimport numpy as np\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nx = np.array([[1,2,3], [6,5,4], [8,7,9]])\nprint(x)\ndef SSRow(X):\n X_ = X.copy()\n X_t = StandardScaler().fit_transform(X_.T).T \n return X_t\n\ndef MMRow(X):\n X_ = X.copy()\n X_t = MinMaxScaler().fit_transform(X_.T).T\n return X_t\n\nd = FunctionTransformer(SSRow)\nprint(d.fit_transform(x))\ne = FunctionTransformer(MMRow)\nprint(e.fit_transform(x))\n# %%\n\"\"\"Testing AGONS with Iris Dataset\"\"\"\n"
] |
[
[
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.FunctionTransformer"
]
] |
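The test script above checks that row-wise scalers wrapped in FunctionTransformer behave as expected. As a small follow-on, the sketch below expresses the same row-wise min-max behaviour directly in NumPy and drops it into a Pipeline so it can be composed with later steps; the scale_rows helper and its constant-row guard are illustrative additions, not part of the original file.

import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer

def scale_rows(X):
    # Min-max scale each row across its features (same effect as MMRow above).
    X = np.asarray(X, dtype=float)
    lo = X.min(axis=1, keepdims=True)
    rng = X.max(axis=1, keepdims=True) - lo
    rng[rng == 0] = 1.0  # avoid division by zero for constant rows
    return (X - lo) / rng

pipe = Pipeline([("row_scale", FunctionTransformer(scale_rows))])
X = np.array([[1, 2, 3], [6, 5, 4], [8, 7, 9]])
print(pipe.fit_transform(X))  # each row mapped onto [0, 1]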
hoaaoh/Audio2Vec
|
[
"96711c2300646ce10878113fa0d506d703db96d7"
] |
[
"src/plot_perfomance.py"
] |
[
"#!/usr/bin/env python3\n\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import spline\ndef main():\n AE_small_list = [ 0.730, 0.685, 0.737, 0.693, 0.881, 0.713 ]\n AE_large_list = [ 0.234, 0.307, 0.400, 0.323, 0.317, 0.233 ]\n ### m = [ 3, 6, 10, 15, 21, 26 ] ###\n NE_small_list = [ 0.390, 0.490, 0.484, 0.460, 0.351, ]\n NE_large_list = [ 0.100, 0.158, 0.169, 0.150, 0.092, ] \n dim = [100, 200, 400, 600, 800, 1000 ]\n small_dim = [117, 234, 390, 585, 819, 1014 ]\n\n #dim_new = np.linspace( min(dim), max(dim),300) \n #AE_small_smooth = spline(dim, AE_small_list, dim_new)\n #plt.plot(dim_new, AE_small_smooth , label = 'AE_small_smooth')\n plt.plot(dim, AE_small_list, '-o', label='SA_small')\n\n plt.plot(dim, AE_large_list, '-o', label='SA_large') \n plt.plot(small_dim, NE_small_list, '-o', label='NE_small')\n plt.plot(small_dim, NE_large_list,'-o', label='NE_large')\n plt.xlabel('Representation Dimension', fontsize=12)\n plt.ylabel('MAP', fontsize=12)\n plt.legend()\n plt.show()\n\n return\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
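The commented-out smoothing in the plotting script above relies on scipy.interpolate.spline, which has been removed from recent SciPy releases. A minimal sketch of the same smoothing with the current make_interp_spline API is below, reusing the AE_small_list values from the script; the cubic order k=3 and the 300-point grid mirror the commented-out code and are otherwise arbitrary.

import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import make_interp_spline  # modern replacement for `spline`

dim = [100, 200, 400, 600, 800, 1000]                    # x-values from the script
ae_small = [0.730, 0.685, 0.737, 0.693, 0.881, 0.713]    # AE_small_list

dim_new = np.linspace(min(dim), max(dim), 300)
ae_small_smooth = make_interp_spline(dim, ae_small, k=3)(dim_new)

plt.plot(dim_new, ae_small_smooth, label='AE_small_smooth')
plt.plot(dim, ae_small, 'o', label='AE_small')
plt.xlabel('Representation Dimension', fontsize=12)
plt.ylabel('MAP', fontsize=12)
plt.legend()
plt.show()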
Boltuzamaki/Monk_Object_Detection
|
[
"baf113ef6db8b531d0ef6413538e49d422163a20"
] |
[
"12_tf_obj_1/lib/calculate_map.py"
] |
[
"# Code from - https://github.com/Cartucho/mAP\n\nimport glob\nimport json\nimport os\nimport shutil\nimport operator\nimport sys\nimport argparse\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n\n\ndef log_average_miss_rate(prec, rec, num_images):\n \"\"\"\n log-average miss rate:\n Calculated by averaging miss rates at 9 evenly spaced FPPI points\n between 10e-2 and 10e0, in log-space.\n output:\n lamr | log-average miss rate\n mr | miss rate\n fppi | false positives per image\n references:\n [1] Dollar, Piotr, et al. \"Pedestrian Detection: An Evaluation of the\n State of the Art.\" Pattern Analysis and Machine Intelligence, IEEE\n Transactions on 34.4 (2012): 743 - 761.\n \"\"\"\n\n # if there were no detections of that class\n if prec.size == 0:\n lamr = 0\n mr = 1\n fppi = 0\n return lamr, mr, fppi\n\n fppi = (1 - prec)\n mr = (1 - rec)\n\n fppi_tmp = np.insert(fppi, 0, -1.0)\n mr_tmp = np.insert(mr, 0, 1.0)\n\n # Use 9 evenly spaced reference points in log-space\n ref = np.logspace(-2.0, 0.0, num = 9)\n for i, ref_i in enumerate(ref):\n # np.where() will always find at least 1 index, since min(ref) = 0.01 and min(fppi_tmp) = -1.0\n j = np.where(fppi_tmp <= ref_i)[-1][-1]\n ref[i] = mr_tmp[j]\n\n # log(0) is undefined, so we use the np.maximum(1e-10, ref)\n lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))\n\n return lamr, mr, fppi\n\n\"\"\"\n throw error and exit\n\"\"\"\ndef error(msg):\n print(msg)\n sys.exit(0)\n\n\"\"\"\n check if the number is a float between 0.0 and 1.0\n\"\"\"\ndef is_float_between_0_and_1(value):\n try:\n val = float(value)\n if val > 0.0 and val < 1.0:\n return True\n else:\n return False\n except ValueError:\n return False\n\n\"\"\"\n Calculate the AP given the recall and precision array\n 1st) We compute a version of the measured precision/recall curve with\n precision monotonically decreasing\n 2nd) We compute the AP as the area under this curve by numerical integration.\n\"\"\"\ndef voc_ap(rec, prec):\n \"\"\"\n --- Official matlab code VOC2012---\n mrec=[0 ; rec ; 1];\n mpre=[0 ; prec ; 0];\n for i=numel(mpre)-1:-1:1\n mpre(i)=max(mpre(i),mpre(i+1));\n end\n i=find(mrec(2:end)~=mrec(1:end-1))+1;\n ap=sum((mrec(i)-mrec(i-1)).*mpre(i));\n \"\"\"\n rec.insert(0, 0.0) # insert 0.0 at begining of list\n rec.append(1.0) # insert 1.0 at end of list\n mrec = rec[:]\n prec.insert(0, 0.0) # insert 0.0 at begining of list\n prec.append(0.0) # insert 0.0 at end of list\n mpre = prec[:]\n \"\"\"\n This part makes the precision monotonically decreasing\n (goes from the end to the beginning)\n matlab: for i=numel(mpre)-1:-1:1\n mpre(i)=max(mpre(i),mpre(i+1));\n \"\"\"\n # matlab indexes start in 1 but python in 0, so I have to do:\n # range(start=(len(mpre) - 2), end=0, step=-1)\n # also the python function range excludes the end, resulting in:\n # range(start=(len(mpre) - 2), end=-1, step=-1)\n for i in range(len(mpre)-2, -1, -1):\n mpre[i] = max(mpre[i], mpre[i+1])\n \"\"\"\n This part creates a list of indexes where the recall changes\n matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;\n \"\"\"\n i_list = []\n for i in range(1, len(mrec)):\n if mrec[i] != mrec[i-1]:\n i_list.append(i) # if it was matlab would be i + 1\n \"\"\"\n The Average Precision (AP) is the area under the curve\n (numerical integration)\n matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));\n \"\"\"\n ap = 0.0\n for i in i_list:\n ap += ((mrec[i]-mrec[i-1])*mpre[i])\n return ap, mrec, mpre\n\n\n\"\"\"\n Convert the lines of a file to a list\n\"\"\"\ndef 
file_lines_to_list(path):\n # open txt file lines to a list\n with open(path) as f:\n content = f.readlines()\n # remove whitespace characters like `\\n` at the end of each line\n content = [x.strip() for x in content]\n return content\n\n\"\"\"\n Draws text in image\n\"\"\"\ndef draw_text_in_image(img, text, pos, color, line_width):\n font = cv2.FONT_HERSHEY_PLAIN\n fontScale = 1\n lineType = 1\n bottomLeftCornerOfText = pos\n cv2.putText(img, text,\n bottomLeftCornerOfText,\n font,\n fontScale,\n color,\n lineType)\n text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]\n return img, (line_width + text_width)\n\n\"\"\"\n Plot - adjust axes\n\"\"\"\ndef adjust_axes(r, t, fig, axes):\n # get text width for re-scaling\n bb = t.get_window_extent(renderer=r)\n text_width_inches = bb.width / fig.dpi\n # get axis width in inches\n current_fig_width = fig.get_figwidth()\n new_fig_width = current_fig_width + text_width_inches\n propotion = new_fig_width / current_fig_width\n # get axis limit\n x_lim = axes.get_xlim()\n axes.set_xlim([x_lim[0], x_lim[1]*propotion])\n\n\"\"\"\n Draw plot using Matplotlib\n\"\"\"\ndef draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):\n # sort the dictionary by decreasing value, into a list of tuples\n sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))\n # unpacking the list of tuples into two lists\n sorted_keys, sorted_values = zip(*sorted_dic_by_value)\n # \n if true_p_bar != \"\":\n \"\"\"\n Special case to draw in:\n - green -> TP: True Positives (object detected and matches ground-truth)\n - red -> FP: False Positives (object detected but does not match ground-truth)\n - pink -> FN: False Negatives (object not detected but present in the ground-truth)\n \"\"\"\n fp_sorted = []\n tp_sorted = []\n for key in sorted_keys:\n fp_sorted.append(dictionary[key] - true_p_bar[key])\n tp_sorted.append(true_p_bar[key])\n plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')\n plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted)\n # add legend\n plt.legend(loc='lower right')\n \"\"\"\n Write number on side of bar\n \"\"\"\n fig = plt.gcf() # gcf - get current figure\n axes = plt.gca()\n r = fig.canvas.get_renderer()\n for i, val in enumerate(sorted_values):\n fp_val = fp_sorted[i]\n tp_val = tp_sorted[i]\n fp_str_val = \" \" + str(fp_val)\n tp_str_val = fp_str_val + \" \" + str(tp_val)\n # trick to paint multicolor with offset:\n # first paint everything and then repaint the first number\n t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')\n plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')\n if i == (len(sorted_values)-1): # largest bar\n adjust_axes(r, t, fig, axes)\n else:\n plt.barh(range(n_classes), sorted_values, color=plot_color)\n \"\"\"\n Write number on side of bar\n \"\"\"\n fig = plt.gcf() # gcf - get current figure\n axes = plt.gca()\n r = fig.canvas.get_renderer()\n for i, val in enumerate(sorted_values):\n str_val = \" \" + str(val) # add a space before\n if val < 1.0:\n str_val = \" {0:.2f}\".format(val)\n t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')\n # re-set axes to show number inside the figure\n if i == (len(sorted_values)-1): # largest bar\n adjust_axes(r, t, fig, axes)\n # set window title\n fig.canvas.set_window_title(window_title)\n # write 
classes in y axis\n tick_font_size = 12\n plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)\n \"\"\"\n Re-scale height accordingly\n \"\"\"\n init_height = fig.get_figheight()\n # comput the matrix height in points and inches\n dpi = fig.dpi\n height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)\n height_in = height_pt / dpi\n # compute the required figure height \n top_margin = 0.15 # in percentage of the figure height\n bottom_margin = 0.05 # in percentage of the figure height\n figure_height = height_in / (1 - top_margin - bottom_margin)\n # set new height\n if figure_height > init_height:\n fig.set_figheight(figure_height)\n\n # set plot title\n plt.title(plot_title, fontsize=14)\n # set axis titles\n # plt.xlabel('classes')\n plt.xlabel(x_label, fontsize='large')\n # adjust size of window\n fig.tight_layout()\n # save the plot\n fig.savefig(output_path)\n # show image\n if to_show:\n plt.show()\n # close the plot\n plt.close()\n\n\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"numpy.maximum",
"matplotlib.pyplot.title",
"numpy.logspace",
"matplotlib.pyplot.gcf",
"numpy.insert",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.text",
"matplotlib.pyplot.show",
"numpy.where"
]
] |
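voc_ap() in the file above computes VOC-style interpolated average precision in two steps: make the precision envelope monotonically non-increasing, then integrate it over the recall values where recall changes. The toy walk-through below restates that arithmetic on made-up precision/recall values (three detections, two ground-truth objects) so it can be checked by hand; it mirrors the algorithm rather than calling the file's own function.

# Padded the way voc_ap does it: recall padded with 0 and 1, precision with 0 and 0.
rec = [0.0, 0.5, 0.5, 1.0, 1.0]
prec = [0.0, 1.0, 0.5, 0.667, 0.0]

# 1) make precision monotonically non-increasing, sweeping right to left
for i in range(len(prec) - 2, -1, -1):
    prec[i] = max(prec[i], prec[i + 1])

# 2) sum rectangle areas at the indices where recall changes
ap = sum((rec[i] - rec[i - 1]) * prec[i]
         for i in range(1, len(rec)) if rec[i] != rec[i - 1])
print(f"AP = {ap:.2f}")  # 0.5*1.0 + 0.5*0.667, roughly 0.83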
USEPA/sensortoolkit
|
[
"a9da32fd4df492154c6e4cc570011d14e933ee83"
] |
[
"sensortoolkit/evaluation_objs/_sensor_eval.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nTop-level analysis module for the ``sensortoolkit`` library.\n\nContains the front-facing ``SensorEvaluation`` class for conducting analysis\nof sensor data.\n\n===============================================================================\n\n@Author:\n | Samuel Frederick, NSSC Contractor (ORAU)\n | U.S. EPA / ORD / CEMM / AMCD / SFSB\n\nCreated:\n Fri Jul 31 08:39:37 2020\nLast Updated:\n Wed Jul 7 15:01:00 2021\n\"\"\"\nimport math\nimport json\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sensortoolkit.calculate\nimport sensortoolkit.datetime_utils\nimport sensortoolkit.deploy\nimport sensortoolkit.lib_utils\nimport sensortoolkit.model\nimport sensortoolkit.param\nimport sensortoolkit.plotting\nimport sensortoolkit.qc\nimport sensortoolkit.reference\nimport sensortoolkit.ingest\nfrom sensortoolkit import presets as _presets\n\nclass SensorEvaluation:\n \"\"\"Evaluate air sensor performance for use in NSIM applications.\n\n A class for conducting analysis for air sensors deployed at ambient,\n outdoor, fixed monitoring sites using U.S. EPA's performance metrics and\n targets for sensors measuring PM2.5 or O3. U.S. EPA's testing protocols and\n performance metrics are intended for use with devices deployed for\n non-regulatory supplemental and informational monitoring (NSIM)\n applications.\n\n Args:\n sensor (sensortoolkit.AirSensor object):\n The air sensor object containing datasets with parameter\n measurements that will be evaluated.\n param (sensortoolkit.Parameter object):\n The parameter (measured environmental quantity) object containing\n parameter-specific attributes as well as metrics and targets for\n evaluating sensor performance.\n reference (sensortoolkit.ReferenceMethod object):\n The FRM/FEM reference instrument object containing datasets with\n parameter measurements against which air sensor data will be\n evaluated.\n write_to_file (bool):\n If true, evaluation statistics will be written to the\n ``/data/eval_stats`` sensor subdirectory. Figures will also be\n written to the appropriate figures subdirectory.\n **kwargs:\n Keyword arguments (currently unused).\n\n Attributes:\n path (str): The project path in which data, figures, and reports\n relevant to the sensor evaluation are stored.\n serials (dict): A dictionary of sensor serial identifiers for each\n unit in the base testing deployment.\n figure_path (str): The full directory path to figures for a given\n sensor make and model.\n stats_path: The full directory path to evaluation statistics for a\n given sensor make and model.\n full_df_list (list of pandas DataFrames): List of sensor data frames\n of length N (where N is the number of sensor units in a testing\n group). DataFrames indexed by ``DateTime`` at recorded sampling\n frequency.\n hourly_df_list (list of pandas DataFrames): List of sensor data frames\n of length N (where N is the number of sensor units in a testing\n group). DataFrames indexed by ``DateTime`` at 1-hour averaged\n sampling frequency.\n daily_df_list (list of pandas DataFrames): List of sensor data frames\n of length N (where N is the number of sensor units in a testing\n group). 
DataFrames indexed by ``DateTime`` at 24-hour averaged\n sampling frequency.\n deploy_period_df (pandas DataFrame): A data frame containing the start\n time (‘Begin’), end time (‘End’), and total duration of evaluation\n period for each sensor in a deployment group.\n deploy_dict (dict): A dictionary containing descriptive statistics and\n textual information about the deployment (testing agency, site,\n time period, etc.), sensors tested, and site conditions during the\n evaluation.\n deploy_bdate (pandas timestamp object): Overall start date of\n deployment. Determined by selecting the earliest recorded timestamp\n in sensor data frames.\n deploy_edate (pandas timestamp object): Overall end date of deployment.\n Determined by selecting the latest recorded timestamp in sensor\n data frames.\n ref_dict (dict):\n A dictionary container for reference data objects at varying\n averaging intervals and parameter classifications.\n hourly_ref_df (pandas DataFrame):\n Dataset containing reference data at 1-hour averaging intervals\n for methods measuring parameters matching the parameter\n classification of the parameter object passed to the\n ``SensorEvaluation`` class during instantation.\n daily_ref_df (pandas DataFrame):\n Dataset containing reference data at 24-hour averaging intervals\n for methods measuring parameters matching the parameter\n classification of the parameter object passed to the\n ``SensorEvaluation`` class during instantation.\n pm_hourly_ref_df (pandas DataFrame):\n Dataset containing reference data at 1-hour averaging intervals\n for methods measuring particulate matter parameters.\n pm_daily_ref_df (pandas DataFrame):\n Dataset containing reference data at 24-hour averaging intervals\n for methods measuring particulate matter parameters.\n gas_hourly_ref_df (pandas DataFrame):\n Dataset containing reference data at 1-hour averaging intervals\n for methods measuring gaseous parameters.\n gas_daily_ref_df (pandas DataFrame):\n Dataset containing reference data at 24-hour averaging intervals\n for methods measuring gaseous parameters.\n met_hourly_ref_df (pandas DataFrame):\n Dataset containing reference data at 1-hour averaging intervals\n for methods measuring meteorological parameters.\n met_daily_ref_df (pandas DataFrame):\n Dataset containing reference data at 24-hour averaging intervals\n for methods measuring meteorological parameters.\n ref_name (str): The make and model of the FRM/FEM instrument used as\n reference for the selected evaluation parameter. Both AirNowTech\n and AQS return the AQS method code, and the AQS Sampling Methods\n Reference table is used to determine the instrument name associated\n with this code. AirNow does not return method codes or instrument\n names. 
When the name and type of the FRM/FEM instrument are\n unknown, ref_name takes the value ‘unknown_reference’.\n avg_hrly_df (pandas DataFrame): Data frame containing the inter-sensor\n average for concurrent sensor measurements at 1-hour averaging\n intervals.\n avg_daily_df (pandas DataFrame): Data frame containing the inter-sensor\n average for concurrent sensor measurements at 24-hour averaging\n intervals.\n stats_df (pandas DataFrame): Data frame with OLS regression (sensor vs\n FRM/FEM) statistics, including R2, slope, intercept, RMSE, N\n (Number of sensor-FRM/FEM data point pairs), as well as the\n minimum, maximum, and the mean sensor concentration.\n avg_stats_df (pandas DataFrame): Data frame with OLS regression (sensor\n vs intersensor average) statistics, including R2, slope,\n intercept, RMSE, N (Number of concurrent sensor measurements during\n which all sensors in the testing group reported values), as well as\n the minimum, maximum, and the mean sensor concentration.\n\n \"\"\"\n\n def __init__(self, sensor, param, reference, write_to_file=False,\n **kwargs):\n\n self.sensor = sensor\n self.name = sensor.name\n self.reference = reference\n\n try:\n self.sensor.data\n except AttributeError as error:\n sys.exit(f'{error}, use the AirSensor.load_data() method to import'\n f' data')\n\n self.path = sensor.project_path\n self.serials = sensor.serials\n\n # Private to avoid confusion between SensorEvaluation attribute and\n # paraeter attribute\n self.param = param\n self._param_name = param.name\n\n if self._param_name not in self.sensor.param_headers:\n raise AttributeError(f'{self._param_name} is not in the list of '\n f'parameters measured by {self.name}')\n\n self.write_to_file = write_to_file\n\n self.testing_loc = _presets.test_loc\n self.testing_org = _presets.test_org\n\n # Add keyword arguments\n self.__dict__.update(**kwargs)\n self.kwargs = kwargs\n\n # path to sensor figures\n self.figure_path = os.path.join(self.path, 'figures', self.name, '')\n\n # path to evaluation statistics\n self.stats_path = os.path.join(self.path, 'data',\n 'eval_stats', self.name, '')\n\n rec_int = self.sensor.recording_interval\n self.full_df_list = list(self.sensor.data[rec_int].values())\n self.hourly_df_list = list(self.sensor.data['1-hour'].values())\n self.daily_df_list = list(self.sensor.data['24-hour'].values())\n\n # Compute sensor deployment period and concurrent deployment groups\n self.deploy_period_df = sensortoolkit.deploy.deployment_period(\n self.full_df_list,\n self.name,\n self.serials)\n\n self.deploy_dict = sensortoolkit.deploy.construct_deploy_dict(\n self.deploy_period_df,\n self.full_df_list,\n self.hourly_df_list,\n self.daily_df_list,\n self.name,\n **self.kwargs)\n\n deploy_grps = self.deploy_dict['Deployment Groups']\n\n deploy_bdate = min([pd.to_datetime(deploy_grps[grp]['eval_start'])\n for grp in deploy_grps.keys()])\n self.deploy_bdate = self.kwargs.get('deploy_bdate', deploy_bdate)\n deploy_edate = max([pd.to_datetime(deploy_grps[grp]['eval_end'])\n for grp in deploy_grps.keys()])\n self.deploy_edate = self.kwargs.get('deploy_edate', deploy_edate)\n\n self._assign_refdata_objs()\n\n # Compute normalized param values\n self.hourly_df_list = sensortoolkit.calculate.normalize(\n self.hourly_df_list,\n self.hourly_ref_df,\n param=self._param_name,\n ref_name=self.ref_name)\n\n self.daily_df_list = sensortoolkit.calculate.normalize(\n self.daily_df_list,\n self.hourly_ref_df,\n param=self._param_name,\n ref_name=self.ref_name)\n\n # Compute inter-sensor averaged 
parameter dataframes\n self.avg_hrly_df = sensortoolkit.calculate.intersensor_mean(\n self.hourly_df_list,\n self.deploy_dict)\n\n self.avg_daily_df = sensortoolkit.calculate.intersensor_mean(\n self.daily_df_list,\n self.deploy_dict)\n\n self.stats_df = pd.DataFrame()\n self.avg_stats_df = pd.DataFrame()\n\n def _assign_refdata_objs(self):\n # Retrieve reference data\n self.ref_dict = self.reference.data\n\n # Set reference dataframe based on evaluation parameter classification\n self.hourly_ref_df = self.ref_dict[self.param.classifier]['1-hour']\n hourly_ref_idx = self.hourly_ref_df.index\n\n ref_param_cols = ['_Value', '_Unit', '_QAQC_Code', '_Param_Code',\n '_Method', '_Method_Code', '_Method_POC']\n\n site_cols = ['Agency', 'Site_Name', 'Site_AQS',\n 'Site_Lat', 'Site_Lon', 'Data_Source',\n 'Data_Acquisition_Date_Time']\n\n # Unpack the ref data into dataframes. If no reference data found,\n # return a dataframe backfilled with nulls.\n if not self.ref_dict['PM']['1-hour'].empty:\n self.pm_hourly_ref_df = self.ref_dict['PM']['1-hour']\n self.pm_daily_ref_df = self.ref_dict['PM']['24-hour']\n else:\n cols = ['PM25' + col for col in ref_param_cols]\n cols = cols + site_cols\n self.pm_hourly_ref_df = pd.DataFrame(np.nan,\n index=hourly_ref_idx,\n columns=cols,\n dtype=object)\n # Replace null method names with 'Unspecified Reference'\n for col_name in [col for col in cols if col.endswith('_Method')]:\n self.pm_hourly_ref_df[col_name] = 'Unknown Reference'\n\n self.pm_daily_ref_df = sensortoolkit.datetime_utils.interval_averaging(\n self.pm_hourly_ref_df,\n freq='D',\n interval_count=24,\n thres=0.75)\n\n if not self.ref_dict['Gases']['1-hour'].empty:\n self.gas_hourly_ref_df = self.ref_dict['Gases']['1-hour']\n self.gas_daily_ref_df = self.ref_dict['Gases']['24-hour']\n else:\n cols = ['O3' + col for col in ref_param_cols]\n cols = cols + site_cols\n self.gas_hourly_ref_df = pd.DataFrame(np.nan,\n index=hourly_ref_idx,\n columns=cols,\n dtype=object)\n # Replace null method names with 'Unspecified Reference'\n for col_name in [col for col in cols if col.endswith('_Method')]:\n self.gas_hourly_ref_df[col_name] = 'Unknown Reference'\n\n self.gas_daily_ref_df = sensortoolkit.datetime_utils.interval_averaging(\n self.gas_hourly_ref_df,\n freq='D',\n interval_count=24,\n thres=0.75)\n\n if not self.ref_dict['Met']['1-hour'].empty:\n self.met_hourly_ref_df = self.ref_dict['Met']['1-hour']\n self.met_daily_ref_df = self.ref_dict['Met']['24-hour']\n else:\n cols = [met_param + col for col in ref_param_cols\n for met_param in ['RH', 'Temp']]\n\n cols = cols + site_cols\n self.met_hourly_ref_df = pd.DataFrame(np.nan,\n index=hourly_ref_idx,\n columns=cols,\n dtype=object)\n # Replace null method names with 'Unspecified Reference'\n for col_name in [col for col in cols if col.endswith('_Method')]:\n self.met_hourly_ref_df[col_name] = 'Unknown Reference'\n\n self.met_daily_ref_df = sensortoolkit.datetime_utils.interval_averaging(\n self.met_hourly_ref_df,\n freq='D',\n interval_count=24,\n thres=0.75)\n\n # Get the name of the reference monitor\n self.ref_name = self.reference.get_method_name(self.param.name)\n\n self.daily_ref_df = self.ref_dict[self.param.classifier]['24-hour']\n\n def add_deploy_dict_stats(self):\n \"\"\"Populate deployment dictionary with statistical metrics.\n\n Add precision and error performance targets metrics, include details\n about reference (for selected evaluation parameter) and monitor\n statistics for meteorological parameters (Temp, RH).\n\n Calculates:\n\n - CV for 
1-hour averaged sensor datasets\n - CV for 24-hour averaged sensor datasets\n - RMSE for 1-hour averaged sensor datasets\n - RMSE for 24-hour averaged sensor datasets\n - Reference monitor concentration range, mean concentration during\n testing period for 1-hour averaged measurements\n - Reference monitor concentration range, mean concentration during\n testing period for 24-hour averaged measurements\n - Meteorological monitor measurement range, mean value for temperature\n and/or relative humidity measurements at 1-hour intervals\n - Meteorological monitor measurement range, mean value for temperature\n and/or relative humidity measurements at 24-hour intervals\n\n Populates:\n\n - ``SensorEvaluation.deploy_dict``\n\n Writes Files:\n\n - Deployment dictionary\n\n Returns:\n None.\n\n \"\"\"\n # Compute inter-sensor precision and error metric values\n # CV: 1-hour averaged sensor param\n self.deploy_dict = sensortoolkit.calculate.cv(\n self.hourly_df_list,\n self.deploy_dict,\n param=self._param_name)\n\n # CV: 24-hour averaged sensor param\n self.deploy_dict = sensortoolkit.calculate.cv(\n self.daily_df_list,\n self.deploy_dict,\n param=self._param_name)\n\n # RMSE: 1-hour averaged sensor param\n self.deploy_dict = sensortoolkit.calculate.rmse(\n self.hourly_df_list,\n self.hourly_ref_df,\n self.deploy_dict,\n param=self._param_name)\n\n # RMSE: 24-hour averaged sensor param\n self.deploy_dict = sensortoolkit.calculate.rmse(\n self.daily_df_list,\n self.daily_ref_df,\n self.deploy_dict,\n param=self._param_name)\n\n # Reference details for param evaluation (hourly data)\n self.deploy_dict = sensortoolkit.deploy.deploy_ref_stats(\n self.deploy_dict,\n self.hourly_ref_df,\n param=self._param_name,\n ref_name=self.ref_name)\n\n # Reference details for param evaluation (daily data)\n self.deploy_dict = sensortoolkit.deploy.deploy_ref_stats(\n self.deploy_dict,\n self.daily_ref_df,\n param=self._param_name,\n ref_name=self.ref_name)\n\n # Reference details for meteorological data (1-hr averages)\n self.deploy_dict = sensortoolkit.deploy.deploy_met_stats(\n self.deploy_dict,\n self.hourly_df_list,\n self.met_hourly_ref_df)\n\n # Reference details for meteorological data (24-hr averages)\n self.deploy_dict = sensortoolkit.deploy.deploy_met_stats(\n self.deploy_dict,\n self.daily_df_list,\n self.met_daily_ref_df)\n\n if self.write_to_file is True:\n\n today = sensortoolkit.datetime_utils.get_todays_date()\n\n # check if sensor-specific subfolder exists\n if not os.path.exists(self.stats_path):\n os.makedirs(self.stats_path)\n\n with open(self.stats_path + self.name + '_' +\n self._param_name + \"_Evaluation_\" + today +\n \".json\", \"w\") as outfile:\n deploy_json = json.dumps(self.deploy_dict, indent=4)\n outfile.write(deploy_json)\n\n def calculate_metrics(self):\n \"\"\"Compute hourly, daily, and inter-sensor statistics dataframes.\n\n .. note::\n\n ``calculate_metrics()`` will check whether\n ``SensorEvaluation.deploy_dict`` has been populated with statistics\n via the ``add_deploy_dict_stats()`` method and will call this method\n if the dictionary has not been populated yet.\n\n Calculates:\n\n - 1-hour averaged sensor vs. reference regression statistics for each\n sensor\n - 24-hour averaged sensor vs. reference regression statistics for each\n sensor\n - 1-hour averaged sensor vs. intersensor average regression statistics\n for each sensor\n - 24-hour averaged sensor vs. 
intersensor average regression statistics\n for each sensor\n\n Populates:\n\n - ``SensorEvaluation.stats_df``\n - ``SensorEvaluation.avg_stats_df``\n\n Writes Files:\n\n - Statistics DataFrame - Sensor vs. FRM/FEM\n - Statistics DataFrame - Sensor vs. Intersensor Average\n\n Returns:\n None.\n\n \"\"\"\n try:\n self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]\n except KeyError:\n print('Populating deployment dataframe with evaluation statistics')\n self.add_deploy_dict_stats()\n\n hourly_stats = sensortoolkit.calculate.regression_stats(\n sensor_df_obj=self.hourly_df_list,\n ref_df_obj=self.hourly_ref_df,\n deploy_dict=self.deploy_dict,\n param=self._param_name,\n serials=self.serials\n )\n\n daily_stats = sensortoolkit.calculate.regression_stats(\n sensor_df_obj=self.daily_df_list,\n ref_df_obj=self.daily_ref_df,\n deploy_dict=self.deploy_dict,\n param=self._param_name,\n serials=self.serials\n )\n\n # Combine the statistics dataframes into one\n self.stats_df = sensortoolkit.calculate.join_stats(\n hourly_stats,\n daily_stats,\n stats_path=self.stats_path,\n stats_type='individual',\n write_to_file=self.write_to_file)\n\n avg_hourly_stats = sensortoolkit.calculate.regression_stats(\n sensor_df_obj=self.hourly_df_list,\n ref_df_obj=self.hourly_ref_df,\n deploy_dict=self.deploy_dict,\n param=self._param_name,\n serials=self.serials\n )\n\n avg_daily_stats = sensortoolkit.calculate.regression_stats(\n sensor_df_obj=self.daily_df_list,\n ref_df_obj=self.daily_ref_df,\n deploy_dict=self.deploy_dict,\n param=self._param_name,\n serials=self.serials\n )\n\n # Combine the statistics dataframes into one\n self.avg_stats_df = sensortoolkit.calculate.join_stats(\n avg_hourly_stats,\n avg_daily_stats,\n stats_path=self.stats_path,\n stats_type='average',\n write_to_file=self.write_to_file)\n\n def plot_timeseries(self, report_fmt=True, **kwargs):\n \"\"\"Plot sensor and FRM/FEM reference measurements over time.\n\n Sensor measurements are indicated by distinct colors in a discrete\n color palette. FRM/FEM measurements are shown as black lines. The\n x-axis indicates the date in 5-day increments (default, although\n customizable). 
Measurement values are plotted along the y-axis.\n\n Args:\n report_fmt (bool, optional):\n If true, format figure for inclusion in a performance report.\n Defaults to True.\n **kwargs (dict): Plotting keyword arguments.\n\n Returns:\n None.\n\n \"\"\"\n timestamp_fmt = '%Y-%m-%d %H:%M:%S'\n t_start = (self.avg_hrly_df.dropna(how='all', axis=0).index[0] -\n pd.Timedelta('1D')).strftime(timestamp_fmt)\n t_end = (self.avg_hrly_df.dropna(how='all', axis=0).index[-1] +\n pd.Timedelta('1D')).strftime(timestamp_fmt)\n\n avg_list = self.param.averaging\n\n param = kwargs.get('param', self._param_name)\n kwargs.pop('param', None)\n\n if len(avg_list) == 2 and report_fmt is True:\n fig, axs = plt.subplots(2, 1, figsize=(10.15, 4.1))\n fig.subplots_adjust(hspace=0.7)\n for i, averaging_interval in enumerate(avg_list):\n\n if averaging_interval == '1-hour':\n sensor_data = self.hourly_df_list\n if averaging_interval == '24-hour':\n sensor_data = self.daily_df_list\n\n ref_data = self.ref_dict[sensortoolkit.Parameter(param).classifier][averaging_interval]\n ref_name = self.reference.get_method_name(self.param.name)\n\n # Prevent Sensor_Timeplot from writing to file on first\n # iteration of loop\n if i == 0:\n write_to_file = False\n if i == len(avg_list) - 1:\n write_to_file = self.write_to_file\n\n axs[i] = sensortoolkit.plotting.sensor_timeplot(\n sensor_data,\n ref_data,\n sensor_serials=self.serials,\n param=param,\n figure_path=self.figure_path,\n sensor_name=self.name,\n ref_name=ref_name,\n bdate=t_start,\n edate=t_end,\n averaging_interval=averaging_interval,\n report_fmt=report_fmt,\n write_to_file=write_to_file,\n ax=axs[i],\n fig=fig,\n **kwargs)\n\n if i == 0:\n axs[i].get_legend().remove()\n else:\n\n averaging_interval = kwargs.get('averaging_interval', '1-hour')\n kwargs.pop('averaging_interval', None)\n\n if '1-hour' in avg_list and averaging_interval == '1-hour':\n sensor_data = self.hourly_df_list\n if '24-hour' in avg_list and averaging_interval == '24-hour':\n sensor_data = self.daily_df_list\n\n ref_data = self.ref_dict[sensortoolkit.Parameter(param).classifier][averaging_interval]\n ref_name = ref_data[f'{param}_Method'].unique()[0]\n\n try:\n sensor_data\n except NameError as error:\n sys.exit(error)\n\n sensortoolkit.plotting.sensor_timeplot(\n sensor_data,\n ref_data,\n sensor_serials=self.serials,\n param=param,\n figure_path=self.figure_path,\n sensor_name=self.name,\n ref_name=ref_name,\n bdate=t_start,\n edate=t_end,\n averaging_interval=averaging_interval,\n report_fmt=report_fmt,\n write_to_file=self.write_to_file,\n **kwargs)\n\n def plot_metrics(self, **kwargs):\n \"\"\"Regression dot/boxplots for U.S EPA performance metrics and targets\n developed for PM2.5 and O3 sensor evaluations.\n\n Results for the following metrics are shown:\n\n - Linearity:\n\n - :math:`R^2`: The coefficient of determination, which is a measure\n of linearity between sensor\n and reference measurement pairs.\n\n - Bias:\n\n - Slope: The slope of the ordinary least-squares regression between\n sensor (y-axis) and\n reference (x-axis) measurements.\n - Intercept: The intercept term of the ordinary least-squares\n regression between sensor (y-axis) and\n reference (x-axis) measurements.\n\n - Error:\n\n - :math:`RMSE`: The root mean square error between sensor and\n reference measurements.\n - :math:`NRMSE`: The normalized root mean square error between sensor\n and reference measurements, where RMSE has been normalized by the\n mean reference concentration during the testing period.\n\n - 
Precision:\n\n - :math:`CV`: The coefficient of variation of concurrently recorded\n sensor measurements.\n - :math:`SD`: The standard deviation of concurrently recorded sensor\n measurements.\n\n Results are shown as either colored dots (if the number of sensors is\n less than four) or as boxplots (if the number of sensors exceeds\n three). Target ranges are indicated by gray shaded regions, and target\n goals are indicated by dark gray lines. Results are grouped by data\n averaging interval, including 1-hour and 24-hour intervals (note that\n some pollutants such as O3 are analyzed only at 1-hour intervals due to\n significant diurnal variability, so the formatting of the figure will\n depend on which averaging interval(s) are indicated for the parameter\n via the ``sensortoolkit.Parameter.averaging`` attribute).\n\n Args:\n **kwargs (dict): Plotting keyword arguments.\n\n Returns:\n None.\n\n \"\"\"\n try:\n self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]\n except KeyError:\n print('Populating deployment dataframe with evaluation statistics')\n self.add_deploy_dict_stats()\n\n if self.stats_df.empty:\n print('Calculating OLS regression statistics for 1-hr and 24-hr '\n 'sensor vs. reference measurements')\n self.calculate_metrics()\n\n sensortoolkit.plotting.performance_metrics(\n self.stats_df,\n self.deploy_dict,\n param=self._param_name,\n param_averaging=self.param.averaging,\n path=self.figure_path,\n sensor_name=self.name,\n write_to_file=self.write_to_file,\n **kwargs)\n\n def plot_sensor_scatter(self, averaging_interval='24-hour',\n plot_subset=None, **kwargs):\n \"\"\"Plot sensor vs FRM/FEM reference measurement pairs as scatter.\n\n FRM/FEM reference concentrations are plotted along the x-axis, and\n sensor concentrations are plotted along the y-axis. Measurement pairs\n (i.e., concentration values for sensor and reference datasets recorded\n at matching timestamp entries) are colored by the relative humidity\n recorded by an independent meteorological instrument at the monitoring\n site if RH data are located within the ``reference_object.data['Met']``\n DataFrame.\n\n Args:\n averaging_interval (str, optional):\n The measurement averaging intervals commonly utilized for\n analyzing data corresponding the the selected parameter.\n Defaults to '24-hour'.\n plot_subset (list, optional):\n A list of either sensor serial IDs or the keys associated with\n the serial IDs in the serial dictionary. Defaults to None.\n\n **Keyword Arguments**\n\n :param dict report_fmt:\n For displaying scatter plots on the\n first page of the performance report included alongside U.S. EPA's\n documents outlining recommended testing protocols, performance\n metrics, and target values. Defaults to False.\n :param **kwargs:\n Additional keyword arguments passed to the underlying\n ``sensortoolkit.plotting.scatter_plotter()`` method.\n\n Returns:\n None.\n\n \"\"\"\n report_fmt = kwargs.get('report_fmt', False)\n # Avoids multiple args passed to same param\n kwargs.pop('report_fmt', None)\n\n try:\n self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]\n except KeyError:\n print('Populating deployment dataframe with evaluation statistics')\n self.add_deploy_dict_stats()\n\n if self.stats_df.empty:\n print('Calculating OLS regression statistics for 1-hr and 24-hr '\n 'sensor vs. 
reference measurements')\n self.calculate_metrics()\n\n avg_list = self.param.averaging\n\n # Figuring out averaging intervals is done if report_fmt true, no\n # need to check for invalid intervals passed (will be ignored in favor\n # of intervals specified by Parameter.averaging)\n if not report_fmt and averaging_interval not in avg_list:\n txt = ('Invalid averaging interval, choose from the following: '\n + ', '.join(avg_list))\n sys.exit(txt)\n\n if (report_fmt is True and plot_subset is not None):\n if len(avg_list) == 2:\n # Create a 1x2 subplot, 1-hr scatter on left and 24-hr scatter\n # on right for a single sensor unit (performance report page\n # 1 plot)\n figsize = (5.29, 3.17)\n elif len(avg_list) == 1:\n # Create a 1x1 subplot, 1-hr scatter with vertical colorbar\n figsize = (4.3, 3.91)\n else:\n sys.exit('Reporting template formatted '\n 'figure not specified for ' + self._param_name)\n\n fig, axs = plt.subplots(1, len(avg_list), figsize=figsize)\n fig.subplots_adjust(hspace=0.7)\n for i, averaging_interval in enumerate(self.param.averaging):\n\n if averaging_interval == '1-hour':\n sensor_data = self.hourly_df_list\n ref_data = self.hourly_ref_df\n met_data = self.met_hourly_ref_df\n if averaging_interval == '24-hour':\n sensor_data = self.daily_df_list\n ref_data = self.daily_ref_df\n met_data = self.met_daily_ref_df\n\n # Prevent sub-routine from writing to file on first\n # iteration of loop, also dont draw cbar on first loop\n if i == 0:\n write_to_file = False\n kwargs['draw_cbar'] = False\n if i == len(self.param.averaging) - 1:\n write_to_file = self.write_to_file\n kwargs['draw_cbar'] = True\n\n if isinstance(axs, np.ndarray):\n ax = axs[i]\n multiplot = True\n else:\n ax = axs\n multiplot = False\n\n ax = sensortoolkit.plotting.scatter_plotter(\n sensor_data,\n ref_data,\n self.stats_df,\n deploy_dict=self.deploy_dict,\n met_ref_df=met_data,\n sensor_serials=self.serials,\n param=self._param_name,\n figure_path=self.figure_path,\n sensor_name=self.name,\n ref_name=self.ref_name,\n averaging_interval=averaging_interval,\n plot_subset=plot_subset,\n write_to_file=write_to_file,\n report_fmt=True,\n ax=ax,\n fig=fig,\n **kwargs)\n\n if multiplot:\n axs[i] = ax\n else:\n axs = ax\n\n # Create scatter for all sensors in an evaluation at a specified\n # averaging interval\n else:\n report_fmt = False\n\n # Assuming avg_list contains either only 1-hour or 24-hour\n if '1-hour' in avg_list and averaging_interval == '1-hour':\n sensor_data = self.hourly_df_list\n ref_data = self.hourly_ref_df\n if '24-hour' in avg_list and averaging_interval == '24-hour':\n sensor_data = self.daily_df_list\n ref_data = self.daily_ref_df\n\n try:\n sensor_data\n except NameError as error:\n sys.exit(error)\n\n sensortoolkit.plotting.scatter_plotter(\n sensor_data,\n ref_data,\n self.stats_df,\n deploy_dict=self.deploy_dict,\n met_ref_df=self.met_hourly_ref_df,\n sensor_serials=self.serials,\n param=self._param_name,\n figure_path=self.figure_path,\n sensor_name=self.name,\n ref_name=self.ref_name,\n averaging_interval=averaging_interval,\n plot_subset=plot_subset,\n report_fmt=report_fmt,\n write_to_file=self.write_to_file,\n **kwargs)\n\n def plot_met_dist(self):\n \"\"\"Plot the distribution of temperature and RH recorded by\n meterological instruments at the collocation site.\n\n Displays the relative frequency of meteorological measurements recorded\n during the testing period. Temperature (left) and relative humidity\n (right) measurements are displayed on separate subplots. 
Measurements\n are grouped into 15 bins, and the frequency of measurements within bin\n is normalized by the total number of measurements (i.e., the relative\n frequency) is displayed as a histogram. Additionally, a polynomial\n estimating the kernel density of measurements is shown for each subplot\n and indicates the general distribution of measurements over the range\n of recorded values.\n\n This method will prioritize plotting meteorological measurements made\n by reference instruments, as sensor measurements are commonly biased\n warmer and drier than ambient conditions if measurements are made by\n an onboard sensing component within the housing of the air sensor. If\n no meteorological reference measurements are available, the method will\n use sensor measurements; however, a disclaimer will displayed above\n subplots indicating that sensor measurements are shown in the figure.\n\n Returns:\n None.\n\n \"\"\"\n met_params = ['Temp_Value', 'RH_Value']\n\n sensortoolkit.plotting.met_distrib(self.met_hourly_ref_df[met_params],\n self.avg_hrly_df,\n figure_path=self.figure_path,\n sensor_name=self.name,\n write_to_file=self.write_to_file)\n\n def plot_met_influence(self, met_param='Temp', report_fmt=True,\n **kwargs):\n \"\"\"Plot the influence meteorological parameters (temperature or\n relative humidity) on sensor measurements.\n\n Sensor measurements that have been normalized by reference measurement\n values for the corresponding timestamp and are plotted along the\n y-axis. Meteorological measurements as measured by temperature or\n relative humidity monitors (rather than onboard sensor measurements)\n are plotted along the x-axis. Scatter for each sensor are displayed as\n separate colors to indicate the unique response of each sensor unit.\n\n A gray 1:1 line indicates ideal agreement between sensor and reference\n measurements over the range of meteorological conditions (i.e., a ratio\n of 1 would indicate that the sensor and reference measure the same\n concentration value for a given timestamp). 
Scatter below the 1:1\n line indicates underestimation bias, and scatter above the 1:1 line\n indicates overestimation bias.\n\n Args:\n met_param (str, optional):\n Either ``'Temp'`` for displaying the influence of temperature\n or ``'RH'`` for displaying the influence of relative humidity.\n Defaults to None.\n report_fmt (bool, optional):\n If true, format figure for inclusion in a performance report.\n Defaults to True.\n **kwargs (dict): Plotting keyword arguments.\n\n Returns:\n None.\n\n \"\"\"\n # Reference data header names for met data\n valid_met_params = ['Temp', 'RH']\n\n if report_fmt is True:\n fig, axs = plt.subplots(1, 2, figsize=(8.1, 3.8))\n fig.subplots_adjust(hspace=0.7)\n kwargs['fontsize'] = kwargs.get('fontsize', 10)\n kwargs['ylims'] = kwargs.get('ylims', (-.3, 4))\n\n for i, m_param in enumerate(valid_met_params):\n # Prevent writing to file on first iteration of loop\n if i == 0:\n write_to_file = False\n if i == 1:\n write_to_file = self.write_to_file\n\n axs[i] = sensortoolkit.plotting.normalized_met_scatter(\n self.hourly_df_list,\n self.hourly_ref_df,\n self.avg_hrly_df,\n self.met_hourly_ref_df,\n self.figure_path,\n param=self._param_name,\n sensor_serials=self.serials,\n sensor_name=self.name,\n met_param=m_param,\n ref_name=self.ref_name,\n write_to_file=write_to_file,\n report_fmt=report_fmt,\n fig=fig,\n ax=axs[i],\n **kwargs)\n if i == 0:\n axs[i].get_legend().remove()\n else:\n # Either Temp or RH must be passed to met_param if not using report\n # formatting. Report formatted plots dont require a value for\n # met_param as both Temp and RH scatter are automatically plotted.\n if met_param not in valid_met_params:\n sys.exit(f'Invalid parameter name: {met_param}')\n\n sensortoolkit.plotting.normalized_met_scatter(\n self.hourly_df_list,\n self.hourly_ref_df,\n self.avg_hrly_df,\n self.met_hourly_ref_df,\n self.figure_path,\n param=self._param_name,\n sensor_serials=self.serials,\n sensor_name=self.name,\n met_param=met_param,\n ref_name=self.ref_name,\n write_to_file=self.write_to_file,\n **kwargs)\n\n def plot_sensor_met_scatter(self, averaging_interval='1-hour',\n met_param='Temp',\n **kwargs):\n \"\"\"Plot internal sensor temp or RH measurements against collocated\n reference monitor measurements.\n\n Plots generated by this method:\n * Internal sensor RH vs Reference monitor RH\n * Internal sensor Temp vs Reference monitor Temp\n\n\n Sensor measurements are plotted along the y-axis with reference\n measurements along the x-axis. Statistical quantities are displayed\n for each scatter plot including the ordinary least-squares (OLS)\n regression equation, R^2, RMSE, and N (the number of measurement\n pairs). The one-to-one line (indicating ideal agreement between\n sensor and reference measurements) is shown as a dashed gray line.\n\n Args:\n averaging_interval (str, optional):\n The measurement averaging intervals commonly utilized for\n analyzing data corresponding the the selected parameter.\n Defaults to '1-hour'.\n met_param (str, optional):\n The meteorological parameter to display. 
Defaults to None.\n **kwargs (dict):\n Plotting keyword arguments.\n\n Returns:\n None.\n\n \"\"\"\n # Data header names for met data\n met_params = ['Temp', 'RH']\n\n if met_param not in met_params:\n sys.exit('Invalid parameter name: ' + str(met_param))\n\n if averaging_interval not in self.param.averaging:\n txt = ('Invalid averaging interval, choose from the following: '\n + ', '.join(self.param.averaging))\n sys.exit(txt)\n\n if averaging_interval == '1-hour':\n sensor_data = self.hourly_df_list\n ref_data = self.met_hourly_ref_df\n if averaging_interval == '24-hour':\n sensor_data = self.daily_df_list\n ref_data = self.met_daily_ref_df\n ref_name = ref_data[met_param + '_Method'].unique()[0]\n\n ymin = math.floor(self.avg_hrly_df[\n 'mean_' + met_param + '_Value'].min())\n ymax = round(self.avg_hrly_df[\n 'mean_' + met_param + '_Value'].max(), -1)\n\n xmin, xmax = ymin, ymax\n\n try:\n self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]\n except KeyError:\n print('Populating deployment dataframe with evaluation statistics')\n self.add_deploy_dict_stats()\n\n try:\n self.stats_df\n except AttributeError:\n print('Calculating OLS regression statistics for 1-hr and 24-hr '\n 'sensor vs. reference measurements')\n self.calculate_metrics()\n\n fontsize = sensortoolkit.plotting.set_fontsize(self.serials)\n\n # Set keyword argument values to defaults or passed values\n kwargs['fontsize'] = kwargs.get('fontsize', fontsize)\n kwargs['ylims'] = kwargs.get('ylims', (ymin, ymax))\n kwargs['xlims'] = kwargs.get('xlims', (xmin, xmax))\n kwargs['param_class'] = 'Met'\n kwargs['tick_spacing'] = kwargs.get('tick_spacing', 10)\n kwargs['show_colorbar'] = False\n\n sensortoolkit.plotting.scatter_plotter(\n sensor_data,\n ref_data,\n deploy_dict=self.deploy_dict,\n param=met_param,\n sensor_name=self.name,\n ref_name=ref_name,\n averaging_interval=averaging_interval,\n figure_path=self.figure_path,\n write_to_file=self.write_to_file,\n sensor_serials=self.serials,\n **kwargs)\n\n def print_eval_metrics(self, averaging_interval='24-hour'):\n \"\"\"Display a summary of performance evaluation results using\n EPA’s recommended performance metrics (‘PM25’ and ‘O3’).\n\n The coefficient of variation, sensor vs FRM/FEM OLS regression slope,\n intercept, and R2, and RMSE are displayed. 
Regression statistics\n are computed for each sensor, and the mean metric value is\n presented alongside the range (min to max).\n\n Args:\n averaging_interval (dict, optional):\n The measurement averaging intervals commonly utilized for\n analyzing data corresponding the the selected parameter.\n Defaults to '24-hour'.\n\n Returns:\n None.\n\n \"\"\"\n try:\n self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]\n except KeyError:\n print('Populating deployment dataframe with evaluation statistics')\n self.add_deploy_dict_stats()\n\n if self.stats_df.empty:\n self.calculate_metrics()\n\n param = self._param_name\n\n deploy_dic = self.deploy_dict\n deploy_stats = self.stats_df.where(\n self.stats_df['Averaging Interval'] == averaging_interval)\n\n print(88*'-')\n print('{:^88s}'.format(self.name + ' '\n + averaging_interval +\n ' Performance Evaluation Results'))\n print('{:^88s}'.format('Reference Method: ' + self.ref_name))\n print(88*'-')\n print('{:^6s}|{:^24s}|{:^24s}|{:^24s}|{:^6s}'.format('CV', 'Slope',\n 'Intercept', 'R^2', 'RMSE'))\n print(88*'-')\n cv_data = [(deploy_dic['Deployment Groups'][group]\n [param]['Precision']['cv_' + averaging_interval])\n for group in deploy_dic['Deployment Groups']]\n\n slope_avg = deploy_stats.Slope.mean()\n slope_min = deploy_stats.Slope.min()\n slope_max = deploy_stats.Slope.max()\n\n intercept_avg = deploy_stats.Intercept.mean()\n intercept_min = deploy_stats.Intercept.min()\n intercept__max = deploy_stats.Intercept.max()\n\n linearity_avg = deploy_stats['R$^2$'].mean()\n linearity_min = deploy_stats['R$^2$'].min()\n linearity_max = deploy_stats['R$^2$'].max()\n\n rmse_data = [(deploy_dic['Deployment Groups'][group]\n [param]['Error']['rmse_' + averaging_interval])\n for group in deploy_dic['Deployment Groups']]\n\n print(('{:^6.1f}|{:^24.2f}|'\n '{:^24.2f}|{:^24.2f}|{:^6.1f}').format(cv_data[0],\n slope_avg,\n intercept_avg,\n linearity_avg,\n rmse_data[0]))\n\n print(5*' ',\n ('| ({:4.2f} to {:4.2f}) '\n '| ({:4.2f} to {:4.2f}) '\n '| ({:4.2f} to {:4.2f}) |').format(slope_min,\n slope_max,\n intercept_min,\n intercept__max,\n linearity_min,\n linearity_max),\n 5*' ')\n\n def print_eval_conditions(self, averaging_interval='24-hour'):\n \"\"\"Display conditions for the evaluation parameter and meteorological\n conditions during the testing period.\n\n Values for the evaluation parameter recorded by the sensor, FRM/FEM\n instrument, and temperature and relative humidity values are\n displayed by the mean of 1-hour or 24-hour averages during the\n testing period. 
The range (min to max) of each parameter is listed\n below the mean in parentheses.\n\n Args:\n averaging_interval (str, optional):\n The measurement averaging intervals commonly utilized for\n analyzing data corresponding the the selected parameter.\n Defaults to '24-hour'.\n\n Returns:\n None.\n\n \"\"\"\n try:\n self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]\n except KeyError:\n print('Populating deployment dataframe with evaluation statistics')\n self.add_deploy_dict_stats()\n\n if self.stats_df.empty:\n self.calculate_metrics()\n\n if averaging_interval == '1-hour':\n ref_df = self.hourly_ref_df\n met_ref_df = self.met_hourly_ref_df\n if averaging_interval == '24-hour':\n ref_df = self.daily_ref_df\n met_ref_df = self.met_daily_ref_df\n\n deploy_dict = self.deploy_dict\n deploy_stats = self.stats_df.where(\n self.stats_df['Averaging Interval'] == averaging_interval\n ).dropna(how='all', axis=0)\n n_sensors = len(self.serials)\n\n print(88*'-')\n print('{:^88s}'.format(self.name + ' (' + str(n_sensors) + ') '\n + averaging_interval +\n ' Evaluation Conditions'))\n\n print(88*'-')\n print('{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}'.format(\n 'Eval period', 'Duration', 'Sensor ' + self._param_name,\n 'Ref ' + self._param_name, 'Temp', 'RH'))\n print(88*'-')\n\n deploy_loc = deploy_dict['Deployment Groups']\n\n eval_start = [pd.to_datetime(deploy_loc[group]['eval_start']\n ).strftime('%m-%d-%y')\n for group in deploy_loc]\n\n eval_end = [pd.to_datetime(deploy_loc[group]['eval_end']\n ).strftime('%m-%d-%y')\n for group in deploy_loc]\n\n eval_duration = [str(pd.to_timedelta(\n deploy_loc[group]['eval_duration']\n ).round('D').days) + ' days'\n for group in deploy_dict['Deployment Groups']]\n\n sensor_min = format(deploy_stats.Sensor_Min.min(), '3.1f')\n sensor_max = format(deploy_stats.Sensor_Max.max(), '3.1f')\n sensor_mean = format(deploy_stats.Sensor_Mean.mean(), '3.1f')\n\n ref_min = format(ref_df[self._param_name + '_Value'].min(), '3.1f')\n ref_max = format(ref_df[self._param_name + '_Value'].max(), '3.1f')\n ref_mean = format(ref_df[self._param_name + '_Value'].mean(), '3.1f')\n\n temp_min = format(met_ref_df['Temp_Value'].min(), '2.0f')\n temp_max = format(met_ref_df['Temp_Value'].max(), '2.0f')\n temp_mean = format(met_ref_df['Temp_Value'].mean(), '2.0f')\n\n rh_min = format(met_ref_df['RH_Value'].min(), '2.0f')\n rh_max = format(met_ref_df['RH_Value'].max(), '2.0f')\n rh_mean = format(met_ref_df['RH_Value'].mean(), '2.0f')\n\n print(('{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}'\n ).format(eval_start[0]+'-',\n eval_duration[0],\n sensor_mean,\n ref_mean,\n temp_mean,\n rh_mean))\n\n print(('{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}'\n ).format(eval_end[0],\n '',\n '(' + sensor_min + ' to ' + sensor_max + ')',\n '(' + ref_min + ' to ' + ref_max + ')',\n '(' + temp_min + ' to ' + temp_max + ')',\n '(' + rh_min + ' to ' + rh_max + ')'))\n"
] |
[
[
"pandas.to_datetime",
"matplotlib.pyplot.subplots",
"pandas.Timedelta",
"pandas.DataFrame",
"pandas.to_timedelta"
]
] |
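Note: the sensor-evaluation snippet above reports each regression metric as the mean across sensors followed by its (min to max) range, filtered by the chosen averaging interval. As a rough, self-contained illustration of that summary pattern (not taken from the recorded repository; the column names mirror the snippet, the numbers are invented):

import pandas as pd

# Hypothetical per-sensor regression statistics, shaped like the snippet's stats_df.
stats_df = pd.DataFrame({
    "Averaging Interval": ["24-hour", "24-hour", "24-hour"],
    "Slope": [0.95, 1.02, 0.99],
    "Intercept": [0.40, -0.10, 0.20],
    "R$^2$": [0.88, 0.91, 0.90],
})

# Keep only the requested interval, then report each metric as mean (min to max).
daily = stats_df[stats_df["Averaging Interval"] == "24-hour"]
for metric in ("Slope", "Intercept", "R$^2$"):
    col = daily[metric]
    print(f"{metric}: {col.mean():.2f} ({col.min():.2f} to {col.max():.2f})")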
markyong97/retinafacetest
|
[
"b72317d682c9e17492f5418073073e63c4ce2ce2"
] |
[
"retinaface/inference.py"
] |
[
"import argparse\nimport json\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport albumentations as albu\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport yaml\nfrom albumentations.core.serialization import from_dict\nfrom iglovikov_helper_functions.config_parsing.utils import object_from_dict\nfrom iglovikov_helper_functions.dl.pytorch.utils import state_dict_from_disk\nfrom iglovikov_helper_functions.utils.image_utils import pad_to_size, unpad_from_size\nfrom PIL import Image\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torchvision.ops import nms\nfrom tqdm import tqdm\n\nfrom retinaface.box_utils import decode, decode_landm\nfrom retinaface.utils import tensor_from_rgb_image, vis_annotations\n\n\ndef get_args() -> Any:\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg(\"-i\", \"--input_path\", type=Path, help=\"Path with images.\", required=True)\n arg(\"-c\", \"--config_path\", type=Path, help=\"Path to config.\", required=True)\n arg(\"-o\", \"--output_path\", type=Path, help=\"Path to save jsons.\", required=True)\n arg(\"-v\", \"--visualize\", action=\"store_true\", help=\"Visualize predictions\")\n arg(\"-m\", \"--max_size\", type=int, help=\"Resize the largest side to this number\", default=960)\n arg(\"-b\", \"--batch_size\", type=int, help=\"batch_size\", default=1)\n arg(\"-j\", \"--num_workers\", type=int, help=\"num_workers\", default=12)\n arg(\"--confidence_threshold\", default=0.7, type=float, help=\"confidence_threshold\")\n arg(\"--nms_threshold\", default=0.4, type=float, help=\"nms_threshold\")\n arg(\"-w\", \"--weight_path\", type=str, help=\"Path to weights.\", required=True)\n arg(\"--keep_top_k\", default=750, type=int, help=\"keep_top_k\")\n arg(\"--world_size\", default=-1, type=int, help=\"number of nodes for distributed training\")\n arg(\"--local_rank\", default=-1, type=int, help=\"node rank for distributed training\")\n arg(\"--fp16\", action=\"store_true\", help=\"Use fp6\")\n arg(\"--folder_in_name\", action=\"store_true\", help=\"Add folder to the saved labels.\")\n return parser.parse_args()\n\n\nclass InferenceDataset(Dataset):\n def __init__(\n self, file_paths: List[Path], max_size: int, transform: albu.Compose\n ) -> None: # pylint: disable=W0231\n self.file_paths = file_paths\n self.transform = transform\n self.max_size = max_size\n self.resize = albu.LongestMaxSize(max_size=max_size, p=1)\n\n def __len__(self) -> int:\n return len(self.file_paths)\n\n def __getitem__(self, idx: int) -> Optional[Dict[str, Any]]:\n image_path = self.file_paths[idx]\n\n image = np.array(Image.open(image_path))\n\n image_height, image_width = image.shape[:2]\n\n image = self.resize(image=image)[\"image\"]\n\n paded = pad_to_size(target_size=(self.max_size, self.max_size), image=image)\n\n image = paded[\"image\"]\n pads = paded[\"pads\"]\n\n image = self.transform(image=image)[\"image\"]\n\n return {\n \"torched_image\": tensor_from_rgb_image(image),\n \"image_path\": str(image_path),\n \"pads\": np.array(pads),\n \"image_height\": image_height,\n \"image_width\": image_width,\n }\n\n\ndef unnormalize(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n\n for c in range(image.shape[-1]):\n image[:, :, c] *= 
std[c] # type: ignore\n image[:, :, c] += mean[c] # type: ignore\n image[:, :, c] *= 255 # type: ignore\n\n return image\n\n\ndef process_predictions(\n prediction: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],\n original_shapes: List[Tuple[int, int]],\n input_shape: Tuple[int, int, int, int],\n pads: Tuple[int, int, int, int],\n confidence_threshold: float,\n nms_threshold: float,\n prior_box: torch.Tensor,\n variance: Tuple[float, float],\n keep_top_k: bool,\n) -> List[List[Dict[str, Union[float, List[float]]]]]:\n loc, conf, land = prediction\n\n conf = F.softmax(conf, dim=-1)\n\n result: List[List[Dict[str, Union[List[float], float]]]] = []\n\n batch_size, _, image_height, image_width = input_shape\n\n scale1 = torch.from_numpy(np.tile([image_width, image_height], 5)).to(loc.device)\n scale = torch.from_numpy(np.tile([image_width, image_height], 2)).to(loc.device)\n\n for batch_id in range(batch_size):\n annotations: List[Dict[str, Union[List, float]]] = []\n\n boxes = decode(loc.data[batch_id], prior_box.to(loc.device), variance)\n\n boxes *= scale\n scores = conf[batch_id][:, 1]\n\n landmarks = decode_landm(land.data[batch_id], prior_box.to(land.device), variance)\n landmarks *= scale1\n\n # ignore low scores\n valid_index = torch.where(scores > confidence_threshold)[0]\n boxes = boxes[valid_index]\n landmarks = landmarks[valid_index]\n scores = scores[valid_index]\n\n order = scores.argsort(descending=True)\n\n boxes = boxes[order]\n landmarks = landmarks[order]\n scores = scores[order]\n\n # do NMS\n keep = nms(boxes, scores, nms_threshold)\n boxes = boxes[keep, :].int()\n\n if boxes.shape[0] == 0:\n result += [[{\"bbox\": [], \"score\": -1, \"landmarks\": []}]]\n continue\n\n landmarks = landmarks[keep]\n\n scores = scores[keep].cpu().numpy().astype(np.float64)[:keep_top_k]\n boxes = boxes.cpu().numpy()[:keep_top_k, :]\n landmarks = landmarks.cpu().numpy()[:keep_top_k, :]\n landmarks = landmarks.reshape([-1, 2])\n\n if pads is None:\n pads_numpy = np.array([0, 0, 0, 0])\n else:\n pads_numpy = pads[batch_id]\n\n unpadded = unpad_from_size(pads_numpy, bboxes=boxes, keypoints=landmarks)\n\n resize_coeff = max(original_shapes[batch_id]) / max(image_height, image_width)\n\n boxes = (unpadded[\"bboxes\"] * resize_coeff).astype(int)\n landmarks = (unpadded[\"keypoints\"].reshape(-1, 10) * resize_coeff).astype(int)\n\n for crop_id, bbox in enumerate(boxes):\n annotations += [\n {\n \"bbox\": bbox.tolist(),\n \"score\": float(scores[crop_id]),\n \"landmarks\": landmarks[crop_id].reshape(-1, 2).tolist(),\n }\n ]\n\n result += [annotations]\n\n return result\n\n\ndef main() -> None:\n args = get_args()\n torch.distributed.init_process_group(backend=\"nccl\")\n\n with args.config_path.open() as f:\n hparams = yaml.load(f, Loader=yaml.SafeLoader)\n\n hparams.update(\n {\n \"json_path\": args.output_path,\n \"visualize\": args.visualize,\n \"confidence_threshold\": args.confidence_threshold,\n \"nms_threshold\": args.nms_threshold,\n \"keep_top_k\": args.keep_top_k,\n \"local_rank\": args.local_rank,\n \"prior_box\": object_from_dict(hparams[\"prior_box\"], image_size=[args.max_size, args.max_size]),\n \"fp16\": args.fp16,\n \"folder_in_name\": args.folder_in_name,\n }\n )\n\n if args.visualize:\n output_vis_path = args.output_path / \"viz\"\n output_vis_path.mkdir(parents=True, exist_ok=True)\n hparams[\"output_vis_path\"] = output_vis_path\n\n output_label_path = args.output_path / \"labels\"\n output_label_path.mkdir(parents=True, exist_ok=True)\n hparams[\"output_label_path\"] = 
output_label_path\n\n device = torch.device(\"cuda\", args.local_rank)\n\n model = object_from_dict(hparams[\"model\"])\n model = model.to(device)\n\n if args.fp16:\n model = model.half()\n\n corrections: Dict[str, str] = {\"model.\": \"\"}\n state_dict = state_dict_from_disk(file_path=args.weight_path, rename_in_layers=corrections)\n model.load_state_dict(state_dict)\n\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank\n )\n\n file_paths = list(args.input_path.rglob(\"*.jpg\"))\n\n dataset = InferenceDataset(file_paths, max_size=args.max_size, transform=from_dict(hparams[\"test_aug\"]))\n\n sampler: DistributedSampler = DistributedSampler(dataset, shuffle=False)\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n pin_memory=True,\n shuffle=False,\n drop_last=False,\n sampler=sampler,\n )\n\n predict(dataloader, model, hparams, device)\n\n\ndef predict(dataloader: torch.utils.data.DataLoader, model: nn.Module, hparams: dict, device: torch.device) -> None:\n model.eval()\n\n if hparams[\"local_rank\"] == 0:\n loader = tqdm(dataloader)\n else:\n loader = dataloader\n\n with torch.no_grad():\n for batch in loader:\n torched_images = batch[\"torched_image\"] # images that are rescaled and padded\n\n if hparams[\"fp16\"]:\n torched_images = torched_images.half()\n\n pads = batch[\"pads\"]\n image_paths = batch[\"image_path\"]\n image_heights = batch[\"image_height\"]\n image_widths = batch[\"image_width\"]\n\n batch_size = torched_images.shape[0]\n\n image_heights = image_heights.cpu().numpy()\n image_widths = image_widths.cpu().numpy()\n\n original_shapes = list(zip(image_heights, image_widths))\n\n prediction = model(torched_images.to(device))\n\n output_annotations = process_predictions(\n prediction=prediction,\n original_shapes=original_shapes,\n input_shape=torched_images.shape,\n pads=pads.cpu().numpy(),\n confidence_threshold=hparams[\"confidence_threshold\"],\n nms_threshold=hparams[\"nms_threshold\"],\n prior_box=hparams[\"prior_box\"],\n variance=hparams[\"test_parameters\"][\"variance\"],\n keep_top_k=hparams[\"keep_top_k\"],\n )\n\n for batch_id in range(batch_size):\n annotations = output_annotations[batch_id]\n if not annotations[0][\"bbox\"]:\n continue\n\n folder_name = Path(image_paths[batch_id]).parent.name\n file_name = Path(image_paths[batch_id]).name\n file_id = Path(image_paths[batch_id]).stem\n\n predictions = {\n \"file_name\": file_name,\n \"annotations\": annotations,\n \"file_path\": str(Path(folder_name) / file_name),\n }\n\n (hparams[\"output_label_path\"] / folder_name).mkdir(exist_ok=True, parents=True)\n result_path = hparams[\"output_label_path\"] / folder_name / f\"{file_id}.json\"\n\n with result_path.open(\"w\") as f:\n json.dump(predictions, f, indent=2)\n\n if hparams[\"visualize\"]:\n normalized_image = np.transpose(torched_images[batch_id].cpu().numpy(), (1, 2, 0))\n image = unnormalize(normalized_image)\n unpadded = unpad_from_size(pads[batch_id].cpu().numpy(), image)\n\n original_image_height = image_heights[batch_id].item()\n original_image_width = image_widths[batch_id].item()\n\n image = cv2.resize(\n unpadded[\"image\"].astype(np.uint8), (original_image_width, original_image_height)\n )\n\n image = vis_annotations(image, annotations=annotations) # type: ignore\n\n (hparams[\"output_vis_path\"] / folder_name).mkdir(exist_ok=True, parents=True)\n result_path = hparams[\"output_vis_path\"] / folder_name / 
f\"{file_id}.jpg\"\n\n cv2.imwrite(str(result_path), cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.DataLoader",
"numpy.tile",
"torch.no_grad",
"torch.where",
"torch.device",
"numpy.array",
"torch.nn.parallel.DistributedDataParallel"
]
] |
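Note: the inference script above filters decoded detections by a confidence threshold and then applies IoU-based non-maximum suppression before unpadding and rescaling the boxes. A minimal sketch of just that filtering stage, using dummy boxes rather than real model output (not part of the recorded repository):

import torch
from torchvision.ops import nms

# Dummy (x1, y1, x2, y2) boxes and scores standing in for decoded predictions.
boxes = torch.tensor([[10.0, 10.0, 50.0, 50.0],
                      [12.0, 12.0, 52.0, 52.0],
                      [100.0, 100.0, 140.0, 140.0]])
scores = torch.tensor([0.9, 0.8, 0.3])

confidence_threshold = 0.7
nms_threshold = 0.4

# Drop low-confidence detections, mirroring the torch.where(...) step in the script.
valid = torch.where(scores > confidence_threshold)[0]
boxes, scores = boxes[valid], scores[valid]

# Suppress overlapping boxes; nms returns kept indices ordered by decreasing score.
keep = nms(boxes, scores, nms_threshold)
print(boxes[keep], scores[keep])  # only the highest-scoring of the overlapping pair survives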
pentschev/xarray
|
[
"de6144c0e8c8fc316cfc412a2057af4d1a04edfd"
] |
[
"xarray/tests/test_dataset.py"
] |
[
"import pickle\nimport sys\nimport warnings\nfrom copy import copy, deepcopy\nfrom io import StringIO\nfrom textwrap import dedent\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.core.computation.ops import UndefinedVariableError\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.tseries.frequencies import to_offset\n\nimport xarray as xr\nfrom xarray import (\n DataArray,\n Dataset,\n IndexVariable,\n MergeError,\n Variable,\n align,\n backends,\n broadcast,\n open_dataset,\n set_options,\n)\nfrom xarray.coding.cftimeindex import CFTimeIndex\nfrom xarray.core import dtypes, indexing, utils\nfrom xarray.core.common import duck_array_ops, full_like\nfrom xarray.core.indexes import Index\nfrom xarray.core.pycompat import integer_types\nfrom xarray.core.utils import is_scalar\n\nfrom . import (\n InaccessibleArray,\n UnexpectedDataAccess,\n assert_allclose,\n assert_array_equal,\n assert_equal,\n assert_identical,\n has_cftime,\n has_dask,\n requires_bottleneck,\n requires_cftime,\n requires_dask,\n requires_numbagg,\n requires_numexpr,\n requires_scipy,\n requires_sparse,\n source_ndarray,\n)\n\ntry:\n import dask.array as da\nexcept ImportError:\n pass\n\npytestmark = [\n pytest.mark.filterwarnings(\"error:Mean of empty slice\"),\n pytest.mark.filterwarnings(\"error:All-NaN (slice|axis) encountered\"),\n]\n\n\ndef create_test_data(seed=None, add_attrs=True):\n rs = np.random.RandomState(seed)\n _vars = {\n \"var1\": [\"dim1\", \"dim2\"],\n \"var2\": [\"dim1\", \"dim2\"],\n \"var3\": [\"dim3\", \"dim1\"],\n }\n _dims = {\"dim1\": 8, \"dim2\": 9, \"dim3\": 10}\n\n obj = Dataset()\n obj[\"dim2\"] = (\"dim2\", 0.5 * np.arange(_dims[\"dim2\"]))\n obj[\"dim3\"] = (\"dim3\", list(\"abcdefghij\"))\n obj[\"time\"] = (\"time\", pd.date_range(\"2000-01-01\", periods=20))\n for v, dims in sorted(_vars.items()):\n data = rs.normal(size=tuple(_dims[d] for d in dims))\n obj[v] = (dims, data)\n if add_attrs:\n obj[v].attrs = {\"foo\": \"variable\"}\n obj.coords[\"numbers\"] = (\n \"dim3\",\n np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3], dtype=\"int64\"),\n )\n obj.encoding = {\"foo\": \"bar\"}\n assert all(obj.data.flags.writeable for obj in obj.variables.values())\n return obj\n\n\ndef create_append_test_data(seed=None):\n rs = np.random.RandomState(seed)\n\n lat = [2, 1, 0]\n lon = [0, 1, 2]\n nt1 = 3\n nt2 = 2\n time1 = pd.date_range(\"2000-01-01\", periods=nt1)\n time2 = pd.date_range(\"2000-02-01\", periods=nt2)\n string_var = np.array([\"ae\", \"bc\", \"df\"], dtype=object)\n string_var_to_append = np.array([\"asdf\", \"asdfg\"], dtype=object)\n unicode_var = [\"áó\", \"áó\", \"áó\"]\n datetime_var = np.array(\n [\"2019-01-01\", \"2019-01-02\", \"2019-01-03\"], dtype=\"datetime64[s]\"\n )\n datetime_var_to_append = np.array(\n [\"2019-01-04\", \"2019-01-05\"], dtype=\"datetime64[s]\"\n )\n bool_var = np.array([True, False, True], dtype=bool)\n bool_var_to_append = np.array([False, True], dtype=bool)\n\n ds = xr.Dataset(\n data_vars={\n \"da\": xr.DataArray(\n rs.rand(3, 3, nt1),\n coords=[lat, lon, time1],\n dims=[\"lat\", \"lon\", \"time\"],\n ),\n \"string_var\": xr.DataArray(string_var, coords=[time1], dims=[\"time\"]),\n \"unicode_var\": xr.DataArray(\n unicode_var, coords=[time1], dims=[\"time\"]\n ).astype(np.unicode_),\n \"datetime_var\": xr.DataArray(datetime_var, coords=[time1], dims=[\"time\"]),\n \"bool_var\": xr.DataArray(bool_var, coords=[time1], dims=[\"time\"]),\n }\n )\n\n ds_to_append = xr.Dataset(\n data_vars={\n \"da\": xr.DataArray(\n 
rs.rand(3, 3, nt2),\n coords=[lat, lon, time2],\n dims=[\"lat\", \"lon\", \"time\"],\n ),\n \"string_var\": xr.DataArray(\n string_var_to_append, coords=[time2], dims=[\"time\"]\n ),\n \"unicode_var\": xr.DataArray(\n unicode_var[:nt2], coords=[time2], dims=[\"time\"]\n ).astype(np.unicode_),\n \"datetime_var\": xr.DataArray(\n datetime_var_to_append, coords=[time2], dims=[\"time\"]\n ),\n \"bool_var\": xr.DataArray(bool_var_to_append, coords=[time2], dims=[\"time\"]),\n }\n )\n\n ds_with_new_var = xr.Dataset(\n data_vars={\n \"new_var\": xr.DataArray(\n rs.rand(3, 3, nt1 + nt2),\n coords=[lat, lon, time1.append(time2)],\n dims=[\"lat\", \"lon\", \"time\"],\n )\n }\n )\n\n assert all(objp.data.flags.writeable for objp in ds.variables.values())\n assert all(objp.data.flags.writeable for objp in ds_to_append.variables.values())\n return ds, ds_to_append, ds_with_new_var\n\n\ndef create_test_multiindex():\n mindex = pd.MultiIndex.from_product(\n [[\"a\", \"b\"], [1, 2]], names=(\"level_1\", \"level_2\")\n )\n return Dataset({}, {\"x\": mindex})\n\n\ndef create_test_stacked_array():\n x = DataArray(pd.Index(np.r_[:10], name=\"x\"))\n y = DataArray(pd.Index(np.r_[:20], name=\"y\"))\n a = x * y\n b = x * y * y\n return a, b\n\n\nclass InaccessibleVariableDataStore(backends.InMemoryDataStore):\n def __init__(self):\n super().__init__()\n self._indexvars = set()\n\n def store(self, variables, *args, **kwargs):\n super().store(variables, *args, **kwargs)\n for k, v in variables.items():\n if isinstance(v, IndexVariable):\n self._indexvars.add(k)\n\n def get_variables(self):\n def lazy_inaccessible(k, v):\n if k in self._indexvars:\n return v\n data = indexing.LazilyIndexedArray(InaccessibleArray(v.values))\n return Variable(v.dims, data, v.attrs)\n\n return {k: lazy_inaccessible(k, v) for k, v in self._variables.items()}\n\n\nclass TestDataset:\n def test_repr(self):\n data = create_test_data(seed=123)\n data.attrs[\"foo\"] = \"bar\"\n # need to insert str dtype at runtime to handle different endianness\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (dim2: 9, dim3: 10, time: 20, dim1: 8)\n Coordinates:\n * dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0\n * dim3 (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-20\n numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3\n Dimensions without coordinates: dim1\n Data variables:\n var1 (dim1, dim2) float64 -1.086 0.9973 0.283 ... 0.1995 0.4684 -0.8312\n var2 (dim1, dim2) float64 1.162 -1.097 -2.123 ... 0.1302 1.267 0.3328\n var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 ... -0.2452 -0.3616\n Attributes:\n foo: bar\"\"\"\n % data[\"dim3\"].dtype\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(data).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n with set_options(display_width=100):\n max_len = max(map(len, repr(data).split(\"\\n\")))\n assert 90 < max_len < 100\n\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n *empty*\"\"\"\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(Dataset()).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n # verify that ... 
doesn't appear for scalar coordinates\n data = Dataset({\"foo\": (\"x\", np.ones(10))}).mean()\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n foo float64 1.0\"\"\"\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(data).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n # verify long attributes are truncated\n data = Dataset(attrs={\"foo\": \"bar\" * 1000})\n assert len(repr(data)) < 1000\n\n def test_repr_multiindex(self):\n data = create_test_multiindex()\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) MultiIndex\n - level_1 (x) object 'a' 'a' 'b' 'b'\n - level_2 (x) int64 1 2 1 2\n Data variables:\n *empty*\"\"\"\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(data).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n # verify that long level names are not truncated\n mindex = pd.MultiIndex.from_product(\n [[\"a\", \"b\"], [1, 2]], names=(\"a_quite_long_level_name\", \"level_2\")\n )\n data = Dataset({}, {\"x\": mindex})\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) MultiIndex\n - a_quite_long_level_name (x) object 'a' 'a' 'b' 'b'\n - level_2 (x) int64 1 2 1 2\n Data variables:\n *empty*\"\"\"\n )\n actual = \"\\n\".join(x.rstrip() for x in repr(data).split(\"\\n\"))\n print(actual)\n assert expected == actual\n\n def test_repr_period_index(self):\n data = create_test_data(seed=456)\n data.coords[\"time\"] = pd.period_range(\"2000-01-01\", periods=20, freq=\"B\")\n\n # check that creating the repr doesn't raise an error #GH645\n repr(data)\n\n def test_unicode_data(self):\n # regression test for GH834\n data = Dataset({\"foø\": [\"ba®\"]}, attrs={\"å\": \"∑\"})\n repr(data) # should not raise\n\n byteorder = \"<\" if sys.byteorder == \"little\" else \">\"\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (foø: 1)\n Coordinates:\n * foø (foø) %cU3 %r\n Data variables:\n *empty*\n Attributes:\n å: ∑\"\"\"\n % (byteorder, \"ba®\")\n )\n actual = str(data)\n assert expected == actual\n\n def test_repr_nep18(self):\n class Array:\n def __init__(self):\n self.shape = (2,)\n self.dtype = np.dtype(np.float64)\n\n def __array_function__(self, *args, **kwargs):\n pass\n\n def __repr__(self):\n return \"Custom\\nArray\"\n\n dataset = Dataset({\"foo\": (\"x\", Array())})\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (x: 2)\n Dimensions without coordinates: x\n Data variables:\n foo (x) float64 Custom Array\"\"\"\n )\n assert expected == repr(dataset)\n\n def test_info(self):\n ds = create_test_data(seed=123)\n ds = ds.drop_vars(\"dim3\") # string type prints differently in PY2 vs PY3\n ds.attrs[\"unicode_attr\"] = \"ba®\"\n ds.attrs[\"string_attr\"] = \"bar\"\n\n buf = StringIO()\n ds.info(buf=buf)\n\n expected = dedent(\n \"\"\"\\\n xarray.Dataset {\n dimensions:\n \\tdim2 = 9 ;\n \\ttime = 20 ;\n \\tdim1 = 8 ;\n \\tdim3 = 10 ;\n\n variables:\n \\tfloat64 dim2(dim2) ;\n \\tdatetime64[ns] time(time) ;\n \\tfloat64 var1(dim1, dim2) ;\n \\t\\tvar1:foo = variable ;\n \\tfloat64 var2(dim1, dim2) ;\n \\t\\tvar2:foo = variable ;\n \\tfloat64 var3(dim3, dim1) ;\n \\t\\tvar3:foo = variable ;\n \\tint64 numbers(dim3) ;\n\n // global attributes:\n \\t:unicode_attr = ba® ;\n \\t:string_attr = bar ;\n }\"\"\"\n )\n actual = buf.getvalue()\n assert expected == actual\n buf.close()\n\n def test_constructor(self):\n x1 = (\"x\", 2 * np.arange(100))\n x2 = (\"x\", np.arange(1000))\n z = ([\"x\", \"y\"], 
np.arange(1000).reshape(100, 10))\n\n with pytest.raises(ValueError, match=r\"conflicting sizes\"):\n Dataset({\"a\": x1, \"b\": x2})\n with pytest.raises(ValueError, match=r\"disallows such variables\"):\n Dataset({\"a\": x1, \"x\": z})\n with pytest.raises(TypeError, match=r\"tuple of form\"):\n Dataset({\"x\": (1, 2, 3, 4, 5, 6, 7)})\n with pytest.raises(ValueError, match=r\"already exists as a scalar\"):\n Dataset({\"x\": 0, \"y\": (\"x\", [1, 2, 3])})\n\n # verify handling of DataArrays\n expected = Dataset({\"x\": x1, \"z\": z})\n actual = Dataset({\"z\": expected[\"z\"]})\n assert_identical(expected, actual)\n\n def test_constructor_invalid_dims(self):\n # regression for GH1120\n with pytest.raises(MergeError):\n Dataset(\n data_vars=dict(v=(\"y\", [1, 2, 3, 4])),\n coords=dict(y=DataArray([0.1, 0.2, 0.3, 0.4], dims=\"x\")),\n )\n\n def test_constructor_1d(self):\n expected = Dataset({\"x\": ([\"x\"], 5.0 + np.arange(5))})\n actual = Dataset({\"x\": 5.0 + np.arange(5)})\n assert_identical(expected, actual)\n\n actual = Dataset({\"x\": [5, 6, 7, 8, 9]})\n assert_identical(expected, actual)\n\n def test_constructor_0d(self):\n expected = Dataset({\"x\": ([], 1)})\n for arg in [1, np.array(1), expected[\"x\"]]:\n actual = Dataset({\"x\": arg})\n assert_identical(expected, actual)\n\n class Arbitrary:\n pass\n\n d = pd.Timestamp(\"2000-01-01T12\")\n args = [\n True,\n None,\n 3.4,\n np.nan,\n \"hello\",\n b\"raw\",\n np.datetime64(\"2000-01-01\"),\n d,\n d.to_pydatetime(),\n Arbitrary(),\n ]\n for arg in args:\n print(arg)\n expected = Dataset({\"x\": ([], arg)})\n actual = Dataset({\"x\": arg})\n assert_identical(expected, actual)\n\n def test_constructor_deprecated(self):\n with pytest.raises(ValueError, match=r\"DataArray dimensions\"):\n DataArray([1, 2, 3], coords={\"x\": [0, 1, 2]})\n\n def test_constructor_auto_align(self):\n a = DataArray([1, 2], [(\"x\", [0, 1])])\n b = DataArray([3, 4], [(\"x\", [1, 2])])\n\n # verify align uses outer join\n expected = Dataset(\n {\"a\": (\"x\", [1, 2, np.nan]), \"b\": (\"x\", [np.nan, 3, 4])}, {\"x\": [0, 1, 2]}\n )\n actual = Dataset({\"a\": a, \"b\": b})\n assert_identical(expected, actual)\n\n # regression test for GH346\n assert isinstance(actual.variables[\"x\"], IndexVariable)\n\n # variable with different dimensions\n c = (\"y\", [3, 4])\n expected2 = expected.merge({\"c\": c})\n actual = Dataset({\"a\": a, \"b\": b, \"c\": c})\n assert_identical(expected2, actual)\n\n # variable that is only aligned against the aligned variables\n d = (\"x\", [3, 2, 1])\n expected3 = expected.merge({\"d\": d})\n actual = Dataset({\"a\": a, \"b\": b, \"d\": d})\n assert_identical(expected3, actual)\n\n e = (\"x\", [0, 0])\n with pytest.raises(ValueError, match=r\"conflicting sizes\"):\n Dataset({\"a\": a, \"b\": b, \"e\": e})\n\n def test_constructor_pandas_sequence(self):\n\n ds = self.make_example_math_dataset()\n pandas_objs = {\n var_name: ds[var_name].to_pandas() for var_name in [\"foo\", \"bar\"]\n }\n ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)\n del ds_based_on_pandas[\"x\"]\n assert_equal(ds, ds_based_on_pandas)\n\n # reindex pandas obj, check align works\n rearranged_index = reversed(pandas_objs[\"foo\"].index)\n pandas_objs[\"foo\"] = pandas_objs[\"foo\"].reindex(rearranged_index)\n ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)\n del ds_based_on_pandas[\"x\"]\n assert_equal(ds, ds_based_on_pandas)\n\n def test_constructor_pandas_single(self):\n\n das = [\n DataArray(np.random.rand(4), 
dims=[\"a\"]), # series\n DataArray(np.random.rand(4, 3), dims=[\"a\", \"b\"]), # df\n ]\n\n for a in das:\n pandas_obj = a.to_pandas()\n ds_based_on_pandas = Dataset(pandas_obj)\n for dim in ds_based_on_pandas.data_vars:\n assert_array_equal(ds_based_on_pandas[dim], pandas_obj[dim])\n\n def test_constructor_compat(self):\n data = {\"x\": DataArray(0, coords={\"y\": 1}), \"y\": (\"z\", [1, 1, 1])}\n expected = Dataset({\"x\": 0}, {\"y\": (\"z\", [1, 1, 1])})\n actual = Dataset(data)\n assert_identical(expected, actual)\n\n data = {\"y\": (\"z\", [1, 1, 1]), \"x\": DataArray(0, coords={\"y\": 1})}\n actual = Dataset(data)\n assert_identical(expected, actual)\n\n original = Dataset(\n {\"a\": ((\"x\", \"y\"), np.ones((2, 3)))},\n {\"c\": ((\"x\", \"y\"), np.zeros((2, 3))), \"x\": [0, 1]},\n )\n expected = Dataset(\n {\"a\": (\"x\", np.ones(2)), \"b\": (\"y\", np.ones(3))},\n {\"c\": ((\"x\", \"y\"), np.zeros((2, 3))), \"x\": [0, 1]},\n )\n\n actual = Dataset(\n {\"a\": original[\"a\"][:, 0], \"b\": original[\"a\"][0].drop_vars(\"x\")}\n )\n assert_identical(expected, actual)\n\n data = {\"x\": DataArray(0, coords={\"y\": 3}), \"y\": (\"z\", [1, 1, 1])}\n with pytest.raises(MergeError):\n Dataset(data)\n\n data = {\"x\": DataArray(0, coords={\"y\": 1}), \"y\": [1, 1]}\n actual = Dataset(data)\n expected = Dataset({\"x\": 0}, {\"y\": [1, 1]})\n assert_identical(expected, actual)\n\n def test_constructor_with_coords(self):\n with pytest.raises(ValueError, match=r\"found in both data_vars and\"):\n Dataset({\"a\": (\"x\", [1])}, {\"a\": (\"x\", [1])})\n\n ds = Dataset({}, {\"a\": (\"x\", [1])})\n assert not ds.data_vars\n assert list(ds.coords.keys()) == [\"a\"]\n\n mindex = pd.MultiIndex.from_product(\n [[\"a\", \"b\"], [1, 2]], names=(\"level_1\", \"level_2\")\n )\n with pytest.raises(ValueError, match=r\"conflicting MultiIndex\"):\n Dataset({}, {\"x\": mindex, \"y\": mindex})\n Dataset({}, {\"x\": mindex, \"level_1\": range(4)})\n\n def test_properties(self):\n ds = create_test_data()\n assert ds.dims == {\"dim1\": 8, \"dim2\": 9, \"dim3\": 10, \"time\": 20}\n assert ds.sizes == ds.dims\n\n # These exact types aren't public API, but this makes sure we don't\n # change them inadvertently:\n assert isinstance(ds.dims, utils.Frozen)\n assert isinstance(ds.dims.mapping, dict)\n assert type(ds.dims.mapping) is dict\n\n assert list(ds) == list(ds.data_vars)\n assert list(ds.keys()) == list(ds.data_vars)\n assert \"aasldfjalskdfj\" not in ds.variables\n assert \"dim1\" in repr(ds.variables)\n assert len(ds) == 3\n assert bool(ds)\n\n assert list(ds.data_vars) == [\"var1\", \"var2\", \"var3\"]\n assert list(ds.data_vars.keys()) == [\"var1\", \"var2\", \"var3\"]\n assert \"var1\" in ds.data_vars\n assert \"dim1\" not in ds.data_vars\n assert \"numbers\" not in ds.data_vars\n assert len(ds.data_vars) == 3\n\n assert set(ds.xindexes) == {\"dim2\", \"dim3\", \"time\"}\n assert len(ds.xindexes) == 3\n assert \"dim2\" in repr(ds.xindexes)\n assert all([isinstance(idx, Index) for idx in ds.xindexes.values()])\n\n assert set(ds.indexes) == {\"dim2\", \"dim3\", \"time\"}\n assert len(ds.indexes) == 3\n assert \"dim2\" in repr(ds.indexes)\n assert all([isinstance(idx, pd.Index) for idx in ds.indexes.values()])\n\n assert list(ds.coords) == [\"dim2\", \"dim3\", \"time\", \"numbers\"]\n assert \"dim2\" in ds.coords\n assert \"numbers\" in ds.coords\n assert \"var1\" not in ds.coords\n assert \"dim1\" not in ds.coords\n assert len(ds.coords) == 4\n\n assert Dataset({\"x\": np.int64(1), \"y\": np.float32([1, 
2])}).nbytes == 16\n\n def test_asarray(self):\n ds = Dataset({\"x\": 0})\n with pytest.raises(TypeError, match=r\"cannot directly convert\"):\n np.asarray(ds)\n\n def test_get_index(self):\n ds = Dataset({\"foo\": ((\"x\", \"y\"), np.zeros((2, 3)))}, coords={\"x\": [\"a\", \"b\"]})\n assert ds.get_index(\"x\").equals(pd.Index([\"a\", \"b\"]))\n assert ds.get_index(\"y\").equals(pd.Index([0, 1, 2]))\n with pytest.raises(KeyError):\n ds.get_index(\"z\")\n\n def test_attr_access(self):\n ds = Dataset(\n {\"tmin\": (\"x\", [42], {\"units\": \"Celcius\"})}, attrs={\"title\": \"My test data\"}\n )\n assert_identical(ds.tmin, ds[\"tmin\"])\n assert_identical(ds.tmin.x, ds.x)\n\n assert ds.title == ds.attrs[\"title\"]\n assert ds.tmin.units == ds[\"tmin\"].attrs[\"units\"]\n\n assert {\"tmin\", \"title\"} <= set(dir(ds))\n assert \"units\" in set(dir(ds.tmin))\n\n # should defer to variable of same name\n ds.attrs[\"tmin\"] = -999\n assert ds.attrs[\"tmin\"] == -999\n assert_identical(ds.tmin, ds[\"tmin\"])\n\n def test_variable(self):\n a = Dataset()\n d = np.random.random((10, 3))\n a[\"foo\"] = ((\"time\", \"x\"), d)\n assert \"foo\" in a.variables\n assert \"foo\" in a\n a[\"bar\"] = ((\"time\", \"x\"), d)\n # order of creation is preserved\n assert list(a.variables) == [\"foo\", \"bar\"]\n assert_array_equal(a[\"foo\"].values, d)\n # try to add variable with dim (10,3) with data that's (3,10)\n with pytest.raises(ValueError):\n a[\"qux\"] = ((\"time\", \"x\"), d.T)\n\n def test_modify_inplace(self):\n a = Dataset()\n vec = np.random.random((10,))\n attributes = {\"foo\": \"bar\"}\n a[\"x\"] = (\"x\", vec, attributes)\n assert \"x\" in a.coords\n assert isinstance(a.coords[\"x\"].to_index(), pd.Index)\n assert_identical(a.coords[\"x\"].variable, a.variables[\"x\"])\n b = Dataset()\n b[\"x\"] = (\"x\", vec, attributes)\n assert_identical(a[\"x\"], b[\"x\"])\n assert a.dims == b.dims\n # this should work\n a[\"x\"] = (\"x\", vec[:5])\n a[\"z\"] = (\"x\", np.arange(5))\n with pytest.raises(ValueError):\n # now it shouldn't, since there is a conflicting length\n a[\"x\"] = (\"x\", vec[:4])\n arr = np.random.random((10, 1))\n scal = np.array(0)\n with pytest.raises(ValueError):\n a[\"y\"] = (\"y\", arr)\n with pytest.raises(ValueError):\n a[\"y\"] = (\"y\", scal)\n assert \"y\" not in a.dims\n\n def test_coords_properties(self):\n # use int64 for repr consistency on windows\n data = Dataset(\n {\n \"x\": (\"x\", np.array([-1, -2], \"int64\")),\n \"y\": (\"y\", np.array([0, 1, 2], \"int64\")),\n \"foo\": ([\"x\", \"y\"], np.random.randn(2, 3)),\n },\n {\"a\": (\"x\", np.array([4, 5], \"int64\")), \"b\": np.int64(-10)},\n )\n\n assert 4 == len(data.coords)\n\n assert [\"x\", \"y\", \"a\", \"b\"] == list(data.coords)\n\n assert_identical(data.coords[\"x\"].variable, data[\"x\"].variable)\n assert_identical(data.coords[\"y\"].variable, data[\"y\"].variable)\n\n assert \"x\" in data.coords\n assert \"a\" in data.coords\n assert 0 not in data.coords\n assert \"foo\" not in data.coords\n\n with pytest.raises(KeyError):\n data.coords[\"foo\"]\n with pytest.raises(KeyError):\n data.coords[0]\n\n expected = dedent(\n \"\"\"\\\n Coordinates:\n * x (x) int64 -1 -2\n * y (y) int64 0 1 2\n a (x) int64 4 5\n b int64 -10\"\"\"\n )\n actual = repr(data.coords)\n assert expected == actual\n\n assert {\"x\": 2, \"y\": 3} == data.coords.dims\n\n def test_coords_modify(self):\n data = Dataset(\n {\n \"x\": (\"x\", [-1, -2]),\n \"y\": (\"y\", [0, 1, 2]),\n \"foo\": ([\"x\", \"y\"], np.random.randn(2, 3)),\n },\n 
{\"a\": (\"x\", [4, 5]), \"b\": -10},\n )\n\n actual = data.copy(deep=True)\n actual.coords[\"x\"] = (\"x\", [\"a\", \"b\"])\n assert_array_equal(actual[\"x\"], [\"a\", \"b\"])\n\n actual = data.copy(deep=True)\n actual.coords[\"z\"] = (\"z\", [\"a\", \"b\"])\n assert_array_equal(actual[\"z\"], [\"a\", \"b\"])\n\n actual = data.copy(deep=True)\n with pytest.raises(ValueError, match=r\"conflicting sizes\"):\n actual.coords[\"x\"] = (\"x\", [-1])\n assert_identical(actual, data) # should not be modified\n\n actual = data.copy()\n del actual.coords[\"b\"]\n expected = data.reset_coords(\"b\", drop=True)\n assert_identical(expected, actual)\n\n with pytest.raises(KeyError):\n del data.coords[\"not_found\"]\n\n with pytest.raises(KeyError):\n del data.coords[\"foo\"]\n\n actual = data.copy(deep=True)\n actual.coords.update({\"c\": 11})\n expected = data.merge({\"c\": 11}).set_coords(\"c\")\n assert_identical(expected, actual)\n\n # regression test for GH3746\n del actual.coords[\"x\"]\n assert \"x\" not in actual.xindexes\n\n def test_update_index(self):\n actual = Dataset(coords={\"x\": [1, 2, 3]})\n actual[\"x\"] = [\"a\", \"b\", \"c\"]\n assert actual.xindexes[\"x\"].equals(pd.Index([\"a\", \"b\", \"c\"]))\n\n def test_coords_setitem_with_new_dimension(self):\n actual = Dataset()\n actual.coords[\"foo\"] = (\"x\", [1, 2, 3])\n expected = Dataset(coords={\"foo\": (\"x\", [1, 2, 3])})\n assert_identical(expected, actual)\n\n def test_coords_setitem_multiindex(self):\n data = create_test_multiindex()\n with pytest.raises(ValueError, match=r\"conflicting MultiIndex\"):\n data.coords[\"level_1\"] = range(4)\n\n def test_coords_set(self):\n one_coord = Dataset({\"x\": (\"x\", [0]), \"yy\": (\"x\", [1]), \"zzz\": (\"x\", [2])})\n two_coords = Dataset({\"zzz\": (\"x\", [2])}, {\"x\": (\"x\", [0]), \"yy\": (\"x\", [1])})\n all_coords = Dataset(\n coords={\"x\": (\"x\", [0]), \"yy\": (\"x\", [1]), \"zzz\": (\"x\", [2])}\n )\n\n actual = one_coord.set_coords(\"x\")\n assert_identical(one_coord, actual)\n actual = one_coord.set_coords([\"x\"])\n assert_identical(one_coord, actual)\n\n actual = one_coord.set_coords(\"yy\")\n assert_identical(two_coords, actual)\n\n actual = one_coord.set_coords([\"yy\", \"zzz\"])\n assert_identical(all_coords, actual)\n\n actual = one_coord.reset_coords()\n assert_identical(one_coord, actual)\n actual = two_coords.reset_coords()\n assert_identical(one_coord, actual)\n actual = all_coords.reset_coords()\n assert_identical(one_coord, actual)\n\n actual = all_coords.reset_coords([\"yy\", \"zzz\"])\n assert_identical(one_coord, actual)\n actual = all_coords.reset_coords(\"zzz\")\n assert_identical(two_coords, actual)\n\n with pytest.raises(ValueError, match=r\"cannot remove index\"):\n one_coord.reset_coords(\"x\")\n\n actual = all_coords.reset_coords(\"zzz\", drop=True)\n expected = all_coords.drop_vars(\"zzz\")\n assert_identical(expected, actual)\n expected = two_coords.drop_vars(\"zzz\")\n assert_identical(expected, actual)\n\n def test_coords_to_dataset(self):\n orig = Dataset({\"foo\": (\"y\", [-1, 0, 1])}, {\"x\": 10, \"y\": [2, 3, 4]})\n expected = Dataset(coords={\"x\": 10, \"y\": [2, 3, 4]})\n actual = orig.coords.to_dataset()\n assert_identical(expected, actual)\n\n def test_coords_merge(self):\n orig_coords = Dataset(coords={\"a\": (\"x\", [1, 2]), \"x\": [0, 1]}).coords\n other_coords = Dataset(coords={\"b\": (\"x\", [\"a\", \"b\"]), \"x\": [0, 1]}).coords\n expected = Dataset(\n coords={\"a\": (\"x\", [1, 2]), \"b\": (\"x\", [\"a\", \"b\"]), \"x\": [0, 1]}\n 
)\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n actual = other_coords.merge(orig_coords)\n assert_identical(expected, actual)\n\n other_coords = Dataset(coords={\"x\": (\"x\", [\"a\"])}).coords\n with pytest.raises(MergeError):\n orig_coords.merge(other_coords)\n other_coords = Dataset(coords={\"x\": (\"x\", [\"a\", \"b\"])}).coords\n with pytest.raises(MergeError):\n orig_coords.merge(other_coords)\n other_coords = Dataset(coords={\"x\": (\"x\", [\"a\", \"b\", \"c\"])}).coords\n with pytest.raises(MergeError):\n orig_coords.merge(other_coords)\n\n other_coords = Dataset(coords={\"a\": (\"x\", [8, 9])}).coords\n expected = Dataset(coords={\"x\": range(2)})\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n actual = other_coords.merge(orig_coords)\n assert_identical(expected, actual)\n\n other_coords = Dataset(coords={\"x\": np.nan}).coords\n actual = orig_coords.merge(other_coords)\n assert_identical(orig_coords.to_dataset(), actual)\n actual = other_coords.merge(orig_coords)\n assert_identical(orig_coords.to_dataset(), actual)\n\n def test_coords_merge_mismatched_shape(self):\n orig_coords = Dataset(coords={\"a\": (\"x\", [1, 1])}).coords\n other_coords = Dataset(coords={\"a\": 1}).coords\n expected = orig_coords.to_dataset()\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n\n other_coords = Dataset(coords={\"a\": (\"y\", [1])}).coords\n expected = Dataset(coords={\"a\": ([\"x\", \"y\"], [[1], [1]])})\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n\n actual = other_coords.merge(orig_coords)\n assert_identical(expected.transpose(), actual)\n\n orig_coords = Dataset(coords={\"a\": (\"x\", [np.nan])}).coords\n other_coords = Dataset(coords={\"a\": np.nan}).coords\n expected = orig_coords.to_dataset()\n actual = orig_coords.merge(other_coords)\n assert_identical(expected, actual)\n\n def test_data_vars_properties(self):\n ds = Dataset()\n ds[\"foo\"] = ((\"x\",), [1.0])\n ds[\"bar\"] = 2.0\n\n assert set(ds.data_vars) == {\"foo\", \"bar\"}\n assert \"foo\" in ds.data_vars\n assert \"x\" not in ds.data_vars\n assert_identical(ds[\"foo\"], ds.data_vars[\"foo\"])\n\n expected = dedent(\n \"\"\"\\\n Data variables:\n foo (x) float64 1.0\n bar float64 2.0\"\"\"\n )\n actual = repr(ds.data_vars)\n assert expected == actual\n\n def test_equals_and_identical(self):\n data = create_test_data(seed=42)\n assert data.equals(data)\n assert data.identical(data)\n\n data2 = create_test_data(seed=42)\n data2.attrs[\"foobar\"] = \"baz\"\n assert data.equals(data2)\n assert not data.identical(data2)\n\n del data2[\"time\"]\n assert not data.equals(data2)\n\n data = create_test_data(seed=42).rename({\"var1\": None})\n assert data.equals(data)\n assert data.identical(data)\n\n data2 = data.reset_coords()\n assert not data2.equals(data)\n assert not data2.identical(data)\n\n def test_equals_failures(self):\n data = create_test_data()\n assert not data.equals(\"foo\")\n assert not data.identical(123)\n assert not data.broadcast_equals({1: 2})\n\n def test_broadcast_equals(self):\n data1 = Dataset(coords={\"x\": 0})\n data2 = Dataset(coords={\"x\": [0]})\n assert data1.broadcast_equals(data2)\n assert not data1.equals(data2)\n assert not data1.identical(data2)\n\n def test_attrs(self):\n data = create_test_data(seed=42)\n data.attrs = {\"foobar\": \"baz\"}\n assert data.attrs[\"foobar\"], \"baz\"\n assert isinstance(data.attrs, dict)\n\n @requires_dask\n def test_chunk(self):\n data = 
create_test_data()\n for v in data.variables.values():\n assert isinstance(v.data, np.ndarray)\n assert data.chunks == {}\n\n reblocked = data.chunk()\n for k, v in reblocked.variables.items():\n if k in reblocked.dims:\n assert isinstance(v.data, np.ndarray)\n else:\n assert isinstance(v.data, da.Array)\n\n expected_chunks = {\"dim1\": (8,), \"dim2\": (9,), \"dim3\": (10,)}\n assert reblocked.chunks == expected_chunks\n\n def get_dask_names(ds):\n return {k: v.data.name for k, v in ds.items()}\n\n orig_dask_names = get_dask_names(reblocked)\n\n reblocked = data.chunk({\"time\": 5, \"dim1\": 5, \"dim2\": 5, \"dim3\": 5})\n # time is not a dim in any of the data_vars, so it\n # doesn't get chunked\n expected_chunks = {\"dim1\": (5, 3), \"dim2\": (5, 4), \"dim3\": (5, 5)}\n assert reblocked.chunks == expected_chunks\n\n # make sure dask names change when rechunking by different amounts\n # regression test for GH3350\n new_dask_names = get_dask_names(reblocked)\n for k, v in new_dask_names.items():\n assert v != orig_dask_names[k]\n\n reblocked = data.chunk(expected_chunks)\n assert reblocked.chunks == expected_chunks\n\n # reblock on already blocked data\n orig_dask_names = get_dask_names(reblocked)\n reblocked = reblocked.chunk(expected_chunks)\n new_dask_names = get_dask_names(reblocked)\n assert reblocked.chunks == expected_chunks\n assert_identical(reblocked, data)\n # recuhnking with same chunk sizes should not change names\n for k, v in new_dask_names.items():\n assert v == orig_dask_names[k]\n\n with pytest.raises(ValueError, match=r\"some chunks\"):\n data.chunk({\"foo\": 10})\n\n @requires_dask\n def test_dask_is_lazy(self):\n store = InaccessibleVariableDataStore()\n create_test_data().dump_to_store(store)\n ds = open_dataset(store).chunk()\n\n with pytest.raises(UnexpectedDataAccess):\n ds.load()\n with pytest.raises(UnexpectedDataAccess):\n ds[\"var1\"].values\n\n # these should not raise UnexpectedDataAccess:\n ds.var1.data\n ds.isel(time=10)\n ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)\n ds.transpose()\n ds.mean()\n ds.fillna(0)\n ds.rename({\"dim1\": \"foobar\"})\n ds.set_coords(\"var1\")\n ds.drop_vars(\"var1\")\n\n def test_isel(self):\n data = create_test_data()\n slicers = {\"dim1\": slice(None, None, 2), \"dim2\": slice(0, 2)}\n ret = data.isel(**slicers)\n\n # Verify that only the specified dimension was altered\n assert list(data.dims) == list(ret.dims)\n for d in data.dims:\n if d in slicers:\n assert ret.dims[d] == np.arange(data.dims[d])[slicers[d]].size\n else:\n assert data.dims[d] == ret.dims[d]\n # Verify that the data is what we expect\n for v in data.variables:\n assert data[v].dims == ret[v].dims\n assert data[v].attrs == ret[v].attrs\n slice_list = [slice(None)] * data[v].values.ndim\n for d, s in slicers.items():\n if d in data[v].dims:\n inds = np.nonzero(np.array(data[v].dims) == d)[0]\n for ind in inds:\n slice_list[ind] = s\n expected = data[v].values[tuple(slice_list)]\n actual = ret[v].values\n np.testing.assert_array_equal(expected, actual)\n\n with pytest.raises(ValueError):\n data.isel(not_a_dim=slice(0, 2))\n with pytest.raises(\n ValueError,\n match=r\"Dimensions {'not_a_dim'} do not exist. Expected \"\n r\"one or more of \"\n r\"[\\w\\W]*'dim\\d'[\\w\\W]*'dim\\d'[\\w\\W]*'time'[\\w\\W]*'dim\\d'[\\w\\W]*\",\n ):\n data.isel(not_a_dim=slice(0, 2))\n with pytest.warns(\n UserWarning,\n match=r\"Dimensions {'not_a_dim'} do not exist. 
\"\n r\"Expected one or more of \"\n r\"[\\w\\W]*'dim\\d'[\\w\\W]*'dim\\d'[\\w\\W]*'time'[\\w\\W]*'dim\\d'[\\w\\W]*\",\n ):\n data.isel(not_a_dim=slice(0, 2), missing_dims=\"warn\")\n assert_identical(data, data.isel(not_a_dim=slice(0, 2), missing_dims=\"ignore\"))\n\n ret = data.isel(dim1=0)\n assert {\"time\": 20, \"dim2\": 9, \"dim3\": 10} == ret.dims\n assert set(data.data_vars) == set(ret.data_vars)\n assert set(data.coords) == set(ret.coords)\n assert set(data.xindexes) == set(ret.xindexes)\n\n ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))\n assert {\"time\": 2, \"dim2\": 5, \"dim3\": 10} == ret.dims\n assert set(data.data_vars) == set(ret.data_vars)\n assert set(data.coords) == set(ret.coords)\n assert set(data.xindexes) == set(ret.xindexes)\n\n ret = data.isel(time=0, dim1=0, dim2=slice(5))\n assert {\"dim2\": 5, \"dim3\": 10} == ret.dims\n assert set(data.data_vars) == set(ret.data_vars)\n assert set(data.coords) == set(ret.coords)\n assert set(data.xindexes) == set(list(ret.xindexes) + [\"time\"])\n\n def test_isel_fancy(self):\n # isel with fancy indexing.\n data = create_test_data()\n\n pdim1 = [1, 2, 3]\n pdim2 = [4, 5, 1]\n pdim3 = [1, 2, 3]\n actual = data.isel(\n dim1=((\"test_coord\",), pdim1),\n dim2=((\"test_coord\",), pdim2),\n dim3=((\"test_coord\",), pdim3),\n )\n assert \"test_coord\" in actual.dims\n assert actual.coords[\"test_coord\"].shape == (len(pdim1),)\n\n # Should work with DataArray\n actual = data.isel(\n dim1=DataArray(pdim1, dims=\"test_coord\"),\n dim2=((\"test_coord\",), pdim2),\n dim3=((\"test_coord\",), pdim3),\n )\n assert \"test_coord\" in actual.dims\n assert actual.coords[\"test_coord\"].shape == (len(pdim1),)\n expected = data.isel(\n dim1=((\"test_coord\",), pdim1),\n dim2=((\"test_coord\",), pdim2),\n dim3=((\"test_coord\",), pdim3),\n )\n assert_identical(actual, expected)\n\n # DataArray with coordinate\n idx1 = DataArray(pdim1, dims=[\"a\"], coords={\"a\": np.random.randn(3)})\n idx2 = DataArray(pdim2, dims=[\"b\"], coords={\"b\": np.random.randn(3)})\n idx3 = DataArray(pdim3, dims=[\"c\"], coords={\"c\": np.random.randn(3)})\n # Should work with DataArray\n actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)\n assert \"a\" in actual.dims\n assert \"b\" in actual.dims\n assert \"c\" in actual.dims\n assert \"time\" in actual.coords\n assert \"dim2\" in actual.coords\n assert \"dim3\" in actual.coords\n expected = data.isel(\n dim1=((\"a\",), pdim1), dim2=((\"b\",), pdim2), dim3=((\"c\",), pdim3)\n )\n expected = expected.assign_coords(a=idx1[\"a\"], b=idx2[\"b\"], c=idx3[\"c\"])\n assert_identical(actual, expected)\n\n idx1 = DataArray(pdim1, dims=[\"a\"], coords={\"a\": np.random.randn(3)})\n idx2 = DataArray(pdim2, dims=[\"a\"])\n idx3 = DataArray(pdim3, dims=[\"a\"])\n # Should work with DataArray\n actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)\n assert \"a\" in actual.dims\n assert \"time\" in actual.coords\n assert \"dim2\" in actual.coords\n assert \"dim3\" in actual.coords\n expected = data.isel(\n dim1=((\"a\",), pdim1), dim2=((\"a\",), pdim2), dim3=((\"a\",), pdim3)\n )\n expected = expected.assign_coords(a=idx1[\"a\"])\n assert_identical(actual, expected)\n\n actual = data.isel(dim1=((\"points\",), pdim1), dim2=((\"points\",), pdim2))\n assert \"points\" in actual.dims\n assert \"dim3\" in actual.dims\n assert \"dim3\" not in actual.data_vars\n np.testing.assert_array_equal(data[\"dim2\"][pdim2], actual[\"dim2\"])\n\n # test that the order of the indexers doesn't matter\n assert_identical(\n 
data.isel(dim1=((\"points\",), pdim1), dim2=((\"points\",), pdim2)),\n data.isel(dim2=((\"points\",), pdim2), dim1=((\"points\",), pdim1)),\n )\n # make sure we're raising errors in the right places\n with pytest.raises(IndexError, match=r\"Dimensions of indexers mismatch\"):\n data.isel(dim1=((\"points\",), [1, 2]), dim2=((\"points\",), [1, 2, 3]))\n with pytest.raises(TypeError, match=r\"cannot use a Dataset\"):\n data.isel(dim1=Dataset({\"points\": [1, 2]}))\n\n # test to be sure we keep around variables that were not indexed\n ds = Dataset({\"x\": [1, 2, 3, 4], \"y\": 0})\n actual = ds.isel(x=((\"points\",), [0, 1, 2]))\n assert_identical(ds[\"y\"], actual[\"y\"])\n\n # tests using index or DataArray as indexers\n stations = Dataset()\n stations[\"station\"] = ((\"station\",), [\"A\", \"B\", \"C\"])\n stations[\"dim1s\"] = ((\"station\",), [1, 2, 3])\n stations[\"dim2s\"] = ((\"station\",), [4, 5, 1])\n\n actual = data.isel(dim1=stations[\"dim1s\"], dim2=stations[\"dim2s\"])\n assert \"station\" in actual.coords\n assert \"station\" in actual.dims\n assert_identical(actual[\"station\"].drop_vars([\"dim2\"]), stations[\"station\"])\n\n with pytest.raises(ValueError, match=r\"conflicting values for \"):\n data.isel(\n dim1=DataArray(\n [0, 1, 2], dims=\"station\", coords={\"station\": [0, 1, 2]}\n ),\n dim2=DataArray(\n [0, 1, 2], dims=\"station\", coords={\"station\": [0, 1, 3]}\n ),\n )\n\n # multi-dimensional selection\n stations = Dataset()\n stations[\"a\"] = ((\"a\",), [\"A\", \"B\", \"C\"])\n stations[\"b\"] = ((\"b\",), [0, 1])\n stations[\"dim1s\"] = ((\"a\", \"b\"), [[1, 2], [2, 3], [3, 4]])\n stations[\"dim2s\"] = ((\"a\",), [4, 5, 1])\n actual = data.isel(dim1=stations[\"dim1s\"], dim2=stations[\"dim2s\"])\n assert \"a\" in actual.coords\n assert \"a\" in actual.dims\n assert \"b\" in actual.coords\n assert \"b\" in actual.dims\n assert \"dim2\" in actual.coords\n assert \"a\" in actual[\"dim2\"].dims\n\n assert_identical(actual[\"a\"].drop_vars([\"dim2\"]), stations[\"a\"])\n assert_identical(actual[\"b\"], stations[\"b\"])\n expected_var1 = data[\"var1\"].variable[\n stations[\"dim1s\"].variable, stations[\"dim2s\"].variable\n ]\n expected_var2 = data[\"var2\"].variable[\n stations[\"dim1s\"].variable, stations[\"dim2s\"].variable\n ]\n expected_var3 = data[\"var3\"].variable[slice(None), stations[\"dim1s\"].variable]\n assert_equal(actual[\"a\"].drop_vars(\"dim2\"), stations[\"a\"])\n assert_array_equal(actual[\"var1\"], expected_var1)\n assert_array_equal(actual[\"var2\"], expected_var2)\n assert_array_equal(actual[\"var3\"], expected_var3)\n\n def test_isel_dataarray(self):\n \"\"\"Test for indexing by DataArray\"\"\"\n data = create_test_data()\n # indexing with DataArray with same-name coordinates.\n indexing_da = DataArray(\n np.arange(1, 4), dims=[\"dim1\"], coords={\"dim1\": np.random.randn(3)}\n )\n actual = data.isel(dim1=indexing_da)\n assert_identical(indexing_da[\"dim1\"], actual[\"dim1\"])\n assert_identical(data[\"dim2\"], actual[\"dim2\"])\n\n # Conflict in the dimension coordinate\n indexing_da = DataArray(\n np.arange(1, 4), dims=[\"dim2\"], coords={\"dim2\": np.random.randn(3)}\n )\n with pytest.raises(IndexError, match=r\"dimension coordinate 'dim2'\"):\n actual = data.isel(dim2=indexing_da)\n # Also the case for DataArray\n with pytest.raises(IndexError, match=r\"dimension coordinate 'dim2'\"):\n actual = data[\"var2\"].isel(dim2=indexing_da)\n with pytest.raises(IndexError, match=r\"dimension coordinate 'dim2'\"):\n 
data[\"dim2\"].isel(dim2=indexing_da)\n\n # same name coordinate which does not conflict\n indexing_da = DataArray(\n np.arange(1, 4), dims=[\"dim2\"], coords={\"dim2\": data[\"dim2\"].values[1:4]}\n )\n actual = data.isel(dim2=indexing_da)\n assert_identical(actual[\"dim2\"], indexing_da[\"dim2\"])\n\n # Silently drop conflicted (non-dimensional) coordinate of indexer\n indexing_da = DataArray(\n np.arange(1, 4),\n dims=[\"dim2\"],\n coords={\n \"dim2\": data[\"dim2\"].values[1:4],\n \"numbers\": (\"dim2\", np.arange(2, 5)),\n },\n )\n actual = data.isel(dim2=indexing_da)\n assert_identical(actual[\"numbers\"], data[\"numbers\"])\n\n # boolean data array with coordinate with the same name\n indexing_da = DataArray(\n np.arange(1, 10), dims=[\"dim2\"], coords={\"dim2\": data[\"dim2\"].values}\n )\n indexing_da = indexing_da < 3\n actual = data.isel(dim2=indexing_da)\n assert_identical(actual[\"dim2\"], data[\"dim2\"][:2])\n\n # boolean data array with non-dimensioncoordinate\n indexing_da = DataArray(\n np.arange(1, 10),\n dims=[\"dim2\"],\n coords={\n \"dim2\": data[\"dim2\"].values,\n \"non_dim\": ((\"dim2\",), np.random.randn(9)),\n \"non_dim2\": 0,\n },\n )\n indexing_da = indexing_da < 3\n actual = data.isel(dim2=indexing_da)\n assert_identical(\n actual[\"dim2\"].drop_vars(\"non_dim\").drop_vars(\"non_dim2\"), data[\"dim2\"][:2]\n )\n assert_identical(actual[\"non_dim\"], indexing_da[\"non_dim\"][:2])\n assert_identical(actual[\"non_dim2\"], indexing_da[\"non_dim2\"])\n\n # non-dimension coordinate will be also attached\n indexing_da = DataArray(\n np.arange(1, 4),\n dims=[\"dim2\"],\n coords={\"non_dim\": ((\"dim2\",), np.random.randn(3))},\n )\n actual = data.isel(dim2=indexing_da)\n assert \"non_dim\" in actual\n assert \"non_dim\" in actual.coords\n\n # Index by a scalar DataArray\n indexing_da = DataArray(3, dims=[], coords={\"station\": 2})\n actual = data.isel(dim2=indexing_da)\n assert \"station\" in actual\n actual = data.isel(dim2=indexing_da[\"station\"])\n assert \"station\" in actual\n\n # indexer generated from coordinates\n indexing_ds = Dataset({}, coords={\"dim2\": [0, 1, 2]})\n with pytest.raises(IndexError, match=r\"dimension coordinate 'dim2'\"):\n actual = data.isel(dim2=indexing_ds[\"dim2\"])\n\n def test_sel(self):\n data = create_test_data()\n int_slicers = {\"dim1\": slice(None, None, 2), \"dim2\": slice(2), \"dim3\": slice(3)}\n loc_slicers = {\n \"dim1\": slice(None, None, 2),\n \"dim2\": slice(0, 0.5),\n \"dim3\": slice(\"a\", \"c\"),\n }\n assert_equal(data.isel(**int_slicers), data.sel(**loc_slicers))\n data[\"time\"] = (\"time\", pd.date_range(\"2000-01-01\", periods=20))\n assert_equal(data.isel(time=0), data.sel(time=\"2000-01-01\"))\n assert_equal(\n data.isel(time=slice(10)), data.sel(time=slice(\"2000-01-01\", \"2000-01-10\"))\n )\n assert_equal(data, data.sel(time=slice(\"1999\", \"2005\")))\n times = pd.date_range(\"2000-01-01\", periods=3)\n assert_equal(data.isel(time=slice(3)), data.sel(time=times))\n assert_equal(\n data.isel(time=slice(3)), data.sel(time=(data[\"time.dayofyear\"] <= 3))\n )\n\n td = pd.to_timedelta(np.arange(3), unit=\"days\")\n data = Dataset({\"x\": (\"td\", np.arange(3)), \"td\": td})\n assert_equal(data, data.sel(td=td))\n assert_equal(data, data.sel(td=slice(\"3 days\")))\n assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta(\"0 days\")))\n assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta(\"0h\")))\n assert_equal(data.isel(td=slice(1, 3)), data.sel(td=slice(\"1 days\", \"2 days\")))\n\n def 
test_sel_dataarray(self):\n data = create_test_data()\n\n ind = DataArray([0.0, 0.5, 1.0], dims=[\"dim2\"])\n actual = data.sel(dim2=ind)\n assert_equal(actual, data.isel(dim2=[0, 1, 2]))\n\n # with different dimension\n ind = DataArray([0.0, 0.5, 1.0], dims=[\"new_dim\"])\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=Variable(\"new_dim\", [0, 1, 2]))\n assert \"new_dim\" in actual.dims\n assert_equal(actual, expected)\n\n # Multi-dimensional\n ind = DataArray([[0.0], [0.5], [1.0]], dims=[\"new_dim\", \"new_dim2\"])\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=Variable((\"new_dim\", \"new_dim2\"), [[0], [1], [2]]))\n assert \"new_dim\" in actual.dims\n assert \"new_dim2\" in actual.dims\n assert_equal(actual, expected)\n\n # with coordinate\n ind = DataArray(\n [0.0, 0.5, 1.0], dims=[\"new_dim\"], coords={\"new_dim\": [\"a\", \"b\", \"c\"]}\n )\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=[0, 1, 2]).rename({\"dim2\": \"new_dim\"})\n assert \"new_dim\" in actual.dims\n assert \"new_dim\" in actual.coords\n assert_equal(\n actual.drop_vars(\"new_dim\").drop_vars(\"dim2\"), expected.drop_vars(\"new_dim\")\n )\n assert_equal(actual[\"new_dim\"].drop_vars(\"dim2\"), ind[\"new_dim\"])\n\n # with conflicted coordinate (silently ignored)\n ind = DataArray(\n [0.0, 0.5, 1.0], dims=[\"dim2\"], coords={\"dim2\": [\"a\", \"b\", \"c\"]}\n )\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=[0, 1, 2])\n assert_equal(actual, expected)\n\n # with conflicted coordinate (silently ignored)\n ind = DataArray(\n [0.0, 0.5, 1.0],\n dims=[\"new_dim\"],\n coords={\"new_dim\": [\"a\", \"b\", \"c\"], \"dim2\": 3},\n )\n actual = data.sel(dim2=ind)\n assert_equal(\n actual[\"new_dim\"].drop_vars(\"dim2\"), ind[\"new_dim\"].drop_vars(\"dim2\")\n )\n expected = data.isel(dim2=[0, 1, 2])\n expected[\"dim2\"] = ((\"new_dim\"), expected[\"dim2\"].values)\n assert_equal(actual[\"dim2\"].drop_vars(\"new_dim\"), expected[\"dim2\"])\n assert actual[\"var1\"].dims == (\"dim1\", \"new_dim\")\n\n # with non-dimensional coordinate\n ind = DataArray(\n [0.0, 0.5, 1.0],\n dims=[\"dim2\"],\n coords={\n \"dim2\": [\"a\", \"b\", \"c\"],\n \"numbers\": (\"dim2\", [0, 1, 2]),\n \"new_dim\": (\"dim2\", [1.1, 1.2, 1.3]),\n },\n )\n actual = data.sel(dim2=ind)\n expected = data.isel(dim2=[0, 1, 2])\n assert_equal(actual.drop_vars(\"new_dim\"), expected)\n assert np.allclose(actual[\"new_dim\"].values, ind[\"new_dim\"].values)\n\n def test_sel_dataarray_mindex(self):\n midx = pd.MultiIndex.from_product([list(\"abc\"), [0, 1]], names=(\"one\", \"two\"))\n mds = xr.Dataset(\n {\"var\": ((\"x\", \"y\"), np.random.rand(6, 3))},\n coords={\"x\": midx, \"y\": range(3)},\n )\n\n actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims=\"x\"))\n actual_sel = mds.sel(x=DataArray(midx[:3], dims=\"x\"))\n assert actual_isel[\"x\"].dims == (\"x\",)\n assert actual_sel[\"x\"].dims == (\"x\",)\n assert_identical(actual_isel, actual_sel)\n\n actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims=\"z\"))\n actual_sel = mds.sel(x=Variable(\"z\", midx[:3]))\n assert actual_isel[\"x\"].dims == (\"z\",)\n assert actual_sel[\"x\"].dims == (\"z\",)\n assert_identical(actual_isel, actual_sel)\n\n # with coordinate\n actual_isel = mds.isel(\n x=xr.DataArray(np.arange(3), dims=\"z\", coords={\"z\": [0, 1, 2]})\n )\n actual_sel = mds.sel(\n x=xr.DataArray(midx[:3], dims=\"z\", coords={\"z\": [0, 1, 2]})\n )\n assert actual_isel[\"x\"].dims == (\"z\",)\n assert actual_sel[\"x\"].dims == (\"z\",)\n 
assert_identical(actual_isel, actual_sel)\n\n # Vectorized indexing with level-variables raises an error\n with pytest.raises(ValueError, match=r\"Vectorized selection is \"):\n mds.sel(one=[\"a\", \"b\"])\n\n with pytest.raises(\n ValueError,\n match=r\"Vectorized selection is not available along MultiIndex variable: x\",\n ):\n mds.sel(\n x=xr.DataArray(\n [np.array(midx[:2]), np.array(midx[-2:])], dims=[\"a\", \"b\"]\n )\n )\n\n def test_sel_categorical(self):\n ind = pd.Series([\"foo\", \"bar\"], dtype=\"category\")\n df = pd.DataFrame({\"ind\": ind, \"values\": [1, 2]})\n ds = df.set_index(\"ind\").to_xarray()\n actual = ds.sel(ind=\"bar\")\n expected = ds.isel(ind=1)\n assert_identical(expected, actual)\n\n def test_sel_categorical_error(self):\n ind = pd.Series([\"foo\", \"bar\"], dtype=\"category\")\n df = pd.DataFrame({\"ind\": ind, \"values\": [1, 2]})\n ds = df.set_index(\"ind\").to_xarray()\n with pytest.raises(ValueError):\n ds.sel(ind=\"bar\", method=\"nearest\")\n with pytest.raises(ValueError):\n ds.sel(ind=\"bar\", tolerance=\"nearest\")\n\n def test_categorical_index(self):\n cat = pd.CategoricalIndex(\n [\"foo\", \"bar\", \"foo\"],\n categories=[\"foo\", \"bar\", \"baz\", \"qux\", \"quux\", \"corge\"],\n )\n ds = xr.Dataset(\n {\"var\": (\"cat\", np.arange(3))},\n coords={\"cat\": (\"cat\", cat), \"c\": (\"cat\", [0, 1, 1])},\n )\n # test slice\n actual = ds.sel(cat=\"foo\")\n expected = ds.isel(cat=[0, 2])\n assert_identical(expected, actual)\n # make sure the conversion to the array works\n actual = ds.sel(cat=\"foo\")[\"cat\"].values\n assert (actual == np.array([\"foo\", \"foo\"])).all()\n\n ds = ds.set_index(index=[\"cat\", \"c\"])\n actual = ds.unstack(\"index\")\n assert actual[\"var\"].shape == (2, 2)\n\n def test_categorical_reindex(self):\n cat = pd.CategoricalIndex(\n [\"foo\", \"bar\", \"baz\"],\n categories=[\"foo\", \"bar\", \"baz\", \"qux\", \"quux\", \"corge\"],\n )\n ds = xr.Dataset(\n {\"var\": (\"cat\", np.arange(3))},\n coords={\"cat\": (\"cat\", cat), \"c\": (\"cat\", [0, 1, 2])},\n )\n actual = ds.reindex(cat=[\"foo\"])[\"cat\"].values\n assert (actual == np.array([\"foo\"])).all()\n\n def test_categorical_multiindex(self):\n i1 = pd.Series([0, 0])\n cat = pd.CategoricalDtype(categories=[\"foo\", \"baz\", \"bar\"])\n i2 = pd.Series([\"baz\", \"bar\"], dtype=cat)\n\n df = pd.DataFrame({\"i1\": i1, \"i2\": i2, \"values\": [1, 2]}).set_index(\n [\"i1\", \"i2\"]\n )\n actual = df.to_xarray()\n assert actual[\"values\"].shape == (1, 2)\n\n def test_sel_drop(self):\n data = Dataset({\"foo\": (\"x\", [1, 2, 3])}, {\"x\": [0, 1, 2]})\n expected = Dataset({\"foo\": 1})\n selected = data.sel(x=0, drop=True)\n assert_identical(expected, selected)\n\n expected = Dataset({\"foo\": 1}, {\"x\": 0})\n selected = data.sel(x=0, drop=False)\n assert_identical(expected, selected)\n\n data = Dataset({\"foo\": (\"x\", [1, 2, 3])})\n expected = Dataset({\"foo\": 1})\n selected = data.sel(x=0, drop=True)\n assert_identical(expected, selected)\n\n def test_isel_drop(self):\n data = Dataset({\"foo\": (\"x\", [1, 2, 3])}, {\"x\": [0, 1, 2]})\n expected = Dataset({\"foo\": 1})\n selected = data.isel(x=0, drop=True)\n assert_identical(expected, selected)\n\n expected = Dataset({\"foo\": 1}, {\"x\": 0})\n selected = data.isel(x=0, drop=False)\n assert_identical(expected, selected)\n\n def test_head(self):\n data = create_test_data()\n\n expected = data.isel(time=slice(5), dim2=slice(6))\n actual = data.head(time=5, dim2=6)\n assert_equal(expected, actual)\n\n expected = 
data.isel(time=slice(0))\n actual = data.head(time=0)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: slice(6) for dim in data.dims})\n actual = data.head(6)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: slice(5) for dim in data.dims})\n actual = data.head()\n assert_equal(expected, actual)\n\n with pytest.raises(TypeError, match=r\"either dict-like or a single int\"):\n data.head([3])\n with pytest.raises(TypeError, match=r\"expected integer type\"):\n data.head(dim2=3.1)\n with pytest.raises(ValueError, match=r\"expected positive int\"):\n data.head(time=-3)\n\n def test_tail(self):\n data = create_test_data()\n\n expected = data.isel(time=slice(-5, None), dim2=slice(-6, None))\n actual = data.tail(time=5, dim2=6)\n assert_equal(expected, actual)\n\n expected = data.isel(dim1=slice(0))\n actual = data.tail(dim1=0)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: slice(-6, None) for dim in data.dims})\n actual = data.tail(6)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: slice(-5, None) for dim in data.dims})\n actual = data.tail()\n assert_equal(expected, actual)\n\n with pytest.raises(TypeError, match=r\"either dict-like or a single int\"):\n data.tail([3])\n with pytest.raises(TypeError, match=r\"expected integer type\"):\n data.tail(dim2=3.1)\n with pytest.raises(ValueError, match=r\"expected positive int\"):\n data.tail(time=-3)\n\n def test_thin(self):\n data = create_test_data()\n\n expected = data.isel(time=slice(None, None, 5), dim2=slice(None, None, 6))\n actual = data.thin(time=5, dim2=6)\n assert_equal(expected, actual)\n\n expected = data.isel({dim: slice(None, None, 6) for dim in data.dims})\n actual = data.thin(6)\n assert_equal(expected, actual)\n\n with pytest.raises(TypeError, match=r\"either dict-like or a single int\"):\n data.thin([3])\n with pytest.raises(TypeError, match=r\"expected integer type\"):\n data.thin(dim2=3.1)\n with pytest.raises(ValueError, match=r\"cannot be zero\"):\n data.thin(time=0)\n with pytest.raises(ValueError, match=r\"expected positive int\"):\n data.thin(time=-3)\n\n @pytest.mark.filterwarnings(\"ignore::DeprecationWarning\")\n def test_sel_fancy(self):\n data = create_test_data()\n\n # add in a range() index\n data[\"dim1\"] = data.dim1\n\n pdim1 = [1, 2, 3]\n pdim2 = [4, 5, 1]\n pdim3 = [1, 2, 3]\n expected = data.isel(\n dim1=Variable((\"test_coord\",), pdim1),\n dim2=Variable((\"test_coord\",), pdim2),\n dim3=Variable((\"test_coord\"), pdim3),\n )\n actual = data.sel(\n dim1=Variable((\"test_coord\",), data.dim1[pdim1]),\n dim2=Variable((\"test_coord\",), data.dim2[pdim2]),\n dim3=Variable((\"test_coord\",), data.dim3[pdim3]),\n )\n assert_identical(expected, actual)\n\n # DataArray Indexer\n idx_t = DataArray(\n data[\"time\"][[3, 2, 1]].values, dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]}\n )\n idx_2 = DataArray(\n data[\"dim2\"][[3, 2, 1]].values, dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]}\n )\n idx_3 = DataArray(\n data[\"dim3\"][[3, 2, 1]].values, dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]}\n )\n actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3)\n expected = data.isel(\n time=Variable((\"a\",), [3, 2, 1]),\n dim2=Variable((\"a\",), [3, 2, 1]),\n dim3=Variable((\"a\",), [3, 2, 1]),\n )\n expected = expected.assign_coords(a=idx_t[\"a\"])\n assert_identical(expected, actual)\n\n idx_t = DataArray(\n data[\"time\"][[3, 2, 1]].values, dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]}\n )\n idx_2 = DataArray(\n data[\"dim2\"][[2, 1, 3]].values, 
dims=[\"b\"], coords={\"b\": [0, 1, 2]}\n )\n idx_3 = DataArray(\n data[\"dim3\"][[1, 2, 1]].values, dims=[\"c\"], coords={\"c\": [0.0, 1.1, 2.2]}\n )\n actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3)\n expected = data.isel(\n time=Variable((\"a\",), [3, 2, 1]),\n dim2=Variable((\"b\",), [2, 1, 3]),\n dim3=Variable((\"c\",), [1, 2, 1]),\n )\n expected = expected.assign_coords(a=idx_t[\"a\"], b=idx_2[\"b\"], c=idx_3[\"c\"])\n assert_identical(expected, actual)\n\n # test from sel_points\n data = Dataset({\"foo\": ((\"x\", \"y\"), np.arange(9).reshape(3, 3))})\n data.coords.update({\"x\": [0, 1, 2], \"y\": [0, 1, 2]})\n\n expected = Dataset(\n {\"foo\": (\"points\", [0, 4, 8])},\n coords={\n \"x\": Variable((\"points\",), [0, 1, 2]),\n \"y\": Variable((\"points\",), [0, 1, 2]),\n },\n )\n actual = data.sel(\n x=Variable((\"points\",), [0, 1, 2]), y=Variable((\"points\",), [0, 1, 2])\n )\n assert_identical(expected, actual)\n\n expected.coords.update({\"x\": (\"points\", [0, 1, 2]), \"y\": (\"points\", [0, 1, 2])})\n actual = data.sel(\n x=Variable((\"points\",), [0.1, 1.1, 2.5]),\n y=Variable((\"points\",), [0, 1.2, 2.0]),\n method=\"pad\",\n )\n assert_identical(expected, actual)\n\n idx_x = DataArray([0, 1, 2], dims=[\"a\"], coords={\"a\": [\"a\", \"b\", \"c\"]})\n idx_y = DataArray([0, 2, 1], dims=[\"b\"], coords={\"b\": [0, 3, 6]})\n expected_ary = data[\"foo\"][[0, 1, 2], [0, 2, 1]]\n actual = data.sel(x=idx_x, y=idx_y)\n assert_array_equal(expected_ary, actual[\"foo\"])\n assert_identical(actual[\"a\"].drop_vars(\"x\"), idx_x[\"a\"])\n assert_identical(actual[\"b\"].drop_vars(\"y\"), idx_y[\"b\"])\n\n with pytest.raises(KeyError):\n data.sel(x=[2.5], y=[2.0], method=\"pad\", tolerance=1e-3)\n\n def test_sel_method(self):\n data = create_test_data()\n\n expected = data.sel(dim2=1)\n actual = data.sel(dim2=0.95, method=\"nearest\")\n assert_identical(expected, actual)\n\n actual = data.sel(dim2=0.95, method=\"nearest\", tolerance=1)\n assert_identical(expected, actual)\n\n with pytest.raises(KeyError):\n actual = data.sel(dim2=np.pi, method=\"nearest\", tolerance=0)\n\n expected = data.sel(dim2=[1.5])\n actual = data.sel(dim2=[1.45], method=\"backfill\")\n assert_identical(expected, actual)\n\n with pytest.raises(NotImplementedError, match=r\"slice objects\"):\n data.sel(dim2=slice(1, 3), method=\"ffill\")\n\n with pytest.raises(TypeError, match=r\"``method``\"):\n # this should not pass silently\n data.sel(method=data)\n\n # cannot pass method if there is no associated coordinate\n with pytest.raises(ValueError, match=r\"cannot supply\"):\n data.sel(dim1=0, method=\"nearest\")\n\n def test_loc(self):\n data = create_test_data()\n expected = data.sel(dim3=\"a\")\n actual = data.loc[dict(dim3=\"a\")]\n assert_identical(expected, actual)\n with pytest.raises(TypeError, match=r\"can only lookup dict\"):\n data.loc[\"a\"]\n\n def test_selection_multiindex(self):\n mindex = pd.MultiIndex.from_product(\n [[\"a\", \"b\"], [1, 2], [-1, -2]], names=(\"one\", \"two\", \"three\")\n )\n mdata = Dataset(data_vars={\"var\": (\"x\", range(8))}, coords={\"x\": mindex})\n\n def test_sel(lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None):\n ds = mdata.sel(x=lab_indexer)\n expected_ds = mdata.isel(x=pos_indexer)\n if not replaced_idx:\n assert_identical(ds, expected_ds)\n else:\n if renamed_dim:\n assert ds[\"var\"].dims[0] == renamed_dim\n ds = ds.rename({renamed_dim: \"x\"})\n assert_identical(ds[\"var\"].variable, expected_ds[\"var\"].variable)\n assert not 
ds[\"x\"].equals(expected_ds[\"x\"])\n\n test_sel((\"a\", 1, -1), 0)\n test_sel((\"b\", 2, -2), -1)\n test_sel((\"a\", 1), [0, 1], replaced_idx=True, renamed_dim=\"three\")\n test_sel((\"a\",), range(4), replaced_idx=True)\n test_sel(\"a\", range(4), replaced_idx=True)\n test_sel([(\"a\", 1, -1), (\"b\", 2, -2)], [0, 7])\n test_sel(slice(\"a\", \"b\"), range(8))\n test_sel(slice((\"a\", 1), (\"b\", 1)), range(6))\n test_sel({\"one\": \"a\", \"two\": 1, \"three\": -1}, 0)\n test_sel({\"one\": \"a\", \"two\": 1}, [0, 1], replaced_idx=True, renamed_dim=\"three\")\n test_sel({\"one\": \"a\"}, range(4), replaced_idx=True)\n\n assert_identical(mdata.loc[{\"x\": {\"one\": \"a\"}}], mdata.sel(x={\"one\": \"a\"}))\n assert_identical(mdata.loc[{\"x\": \"a\"}], mdata.sel(x=\"a\"))\n assert_identical(mdata.loc[{\"x\": (\"a\", 1)}], mdata.sel(x=(\"a\", 1)))\n assert_identical(mdata.loc[{\"x\": (\"a\", 1, -1)}], mdata.sel(x=(\"a\", 1, -1)))\n\n assert_identical(mdata.sel(x={\"one\": \"a\", \"two\": 1}), mdata.sel(one=\"a\", two=1))\n\n def test_broadcast_like(self):\n original1 = DataArray(\n np.random.randn(5), [(\"x\", range(5))], name=\"a\"\n ).to_dataset()\n\n original2 = DataArray(np.random.randn(6), [(\"y\", range(6))], name=\"b\")\n\n expected1, expected2 = broadcast(original1, original2)\n\n assert_identical(\n original1.broadcast_like(original2), expected1.transpose(\"y\", \"x\")\n )\n\n assert_identical(original2.broadcast_like(original1), expected2)\n\n def test_to_pandas(self):\n # 0D -> series\n actual = Dataset({\"a\": 1, \"b\": 2}).to_pandas()\n expected = pd.Series([1, 2], [\"a\", \"b\"])\n assert_array_equal(actual, expected)\n\n # 1D -> dataframe\n x = np.random.randn(10)\n y = np.random.randn(10)\n t = list(\"abcdefghij\")\n ds = Dataset({\"a\": (\"t\", x), \"b\": (\"t\", y), \"t\": (\"t\", t)})\n actual = ds.to_pandas()\n expected = ds.to_dataframe()\n assert expected.equals(actual), (expected, actual)\n\n # 2D -> error\n x2d = np.random.randn(10, 10)\n y2d = np.random.randn(10, 10)\n with pytest.raises(ValueError, match=r\"cannot convert Datasets\"):\n Dataset({\"a\": ([\"t\", \"r\"], x2d), \"b\": ([\"t\", \"r\"], y2d)}).to_pandas()\n\n def test_reindex_like(self):\n data = create_test_data()\n data[\"letters\"] = (\"dim3\", 10 * [\"a\"])\n\n expected = data.isel(dim1=slice(10), time=slice(13))\n actual = data.reindex_like(expected)\n assert_identical(actual, expected)\n\n expected = data.copy(deep=True)\n expected[\"dim3\"] = (\"dim3\", list(\"cdefghijkl\"))\n expected[\"var3\"][:-2] = expected[\"var3\"][2:].values\n expected[\"var3\"][-2:] = np.nan\n expected[\"letters\"] = expected[\"letters\"].astype(object)\n expected[\"letters\"][-2:] = np.nan\n expected[\"numbers\"] = expected[\"numbers\"].astype(float)\n expected[\"numbers\"][:-2] = expected[\"numbers\"][2:].values\n expected[\"numbers\"][-2:] = np.nan\n actual = data.reindex_like(expected)\n assert_identical(actual, expected)\n\n def test_reindex(self):\n data = create_test_data()\n assert_identical(data, data.reindex())\n\n expected = data.assign_coords(dim1=data[\"dim1\"])\n actual = data.reindex(dim1=data[\"dim1\"])\n assert_identical(actual, expected)\n\n actual = data.reindex(dim1=data[\"dim1\"].values)\n assert_identical(actual, expected)\n\n actual = data.reindex(dim1=data[\"dim1\"].to_index())\n assert_identical(actual, expected)\n\n with pytest.raises(\n ValueError, match=r\"cannot reindex or align along dimension\"\n ):\n data.reindex(dim1=data[\"dim1\"][:5])\n\n expected = data.isel(dim2=slice(5))\n actual = 
data.reindex(dim2=data[\"dim2\"][:5])\n assert_identical(actual, expected)\n\n # test dict-like argument\n actual = data.reindex({\"dim2\": data[\"dim2\"]})\n expected = data\n assert_identical(actual, expected)\n with pytest.raises(ValueError, match=r\"cannot specify both\"):\n data.reindex({\"x\": 0}, x=0)\n with pytest.raises(ValueError, match=r\"dictionary\"):\n data.reindex(\"foo\")\n\n # invalid dimension\n with pytest.raises(ValueError, match=r\"invalid reindex dim\"):\n data.reindex(invalid=0)\n\n # out of order\n expected = data.sel(dim2=data[\"dim2\"][:5:-1])\n actual = data.reindex(dim2=data[\"dim2\"][:5:-1])\n assert_identical(actual, expected)\n\n # multiple fill values\n expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(\n var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),\n var2=lambda ds: ds.var2.copy(data=[[-20, -20, -20, -20]] * len(ds.dim1)),\n )\n actual = data.reindex(\n dim2=[0.1, 2.1, 3.1, 4.1], fill_value={\"var1\": -10, \"var2\": -20}\n )\n assert_identical(actual, expected)\n # use the default value\n expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(\n var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),\n var2=lambda ds: ds.var2.copy(\n data=[[np.nan, np.nan, np.nan, np.nan]] * len(ds.dim1)\n ),\n )\n actual = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1], fill_value={\"var1\": -10})\n assert_identical(actual, expected)\n\n # regression test for #279\n expected = Dataset({\"x\": (\"time\", np.random.randn(5))}, {\"time\": range(5)})\n time2 = DataArray(np.arange(5), dims=\"time2\")\n with pytest.raises(ValueError):\n actual = expected.reindex(time=time2)\n\n # another regression test\n ds = Dataset(\n {\"foo\": ([\"x\", \"y\"], np.zeros((3, 4)))}, {\"x\": range(3), \"y\": range(4)}\n )\n expected = Dataset(\n {\"foo\": ([\"x\", \"y\"], np.zeros((3, 2)))}, {\"x\": [0, 1, 3], \"y\": [0, 1]}\n )\n expected[\"foo\"][-1] = np.nan\n actual = ds.reindex(x=[0, 1, 3], y=[0, 1])\n assert_identical(expected, actual)\n\n def test_reindex_warning(self):\n data = create_test_data()\n\n with pytest.raises(ValueError):\n # DataArray with different dimension raises Future warning\n ind = xr.DataArray([0.0, 1.0], dims=[\"new_dim\"], name=\"ind\")\n data.reindex(dim2=ind)\n\n # Should not warn\n ind = xr.DataArray([0.0, 1.0], dims=[\"dim2\"], name=\"ind\")\n with pytest.warns(None) as ws:\n data.reindex(dim2=ind)\n assert len(ws) == 0\n\n def test_reindex_variables_copied(self):\n data = create_test_data()\n reindexed_data = data.reindex(copy=False)\n for k in data.variables:\n assert reindexed_data.variables[k] is not data.variables[k]\n\n def test_reindex_method(self):\n ds = Dataset({\"x\": (\"y\", [10, 20]), \"y\": [0, 1]})\n y = [-0.5, 0.5, 1.5]\n actual = ds.reindex(y=y, method=\"backfill\")\n expected = Dataset({\"x\": (\"y\", [10, 20, np.nan]), \"y\": y})\n assert_identical(expected, actual)\n\n actual = ds.reindex(y=y, method=\"backfill\", tolerance=0.1)\n expected = Dataset({\"x\": (\"y\", 3 * [np.nan]), \"y\": y})\n assert_identical(expected, actual)\n\n actual = ds.reindex(y=y, method=\"pad\")\n expected = Dataset({\"x\": (\"y\", [np.nan, 10, 20]), \"y\": y})\n assert_identical(expected, actual)\n\n alt = Dataset({\"y\": y})\n actual = ds.reindex_like(alt, method=\"pad\")\n assert_identical(expected, actual)\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0, {\"x\": 2, \"z\": 1}])\n def test_reindex_fill_value(self, fill_value):\n ds = Dataset({\"x\": (\"y\", [10, 20]), \"z\": (\"y\", [-20, -10]), 
\"y\": [0, 1]})\n y = [0, 1, 2]\n actual = ds.reindex(y=y, fill_value=fill_value)\n if fill_value == dtypes.NA:\n # if we supply the default, we expect the missing value for a\n # float array\n fill_value_x = fill_value_z = np.nan\n elif isinstance(fill_value, dict):\n fill_value_x = fill_value[\"x\"]\n fill_value_z = fill_value[\"z\"]\n else:\n fill_value_x = fill_value_z = fill_value\n expected = Dataset(\n {\n \"x\": (\"y\", [10, 20, fill_value_x]),\n \"z\": (\"y\", [-20, -10, fill_value_z]),\n \"y\": y,\n }\n )\n assert_identical(expected, actual)\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0, {\"x\": 2, \"z\": 1}])\n def test_reindex_like_fill_value(self, fill_value):\n ds = Dataset({\"x\": (\"y\", [10, 20]), \"z\": (\"y\", [-20, -10]), \"y\": [0, 1]})\n y = [0, 1, 2]\n alt = Dataset({\"y\": y})\n actual = ds.reindex_like(alt, fill_value=fill_value)\n if fill_value == dtypes.NA:\n # if we supply the default, we expect the missing value for a\n # float array\n fill_value_x = fill_value_z = np.nan\n elif isinstance(fill_value, dict):\n fill_value_x = fill_value[\"x\"]\n fill_value_z = fill_value[\"z\"]\n else:\n fill_value_x = fill_value_z = fill_value\n expected = Dataset(\n {\n \"x\": (\"y\", [10, 20, fill_value_x]),\n \"z\": (\"y\", [-20, -10, fill_value_z]),\n \"y\": y,\n }\n )\n assert_identical(expected, actual)\n\n @pytest.mark.parametrize(\"dtype\", [str, bytes])\n def test_reindex_str_dtype(self, dtype):\n data = Dataset({\"data\": (\"x\", [1, 2]), \"x\": np.array([\"a\", \"b\"], dtype=dtype)})\n\n actual = data.reindex(x=data.x)\n expected = data\n\n assert_identical(expected, actual)\n assert actual.x.dtype == expected.x.dtype\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0, {\"foo\": 2, \"bar\": 1}])\n def test_align_fill_value(self, fill_value):\n x = Dataset({\"foo\": DataArray([1, 2], dims=[\"x\"], coords={\"x\": [1, 2]})})\n y = Dataset({\"bar\": DataArray([1, 2], dims=[\"x\"], coords={\"x\": [1, 3]})})\n x2, y2 = align(x, y, join=\"outer\", fill_value=fill_value)\n if fill_value == dtypes.NA:\n # if we supply the default, we expect the missing value for a\n # float array\n fill_value_foo = fill_value_bar = np.nan\n elif isinstance(fill_value, dict):\n fill_value_foo = fill_value[\"foo\"]\n fill_value_bar = fill_value[\"bar\"]\n else:\n fill_value_foo = fill_value_bar = fill_value\n\n expected_x2 = Dataset(\n {\n \"foo\": DataArray(\n [1, 2, fill_value_foo], dims=[\"x\"], coords={\"x\": [1, 2, 3]}\n )\n }\n )\n expected_y2 = Dataset(\n {\n \"bar\": DataArray(\n [1, fill_value_bar, 2], dims=[\"x\"], coords={\"x\": [1, 2, 3]}\n )\n }\n )\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n def test_align(self):\n left = create_test_data()\n right = left.copy(deep=True)\n right[\"dim3\"] = (\"dim3\", list(\"cdefghijkl\"))\n right[\"var3\"][:-2] = right[\"var3\"][2:].values\n right[\"var3\"][-2:] = np.random.randn(*right[\"var3\"][-2:].shape)\n right[\"numbers\"][:-2] = right[\"numbers\"][2:].values\n right[\"numbers\"][-2:] = -10\n\n intersection = list(\"cdefghij\")\n union = list(\"abcdefghijkl\")\n\n left2, right2 = align(left, right, join=\"inner\")\n assert_array_equal(left2[\"dim3\"], intersection)\n assert_identical(left2, right2)\n\n left2, right2 = align(left, right, join=\"outer\")\n\n assert_array_equal(left2[\"dim3\"], union)\n assert_equal(left2[\"dim3\"].variable, right2[\"dim3\"].variable)\n\n assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))\n assert 
np.isnan(left2[\"var3\"][-2:]).all()\n assert np.isnan(right2[\"var3\"][:2]).all()\n\n left2, right2 = align(left, right, join=\"left\")\n assert_equal(left2[\"dim3\"].variable, right2[\"dim3\"].variable)\n assert_equal(left2[\"dim3\"].variable, left[\"dim3\"].variable)\n\n assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))\n assert np.isnan(right2[\"var3\"][:2]).all()\n\n left2, right2 = align(left, right, join=\"right\")\n assert_equal(left2[\"dim3\"].variable, right2[\"dim3\"].variable)\n assert_equal(left2[\"dim3\"].variable, right[\"dim3\"].variable)\n\n assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))\n\n assert np.isnan(left2[\"var3\"][-2:]).all()\n\n with pytest.raises(ValueError, match=r\"invalid value for join\"):\n align(left, right, join=\"foobar\")\n with pytest.raises(TypeError):\n align(left, right, foo=\"bar\")\n\n def test_align_exact(self):\n left = xr.Dataset(coords={\"x\": [0, 1]})\n right = xr.Dataset(coords={\"x\": [1, 2]})\n\n left1, left2 = xr.align(left, left, join=\"exact\")\n assert_identical(left1, left)\n assert_identical(left2, left)\n\n with pytest.raises(ValueError, match=r\"indexes .* not equal\"):\n xr.align(left, right, join=\"exact\")\n\n def test_align_override(self):\n left = xr.Dataset(coords={\"x\": [0, 1, 2]})\n right = xr.Dataset(coords={\"x\": [0.1, 1.1, 2.1], \"y\": [1, 2, 3]})\n expected_right = xr.Dataset(coords={\"x\": [0, 1, 2], \"y\": [1, 2, 3]})\n\n new_left, new_right = xr.align(left, right, join=\"override\")\n assert_identical(left, new_left)\n assert_identical(new_right, expected_right)\n\n new_left, new_right = xr.align(left, right, exclude=\"x\", join=\"override\")\n assert_identical(left, new_left)\n assert_identical(right, new_right)\n\n new_left, new_right = xr.align(\n left.isel(x=0, drop=True), right, exclude=\"x\", join=\"override\"\n )\n assert_identical(left.isel(x=0, drop=True), new_left)\n assert_identical(right, new_right)\n\n with pytest.raises(ValueError, match=r\"Indexes along dimension 'x' don't have\"):\n xr.align(left.isel(x=0).expand_dims(\"x\"), right, join=\"override\")\n\n def test_align_exclude(self):\n x = Dataset(\n {\n \"foo\": DataArray(\n [[1, 2], [3, 4]], dims=[\"x\", \"y\"], coords={\"x\": [1, 2], \"y\": [3, 4]}\n )\n }\n )\n y = Dataset(\n {\n \"bar\": DataArray(\n [[1, 2], [3, 4]], dims=[\"x\", \"y\"], coords={\"x\": [1, 3], \"y\": [5, 6]}\n )\n }\n )\n x2, y2 = align(x, y, exclude=[\"y\"], join=\"outer\")\n\n expected_x2 = Dataset(\n {\n \"foo\": DataArray(\n [[1, 2], [3, 4], [np.nan, np.nan]],\n dims=[\"x\", \"y\"],\n coords={\"x\": [1, 2, 3], \"y\": [3, 4]},\n )\n }\n )\n expected_y2 = Dataset(\n {\n \"bar\": DataArray(\n [[1, 2], [np.nan, np.nan], [3, 4]],\n dims=[\"x\", \"y\"],\n coords={\"x\": [1, 2, 3], \"y\": [5, 6]},\n )\n }\n )\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n def test_align_nocopy(self):\n x = Dataset({\"foo\": DataArray([1, 2, 3], coords=[(\"x\", [1, 2, 3])])})\n y = Dataset({\"foo\": DataArray([1, 2], coords=[(\"x\", [1, 2])])})\n expected_x2 = x\n expected_y2 = Dataset(\n {\"foo\": DataArray([1, 2, np.nan], coords=[(\"x\", [1, 2, 3])])}\n )\n\n x2, y2 = align(x, y, copy=False, join=\"outer\")\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n assert source_ndarray(x[\"foo\"].data) is source_ndarray(x2[\"foo\"].data)\n\n x2, y2 = align(x, y, copy=True, join=\"outer\")\n assert source_ndarray(x[\"foo\"].data) is not source_ndarray(x2[\"foo\"].data)\n 
assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n def test_align_indexes(self):\n x = Dataset({\"foo\": DataArray([1, 2, 3], dims=\"x\", coords=[(\"x\", [1, 2, 3])])})\n (x2,) = align(x, indexes={\"x\": [2, 3, 1]})\n expected_x2 = Dataset(\n {\"foo\": DataArray([2, 3, 1], dims=\"x\", coords={\"x\": [2, 3, 1]})}\n )\n\n assert_identical(expected_x2, x2)\n\n def test_align_non_unique(self):\n x = Dataset({\"foo\": (\"x\", [3, 4, 5]), \"x\": [0, 0, 1]})\n x1, x2 = align(x, x)\n assert_identical(x1, x)\n assert_identical(x2, x)\n\n y = Dataset({\"bar\": (\"x\", [6, 7]), \"x\": [0, 1]})\n with pytest.raises(ValueError, match=r\"cannot reindex or align\"):\n align(x, y)\n\n def test_align_str_dtype(self):\n\n a = Dataset({\"foo\": (\"x\", [0, 1]), \"x\": [\"a\", \"b\"]})\n b = Dataset({\"foo\": (\"x\", [1, 2]), \"x\": [\"b\", \"c\"]})\n\n expected_a = Dataset({\"foo\": (\"x\", [0, 1, np.NaN]), \"x\": [\"a\", \"b\", \"c\"]})\n expected_b = Dataset({\"foo\": (\"x\", [np.NaN, 1, 2]), \"x\": [\"a\", \"b\", \"c\"]})\n\n actual_a, actual_b = xr.align(a, b, join=\"outer\")\n\n assert_identical(expected_a, actual_a)\n assert expected_a.x.dtype == actual_a.x.dtype\n\n assert_identical(expected_b, actual_b)\n assert expected_b.x.dtype == actual_b.x.dtype\n\n def test_broadcast(self):\n ds = Dataset(\n {\"foo\": 0, \"bar\": (\"x\", [1]), \"baz\": (\"y\", [2, 3])}, {\"c\": (\"x\", [4])}\n )\n expected = Dataset(\n {\n \"foo\": ((\"x\", \"y\"), [[0, 0]]),\n \"bar\": ((\"x\", \"y\"), [[1, 1]]),\n \"baz\": ((\"x\", \"y\"), [[2, 3]]),\n },\n {\"c\": (\"x\", [4])},\n )\n (actual,) = broadcast(ds)\n assert_identical(expected, actual)\n\n ds_x = Dataset({\"foo\": (\"x\", [1])})\n ds_y = Dataset({\"bar\": (\"y\", [2, 3])})\n expected_x = Dataset({\"foo\": ((\"x\", \"y\"), [[1, 1]])})\n expected_y = Dataset({\"bar\": ((\"x\", \"y\"), [[2, 3]])})\n actual_x, actual_y = broadcast(ds_x, ds_y)\n assert_identical(expected_x, actual_x)\n assert_identical(expected_y, actual_y)\n\n array_y = ds_y[\"bar\"]\n expected_y = expected_y[\"bar\"]\n actual_x, actual_y = broadcast(ds_x, array_y)\n assert_identical(expected_x, actual_x)\n assert_identical(expected_y, actual_y)\n\n def test_broadcast_nocopy(self):\n # Test that data is not copied if not needed\n x = Dataset({\"foo\": ((\"x\", \"y\"), [[1, 1]])})\n y = Dataset({\"bar\": (\"y\", [2, 3])})\n\n (actual_x,) = broadcast(x)\n assert_identical(x, actual_x)\n assert source_ndarray(actual_x[\"foo\"].data) is source_ndarray(x[\"foo\"].data)\n\n actual_x, actual_y = broadcast(x, y)\n assert_identical(x, actual_x)\n assert source_ndarray(actual_x[\"foo\"].data) is source_ndarray(x[\"foo\"].data)\n\n def test_broadcast_exclude(self):\n x = Dataset(\n {\n \"foo\": DataArray(\n [[1, 2], [3, 4]], dims=[\"x\", \"y\"], coords={\"x\": [1, 2], \"y\": [3, 4]}\n ),\n \"bar\": DataArray(5),\n }\n )\n y = Dataset(\n {\n \"foo\": DataArray(\n [[1, 2]], dims=[\"z\", \"y\"], coords={\"z\": [1], \"y\": [5, 6]}\n )\n }\n )\n x2, y2 = broadcast(x, y, exclude=[\"y\"])\n\n expected_x2 = Dataset(\n {\n \"foo\": DataArray(\n [[[1, 2]], [[3, 4]]],\n dims=[\"x\", \"z\", \"y\"],\n coords={\"z\": [1], \"x\": [1, 2], \"y\": [3, 4]},\n ),\n \"bar\": DataArray(\n [[5], [5]], dims=[\"x\", \"z\"], coords={\"x\": [1, 2], \"z\": [1]}\n ),\n }\n )\n expected_y2 = Dataset(\n {\n \"foo\": DataArray(\n [[[1, 2]], [[1, 2]]],\n dims=[\"x\", \"z\", \"y\"],\n coords={\"z\": [1], \"x\": [1, 2], \"y\": [5, 6]},\n )\n }\n )\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n 
def test_broadcast_misaligned(self):\n x = Dataset({\"foo\": DataArray([1, 2, 3], coords=[(\"x\", [-1, -2, -3])])})\n y = Dataset(\n {\n \"bar\": DataArray(\n [[1, 2], [3, 4]],\n dims=[\"y\", \"x\"],\n coords={\"y\": [1, 2], \"x\": [10, -3]},\n )\n }\n )\n x2, y2 = broadcast(x, y)\n expected_x2 = Dataset(\n {\n \"foo\": DataArray(\n [[3, 3], [2, 2], [1, 1], [np.nan, np.nan]],\n dims=[\"x\", \"y\"],\n coords={\"y\": [1, 2], \"x\": [-3, -2, -1, 10]},\n )\n }\n )\n expected_y2 = Dataset(\n {\n \"bar\": DataArray(\n [[2, 4], [np.nan, np.nan], [np.nan, np.nan], [1, 3]],\n dims=[\"x\", \"y\"],\n coords={\"y\": [1, 2], \"x\": [-3, -2, -1, 10]},\n )\n }\n )\n assert_identical(expected_x2, x2)\n assert_identical(expected_y2, y2)\n\n def test_variable_indexing(self):\n data = create_test_data()\n v = data[\"var1\"]\n d1 = data[\"dim1\"]\n d2 = data[\"dim2\"]\n assert_equal(v, v[d1.values])\n assert_equal(v, v[d1])\n assert_equal(v[:3], v[d1 < 3])\n assert_equal(v[:, 3:], v[:, d2 >= 1.5])\n assert_equal(v[:3, 3:], v[d1 < 3, d2 >= 1.5])\n assert_equal(v[:3, :2], v[range(3), range(2)])\n assert_equal(v[:3, :2], v.loc[d1[:3], d2[:2]])\n\n def test_drop_variables(self):\n data = create_test_data()\n\n assert_identical(data, data.drop_vars([]))\n\n expected = Dataset({k: data[k] for k in data.variables if k != \"time\"})\n actual = data.drop_vars(\"time\")\n assert_identical(expected, actual)\n actual = data.drop_vars([\"time\"])\n assert_identical(expected, actual)\n\n with pytest.raises(ValueError, match=r\"cannot be found\"):\n data.drop_vars(\"not_found_here\")\n\n actual = data.drop_vars(\"not_found_here\", errors=\"ignore\")\n assert_identical(data, actual)\n\n actual = data.drop_vars([\"not_found_here\"], errors=\"ignore\")\n assert_identical(data, actual)\n\n actual = data.drop_vars([\"time\", \"not_found_here\"], errors=\"ignore\")\n assert_identical(expected, actual)\n\n # deprecated approach with `drop` works (straight copy paste from above)\n\n with pytest.warns(PendingDeprecationWarning):\n actual = data.drop(\"not_found_here\", errors=\"ignore\")\n assert_identical(data, actual)\n\n with pytest.warns(PendingDeprecationWarning):\n actual = data.drop([\"not_found_here\"], errors=\"ignore\")\n assert_identical(data, actual)\n\n with pytest.warns(PendingDeprecationWarning):\n actual = data.drop([\"time\", \"not_found_here\"], errors=\"ignore\")\n assert_identical(expected, actual)\n\n with pytest.warns(PendingDeprecationWarning):\n actual = data.drop({\"time\", \"not_found_here\"}, errors=\"ignore\")\n assert_identical(expected, actual)\n\n def test_drop_index_labels(self):\n data = Dataset({\"A\": ([\"x\", \"y\"], np.random.randn(2, 3)), \"x\": [\"a\", \"b\"]})\n\n with pytest.warns(DeprecationWarning):\n actual = data.drop([\"a\"], dim=\"x\")\n expected = data.isel(x=[1])\n assert_identical(expected, actual)\n\n with pytest.warns(DeprecationWarning):\n actual = data.drop([\"a\", \"b\"], dim=\"x\")\n expected = data.isel(x=slice(0, 0))\n assert_identical(expected, actual)\n\n with pytest.raises(KeyError):\n # not contained in axis\n with pytest.warns(DeprecationWarning):\n data.drop([\"c\"], dim=\"x\")\n\n with pytest.warns(DeprecationWarning):\n actual = data.drop([\"c\"], dim=\"x\", errors=\"ignore\")\n assert_identical(data, actual)\n\n with pytest.raises(ValueError):\n with pytest.warns(DeprecationWarning):\n data.drop([\"c\"], dim=\"x\", errors=\"wrong_value\")\n\n with pytest.warns(DeprecationWarning):\n actual = data.drop([\"a\", \"b\", \"c\"], \"x\", errors=\"ignore\")\n expected = 
data.isel(x=slice(0, 0))\n assert_identical(expected, actual)\n\n # DataArrays as labels are a nasty corner case as they are not\n # Iterable[Hashable] - DataArray.__iter__ yields scalar DataArrays.\n actual = data.drop_sel(x=DataArray([\"a\", \"b\", \"c\"]), errors=\"ignore\")\n expected = data.isel(x=slice(0, 0))\n assert_identical(expected, actual)\n with pytest.warns(DeprecationWarning):\n data.drop(DataArray([\"a\", \"b\", \"c\"]), dim=\"x\", errors=\"ignore\")\n assert_identical(expected, actual)\n\n actual = data.drop_sel(y=[1])\n expected = data.isel(y=[0, 2])\n assert_identical(expected, actual)\n\n with pytest.raises(KeyError, match=r\"not found in axis\"):\n data.drop_sel(x=0)\n\n def test_drop_labels_by_keyword(self):\n data = Dataset(\n {\"A\": ([\"x\", \"y\"], np.random.randn(2, 6)), \"x\": [\"a\", \"b\"], \"y\": range(6)}\n )\n # Basic functionality.\n assert len(data.coords[\"x\"]) == 2\n\n with pytest.warns(DeprecationWarning):\n ds1 = data.drop([\"a\"], dim=\"x\")\n ds2 = data.drop_sel(x=\"a\")\n ds3 = data.drop_sel(x=[\"a\"])\n ds4 = data.drop_sel(x=[\"a\", \"b\"])\n ds5 = data.drop_sel(x=[\"a\", \"b\"], y=range(0, 6, 2))\n\n arr = DataArray(range(3), dims=[\"c\"])\n with pytest.warns(FutureWarning):\n data.drop(arr.coords)\n with pytest.warns(FutureWarning):\n data.drop(arr.xindexes)\n\n assert_array_equal(ds1.coords[\"x\"], [\"b\"])\n assert_array_equal(ds2.coords[\"x\"], [\"b\"])\n assert_array_equal(ds3.coords[\"x\"], [\"b\"])\n assert ds4.coords[\"x\"].size == 0\n assert ds5.coords[\"x\"].size == 0\n assert_array_equal(ds5.coords[\"y\"], [1, 3, 5])\n\n # Error handling if user tries both approaches.\n with pytest.raises(ValueError):\n data.drop(labels=[\"a\"], x=\"a\")\n with pytest.raises(ValueError):\n data.drop(labels=[\"a\"], dim=\"x\", x=\"a\")\n warnings.filterwarnings(\"ignore\", r\"\\W*drop\")\n with pytest.raises(ValueError):\n data.drop(dim=\"x\", x=\"a\")\n\n def test_drop_labels_by_position(self):\n data = Dataset(\n {\"A\": ([\"x\", \"y\"], np.random.randn(2, 6)), \"x\": [\"a\", \"b\"], \"y\": range(6)}\n )\n # Basic functionality.\n assert len(data.coords[\"x\"]) == 2\n\n actual = data.drop_isel(x=0)\n expected = data.drop_sel(x=\"a\")\n assert_identical(expected, actual)\n\n actual = data.drop_isel(x=[0])\n expected = data.drop_sel(x=[\"a\"])\n assert_identical(expected, actual)\n\n actual = data.drop_isel(x=[0, 1])\n expected = data.drop_sel(x=[\"a\", \"b\"])\n assert_identical(expected, actual)\n assert actual.coords[\"x\"].size == 0\n\n actual = data.drop_isel(x=[0, 1], y=range(0, 6, 2))\n expected = data.drop_sel(x=[\"a\", \"b\"], y=range(0, 6, 2))\n assert_identical(expected, actual)\n assert actual.coords[\"x\"].size == 0\n\n with pytest.raises(KeyError):\n data.drop_isel(z=1)\n\n def test_drop_dims(self):\n data = xr.Dataset(\n {\n \"A\": ([\"x\", \"y\"], np.random.randn(2, 3)),\n \"B\": (\"x\", np.random.randn(2)),\n \"x\": [\"a\", \"b\"],\n \"z\": np.pi,\n }\n )\n\n actual = data.drop_dims(\"x\")\n expected = data.drop_vars([\"A\", \"B\", \"x\"])\n assert_identical(expected, actual)\n\n actual = data.drop_dims(\"y\")\n expected = data.drop_vars(\"A\")\n assert_identical(expected, actual)\n\n actual = data.drop_dims([\"x\", \"y\"])\n expected = data.drop_vars([\"A\", \"B\", \"x\"])\n assert_identical(expected, actual)\n\n with pytest.raises((ValueError, KeyError)):\n data.drop_dims(\"z\") # not a dimension\n\n with pytest.raises((ValueError, KeyError)):\n data.drop_dims(None)\n\n actual = data.drop_dims(\"z\", errors=\"ignore\")\n 
assert_identical(data, actual)\n\n actual = data.drop_dims(None, errors=\"ignore\")\n assert_identical(data, actual)\n\n with pytest.raises(ValueError):\n actual = data.drop_dims(\"z\", errors=\"wrong_value\")\n\n actual = data.drop_dims([\"x\", \"y\", \"z\"], errors=\"ignore\")\n expected = data.drop_vars([\"A\", \"B\", \"x\"])\n assert_identical(expected, actual)\n\n def test_copy(self):\n data = create_test_data()\n data.attrs[\"Test\"] = [1, 2, 3]\n\n for copied in [data.copy(deep=False), copy(data)]:\n assert_identical(data, copied)\n assert data.encoding == copied.encoding\n # Note: IndexVariable objects with string dtype are always\n # copied because of xarray.core.util.safe_cast_to_index.\n # Limiting the test to data variables.\n for k in data.data_vars:\n v0 = data.variables[k]\n v1 = copied.variables[k]\n assert source_ndarray(v0.data) is source_ndarray(v1.data)\n copied[\"foo\"] = (\"z\", np.arange(5))\n assert \"foo\" not in data\n\n copied.attrs[\"foo\"] = \"bar\"\n assert \"foo\" not in data.attrs\n assert data.attrs[\"Test\"] is copied.attrs[\"Test\"]\n\n for copied in [data.copy(deep=True), deepcopy(data)]:\n assert_identical(data, copied)\n for k, v0 in data.variables.items():\n v1 = copied.variables[k]\n assert v0 is not v1\n\n assert data.attrs[\"Test\"] is not copied.attrs[\"Test\"]\n\n def test_copy_with_data(self):\n orig = create_test_data()\n new_data = {k: np.random.randn(*v.shape) for k, v in orig.data_vars.items()}\n actual = orig.copy(data=new_data)\n\n expected = orig.copy()\n for k, v in new_data.items():\n expected[k].data = v\n assert_identical(expected, actual)\n\n @pytest.mark.xfail(raises=AssertionError)\n @pytest.mark.parametrize(\n \"deep, expected_orig\",\n [\n [\n True,\n xr.DataArray(\n xr.IndexVariable(\"a\", np.array([1, 2])),\n coords={\"a\": [1, 2]},\n dims=[\"a\"],\n ),\n ],\n [\n False,\n xr.DataArray(\n xr.IndexVariable(\"a\", np.array([999, 2])),\n coords={\"a\": [999, 2]},\n dims=[\"a\"],\n ),\n ],\n ],\n )\n def test_copy_coords(self, deep, expected_orig):\n \"\"\"The test fails for the shallow copy, and apparently only on Windows\n for some reason. 
In windows coords seem to be immutable unless it's one\n dataset deep copied from another.\"\"\"\n ds = xr.DataArray(\n np.ones([2, 2, 2]),\n coords={\"a\": [1, 2], \"b\": [\"x\", \"y\"], \"c\": [0, 1]},\n dims=[\"a\", \"b\", \"c\"],\n name=\"value\",\n ).to_dataset()\n ds_cp = ds.copy(deep=deep)\n ds_cp.coords[\"a\"].data[0] = 999\n\n expected_cp = xr.DataArray(\n xr.IndexVariable(\"a\", np.array([999, 2])),\n coords={\"a\": [999, 2]},\n dims=[\"a\"],\n )\n assert_identical(ds_cp.coords[\"a\"], expected_cp)\n\n assert_identical(ds.coords[\"a\"], expected_orig)\n\n def test_copy_with_data_errors(self):\n orig = create_test_data()\n new_var1 = np.arange(orig[\"var1\"].size).reshape(orig[\"var1\"].shape)\n with pytest.raises(ValueError, match=r\"Data must be dict-like\"):\n orig.copy(data=new_var1)\n with pytest.raises(ValueError, match=r\"only contain variables in original\"):\n orig.copy(data={\"not_in_original\": new_var1})\n with pytest.raises(ValueError, match=r\"contain all variables in original\"):\n orig.copy(data={\"var1\": new_var1})\n\n def test_rename(self):\n data = create_test_data()\n newnames = {\"var1\": \"renamed_var1\", \"dim2\": \"renamed_dim2\"}\n renamed = data.rename(newnames)\n\n variables = dict(data.variables)\n for k, v in newnames.items():\n variables[v] = variables.pop(k)\n\n for k, v in variables.items():\n dims = list(v.dims)\n for name, newname in newnames.items():\n if name in dims:\n dims[dims.index(name)] = newname\n\n assert_equal(\n Variable(dims, v.values, v.attrs),\n renamed[k].variable.to_base_variable(),\n )\n assert v.encoding == renamed[k].encoding\n assert type(v) is type(renamed.variables[k]) # noqa: E721\n\n assert \"var1\" not in renamed\n assert \"dim2\" not in renamed\n\n with pytest.raises(ValueError, match=r\"cannot rename 'not_a_var'\"):\n data.rename({\"not_a_var\": \"nada\"})\n\n with pytest.raises(ValueError, match=r\"'var1' conflicts\"):\n data.rename({\"var2\": \"var1\"})\n\n # verify that we can rename a variable without accessing the data\n var1 = data[\"var1\"]\n data[\"var1\"] = (var1.dims, InaccessibleArray(var1.values))\n renamed = data.rename(newnames)\n with pytest.raises(UnexpectedDataAccess):\n renamed[\"renamed_var1\"].values\n\n renamed_kwargs = data.rename(**newnames)\n assert_identical(renamed, renamed_kwargs)\n\n def test_rename_old_name(self):\n # regtest for GH1477\n data = create_test_data()\n\n with pytest.raises(ValueError, match=r\"'samecol' conflicts\"):\n data.rename({\"var1\": \"samecol\", \"var2\": \"samecol\"})\n\n # This shouldn't cause any problems.\n data.rename({\"var1\": \"var2\", \"var2\": \"var1\"})\n\n def test_rename_same_name(self):\n data = create_test_data()\n newnames = {\"var1\": \"var1\", \"dim2\": \"dim2\"}\n renamed = data.rename(newnames)\n assert_identical(renamed, data)\n\n def test_rename_dims(self):\n original = Dataset({\"x\": (\"x\", [0, 1, 2]), \"y\": (\"x\", [10, 11, 12]), \"z\": 42})\n expected = Dataset(\n {\"x\": (\"x_new\", [0, 1, 2]), \"y\": (\"x_new\", [10, 11, 12]), \"z\": 42}\n )\n expected = expected.set_coords(\"x\")\n dims_dict = {\"x\": \"x_new\"}\n actual = original.rename_dims(dims_dict)\n assert_identical(expected, actual)\n actual_2 = original.rename_dims(**dims_dict)\n assert_identical(expected, actual_2)\n\n # Test to raise ValueError\n dims_dict_bad = {\"x_bad\": \"x_new\"}\n with pytest.raises(ValueError):\n original.rename_dims(dims_dict_bad)\n\n with pytest.raises(ValueError):\n original.rename_dims({\"x\": \"z\"})\n\n def test_rename_vars(self):\n original = 
Dataset({\"x\": (\"x\", [0, 1, 2]), \"y\": (\"x\", [10, 11, 12]), \"z\": 42})\n expected = Dataset(\n {\"x_new\": (\"x\", [0, 1, 2]), \"y\": (\"x\", [10, 11, 12]), \"z\": 42}\n )\n expected = expected.set_coords(\"x_new\")\n name_dict = {\"x\": \"x_new\"}\n actual = original.rename_vars(name_dict)\n assert_identical(expected, actual)\n actual_2 = original.rename_vars(**name_dict)\n assert_identical(expected, actual_2)\n\n # Test to raise ValueError\n names_dict_bad = {\"x_bad\": \"x_new\"}\n with pytest.raises(ValueError):\n original.rename_vars(names_dict_bad)\n\n def test_rename_multiindex(self):\n mindex = pd.MultiIndex.from_tuples(\n [([1, 2]), ([3, 4])], names=[\"level0\", \"level1\"]\n )\n data = Dataset({}, {\"x\": mindex})\n with pytest.raises(ValueError, match=r\"conflicting MultiIndex\"):\n data.rename({\"x\": \"level0\"})\n\n @requires_cftime\n def test_rename_does_not_change_CFTimeIndex_type(self):\n # make sure CFTimeIndex is not converted to DatetimeIndex #3522\n\n time = xr.cftime_range(start=\"2000\", periods=6, freq=\"2MS\", calendar=\"noleap\")\n orig = Dataset(coords={\"time\": time})\n\n renamed = orig.rename(time=\"time_new\")\n assert \"time_new\" in renamed.xindexes\n # TODO: benbovy - flexible indexes: update when CFTimeIndex\n # inherits from xarray.Index\n assert isinstance(renamed.xindexes[\"time_new\"].to_pandas_index(), CFTimeIndex)\n assert renamed.xindexes[\"time_new\"].to_pandas_index().name == \"time_new\"\n\n # check original has not changed\n assert \"time\" in orig.xindexes\n assert isinstance(orig.xindexes[\"time\"].to_pandas_index(), CFTimeIndex)\n assert orig.xindexes[\"time\"].to_pandas_index().name == \"time\"\n\n # note: rename_dims(time=\"time_new\") drops \"ds.indexes\"\n renamed = orig.rename_dims()\n assert isinstance(renamed.xindexes[\"time\"].to_pandas_index(), CFTimeIndex)\n\n renamed = orig.rename_vars()\n assert isinstance(renamed.xindexes[\"time\"].to_pandas_index(), CFTimeIndex)\n\n def test_rename_does_not_change_DatetimeIndex_type(self):\n # make sure DatetimeIndex is conderved on rename\n\n time = pd.date_range(start=\"2000\", periods=6, freq=\"2MS\")\n orig = Dataset(coords={\"time\": time})\n\n renamed = orig.rename(time=\"time_new\")\n assert \"time_new\" in renamed.xindexes\n # TODO: benbovy - flexible indexes: update when DatetimeIndex\n # inherits from xarray.Index?\n assert isinstance(renamed.xindexes[\"time_new\"].to_pandas_index(), DatetimeIndex)\n assert renamed.xindexes[\"time_new\"].to_pandas_index().name == \"time_new\"\n\n # check original has not changed\n assert \"time\" in orig.xindexes\n assert isinstance(orig.xindexes[\"time\"].to_pandas_index(), DatetimeIndex)\n assert orig.xindexes[\"time\"].to_pandas_index().name == \"time\"\n\n # note: rename_dims(time=\"time_new\") drops \"ds.indexes\"\n renamed = orig.rename_dims()\n assert isinstance(renamed.xindexes[\"time\"].to_pandas_index(), DatetimeIndex)\n\n renamed = orig.rename_vars()\n assert isinstance(renamed.xindexes[\"time\"].to_pandas_index(), DatetimeIndex)\n\n def test_swap_dims(self):\n original = Dataset({\"x\": [1, 2, 3], \"y\": (\"x\", list(\"abc\")), \"z\": 42})\n expected = Dataset({\"z\": 42}, {\"x\": (\"y\", [1, 2, 3]), \"y\": list(\"abc\")})\n actual = original.swap_dims({\"x\": \"y\"})\n assert_identical(expected, actual)\n assert isinstance(actual.variables[\"y\"], IndexVariable)\n assert isinstance(actual.variables[\"x\"], Variable)\n pd.testing.assert_index_equal(\n actual.xindexes[\"y\"].to_pandas_index(),\n 
expected.xindexes[\"y\"].to_pandas_index(),\n )\n\n roundtripped = actual.swap_dims({\"y\": \"x\"})\n assert_identical(original.set_coords(\"y\"), roundtripped)\n\n with pytest.raises(ValueError, match=r\"cannot swap\"):\n original.swap_dims({\"y\": \"x\"})\n with pytest.raises(ValueError, match=r\"replacement dimension\"):\n original.swap_dims({\"x\": \"z\"})\n\n expected = Dataset(\n {\"y\": (\"u\", list(\"abc\")), \"z\": 42}, coords={\"x\": (\"u\", [1, 2, 3])}\n )\n actual = original.swap_dims({\"x\": \"u\"})\n assert_identical(expected, actual)\n\n # as kwargs\n expected = Dataset(\n {\"y\": (\"u\", list(\"abc\")), \"z\": 42}, coords={\"x\": (\"u\", [1, 2, 3])}\n )\n actual = original.swap_dims(x=\"u\")\n assert_identical(expected, actual)\n\n # handle multiindex case\n idx = pd.MultiIndex.from_arrays([list(\"aab\"), list(\"yzz\")], names=[\"y1\", \"y2\"])\n original = Dataset({\"x\": [1, 2, 3], \"y\": (\"x\", idx), \"z\": 42})\n expected = Dataset({\"z\": 42}, {\"x\": (\"y\", [1, 2, 3]), \"y\": idx})\n actual = original.swap_dims({\"x\": \"y\"})\n assert_identical(expected, actual)\n assert isinstance(actual.variables[\"y\"], IndexVariable)\n assert isinstance(actual.variables[\"x\"], Variable)\n pd.testing.assert_index_equal(\n actual.xindexes[\"y\"].to_pandas_index(),\n expected.xindexes[\"y\"].to_pandas_index(),\n )\n\n def test_expand_dims_error(self):\n original = Dataset(\n {\n \"x\": (\"a\", np.random.randn(3)),\n \"y\": ([\"b\", \"a\"], np.random.randn(4, 3)),\n \"z\": (\"a\", np.random.randn(3)),\n },\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n\n with pytest.raises(ValueError, match=r\"already exists\"):\n original.expand_dims(dim=[\"x\"])\n\n # Make sure it raises true error also for non-dimensional coordinates\n # which has dimension.\n original = original.set_coords(\"z\")\n with pytest.raises(ValueError, match=r\"already exists\"):\n original.expand_dims(dim=[\"z\"])\n\n original = Dataset(\n {\n \"x\": (\"a\", np.random.randn(3)),\n \"y\": ([\"b\", \"a\"], np.random.randn(4, 3)),\n \"z\": (\"a\", np.random.randn(3)),\n },\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n with pytest.raises(TypeError, match=r\"value of new dimension\"):\n original.expand_dims({\"d\": 3.2})\n with pytest.raises(ValueError, match=r\"both keyword and positional\"):\n original.expand_dims({\"d\": 4}, e=4)\n\n def test_expand_dims_int(self):\n original = Dataset(\n {\"x\": (\"a\", np.random.randn(3)), \"y\": ([\"b\", \"a\"], np.random.randn(4, 3))},\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n\n actual = original.expand_dims([\"z\"], [1])\n expected = Dataset(\n {\n \"x\": original[\"x\"].expand_dims(\"z\", 1),\n \"y\": original[\"y\"].expand_dims(\"z\", 1),\n },\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n assert_identical(expected, actual)\n # make sure squeeze restores the original data set.\n roundtripped = actual.squeeze(\"z\")\n assert_identical(original, roundtripped)\n\n # another test with a negative axis\n actual = original.expand_dims([\"z\"], [-1])\n expected = Dataset(\n {\n \"x\": original[\"x\"].expand_dims(\"z\", -1),\n \"y\": original[\"y\"].expand_dims(\"z\", -1),\n },\n coords={\n 
\"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n assert_identical(expected, actual)\n # make sure squeeze restores the original data set.\n roundtripped = actual.squeeze(\"z\")\n assert_identical(original, roundtripped)\n\n def test_expand_dims_coords(self):\n original = Dataset({\"x\": (\"a\", np.array([1, 2, 3]))})\n expected = Dataset(\n {\"x\": ((\"b\", \"a\"), np.array([[1, 2, 3], [1, 2, 3]]))}, coords={\"b\": [1, 2]}\n )\n actual = original.expand_dims(dict(b=[1, 2]))\n assert_identical(expected, actual)\n assert \"b\" not in original._coord_names\n\n def test_expand_dims_existing_scalar_coord(self):\n original = Dataset({\"x\": 1}, {\"a\": 2})\n expected = Dataset({\"x\": ((\"a\",), [1])}, {\"a\": [2]})\n actual = original.expand_dims(\"a\")\n assert_identical(expected, actual)\n\n def test_isel_expand_dims_roundtrip(self):\n original = Dataset({\"x\": ((\"a\",), [1])}, {\"a\": [2]})\n actual = original.isel(a=0).expand_dims(\"a\")\n assert_identical(actual, original)\n\n def test_expand_dims_mixed_int_and_coords(self):\n # Test expanding one dimension to have size > 1 that doesn't have\n # coordinates, and also expanding another dimension to have size > 1\n # that DOES have coordinates.\n original = Dataset(\n {\"x\": (\"a\", np.random.randn(3)), \"y\": ([\"b\", \"a\"], np.random.randn(4, 3))},\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n )\n\n actual = original.expand_dims({\"d\": 4, \"e\": [\"l\", \"m\", \"n\"]})\n\n expected = Dataset(\n {\n \"x\": xr.DataArray(\n original[\"x\"].values * np.ones([4, 3, 3]),\n coords=dict(d=range(4), e=[\"l\", \"m\", \"n\"], a=np.linspace(0, 1, 3)),\n dims=[\"d\", \"e\", \"a\"],\n ).drop_vars(\"d\"),\n \"y\": xr.DataArray(\n original[\"y\"].values * np.ones([4, 3, 4, 3]),\n coords=dict(\n d=range(4),\n e=[\"l\", \"m\", \"n\"],\n b=np.linspace(0, 1, 4),\n a=np.linspace(0, 1, 3),\n ),\n dims=[\"d\", \"e\", \"b\", \"a\"],\n ).drop_vars(\"d\"),\n },\n coords={\"c\": np.linspace(0, 1, 5)},\n )\n assert_identical(actual, expected)\n\n def test_expand_dims_kwargs_python36plus(self):\n original = Dataset(\n {\"x\": (\"a\", np.random.randn(3)), \"y\": ([\"b\", \"a\"], np.random.randn(4, 3))},\n coords={\n \"a\": np.linspace(0, 1, 3),\n \"b\": np.linspace(0, 1, 4),\n \"c\": np.linspace(0, 1, 5),\n },\n attrs={\"key\": \"entry\"},\n )\n other_way = original.expand_dims(e=[\"l\", \"m\", \"n\"])\n other_way_expected = Dataset(\n {\n \"x\": xr.DataArray(\n original[\"x\"].values * np.ones([3, 3]),\n coords=dict(e=[\"l\", \"m\", \"n\"], a=np.linspace(0, 1, 3)),\n dims=[\"e\", \"a\"],\n ),\n \"y\": xr.DataArray(\n original[\"y\"].values * np.ones([3, 4, 3]),\n coords=dict(\n e=[\"l\", \"m\", \"n\"],\n b=np.linspace(0, 1, 4),\n a=np.linspace(0, 1, 3),\n ),\n dims=[\"e\", \"b\", \"a\"],\n ),\n },\n coords={\"c\": np.linspace(0, 1, 5)},\n attrs={\"key\": \"entry\"},\n )\n assert_identical(other_way_expected, other_way)\n\n def test_set_index(self):\n expected = create_test_multiindex()\n mindex = expected[\"x\"].to_index()\n indexes = [mindex.get_level_values(n) for n in mindex.names]\n coords = {idx.name: (\"x\", idx) for idx in indexes}\n ds = Dataset({}, coords=coords)\n\n obj = ds.set_index(x=mindex.names)\n assert_identical(obj, expected)\n\n # ensure set_index with no existing index and a single data var given\n # doesn't return multi-index\n ds = Dataset(data_vars={\"x_var\": (\"x\", [0, 1, 2])})\n expected = 
Dataset(coords={\"x\": [0, 1, 2]})\n assert_identical(ds.set_index(x=\"x_var\"), expected)\n\n # Issue 3176: Ensure clear error message on key error.\n with pytest.raises(ValueError) as excinfo:\n ds.set_index(foo=\"bar\")\n assert str(excinfo.value) == \"bar is not the name of an existing variable.\"\n\n def test_reset_index(self):\n ds = create_test_multiindex()\n mindex = ds[\"x\"].to_index()\n indexes = [mindex.get_level_values(n) for n in mindex.names]\n coords = {idx.name: (\"x\", idx) for idx in indexes}\n expected = Dataset({}, coords=coords)\n\n obj = ds.reset_index(\"x\")\n assert_identical(obj, expected)\n\n def test_reset_index_keep_attrs(self):\n coord_1 = DataArray([1, 2], dims=[\"coord_1\"], attrs={\"attrs\": True})\n ds = Dataset({}, {\"coord_1\": coord_1})\n expected = Dataset({}, {\"coord_1_\": coord_1})\n obj = ds.reset_index(\"coord_1\")\n assert_identical(expected, obj)\n\n def test_reorder_levels(self):\n ds = create_test_multiindex()\n mindex = ds[\"x\"].to_index()\n midx = mindex.reorder_levels([\"level_2\", \"level_1\"])\n expected = Dataset({}, coords={\"x\": midx})\n\n reindexed = ds.reorder_levels(x=[\"level_2\", \"level_1\"])\n assert_identical(reindexed, expected)\n\n ds = Dataset({}, coords={\"x\": [1, 2]})\n with pytest.raises(ValueError, match=r\"has no MultiIndex\"):\n ds.reorder_levels(x=[\"level_1\", \"level_2\"])\n\n def test_stack(self):\n ds = Dataset(\n {\"a\": (\"x\", [0, 1]), \"b\": ((\"x\", \"y\"), [[0, 1], [2, 3]]), \"y\": [\"a\", \"b\"]}\n )\n\n exp_index = pd.MultiIndex.from_product([[0, 1], [\"a\", \"b\"]], names=[\"x\", \"y\"])\n expected = Dataset(\n {\"a\": (\"z\", [0, 0, 1, 1]), \"b\": (\"z\", [0, 1, 2, 3]), \"z\": exp_index}\n )\n actual = ds.stack(z=[\"x\", \"y\"])\n assert_identical(expected, actual)\n\n actual = ds.stack(z=[...])\n assert_identical(expected, actual)\n\n # non list dims with ellipsis\n actual = ds.stack(z=(...,))\n assert_identical(expected, actual)\n\n # ellipsis with given dim\n actual = ds.stack(z=[..., \"y\"])\n assert_identical(expected, actual)\n\n exp_index = pd.MultiIndex.from_product([[\"a\", \"b\"], [0, 1]], names=[\"y\", \"x\"])\n expected = Dataset(\n {\"a\": (\"z\", [0, 1, 0, 1]), \"b\": (\"z\", [0, 2, 1, 3]), \"z\": exp_index}\n )\n actual = ds.stack(z=[\"y\", \"x\"])\n assert_identical(expected, actual)\n\n def test_unstack(self):\n index = pd.MultiIndex.from_product([[0, 1], [\"a\", \"b\"]], names=[\"x\", \"y\"])\n ds = Dataset({\"b\": (\"z\", [0, 1, 2, 3]), \"z\": index})\n expected = Dataset(\n {\"b\": ((\"x\", \"y\"), [[0, 1], [2, 3]]), \"x\": [0, 1], \"y\": [\"a\", \"b\"]}\n )\n for dim in [\"z\", [\"z\"], None]:\n actual = ds.unstack(dim)\n assert_identical(actual, expected)\n\n def test_unstack_errors(self):\n ds = Dataset({\"x\": [1, 2, 3]})\n with pytest.raises(ValueError, match=r\"does not contain the dimensions\"):\n ds.unstack(\"foo\")\n with pytest.raises(ValueError, match=r\"do not have a MultiIndex\"):\n ds.unstack(\"x\")\n\n def test_unstack_fill_value(self):\n ds = xr.Dataset(\n {\"var\": ((\"x\",), np.arange(6)), \"other_var\": ((\"x\",), np.arange(3, 9))},\n coords={\"x\": [0, 1, 2] * 2, \"y\": ((\"x\",), [\"a\"] * 3 + [\"b\"] * 3)},\n )\n # make ds incomplete\n ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=[\"x\", \"y\"])\n # test fill_value\n actual = ds.unstack(\"index\", fill_value=-1)\n expected = ds.unstack(\"index\").fillna(-1).astype(int)\n assert actual[\"var\"].dtype == int\n assert_equal(actual, expected)\n\n actual = ds[\"var\"].unstack(\"index\", fill_value=-1)\n expected = 
ds[\"var\"].unstack(\"index\").fillna(-1).astype(int)\n assert_equal(actual, expected)\n\n actual = ds.unstack(\"index\", fill_value={\"var\": -1, \"other_var\": 1})\n expected = ds.unstack(\"index\").fillna({\"var\": -1, \"other_var\": 1}).astype(int)\n assert_equal(actual, expected)\n\n @requires_sparse\n def test_unstack_sparse(self):\n ds = xr.Dataset(\n {\"var\": ((\"x\",), np.arange(6))},\n coords={\"x\": [0, 1, 2] * 2, \"y\": ((\"x\",), [\"a\"] * 3 + [\"b\"] * 3)},\n )\n # make ds incomplete\n ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=[\"x\", \"y\"])\n # test fill_value\n actual = ds.unstack(\"index\", sparse=True)\n expected = ds.unstack(\"index\")\n assert actual[\"var\"].variable._to_dense().equals(expected[\"var\"].variable)\n assert actual[\"var\"].data.density < 1.0\n\n actual = ds[\"var\"].unstack(\"index\", sparse=True)\n expected = ds[\"var\"].unstack(\"index\")\n assert actual.variable._to_dense().equals(expected.variable)\n assert actual.data.density < 1.0\n\n def test_stack_unstack_fast(self):\n ds = Dataset(\n {\n \"a\": (\"x\", [0, 1]),\n \"b\": ((\"x\", \"y\"), [[0, 1], [2, 3]]),\n \"x\": [0, 1],\n \"y\": [\"a\", \"b\"],\n }\n )\n actual = ds.stack(z=[\"x\", \"y\"]).unstack(\"z\")\n assert actual.broadcast_equals(ds)\n\n actual = ds[[\"b\"]].stack(z=[\"x\", \"y\"]).unstack(\"z\")\n assert actual.identical(ds[[\"b\"]])\n\n def test_stack_unstack_slow(self):\n ds = Dataset(\n {\n \"a\": (\"x\", [0, 1]),\n \"b\": ((\"x\", \"y\"), [[0, 1], [2, 3]]),\n \"x\": [0, 1],\n \"y\": [\"a\", \"b\"],\n }\n )\n stacked = ds.stack(z=[\"x\", \"y\"])\n actual = stacked.isel(z=slice(None, None, -1)).unstack(\"z\")\n assert actual.broadcast_equals(ds)\n\n stacked = ds[[\"b\"]].stack(z=[\"x\", \"y\"])\n actual = stacked.isel(z=slice(None, None, -1)).unstack(\"z\")\n assert actual.identical(ds[[\"b\"]])\n\n def test_to_stacked_array_invalid_sample_dims(self):\n data = xr.Dataset(\n data_vars={\"a\": ((\"x\", \"y\"), [[0, 1, 2], [3, 4, 5]]), \"b\": (\"x\", [6, 7])},\n coords={\"y\": [\"u\", \"v\", \"w\"]},\n )\n with pytest.raises(ValueError):\n data.to_stacked_array(\"features\", sample_dims=[\"y\"])\n\n def test_to_stacked_array_name(self):\n name = \"adf9d\"\n\n # make a two dimensional dataset\n a, b = create_test_stacked_array()\n D = xr.Dataset({\"a\": a, \"b\": b})\n sample_dims = [\"x\"]\n\n y = D.to_stacked_array(\"features\", sample_dims, name=name)\n assert y.name == name\n\n def test_to_stacked_array_dtype_dims(self):\n # make a two dimensional dataset\n a, b = create_test_stacked_array()\n D = xr.Dataset({\"a\": a, \"b\": b})\n sample_dims = [\"x\"]\n y = D.to_stacked_array(\"features\", sample_dims)\n # TODO: benbovy - flexible indexes: update when MultiIndex has its own class\n # inherited from xarray.Index\n assert y.xindexes[\"features\"].to_pandas_index().levels[1].dtype == D.y.dtype\n assert y.dims == (\"x\", \"features\")\n\n def test_to_stacked_array_to_unstacked_dataset(self):\n\n # single dimension: regression test for GH4049\n arr = xr.DataArray(np.arange(3), coords=[(\"x\", [0, 1, 2])])\n data = xr.Dataset({\"a\": arr, \"b\": arr})\n stacked = data.to_stacked_array(\"y\", sample_dims=[\"x\"])\n unstacked = stacked.to_unstacked_dataset(\"y\")\n assert_identical(unstacked, data)\n\n # make a two dimensional dataset\n a, b = create_test_stacked_array()\n D = xr.Dataset({\"a\": a, \"b\": b})\n sample_dims = [\"x\"]\n y = D.to_stacked_array(\"features\", sample_dims).transpose(\"x\", \"features\")\n\n x = y.to_unstacked_dataset(\"features\")\n assert_identical(D, 
x)\n\n # test on just one sample\n x0 = y[0].to_unstacked_dataset(\"features\")\n d0 = D.isel(x=0)\n assert_identical(d0, x0)\n\n def test_to_stacked_array_to_unstacked_dataset_different_dimension(self):\n # test when variables have different dimensionality\n a, b = create_test_stacked_array()\n sample_dims = [\"x\"]\n D = xr.Dataset({\"a\": a, \"b\": b.isel(y=0)})\n\n y = D.to_stacked_array(\"features\", sample_dims)\n x = y.to_unstacked_dataset(\"features\")\n assert_identical(D, x)\n\n def test_update(self):\n data = create_test_data(seed=0)\n expected = data.copy()\n var2 = Variable(\"dim1\", np.arange(8))\n actual = data.update({\"var2\": var2})\n expected[\"var2\"] = var2\n assert_identical(expected, actual)\n\n actual = data.copy()\n actual_result = actual.update(data)\n assert actual_result is actual\n assert_identical(expected, actual)\n\n other = Dataset(attrs={\"new\": \"attr\"})\n actual = data.copy()\n actual.update(other)\n assert_identical(expected, actual)\n\n def test_update_overwrite_coords(self):\n data = Dataset({\"a\": (\"x\", [1, 2])}, {\"b\": 3})\n data.update(Dataset(coords={\"b\": 4}))\n expected = Dataset({\"a\": (\"x\", [1, 2])}, {\"b\": 4})\n assert_identical(data, expected)\n\n data = Dataset({\"a\": (\"x\", [1, 2])}, {\"b\": 3})\n data.update(Dataset({\"c\": 5}, coords={\"b\": 4}))\n expected = Dataset({\"a\": (\"x\", [1, 2]), \"c\": 5}, {\"b\": 4})\n assert_identical(data, expected)\n\n data = Dataset({\"a\": (\"x\", [1, 2])}, {\"b\": 3})\n data.update({\"c\": DataArray(5, coords={\"b\": 4})})\n expected = Dataset({\"a\": (\"x\", [1, 2]), \"c\": 5}, {\"b\": 3})\n assert_identical(data, expected)\n\n def test_update_auto_align(self):\n ds = Dataset({\"x\": (\"t\", [3, 4])}, {\"t\": [0, 1]})\n\n expected = Dataset({\"x\": (\"t\", [3, 4]), \"y\": (\"t\", [np.nan, 5])}, {\"t\": [0, 1]})\n actual = ds.copy()\n other = {\"y\": (\"t\", [5]), \"t\": [1]}\n with pytest.raises(ValueError, match=r\"conflicting sizes\"):\n actual.update(other)\n actual.update(Dataset(other))\n assert_identical(expected, actual)\n\n actual = ds.copy()\n other = Dataset({\"y\": (\"t\", [5]), \"t\": [100]})\n actual.update(other)\n expected = Dataset(\n {\"x\": (\"t\", [3, 4]), \"y\": (\"t\", [np.nan] * 2)}, {\"t\": [0, 1]}\n )\n assert_identical(expected, actual)\n\n def test_getitem(self):\n data = create_test_data()\n assert isinstance(data[\"var1\"], DataArray)\n assert_equal(data[\"var1\"].variable, data.variables[\"var1\"])\n with pytest.raises(KeyError):\n data[\"notfound\"]\n with pytest.raises(KeyError):\n data[[\"var1\", \"notfound\"]]\n\n actual = data[[\"var1\", \"var2\"]]\n expected = Dataset({\"var1\": data[\"var1\"], \"var2\": data[\"var2\"]})\n assert_equal(expected, actual)\n\n actual = data[\"numbers\"]\n expected = DataArray(\n data[\"numbers\"].variable,\n {\"dim3\": data[\"dim3\"], \"numbers\": data[\"numbers\"]},\n dims=\"dim3\",\n name=\"numbers\",\n )\n assert_identical(expected, actual)\n\n actual = data[dict(dim1=0)]\n expected = data.isel(dim1=0)\n assert_identical(expected, actual)\n\n def test_getitem_hashable(self):\n data = create_test_data()\n data[(3, 4)] = data[\"var1\"] + 1\n expected = data[\"var1\"] + 1\n expected.name = (3, 4)\n assert_identical(expected, data[(3, 4)])\n with pytest.raises(KeyError, match=r\"('var1', 'var2')\"):\n data[(\"var1\", \"var2\")]\n\n def test_virtual_variables_default_coords(self):\n dataset = Dataset({\"foo\": (\"x\", range(10))})\n expected = DataArray(range(10), dims=\"x\", name=\"x\")\n actual = dataset[\"x\"]\n 
assert_identical(expected, actual)\n assert isinstance(actual.variable, IndexVariable)\n\n actual = dataset[[\"x\", \"foo\"]]\n expected = dataset.assign_coords(x=range(10))\n assert_identical(expected, actual)\n\n def test_virtual_variables_time(self):\n # access virtual variables\n data = create_test_data()\n expected = DataArray(\n 1 + np.arange(20), coords=[data[\"time\"]], dims=\"time\", name=\"dayofyear\"\n )\n\n assert_array_equal(\n data[\"time.month\"].values, data.variables[\"time\"].to_index().month\n )\n assert_array_equal(data[\"time.season\"].values, \"DJF\")\n # test virtual variable math\n assert_array_equal(data[\"time.dayofyear\"] + 1, 2 + np.arange(20))\n assert_array_equal(np.sin(data[\"time.dayofyear\"]), np.sin(1 + np.arange(20)))\n # ensure they become coordinates\n expected = Dataset({}, {\"dayofyear\": data[\"time.dayofyear\"]})\n actual = data[[\"time.dayofyear\"]]\n assert_equal(expected, actual)\n # non-coordinate variables\n ds = Dataset({\"t\": (\"x\", pd.date_range(\"2000-01-01\", periods=3))})\n assert (ds[\"t.year\"] == 2000).all()\n\n def test_virtual_variable_same_name(self):\n # regression test for GH367\n times = pd.date_range(\"2000-01-01\", freq=\"H\", periods=5)\n data = Dataset({\"time\": times})\n actual = data[\"time.time\"]\n expected = DataArray(times.time, [(\"time\", times)], name=\"time\")\n assert_identical(actual, expected)\n\n def test_virtual_variable_multiindex(self):\n # access multi-index levels as virtual variables\n data = create_test_multiindex()\n expected = DataArray(\n [\"a\", \"a\", \"b\", \"b\"],\n name=\"level_1\",\n coords=[data[\"x\"].to_index()],\n dims=\"x\",\n )\n assert_identical(expected, data[\"level_1\"])\n\n # combine multi-index level and datetime\n dr_index = pd.date_range(\"1/1/2011\", periods=4, freq=\"H\")\n mindex = pd.MultiIndex.from_arrays(\n [[\"a\", \"a\", \"b\", \"b\"], dr_index], names=(\"level_str\", \"level_date\")\n )\n data = Dataset({}, {\"x\": mindex})\n expected = DataArray(\n mindex.get_level_values(\"level_date\").hour,\n name=\"hour\",\n coords=[mindex],\n dims=\"x\",\n )\n assert_identical(expected, data[\"level_date.hour\"])\n\n # attribute style access\n assert_identical(data.level_str, data[\"level_str\"])\n\n def test_time_season(self):\n ds = Dataset({\"t\": pd.date_range(\"2000-01-01\", periods=12, freq=\"M\")})\n seas = [\"DJF\"] * 2 + [\"MAM\"] * 3 + [\"JJA\"] * 3 + [\"SON\"] * 3 + [\"DJF\"]\n assert_array_equal(seas, ds[\"t.season\"])\n\n def test_slice_virtual_variable(self):\n data = create_test_data()\n assert_equal(\n data[\"time.dayofyear\"][:10].variable, Variable([\"time\"], 1 + np.arange(10))\n )\n assert_equal(data[\"time.dayofyear\"][0].variable, Variable([], 1))\n\n def test_setitem(self):\n # assign a variable\n var = Variable([\"dim1\"], np.random.randn(8))\n data1 = create_test_data()\n data1[\"A\"] = var\n data2 = data1.copy()\n data2[\"A\"] = var\n assert_identical(data1, data2)\n # assign a dataset array\n dv = 2 * data2[\"A\"]\n data1[\"B\"] = dv.variable\n data2[\"B\"] = dv\n assert_identical(data1, data2)\n # can't assign an ND array without dimensions\n with pytest.raises(ValueError, match=r\"without explicit dimension names\"):\n data2[\"C\"] = var.values.reshape(2, 4)\n # but can assign a 1D array\n data1[\"C\"] = var.values\n data2[\"C\"] = (\"C\", var.values)\n assert_identical(data1, data2)\n # can assign a scalar\n data1[\"scalar\"] = 0\n data2[\"scalar\"] = ([], 0)\n assert_identical(data1, data2)\n # can't use the same dimension name as a scalar var\n with 
pytest.raises(ValueError, match=r\"already exists as a scalar\"):\n data1[\"newvar\"] = (\"scalar\", [3, 4, 5])\n # can't resize a used dimension\n with pytest.raises(ValueError, match=r\"arguments without labels\"):\n data1[\"dim1\"] = data1[\"dim1\"][:5]\n # override an existing value\n data1[\"A\"] = 3 * data2[\"A\"]\n assert_equal(data1[\"A\"], 3 * data2[\"A\"])\n\n # test assignment with positional and label-based indexing\n data3 = data1[[\"var1\", \"var2\"]]\n data3[\"var3\"] = data3.var1.isel(dim1=0)\n data4 = data3.copy()\n err_msg = (\n \"can only set locations defined by dictionaries from Dataset.loc. Got: a\"\n )\n with pytest.raises(TypeError, match=err_msg):\n data1.loc[\"a\"] = 0\n err_msg = r\"Variables \\['A', 'B', 'scalar'\\] in new values not available in original dataset:\"\n with pytest.raises(ValueError, match=err_msg):\n data4[{\"dim2\": 1}] = data1[{\"dim2\": 2}]\n err_msg = \"Variable 'var3': indexer {'dim2': 0} not available\"\n with pytest.raises(ValueError, match=err_msg):\n data1[{\"dim2\": 0}] = 0.0\n err_msg = \"Variable 'var1': indexer {'dim2': 10} not available\"\n with pytest.raises(ValueError, match=err_msg):\n data4[{\"dim2\": 10}] = data3[{\"dim2\": 2}]\n err_msg = \"Variable 'var1': dimension 'dim2' appears in new values\"\n with pytest.raises(KeyError, match=err_msg):\n data4[{\"dim2\": 2}] = data3[{\"dim2\": [2]}]\n err_msg = (\n \"Variable 'var2': dimension order differs between original and new data\"\n )\n data3[\"var2\"] = data3[\"var2\"].T\n with pytest.raises(ValueError, match=err_msg):\n data4[{\"dim2\": [2, 3]}] = data3[{\"dim2\": [2, 3]}]\n data3[\"var2\"] = data3[\"var2\"].T\n err_msg = \"indexes along dimension 'dim2' are not equal\"\n with pytest.raises(ValueError, match=err_msg):\n data4[{\"dim2\": [2, 3]}] = data3[{\"dim2\": [2, 3, 4]}]\n err_msg = \"Dataset assignment only accepts DataArrays, Datasets, and scalars.\"\n with pytest.raises(TypeError, match=err_msg):\n data4[{\"dim2\": [2, 3]}] = data3[\"var1\"][{\"dim2\": [3, 4]}].values\n data5 = data4.astype(str)\n data5[\"var4\"] = data4[\"var1\"]\n err_msg = \"could not convert string to float: 'a'\"\n with pytest.raises(ValueError, match=err_msg):\n data5[{\"dim2\": 1}] = \"a\"\n\n data4[{\"dim2\": 0}] = 0.0\n data4[{\"dim2\": 1}] = data3[{\"dim2\": 2}]\n data4.loc[{\"dim2\": 1.5}] = 1.0\n data4.loc[{\"dim2\": 2.0}] = data3.loc[{\"dim2\": 2.5}]\n for v, dat3 in data3.items():\n dat4 = data4[v]\n assert_array_equal(dat4[{\"dim2\": 0}], 0.0)\n assert_array_equal(dat4[{\"dim2\": 1}], dat3[{\"dim2\": 2}])\n assert_array_equal(dat4.loc[{\"dim2\": 1.5}], 1.0)\n assert_array_equal(dat4.loc[{\"dim2\": 2.0}], dat3.loc[{\"dim2\": 2.5}])\n unchanged = [1.0, 2.5, 3.0, 3.5, 4.0]\n assert_identical(\n dat4.loc[{\"dim2\": unchanged}], dat3.loc[{\"dim2\": unchanged}]\n )\n\n def test_setitem_pandas(self):\n\n ds = self.make_example_math_dataset()\n ds[\"x\"] = np.arange(3)\n ds_copy = ds.copy()\n ds_copy[\"bar\"] = ds[\"bar\"].to_pandas()\n\n assert_equal(ds, ds_copy)\n\n def test_setitem_auto_align(self):\n ds = Dataset()\n ds[\"x\"] = (\"y\", range(3))\n ds[\"y\"] = 1 + np.arange(3)\n expected = Dataset({\"x\": (\"y\", range(3)), \"y\": 1 + np.arange(3)})\n assert_identical(ds, expected)\n\n ds[\"y\"] = DataArray(range(3), dims=\"y\")\n expected = Dataset({\"x\": (\"y\", range(3))}, {\"y\": range(3)})\n assert_identical(ds, expected)\n\n ds[\"x\"] = DataArray([1, 2], coords=[(\"y\", [0, 1])])\n expected = Dataset({\"x\": (\"y\", [1, 2, np.nan])}, {\"y\": range(3)})\n assert_identical(ds, 
expected)\n\n ds[\"x\"] = 42\n expected = Dataset({\"x\": 42, \"y\": range(3)})\n assert_identical(ds, expected)\n\n ds[\"x\"] = DataArray([4, 5, 6, 7], coords=[(\"y\", [0, 1, 2, 3])])\n expected = Dataset({\"x\": (\"y\", [4, 5, 6])}, {\"y\": range(3)})\n assert_identical(ds, expected)\n\n def test_setitem_dimension_override(self):\n # regression test for GH-3377\n ds = xr.Dataset({\"x\": [0, 1, 2]})\n ds[\"x\"] = ds[\"x\"][:2]\n expected = Dataset({\"x\": [0, 1]})\n assert_identical(ds, expected)\n\n ds = xr.Dataset({\"x\": [0, 1, 2]})\n ds[\"x\"] = np.array([0, 1])\n assert_identical(ds, expected)\n\n ds = xr.Dataset({\"x\": [0, 1, 2]})\n ds.coords[\"x\"] = [0, 1]\n assert_identical(ds, expected)\n\n def test_setitem_with_coords(self):\n # Regression test for GH:2068\n ds = create_test_data()\n\n other = DataArray(\n np.arange(10), dims=\"dim3\", coords={\"numbers\": (\"dim3\", np.arange(10))}\n )\n expected = ds.copy()\n expected[\"var3\"] = other.drop_vars(\"numbers\")\n actual = ds.copy()\n actual[\"var3\"] = other\n assert_identical(expected, actual)\n assert \"numbers\" in other.coords # should not change other\n\n # with alignment\n other = ds[\"var3\"].isel(dim3=slice(1, -1))\n other[\"numbers\"] = (\"dim3\", np.arange(8))\n actual = ds.copy()\n actual[\"var3\"] = other\n assert \"numbers\" in other.coords # should not change other\n expected = ds.copy()\n expected[\"var3\"] = ds[\"var3\"].isel(dim3=slice(1, -1))\n assert_identical(expected, actual)\n\n # with non-duplicate coords\n other = ds[\"var3\"].isel(dim3=slice(1, -1))\n other[\"numbers\"] = (\"dim3\", np.arange(8))\n other[\"position\"] = (\"dim3\", np.arange(8))\n actual = ds.copy()\n actual[\"var3\"] = other\n assert \"position\" in actual\n assert \"position\" in other.coords\n\n # assigning a coordinate-only dataarray\n actual = ds.copy()\n other = actual[\"numbers\"]\n other[0] = 10\n actual[\"numbers\"] = other\n assert actual[\"numbers\"][0] == 10\n\n # GH: 2099\n ds = Dataset(\n {\"var\": (\"x\", [1, 2, 3])},\n coords={\"x\": [0, 1, 2], \"z1\": (\"x\", [1, 2, 3]), \"z2\": (\"x\", [1, 2, 3])},\n )\n ds[\"var\"] = ds[\"var\"] * 2\n assert np.allclose(ds[\"var\"], [2, 4, 6])\n\n def test_setitem_align_new_indexes(self):\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, {\"x\": [0, 1, 2]})\n ds[\"bar\"] = DataArray([2, 3, 4], [(\"x\", [1, 2, 3])])\n expected = Dataset(\n {\"foo\": (\"x\", [1, 2, 3]), \"bar\": (\"x\", [np.nan, 2, 3])}, {\"x\": [0, 1, 2]}\n )\n assert_identical(ds, expected)\n\n @pytest.mark.parametrize(\"dtype\", [str, bytes])\n def test_setitem_str_dtype(self, dtype):\n\n ds = xr.Dataset(coords={\"x\": np.array([\"x\", \"y\"], dtype=dtype)})\n ds[\"foo\"] = xr.DataArray(np.array([0, 0]), dims=[\"x\"])\n\n assert np.issubdtype(ds.x.dtype, dtype)\n\n def test_setitem_using_list(self):\n\n # assign a list of variables\n var1 = Variable([\"dim1\"], np.random.randn(8))\n var2 = Variable([\"dim1\"], np.random.randn(8))\n actual = create_test_data()\n expected = actual.copy()\n expected[\"A\"] = var1\n expected[\"B\"] = var2\n actual[[\"A\", \"B\"]] = [var1, var2]\n assert_identical(actual, expected)\n # assign a list of dataset arrays\n dv = 2 * expected[[\"A\", \"B\"]]\n actual[[\"C\", \"D\"]] = [d.variable for d in dv.data_vars.values()]\n expected[[\"C\", \"D\"]] = dv\n assert_identical(actual, expected)\n\n @pytest.mark.parametrize(\n \"var_list, data, error_regex\",\n [\n (\n [\"A\", \"B\"],\n [Variable([\"dim1\"], np.random.randn(8))],\n r\"Different lengths\",\n ),\n ([], [Variable([\"dim1\"], 
np.random.randn(8))], r\"Empty list of variables\"),\n ([\"A\", \"B\"], xr.DataArray([1, 2]), r\"assign single DataArray\"),\n ],\n )\n def test_setitem_using_list_errors(self, var_list, data, error_regex):\n actual = create_test_data()\n with pytest.raises(ValueError, match=error_regex):\n actual[var_list] = data\n\n def test_assign(self):\n ds = Dataset()\n actual = ds.assign(x=[0, 1, 2], y=2)\n expected = Dataset({\"x\": [0, 1, 2], \"y\": 2})\n assert_identical(actual, expected)\n assert list(actual.variables) == [\"x\", \"y\"]\n assert_identical(ds, Dataset())\n\n actual = actual.assign(y=lambda ds: ds.x ** 2)\n expected = Dataset({\"y\": (\"x\", [0, 1, 4]), \"x\": [0, 1, 2]})\n assert_identical(actual, expected)\n\n actual = actual.assign_coords(z=2)\n expected = Dataset({\"y\": (\"x\", [0, 1, 4])}, {\"z\": 2, \"x\": [0, 1, 2]})\n assert_identical(actual, expected)\n\n ds = Dataset({\"a\": (\"x\", range(3))}, {\"b\": (\"x\", [\"A\"] * 2 + [\"B\"])})\n actual = ds.groupby(\"b\").assign(c=lambda ds: 2 * ds.a)\n expected = ds.merge({\"c\": (\"x\", [0, 2, 4])})\n assert_identical(actual, expected)\n\n actual = ds.groupby(\"b\").assign(c=lambda ds: ds.a.sum())\n expected = ds.merge({\"c\": (\"x\", [1, 1, 2])})\n assert_identical(actual, expected)\n\n actual = ds.groupby(\"b\").assign_coords(c=lambda ds: ds.a.sum())\n expected = expected.set_coords(\"c\")\n assert_identical(actual, expected)\n\n def test_assign_coords(self):\n ds = Dataset()\n\n actual = ds.assign(x=[0, 1, 2], y=2)\n actual = actual.assign_coords(x=list(\"abc\"))\n expected = Dataset({\"x\": list(\"abc\"), \"y\": 2})\n assert_identical(actual, expected)\n\n actual = ds.assign(x=[0, 1, 2], y=[2, 3])\n actual = actual.assign_coords({\"y\": [2.0, 3.0]})\n expected = ds.assign(x=[0, 1, 2], y=[2.0, 3.0])\n assert_identical(actual, expected)\n\n def test_assign_attrs(self):\n expected = Dataset(attrs=dict(a=1, b=2))\n new = Dataset()\n actual = new.assign_attrs(a=1, b=2)\n assert_identical(actual, expected)\n assert new.attrs == {}\n\n expected.attrs[\"c\"] = 3\n new_actual = actual.assign_attrs({\"c\": 3})\n assert_identical(new_actual, expected)\n assert actual.attrs == dict(a=1, b=2)\n\n def test_assign_multiindex_level(self):\n data = create_test_multiindex()\n with pytest.raises(ValueError, match=r\"conflicting MultiIndex\"):\n data.assign(level_1=range(4))\n data.assign_coords(level_1=range(4))\n # raise an Error when any level name is used as dimension GH:2299\n with pytest.raises(ValueError):\n data[\"y\"] = (\"level_1\", [0, 1])\n\n def test_merge_multiindex_level(self):\n data = create_test_multiindex()\n other = Dataset({\"z\": (\"level_1\", [0, 1])}) # conflict dimension\n with pytest.raises(ValueError):\n data.merge(other)\n other = Dataset({\"level_1\": (\"x\", [0, 1])}) # conflict variable name\n with pytest.raises(ValueError):\n data.merge(other)\n\n def test_setitem_original_non_unique_index(self):\n # regression test for GH943\n original = Dataset({\"data\": (\"x\", np.arange(5))}, coords={\"x\": [0, 1, 2, 0, 1]})\n expected = Dataset({\"data\": (\"x\", np.arange(5))}, {\"x\": range(5)})\n\n actual = original.copy()\n actual[\"x\"] = list(range(5))\n assert_identical(actual, expected)\n\n actual = original.copy()\n actual[\"x\"] = (\"x\", list(range(5)))\n assert_identical(actual, expected)\n\n actual = original.copy()\n actual.coords[\"x\"] = list(range(5))\n assert_identical(actual, expected)\n\n def test_setitem_both_non_unique_index(self):\n # regression test for GH956\n names = [\"joaquin\", \"manolo\", 
\"joaquin\"]\n values = np.random.randint(0, 256, (3, 4, 4))\n array = DataArray(\n values, dims=[\"name\", \"row\", \"column\"], coords=[names, range(4), range(4)]\n )\n expected = Dataset({\"first\": array, \"second\": array})\n actual = array.rename(\"first\").to_dataset()\n actual[\"second\"] = array\n assert_identical(expected, actual)\n\n def test_setitem_multiindex_level(self):\n data = create_test_multiindex()\n with pytest.raises(ValueError, match=r\"conflicting MultiIndex\"):\n data[\"level_1\"] = range(4)\n\n def test_delitem(self):\n data = create_test_data()\n all_items = set(data.variables)\n assert set(data.variables) == all_items\n del data[\"var1\"]\n assert set(data.variables) == all_items - {\"var1\"}\n del data[\"numbers\"]\n assert set(data.variables) == all_items - {\"var1\", \"numbers\"}\n assert \"numbers\" not in data.coords\n\n expected = Dataset()\n actual = Dataset({\"y\": (\"x\", [1, 2])})\n del actual[\"y\"]\n assert_identical(expected, actual)\n\n def test_squeeze(self):\n data = Dataset({\"foo\": ([\"x\", \"y\", \"z\"], [[[1], [2]]])})\n for args in [[], [[\"x\"]], [[\"x\", \"z\"]]]:\n\n def get_args(v):\n return [set(args[0]) & set(v.dims)] if args else []\n\n expected = Dataset(\n {k: v.squeeze(*get_args(v)) for k, v in data.variables.items()}\n )\n expected = expected.set_coords(data.coords)\n assert_identical(expected, data.squeeze(*args))\n # invalid squeeze\n with pytest.raises(ValueError, match=r\"cannot select a dimension\"):\n data.squeeze(\"y\")\n\n def test_squeeze_drop(self):\n data = Dataset({\"foo\": (\"x\", [1])}, {\"x\": [0]})\n expected = Dataset({\"foo\": 1})\n selected = data.squeeze(drop=True)\n assert_identical(expected, selected)\n\n expected = Dataset({\"foo\": 1}, {\"x\": 0})\n selected = data.squeeze(drop=False)\n assert_identical(expected, selected)\n\n data = Dataset({\"foo\": ((\"x\", \"y\"), [[1]])}, {\"x\": [0], \"y\": [0]})\n expected = Dataset({\"foo\": 1})\n selected = data.squeeze(drop=True)\n assert_identical(expected, selected)\n\n expected = Dataset({\"foo\": (\"x\", [1])}, {\"x\": [0]})\n selected = data.squeeze(dim=\"y\", drop=True)\n assert_identical(expected, selected)\n\n data = Dataset({\"foo\": ((\"x\",), [])}, {\"x\": []})\n selected = data.squeeze(drop=True)\n assert_identical(data, selected)\n\n def test_groupby(self):\n data = Dataset(\n {\"z\": ([\"x\", \"y\"], np.random.randn(3, 5))},\n {\"x\": (\"x\", list(\"abc\")), \"c\": (\"x\", [0, 1, 0]), \"y\": range(5)},\n )\n groupby = data.groupby(\"x\")\n assert len(groupby) == 3\n expected_groups = {\"a\": 0, \"b\": 1, \"c\": 2}\n assert groupby.groups == expected_groups\n expected_items = [\n (\"a\", data.isel(x=0)),\n (\"b\", data.isel(x=1)),\n (\"c\", data.isel(x=2)),\n ]\n for actual, expected in zip(groupby, expected_items):\n assert actual[0] == expected[0]\n assert_equal(actual[1], expected[1])\n\n def identity(x):\n return x\n\n for k in [\"x\", \"c\", \"y\"]:\n actual = data.groupby(k, squeeze=False).map(identity)\n assert_equal(data, actual)\n\n def test_groupby_returns_new_type(self):\n data = Dataset({\"z\": ([\"x\", \"y\"], np.random.randn(3, 5))})\n\n actual = data.groupby(\"x\").map(lambda ds: ds[\"z\"])\n expected = data[\"z\"]\n assert_identical(expected, actual)\n\n actual = data[\"z\"].groupby(\"x\").map(lambda x: x.to_dataset())\n expected = data\n assert_identical(expected, actual)\n\n def test_groupby_iter(self):\n data = create_test_data()\n for n, (t, sub) in enumerate(list(data.groupby(\"dim1\"))[:3]):\n assert data[\"dim1\"][n] == t\n 
assert_equal(data[\"var1\"][n], sub[\"var1\"])\n assert_equal(data[\"var2\"][n], sub[\"var2\"])\n assert_equal(data[\"var3\"][:, n], sub[\"var3\"])\n\n def test_groupby_errors(self):\n data = create_test_data()\n with pytest.raises(TypeError, match=r\"`group` must be\"):\n data.groupby(np.arange(10))\n with pytest.raises(ValueError, match=r\"length does not match\"):\n data.groupby(data[\"dim1\"][:3])\n with pytest.raises(TypeError, match=r\"`group` must be\"):\n data.groupby(data.coords[\"dim1\"].to_index())\n\n def test_groupby_reduce(self):\n data = Dataset(\n {\n \"xy\": ([\"x\", \"y\"], np.random.randn(3, 4)),\n \"xonly\": (\"x\", np.random.randn(3)),\n \"yonly\": (\"y\", np.random.randn(4)),\n \"letters\": (\"y\", [\"a\", \"a\", \"b\", \"b\"]),\n }\n )\n\n expected = data.mean(\"y\")\n expected[\"yonly\"] = expected[\"yonly\"].variable.set_dims({\"x\": 3})\n actual = data.groupby(\"x\").mean(...)\n assert_allclose(expected, actual)\n\n actual = data.groupby(\"x\").mean(\"y\")\n assert_allclose(expected, actual)\n\n letters = data[\"letters\"]\n expected = Dataset(\n {\n \"xy\": data[\"xy\"].groupby(letters).mean(...),\n \"xonly\": (data[\"xonly\"].mean().variable.set_dims({\"letters\": 2})),\n \"yonly\": data[\"yonly\"].groupby(letters).mean(),\n }\n )\n actual = data.groupby(\"letters\").mean(...)\n assert_allclose(expected, actual)\n\n def test_groupby_math(self):\n def reorder_dims(x):\n return x.transpose(\"dim1\", \"dim2\", \"dim3\", \"time\")\n\n ds = create_test_data()\n ds[\"dim1\"] = ds[\"dim1\"]\n for squeeze in [True, False]:\n grouped = ds.groupby(\"dim1\", squeeze=squeeze)\n\n expected = reorder_dims(ds + ds.coords[\"dim1\"])\n actual = grouped + ds.coords[\"dim1\"]\n assert_identical(expected, reorder_dims(actual))\n\n actual = ds.coords[\"dim1\"] + grouped\n assert_identical(expected, reorder_dims(actual))\n\n ds2 = 2 * ds\n expected = reorder_dims(ds + ds2)\n actual = grouped + ds2\n assert_identical(expected, reorder_dims(actual))\n\n actual = ds2 + grouped\n assert_identical(expected, reorder_dims(actual))\n\n grouped = ds.groupby(\"numbers\")\n zeros = DataArray([0, 0, 0, 0], [(\"numbers\", range(4))])\n expected = (ds + Variable(\"dim3\", np.zeros(10))).transpose(\n \"dim3\", \"dim1\", \"dim2\", \"time\"\n )\n actual = grouped + zeros\n assert_equal(expected, actual)\n\n actual = zeros + grouped\n assert_equal(expected, actual)\n\n with pytest.raises(ValueError, match=r\"incompat.* grouped binary\"):\n grouped + ds\n with pytest.raises(ValueError, match=r\"incompat.* grouped binary\"):\n ds + grouped\n with pytest.raises(TypeError, match=r\"only support binary ops\"):\n grouped + 1\n with pytest.raises(TypeError, match=r\"only support binary ops\"):\n grouped + grouped\n with pytest.raises(TypeError, match=r\"in-place operations\"):\n ds += grouped\n\n ds = Dataset(\n {\n \"x\": (\"time\", np.arange(100)),\n \"time\": pd.date_range(\"2000-01-01\", periods=100),\n }\n )\n with pytest.raises(ValueError, match=r\"incompat.* grouped binary\"):\n ds + ds.groupby(\"time.month\")\n\n def test_groupby_math_virtual(self):\n ds = Dataset(\n {\"x\": (\"t\", [1, 2, 3])}, {\"t\": pd.date_range(\"20100101\", periods=3)}\n )\n grouped = ds.groupby(\"t.day\")\n actual = grouped - grouped.mean(...)\n expected = Dataset({\"x\": (\"t\", [0, 0, 0])}, ds[[\"t\", \"t.day\"]])\n assert_identical(actual, expected)\n\n def test_groupby_nan(self):\n # nan should be excluded from groupby\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3, 4])}, {\"bar\": (\"x\", [1, 1, 2, np.nan])})\n actual = 
ds.groupby(\"bar\").mean(...)\n expected = Dataset({\"foo\": (\"bar\", [1.5, 3]), \"bar\": [1, 2]})\n assert_identical(actual, expected)\n\n def test_groupby_order(self):\n # groupby should preserve variables order\n ds = Dataset()\n for vn in [\"a\", \"b\", \"c\"]:\n ds[vn] = DataArray(np.arange(10), dims=[\"t\"])\n data_vars_ref = list(ds.data_vars.keys())\n ds = ds.groupby(\"t\").mean(...)\n data_vars = list(ds.data_vars.keys())\n assert data_vars == data_vars_ref\n # coords are now at the end of the list, so the test below fails\n # all_vars = list(ds.variables.keys())\n # all_vars_ref = list(ds.variables.keys())\n # self.assertEqual(all_vars, all_vars_ref)\n\n def test_resample_and_first(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n\n actual = ds.resample(time=\"1D\").first(keep_attrs=True)\n expected = ds.isel(time=[0, 4, 8])\n assert_identical(expected, actual)\n\n # upsampling\n expected_time = pd.date_range(\"2000-01-01\", freq=\"3H\", periods=19)\n expected = ds.reindex(time=expected_time)\n actual = ds.resample(time=\"3H\")\n for how in [\"mean\", \"sum\", \"first\", \"last\"]:\n method = getattr(actual, how)\n result = method()\n assert_equal(expected, result)\n for method in [np.mean]:\n result = actual.reduce(method)\n assert_equal(expected, result)\n\n def test_resample_min_count(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n # inject nan\n ds[\"foo\"] = xr.where(ds[\"foo\"] > 2.0, np.nan, ds[\"foo\"])\n\n actual = ds.resample(time=\"1D\").sum(min_count=1)\n expected = xr.concat(\n [\n ds.isel(time=slice(i * 4, (i + 1) * 4)).sum(\"time\", min_count=1)\n for i in range(3)\n ],\n dim=actual[\"time\"],\n )\n assert_equal(expected, actual)\n\n def test_resample_by_mean_with_keep_attrs(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n ds.attrs[\"dsmeta\"] = \"dsdata\"\n\n resampled_ds = ds.resample(time=\"1D\").mean(keep_attrs=True)\n actual = resampled_ds[\"bar\"].attrs\n expected = ds[\"bar\"].attrs\n assert expected == actual\n\n actual = resampled_ds.attrs\n expected = ds.attrs\n assert expected == actual\n\n with pytest.warns(\n UserWarning, match=\"Passing ``keep_attrs`` to ``resample`` has no effect.\"\n ):\n ds.resample(time=\"1D\", keep_attrs=True)\n\n def test_resample_loffset(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n ds.attrs[\"dsmeta\"] = \"dsdata\"\n\n # Our use of `loffset` may change if we align our API with pandas' changes.\n # ref https://github.com/pydata/xarray/pull/4537\n actual = ds.resample(time=\"24H\", loffset=\"-12H\").mean().bar\n expected_ = ds.bar.to_series().resample(\"24H\").mean()\n expected_.index += to_offset(\"-12H\")\n expected = DataArray.from_series(expected_)\n assert_identical(actual, expected)\n\n def 
test_resample_by_mean_discarding_attrs(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n ds.attrs[\"dsmeta\"] = \"dsdata\"\n\n resampled_ds = ds.resample(time=\"1D\").mean(keep_attrs=False)\n\n assert resampled_ds[\"bar\"].attrs == {}\n assert resampled_ds.attrs == {}\n\n def test_resample_by_last_discarding_attrs(self):\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n ds.attrs[\"dsmeta\"] = \"dsdata\"\n\n resampled_ds = ds.resample(time=\"1D\").last(keep_attrs=False)\n\n assert resampled_ds[\"bar\"].attrs == {}\n assert resampled_ds.attrs == {}\n\n @requires_scipy\n def test_resample_drop_nondim_coords(self):\n xs = np.arange(6)\n ys = np.arange(3)\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=5)\n data = np.tile(np.arange(5), (6, 3, 1))\n xx, yy = np.meshgrid(xs * 5, ys * 2.5)\n tt = np.arange(len(times), dtype=int)\n array = DataArray(data, {\"time\": times, \"x\": xs, \"y\": ys}, (\"x\", \"y\", \"time\"))\n xcoord = DataArray(xx.T, {\"x\": xs, \"y\": ys}, (\"x\", \"y\"))\n ycoord = DataArray(yy.T, {\"x\": xs, \"y\": ys}, (\"x\", \"y\"))\n tcoord = DataArray(tt, {\"time\": times}, (\"time\",))\n ds = Dataset({\"data\": array, \"xc\": xcoord, \"yc\": ycoord, \"tc\": tcoord})\n ds = ds.set_coords([\"xc\", \"yc\", \"tc\"])\n\n # Re-sample\n actual = ds.resample(time=\"12H\").mean(\"time\")\n assert \"tc\" not in actual.coords\n\n # Up-sample - filling\n actual = ds.resample(time=\"1H\").ffill()\n assert \"tc\" not in actual.coords\n\n # Up-sample - interpolation\n actual = ds.resample(time=\"1H\").interpolate(\"linear\")\n assert \"tc\" not in actual.coords\n\n def test_resample_old_api(self):\n\n times = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=10)\n ds = Dataset(\n {\n \"foo\": ([\"time\", \"x\", \"y\"], np.random.randn(10, 5, 3)),\n \"bar\": (\"time\", np.random.randn(10), {\"meta\": \"data\"}),\n \"time\": times,\n }\n )\n\n with pytest.raises(TypeError, match=r\"resample\\(\\) no longer supports\"):\n ds.resample(\"1D\", \"time\")\n\n with pytest.raises(TypeError, match=r\"resample\\(\\) no longer supports\"):\n ds.resample(\"1D\", dim=\"time\", how=\"mean\")\n\n with pytest.raises(TypeError, match=r\"resample\\(\\) no longer supports\"):\n ds.resample(\"1D\", dim=\"time\")\n\n def test_resample_ds_da_are_the_same(self):\n time = pd.date_range(\"2000-01-01\", freq=\"6H\", periods=365 * 4)\n ds = xr.Dataset(\n {\n \"foo\": ((\"time\", \"x\"), np.random.randn(365 * 4, 5)),\n \"time\": time,\n \"x\": np.arange(5),\n }\n )\n assert_identical(\n ds.resample(time=\"M\").mean()[\"foo\"], ds.foo.resample(time=\"M\").mean()\n )\n\n def test_ds_resample_apply_func_args(self):\n def func(arg1, arg2, arg3=0.0):\n return arg1.mean(\"time\") + arg2 + arg3\n\n times = pd.date_range(\"2000\", freq=\"D\", periods=3)\n ds = xr.Dataset({\"foo\": (\"time\", [1.0, 1.0, 1.0]), \"time\": times})\n expected = xr.Dataset({\"foo\": (\"time\", [3.0, 3.0, 3.0]), \"time\": times})\n actual = ds.resample(time=\"D\").map(func, args=(1.0,), arg3=1.0)\n assert_identical(expected, actual)\n\n def test_to_array(self):\n ds = Dataset(\n {\"a\": 1, \"b\": (\"x\", [1, 2, 3])},\n coords={\"c\": 42},\n 
attrs={\"Conventions\": \"None\"},\n )\n data = [[1, 1, 1], [1, 2, 3]]\n coords = {\"c\": 42, \"variable\": [\"a\", \"b\"]}\n dims = (\"variable\", \"x\")\n expected = DataArray(data, coords, dims, attrs=ds.attrs)\n actual = ds.to_array()\n assert_identical(expected, actual)\n\n actual = ds.to_array(\"abc\", name=\"foo\")\n expected = expected.rename({\"variable\": \"abc\"}).rename(\"foo\")\n assert_identical(expected, actual)\n\n def test_to_and_from_dataframe(self):\n x = np.random.randn(10)\n y = np.random.randn(10)\n t = list(\"abcdefghij\")\n ds = Dataset({\"a\": (\"t\", x), \"b\": (\"t\", y), \"t\": (\"t\", t)})\n expected = pd.DataFrame(\n np.array([x, y]).T, columns=[\"a\", \"b\"], index=pd.Index(t, name=\"t\")\n )\n actual = ds.to_dataframe()\n # use the .equals method to check all DataFrame metadata\n assert expected.equals(actual), (expected, actual)\n\n # verify coords are included\n actual = ds.set_coords(\"b\").to_dataframe()\n assert expected.equals(actual), (expected, actual)\n\n # check roundtrip\n assert_identical(ds, Dataset.from_dataframe(actual))\n\n # test a case with a MultiIndex\n w = np.random.randn(2, 3)\n ds = Dataset({\"w\": ((\"x\", \"y\"), w)})\n ds[\"y\"] = (\"y\", list(\"abc\"))\n exp_index = pd.MultiIndex.from_arrays(\n [[0, 0, 0, 1, 1, 1], [\"a\", \"b\", \"c\", \"a\", \"b\", \"c\"]], names=[\"x\", \"y\"]\n )\n expected = pd.DataFrame(w.reshape(-1), columns=[\"w\"], index=exp_index)\n actual = ds.to_dataframe()\n assert expected.equals(actual)\n\n # check roundtrip\n assert_identical(ds.assign_coords(x=[0, 1]), Dataset.from_dataframe(actual))\n\n # Check multiindex reordering\n new_order = [\"x\", \"y\"]\n actual = ds.to_dataframe(dim_order=new_order)\n assert expected.equals(actual)\n\n new_order = [\"y\", \"x\"]\n exp_index = pd.MultiIndex.from_arrays(\n [[\"a\", \"a\", \"b\", \"b\", \"c\", \"c\"], [0, 1, 0, 1, 0, 1]], names=[\"y\", \"x\"]\n )\n expected = pd.DataFrame(\n w.transpose().reshape(-1), columns=[\"w\"], index=exp_index\n )\n actual = ds.to_dataframe(dim_order=new_order)\n assert expected.equals(actual)\n\n invalid_order = [\"x\"]\n with pytest.raises(\n ValueError, match=\"does not match the set of dimensions of this\"\n ):\n ds.to_dataframe(dim_order=invalid_order)\n\n invalid_order = [\"x\", \"z\"]\n with pytest.raises(\n ValueError, match=\"does not match the set of dimensions of this\"\n ):\n ds.to_dataframe(dim_order=invalid_order)\n\n # check pathological cases\n df = pd.DataFrame([1])\n actual = Dataset.from_dataframe(df)\n expected = Dataset({0: (\"index\", [1])}, {\"index\": [0]})\n assert_identical(expected, actual)\n\n df = pd.DataFrame()\n actual = Dataset.from_dataframe(df)\n expected = Dataset(coords={\"index\": []})\n assert_identical(expected, actual)\n\n # GH697\n df = pd.DataFrame({\"A\": []})\n actual = Dataset.from_dataframe(df)\n expected = Dataset({\"A\": DataArray([], dims=(\"index\",))}, {\"index\": []})\n assert_identical(expected, actual)\n\n # regression test for GH278\n # use int64 to ensure consistent results for the pandas .equals method\n # on windows (which requires the same dtype)\n ds = Dataset({\"x\": pd.Index([\"bar\"]), \"a\": (\"y\", np.array([1], \"int64\"))}).isel(\n x=0\n )\n # use .loc to ensure consistent results on Python 3\n actual = ds.to_dataframe().loc[:, [\"a\", \"x\"]]\n expected = pd.DataFrame(\n [[1, \"bar\"]], index=pd.Index([0], name=\"y\"), columns=[\"a\", \"x\"]\n )\n assert expected.equals(actual), (expected, actual)\n\n ds = Dataset({\"x\": np.array([0], \"int64\"), \"y\": np.array([1], 
\"int64\")})\n actual = ds.to_dataframe()\n idx = pd.MultiIndex.from_arrays([[0], [1]], names=[\"x\", \"y\"])\n expected = pd.DataFrame([[]], index=idx)\n assert expected.equals(actual), (expected, actual)\n\n def test_from_dataframe_categorical(self):\n cat = pd.CategoricalDtype(\n categories=[\"foo\", \"bar\", \"baz\", \"qux\", \"quux\", \"corge\"]\n )\n i1 = pd.Series([\"foo\", \"bar\", \"foo\"], dtype=cat)\n i2 = pd.Series([\"bar\", \"bar\", \"baz\"], dtype=cat)\n\n df = pd.DataFrame({\"i1\": i1, \"i2\": i2, \"values\": [1, 2, 3]})\n ds = df.set_index(\"i1\").to_xarray()\n assert len(ds[\"i1\"]) == 3\n\n ds = df.set_index([\"i1\", \"i2\"]).to_xarray()\n assert len(ds[\"i1\"]) == 2\n assert len(ds[\"i2\"]) == 2\n\n @requires_sparse\n def test_from_dataframe_sparse(self):\n import sparse\n\n df_base = pd.DataFrame(\n {\"x\": range(10), \"y\": list(\"abcdefghij\"), \"z\": np.arange(0, 100, 10)}\n )\n\n ds_sparse = Dataset.from_dataframe(df_base.set_index(\"x\"), sparse=True)\n ds_dense = Dataset.from_dataframe(df_base.set_index(\"x\"), sparse=False)\n assert isinstance(ds_sparse[\"y\"].data, sparse.COO)\n assert isinstance(ds_sparse[\"z\"].data, sparse.COO)\n ds_sparse[\"y\"].data = ds_sparse[\"y\"].data.todense()\n ds_sparse[\"z\"].data = ds_sparse[\"z\"].data.todense()\n assert_identical(ds_dense, ds_sparse)\n\n ds_sparse = Dataset.from_dataframe(df_base.set_index([\"x\", \"y\"]), sparse=True)\n ds_dense = Dataset.from_dataframe(df_base.set_index([\"x\", \"y\"]), sparse=False)\n assert isinstance(ds_sparse[\"z\"].data, sparse.COO)\n ds_sparse[\"z\"].data = ds_sparse[\"z\"].data.todense()\n assert_identical(ds_dense, ds_sparse)\n\n def test_to_and_from_empty_dataframe(self):\n # GH697\n expected = pd.DataFrame({\"foo\": []})\n ds = Dataset.from_dataframe(expected)\n assert len(ds[\"foo\"]) == 0\n actual = ds.to_dataframe()\n assert len(actual) == 0\n assert expected.equals(actual)\n\n def test_from_dataframe_multiindex(self):\n index = pd.MultiIndex.from_product([[\"a\", \"b\"], [1, 2, 3]], names=[\"x\", \"y\"])\n df = pd.DataFrame({\"z\": np.arange(6)}, index=index)\n\n expected = Dataset(\n {\"z\": ((\"x\", \"y\"), [[0, 1, 2], [3, 4, 5]])},\n coords={\"x\": [\"a\", \"b\"], \"y\": [1, 2, 3]},\n )\n actual = Dataset.from_dataframe(df)\n assert_identical(actual, expected)\n\n df2 = df.iloc[[3, 2, 1, 0, 4, 5], :]\n actual = Dataset.from_dataframe(df2)\n assert_identical(actual, expected)\n\n df3 = df.iloc[:4, :]\n expected3 = Dataset(\n {\"z\": ((\"x\", \"y\"), [[0, 1, 2], [3, np.nan, np.nan]])},\n coords={\"x\": [\"a\", \"b\"], \"y\": [1, 2, 3]},\n )\n actual = Dataset.from_dataframe(df3)\n assert_identical(actual, expected3)\n\n df_nonunique = df.iloc[[0, 0], :]\n with pytest.raises(ValueError, match=r\"non-unique MultiIndex\"):\n Dataset.from_dataframe(df_nonunique)\n\n def test_from_dataframe_unsorted_levels(self):\n # regression test for GH-4186\n index = pd.MultiIndex(\n levels=[[\"b\", \"a\"], [\"foo\"]], codes=[[0, 1], [0, 0]], names=[\"lev1\", \"lev2\"]\n )\n df = pd.DataFrame({\"c1\": [0, 2], \"c2\": [1, 3]}, index=index)\n expected = Dataset(\n {\n \"c1\": ((\"lev1\", \"lev2\"), [[0], [2]]),\n \"c2\": ((\"lev1\", \"lev2\"), [[1], [3]]),\n },\n coords={\"lev1\": [\"b\", \"a\"], \"lev2\": [\"foo\"]},\n )\n actual = Dataset.from_dataframe(df)\n assert_identical(actual, expected)\n\n def test_from_dataframe_non_unique_columns(self):\n # regression test for GH449\n df = pd.DataFrame(np.zeros((2, 2)))\n df.columns = [\"foo\", \"foo\"]\n with pytest.raises(ValueError, 
match=r\"non-unique columns\"):\n Dataset.from_dataframe(df)\n\n def test_convert_dataframe_with_many_types_and_multiindex(self):\n # regression test for GH737\n df = pd.DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.Categorical(list(\"abc\")),\n \"g\": pd.date_range(\"20130101\", periods=3),\n \"h\": pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n }\n )\n df.index = pd.MultiIndex.from_product([[\"a\"], range(3)], names=[\"one\", \"two\"])\n roundtripped = Dataset.from_dataframe(df).to_dataframe()\n # we can't do perfectly, but we should be at least as faithful as\n # np.asarray\n expected = df.apply(np.asarray)\n assert roundtripped.equals(expected)\n\n def test_to_and_from_dict(self):\n # <xarray.Dataset>\n # Dimensions: (t: 10)\n # Coordinates:\n # * t (t) <U1 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'\n # Data variables:\n # a (t) float64 0.6916 -1.056 -1.163 0.9792 -0.7865 ...\n # b (t) float64 1.32 0.1954 1.91 1.39 0.519 -0.2772 ...\n x = np.random.randn(10)\n y = np.random.randn(10)\n t = list(\"abcdefghij\")\n ds = Dataset({\"a\": (\"t\", x), \"b\": (\"t\", y), \"t\": (\"t\", t)})\n expected = {\n \"coords\": {\"t\": {\"dims\": (\"t\",), \"data\": t, \"attrs\": {}}},\n \"attrs\": {},\n \"dims\": {\"t\": 10},\n \"data_vars\": {\n \"a\": {\"dims\": (\"t\",), \"data\": x.tolist(), \"attrs\": {}},\n \"b\": {\"dims\": (\"t\",), \"data\": y.tolist(), \"attrs\": {}},\n },\n }\n\n actual = ds.to_dict()\n\n # check that they are identical\n assert expected == actual\n\n # check roundtrip\n assert_identical(ds, Dataset.from_dict(actual))\n\n # check the data=False option\n expected_no_data = expected.copy()\n del expected_no_data[\"coords\"][\"t\"][\"data\"]\n del expected_no_data[\"data_vars\"][\"a\"][\"data\"]\n del expected_no_data[\"data_vars\"][\"b\"][\"data\"]\n endiantype = \"<U1\" if sys.byteorder == \"little\" else \">U1\"\n expected_no_data[\"coords\"][\"t\"].update({\"dtype\": endiantype, \"shape\": (10,)})\n expected_no_data[\"data_vars\"][\"a\"].update({\"dtype\": \"float64\", \"shape\": (10,)})\n expected_no_data[\"data_vars\"][\"b\"].update({\"dtype\": \"float64\", \"shape\": (10,)})\n actual_no_data = ds.to_dict(data=False)\n assert expected_no_data == actual_no_data\n\n # verify coords are included roundtrip\n expected_ds = ds.set_coords(\"b\")\n actual = Dataset.from_dict(expected_ds.to_dict())\n\n assert_identical(expected_ds, actual)\n\n # test some incomplete dicts:\n # this one has no attrs field, the dims are strings, and x, y are\n # np.arrays\n\n d = {\n \"coords\": {\"t\": {\"dims\": \"t\", \"data\": t}},\n \"dims\": \"t\",\n \"data_vars\": {\"a\": {\"dims\": \"t\", \"data\": x}, \"b\": {\"dims\": \"t\", \"data\": y}},\n }\n assert_identical(ds, Dataset.from_dict(d))\n\n # this is kind of a flattened version with no coords, or data_vars\n d = {\n \"a\": {\"dims\": \"t\", \"data\": x},\n \"t\": {\"data\": t, \"dims\": \"t\"},\n \"b\": {\"dims\": \"t\", \"data\": y},\n }\n assert_identical(ds, Dataset.from_dict(d))\n\n # this one is missing some necessary information\n d = {\n \"a\": {\"data\": x},\n \"t\": {\"data\": t, \"dims\": \"t\"},\n \"b\": {\"dims\": \"t\", \"data\": y},\n }\n with pytest.raises(\n ValueError, match=r\"cannot convert dict without the key 'dims'\"\n ):\n Dataset.from_dict(d)\n\n def test_to_and_from_dict_with_time_dim(self):\n x = np.random.randn(10, 3)\n y = np.random.randn(10, 3)\n t 
= pd.date_range(\"20130101\", periods=10)\n lat = [77.7, 83.2, 76]\n ds = Dataset(\n {\n \"a\": ([\"t\", \"lat\"], x),\n \"b\": ([\"t\", \"lat\"], y),\n \"t\": (\"t\", t),\n \"lat\": (\"lat\", lat),\n }\n )\n roundtripped = Dataset.from_dict(ds.to_dict())\n assert_identical(ds, roundtripped)\n\n def test_to_and_from_dict_with_nan_nat(self):\n x = np.random.randn(10, 3)\n y = np.random.randn(10, 3)\n y[2] = np.nan\n t = pd.Series(pd.date_range(\"20130101\", periods=10))\n t[2] = np.nan\n\n lat = [77.7, 83.2, 76]\n ds = Dataset(\n {\n \"a\": ([\"t\", \"lat\"], x),\n \"b\": ([\"t\", \"lat\"], y),\n \"t\": (\"t\", t),\n \"lat\": (\"lat\", lat),\n }\n )\n roundtripped = Dataset.from_dict(ds.to_dict())\n assert_identical(ds, roundtripped)\n\n def test_to_dict_with_numpy_attrs(self):\n # this doesn't need to roundtrip\n x = np.random.randn(10)\n y = np.random.randn(10)\n t = list(\"abcdefghij\")\n attrs = {\n \"created\": np.float64(1998),\n \"coords\": np.array([37, -110.1, 100]),\n \"maintainer\": \"bar\",\n }\n ds = Dataset({\"a\": (\"t\", x, attrs), \"b\": (\"t\", y, attrs), \"t\": (\"t\", t)})\n expected_attrs = {\n \"created\": attrs[\"created\"].item(),\n \"coords\": attrs[\"coords\"].tolist(),\n \"maintainer\": \"bar\",\n }\n actual = ds.to_dict()\n\n # check that they are identical\n assert expected_attrs == actual[\"data_vars\"][\"a\"][\"attrs\"]\n\n def test_pickle(self):\n data = create_test_data()\n roundtripped = pickle.loads(pickle.dumps(data))\n assert_identical(data, roundtripped)\n # regression test for #167:\n assert data.dims == roundtripped.dims\n\n def test_lazy_load(self):\n store = InaccessibleVariableDataStore()\n create_test_data().dump_to_store(store)\n\n for decode_cf in [True, False]:\n ds = open_dataset(store, decode_cf=decode_cf)\n with pytest.raises(UnexpectedDataAccess):\n ds.load()\n with pytest.raises(UnexpectedDataAccess):\n ds[\"var1\"].values\n\n # these should not raise UnexpectedDataAccess:\n ds.isel(time=10)\n ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)\n\n def test_dropna(self):\n x = np.random.randn(4, 4)\n x[::2, 0] = np.nan\n y = np.random.randn(4)\n y[-1] = np.nan\n ds = Dataset({\"foo\": ((\"a\", \"b\"), x), \"bar\": ((\"b\", y))})\n\n expected = ds.isel(a=slice(1, None, 2))\n actual = ds.dropna(\"a\")\n assert_identical(actual, expected)\n\n expected = ds.isel(b=slice(1, 3))\n actual = ds.dropna(\"b\")\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"b\", subset=[\"foo\", \"bar\"])\n assert_identical(actual, expected)\n\n expected = ds.isel(b=slice(1, None))\n actual = ds.dropna(\"b\", subset=[\"foo\"])\n assert_identical(actual, expected)\n\n expected = ds.isel(b=slice(3))\n actual = ds.dropna(\"b\", subset=[\"bar\"])\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"a\", subset=[])\n assert_identical(actual, ds)\n\n actual = ds.dropna(\"a\", subset=[\"bar\"])\n assert_identical(actual, ds)\n\n actual = ds.dropna(\"a\", how=\"all\")\n assert_identical(actual, ds)\n\n actual = ds.dropna(\"b\", how=\"all\", subset=[\"bar\"])\n expected = ds.isel(b=[0, 1, 2])\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"b\", thresh=1, subset=[\"bar\"])\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"b\", thresh=2)\n assert_identical(actual, ds)\n\n actual = ds.dropna(\"b\", thresh=4)\n expected = ds.isel(b=[1, 2, 3])\n assert_identical(actual, expected)\n\n actual = ds.dropna(\"a\", thresh=3)\n expected = ds.isel(a=[1, 3])\n assert_identical(actual, ds)\n\n with pytest.raises(ValueError, match=r\"a 
single dataset dimension\"):\n ds.dropna(\"foo\")\n with pytest.raises(ValueError, match=r\"invalid how\"):\n ds.dropna(\"a\", how=\"somehow\")\n with pytest.raises(TypeError, match=r\"must specify how or thresh\"):\n ds.dropna(\"a\", how=None)\n\n def test_fillna(self):\n ds = Dataset({\"a\": (\"x\", [np.nan, 1, np.nan, 3])}, {\"x\": [0, 1, 2, 3]})\n\n # fill with -1\n actual = ds.fillna(-1)\n expected = Dataset({\"a\": (\"x\", [-1, 1, -1, 3])}, {\"x\": [0, 1, 2, 3]})\n assert_identical(expected, actual)\n\n actual = ds.fillna({\"a\": -1})\n assert_identical(expected, actual)\n\n other = Dataset({\"a\": -1})\n actual = ds.fillna(other)\n assert_identical(expected, actual)\n\n actual = ds.fillna({\"a\": other.a})\n assert_identical(expected, actual)\n\n # fill with range(4)\n b = DataArray(range(4), coords=[(\"x\", range(4))])\n actual = ds.fillna(b)\n expected = b.rename(\"a\").to_dataset()\n assert_identical(expected, actual)\n\n actual = ds.fillna(expected)\n assert_identical(expected, actual)\n\n actual = ds.fillna(range(4))\n assert_identical(expected, actual)\n\n actual = ds.fillna(b[:3])\n assert_identical(expected, actual)\n\n # okay to only include some data variables\n ds[\"b\"] = np.nan\n actual = ds.fillna({\"a\": -1})\n expected = Dataset(\n {\"a\": (\"x\", [-1, 1, -1, 3]), \"b\": np.nan}, {\"x\": [0, 1, 2, 3]}\n )\n assert_identical(expected, actual)\n\n # but new data variables is not okay\n with pytest.raises(ValueError, match=r\"must be contained\"):\n ds.fillna({\"x\": 0})\n\n # empty argument should be OK\n result = ds.fillna({})\n assert_identical(ds, result)\n\n result = ds.fillna(Dataset(coords={\"c\": 42}))\n expected = ds.assign_coords(c=42)\n assert_identical(expected, result)\n\n # groupby\n expected = Dataset({\"a\": (\"x\", range(4))}, {\"x\": [0, 1, 2, 3]})\n for target in [ds, expected]:\n target.coords[\"b\"] = (\"x\", [0, 0, 1, 1])\n actual = ds.groupby(\"b\").fillna(DataArray([0, 2], dims=\"b\"))\n assert_identical(expected, actual)\n\n actual = ds.groupby(\"b\").fillna(Dataset({\"a\": (\"b\", [0, 2])}))\n assert_identical(expected, actual)\n\n # attrs with groupby\n ds.attrs[\"attr\"] = \"ds\"\n ds.a.attrs[\"attr\"] = \"da\"\n actual = ds.groupby(\"b\").fillna(Dataset({\"a\": (\"b\", [0, 2])}))\n assert actual.attrs == ds.attrs\n assert actual.a.name == \"a\"\n assert actual.a.attrs == ds.a.attrs\n\n da = DataArray(range(5), name=\"a\", attrs={\"attr\": \"da\"})\n actual = da.fillna(1)\n assert actual.name == \"a\"\n assert actual.attrs == da.attrs\n\n ds = Dataset({\"a\": da}, attrs={\"attr\": \"ds\"})\n actual = ds.fillna({\"a\": 1})\n assert actual.attrs == ds.attrs\n assert actual.a.name == \"a\"\n assert actual.a.attrs == ds.a.attrs\n\n @pytest.mark.parametrize(\n \"func\", [lambda x: x.clip(0, 1), lambda x: np.float64(1.0) * x, np.abs, abs]\n )\n def test_propagate_attrs(self, func):\n\n da = DataArray(range(5), name=\"a\", attrs={\"attr\": \"da\"})\n ds = Dataset({\"a\": da}, attrs={\"attr\": \"ds\"})\n\n # test defaults\n assert func(ds).attrs == ds.attrs\n with set_options(keep_attrs=False):\n assert func(ds).attrs != ds.attrs\n assert func(ds).a.attrs != ds.a.attrs\n\n with set_options(keep_attrs=False):\n assert func(ds).attrs != ds.attrs\n assert func(ds).a.attrs != ds.a.attrs\n\n with set_options(keep_attrs=True):\n assert func(ds).attrs == ds.attrs\n assert func(ds).a.attrs == ds.a.attrs\n\n def test_where(self):\n ds = Dataset({\"a\": (\"x\", range(5))})\n expected = Dataset({\"a\": (\"x\", [np.nan, np.nan, 2, 3, 4])})\n actual = ds.where(ds 
> 1)\n assert_identical(expected, actual)\n\n actual = ds.where(ds.a > 1)\n assert_identical(expected, actual)\n\n actual = ds.where(ds.a.values > 1)\n assert_identical(expected, actual)\n\n actual = ds.where(True)\n assert_identical(ds, actual)\n\n expected = ds.copy(deep=True)\n expected[\"a\"].values = [np.nan] * 5\n actual = ds.where(False)\n assert_identical(expected, actual)\n\n # 2d\n ds = Dataset({\"a\": ((\"x\", \"y\"), [[0, 1], [2, 3]])})\n expected = Dataset({\"a\": ((\"x\", \"y\"), [[np.nan, 1], [2, 3]])})\n actual = ds.where(ds > 0)\n assert_identical(expected, actual)\n\n # groupby\n ds = Dataset({\"a\": (\"x\", range(5))}, {\"c\": (\"x\", [0, 0, 1, 1, 1])})\n cond = Dataset({\"a\": (\"c\", [True, False])})\n expected = ds.copy(deep=True)\n expected[\"a\"].values = [0, 1] + [np.nan] * 3\n actual = ds.groupby(\"c\").where(cond)\n assert_identical(expected, actual)\n\n # attrs with groupby\n ds.attrs[\"attr\"] = \"ds\"\n ds.a.attrs[\"attr\"] = \"da\"\n actual = ds.groupby(\"c\").where(cond)\n assert actual.attrs == ds.attrs\n assert actual.a.name == \"a\"\n assert actual.a.attrs == ds.a.attrs\n\n # attrs\n da = DataArray(range(5), name=\"a\", attrs={\"attr\": \"da\"})\n actual = da.where(da.values > 1)\n assert actual.name == \"a\"\n assert actual.attrs == da.attrs\n\n ds = Dataset({\"a\": da}, attrs={\"attr\": \"ds\"})\n actual = ds.where(ds > 0)\n assert actual.attrs == ds.attrs\n assert actual.a.name == \"a\"\n assert actual.a.attrs == ds.a.attrs\n\n # lambda\n ds = Dataset({\"a\": (\"x\", range(5))})\n expected = Dataset({\"a\": (\"x\", [np.nan, np.nan, 2, 3, 4])})\n actual = ds.where(lambda x: x > 1)\n assert_identical(expected, actual)\n\n def test_where_other(self):\n ds = Dataset({\"a\": (\"x\", range(5))}, {\"x\": range(5)})\n expected = Dataset({\"a\": (\"x\", [-1, -1, 2, 3, 4])}, {\"x\": range(5)})\n actual = ds.where(ds > 1, -1)\n assert_equal(expected, actual)\n assert actual.a.dtype == int\n\n actual = ds.where(lambda x: x > 1, -1)\n assert_equal(expected, actual)\n\n with pytest.raises(ValueError, match=r\"cannot set\"):\n ds.where(ds > 1, other=0, drop=True)\n\n with pytest.raises(ValueError, match=r\"indexes .* are not equal\"):\n ds.where(ds > 1, ds.isel(x=slice(3)))\n\n with pytest.raises(ValueError, match=r\"exact match required\"):\n ds.where(ds > 1, ds.assign(b=2))\n\n def test_where_drop(self):\n # if drop=True\n\n # 1d\n # data array case\n array = DataArray(range(5), coords=[range(5)], dims=[\"x\"])\n expected = DataArray(range(5)[2:], coords=[range(5)[2:]], dims=[\"x\"])\n actual = array.where(array > 1, drop=True)\n assert_identical(expected, actual)\n\n # dataset case\n ds = Dataset({\"a\": array})\n expected = Dataset({\"a\": expected})\n\n actual = ds.where(ds > 1, drop=True)\n assert_identical(expected, actual)\n\n actual = ds.where(ds.a > 1, drop=True)\n assert_identical(expected, actual)\n\n with pytest.raises(TypeError, match=r\"must be a\"):\n ds.where(np.arange(5) > 1, drop=True)\n\n # 1d with odd coordinates\n array = DataArray(\n np.array([2, 7, 1, 8, 3]), coords=[np.array([3, 1, 4, 5, 9])], dims=[\"x\"]\n )\n expected = DataArray(\n np.array([7, 8, 3]), coords=[np.array([1, 5, 9])], dims=[\"x\"]\n )\n actual = array.where(array > 2, drop=True)\n assert_identical(expected, actual)\n\n # 1d multiple variables\n ds = Dataset({\"a\": ((\"x\"), [0, 1, 2, 3]), \"b\": ((\"x\"), [4, 5, 6, 7])})\n expected = Dataset(\n {\"a\": ((\"x\"), [np.nan, 1, 2, 3]), \"b\": ((\"x\"), [4, 5, 6, np.nan])}\n )\n actual = ds.where((ds > 0) & (ds < 7), 
drop=True)\n assert_identical(expected, actual)\n\n # 2d\n ds = Dataset({\"a\": ((\"x\", \"y\"), [[0, 1], [2, 3]])})\n expected = Dataset({\"a\": ((\"x\", \"y\"), [[np.nan, 1], [2, 3]])})\n actual = ds.where(ds > 0, drop=True)\n assert_identical(expected, actual)\n\n # 2d with odd coordinates\n ds = Dataset(\n {\"a\": ((\"x\", \"y\"), [[0, 1], [2, 3]])},\n coords={\n \"x\": [4, 3],\n \"y\": [1, 2],\n \"z\": ([\"x\", \"y\"], [[np.e, np.pi], [np.pi * np.e, np.pi * 3]]),\n },\n )\n expected = Dataset(\n {\"a\": ((\"x\", \"y\"), [[3]])},\n coords={\"x\": [3], \"y\": [2], \"z\": ([\"x\", \"y\"], [[np.pi * 3]])},\n )\n actual = ds.where(ds > 2, drop=True)\n assert_identical(expected, actual)\n\n # 2d multiple variables\n ds = Dataset(\n {\"a\": ((\"x\", \"y\"), [[0, 1], [2, 3]]), \"b\": ((\"x\", \"y\"), [[4, 5], [6, 7]])}\n )\n expected = Dataset(\n {\n \"a\": ((\"x\", \"y\"), [[np.nan, 1], [2, 3]]),\n \"b\": ((\"x\", \"y\"), [[4, 5], [6, 7]]),\n }\n )\n actual = ds.where(ds > 0, drop=True)\n assert_identical(expected, actual)\n\n def test_where_drop_empty(self):\n # regression test for GH1341\n array = DataArray(np.random.rand(100, 10), dims=[\"nCells\", \"nVertLevels\"])\n mask = DataArray(np.zeros((100,), dtype=\"bool\"), dims=\"nCells\")\n actual = array.where(mask, drop=True)\n expected = DataArray(np.zeros((0, 10)), dims=[\"nCells\", \"nVertLevels\"])\n assert_identical(expected, actual)\n\n def test_where_drop_no_indexes(self):\n ds = Dataset({\"foo\": (\"x\", [0.0, 1.0])})\n expected = Dataset({\"foo\": (\"x\", [1.0])})\n actual = ds.where(ds == 1, drop=True)\n assert_identical(expected, actual)\n\n def test_reduce(self):\n data = create_test_data()\n\n assert len(data.mean().coords) == 0\n\n actual = data.max()\n expected = Dataset({k: v.max() for k, v in data.data_vars.items()})\n assert_equal(expected, actual)\n\n assert_equal(data.min(dim=[\"dim1\"]), data.min(dim=\"dim1\"))\n\n for reduct, expected in [\n (\"dim2\", [\"dim3\", \"time\", \"dim1\"]),\n ([\"dim2\", \"time\"], [\"dim3\", \"dim1\"]),\n ((\"dim2\", \"time\"), [\"dim3\", \"dim1\"]),\n ((), [\"dim2\", \"dim3\", \"time\", \"dim1\"]),\n ]:\n actual = list(data.min(dim=reduct).dims)\n assert actual == expected\n\n assert_equal(data.mean(dim=[]), data)\n\n with pytest.raises(ValueError):\n data.mean(axis=0)\n\n def test_reduce_coords(self):\n # regression test for GH1470\n data = xr.Dataset({\"a\": (\"x\", [1, 2, 3])}, coords={\"b\": 4})\n expected = xr.Dataset({\"a\": 2}, coords={\"b\": 4})\n actual = data.mean(\"x\")\n assert_identical(actual, expected)\n\n # should be consistent\n actual = data[\"a\"].mean(\"x\").to_dataset()\n assert_identical(actual, expected)\n\n def test_mean_uint_dtype(self):\n data = xr.Dataset(\n {\n \"a\": ((\"x\", \"y\"), np.arange(6).reshape(3, 2).astype(\"uint\")),\n \"b\": ((\"x\",), np.array([0.1, 0.2, np.nan])),\n }\n )\n actual = data.mean(\"x\", skipna=True)\n expected = xr.Dataset(\n {\"a\": data[\"a\"].mean(\"x\"), \"b\": data[\"b\"].mean(\"x\", skipna=True)}\n )\n assert_identical(actual, expected)\n\n def test_reduce_bad_dim(self):\n data = create_test_data()\n with pytest.raises(ValueError, match=r\"Dataset does not contain\"):\n data.mean(dim=\"bad_dim\")\n\n def test_reduce_cumsum(self):\n data = xr.Dataset(\n {\"a\": 1, \"b\": (\"x\", [1, 2]), \"c\": ((\"x\", \"y\"), [[np.nan, 3], [0, 4]])}\n )\n assert_identical(data.fillna(0), data.cumsum(\"y\"))\n\n expected = xr.Dataset(\n {\"a\": 1, \"b\": (\"x\", [1, 3]), \"c\": ((\"x\", \"y\"), [[0, 3], [0, 7]])}\n )\n 
assert_identical(expected, data.cumsum())\n\n @pytest.mark.parametrize(\n \"reduct, expected\",\n [\n (\"dim1\", [\"dim2\", \"dim3\", \"time\", \"dim1\"]),\n (\"dim2\", [\"dim3\", \"time\", \"dim1\", \"dim2\"]),\n (\"dim3\", [\"dim2\", \"time\", \"dim1\", \"dim3\"]),\n (\"time\", [\"dim2\", \"dim3\", \"dim1\"]),\n ],\n )\n @pytest.mark.parametrize(\"func\", [\"cumsum\", \"cumprod\"])\n def test_reduce_cumsum_test_dims(self, reduct, expected, func):\n data = create_test_data()\n with pytest.raises(ValueError, match=r\"Dataset does not contain\"):\n getattr(data, func)(dim=\"bad_dim\")\n\n # ensure dimensions are correct\n actual = getattr(data, func)(dim=reduct).dims\n assert list(actual) == expected\n\n def test_reduce_non_numeric(self):\n data1 = create_test_data(seed=44)\n data2 = create_test_data(seed=44)\n add_vars = {\"var4\": [\"dim1\", \"dim2\"]}\n for v, dims in sorted(add_vars.items()):\n size = tuple(data1.dims[d] for d in dims)\n data = np.random.randint(0, 100, size=size).astype(np.str_)\n data1[v] = (dims, data, {\"foo\": \"variable\"})\n\n assert \"var4\" not in data1.mean()\n assert_equal(data1.mean(), data2.mean())\n assert_equal(data1.mean(dim=\"dim1\"), data2.mean(dim=\"dim1\"))\n\n @pytest.mark.filterwarnings(\n \"ignore:Once the behaviour of DataArray:DeprecationWarning\"\n )\n def test_reduce_strings(self):\n expected = Dataset({\"x\": \"a\"})\n ds = Dataset({\"x\": (\"y\", [\"a\", \"b\"])})\n ds.coords[\"y\"] = [-10, 10]\n actual = ds.min()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": \"b\"})\n actual = ds.max()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 0})\n actual = ds.argmin()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 1})\n actual = ds.argmax()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": -10})\n actual = ds.idxmin()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 10})\n actual = ds.idxmax()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": b\"a\"})\n ds = Dataset({\"x\": (\"y\", np.array([\"a\", \"b\"], \"S1\"))})\n actual = ds.min()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": \"a\"})\n ds = Dataset({\"x\": (\"y\", np.array([\"a\", \"b\"], \"U1\"))})\n actual = ds.min()\n assert_identical(expected, actual)\n\n def test_reduce_dtypes(self):\n # regression test for GH342\n expected = Dataset({\"x\": 1})\n actual = Dataset({\"x\": True}).sum()\n assert_identical(expected, actual)\n\n # regression test for GH505\n expected = Dataset({\"x\": 3})\n actual = Dataset({\"x\": (\"y\", np.array([1, 2], \"uint16\"))}).sum()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 1 + 1j})\n actual = Dataset({\"x\": (\"y\", [1, 1j])}).sum()\n assert_identical(expected, actual)\n\n def test_reduce_keep_attrs(self):\n data = create_test_data()\n _attrs = {\"attr1\": \"value1\", \"attr2\": 2929}\n\n attrs = dict(_attrs)\n data.attrs = attrs\n\n # Test dropped attrs\n ds = data.mean()\n assert ds.attrs == {}\n for v in ds.data_vars.values():\n assert v.attrs == {}\n\n # Test kept attrs\n ds = data.mean(keep_attrs=True)\n assert ds.attrs == attrs\n for k, v in ds.data_vars.items():\n assert v.attrs == data[k].attrs\n\n @pytest.mark.filterwarnings(\n \"ignore:Once the behaviour of DataArray:DeprecationWarning\"\n )\n def test_reduce_argmin(self):\n # regression test for #205\n ds = Dataset({\"a\": (\"x\", [0, 1])})\n expected = Dataset({\"a\": ([], 0)})\n actual = ds.argmin()\n assert_identical(expected, 
actual)\n\n actual = ds.argmin(\"x\")\n assert_identical(expected, actual)\n\n def test_reduce_scalars(self):\n ds = Dataset({\"x\": (\"a\", [2, 2]), \"y\": 2, \"z\": (\"b\", [2])})\n expected = Dataset({\"x\": 0, \"y\": 0, \"z\": 0})\n actual = ds.var()\n assert_identical(expected, actual)\n\n expected = Dataset({\"x\": 0, \"y\": 0, \"z\": (\"b\", [0])})\n actual = ds.var(\"a\")\n assert_identical(expected, actual)\n\n def test_reduce_only_one_axis(self):\n def mean_only_one_axis(x, axis):\n if not isinstance(axis, integer_types):\n raise TypeError(\"non-integer axis\")\n return x.mean(axis)\n\n ds = Dataset({\"a\": ([\"x\", \"y\"], [[0, 1, 2, 3, 4]])})\n expected = Dataset({\"a\": (\"x\", [2])})\n actual = ds.reduce(mean_only_one_axis, \"y\")\n assert_identical(expected, actual)\n\n with pytest.raises(\n TypeError, match=r\"missing 1 required positional argument: 'axis'\"\n ):\n ds.reduce(mean_only_one_axis)\n\n def test_reduce_no_axis(self):\n def total_sum(x):\n return np.sum(x.flatten())\n\n ds = Dataset({\"a\": ([\"x\", \"y\"], [[0, 1, 2, 3, 4]])})\n expected = Dataset({\"a\": ((), 10)})\n actual = ds.reduce(total_sum)\n assert_identical(expected, actual)\n\n with pytest.raises(TypeError, match=r\"unexpected keyword argument 'axis'\"):\n ds.reduce(total_sum, dim=\"x\")\n\n def test_reduce_keepdims(self):\n ds = Dataset(\n {\"a\": ([\"x\", \"y\"], [[0, 1, 2, 3, 4]])},\n coords={\n \"y\": [0, 1, 2, 3, 4],\n \"x\": [0],\n \"lat\": ([\"x\", \"y\"], [[0, 1, 2, 3, 4]]),\n \"c\": -999.0,\n },\n )\n\n # Shape should match behaviour of numpy reductions with keepdims=True\n # Coordinates involved in the reduction should be removed\n actual = ds.mean(keepdims=True)\n expected = Dataset(\n {\"a\": ([\"x\", \"y\"], np.mean(ds.a, keepdims=True).data)}, coords={\"c\": ds.c}\n )\n assert_identical(expected, actual)\n\n actual = ds.mean(\"x\", keepdims=True)\n expected = Dataset(\n {\"a\": ([\"x\", \"y\"], np.mean(ds.a, axis=0, keepdims=True).data)},\n coords={\"y\": ds.y, \"c\": ds.c},\n )\n assert_identical(expected, actual)\n\n @pytest.mark.parametrize(\"skipna\", [True, False])\n @pytest.mark.parametrize(\"q\", [0.25, [0.50], [0.25, 0.75]])\n def test_quantile(self, q, skipna):\n ds = create_test_data(seed=123)\n\n for dim in [None, \"dim1\", [\"dim1\"]]:\n ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)\n if is_scalar(q):\n assert \"quantile\" not in ds_quantile.dims\n else:\n assert \"quantile\" in ds_quantile.dims\n\n for var, dar in ds.data_vars.items():\n assert var in ds_quantile\n assert_identical(\n ds_quantile[var], dar.quantile(q, dim=dim, skipna=skipna)\n )\n dim = [\"dim1\", \"dim2\"]\n ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)\n assert \"dim3\" in ds_quantile.dims\n assert all(d not in ds_quantile.dims for d in dim)\n\n @pytest.mark.parametrize(\"skipna\", [True, False])\n def test_quantile_skipna(self, skipna):\n q = 0.1\n dim = \"time\"\n ds = Dataset({\"a\": ([dim], np.arange(0, 11))})\n ds = ds.where(ds >= 1)\n\n result = ds.quantile(q=q, dim=dim, skipna=skipna)\n\n value = 1.9 if skipna else np.nan\n expected = Dataset({\"a\": value}, coords={\"quantile\": q})\n assert_identical(result, expected)\n\n @requires_bottleneck\n def test_rank(self):\n ds = create_test_data(seed=1234)\n # only ds.var3 depends on dim3\n z = ds.rank(\"dim3\")\n assert [\"var3\"] == list(z.data_vars)\n # same as dataarray version\n x = z.var3\n y = ds.var3.rank(\"dim3\")\n assert_equal(x, y)\n # coordinates stick\n assert list(z.coords) == list(ds.coords)\n assert list(x.coords) == 
list(y.coords)\n # invalid dim\n with pytest.raises(ValueError, match=r\"does not contain\"):\n x.rank(\"invalid_dim\")\n\n def test_count(self):\n ds = Dataset({\"x\": (\"a\", [np.nan, 1]), \"y\": 0, \"z\": np.nan})\n expected = Dataset({\"x\": 1, \"y\": 1, \"z\": 0})\n actual = ds.count()\n assert_identical(expected, actual)\n\n def test_map(self):\n data = create_test_data()\n data.attrs[\"foo\"] = \"bar\"\n\n assert_identical(data.map(np.mean), data.mean())\n\n expected = data.mean(keep_attrs=True)\n actual = data.map(lambda x: x.mean(keep_attrs=True), keep_attrs=True)\n assert_identical(expected, actual)\n\n assert_identical(data.map(lambda x: x, keep_attrs=True), data.drop_vars(\"time\"))\n\n def scale(x, multiple=1):\n return multiple * x\n\n actual = data.map(scale, multiple=2)\n assert_equal(actual[\"var1\"], 2 * data[\"var1\"])\n assert_identical(actual[\"numbers\"], data[\"numbers\"])\n\n actual = data.map(np.asarray)\n expected = data.drop_vars(\"time\") # time is not used on a data var\n assert_equal(expected, actual)\n\n def test_apply_pending_deprecated_map(self):\n data = create_test_data()\n data.attrs[\"foo\"] = \"bar\"\n\n with pytest.warns(PendingDeprecationWarning):\n assert_identical(data.apply(np.mean), data.mean())\n\n def make_example_math_dataset(self):\n variables = {\n \"bar\": (\"x\", np.arange(100, 400, 100)),\n \"foo\": ((\"x\", \"y\"), 1.0 * np.arange(12).reshape(3, 4)),\n }\n coords = {\"abc\": (\"x\", [\"a\", \"b\", \"c\"]), \"y\": 10 * np.arange(4)}\n ds = Dataset(variables, coords)\n ds[\"foo\"][0, 0] = np.nan\n return ds\n\n def test_dataset_number_math(self):\n ds = self.make_example_math_dataset()\n\n assert_identical(ds, +ds)\n assert_identical(ds, ds + 0)\n assert_identical(ds, 0 + ds)\n assert_identical(ds, ds + np.array(0))\n assert_identical(ds, np.array(0) + ds)\n\n actual = ds.copy(deep=True)\n actual += 0\n assert_identical(ds, actual)\n\n def test_unary_ops(self):\n ds = self.make_example_math_dataset()\n\n assert_identical(ds.map(abs), abs(ds))\n assert_identical(ds.map(lambda x: x + 4), ds + 4)\n\n for func in [\n lambda x: x.isnull(),\n lambda x: x.round(),\n lambda x: x.astype(int),\n ]:\n assert_identical(ds.map(func), func(ds))\n\n assert_identical(ds.isnull(), ~ds.notnull())\n\n # don't actually patch these methods in\n with pytest.raises(AttributeError):\n ds.item\n with pytest.raises(AttributeError):\n ds.searchsorted\n\n def test_dataset_array_math(self):\n ds = self.make_example_math_dataset()\n\n expected = ds.map(lambda x: x - ds[\"foo\"])\n assert_identical(expected, ds - ds[\"foo\"])\n assert_identical(expected, -ds[\"foo\"] + ds)\n assert_identical(expected, ds - ds[\"foo\"].variable)\n assert_identical(expected, -ds[\"foo\"].variable + ds)\n actual = ds.copy(deep=True)\n actual -= ds[\"foo\"]\n assert_identical(expected, actual)\n\n expected = ds.map(lambda x: x + ds[\"bar\"])\n assert_identical(expected, ds + ds[\"bar\"])\n actual = ds.copy(deep=True)\n actual += ds[\"bar\"]\n assert_identical(expected, actual)\n\n expected = Dataset({\"bar\": ds[\"bar\"] + np.arange(3)})\n assert_identical(expected, ds[[\"bar\"]] + np.arange(3))\n assert_identical(expected, np.arange(3) + ds[[\"bar\"]])\n\n def test_dataset_dataset_math(self):\n ds = self.make_example_math_dataset()\n\n assert_identical(ds, ds + 0 * ds)\n assert_identical(ds, ds + {\"foo\": 0, \"bar\": 0})\n\n expected = ds.map(lambda x: 2 * x)\n assert_identical(expected, 2 * ds)\n assert_identical(expected, ds + ds)\n assert_identical(expected, ds + ds.data_vars)\n 
assert_identical(expected, ds + dict(ds.data_vars))\n\n actual = ds.copy(deep=True)\n expected_id = id(actual)\n actual += ds\n assert_identical(expected, actual)\n assert expected_id == id(actual)\n\n assert_identical(ds == ds, ds.notnull())\n\n subsampled = ds.isel(y=slice(2))\n expected = 2 * subsampled\n assert_identical(expected, subsampled + ds)\n assert_identical(expected, ds + subsampled)\n\n def test_dataset_math_auto_align(self):\n ds = self.make_example_math_dataset()\n subset = ds.isel(y=[1, 3])\n expected = 2 * subset\n actual = ds + subset\n assert_identical(expected, actual)\n\n actual = ds.isel(y=slice(1)) + ds.isel(y=slice(1, None))\n expected = 2 * ds.drop_sel(y=ds.y)\n assert_equal(actual, expected)\n\n actual = ds + ds[[\"bar\"]]\n expected = (2 * ds[[\"bar\"]]).merge(ds.coords)\n assert_identical(expected, actual)\n\n assert_identical(ds + Dataset(), ds.coords.to_dataset())\n assert_identical(Dataset() + Dataset(), Dataset())\n\n ds2 = Dataset(coords={\"bar\": 42})\n assert_identical(ds + ds2, ds.coords.merge(ds2))\n\n # maybe unary arithmetic with empty datasets should raise instead?\n assert_identical(Dataset() + 1, Dataset())\n\n actual = ds.copy(deep=True)\n other = ds.isel(y=slice(2))\n actual += other\n expected = ds + other.reindex_like(ds)\n assert_identical(expected, actual)\n\n def test_dataset_math_errors(self):\n ds = self.make_example_math_dataset()\n\n with pytest.raises(TypeError):\n ds[\"foo\"] += ds\n with pytest.raises(TypeError):\n ds[\"foo\"].variable += ds\n with pytest.raises(ValueError, match=r\"must have the same\"):\n ds += ds[[\"bar\"]]\n\n # verify we can rollback in-place operations if something goes wrong\n # nb. inplace datetime64 math actually will work with an integer array\n # but not floats thanks to numpy's inconsistent handling\n other = DataArray(np.datetime64(\"2000-01-01\"), coords={\"c\": 2})\n actual = ds.copy(deep=True)\n with pytest.raises(TypeError):\n actual += other\n assert_identical(actual, ds)\n\n def test_dataset_transpose(self):\n ds = Dataset(\n {\n \"a\": ((\"x\", \"y\"), np.random.randn(3, 4)),\n \"b\": ((\"y\", \"x\"), np.random.randn(4, 3)),\n },\n coords={\n \"x\": range(3),\n \"y\": range(4),\n \"xy\": ((\"x\", \"y\"), np.random.randn(3, 4)),\n },\n )\n\n actual = ds.transpose()\n expected = Dataset(\n {\"a\": ((\"y\", \"x\"), ds.a.values.T), \"b\": ((\"x\", \"y\"), ds.b.values.T)},\n coords={\n \"x\": ds.x.values,\n \"y\": ds.y.values,\n \"xy\": ((\"y\", \"x\"), ds.xy.values.T),\n },\n )\n assert_identical(expected, actual)\n\n actual = ds.transpose(...)\n expected = ds\n assert_identical(expected, actual)\n\n actual = ds.transpose(\"x\", \"y\")\n expected = ds.map(lambda x: x.transpose(\"x\", \"y\", transpose_coords=True))\n assert_identical(expected, actual)\n\n ds = create_test_data()\n actual = ds.transpose()\n for k in ds.variables:\n assert actual[k].dims[::-1] == ds[k].dims\n\n new_order = (\"dim2\", \"dim3\", \"dim1\", \"time\")\n actual = ds.transpose(*new_order)\n for k in ds.variables:\n expected_dims = tuple(d for d in new_order if d in ds[k].dims)\n assert actual[k].dims == expected_dims\n\n # same as above but with ellipsis\n new_order = (\"dim2\", \"dim3\", \"dim1\", \"time\")\n actual = ds.transpose(\"dim2\", \"dim3\", ...)\n for k in ds.variables:\n expected_dims = tuple(d for d in new_order if d in ds[k].dims)\n assert actual[k].dims == expected_dims\n\n with pytest.raises(ValueError, match=r\"permuted\"):\n ds.transpose(\"dim1\", \"dim2\", \"dim3\")\n with pytest.raises(ValueError, 
match=r\"permuted\"):\n ds.transpose(\"dim1\", \"dim2\", \"dim3\", \"time\", \"extra_dim\")\n\n assert \"T\" not in dir(ds)\n\n def test_dataset_ellipsis_transpose_different_ordered_vars(self):\n # https://github.com/pydata/xarray/issues/1081#issuecomment-544350457\n ds = Dataset(\n dict(\n a=((\"w\", \"x\", \"y\", \"z\"), np.ones((2, 3, 4, 5))),\n b=((\"x\", \"w\", \"y\", \"z\"), np.zeros((3, 2, 4, 5))),\n )\n )\n result = ds.transpose(..., \"z\", \"y\")\n assert list(result[\"a\"].dims) == list(\"wxzy\")\n assert list(result[\"b\"].dims) == list(\"xwzy\")\n\n def test_dataset_retains_period_index_on_transpose(self):\n\n ds = create_test_data()\n ds[\"time\"] = pd.period_range(\"2000-01-01\", periods=20)\n\n transposed = ds.transpose()\n\n assert isinstance(transposed.time.to_index(), pd.PeriodIndex)\n\n def test_dataset_diff_n1_simple(self):\n ds = Dataset({\"foo\": (\"x\", [5, 5, 6, 6])})\n actual = ds.diff(\"x\")\n expected = Dataset({\"foo\": (\"x\", [0, 1, 0])})\n assert_equal(expected, actual)\n\n def test_dataset_diff_n1_label(self):\n ds = Dataset({\"foo\": (\"x\", [5, 5, 6, 6])}, {\"x\": [0, 1, 2, 3]})\n actual = ds.diff(\"x\", label=\"lower\")\n expected = Dataset({\"foo\": (\"x\", [0, 1, 0])}, {\"x\": [0, 1, 2]})\n assert_equal(expected, actual)\n\n actual = ds.diff(\"x\", label=\"upper\")\n expected = Dataset({\"foo\": (\"x\", [0, 1, 0])}, {\"x\": [1, 2, 3]})\n assert_equal(expected, actual)\n\n def test_dataset_diff_n1(self):\n ds = create_test_data(seed=1)\n actual = ds.diff(\"dim2\")\n expected = {}\n expected[\"var1\"] = DataArray(\n np.diff(ds[\"var1\"].values, axis=1),\n {\"dim2\": ds[\"dim2\"].values[1:]},\n [\"dim1\", \"dim2\"],\n )\n expected[\"var2\"] = DataArray(\n np.diff(ds[\"var2\"].values, axis=1),\n {\"dim2\": ds[\"dim2\"].values[1:]},\n [\"dim1\", \"dim2\"],\n )\n expected[\"var3\"] = ds[\"var3\"]\n expected = Dataset(expected, coords={\"time\": ds[\"time\"].values})\n expected.coords[\"numbers\"] = (\"dim3\", ds[\"numbers\"].values)\n assert_equal(expected, actual)\n\n def test_dataset_diff_n2(self):\n ds = create_test_data(seed=1)\n actual = ds.diff(\"dim2\", n=2)\n expected = {}\n expected[\"var1\"] = DataArray(\n np.diff(ds[\"var1\"].values, axis=1, n=2),\n {\"dim2\": ds[\"dim2\"].values[2:]},\n [\"dim1\", \"dim2\"],\n )\n expected[\"var2\"] = DataArray(\n np.diff(ds[\"var2\"].values, axis=1, n=2),\n {\"dim2\": ds[\"dim2\"].values[2:]},\n [\"dim1\", \"dim2\"],\n )\n expected[\"var3\"] = ds[\"var3\"]\n expected = Dataset(expected, coords={\"time\": ds[\"time\"].values})\n expected.coords[\"numbers\"] = (\"dim3\", ds[\"numbers\"].values)\n assert_equal(expected, actual)\n\n def test_dataset_diff_exception_n_neg(self):\n ds = create_test_data(seed=1)\n with pytest.raises(ValueError, match=r\"must be non-negative\"):\n ds.diff(\"dim2\", n=-1)\n\n def test_dataset_diff_exception_label_str(self):\n ds = create_test_data(seed=1)\n with pytest.raises(ValueError, match=r\"'label' argument has to\"):\n ds.diff(\"dim2\", label=\"raise_me\")\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0, {\"foo\": -10}])\n def test_shift(self, fill_value):\n coords = {\"bar\": (\"x\", list(\"abc\")), \"x\": [-4, 3, 2]}\n attrs = {\"meta\": \"data\"}\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, coords, attrs)\n actual = ds.shift(x=1, fill_value=fill_value)\n if fill_value == dtypes.NA:\n # if we supply the default, we expect the missing value for a\n # float array\n fill_value = np.nan\n elif isinstance(fill_value, dict):\n fill_value = fill_value.get(\"foo\", 
np.nan)\n expected = Dataset({\"foo\": (\"x\", [fill_value, 1, 2])}, coords, attrs)\n assert_identical(expected, actual)\n\n with pytest.raises(ValueError, match=r\"dimensions\"):\n ds.shift(foo=123)\n\n def test_roll_coords(self):\n coords = {\"bar\": (\"x\", list(\"abc\")), \"x\": [-4, 3, 2]}\n attrs = {\"meta\": \"data\"}\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, coords, attrs)\n actual = ds.roll(x=1, roll_coords=True)\n\n ex_coords = {\"bar\": (\"x\", list(\"cab\")), \"x\": [2, -4, 3]}\n expected = Dataset({\"foo\": (\"x\", [3, 1, 2])}, ex_coords, attrs)\n assert_identical(expected, actual)\n\n with pytest.raises(ValueError, match=r\"dimensions\"):\n ds.roll(foo=123, roll_coords=True)\n\n def test_roll_no_coords(self):\n coords = {\"bar\": (\"x\", list(\"abc\")), \"x\": [-4, 3, 2]}\n attrs = {\"meta\": \"data\"}\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, coords, attrs)\n actual = ds.roll(x=1, roll_coords=False)\n\n expected = Dataset({\"foo\": (\"x\", [3, 1, 2])}, coords, attrs)\n assert_identical(expected, actual)\n\n with pytest.raises(ValueError, match=r\"dimensions\"):\n ds.roll(abc=321, roll_coords=False)\n\n def test_roll_coords_none(self):\n coords = {\"bar\": (\"x\", list(\"abc\")), \"x\": [-4, 3, 2]}\n attrs = {\"meta\": \"data\"}\n ds = Dataset({\"foo\": (\"x\", [1, 2, 3])}, coords, attrs)\n\n with pytest.warns(FutureWarning):\n actual = ds.roll(x=1, roll_coords=None)\n\n ex_coords = {\"bar\": (\"x\", list(\"cab\")), \"x\": [2, -4, 3]}\n expected = Dataset({\"foo\": (\"x\", [3, 1, 2])}, ex_coords, attrs)\n assert_identical(expected, actual)\n\n def test_roll_multidim(self):\n # regression test for 2445\n arr = xr.DataArray(\n [[1, 2, 3], [4, 5, 6]],\n coords={\"x\": range(3), \"y\": range(2)},\n dims=(\"y\", \"x\"),\n )\n actual = arr.roll(x=1, roll_coords=True)\n expected = xr.DataArray(\n [[3, 1, 2], [6, 4, 5]], coords=[(\"y\", [0, 1]), (\"x\", [2, 0, 1])]\n )\n assert_identical(expected, actual)\n\n def test_real_and_imag(self):\n attrs = {\"foo\": \"bar\"}\n ds = Dataset({\"x\": ((), 1 + 2j, attrs)}, attrs=attrs)\n\n expected_re = Dataset({\"x\": ((), 1, attrs)}, attrs=attrs)\n assert_identical(ds.real, expected_re)\n\n expected_im = Dataset({\"x\": ((), 2, attrs)}, attrs=attrs)\n assert_identical(ds.imag, expected_im)\n\n def test_setattr_raises(self):\n ds = Dataset({}, coords={\"scalar\": 1}, attrs={\"foo\": \"bar\"})\n with pytest.raises(AttributeError, match=r\"cannot set attr\"):\n ds.scalar = 2\n with pytest.raises(AttributeError, match=r\"cannot set attr\"):\n ds.foo = 2\n with pytest.raises(AttributeError, match=r\"cannot set attr\"):\n ds.other = 2\n\n def test_filter_by_attrs(self):\n precip = dict(standard_name=\"convective_precipitation_flux\")\n temp0 = dict(standard_name=\"air_potential_temperature\", height=\"0 m\")\n temp10 = dict(standard_name=\"air_potential_temperature\", height=\"10 m\")\n ds = Dataset(\n {\n \"temperature_0\": ([\"t\"], [0], temp0),\n \"temperature_10\": ([\"t\"], [0], temp10),\n \"precipitation\": ([\"t\"], [0], precip),\n },\n coords={\"time\": ([\"t\"], [0], dict(axis=\"T\", long_name=\"time_in_seconds\"))},\n )\n\n # Test return empty Dataset.\n ds.filter_by_attrs(standard_name=\"invalid_standard_name\")\n new_ds = ds.filter_by_attrs(standard_name=\"invalid_standard_name\")\n assert not bool(new_ds.data_vars)\n\n # Test return one DataArray.\n new_ds = ds.filter_by_attrs(standard_name=\"convective_precipitation_flux\")\n assert new_ds[\"precipitation\"].standard_name == \"convective_precipitation_flux\"\n\n 
assert_equal(new_ds[\"precipitation\"], ds[\"precipitation\"])\n\n # Test filter coordinates\n new_ds = ds.filter_by_attrs(long_name=\"time_in_seconds\")\n assert new_ds[\"time\"].long_name == \"time_in_seconds\"\n assert not bool(new_ds.data_vars)\n\n # Test return more than one DataArray.\n new_ds = ds.filter_by_attrs(standard_name=\"air_potential_temperature\")\n assert len(new_ds.data_vars) == 2\n for var in new_ds.data_vars:\n assert new_ds[var].standard_name == \"air_potential_temperature\"\n\n # Test callable.\n new_ds = ds.filter_by_attrs(height=lambda v: v is not None)\n assert len(new_ds.data_vars) == 2\n for var in new_ds.data_vars:\n assert new_ds[var].standard_name == \"air_potential_temperature\"\n\n new_ds = ds.filter_by_attrs(height=\"10 m\")\n assert len(new_ds.data_vars) == 1\n for var in new_ds.data_vars:\n assert new_ds[var].height == \"10 m\"\n\n # Test return empty Dataset due to conflicting filters\n new_ds = ds.filter_by_attrs(\n standard_name=\"convective_precipitation_flux\", height=\"0 m\"\n )\n assert not bool(new_ds.data_vars)\n\n # Test return one DataArray with two filter conditions\n new_ds = ds.filter_by_attrs(\n standard_name=\"air_potential_temperature\", height=\"0 m\"\n )\n for var in new_ds.data_vars:\n assert new_ds[var].standard_name == \"air_potential_temperature\"\n assert new_ds[var].height == \"0 m\"\n assert new_ds[var].height != \"10 m\"\n\n # Test return empty Dataset due to conflicting callables\n new_ds = ds.filter_by_attrs(\n standard_name=lambda v: False, height=lambda v: True\n )\n assert not bool(new_ds.data_vars)\n\n def test_binary_op_propagate_indexes(self):\n ds = Dataset(\n {\"d1\": DataArray([1, 2, 3], dims=[\"x\"], coords={\"x\": [10, 20, 30]})}\n )\n expected = ds.xindexes[\"x\"]\n actual = (ds * 2).xindexes[\"x\"]\n assert expected is actual\n\n def test_binary_op_join_setting(self):\n # arithmetic_join applies to data array coordinates\n missing_2 = xr.Dataset({\"x\": [0, 1]})\n missing_0 = xr.Dataset({\"x\": [1, 2]})\n with xr.set_options(arithmetic_join=\"outer\"):\n actual = missing_2 + missing_0\n expected = xr.Dataset({\"x\": [0, 1, 2]})\n assert_equal(actual, expected)\n\n # arithmetic join also applies to data_vars\n ds1 = xr.Dataset({\"foo\": 1, \"bar\": 2})\n ds2 = xr.Dataset({\"bar\": 2, \"baz\": 3})\n expected = xr.Dataset({\"bar\": 4}) # default is inner joining\n actual = ds1 + ds2\n assert_equal(actual, expected)\n\n with xr.set_options(arithmetic_join=\"outer\"):\n expected = xr.Dataset({\"foo\": np.nan, \"bar\": 4, \"baz\": np.nan})\n actual = ds1 + ds2\n assert_equal(actual, expected)\n\n with xr.set_options(arithmetic_join=\"left\"):\n expected = xr.Dataset({\"foo\": np.nan, \"bar\": 4})\n actual = ds1 + ds2\n assert_equal(actual, expected)\n\n with xr.set_options(arithmetic_join=\"right\"):\n expected = xr.Dataset({\"bar\": 4, \"baz\": np.nan})\n actual = ds1 + ds2\n assert_equal(actual, expected)\n\n def test_full_like(self):\n # For more thorough tests, see test_variable.py\n # Note: testing data_vars with mismatched dtypes\n ds = Dataset(\n {\n \"d1\": DataArray([1, 2, 3], dims=[\"x\"], coords={\"x\": [10, 20, 30]}),\n \"d2\": DataArray([1.1, 2.2, 3.3], dims=[\"y\"]),\n },\n attrs={\"foo\": \"bar\"},\n )\n actual = full_like(ds, 2)\n\n expected = ds.copy(deep=True)\n expected[\"d1\"].values = [2, 2, 2]\n expected[\"d2\"].values = [2.0, 2.0, 2.0]\n assert expected[\"d1\"].dtype == int\n assert expected[\"d2\"].dtype == float\n assert_identical(expected, actual)\n\n # override dtype\n actual = full_like(ds, 
fill_value=True, dtype=bool)\n expected = ds.copy(deep=True)\n expected[\"d1\"].values = [True, True, True]\n expected[\"d2\"].values = [True, True, True]\n assert expected[\"d1\"].dtype == bool\n assert expected[\"d2\"].dtype == bool\n assert_identical(expected, actual)\n\n # with multiple fill values\n actual = full_like(ds, {\"d1\": 1, \"d2\": 2.3})\n expected = ds.assign(d1=(\"x\", [1, 1, 1]), d2=(\"y\", [2.3, 2.3, 2.3]))\n assert expected[\"d1\"].dtype == int\n assert expected[\"d2\"].dtype == float\n assert_identical(expected, actual)\n\n # override multiple dtypes\n actual = full_like(ds, fill_value={\"d1\": 1, \"d2\": 2.3}, dtype={\"d1\": bool})\n expected = ds.assign(d1=(\"x\", [True, True, True]), d2=(\"y\", [2.3, 2.3, 2.3]))\n assert expected[\"d1\"].dtype == bool\n assert expected[\"d2\"].dtype == float\n assert_identical(expected, actual)\n\n def test_combine_first(self):\n dsx0 = DataArray([0, 0], [(\"x\", [\"a\", \"b\"])]).to_dataset(name=\"dsx0\")\n dsx1 = DataArray([1, 1], [(\"x\", [\"b\", \"c\"])]).to_dataset(name=\"dsx1\")\n\n actual = dsx0.combine_first(dsx1)\n expected = Dataset(\n {\"dsx0\": (\"x\", [0, 0, np.nan]), \"dsx1\": (\"x\", [np.nan, 1, 1])},\n coords={\"x\": [\"a\", \"b\", \"c\"]},\n )\n assert_equal(actual, expected)\n assert_equal(actual, xr.merge([dsx0, dsx1]))\n\n # works just like xr.merge([self, other])\n dsy2 = DataArray([2, 2, 2], [(\"x\", [\"b\", \"c\", \"d\"])]).to_dataset(name=\"dsy2\")\n actual = dsx0.combine_first(dsy2)\n expected = xr.merge([dsy2, dsx0])\n assert_equal(actual, expected)\n\n def test_sortby(self):\n ds = Dataset(\n {\n \"A\": DataArray(\n [[1, 2], [3, 4], [5, 6]], [(\"x\", [\"c\", \"b\", \"a\"]), (\"y\", [1, 0])]\n ),\n \"B\": DataArray([[5, 6], [7, 8], [9, 10]], dims=[\"x\", \"y\"]),\n }\n )\n\n sorted1d = Dataset(\n {\n \"A\": DataArray(\n [[5, 6], [3, 4], [1, 2]], [(\"x\", [\"a\", \"b\", \"c\"]), (\"y\", [1, 0])]\n ),\n \"B\": DataArray([[9, 10], [7, 8], [5, 6]], dims=[\"x\", \"y\"]),\n }\n )\n\n sorted2d = Dataset(\n {\n \"A\": DataArray(\n [[6, 5], [4, 3], [2, 1]], [(\"x\", [\"a\", \"b\", \"c\"]), (\"y\", [0, 1])]\n ),\n \"B\": DataArray([[10, 9], [8, 7], [6, 5]], dims=[\"x\", \"y\"]),\n }\n )\n\n expected = sorted1d\n dax = DataArray([100, 99, 98], [(\"x\", [\"c\", \"b\", \"a\"])])\n actual = ds.sortby(dax)\n assert_equal(actual, expected)\n\n # test descending order sort\n actual = ds.sortby(dax, ascending=False)\n assert_equal(actual, ds)\n\n # test alignment (fills in nan for 'c')\n dax_short = DataArray([98, 97], [(\"x\", [\"b\", \"a\"])])\n actual = ds.sortby(dax_short)\n assert_equal(actual, expected)\n\n # test 1-D lexsort\n # dax0 is sorted first to give indices of [1, 2, 0]\n # and then dax1 would be used to move index 2 ahead of 1\n dax0 = DataArray([100, 95, 95], [(\"x\", [\"c\", \"b\", \"a\"])])\n dax1 = DataArray([0, 1, 0], [(\"x\", [\"c\", \"b\", \"a\"])])\n actual = ds.sortby([dax0, dax1]) # lexsort underneath gives [2, 1, 0]\n assert_equal(actual, expected)\n\n expected = sorted2d\n # test multi-dim sort by 1D dataarray values\n day = DataArray([90, 80], [(\"y\", [1, 0])])\n actual = ds.sortby([day, dax])\n assert_equal(actual, expected)\n\n # test exception-raising\n with pytest.raises(KeyError) as excinfo:\n actual = ds.sortby(\"z\")\n\n with pytest.raises(ValueError) as excinfo:\n actual = ds.sortby(ds[\"A\"])\n assert \"DataArray is not 1-D\" in str(excinfo.value)\n\n expected = sorted1d\n actual = ds.sortby(\"x\")\n assert_equal(actual, expected)\n\n # test pandas.MultiIndex\n indices = ((\"b\", 1), 
(\"b\", 0), (\"a\", 1), (\"a\", 0))\n midx = pd.MultiIndex.from_tuples(indices, names=[\"one\", \"two\"])\n ds_midx = Dataset(\n {\n \"A\": DataArray(\n [[1, 2], [3, 4], [5, 6], [7, 8]], [(\"x\", midx), (\"y\", [1, 0])]\n ),\n \"B\": DataArray([[5, 6], [7, 8], [9, 10], [11, 12]], dims=[\"x\", \"y\"]),\n }\n )\n actual = ds_midx.sortby(\"x\")\n midx_reversed = pd.MultiIndex.from_tuples(\n tuple(reversed(indices)), names=[\"one\", \"two\"]\n )\n expected = Dataset(\n {\n \"A\": DataArray(\n [[7, 8], [5, 6], [3, 4], [1, 2]],\n [(\"x\", midx_reversed), (\"y\", [1, 0])],\n ),\n \"B\": DataArray([[11, 12], [9, 10], [7, 8], [5, 6]], dims=[\"x\", \"y\"]),\n }\n )\n assert_equal(actual, expected)\n\n # multi-dim sort by coordinate objects\n expected = sorted2d\n actual = ds.sortby([\"x\", \"y\"])\n assert_equal(actual, expected)\n\n # test descending order sort\n actual = ds.sortby([\"x\", \"y\"], ascending=False)\n assert_equal(actual, ds)\n\n def test_attribute_access(self):\n ds = create_test_data(seed=1)\n for key in [\"var1\", \"var2\", \"var3\", \"time\", \"dim1\", \"dim2\", \"dim3\", \"numbers\"]:\n assert_equal(ds[key], getattr(ds, key))\n assert key in dir(ds)\n\n for key in [\"dim3\", \"dim1\", \"numbers\"]:\n assert_equal(ds[\"var3\"][key], getattr(ds.var3, key))\n assert key in dir(ds[\"var3\"])\n # attrs\n assert ds[\"var3\"].attrs[\"foo\"] == ds.var3.foo\n assert \"foo\" in dir(ds[\"var3\"])\n\n def test_ipython_key_completion(self):\n ds = create_test_data(seed=1)\n actual = ds._ipython_key_completions_()\n expected = [\"var1\", \"var2\", \"var3\", \"time\", \"dim1\", \"dim2\", \"dim3\", \"numbers\"]\n for item in actual:\n ds[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n # for dataarray\n actual = ds[\"var3\"]._ipython_key_completions_()\n expected = [\"dim3\", \"dim1\", \"numbers\"]\n for item in actual:\n ds[\"var3\"][item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n # MultiIndex\n ds_midx = ds.stack(dim12=[\"dim1\", \"dim2\"])\n actual = ds_midx._ipython_key_completions_()\n expected = [\n \"var1\",\n \"var2\",\n \"var3\",\n \"time\",\n \"dim1\",\n \"dim2\",\n \"dim3\",\n \"numbers\",\n \"dim12\",\n ]\n for item in actual:\n ds_midx[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n # coords\n actual = ds.coords._ipython_key_completions_()\n expected = [\"time\", \"dim1\", \"dim2\", \"dim3\", \"numbers\"]\n for item in actual:\n ds.coords[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n actual = ds[\"var3\"].coords._ipython_key_completions_()\n expected = [\"dim1\", \"dim3\", \"numbers\"]\n for item in actual:\n ds[\"var3\"].coords[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n # data_vars\n actual = ds.data_vars._ipython_key_completions_()\n expected = [\"var1\", \"var2\", \"var3\", \"dim1\"]\n for item in actual:\n ds.data_vars[item] # should not raise\n assert sorted(actual) == sorted(expected)\n\n def test_polyfit_output(self):\n ds = create_test_data(seed=1)\n\n out = ds.polyfit(\"dim2\", 2, full=False)\n assert \"var1_polyfit_coefficients\" in out\n\n out = ds.polyfit(\"dim1\", 2, full=True)\n assert \"var1_polyfit_coefficients\" in out\n assert \"dim1_matrix_rank\" in out\n\n out = ds.polyfit(\"time\", 2)\n assert len(out.data_vars) == 0\n\n def test_polyfit_warnings(self):\n ds = create_test_data(seed=1)\n\n with warnings.catch_warnings(record=True) as ws:\n ds.var1.polyfit(\"dim2\", 10, full=False)\n assert len(ws) == 1\n assert ws[0].category == 
np.RankWarning\n ds.var1.polyfit(\"dim2\", 10, full=True)\n assert len(ws) == 1\n\n def test_pad(self):\n ds = create_test_data(seed=1)\n padded = ds.pad(dim2=(1, 1), constant_values=42)\n\n assert padded[\"dim2\"].shape == (11,)\n assert padded[\"var1\"].shape == (8, 11)\n assert padded[\"var2\"].shape == (8, 11)\n assert padded[\"var3\"].shape == (10, 8)\n assert dict(padded.dims) == {\"dim1\": 8, \"dim2\": 11, \"dim3\": 10, \"time\": 20}\n\n np.testing.assert_equal(padded[\"var1\"].isel(dim2=[0, -1]).data, 42)\n np.testing.assert_equal(padded[\"dim2\"][[0, -1]].data, np.nan)\n\n def test_astype_attrs(self):\n data = create_test_data(seed=123)\n data.attrs[\"foo\"] = \"bar\"\n\n assert data.attrs == data.astype(float).attrs\n assert data.var1.attrs == data.astype(float).var1.attrs\n assert not data.astype(float, keep_attrs=False).attrs\n assert not data.astype(float, keep_attrs=False).var1.attrs\n\n @pytest.mark.parametrize(\"parser\", [\"pandas\", \"python\"])\n @pytest.mark.parametrize(\n \"engine\", [\"python\", None, pytest.param(\"numexpr\", marks=[requires_numexpr])]\n )\n @pytest.mark.parametrize(\n \"backend\", [\"numpy\", pytest.param(\"dask\", marks=[requires_dask])]\n )\n def test_query(self, backend, engine, parser):\n \"\"\"Test querying a dataset.\"\"\"\n\n # setup test data\n np.random.seed(42)\n a = np.arange(0, 10, 1)\n b = np.random.randint(0, 100, size=10)\n c = np.linspace(0, 1, 20)\n d = np.random.choice([\"foo\", \"bar\", \"baz\"], size=30, replace=True).astype(\n object\n )\n e = np.arange(0, 10 * 20).reshape(10, 20)\n f = np.random.normal(0, 1, size=(10, 20, 30))\n if backend == \"numpy\":\n ds = Dataset(\n {\n \"a\": (\"x\", a),\n \"b\": (\"x\", b),\n \"c\": (\"y\", c),\n \"d\": (\"z\", d),\n \"e\": ((\"x\", \"y\"), e),\n \"f\": ((\"x\", \"y\", \"z\"), f),\n }\n )\n elif backend == \"dask\":\n ds = Dataset(\n {\n \"a\": (\"x\", da.from_array(a, chunks=3)),\n \"b\": (\"x\", da.from_array(b, chunks=3)),\n \"c\": (\"y\", da.from_array(c, chunks=7)),\n \"d\": (\"z\", da.from_array(d, chunks=12)),\n \"e\": ((\"x\", \"y\"), da.from_array(e, chunks=(3, 7))),\n \"f\": ((\"x\", \"y\", \"z\"), da.from_array(f, chunks=(3, 7, 12))),\n }\n )\n\n # query single dim, single variable\n actual = ds.query(x=\"a > 5\", engine=engine, parser=parser)\n expect = ds.isel(x=(a > 5))\n assert_identical(expect, actual)\n\n # query single dim, single variable, via dict\n actual = ds.query(dict(x=\"a > 5\"), engine=engine, parser=parser)\n expect = ds.isel(dict(x=(a > 5)))\n assert_identical(expect, actual)\n\n # query single dim, single variable\n actual = ds.query(x=\"b > 50\", engine=engine, parser=parser)\n expect = ds.isel(x=(b > 50))\n assert_identical(expect, actual)\n\n # query single dim, single variable\n actual = ds.query(y=\"c < .5\", engine=engine, parser=parser)\n expect = ds.isel(y=(c < 0.5))\n assert_identical(expect, actual)\n\n # query single dim, single string variable\n if parser == \"pandas\":\n # N.B., this query currently only works with the pandas parser\n # xref https://github.com/pandas-dev/pandas/issues/40436\n actual = ds.query(z='d == \"bar\"', engine=engine, parser=parser)\n expect = ds.isel(z=(d == \"bar\"))\n assert_identical(expect, actual)\n\n # query single dim, multiple variables\n actual = ds.query(x=\"(a > 5) & (b > 50)\", engine=engine, parser=parser)\n expect = ds.isel(x=((a > 5) & (b > 50)))\n assert_identical(expect, actual)\n\n # query single dim, multiple variables with computation\n actual = ds.query(x=\"(a * b) > 250\", engine=engine, 
parser=parser)\n        expect = ds.isel(x=(a * b) > 250)\n        assert_identical(expect, actual)\n\n        # check pandas query syntax is supported\n        if parser == \"pandas\":\n            actual = ds.query(x=\"(a > 5) and (b > 50)\", engine=engine, parser=parser)\n            expect = ds.isel(x=((a > 5) & (b > 50)))\n            assert_identical(expect, actual)\n\n        # query multiple dims via kwargs\n        actual = ds.query(x=\"a > 5\", y=\"c < .5\", engine=engine, parser=parser)\n        expect = ds.isel(x=(a > 5), y=(c < 0.5))\n        assert_identical(expect, actual)\n\n        # query multiple dims via kwargs\n        if parser == \"pandas\":\n            actual = ds.query(\n                x=\"a > 5\", y=\"c < .5\", z=\"d == 'bar'\", engine=engine, parser=parser\n            )\n            expect = ds.isel(x=(a > 5), y=(c < 0.5), z=(d == \"bar\"))\n            assert_identical(expect, actual)\n\n        # query multiple dims via dict\n        actual = ds.query(dict(x=\"a > 5\", y=\"c < .5\"), engine=engine, parser=parser)\n        expect = ds.isel(dict(x=(a > 5), y=(c < 0.5)))\n        assert_identical(expect, actual)\n\n        # query multiple dims via dict\n        if parser == \"pandas\":\n            actual = ds.query(\n                dict(x=\"a > 5\", y=\"c < .5\", z=\"d == 'bar'\"),\n                engine=engine,\n                parser=parser,\n            )\n            expect = ds.isel(dict(x=(a > 5), y=(c < 0.5), z=(d == \"bar\")))\n            assert_identical(expect, actual)\n\n        # test error handling\n        with pytest.raises(ValueError):\n            ds.query(\"a > 5\")  # must be dict or kwargs\n        with pytest.raises(ValueError):\n            ds.query(x=(a > 5))  # must be query string\n        with pytest.raises(IndexError):\n            ds.query(y=\"a > 5\")  # wrong length dimension\n        with pytest.raises(IndexError):\n            ds.query(x=\"c < .5\")  # wrong length dimension\n        with pytest.raises(IndexError):\n            ds.query(x=\"e > 100\")  # wrong number of dimensions\n        with pytest.raises(UndefinedVariableError):\n            ds.query(x=\"spam > 50\")  # name not present\n\n\n# Py.test tests\n\n\n@pytest.fixture(params=[None])\ndef data_set(request):\n    return create_test_data(request.param)\n\n\n@pytest.mark.parametrize(\"test_elements\", ([1, 2], np.array([1, 2]), DataArray([1, 2])))\ndef test_isin(test_elements):\n    expected = Dataset(\n        data_vars={\n            \"var1\": ((\"dim1\",), [0, 1]),\n            \"var2\": ((\"dim1\",), [1, 1]),\n            \"var3\": ((\"dim1\",), [0, 1]),\n        }\n    ).astype(\"bool\")\n\n    result = Dataset(\n        data_vars={\n            \"var1\": ((\"dim1\",), [0, 1]),\n            \"var2\": ((\"dim1\",), [1, 2]),\n            \"var3\": ((\"dim1\",), [0, 1]),\n        }\n    ).isin(test_elements)\n\n    assert_equal(result, expected)\n\n\n@pytest.mark.skipif(not has_dask, reason=\"requires dask\")\n@pytest.mark.parametrize(\"test_elements\", ([1, 2], np.array([1, 2]), DataArray([1, 2])))\ndef test_isin_dask(test_elements):\n    expected = Dataset(\n        data_vars={\n            \"var1\": ((\"dim1\",), [0, 1]),\n            \"var2\": ((\"dim1\",), [1, 1]),\n            \"var3\": ((\"dim1\",), [0, 1]),\n        }\n    ).astype(\"bool\")\n\n    result = (\n        Dataset(\n            data_vars={\n                \"var1\": ((\"dim1\",), [0, 1]),\n                \"var2\": ((\"dim1\",), [1, 2]),\n                \"var3\": ((\"dim1\",), [0, 1]),\n            }\n        )\n        .chunk(1)\n        .isin(test_elements)\n        .compute()\n    )\n\n    assert_equal(result, expected)\n\n\ndef test_isin_dataset():\n    ds = Dataset({\"x\": [1, 2]})\n    with pytest.raises(TypeError):\n        ds.isin(ds)\n\n\n@pytest.mark.parametrize(\n    \"unaligned_coords\",\n    (\n        {\"x\": [2, 1, 0]},\n        {\"x\": ([\"x\"], np.asarray([2, 1, 0]))},\n        {\"x\": ([\"x\"], np.asarray([1, 2, 0]))},\n        {\"x\": pd.Index([2, 1, 0])},\n        {\"x\": Variable(dims=\"x\", data=[0, 2, 1])},\n        {\"x\": IndexVariable(dims=\"x\", data=[0, 1, 2])},\n        {\"y\": 42},\n        {\"y\": (\"x\", [2, 1, 0])},\n        {\"y\": (\"x\", np.asarray([2, 1, 0]))},\n        {\"y\": ([\"x\"], np.asarray([2, 1, 0]))},\n    ),\n)\n
@pytest.mark.parametrize(\"coords\", ({\"x\": (\"x\", [0, 1, 2])}, {\"x\": [0, 1, 2]}))\ndef test_dataset_constructor_aligns_to_explicit_coords(unaligned_coords, coords):\n\n    a = xr.DataArray([1, 2, 3], dims=[\"x\"], coords=unaligned_coords)\n\n    expected = xr.Dataset(coords=coords)\n    expected[\"a\"] = a\n\n    result = xr.Dataset({\"a\": a}, coords=coords)\n\n    assert_equal(expected, result)\n\n\ndef test_error_message_on_set_supplied():\n    with pytest.raises(TypeError, match=\"has invalid type <class 'set'>\"):\n        xr.Dataset(dict(date=[1, 2, 3], sec={4}))\n\n\n@pytest.mark.parametrize(\"unaligned_coords\", ({\"y\": (\"b\", np.asarray([2, 1, 0]))},))\ndef test_constructor_raises_with_invalid_coords(unaligned_coords):\n\n    with pytest.raises(ValueError, match=\"not a subset of the DataArray dimensions\"):\n        xr.DataArray([1, 2, 3], dims=[\"x\"], coords=unaligned_coords)\n\n\ndef test_dir_expected_attrs(data_set):\n\n    some_expected_attrs = {\"pipe\", \"mean\", \"isnull\", \"var1\", \"dim2\", \"numbers\"}\n    result = dir(data_set)\n    assert set(result) >= some_expected_attrs\n\n\ndef test_dir_non_string(data_set):\n    # add a numbered key to ensure this doesn't break dir\n    data_set[5] = \"foo\"\n    result = dir(data_set)\n    assert 5 not in result\n\n    # GH2172\n    sample_data = np.random.uniform(size=[2, 2000, 10000])\n    x = xr.Dataset({\"sample_data\": (sample_data.shape, sample_data)})\n    x2 = x[\"sample_data\"]\n    dir(x2)\n\n\ndef test_dir_unicode(data_set):\n    data_set[\"unicode\"] = \"uni\"\n    result = dir(data_set)\n    assert \"unicode\" in result\n\n\n@pytest.fixture(params=[1])\ndef ds(request):\n    if request.param == 1:\n        return Dataset(\n            dict(\n                z1=([\"y\", \"x\"], np.random.randn(2, 8)),\n                z2=([\"time\", \"y\"], np.random.randn(10, 2)),\n            ),\n            dict(\n                x=(\"x\", np.linspace(0, 1.0, 8)),\n                time=(\"time\", np.linspace(0, 1.0, 10)),\n                c=(\"y\", [\"a\", \"b\"]),\n                y=range(2),\n            ),\n        )\n\n    if request.param == 2:\n        return Dataset(\n            {\n                \"z1\": ([\"time\", \"y\"], np.random.randn(10, 2)),\n                \"z2\": ([\"time\"], np.random.randn(10)),\n                \"z3\": ([\"x\", \"time\"], np.random.randn(8, 10)),\n            },\n            {\n                \"x\": (\"x\", np.linspace(0, 1.0, 8)),\n                \"time\": (\"time\", np.linspace(0, 1.0, 10)),\n                \"c\": (\"y\", [\"a\", \"b\"]),\n                \"y\": range(2),\n            },\n        )\n\n\ndef test_coarsen_absent_dims_error(ds):\n    with pytest.raises(ValueError, match=r\"not found in Dataset.\"):\n        ds.coarsen(foo=2)\n\n\n@pytest.mark.parametrize(\"dask\", [True, False])\n@pytest.mark.parametrize((\"boundary\", \"side\"), [(\"trim\", \"left\"), (\"pad\", \"right\")])\ndef test_coarsen(ds, dask, boundary, side):\n    if dask and has_dask:\n        ds = ds.chunk({\"x\": 4})\n\n    actual = ds.coarsen(time=2, x=3, boundary=boundary, side=side).max()\n    assert_equal(\n        actual[\"z1\"], ds[\"z1\"].coarsen(x=3, boundary=boundary, side=side).max()\n    )\n    # coordinate should be mean by default\n    assert_equal(\n        actual[\"time\"], ds[\"time\"].coarsen(time=2, boundary=boundary, side=side).mean()\n    )\n\n\n@pytest.mark.parametrize(\"dask\", [True, False])\ndef test_coarsen_coords(ds, dask):\n    if dask and has_dask:\n        ds = ds.chunk({\"x\": 4})\n\n    # check if coord_func works\n    actual = ds.coarsen(time=2, x=3, boundary=\"trim\", coord_func={\"time\": \"max\"}).max()\n    assert_equal(actual[\"z1\"], ds[\"z1\"].coarsen(x=3, boundary=\"trim\").max())\n    assert_equal(actual[\"time\"], ds[\"time\"].coarsen(time=2, boundary=\"trim\").max())\n\n    # raise if exact\n    with pytest.raises(ValueError):\n        ds.coarsen(x=3).mean()\n    # should be no error\n    ds.isel(x=slice(0, 3 * (len(ds[\"x\"]) // 3))).coarsen(x=3).mean()\n\n    # working test with
 pd.time\n    da = xr.DataArray(\n        np.linspace(0, 365, num=364),\n        dims=\"time\",\n        coords={\"time\": pd.date_range(\"15/12/1999\", periods=364)},\n    )\n    actual = da.coarsen(time=2).mean()\n\n\n@requires_cftime\ndef test_coarsen_coords_cftime():\n    times = xr.cftime_range(\"2000\", periods=6)\n    da = xr.DataArray(range(6), [(\"time\", times)])\n    actual = da.coarsen(time=3).mean()\n    expected_times = xr.cftime_range(\"2000-01-02\", freq=\"3D\", periods=2)\n    np.testing.assert_array_equal(actual.time, expected_times)\n\n\n@pytest.mark.parametrize(\n    \"funcname, argument\",\n    [\n        (\"reduce\", (np.mean,)),\n        (\"mean\", ()),\n    ],\n)\ndef test_coarsen_keep_attrs(funcname, argument):\n    global_attrs = {\"units\": \"test\", \"long_name\": \"testing\"}\n    da_attrs = {\"da_attr\": \"test\"}\n    attrs_coords = {\"attrs_coords\": \"test\"}\n    da_not_coarsend_attrs = {\"da_not_coarsend_attr\": \"test\"}\n\n    data = np.linspace(10, 15, 100)\n    coords = np.linspace(1, 10, 100)\n\n    ds = Dataset(\n        data_vars={\n            \"da\": (\"coord\", data, da_attrs),\n            \"da_not_coarsend\": (\"no_coord\", data, da_not_coarsend_attrs),\n        },\n        coords={\"coord\": (\"coord\", coords, attrs_coords)},\n        attrs=global_attrs,\n    )\n\n    # attrs are now kept per default\n    func = getattr(ds.coarsen(dim={\"coord\": 5}), funcname)\n    result = func(*argument)\n    assert result.attrs == global_attrs\n    assert result.da.attrs == da_attrs\n    assert result.da_not_coarsend.attrs == da_not_coarsend_attrs\n    assert result.coord.attrs == attrs_coords\n    assert result.da.name == \"da\"\n    assert result.da_not_coarsend.name == \"da_not_coarsend\"\n\n    # discard attrs\n    func = getattr(ds.coarsen(dim={\"coord\": 5}), funcname)\n    result = func(*argument, keep_attrs=False)\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n    assert result.da_not_coarsend.attrs == {}\n    assert result.coord.attrs == {}\n    assert result.da.name == \"da\"\n    assert result.da_not_coarsend.name == \"da_not_coarsend\"\n\n    # test discard attrs using global option\n    func = getattr(ds.coarsen(dim={\"coord\": 5}), funcname)\n    with set_options(keep_attrs=False):\n        result = func(*argument)\n\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n    assert result.da_not_coarsend.attrs == {}\n    assert result.coord.attrs == {}\n    assert result.da.name == \"da\"\n    assert result.da_not_coarsend.name == \"da_not_coarsend\"\n\n    # keyword takes precedence over global option\n    func = getattr(ds.coarsen(dim={\"coord\": 5}), funcname)\n    with set_options(keep_attrs=False):\n        result = func(*argument, keep_attrs=True)\n\n    assert result.attrs == global_attrs\n    assert result.da.attrs == da_attrs\n    assert result.da_not_coarsend.attrs == da_not_coarsend_attrs\n    assert result.coord.attrs == attrs_coords\n    assert result.da.name == \"da\"\n    assert result.da_not_coarsend.name == \"da_not_coarsend\"\n\n    func = getattr(ds.coarsen(dim={\"coord\": 5}), funcname)\n    with set_options(keep_attrs=True):\n        result = func(*argument, keep_attrs=False)\n\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n    assert result.da_not_coarsend.attrs == {}\n    assert result.coord.attrs == {}\n    assert result.da.name == \"da\"\n    assert result.da_not_coarsend.name == \"da_not_coarsend\"\n\n\ndef test_coarsen_keep_attrs_deprecated():\n    global_attrs = {\"units\": \"test\", \"long_name\": \"testing\"}\n    attrs_da = {\"da_attr\": \"test\"}\n\n    data = np.linspace(10, 15, 100)\n    coords = np.linspace(1, 10, 100)\n\n    ds = Dataset(\n        data_vars={\"da\": (\"coord\", data)},\n        coords={\"coord\": coords},\n        attrs=global_attrs,\n    )\n    ds.da.attrs = attrs_da\n\n    #
 deprecated option\n    with pytest.warns(\n        FutureWarning, match=\"Passing ``keep_attrs`` to ``coarsen`` is deprecated\"\n    ):\n        result = ds.coarsen(dim={\"coord\": 5}, keep_attrs=False).mean()\n\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n\n    # the keep_attrs in the reduction function takes precedence\n    with pytest.warns(\n        FutureWarning, match=\"Passing ``keep_attrs`` to ``coarsen`` is deprecated\"\n    ):\n        result = ds.coarsen(dim={\"coord\": 5}, keep_attrs=True).mean(keep_attrs=False)\n\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"ds\", (1, 2), indirect=True)\n@pytest.mark.parametrize(\"window\", (1, 2, 3, 4))\n@pytest.mark.parametrize(\"name\", (\"sum\", \"mean\", \"std\", \"var\", \"min\", \"max\", \"median\"))\ndef test_coarsen_reduce(ds, window, name):\n    # Use boundary=\"trim\" to accommodate all window sizes used in tests\n    coarsen_obj = ds.coarsen(time=window, boundary=\"trim\")\n\n    # add nan prefix to numpy methods to get similar behavior as bottleneck\n    actual = coarsen_obj.reduce(getattr(np, f\"nan{name}\"))\n    expected = getattr(coarsen_obj, name)()\n    assert_allclose(actual, expected)\n\n    # make sure the order of data_var are not changed.\n    assert list(ds.data_vars.keys()) == list(actual.data_vars.keys())\n\n    # Make sure the dimension order is restored\n    for key, src_var in ds.data_vars.items():\n        assert src_var.dims == actual[key].dims\n\n\n@pytest.mark.parametrize(\n    \"funcname, argument\",\n    [\n        (\"reduce\", (np.mean,)),\n        (\"mean\", ()),\n        (\"construct\", (\"window_dim\",)),\n        (\"count\", ()),\n    ],\n)\ndef test_rolling_keep_attrs(funcname, argument):\n    global_attrs = {\"units\": \"test\", \"long_name\": \"testing\"}\n    da_attrs = {\"da_attr\": \"test\"}\n    da_not_rolled_attrs = {\"da_not_rolled_attr\": \"test\"}\n\n    data = np.linspace(10, 15, 100)\n    coords = np.linspace(1, 10, 100)\n\n    ds = Dataset(\n        data_vars={\"da\": (\"coord\", data), \"da_not_rolled\": (\"no_coord\", data)},\n        coords={\"coord\": coords},\n        attrs=global_attrs,\n    )\n    ds.da.attrs = da_attrs\n    ds.da_not_rolled.attrs = da_not_rolled_attrs\n\n    # attrs are now kept per default\n    func = getattr(ds.rolling(dim={\"coord\": 5}), funcname)\n    result = func(*argument)\n    assert result.attrs == global_attrs\n    assert result.da.attrs == da_attrs\n    assert result.da_not_rolled.attrs == da_not_rolled_attrs\n    assert result.da.name == \"da\"\n    assert result.da_not_rolled.name == \"da_not_rolled\"\n\n    # discard attrs\n    func = getattr(ds.rolling(dim={\"coord\": 5}), funcname)\n    result = func(*argument, keep_attrs=False)\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n    assert result.da_not_rolled.attrs == {}\n    assert result.da.name == \"da\"\n    assert result.da_not_rolled.name == \"da_not_rolled\"\n\n    # test discard attrs using global option\n    func = getattr(ds.rolling(dim={\"coord\": 5}), funcname)\n    with set_options(keep_attrs=False):\n        result = func(*argument)\n\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n    assert result.da_not_rolled.attrs == {}\n    assert result.da.name == \"da\"\n    assert result.da_not_rolled.name == \"da_not_rolled\"\n\n    # keyword takes precedence over global option\n    func = getattr(ds.rolling(dim={\"coord\": 5}), funcname)\n    with set_options(keep_attrs=False):\n        result = func(*argument, keep_attrs=True)\n\n    assert result.attrs == global_attrs\n    assert result.da.attrs == da_attrs\n    assert result.da_not_rolled.attrs == da_not_rolled_attrs\n    assert result.da.name == \"da\"\n    assert result.da_not_rolled.name == \"da_not_rolled\"\n\n    func =
 getattr(ds.rolling(dim={\"coord\": 5}), funcname)\n    with set_options(keep_attrs=True):\n        result = func(*argument, keep_attrs=False)\n\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n    assert result.da_not_rolled.attrs == {}\n    assert result.da.name == \"da\"\n    assert result.da_not_rolled.name == \"da_not_rolled\"\n\n\ndef test_rolling_keep_attrs_deprecated():\n    global_attrs = {\"units\": \"test\", \"long_name\": \"testing\"}\n    attrs_da = {\"da_attr\": \"test\"}\n\n    data = np.linspace(10, 15, 100)\n    coords = np.linspace(1, 10, 100)\n\n    ds = Dataset(\n        data_vars={\"da\": (\"coord\", data)},\n        coords={\"coord\": coords},\n        attrs=global_attrs,\n    )\n    ds.da.attrs = attrs_da\n\n    # deprecated option\n    with pytest.warns(\n        FutureWarning, match=\"Passing ``keep_attrs`` to ``rolling`` is deprecated\"\n    ):\n        result = ds.rolling(dim={\"coord\": 5}, keep_attrs=False).construct(\"window_dim\")\n\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n\n    # the keep_attrs in the reduction function takes precedence\n    with pytest.warns(\n        FutureWarning, match=\"Passing ``keep_attrs`` to ``rolling`` is deprecated\"\n    ):\n        result = ds.rolling(dim={\"coord\": 5}, keep_attrs=True).construct(\n            \"window_dim\", keep_attrs=False\n        )\n\n    assert result.attrs == {}\n    assert result.da.attrs == {}\n\n\ndef test_rolling_properties(ds):\n    # catching invalid args\n    with pytest.raises(ValueError, match=\"window must be > 0\"):\n        ds.rolling(time=-2)\n    with pytest.raises(ValueError, match=\"min_periods must be greater than zero\"):\n        ds.rolling(time=2, min_periods=0)\n    with pytest.raises(KeyError, match=\"time2\"):\n        ds.rolling(time2=2)\n\n\n@pytest.mark.parametrize(\"name\", (\"sum\", \"mean\", \"std\", \"var\", \"min\", \"max\", \"median\"))\n@pytest.mark.parametrize(\"center\", (True, False, None))\n@pytest.mark.parametrize(\"min_periods\", (1, None))\n@pytest.mark.parametrize(\"key\", (\"z1\", \"z2\"))\ndef test_rolling_wrapped_bottleneck(ds, name, center, min_periods, key):\n    bn = pytest.importorskip(\"bottleneck\", minversion=\"1.1\")\n\n    # Test all bottleneck functions\n    rolling_obj = ds.rolling(time=7, min_periods=min_periods)\n\n    func_name = f\"move_{name}\"\n    actual = getattr(rolling_obj, name)()\n    if key == \"z1\":  # z1 does not depend on 'Time' axis.
 Stored as it is.\n        expected = ds[key]\n    elif key == \"z2\":\n        expected = getattr(bn, func_name)(\n            ds[key].values, window=7, axis=0, min_count=min_periods\n        )\n    else:\n        raise ValueError\n    assert_array_equal(actual[key].values, expected)\n\n    # Test center\n    rolling_obj = ds.rolling(time=7, center=center)\n    actual = getattr(rolling_obj, name)()[\"time\"]\n    assert_equal(actual, ds[\"time\"])\n\n\n@requires_numbagg\ndef test_rolling_exp(ds):\n\n    result = ds.rolling_exp(time=10, window_type=\"span\").mean()\n    assert isinstance(result, Dataset)\n\n\n@requires_numbagg\ndef test_rolling_exp_keep_attrs(ds):\n\n    attrs_global = {\"attrs\": \"global\"}\n    attrs_z1 = {\"attr\": \"z1\"}\n\n    ds.attrs = attrs_global\n    ds.z1.attrs = attrs_z1\n\n    # attrs are kept per default\n    result = ds.rolling_exp(time=10).mean()\n    assert result.attrs == attrs_global\n    assert result.z1.attrs == attrs_z1\n\n    # discard attrs\n    result = ds.rolling_exp(time=10).mean(keep_attrs=False)\n    assert result.attrs == {}\n    assert result.z1.attrs == {}\n\n    # test discard attrs using global option\n    with set_options(keep_attrs=False):\n        result = ds.rolling_exp(time=10).mean()\n    assert result.attrs == {}\n    assert result.z1.attrs == {}\n\n    # keyword takes precedence over global option\n    with set_options(keep_attrs=False):\n        result = ds.rolling_exp(time=10).mean(keep_attrs=True)\n    assert result.attrs == attrs_global\n    assert result.z1.attrs == attrs_z1\n\n    with set_options(keep_attrs=True):\n        result = ds.rolling_exp(time=10).mean(keep_attrs=False)\n    assert result.attrs == {}\n    assert result.z1.attrs == {}\n\n    with pytest.warns(\n        UserWarning, match=\"Passing ``keep_attrs`` to ``rolling_exp`` has no effect.\"\n    ):\n        ds.rolling_exp(time=10, keep_attrs=True)\n\n\n@pytest.mark.parametrize(\"center\", (True, False))\n@pytest.mark.parametrize(\"min_periods\", (None, 1, 2, 3))\n@pytest.mark.parametrize(\"window\", (1, 2, 3, 4))\ndef test_rolling_pandas_compat(center, window, min_periods):\n    df = pd.DataFrame(\n        {\n            \"x\": np.random.randn(20),\n            \"y\": np.random.randn(20),\n            \"time\": np.linspace(0, 1, 20),\n        }\n    )\n    ds = Dataset.from_dataframe(df)\n\n    if min_periods is not None and window < min_periods:\n        min_periods = window\n\n    df_rolling = df.rolling(window, center=center, min_periods=min_periods).mean()\n    ds_rolling = ds.rolling(index=window, center=center, min_periods=min_periods).mean()\n\n    np.testing.assert_allclose(df_rolling[\"x\"].values, ds_rolling[\"x\"].values)\n    np.testing.assert_allclose(df_rolling.index, ds_rolling[\"index\"])\n\n\n@pytest.mark.parametrize(\"center\", (True, False))\n@pytest.mark.parametrize(\"window\", (1, 2, 3, 4))\ndef test_rolling_construct(center, window):\n    df = pd.DataFrame(\n        {\n            \"x\": np.random.randn(20),\n            \"y\": np.random.randn(20),\n            \"time\": np.linspace(0, 1, 20),\n        }\n    )\n\n    ds = Dataset.from_dataframe(df)\n    df_rolling = df.rolling(window, center=center, min_periods=1).mean()\n    ds_rolling = ds.rolling(index=window, center=center)\n\n    ds_rolling_mean = ds_rolling.construct(\"window\").mean(\"window\")\n    np.testing.assert_allclose(df_rolling[\"x\"].values, ds_rolling_mean[\"x\"].values)\n    np.testing.assert_allclose(df_rolling.index, ds_rolling_mean[\"index\"])\n\n    # with stride\n    ds_rolling_mean = ds_rolling.construct(\"window\", stride=2).mean(\"window\")\n    np.testing.assert_allclose(df_rolling[\"x\"][::2].values, ds_rolling_mean[\"x\"].values)\n    np.testing.assert_allclose(df_rolling.index[::2], ds_rolling_mean[\"index\"])\n    # with fill_value\n    ds_rolling_mean = ds_rolling.construct(\"window\", stride=2, fill_value=0.0).mean(\n
\"window\"\n )\n assert (ds_rolling_mean.isnull().sum() == 0).to_array(dim=\"vars\").all()\n assert (ds_rolling_mean[\"x\"] == 0.0).sum() >= 0\n\n\[email protected]\[email protected](\"ds\", (1, 2), indirect=True)\[email protected](\"center\", (True, False))\[email protected](\"min_periods\", (None, 1, 2, 3))\[email protected](\"window\", (1, 2, 3, 4))\[email protected](\"name\", (\"sum\", \"mean\", \"std\", \"var\", \"min\", \"max\", \"median\"))\ndef test_rolling_reduce(ds, center, min_periods, window, name):\n\n if min_periods is not None and window < min_periods:\n min_periods = window\n\n if name == \"std\" and window == 1:\n pytest.skip(\"std with window == 1 is unstable in bottleneck\")\n\n rolling_obj = ds.rolling(time=window, center=center, min_periods=min_periods)\n\n # add nan prefix to numpy methods to get similar behavior as bottleneck\n actual = rolling_obj.reduce(getattr(np, \"nan%s\" % name))\n expected = getattr(rolling_obj, name)()\n assert_allclose(actual, expected)\n assert ds.dims == actual.dims\n # make sure the order of data_var are not changed.\n assert list(ds.data_vars.keys()) == list(actual.data_vars.keys())\n\n # Make sure the dimension order is restored\n for key, src_var in ds.data_vars.items():\n assert src_var.dims == actual[key].dims\n\n\[email protected](\"ds\", (2,), indirect=True)\[email protected](\"center\", (True, False))\[email protected](\"min_periods\", (None, 1))\[email protected](\"name\", (\"sum\", \"max\"))\[email protected](\"dask\", (True, False))\ndef test_ndrolling_reduce(ds, center, min_periods, name, dask):\n if dask and has_dask:\n ds = ds.chunk({\"x\": 4})\n\n rolling_obj = ds.rolling(time=4, x=3, center=center, min_periods=min_periods)\n\n actual = getattr(rolling_obj, name)()\n expected = getattr(\n getattr(\n ds.rolling(time=4, center=center, min_periods=min_periods), name\n )().rolling(x=3, center=center, min_periods=min_periods),\n name,\n )()\n assert_allclose(actual, expected)\n assert actual.dims == expected.dims\n\n # Do it in the opposite order\n expected = getattr(\n getattr(\n ds.rolling(x=3, center=center, min_periods=min_periods), name\n )().rolling(time=4, center=center, min_periods=min_periods),\n name,\n )()\n\n assert_allclose(actual, expected)\n assert actual.dims == expected.dims\n\n\[email protected](\"center\", (True, False, (True, False)))\[email protected](\"fill_value\", (np.nan, 0.0))\[email protected](\"dask\", (True, False))\ndef test_ndrolling_construct(center, fill_value, dask):\n da = DataArray(\n np.arange(5 * 6 * 7).reshape(5, 6, 7).astype(float),\n dims=[\"x\", \"y\", \"z\"],\n coords={\"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"], \"y\": np.arange(6)},\n )\n ds = xr.Dataset({\"da\": da})\n if dask and has_dask:\n ds = ds.chunk({\"x\": 4})\n\n actual = ds.rolling(x=3, z=2, center=center).construct(\n x=\"x1\", z=\"z1\", fill_value=fill_value\n )\n if not isinstance(center, tuple):\n center = (center, center)\n expected = (\n ds.rolling(x=3, center=center[0])\n .construct(x=\"x1\", fill_value=fill_value)\n .rolling(z=2, center=center[1])\n .construct(z=\"z1\", fill_value=fill_value)\n )\n assert_allclose(actual, expected)\n\n\ndef test_raise_no_warning_for_nan_in_binary_ops():\n with pytest.warns(None) as record:\n Dataset(data_vars={\"x\": (\"y\", [1, 2, np.NaN])}) > 0\n assert len(record) == 0\n\n\[email protected](\"error\")\[email protected](\"ds\", (2,), indirect=True)\ndef test_raise_no_warning_assert_close(ds):\n assert_allclose(ds, ds)\n\n\[email protected](reason=\"See 
 https://github.com/pydata/xarray/pull/4369 or docstring\")\n@pytest.mark.filterwarnings(\"error\")\n@pytest.mark.parametrize(\"ds\", (2,), indirect=True)\n@pytest.mark.parametrize(\"name\", (\"mean\", \"max\"))\ndef test_raise_no_warning_dask_rolling_assert_close(ds, name):\n    \"\"\"\n    This is a puzzle — I can't easily find the source of the warning. It\n    requires `assert_allclose` to be run, for the `ds` param to be 2, and is\n    different for `mean` and `max`. `sum` raises no warning.\n    \"\"\"\n\n    ds = ds.chunk({\"x\": 4})\n\n    rolling_obj = ds.rolling(time=4, x=3)\n\n    actual = getattr(rolling_obj, name)()\n    expected = getattr(getattr(ds.rolling(time=4), name)().rolling(x=3), name)()\n    assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize(\"dask\", [True, False])\n@pytest.mark.parametrize(\"edge_order\", [1, 2])\ndef test_differentiate(dask, edge_order):\n    rs = np.random.RandomState(42)\n    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]\n\n    da = xr.DataArray(\n        rs.randn(8, 6),\n        dims=[\"x\", \"y\"],\n        coords={\"x\": coord, \"z\": 3, \"x2d\": ((\"x\", \"y\"), rs.randn(8, 6))},\n    )\n    if dask and has_dask:\n        da = da.chunk({\"x\": 4})\n\n    ds = xr.Dataset({\"var\": da})\n\n    # along x\n    actual = da.differentiate(\"x\", edge_order)\n    expected_x = xr.DataArray(\n        np.gradient(da, da[\"x\"], axis=0, edge_order=edge_order),\n        dims=da.dims,\n        coords=da.coords,\n    )\n    assert_equal(expected_x, actual)\n    assert_equal(\n        ds[\"var\"].differentiate(\"x\", edge_order=edge_order),\n        ds.differentiate(\"x\", edge_order=edge_order)[\"var\"],\n    )\n    # coordinate should not change\n    assert_equal(da[\"x\"], actual[\"x\"])\n\n    # along y\n    actual = da.differentiate(\"y\", edge_order)\n    expected_y = xr.DataArray(\n        np.gradient(da, da[\"y\"], axis=1, edge_order=edge_order),\n        dims=da.dims,\n        coords=da.coords,\n    )\n    assert_equal(expected_y, actual)\n    assert_equal(actual, ds.differentiate(\"y\", edge_order=edge_order)[\"var\"])\n    assert_equal(\n        ds[\"var\"].differentiate(\"y\", edge_order=edge_order),\n        ds.differentiate(\"y\", edge_order=edge_order)[\"var\"],\n    )\n\n    with pytest.raises(ValueError):\n        da.differentiate(\"x2d\")\n\n\n@pytest.mark.parametrize(\"dask\", [True, False])\ndef test_differentiate_datetime(dask):\n    rs = np.random.RandomState(42)\n    coord = np.array(\n        [\n            \"2004-07-13\",\n            \"2006-01-13\",\n            \"2010-08-13\",\n            \"2010-09-13\",\n            \"2010-10-11\",\n            \"2010-12-13\",\n            \"2011-02-13\",\n            \"2012-08-13\",\n        ],\n        dtype=\"datetime64\",\n    )\n\n    da = xr.DataArray(\n        rs.randn(8, 6),\n        dims=[\"x\", \"y\"],\n        coords={\"x\": coord, \"z\": 3, \"x2d\": ((\"x\", \"y\"), rs.randn(8, 6))},\n    )\n    if dask and has_dask:\n        da = da.chunk({\"x\": 4})\n\n    # along x\n    actual = da.differentiate(\"x\", edge_order=1, datetime_unit=\"D\")\n    expected_x = xr.DataArray(\n        np.gradient(\n            da, da[\"x\"].variable._to_numeric(datetime_unit=\"D\"), axis=0, edge_order=1\n        ),\n        dims=da.dims,\n        coords=da.coords,\n    )\n    assert_equal(expected_x, actual)\n\n    actual2 = da.differentiate(\"x\", edge_order=1, datetime_unit=\"h\")\n    assert np.allclose(actual, actual2 * 24)\n\n    # for datetime variable\n    actual = da[\"x\"].differentiate(\"x\", edge_order=1, datetime_unit=\"D\")\n    assert np.allclose(actual, 1.0)\n\n    # with different date unit\n    da = xr.DataArray(coord.astype(\"datetime64[ms]\"), dims=[\"x\"], coords={\"x\": coord})\n    actual = da.differentiate(\"x\", edge_order=1)\n    assert np.allclose(actual, 1.0)\n\n\n@pytest.mark.skipif(not has_cftime, reason=\"Test requires cftime.\")\n@pytest.mark.parametrize(\"dask\", [True, False])\ndef test_differentiate_cftime(dask):\n    rs = np.random.RandomState(42)\n
coord = xr.cftime_range(\"2000\", periods=8, freq=\"2M\")\n\n da = xr.DataArray(\n rs.randn(8, 6),\n coords={\"time\": coord, \"z\": 3, \"t2d\": ((\"time\", \"y\"), rs.randn(8, 6))},\n dims=[\"time\", \"y\"],\n )\n\n if dask and has_dask:\n da = da.chunk({\"time\": 4})\n\n actual = da.differentiate(\"time\", edge_order=1, datetime_unit=\"D\")\n expected_data = np.gradient(\n da, da[\"time\"].variable._to_numeric(datetime_unit=\"D\"), axis=0, edge_order=1\n )\n expected = xr.DataArray(expected_data, coords=da.coords, dims=da.dims)\n assert_equal(expected, actual)\n\n actual2 = da.differentiate(\"time\", edge_order=1, datetime_unit=\"h\")\n assert_allclose(actual, actual2 * 24)\n\n # Test the differentiation of datetimes themselves\n actual = da[\"time\"].differentiate(\"time\", edge_order=1, datetime_unit=\"D\")\n assert_allclose(actual, xr.ones_like(da[\"time\"]).astype(float))\n\n\[email protected](\"dask\", [True, False])\ndef test_integrate(dask):\n rs = np.random.RandomState(42)\n coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]\n\n da = xr.DataArray(\n rs.randn(8, 6),\n dims=[\"x\", \"y\"],\n coords={\n \"x\": coord,\n \"x2\": ((\"x\",), rs.randn(8)),\n \"z\": 3,\n \"x2d\": ((\"x\", \"y\"), rs.randn(8, 6)),\n },\n )\n if dask and has_dask:\n da = da.chunk({\"x\": 4})\n\n ds = xr.Dataset({\"var\": da})\n\n # along x\n actual = da.integrate(\"x\")\n # coordinate that contains x should be dropped.\n expected_x = xr.DataArray(\n np.trapz(da.compute(), da[\"x\"], axis=0),\n dims=[\"y\"],\n coords={k: v for k, v in da.coords.items() if \"x\" not in v.dims},\n )\n assert_allclose(expected_x, actual.compute())\n assert_equal(ds[\"var\"].integrate(\"x\"), ds.integrate(\"x\")[\"var\"])\n\n # make sure result is also a dask array (if the source is dask array)\n assert isinstance(actual.data, type(da.data))\n\n # along y\n actual = da.integrate(\"y\")\n expected_y = xr.DataArray(\n np.trapz(da, da[\"y\"], axis=1),\n dims=[\"x\"],\n coords={k: v for k, v in da.coords.items() if \"y\" not in v.dims},\n )\n assert_allclose(expected_y, actual.compute())\n assert_equal(actual, ds.integrate(\"y\")[\"var\"])\n assert_equal(ds[\"var\"].integrate(\"y\"), ds.integrate(\"y\")[\"var\"])\n\n # along x and y\n actual = da.integrate((\"y\", \"x\"))\n assert actual.ndim == 0\n\n with pytest.raises(ValueError):\n da.integrate(\"x2d\")\n\n with pytest.warns(FutureWarning):\n da.integrate(dim=\"x\")\n\n\n@requires_scipy\[email protected](\"dask\", [True, False])\ndef test_cumulative_integrate(dask):\n rs = np.random.RandomState(43)\n coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]\n\n da = xr.DataArray(\n rs.randn(8, 6),\n dims=[\"x\", \"y\"],\n coords={\n \"x\": coord,\n \"x2\": ((\"x\",), rs.randn(8)),\n \"z\": 3,\n \"x2d\": ((\"x\", \"y\"), rs.randn(8, 6)),\n },\n )\n if dask and has_dask:\n da = da.chunk({\"x\": 4})\n\n ds = xr.Dataset({\"var\": da})\n\n # along x\n actual = da.cumulative_integrate(\"x\")\n\n # From scipy-1.6.0 cumtrapz is renamed to cumulative_trapezoid, but cumtrapz is\n # still provided for backward compatibility\n from scipy.integrate import cumtrapz\n\n expected_x = xr.DataArray(\n cumtrapz(da.compute(), da[\"x\"], axis=0, initial=0.0),\n dims=[\"x\", \"y\"],\n coords=da.coords,\n )\n assert_allclose(expected_x, actual.compute())\n assert_equal(\n ds[\"var\"].cumulative_integrate(\"x\"),\n ds.cumulative_integrate(\"x\")[\"var\"],\n )\n\n # make sure result is also a dask array (if the source is dask array)\n assert isinstance(actual.data, type(da.data))\n\n # along y\n actual = 
da.cumulative_integrate(\"y\")\n expected_y = xr.DataArray(\n cumtrapz(da, da[\"y\"], axis=1, initial=0.0),\n dims=[\"x\", \"y\"],\n coords=da.coords,\n )\n assert_allclose(expected_y, actual.compute())\n assert_equal(actual, ds.cumulative_integrate(\"y\")[\"var\"])\n assert_equal(\n ds[\"var\"].cumulative_integrate(\"y\"),\n ds.cumulative_integrate(\"y\")[\"var\"],\n )\n\n # along x and y\n actual = da.cumulative_integrate((\"y\", \"x\"))\n assert actual.ndim == 2\n\n with pytest.raises(ValueError):\n da.cumulative_integrate(\"x2d\")\n\n\[email protected](\"dask\", [True, False])\[email protected](\"which_datetime\", [\"np\", \"cftime\"])\ndef test_trapz_datetime(dask, which_datetime):\n rs = np.random.RandomState(42)\n if which_datetime == \"np\":\n coord = np.array(\n [\n \"2004-07-13\",\n \"2006-01-13\",\n \"2010-08-13\",\n \"2010-09-13\",\n \"2010-10-11\",\n \"2010-12-13\",\n \"2011-02-13\",\n \"2012-08-13\",\n ],\n dtype=\"datetime64\",\n )\n else:\n if not has_cftime:\n pytest.skip(\"Test requires cftime.\")\n coord = xr.cftime_range(\"2000\", periods=8, freq=\"2D\")\n\n da = xr.DataArray(\n rs.randn(8, 6),\n coords={\"time\": coord, \"z\": 3, \"t2d\": ((\"time\", \"y\"), rs.randn(8, 6))},\n dims=[\"time\", \"y\"],\n )\n\n if dask and has_dask:\n da = da.chunk({\"time\": 4})\n\n actual = da.integrate(\"time\", datetime_unit=\"D\")\n expected_data = np.trapz(\n da.data,\n duck_array_ops.datetime_to_numeric(da[\"time\"].data, datetime_unit=\"D\"),\n axis=0,\n )\n expected = xr.DataArray(\n expected_data,\n dims=[\"y\"],\n coords={k: v for k, v in da.coords.items() if \"time\" not in v.dims},\n )\n assert_allclose(expected, actual.compute())\n\n # make sure result is also a dask array (if the source is dask array)\n assert isinstance(actual.data, type(da.data))\n\n actual2 = da.integrate(\"time\", datetime_unit=\"h\")\n assert_allclose(actual, actual2 / 24.0)\n\n\ndef test_no_dict():\n d = Dataset()\n with pytest.raises(AttributeError):\n d.__dict__\n\n\ndef test_subclass_slots():\n \"\"\"Test that Dataset subclasses must explicitly define ``__slots__``.\n\n .. note::\n As of 0.13.0, this is actually mitigated into a FutureWarning for any class\n defined outside of the xarray package.\n \"\"\"\n with pytest.raises(AttributeError) as e:\n\n class MyDS(Dataset):\n pass\n\n assert str(e.value) == \"MyDS must explicitly define __slots__\"\n\n\ndef test_weakref():\n \"\"\"Classes with __slots__ are incompatible with the weakref module unless they\n explicitly state __weakref__ among their slots\n \"\"\"\n from weakref import ref\n\n ds = Dataset()\n r = ref(ds)\n assert r() is ds\n\n\ndef test_deepcopy_obj_array():\n x0 = Dataset(dict(foo=DataArray(np.array([object()]))))\n x1 = deepcopy(x0)\n assert x0[\"foo\"].values[0] is not x1[\"foo\"].values[0]\n\n\ndef test_clip(ds):\n result = ds.clip(min=0.5)\n assert result.min(...) >= 0.5\n\n result = ds.clip(max=0.5)\n assert result.max(...) <= 0.5\n\n result = ds.clip(min=0.25, max=0.75)\n assert result.min(...) >= 0.25\n assert result.max(...) <= 0.75\n\n result = ds.clip(min=ds.mean(\"y\"), max=ds.mean(\"y\"))\n assert result.dims == ds.dims\n"
] |
[
[
"pandas.tseries.frequencies.to_offset",
"pandas.Series",
"numpy.linspace",
"numpy.asarray",
"numpy.issubdtype",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.dtype",
"numpy.random.randn",
"numpy.mean",
"scipy.integrate.cumtrapz",
"numpy.trapz",
"numpy.random.randint",
"numpy.testing.assert_equal",
"numpy.allclose",
"pandas.CategoricalDtype",
"numpy.arange",
"pandas.Index",
"numpy.sin",
"numpy.diff",
"numpy.float32",
"numpy.zeros",
"pandas.MultiIndex",
"numpy.isnan",
"numpy.random.choice",
"pandas.Timedelta",
"numpy.int64",
"pandas.MultiIndex.from_product",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"pandas.date_range",
"numpy.array",
"numpy.random.RandomState",
"numpy.meshgrid",
"pandas.CategoricalIndex",
"numpy.random.random",
"numpy.random.seed",
"pandas.period_range",
"numpy.gradient",
"pandas.MultiIndex.from_arrays",
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"numpy.ones",
"numpy.random.normal",
"numpy.float64",
"numpy.random.uniform",
"pandas.Timestamp"
]
] |
vfcosta/coegan-trained
|
[
"44174e68909d9c03bf2e4b7e4c7a48237a560183"
] |
[
"evolution/evaluator.py"
] |
[
"import util.tools as tools\nfrom util import config\nimport torch\nimport logging\nimport numpy as np\nfrom evolution.population import Population\n\nlogger = logging.getLogger(__name__)\n\n\nclass Evaluator:\n\n def __init__(self, train_loader, validation_loader):\n self.train_loader = train_loader\n self.validation_loader = validation_loader\n self.best_discriminators = []\n self.best_generators = []\n self.initial = True\n self.batches = []\n self.eval_batches = []\n\n def init_generation(self, generation):\n self.batches = []\n self.eval_batches = []\n\n def train_evaluate(self, G, D, batches_limit):\n logger.debug(f\"train: G({G.genome.gan_type}) x D({D.genome.gan_type}), batches: {batches_limit}\")\n if config.evolution.evaluation.reset_optimizer:\n D.reset_optimizer_state()\n G.reset_optimizer_state()\n\n if G.invalid or D.invalid: # do not evaluate if G or D are invalid\n logger.warning(\"invalid D or G\")\n return\n torch.cuda.empty_cache()\n n = 0\n G, D = tools.cuda(G), tools.cuda(D) # load everything on gpu (cuda)\n G.train()\n D.train()\n G.win_rate, D.win_rate = 0, 0\n while n < batches_limit:\n image_loader = self.batches if config.evolution.evaluation.same_batches and self.batches else self.train_loader\n for images, _ in image_loader:\n if config.evolution.evaluation.same_batches and image_loader != self.batches:\n self.batches.append((images, _))\n n += 1\n images = tools.cuda(images)\n if n % config.gan.generator_iterations == 0:\n D.do_train(G, images)\n if n % config.gan.critic_iterations == 0:\n G.do_train(D, images)\n if n >= config.gan.batches_limit:\n break\n D.win_rate /= n\n G.win_rate = 1 - D.win_rate\n D.calc_skill_rating(G)\n G.calc_skill_rating(D)\n # print(\"train GLICKO G:\", G.skill_rating, G.win_rate, \", D:\", D.skill_rating, D.win_rate)\n\n G.cpu(), D.cpu() # move variables back from gpu to cpu\n torch.cuda.empty_cache()\n\n def evaluate_population(self, generators, discriminators, batches_limit=None, evaluation_type=None, calc_fid=True):\n \"\"\"Evaluate the population using all-vs-all pairing strategy\"\"\"\n batches_limit = batches_limit or config.gan.batches_limit\n evaluation_type = evaluation_type or config.evolution.evaluation.type\n for i in range(config.evolution.evaluation.iterations):\n if evaluation_type == \"random\":\n for D in discriminators:\n for g in np.random.choice(generators, 2, replace=False):\n self.train_evaluate(g, D, batches_limit)\n for G in generators:\n for d in np.random.choice(discriminators, 2, replace=False):\n self.train_evaluate(G, d, batches_limit)\n elif evaluation_type == \"spatial\":\n rows = 3\n cols = len(discriminators)//rows\n pairs = []\n for center in range(len(discriminators)):\n pairs.append([(center, n) for n in tools.get_neighbors(center, rows, cols)])\n # reorder pairs to avoid sequential training\n pairs = np.transpose(np.array(pairs), (1, 0, 2)).reshape(-1, 2)\n for g, d in pairs:\n self.train_evaluate(generators[g], discriminators[d], batches_limit)\n elif evaluation_type == \"spatial2\":\n rows = 3\n cols = len(discriminators)//rows\n for center in range(len(discriminators)):\n neighbors = tools.get_neighbors(center, rows, cols)\n norm = len(neighbors)\n for n in neighbors:\n self.train_evaluate(generators[center], discriminators[n].clone(), batches_limit)\n self.train_evaluate(generators[n].clone(), discriminators[center], batches_limit)\n\n elif evaluation_type == \"all-vs-all\" and config.evolution.evaluation.clone_adversarial:\n # train all-vs-all in a non-sequential order\n pairs = 
tools.permutations(generators, discriminators)\n original_generators = [g.clone() for g in generators]\n original_discriminators = [d.clone() for d in discriminators]\n for g, d in pairs:\n self.train_evaluate(generators[g], original_discriminators[d].clone(), batches_limit)\n self.train_evaluate(original_generators[g].clone(), discriminators[d], batches_limit)\n elif evaluation_type == \"all-vs-all\":\n # train all-vs-all in a non-sequential order\n pairs = tools.permutations(generators, discriminators)\n for g, d in pairs:\n self.train_evaluate(generators[g], discriminators[d], batches_limit)\n elif evaluation_type in [\"all-vs-best\", \"all-vs-species-best\", \"all-vs-kbest\", \"all-vs-kbest-previous\"]:\n if config.evolution.evaluation.initialize_all and self.initial:\n self.initial = False\n # as there are no way to determine the best G and D, we rely on all-vs-all for the first evaluation\n return self.evaluate_population(generators, discriminators, batches_limit,\n evaluation_type=\"all-vs-all\")\n\n pairs = tools.permutations(discriminators, self.best_generators)\n for d, g in pairs:\n adversarial = self.best_generators[g]\n if config.evolution.evaluation.clone_adversarial:\n adversarial = adversarial.clone()\n self.train_evaluate(adversarial, discriminators[d], batches_limit)\n pairs = tools.permutations(generators, self.best_discriminators)\n for g, d in pairs:\n adversarial = self.best_discriminators[d]\n if config.evolution.evaluation.clone_adversarial:\n adversarial = adversarial.clone()\n self.train_evaluate(generators[g], adversarial, batches_limit)\n\n # reset FID\n for G in generators:\n G.fid_score = None\n\n images, n = None, 0\n for batch, _ in self.validation_loader:\n if images is None:\n images = batch\n else:\n images = torch.cat((images, batch))\n n += 1\n if n >= config.evolution.fitness.evaluation_batches:\n break\n images = tools.cuda(images)\n if len(generators) > 0:\n for p in discriminators:\n p = tools.cuda(p)\n p.calc_global_metrics(self.best_generators or [Population(generators).best()], images)\n p.cpu()\n if len(discriminators) > 0:\n for p in generators:\n p = tools.cuda(p)\n p.calc_global_metrics(self.best_discriminators or [Population(discriminators).best()], images)\n p.cpu()\n\n # # update the skill rating for the next generation\n for p in discriminators + generators + self.best_discriminators + self.best_generators:\n p.finish_calc_skill_rating()\n for p in discriminators + generators:\n p.finish_generation(calc_fid=calc_fid)\n\n def evaluate_all_validation(self, generators, discriminators):\n # evaluate in validation\n logger.info(f\"best G: {len(self.best_generators)}, best D: {len(self.best_discriminators)}\")\n for D in discriminators:\n for G in self.best_generators + generators:\n with torch.no_grad():\n self.evaluate_validation(G, D)\n for G in generators:\n for D in self.best_discriminators:\n with torch.no_grad():\n self.evaluate_validation(G, D)\n\n # # update the skill rating for the next generation\n for p in discriminators + generators + self.best_discriminators + self.best_generators:\n p.finish_calc_skill_rating()\n\n def update_bests(self, generators_population, discriminators_population):\n # store best of generation in coevolution memory\n self.best_discriminators = self.get_bests(discriminators_population, self.best_discriminators)\n self.best_generators = self.get_bests(generators_population, self.best_generators)\n\n def evaluate_validation(self, G, D, eval_generator=True, eval_discriminator=True):\n if G.invalid or D.invalid: 
# do not evaluate if G or D are invalid\n logger.warning(\"invalid D or G\")\n return\n torch.cuda.empty_cache()\n G, D = tools.cuda(G), tools.cuda(D)\n G.eval(), D.eval()\n G.win_rate, D.win_rate = 0, 0\n n = 0\n while n < config.evolution.fitness.evaluation_batches:\n image_loader = self.eval_batches if config.evolution.evaluation.same_batches and self.eval_batches else self.validation_loader\n for images, _ in image_loader:\n if config.evolution.evaluation.same_batches and image_loader != self.eval_batches:\n self.eval_batches.append((images, _))\n n += 1\n images = tools.cuda(images)\n if eval_discriminator:\n D.do_eval(G, images) # FIXME always eval D when skill rating is enabled\n if eval_generator:\n G.do_eval(D, images)\n G.win_rate = 1 - D.win_rate\n if n >= config.evolution.fitness.evaluation_batches:\n break\n\n D.win_rate /= n\n G.win_rate = 1 - D.win_rate\n if eval_discriminator:\n D.calc_skill_rating(G)\n if eval_generator:\n G.calc_skill_rating(D)\n\n logger.debug(f\"eval GLICKO G: {G.skill_rating} {G.win_rate}, D: {D.skill_rating} {D.win_rate}\")\n G, D = G.cpu(), D.cpu() # move variables back from gpu to cpu\n torch.cuda.empty_cache()\n\n def get_bests(self, population, previous_best=[]):\n if config.evolution.evaluation.type == \"all-vs-species-best\":\n return [species.best() for species in population.species_list]\n elif config.evolution.evaluation.type == \"all-vs-best\":\n return (population.bests(1) + previous_best)[:config.evolution.evaluation.best_size]\n elif config.evolution.evaluation.type == \"all-vs-kbest\":\n return population.bests(config.evolution.evaluation.best_size)\n elif config.evolution.evaluation.type == \"all-vs-kbest-previous\":\n return (population.bests(1) + previous_best)[:config.evolution.evaluation.best_size]\n return (population.bests(1) + previous_best)[:config.evolution.evaluation.best_size]\n"
] |
[
[
"numpy.random.choice",
"torch.cat",
"torch.cuda.empty_cache",
"torch.no_grad",
"numpy.array"
]
] |
ReneeYe/XSTNet
|
[
"c5e508aed878d13fea790caee71db1ce77619465",
"c5e508aed878d13fea790caee71db1ce77619465"
] |
[
"neurst/tasks/cross_modal_translation.py",
"neurst/tasks/seq2seq.py"
] |
[
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\nimport numpy as np\nimport tensorflow as tf\nfrom absl import logging\n\nimport neurst.data.dataset_utils as dataset_utils\nfrom neurst.data.data_pipelines import DataPipeline, build_data_pipeline\nfrom neurst.data.data_pipelines.tagged_text_data_pipeline import TaggedTextDataPipeline\nfrom neurst.data.datasets import Dataset\nfrom neurst.layers.metric_layers.token_metric_layers import AudioFramesMetricLayer, SequenceTokenMetricLayer, BatchCountMetricLayer\nfrom neurst.models import build_model\nfrom neurst.metrics import build_metric\nfrom neurst.models.model_utils import deduce_text_length\nfrom neurst.tasks import register_task\nfrom neurst.tasks.task import Task\nfrom neurst.training.training_utils import minimal_multiple\nfrom neurst.utils import compat\nfrom neurst.utils.configurable import deep_merge_dict\nfrom neurst.utils.flags_core import Flag, ModuleFlag\nfrom neurst.tasks.speech2text import create_audio_bucket_boundaries\n\n\ndef get_speech2text_bucket_sizes(args, num_replicas_in_sync):\n audio_bucket_boundaries = create_audio_bucket_boundaries(args[\"max_audio_src_len\"],\n args[\"batch_bucket_min_audio_src_len\"])\n audio_bucket_boundaries[-1] = minimal_multiple(audio_bucket_boundaries[-1], 8)\n batch_size = dataset_utils.adjust_batch_size(\n args[\"audio_batch_size\"],\n args[\"batch_size_per_gpu\"],\n num_replicas_in_sync=num_replicas_in_sync,\n verbose=False)\n batch_size_per_gpu = batch_size // num_replicas_in_sync\n bucket_batch_sizes = [int(batch_size_per_gpu // bound\n * num_replicas_in_sync) for bound in audio_bucket_boundaries]\n return audio_bucket_boundaries, bucket_batch_sizes\n\n\ndef get_text2text_bucket_sizes(args, num_replicas_in_sync):\n src_text_bucket_boundaries = dataset_utils.create_batch_bucket_boundaries(args[\"max_text_src_len\"])\n bucket_batch_sizes = dataset_utils.adjust_batch_size(\n args[\"text_batch_size\"],\n args[\"batch_size_per_gpu\"],\n bucket_boundaries={\"src_text\": src_text_bucket_boundaries}\n if args[\"batch_by_tokens\"] else None,\n boundaries_reduce_to_length_fn=lambda x: max(tf.nest.flatten(x)),\n num_replicas_in_sync=num_replicas_in_sync)\n return src_text_bucket_boundaries, bucket_batch_sizes\n\n\ndef get_speech2text_bucket_size_with_ratio(args, \n audio_bucket_boundaries, \n bucket_batch_sizes):\n frame_transcript_ratio = args.get(\"experimental_frame_transcript_ratio\", None)\n assert frame_transcript_ratio is not None, \"define experimental_frame_transcript_ratio, or it will OOM!\"\n trans_bucket_boundaries = [\n int(bound / (frame_transcript_ratio + i * (\n args[\"max_audio_src_len\"] / args[\"max_audio_trg_len\"] - frame_transcript_ratio) /\n len(audio_bucket_boundaries)))\n for i, bound in enumerate(audio_bucket_boundaries)]\n trans_bucket_boundaries = [minimal_multiple(min(i, args[\"max_audio_trg_len\"]), 8) for i in\n trans_bucket_boundaries]\n num_buckets = len(trans_bucket_boundaries)\n true_trans_bucket_boundaries 
= []\n num_input_shapes = 0\n for idx, (batc, bound, tbound) in enumerate(zip(bucket_batch_sizes, audio_bucket_boundaries,\n trans_bucket_boundaries)):\n max_trans_len = [tbound,\n trans_bucket_boundaries[min(idx + 1, len(bucket_batch_sizes) - 1)]]\n num_input_shapes += len(set(max_trans_len))\n true_trans_bucket_boundaries.append(max_trans_len)\n logging.info(f\"There are {num_input_shapes} input shapes to be compiled:\")\n for idx, (batc, bound, tbound) in enumerate(zip(bucket_batch_sizes, audio_bucket_boundaries,\n true_trans_bucket_boundaries)):\n logging.info(f\" - batch={batc}, maximum-frames={bound}, \"\n f\"maximum-transcript-length={set(tbound)}\")\n true_trans_bucket_boundaries = tf.constant(true_trans_bucket_boundaries, dtype=tf.int32)\n true_audio_bucket_boundaries = tf.transpose(tf.constant([audio_bucket_boundaries] * 2, dtype=tf.int32))\n\n return true_audio_bucket_boundaries, true_trans_bucket_boundaries, num_buckets\n\n\n@register_task([\"xm_translation\", \"xst_translation\", \"cross_modal_translation\", \"XModalPretrain\"])\nclass CrossModalTranslation(Task):\n \"\"\" Defines the cross-modal(audio & text) pre-train task. \"\"\"\n\n def __init__(self, args):\n \"\"\" Initializes the task.\n\n Args:\n args: A dict of model configurations.\n \"\"\"\n super(CrossModalTranslation, self).__init__(args)\n text_data_pipeline_cls = args.get(\"text_data_pipeline.class\", TaggedTextDataPipeline)\n text_data_pipeline_params = args.get(\"text_data_pipeline.params\", None) or {}\n self._text_data_pipeline = build_data_pipeline(\n text_data_pipeline_cls, **text_data_pipeline_params)\n self._audio_feature_dim = args[\"audio_feature_dim\"]\n self._audio_feature_channels = args[\"audio_feature_channels\"]\n\n def get_config(self):\n return {\n \"text_data_pipeline.class\": self._text_data_pipeline.__class__.__name__,\n \"text_data_pipeline.params\": self._text_data_pipeline.get_config(),\n \"audio_feature_dim\": self._audio_feature_dim,\n \"audio_feature_channels\": self._audio_feature_channels\n }\n\n @staticmethod\n def class_or_method_args():\n this_args = super(CrossModalTranslation, CrossModalTranslation).class_or_method_args()\n this_args.extend([\n ModuleFlag(\"text_data_pipeline\", DataPipeline.REGISTRY_NAME,\n default=TaggedTextDataPipeline.__name__,\n help=\"The text data pipeline.\"),\n Flag(\"audio_feature_dim\", dtype=Flag.TYPE.INTEGER, default=1,\n help=\"The dimension of audio features.\"),\n Flag(\"audio_feature_channels\", dtype=Flag.TYPE.INTEGER, default=1,\n help=\"The number of channels of audio features.\"),\n\n Flag(\"max_audio_src_len\", dtype=Flag.TYPE.INTEGER, default=None,\n help=\"The maximum source length of training audio frames.\"),\n Flag(\"max_text_src_len\", dtype=Flag.TYPE.INTEGER, default=None,\n help=\"The maximum source length of training text data.\"),\n\n Flag(\"batch_bucket_min_audio_src_len\", dtype=Flag.TYPE.INTEGER, default=1000,\n help=\"The minimum source length of the training bucket of audio frames.\"),\n Flag(\"batch_bucket_min_text_src_len\", dtype=Flag.TYPE.INTEGER, default=120,\n help=\"The minimum source length of the training bucket of text data.\"),\n\n Flag(\"max_audio_trg_len\", dtype=Flag.TYPE.INTEGER, default=None,\n help=\"The maximum target length of training audio data.\"),\n Flag(\"max_text_trg_len\", dtype=Flag.TYPE.INTEGER, default=None,\n help=\"The maximum target length of training text data.\"),\n\n Flag(\"truncate_src\", dtype=Flag.TYPE.BOOLEAN, default=None,\n help=\"Whether to truncate source to max_audio_src_len or 
max_text_src_len.\"),\n Flag(\"truncate_trg\", dtype=Flag.TYPE.BOOLEAN, default=None,\n help=\"Whether to truncate target to max_audio_trg_len or max_text_trg_len.\"),\n\n Flag(\"experimental_frame_transcript_ratio\", dtype=Flag.TYPE.INTEGER, default=None,\n help=\"The ratio of the number of frames and its transcript for training batch bucket.\"),\n\n Flag(\"batch_by_frames\", dtype=Flag.TYPE.BOOLEAN, default=True,\n help=\"Whether to batch the data by audio frames.\"),\n Flag(\"audio_batch_size\", dtype=Flag.TYPE.INTEGER, default=None,\n help=\"The batch size of audio (frames).\"),\n Flag(\"batch_by_tokens\", dtype=Flag.TYPE.BOOLEAN, default=True,\n help=\"Whether to batch the data by text tokens.\"),\n Flag(\"text_batch_size\", dtype=Flag.TYPE.INTEGER, default=None,\n help=\"The batch size of text (tokens).\"),\n ])\n return this_args\n\n def inputs_signature(self, mode) -> Tuple[dict, dict]:\n \"\"\"Returns the input dtypes and signatures (from dataset).\"\"\"\n dtypes = {\"audio\": tf.float32, \"audio_length\": tf.int64,\n \"src_text\": tf.int64,\n \"tgt_text\": tf.int64, \"tgt_lang\": tf.int64}\n\n signatures = {\n \"audio\": tf.TensorShape([None, None]),\n \"audio_length\": tf.TensorShape([None, ]),\n \"src_text\": tf.TensorShape([None, None]),\n \"tgt_text\": tf.TensorShape([None, None]),\n \"tgt_lang\": tf.TensorShape([None, None]),\n }\n\n return dtypes, signatures\n\n def build_model(self, args, name=None):\n \"\"\" Creates the model. \"\"\"\n model = build_model(args, \n {\"audio_feature_dim\": self._audio_feature_dim,\n \"audio_feature_channels\": self._audio_feature_channels},\n self._text_data_pipeline.meta, \n name=name)\n return model\n\n def example_to_input(self, batch_of_data: dict, mode) -> dict:\n \"\"\" Transform the data examples to model acceptable inputs.\n\n Args:\n batch_of_data: A dict: name -> tf.keras.layers.Input\n mode: The running mode.\n\n Returns: The input data for model.\n \"\"\"\n batch = tf.shape(batch_of_data[\"audio\"])[0]\n\n input_dict = {\n \"audio\": tf.reshape(batch_of_data[\"audio\"],\n [batch, -1, self._audio_feature_dim, self._audio_feature_channels]),\n \"audio_length\": batch_of_data[\"audio_length\"],\n \"src_text\": batch_of_data[\"src_text\"],\n \"src_length\": deduce_text_length(batch_of_data[\"src_text\"],\n self._text_data_pipeline.meta[\"pad_id\"],\n self._text_data_pipeline.meta[\"padding_mode\"]),\n \"trg_lang\": batch_of_data[\"tgt_lang\"],\n }\n target_bos = batch_of_data[\"tgt_text\"][:, 0] # dim=1,\n\n if mode == compat.ModeKeys.INFER:\n input_dict[\"trg_input\"] = target_bos\n else:\n input_dict[\"trg\"] = batch_of_data[\"tgt_text\"]\n input_dict[\"trg_length\"] = deduce_text_length(batch_of_data[\"tgt_text\"],\n self._text_data_pipeline.meta[\"pad_id\"],\n self._text_data_pipeline.meta[\"padding_mode\"])\n input_dict[\"trg_input\"] = tf.concat([tf.expand_dims(target_bos, axis=-1),\n batch_of_data[\"tgt_text\"][:, :-1]], axis=1)\n\n return input_dict\n\n def get_data_postprocess_fn(self, mode):\n if mode == compat.ModeKeys.INFER:\n return self._text_data_pipeline.recover\n raise ValueError(\"No postprocess for TRAIN/EVAL.\")\n\n def get_data_preprocess_fn(self, mode, ds, args=None) -> dict:\n \"\"\" Preprocess data sample according to this task.\n Args:\n args: A dict containing dataset arguments. 
may contains:\n - args[\"task\"] in [\"MT\",\"ASR\", \"ST\"]\n mode: A ModeKeys indicating the running mode.\n ds: neurst.data.datasets.XMMultipleDataset\n\n Returns: A dict, A callable function to collate (process) a data sample.\n map_func[\"speech2text\"][name] = A callable function to process speech2text data\n map_func[\"text2text\"][name] = A callable function to process text2text data\n \"\"\"\n\n if args is None:\n args = self._args\n else:\n args = deep_merge_dict(self._args, args, local_overwrite=False)\n \n trunc_audio = args.get(\"truncate_src\", None)\n max_audio_len = args.get(\"max_audio_src_len\", None)\n max_text_src_len = args.get(\"max_text_src_len\", None)\n trunc_text_trg = args.get(\"truncate_trg\", None)\n max_text_trg_len = args.get(\"max_text_trg_len\", None)\n\n def _process_audio(audio):\n if trunc_audio and max_audio_len:\n audio = audio[:max_audio_len * self._audio_feature_dim * self._audio_feature_channels]\n return audio\n\n def _process_text(text, tag):\n if isinstance(text, tf.Tensor) and (text.dtype == tf.string):\n text = text.as_string().decode('utf-8')\n if isinstance(text, str):\n text = self._text_data_pipeline.process(text, is_processed=False)\n if mode == compat.ModeKeys.TRAIN and trunc_text_trg and max_text_trg_len:\n if tag == \"tgt_text\":\n max_text_len = max_text_trg_len\n elif tag == \"src_text\":\n max_text_len = max_text_src_len\n else: # tag in [\"src_lang\", \"tgt_lang\"]\n max_text_len = 10 # only 1 token, set a arbitrary number\n if isinstance(text, tf.Tensor):\n text = tf.cond(\n tf.less_equal(tf.size(text), max_text_len), \n lambda: text,\n lambda: tf.concat([text[:(max_text_len - 1)], text[-1:]], axis=0))\n else:\n if len(text) > max_text_len:\n text = text[:(max_text_len - 1)] + text[-1:]\n return text\n\n def _process_lang(lang):\n if not compat.is_tf_tensor(lang) and isinstance(lang, str):\n if not lang.startswith(\"<\"):\n lang = f\"<{lang}>\"\n return self._text_data_pipeline.lang2idx(lang)\n return lang\n\n def _has_lang_tag(text):\n if isinstance(text, tf.Tensor) and (text.dtype == tf.string):\n text = text.as_string()\n if isinstance(text, str):\n return text.startswith(\"<\")\n return True\n\n def _process_speech2text(data):\n audio = _process_audio(data[\"audio\"])\n lang = data.get(\"tgt_lang\", None)\n ret = {\"audio\": audio,\n \"audio_length\": tf.cast((tf.shape(audio)[0] if isinstance(audio, tf.Tensor)\n else audio.shape[0]) // self._audio_feature_dim // self._audio_feature_channels,\n dtype=tf.int64),\n \"src_text\": data[\"src_text\"]}\n if _has_lang_tag(data[\"tgt_text\"]) or (lang is None):\n ret[\"tgt_lang\"] = [_process_text(data[\"tgt_text\"], \"tgt_text\")[0]]\n ret[\"tgt_text\"] = _process_text(data[\"tgt_text\"], \"tgt_text\")\n else:\n ret[\"tgt_lang\"] = [_process_lang(lang)]\n ret[\"tgt_text\"] = [_process_lang(lang)] + _process_text(data[\"tgt_text\"], \"tgt_text\")\n return ret\n\n def _process_text2text(data):\n ret = {\"audio\": tf.constant([], dtype=tf.float32),\n \"audio_length\": tf.cast(0, dtype=tf.int64)}\n if _has_lang_tag(data[\"tgt_text\"]):\n ret[\"src_text\"] = _process_text(data[\"src_text\"], \"src_text\")\n ret[\"tgt_text\"] = _process_text(data[\"tgt_text\"], \"tgt_text\")\n ret[\"tgt_lang\"] = [_process_text(data[\"tgt_text\"], \"tgt_text\")[0]]\n else:\n ret[\"src_text\"] = [_process_lang(data[\"src_lang\"])] + _process_text(data[\"src_text\"], \"src_text\")\n ret[\"tgt_text\"] = [_process_lang(data[\"tgt_lang\"])] + _process_text(data[\"tgt_text\"], \"tgt_text\")\n ret[\"tgt_lang\"] = 
[_process_lang(data[\"tgt_lang\"])]\n return ret\n\n preprocess_func_dict = {}\n for ds_type in ds.datasets:\n preprocess_func_dict[ds_type] = {}\n if ds_type == \"speech2text\":\n for ds_name in ds.datasets[ds_type]:\n preprocess_func_dict[ds_type][ds_name] = _process_speech2text\n elif ds_type == \"text2text\":\n for ds_name in ds.datasets[ds_type]:\n preprocess_func_dict[ds_type][ds_name] = _process_text2text\n else:\n logging.warning(\"dataset type must be `text2text` or `speech2text` \")\n\n return preprocess_func_dict\n\n def create_and_batch_tfds(self, ds: Dataset, mode,\n args=None, num_replicas_in_sync=1) -> tf.data.Dataset:\n \"\"\" Creates a dataset according to the `mode`.\n\n Args:\n args: A dict containing dataset arguments.\n ds: A neurst.data.datasets.Dataset object. neurst.data.datasets.XMMultipleDataset object\n mode: A ModeKeys indicating the running mode.\n num_replicas_in_sync: The number of GPUs or other workers. We will generate global\n batches, and each global batch is equally divisible by number of replicas.\n\n Returns:\n A tf.data.Dataset or a INFER_DATA tuple.\n \"\"\"\n if args is None:\n args = self._args\n else:\n args = deep_merge_dict(self._args, args, local_overwrite=False)\n\n float_zero = tf.constant(0, dtype=tf.float32)\n int_zero = tf.constant(0, dtype=tf.int64)\n eos = tf.constant(self._text_data_pipeline.meta[\"eos_id\"], dtype=tf.int64)\n\n padding_values = {\"audio\": float_zero,\n \"audio_length\": int_zero,\n \"src_text\": eos,\n \"tgt_text\": eos,\n \"tgt_lang\": eos}\n \n dataset = ds.build(map_func=self.get_data_preprocess_fn(mode, ds, args),\n map_output_dtypes=self.inputs_signature(mode)[0],\n auto_shard=(mode == compat.ModeKeys.TRAIN),\n shuffle=(mode == compat.ModeKeys.TRAIN))\n\n if mode != compat.ModeKeys.TRAIN:\n is_s2t = True\n for x in dataset.take(1):\n if tf.size(x[\"audio\"]) == 0:\n is_s2t = False\n padded_shapes = {\"audio_length\": [], \"tgt_text\": [None], \"tgt_lang\": [None]}\n if is_s2t:\n padded_shapes[\"audio\"] = [None]\n padded_shapes[\"src_text\"] = [tf.constant(1, dtype=tf.int32)]\n return dataset.cache().padded_batch(\n dataset_utils.adjust_batch_size(args[\"batch_size\"],\n num_replicas_in_sync=num_replicas_in_sync),\n padded_shapes=padded_shapes,\n padding_values=padding_values,\n drop_remainder=False)\n else:\n padded_shapes[\"audio\"] = [tf.constant(8000, dtype=tf.float32)]\n padded_shapes[\"src_text\"] = [None]\n return dataset.cache().padded_batch(\n dataset_utils.adjust_batch_size(args[\"batch_size\"],\n num_replicas_in_sync=num_replicas_in_sync),\n padded_shapes=padded_shapes,\n padding_values=padding_values,\n drop_remainder=False\n )\n\n clean_length_dict = {\"audio\": args[\"max_audio_src_len\"] *\n self._audio_feature_dim * self._audio_feature_channels,\n \"audio_length\": -1,\n \"src_text\": args[\"max_text_src_len\"],\n \"tgt_text\": args[\"max_text_trg_len\"],\n \"tgt_lang\": -1}\n dataset = dataset.filter(\n lambda data_sample: tf.reduce_all([\n (length == -1) or (length is None) or\n tf.shape(data_sample[k])[0] <= length\n for k, length in clean_length_dict.items()]))\n\n logging.info(\"Created training dataset and batchifying...\")\n audio_bucket_boundaries, s2t_bucket_batch_sizes = get_speech2text_bucket_sizes(args,\n num_replicas_in_sync)\n s2t_audio_bucket_boundaries, s2t_trans_bucket_boundries, s2t_buckets_num = \\\n get_speech2text_bucket_size_with_ratio(args, audio_bucket_boundaries, \n s2t_bucket_batch_sizes)\n s2t_bucket_batch_sizes = tf.constant(s2t_bucket_batch_sizes, dtype=tf.int64)\n 
audio_bucket_boundaries = tf.constant(audio_bucket_boundaries, dtype=tf.int32)\n\n text_bucket_boundaries, t2t_bucket_batch_sizes = get_text2text_bucket_sizes(args,\n num_replicas_in_sync)\n t2t_bucket_batch_sizes = tf.constant(t2t_bucket_batch_sizes, dtype=tf.int64)\n text_bucket_boundaries = tf.constant(text_bucket_boundaries, dtype=tf.int32)\n\n t2t_max_trg_len = tf.constant(args[\"max_text_trg_len\"], dtype=tf.int32)\n # make s2t batches\n t2t_bucket_num = tf.constant(len(t2t_bucket_batch_sizes), tf.int64)\n\n def example_to_bucket_id(examples):\n \"\"\"Return a tuple bucket_id for the example\"\"\"\n is_text2text = tf.equal(tf.cast(examples[\"audio_length\"], tf.int32),\n tf.constant(0, dtype=tf.int32))\n\n def _to_t2t_bucket_id():\n seq_length = tf.size(examples[\"src_text\"])\n conditions_c = tf.less_equal(tf.cast(seq_length, tf.int32),\n tf.cast(text_bucket_boundaries, tf.int32))\n return tf.reduce_min(tf.where(conditions_c))\n\n def _to_s2t_bucket_id():\n conditions_c = tf.logical_and(\n tf.less_equal(tf.cast(examples[\"audio_length\"], tf.int32), \n s2t_audio_bucket_boundaries),\n tf.less_equal(tf.size(examples[\"tgt_text\"]),\n s2t_trans_bucket_boundries))\n minimum_match = tf.where(conditions_c)[0]\n\n return (minimum_match[0] * s2t_buckets_num + minimum_match[1]) + t2t_bucket_num\n\n return tf.cond(is_text2text, _to_t2t_bucket_id, _to_s2t_bucket_id)\n\n def window_size_fn(bucket_id):\n def t2t_bucket_size():\n return t2t_bucket_batch_sizes[bucket_id]\n\n def s2t_bucket_size():\n s2t_bucket_id = bucket_id - t2t_bucket_num\n return s2t_bucket_batch_sizes[s2t_bucket_id // s2t_buckets_num]\n\n return tf.cond(tf.less(bucket_id, t2t_bucket_num),\n t2t_bucket_size, s2t_bucket_size)\n\n def batching_fn(bucket_id, grouped_dataset):\n bucket_batch_size = window_size_fn(bucket_id)\n\n def t2t_shapes():\n ret = {\"audio\": [tf.constant(5000, dtype=tf.int32)], \"audio_length\": [],\n \"src_text\": [text_bucket_boundaries[bucket_id]],\n \"tgt_text\": [t2t_max_trg_len],}\n ret[\"tgt_lang\"] = [1]\n return ret\n\n def s2t_shapes():\n s2t_bucket_id = bucket_id - t2t_bucket_num\n ret = {\"audio\": ([audio_bucket_boundaries[s2t_bucket_id // s2t_buckets_num]\n * self._audio_feature_dim * self._audio_feature_channels]),\n \"audio_length\": [],\n \"src_text\": [tf.constant(5, dtype=tf.int32)],\n \"tgt_text\": [s2t_trans_bucket_boundries[s2t_bucket_id // s2t_buckets_num][s2t_bucket_id % s2t_buckets_num]],\n \"tgt_lang\": [1]}\n return ret\n\n padded_shapes = tf.cond(tf.less(bucket_id, t2t_bucket_num),\n t2t_shapes, s2t_shapes)\n return grouped_dataset.padded_batch(\n bucket_batch_size,\n padded_shapes=padded_shapes,\n padding_values=padding_values,\n drop_remainder=True\n )\n tfds = dataset.apply(tf.data.experimental.group_by_window(\n key_func=example_to_bucket_id, reduce_func=batching_fn,\n window_size=None, window_size_func=window_size_fn))\n return tfds\n\n def build_metric_layer(self):\n return [AudioFramesMetricLayer(\"audio\"),\n SequenceTokenMetricLayer(\"trg\"), BatchCountMetricLayer(\"audio\")]\n\n def get_eval_metric(self, args, name=\"metric\", ds=None):\n \"\"\" Returns a neurst.metrics.metric.Metric object for evaluation.\"\"\"\n return build_metric(args[name + \".class\"], language=self._text_data_pipeline.meta[\"language\"],\n **args[name + \".params\"])\n",
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport tensorflow as tf\nfrom absl import logging\n\nimport neurst.data.dataset_utils as dataset_utils\nfrom neurst.data.data_pipelines import DataPipeline, build_data_pipeline\nfrom neurst.data.data_pipelines.text_data_pipeline import TextDataPipeline\nfrom neurst.data.datasets import Dataset\nfrom neurst.data.datasets.parallel_text_dataset import AbstractParallelDataset\nfrom neurst.data.text.vocab import PaddingMode\nfrom neurst.layers.metric_layers.token_metric_layers import BatchCountMetricLayer, SequenceTokenMetricLayer\nfrom neurst.metrics import build_metric\nfrom neurst.models import build_model\nfrom neurst.models.model_utils import deduce_text_length\nfrom neurst.tasks import register_task\nfrom neurst.tasks.task import Task\nfrom neurst.utils import compat\nfrom neurst.utils.configurable import deep_merge_dict\nfrom neurst.utils.flags_core import Flag, ModuleFlag\n\n\n@register_task(\"seq_to_seq\")\nclass Seq2Seq(Task):\n \"\"\" Defines the sequence to sequence task. \"\"\"\n\n def __init__(self, args):\n \"\"\" Initializes the task.\n\n Args:\n args: A dict of model configurations.\n \"\"\"\n src_data_pipeline_cls = args.get(\"src_data_pipeline.class\", TextDataPipeline)\n src_data_pipeline_params = args.get(\"src_data_pipeline.params\", None) or {}\n self._src_data_pipeline = build_data_pipeline(\n src_data_pipeline_cls, **src_data_pipeline_params)\n trg_data_pipeline_cls = args.get(\"trg_data_pipeline.class\", TextDataPipeline)\n trg_data_pipeline_params = args.get(\"trg_data_pipeline.params\", None) or {}\n self._trg_data_pipeline = build_data_pipeline(\n trg_data_pipeline_cls, **trg_data_pipeline_params)\n self._target_begin_of_sentence = args.get(\"target_begin_of_sentence\", \"bos\")\n super(Seq2Seq, self).__init__(args)\n\n def get_config(self):\n return {\n \"src_data_pipeline.class\": self._src_data_pipeline.__class__.__name__,\n \"src_data_pipeline.params\": self._src_data_pipeline.get_config(),\n \"trg_data_pipeline.class\": self._trg_data_pipeline.__class__.__name__,\n \"trg_data_pipeline.params\": self._trg_data_pipeline.get_config(),\n \"target_begin_of_sentence\": self._target_begin_of_sentence\n }\n\n @staticmethod\n def class_or_method_args():\n this_args = super(Seq2Seq, Seq2Seq).class_or_method_args()\n this_args.extend([\n # for creating data pipelines\n ModuleFlag(\"src_data_pipeline\", DataPipeline.REGISTRY_NAME,\n help=\"The source side data pipeline.\"),\n ModuleFlag(\"trg_data_pipeline\", DataPipeline.REGISTRY_NAME,\n help=\"The target side data pipeline.\"),\n # for preprocessing data\n Flag(\"max_src_len\", dtype=Flag.TYPE.INTEGER, default=None,\n help=\"The maximum source length of training data.\"),\n Flag(\"max_trg_len\", dtype=Flag.TYPE.INTEGER, default=None,\n help=\"The maximum target length of training data.\"),\n Flag(\"truncate_src\", dtype=Flag.TYPE.BOOLEAN, default=None,\n help=\"Whether to truncate source to max_src_len.\"),\n 
Flag(\"truncate_trg\", dtype=Flag.TYPE.BOOLEAN, default=None,\n help=\"Whether to truncate target to max_trg_len.\"),\n # for batching dataset\n Flag(\"batch_by_tokens\", dtype=Flag.TYPE.BOOLEAN, default=None,\n help=\"Whether to batch the data by word tokens.\"),\n Flag(\"target_begin_of_sentence\", dtype=Flag.TYPE.STRING, default=\"bos\",\n choices=[\"bos\", \"eos\"],\n help=\"The begin of sentence symbol for target side. The choice 'eos' \"\n \"is for compatibility with fairseq transformer.\")\n ])\n return this_args\n\n def inputs_signature(self, mode) -> Tuple[dict, dict]:\n \"\"\" Returns the input dtypes and signatures. \"\"\"\n dtypes = {\"feature\": tf.int64}\n signatures = {\"feature\": tf.TensorShape([None, None])}\n if mode == compat.ModeKeys.INFER:\n return dtypes, signatures\n dtypes[\"label\"] = tf.int64\n signatures[\"label\"] = tf.TensorShape([None, None])\n return dtypes, signatures\n\n def build_model(self, args, name=None):\n \"\"\" Builds and return a keras model. \"\"\"\n model = build_model(args, self._src_data_pipeline.meta,\n self._trg_data_pipeline.meta, name=name)\n return model\n\n def example_to_input(self, batch_of_data: dict, mode) -> dict:\n \"\"\" Transform the data examples to model acceptable inputs.\n\n Args:\n batch_of_data: A data tensor with shape [batch, ...]\n mode: The running mode.\n\n Returns: The input data for model.\n \"\"\"\n input_dict = {\"src\": batch_of_data[\"feature\"],\n \"src_length\": deduce_text_length(\n batch_of_data[\"feature\"], self._src_data_pipeline.meta[\"pad_id\"],\n self._src_data_pipeline.meta.get(\"padding_mode\", PaddingMode.EOS_AS_PADDING))}\n bosid = (self._trg_data_pipeline.meta[\"eos_id\"] if self._target_begin_of_sentence == \"eos\"\n else self._trg_data_pipeline.meta[\"bos_id\"])\n target_bos = tf.tile([tf.convert_to_tensor(bosid, dtype=tf.int64)],\n [tf.shape(input_dict[\"src\"])[0]])\n if mode == compat.ModeKeys.INFER:\n input_dict[\"trg_input\"] = target_bos\n else:\n input_dict[\"trg\"] = batch_of_data[\"label\"]\n input_dict[\"trg_length\"] = deduce_text_length(\n batch_of_data[\"label\"], self._trg_data_pipeline.meta[\"pad_id\"],\n self._trg_data_pipeline.meta.get(\"padding_mode\", PaddingMode.EOS_AS_PADDING))\n input_dict[\"trg_input\"] = tf.concat([tf.expand_dims(target_bos, axis=1),\n batch_of_data[\"label\"][:, :-1]], axis=1)\n return input_dict\n\n def get_data_postprocess_fn(self, mode):\n if mode == compat.ModeKeys.INFER:\n return self._trg_data_pipeline.recover\n raise ValueError(\"No postprocess for TRAIN/EVAL.\")\n\n def get_data_preprocess_fn(self, mode, data_status=compat.DataStatus.RAW, args=None) -> callable:\n \"\"\" Preprocess data sample according to this task.\n\n Args:\n args: A dict containing dataset arguments.\n mode: A ModeKeys indicating the running mode.\n data_status: The status of the data sample.\n\n Returns: A callable function to collate (process) a data sample.\n \"\"\"\n if args is None:\n args = self._args\n else:\n args = deep_merge_dict(self._args, args, local_overwrite=False)\n truncate_src = args.get(\"truncate_src\", None)\n truncate_trg = args.get(\"truncate_trg\", None)\n max_src_len = args.get(\"max_src_len\", None)\n max_trg_len = args.get(\"max_trg_len\", None)\n\n def _process_and_truncate(text, dp, trunc, max_len):\n if data_status != compat.DataStatus.PROJECTED:\n text = dp.process(text, is_processed=(data_status == compat.DataStatus.PROCESSED))\n if mode == compat.ModeKeys.TRAIN and trunc and max_len:\n if compat.is_tf_tensor(text):\n text = tf.cond(\n 
tf.less_equal(tf.size(text), max_len), lambda: text,\n lambda: tf.concat([text[:(max_len - 1)], text[-1:]], axis=0))\n elif len(text) > max_len:\n text = text[:(max_len - 1)] + text[-1:]\n return text\n\n if mode == compat.ModeKeys.INFER:\n return lambda data: {\n \"feature\": _process_and_truncate(data[\"feature\"],\n self._src_data_pipeline,\n truncate_src,\n max_src_len)}\n return lambda data: {\n \"feature\": _process_and_truncate(data[\"feature\"],\n self._src_data_pipeline,\n truncate_src,\n max_src_len),\n \"label\": _process_and_truncate(data[\"label\"],\n self._trg_data_pipeline,\n truncate_trg,\n max_trg_len)}\n\n def create_and_batch_tfds(self, ds: Dataset, mode,\n args=None, num_replicas_in_sync=1) -> tf.data.Dataset:\n \"\"\" Creates a dataset according to the `mode`.\n\n Args:\n args: A dict containing dataset arguments.\n ds: A neurst.data.datasets.Dataset object.\n mode: A ModeKeys indicating the running mode.\n num_replicas_in_sync: The number of GPUs or other workers. We will generate global\n batches, and each global batch is equally divisible by number of replicas.\n\n Returns:\n A tf.data.Dataset.\n \"\"\"\n if args is None:\n args = self._args\n else:\n args = deep_merge_dict(self._args, args, local_overwrite=False)\n src_eos = tf.constant(self._src_data_pipeline.meta[\"eos_id\"], dtype=tf.int64)\n trg_eos = tf.constant(self._trg_data_pipeline.meta[\"eos_id\"], dtype=tf.int64)\n\n assert isinstance(ds, AbstractParallelDataset), (\n \"The dataset for SeqToSeq task must inherit AbstractParallelDataset.\")\n\n dataset = ds.build(map_func=self.get_data_preprocess_fn(mode, ds.status, args),\n map_output_dtypes=self.inputs_signature(mode)[0],\n auto_shard=(mode == compat.ModeKeys.TRAIN),\n shuffle=(mode == compat.ModeKeys.TRAIN))\n\n if mode == compat.ModeKeys.INFER:\n logging.info(\"Creating test dataset.\")\n return dataset.cache().padded_batch(\n dataset_utils.adjust_batch_size(args[\"batch_size\"],\n num_replicas_in_sync=num_replicas_in_sync),\n padded_shapes={\"feature\": [None]},\n padding_values={\"feature\": src_eos},\n drop_remainder=False)\n elif mode == compat.ModeKeys.EVAL:\n logging.info(\"Creating evaluation dataset.\")\n return dataset.cache().padded_batch(\n dataset_utils.adjust_batch_size(args[\"batch_size\"],\n num_replicas_in_sync=num_replicas_in_sync),\n padded_shapes={\"feature\": [None], \"label\": [None]},\n padding_values={\"feature\": src_eos, \"label\": trg_eos},\n drop_remainder=False)\n else:\n logging.info(\"Creating training dataset.\")\n dataset = dataset_utils.clean_dataset_by_length(\n dataset, {\"feature\": args[\"max_src_len\"], \"label\": args[\"max_trg_len\"]})\n if args[\"cache_dataset\"]:\n dataset = dataset.cache()\n if args[\"shuffle_buffer\"]:\n dataset = dataset.shuffle(buffer_size=args[\"shuffle_buffer\"])\n padding_values = {\"feature\": src_eos, \"label\": trg_eos}\n if args[\"max_src_len\"] is None:\n raise RuntimeError(\"Must provide `max_src_len` for training.\")\n if args[\"max_trg_len\"] is None:\n raise RuntimeError(\"Must provide `max_trg_len` for training.\")\n src_bucket_boundaries, trg_bucket_boundaries = dataset_utils.associated_bucket_boundaries(\n dataset_utils.create_batch_bucket_boundaries(args[\"max_src_len\"]),\n dataset_utils.create_batch_bucket_boundaries(args[\"max_trg_len\"]))\n\n bucket_boundaries = {\n \"feature\": src_bucket_boundaries,\n \"label\": trg_bucket_boundaries\n }\n bucket_batch_sizes = dataset_utils.adjust_batch_size(\n args[\"batch_size\"],\n args[\"batch_size_per_gpu\"],\n 
bucket_boundaries=bucket_boundaries if args[\"batch_by_tokens\"] else None,\n boundaries_reduce_to_length_fn=lambda x: max(tf.nest.flatten(x)),\n num_replicas_in_sync=num_replicas_in_sync)\n return dataset_utils.batch_examples_by_token(\n dataset,\n bucket_boundaries=bucket_boundaries,\n bucket_batch_sizes=bucket_batch_sizes,\n padding_values=padding_values,\n example_length_func=lambda x: {k: tf.size(v) for k, v in x.items()}\n )\n\n def build_metric_layer(self):\n return [SequenceTokenMetricLayer(\"src\"), SequenceTokenMetricLayer(\"trg\"),\n BatchCountMetricLayer(\"src\")]\n\n def get_eval_metric(self, args, name=\"metric\", ds=None):\n \"\"\" Returns a neurst.metrics.metric.Metric object for evaluation.\"\"\"\n if ds is not None and hasattr(ds, \"trg_lang\") and ds.trg_lang is not None:\n return build_metric(args[name + \".class\"], language=ds.trg_lang,\n **args[name + \".params\"])\n return build_metric(args[name + \".class\"], language=self._trg_data_pipeline.meta[\"language\"],\n **args[name + \".params\"])\n"
] |
[
[
"tensorflow.data.experimental.group_by_window",
"tensorflow.cond",
"tensorflow.TensorShape",
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.where",
"tensorflow.nest.flatten",
"tensorflow.size"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.TensorShape",
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.nest.flatten",
"tensorflow.size"
]
] |
Albert0147/G-SFDA
|
[
"6ded750224266cd4cdb100a7fcedfa95688d22da"
] |
[
"train_tar_visda.py"
] |
[
"import argparse\nimport os, sys\nimport os.path as osp\nimport torchvision\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import transforms\nimport network, loss\nfrom torch.utils.data import DataLoader\nfrom data_list import ImageList, ImageList_idx\nimport random, pdb, math, copy\nfrom sklearn.metrics import confusion_matrix\nimport torch.nn.functional as F\n\n\ndef op_copy(optimizer):\n for param_group in optimizer.param_groups:\n param_group['lr0'] = param_group['lr']\n return optimizer\n\n\ndef lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):\n decay = (1 + gamma * iter_num / max_iter)**(-power)\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr0'] * decay\n param_group['weight_decay'] = 1e-3\n param_group['momentum'] = 0.9\n param_group['nesterov'] = True\n return optimizer\n\n\ndef image_train(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.RandomCrop(crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), normalize\n ])\n\n\ndef image_test(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(), normalize\n ])\n\n\ndef data_load(args):\n ## prepare data\n dsets = {}\n dset_loaders = {}\n train_bs = args.batch_size\n txt_src = open(args.s_dset_path).readlines()\n txt_tar = open(args.t_dset_path).readlines()\n txt_test = open(args.test_dset_path).readlines()\n\n dsize = len(txt_src)\n tr_size = int(0.9*dsize)\n # print(dsize, tr_size, dsize - tr_size)\n tr_txt, te_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])\n\n dsets[\"source_tr\"] = ImageList(tr_txt, transform=image_train())\n dset_loaders[\"source_tr\"] = DataLoader(dsets[\"source_tr\"],\n batch_size=train_bs,\n shuffle=True,\n num_workers=args.worker,\n drop_last=False)\n dsets[\"source_te\"] = ImageList(te_txt, transform=image_test())\n dset_loaders[\"source_te\"] = DataLoader(dsets[\"source_te\"],\n batch_size=train_bs,\n shuffle=True,\n num_workers=args.worker,\n drop_last=False)\n dsets[\"target\"] = ImageList_idx(txt_tar, transform=image_train())\n dset_loaders[\"target\"] = DataLoader(dsets[\"target\"],\n batch_size=train_bs,\n shuffle=True,\n num_workers=args.worker,\n drop_last=False)\n dsets[\"test\"] = ImageList_idx(txt_test, transform=image_test())\n dset_loaders[\"test\"] = DataLoader(dsets[\"test\"],\n batch_size=train_bs * 3,\n shuffle=False,\n num_workers=args.worker,\n drop_last=False)\n\n return dset_loaders\n\n\ndef cal_acc(loader, netF, netB, netC,t=0, flag=False):\n start_test = True\n with torch.no_grad():\n iter_test = iter(loader)\n for i in range(len(loader)):\n data = iter_test.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.cuda()\n outputs = netC(netB(netF(inputs),t=t)[0])\n if start_test:\n all_output = outputs.float().cpu()\n all_label = labels.float()\n start_test = False\n else:\n all_output = torch.cat((all_output, outputs.float().cpu()), 0)\n all_label = 
torch.cat((all_label, labels.float()), 0)\n _, predict = torch.max(all_output, 1)\n accuracy = torch.sum(\n torch.squeeze(predict).float() == all_label).item() / float(\n all_label.size()[0])\n mean_ent = torch.mean(loss.Entropy(\n nn.Softmax(dim=1)(all_output))).cpu().data.item()\n\n if flag:\n matrix = confusion_matrix(all_label, torch.squeeze(predict).float())\n acc = matrix.diagonal() / matrix.sum(axis=1) * 100\n aacc = acc.mean()\n aa = [str(np.round(i, 2)) for i in acc]\n acc = ' '.join(aa)\n return aacc, acc\n else:\n return accuracy * 100, mean_ent\n\n\n\ndef train_target(args):\n dset_loaders = data_load(args)\n ## set base network\n netF = network.ResBase(res_name=args.net).cuda()\n\n netB = network.feat_bootleneck_sdaE(type=args.classifier,\n feature_dim=netF.in_features,\n bottleneck_dim=args.bottleneck).cuda()\n netC = network.feat_classifier(type=args.layer,\n class_num=args.class_num,\n bottleneck_dim=args.bottleneck).cuda()\n\n modelpath = args.output_dir_src + '/source_F.pt'\n netF.load_state_dict(torch.load(modelpath))\n modelpath = args.output_dir_src + '/source_B.pt'\n netB.load_state_dict(torch.load(modelpath))\n modelpath = args.output_dir_src + '/source_C.pt'\n netC.load_state_dict(torch.load(modelpath))\n\n param_group = []\n for k, v in netF.named_parameters():\n if k.find('bn')!=-1:\n param_group += [{'params': v, 'lr': args.lr * 0.1}]\n\n for k, v in netB.named_parameters():\n #if k.find('em')==-1: # the embedding layer can be either trained or not\n if True:\n param_group += [{'params': v, 'lr': args.lr * 1}] \n for k, v in netC.named_parameters():\n param_group += [{'params': v, 'lr': args.lr * 1}]\n\n optimizer = optim.SGD(param_group)\n optimizer = op_copy(optimizer)\n\n #building feature bank and score bank\n loader = dset_loaders[\"target\"]\n num_sample=len(loader.dataset)\n fea_bank=torch.randn(num_sample,256)\n score_bank = torch.randn(num_sample, 12).cuda()\n\n netF.eval()\n netB.eval()\n netC.eval()\n with torch.no_grad():\n iter_test = iter(loader)\n for i in range(len(loader)):\n data = iter_test.next()\n inputs = data[0]\n indx=data[-1]\n #labels = data[1]\n inputs = inputs.cuda()\n output, _ = netB(netF(inputs), t=1) # a^t\n output_norm=F.normalize(output)\n outputs = netC(output)\n outputs=nn.Softmax(-1)(outputs)\n fea_bank[indx] = output_norm.detach().clone().cpu()\n score_bank[indx] = outputs.detach().clone() #.cpu()\n\n\n max_iter = args.max_epoch * len(dset_loaders[\"target\"])\n interval_iter = max_iter // args.interval\n iter_num = 0\n\n\n netF.train()\n netB.train()\n netC.train()\n acc_log=0\n while iter_num < max_iter:\n try:\n inputs_test, _, tar_idx = iter_test.next()\n except:\n iter_test = iter(dset_loaders[\"target\"])\n inputs_test, _, tar_idx = iter_test.next()\n\n if inputs_test.size(0) == 1:\n continue\n\n inputs_test = inputs_test.cuda()\n\n iter_num += 1\n lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter)\n\n features_test, masks = netB(netF(inputs_test),t=1)\n masks_old = masks\n outputs_test = netC(features_test)\n softmax_out = nn.Softmax(dim=1)(outputs_test)\n output_re = softmax_out.unsqueeze(1)\n\n with torch.no_grad():\n output_f_norm=F.normalize(features_test)\n fea_bank[tar_idx].fill_(-0.1) #do not use the current mini-batch in fea_bank\n output_f_=output_f_norm.cpu().detach().clone()\n distance = output_f_@fea_bank.T\n _, idx_near = torch.topk(distance,\n dim=-1,\n largest=True,\n k=10)\n score_near = score_bank[idx_near] #batch x K x num_class\n score_near=score_near.permute(0,2,1)\n\n # update banks\n 
fea_bank[tar_idx] = output_f_.detach().clone().cpu()\n score_bank[tar_idx] = softmax_out.detach().clone() #.cpu()\n\n const=torch.log(torch.bmm(output_re,score_near)).sum(-1)\n loss=-torch.mean(const)\n\n msoftmax = softmax_out.mean(dim=0)\n gentropy_loss = torch.sum(msoftmax *\n torch.log(msoftmax + args.epsilon))\n loss += gentropy_loss\n\n optimizer.zero_grad()\n loss.backward()\n\n for n, p in netB.bottleneck.named_parameters():\n if n.find('bias') == -1:\n mask_ = ((1 - masks_old)).view(-1, 1).expand(256, 2048).cuda()\n p.grad.data *= mask_\n else: #no bias here\n mask_ = ((1 - masks_old)).squeeze().cuda()\n p.grad.data *= mask_\n\n for n, p in netC.named_parameters():\n if n.find('weight_v') != -1:\n masks__=masks_old.view(1,-1).expand(12,256)\n mask_ = ((1 - masks__)).cuda()\n p.grad.data *= mask_\n\n for n, p in netB.bn.named_parameters():\n mask_ = ((1 - masks_old)).view(-1).cuda()\n p.grad.data *= mask_\n\n optimizer.step()\n\n if iter_num % interval_iter == 0 or iter_num == max_iter:\n netF.eval()\n netB.eval()\n netC.eval()\n if args.dset == 'visda-2017':\n acc_s_te, acc_list = cal_acc(dset_loaders['test'], netF, netB,\n netC,t=1,flag= True)\n accS_s_te, accS_list = cal_acc(dset_loaders['source_te'], netF, netB,\n netC,t=0,flag= True)\n log_str = 'Task: {}, Iter:{}/{}; Accuracy on target = {:.2f}%, Accuracy on source = {:.2f}%'.format(\n args.name, iter_num, max_iter, acc_s_te, accS_s_te\n ) + '\\n' + 'T: ' + acc_list + '\\n' + 'S: ' + accS_list\n\n args.out_file.write(log_str + '\\n')\n args.out_file.flush()\n print(log_str + '\\n')\n netF.train()\n netB.train()\n netC.train()\n\n if args.issave:\n if acc_s_te>acc_log:\n acc_log=acc_s_te\n torch.save(\n netF.state_dict(),\n osp.join(args.output_dir, \"target_F_\" + 'final' + \".pt\"))\n torch.save(\n netB.state_dict(),\n osp.join(args.output_dir, \"target_B_\" + 'final' + \".pt\"))\n torch.save(\n netC.state_dict(),\n osp.join(args.output_dir, \"target_C_\" + 'final' + \".pt\"))\n\n return netF, netB, netC\n\n\ndef print_args(args):\n s = \"==========================================\\n\"\n for arg, content in args.__dict__.items():\n s += \"{}:{}\\n\".format(arg, content)\n return s\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Ours')\n parser.add_argument('--gpu_id',\n type=str,\n nargs='?',\n default='8',\n help=\"device id to run\")\n parser.add_argument('--s', type=int, default=0, help=\"source\")\n parser.add_argument('--t', type=int, default=1, help=\"target\")\n parser.add_argument('--max_epoch',\n type=int,\n default=15,\n help=\"max iterations\")\n parser.add_argument('--interval', type=int, default=15)\n parser.add_argument('--batch_size',\n type=int,\n default=64,\n help=\"batch_size\")\n parser.add_argument('--worker',\n type=int,\n default=4,\n help=\"number of workers\")\n parser.add_argument(\n '--dset',\n type=str,\n default='visda-2017')\n parser.add_argument('--lr', type=float, default=1e-3, help=\"learning rate\")\n parser.add_argument('--net',\n type=str,\n default='resnet101')\n parser.add_argument('--seed', type=int, default=2020, help=\"random seed\")\n\n parser.add_argument('--bottleneck', type=int, default=256)\n parser.add_argument('--epsilon', type=float, default=1e-5)\n parser.add_argument('--layer',\n type=str,\n default=\"wn\",\n choices=[\"linear\", \"wn\"])\n parser.add_argument('--classifier',\n type=str,\n default=\"bn\",\n choices=[\"ori\", \"bn\"])\n parser.add_argument('--output', type=str, default='visda/target/')\n parser.add_argument('--output_src', 
type=str, default='visda/source/')\n parser.add_argument('--da',\n type=str,\n default='uda')\n parser.add_argument('--issave', type=bool, default=True)\n args = parser.parse_args()\n\n if args.dset == 'office-home':\n names = ['Art', 'Clipart', 'Product', 'RealWorld']\n args.class_num = 65\n if args.dset == 'visda-2017':\n names = ['train', 'validation']\n args.class_num = 12\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\n SEED = args.seed\n torch.manual_seed(SEED)\n torch.cuda.manual_seed(SEED)\n np.random.seed(SEED)\n random.seed(SEED)\n torch.backends.cudnn.deterministic = True\n\n for i in range(len(names)):\n if i == args.s:\n continue\n args.t = i\n\n folder = './data/'\n args.s_dset_path = folder + args.dset + '/' + names[\n args.s] + '_list.txt'\n args.t_dset_path = folder + args.dset + '/' + names[\n args.t] + '_list.txt'\n args.test_dset_path = folder + args.dset + '/' + names[\n args.t] + '_list.txt'\n\n args.output_dir_src = osp.join(args.output_src, args.da, args.dset,\n names[args.s][0].upper())\n args.output_dir = osp.join(\n args.output, args.da, args.dset,\n names[args.s][0].upper() + names[args.t][0].upper())\n args.name = names[args.s][0].upper() + names[args.t][0].upper()\n\n if not osp.exists(args.output_dir):\n os.system('mkdir -p ' + args.output_dir)\n if not osp.exists(args.output_dir):\n os.mkdir(args.output_dir)\n\n args.out_file = open(\n osp.join(args.output_dir, 'log_target' + '.txt'), 'w')\n args.out_file.write(print_args(args) + '\\n')\n args.out_file.flush()\n train_target(args)\n"
] |
[
[
"torch.nn.functional.normalize",
"torch.nn.Softmax",
"torch.mean",
"torch.max",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.load",
"torch.randn",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"numpy.round",
"torch.utils.data.random_split",
"torch.no_grad",
"torch.log",
"torch.bmm",
"torch.optim.SGD",
"torch.topk",
"torch.squeeze"
]
] |
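
The row above pairs a source-free target-adaptation script with the torch calls it exercises. As a reading aid only (not part of the dataset row), here is a minimal, self-contained sketch of how several of the listed calls (torch.nn.functional.normalize, torch.topk, torch.bmm, torch.log) combine into the neighborhood-consistency loss that the script computes from its feature and score banks; all tensor shapes and the random stand-in banks below are hypothetical.

    # Illustrative sketch, not the dataset's code: random tensors stand in for the
    # real feature/score memory banks; shapes (batch, bank size, classes, K) are assumed.
    import torch
    import torch.nn.functional as F

    batch, num_bank, feat_dim, num_class, k = 8, 1024, 256, 12, 10

    features = torch.randn(batch, feat_dim)                         # current mini-batch features
    softmax_out = torch.randn(batch, num_class).softmax(dim=-1)     # current predictions
    fea_bank = F.normalize(torch.randn(num_bank, feat_dim), dim=-1) # bank of normalized features
    score_bank = torch.randn(num_bank, num_class).softmax(dim=-1)   # bank of past predictions

    with torch.no_grad():
        sim = F.normalize(features, dim=-1) @ fea_bank.T            # cosine similarity to the bank
        _, idx_near = torch.topk(sim, k=k, dim=-1, largest=True)    # K nearest bank entries
        score_near = score_bank[idx_near].permute(0, 2, 1)          # batch x num_class x K

    # encourage each sample's prediction to agree with its K nearest neighbors' predictions
    const = torch.log(torch.bmm(softmax_out.unsqueeze(1), score_near)).sum(-1)
    loss = -torch.mean(const)
    print(loss.item())

In the script itself this term is combined with a negative-entropy regularizer on the mean prediction and back-propagated through the masked bottleneck; the sketch only isolates the neighbor-agreement part.
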
maxburke/arrow
|
[
"344ed4bed675c4913db5cc7b17d0e6cc57ea55c4"
] |
[
"python/pyarrow/tests/test_parquet.py"
] |
[
"# -*- coding: utf-8 -*-\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom collections import OrderedDict\nimport datetime\nimport decimal\nimport io\nimport json\nimport os\nimport six\nimport pickle\nimport pytest\n\nimport numpy as np\n\nimport pyarrow as pa\nfrom pyarrow.compat import guid, u, BytesIO, unichar, PY2\nfrom pyarrow.pandas_compat import _pandas_api\nfrom pyarrow.tests import util\nfrom pyarrow.filesystem import LocalFileSystem, FileSystem\n\ntry:\n import pyarrow.parquet as pq\nexcept ImportError:\n pq = None\n\n\ntry:\n import pandas as pd\n import pandas.util.testing as tm\n from .pandas_examples import dataframe_with_arrays, dataframe_with_lists\nexcept ImportError:\n pd = tm = None\n\n\n# Marks all of the tests in this module\n# Ignore these with pytest ... -m 'not parquet'\npytestmark = pytest.mark.parquet\n\n\[email protected](scope='module')\ndef datadir(datadir):\n return datadir / 'parquet'\n\n\ndef _write_table(table, path, **kwargs):\n # So we see the ImportError somewhere\n import pyarrow.parquet as pq\n\n if _pandas_api.is_data_frame(table):\n table = pa.Table.from_pandas(table)\n\n pq.write_table(table, path, **kwargs)\n return table\n\n\ndef _read_table(*args, **kwargs):\n return pq.read_table(*args, **kwargs)\n\n\ndef _roundtrip_table(table, read_table_kwargs=None,\n write_table_kwargs=None):\n read_table_kwargs = read_table_kwargs or {}\n write_table_kwargs = write_table_kwargs or {}\n\n buf = io.BytesIO()\n _write_table(table, buf, **write_table_kwargs)\n buf.seek(0)\n return _read_table(buf, **read_table_kwargs)\n\n\ndef _check_roundtrip(table, expected=None, read_table_kwargs=None,\n **write_table_kwargs):\n if expected is None:\n expected = table\n\n read_table_kwargs = read_table_kwargs or {}\n\n # intentionally check twice\n result = _roundtrip_table(table, read_table_kwargs=read_table_kwargs,\n write_table_kwargs=write_table_kwargs)\n assert result.equals(expected)\n result = _roundtrip_table(result, read_table_kwargs=read_table_kwargs,\n write_table_kwargs=write_table_kwargs)\n assert result.equals(expected)\n\n\ndef _roundtrip_pandas_dataframe(df, write_kwargs):\n table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(table, buf, **write_kwargs)\n\n buf.seek(0)\n table1 = _read_table(buf)\n return table1.to_pandas()\n\n\[email protected]('dtype', [int, float])\ndef test_single_pylist_column_roundtrip(tempdir, dtype):\n filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)\n data = [pa.array(list(map(dtype, range(5))))]\n table = pa.Table.from_arrays(data, names=['a'])\n _write_table(table, filename)\n table_read = _read_table(filename)\n for i in range(table.num_columns):\n col_written = table[i]\n col_read = table_read[i]\n assert table.field(i).name == 
table_read.field(i).name\n assert col_read.num_chunks == 1\n data_written = col_written.chunk(0)\n data_read = col_read.chunk(0)\n assert data_written.equals(data_read)\n\n\ndef alltypes_sample(size=10000, seed=0, categorical=False):\n np.random.seed(seed)\n arrays = {\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n # TODO(wesm): Test other timestamp resolutions now that arrow supports\n # them\n 'datetime': np.arange(\"2016-01-01T00:00:00.001\", size,\n dtype='datetime64[ms]'),\n 'str': pd.Series([str(x) for x in range(size)]),\n 'empty_str': [''] * size,\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'null': [None] * size,\n 'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],\n }\n if categorical:\n arrays['str_category'] = arrays['str'].astype('category')\n return pd.DataFrame(arrays)\n\n\[email protected]\[email protected]('chunk_size', [None, 1000])\ndef test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):\n df = alltypes_sample(size=10000, categorical=True)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert arrow_table.schema.pandas_metadata is not None\n\n _write_table(arrow_table, filename, version=\"2.0\",\n coerce_timestamps='ms', chunk_size=chunk_size)\n table_read = pq.read_pandas(filename)\n assert table_read.schema.pandas_metadata is not None\n\n assert arrow_table.schema.metadata == table_read.schema.metadata\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\ndef test_set_data_page_size():\n arr = pa.array([1, 2, 3] * 1000000)\n t = pa.Table.from_arrays([arr], names=['f0'])\n\n # 128K, 256K, 512K\n page_sizes = [2 << 16, 2 << 17, 2 << 18]\n for target_page_size in page_sizes:\n _check_roundtrip(t, data_page_size=target_page_size)\n\n\[email protected]\ndef test_chunked_table_write():\n # ARROW-232\n df = alltypes_sample(size=10)\n\n batch = pa.RecordBatch.from_pandas(df)\n table = pa.Table.from_batches([batch] * 3)\n _check_roundtrip(table, version='2.0')\n\n df, _ = dataframe_with_lists()\n batch = pa.RecordBatch.from_pandas(df)\n table = pa.Table.from_batches([batch] * 3)\n _check_roundtrip(table, version='2.0')\n\n\[email protected]\ndef test_memory_map(tempdir):\n df = alltypes_sample(size=10)\n\n table = pa.Table.from_pandas(df)\n _check_roundtrip(table, read_table_kwargs={'memory_map': True},\n version='2.0')\n\n filename = str(tempdir / 'tmp_file')\n with open(filename, 'wb') as f:\n _write_table(table, f, version='2.0')\n table_read = pq.read_pandas(filename, memory_map=True)\n assert table_read.equals(table)\n\n\[email protected]\ndef test_enable_buffered_stream(tempdir):\n df = alltypes_sample(size=10)\n\n table = pa.Table.from_pandas(df)\n _check_roundtrip(table, read_table_kwargs={'buffer_size': 1025},\n version='2.0')\n\n filename = str(tempdir / 'tmp_file')\n with open(filename, 'wb') as f:\n _write_table(table, f, version='2.0')\n table_read = pq.read_pandas(filename, buffer_size=4096)\n assert table_read.equals(table)\n\n\ndef test_special_chars_filename(tempdir):\n table = 
pa.Table.from_arrays([pa.array([42])], [\"ints\"])\n filename = \"foo # bar\"\n path = tempdir / filename\n assert not path.exists()\n _write_table(table, str(path))\n assert path.exists()\n table_read = _read_table(str(path))\n assert table_read.equals(table)\n\n\[email protected]\ndef test_empty_table_roundtrip():\n df = alltypes_sample(size=10)\n\n # Create a non-empty table to infer the types correctly, then slice to 0\n table = pa.Table.from_pandas(df)\n table = pa.Table.from_arrays(\n [col.chunk(0)[:0] for col in table.itercolumns()],\n names=table.schema.names)\n\n assert table.schema.field('null').type == pa.null()\n assert table.schema.field('null_list').type == pa.list_(pa.null())\n _check_roundtrip(table, version='2.0')\n\n\[email protected]\ndef test_empty_table_no_columns():\n df = pd.DataFrame()\n empty = pa.Table.from_pandas(df, preserve_index=False)\n _check_roundtrip(empty)\n\n\ndef test_empty_lists_table_roundtrip():\n # ARROW-2744: Shouldn't crash when writing an array of empty lists\n arr = pa.array([[], []], type=pa.list_(pa.int32()))\n table = pa.Table.from_arrays([arr], [\"A\"])\n _check_roundtrip(table)\n\n\ndef test_nested_list_nonnullable_roundtrip_bug():\n # Reproduce failure in ARROW-5630\n typ = pa.list_(pa.field(\"item\", pa.float32(), False))\n num_rows = 10000\n t = pa.table([\n pa.array(([[0] * ((i + 5) % 10) for i in range(0, 10)]\n * (num_rows // 10)), type=typ)\n ], ['a'])\n _check_roundtrip(t, data_page_size=4096)\n\n\[email protected]\ndef test_pandas_parquet_datetime_tz():\n s = pd.Series([datetime.datetime(2017, 9, 6)])\n s = s.dt.tz_localize('utc')\n\n s.index = s\n\n # Both a column and an index to hit both use cases\n df = pd.DataFrame({'tz_aware': s,\n 'tz_eastern': s.dt.tz_convert('US/Eastern')},\n index=s)\n\n f = BytesIO()\n\n arrow_table = pa.Table.from_pandas(df)\n\n _write_table(arrow_table, f, coerce_timestamps='ms')\n f.seek(0)\n\n table_read = pq.read_pandas(f)\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\[email protected](six.PY2, reason='datetime.timezone is available since '\n 'python version 3.2')\ndef test_datetime_timezone_tzinfo():\n value = datetime.datetime(2018, 1, 1, 1, 23, 45,\n tzinfo=datetime.timezone.utc)\n df = pd.DataFrame({'foo': [value]})\n\n _roundtrip_pandas_dataframe(df, write_kwargs={})\n\n\[email protected]\ndef test_pandas_parquet_custom_metadata(tempdir):\n df = alltypes_sample(size=10000)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert b'pandas' in arrow_table.schema.metadata\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n\n metadata = pq.read_metadata(filename).metadata\n assert b'pandas' in metadata\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n assert js['index_columns'] == [{'kind': 'range',\n 'name': None,\n 'start': 0, 'stop': 10000,\n 'step': 1}]\n\n\[email protected]\ndef test_pandas_parquet_column_multiindex(tempdir):\n df = alltypes_sample(size=10)\n df.columns = pd.MultiIndex.from_tuples(\n list(zip(df.columns, df.columns[::-1])),\n names=['level_1', 'level_2']\n )\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert arrow_table.schema.pandas_metadata is not None\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n\n table_read = pq.read_pandas(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef 
test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):\n df = alltypes_sample(size=10000)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n js = arrow_table.schema.pandas_metadata\n assert not js['index_columns']\n # ARROW-2170\n # While index_columns should be empty, columns needs to be filled still.\n assert js['columns']\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n table_read = pq.read_pandas(filename)\n\n js = table_read.schema.pandas_metadata\n assert not js['index_columns']\n\n assert arrow_table.schema.metadata == table_read.schema.metadata\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_1_0_roundtrip(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'str': [str(x) for x in range(size)],\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'empty_str': [''] * size\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n _write_table(arrow_table, filename, version='1.0')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n # We pass uint32_t as int64_t if we write Parquet version 1.0\n df['uint32'] = df['uint32'].values.astype(np.int64)\n\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_multiple_path_types(tempdir):\n # Test compatibility with PEP 519 path-like objects\n path = tempdir / 'zzz.parquet'\n df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})\n _write_table(df, path)\n table_read = _read_table(path)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n # Test compatibility with plain string paths\n path = str(tempdir) + 'zzz.parquet'\n df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})\n _write_table(df, path)\n table_read = _read_table(path)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_column_selection(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16)\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n _write_table(arrow_table, filename)\n table_read = _read_table(filename, columns=['uint8'])\n df_read = table_read.to_pandas()\n\n tm.assert_frame_equal(df[['uint8']], df_read)\n\n # ARROW-4267: Selection of duplicate columns still leads to these columns\n # being read uniquely.\n table_read = _read_table(filename, columns=['uint8', 'uint8'])\n df_read = table_read.to_pandas()\n\n tm.assert_frame_equal(df[['uint8']], df_read)\n\n\ndef _random_integers(size, dtype):\n # We do not generate integers outside the int64 range\n platform_int_info = np.iinfo('int_')\n iinfo = np.iinfo(dtype)\n return np.random.randint(max(iinfo.min, platform_int_info.min),\n min(iinfo.max, platform_int_info.max),\n size=size).astype(dtype)\n\n\ndef 
_test_dataframe(size=10000, seed=0):\n np.random.seed(seed)\n df = pd.DataFrame({\n 'uint8': _random_integers(size, np.uint8),\n 'uint16': _random_integers(size, np.uint16),\n 'uint32': _random_integers(size, np.uint32),\n 'uint64': _random_integers(size, np.uint64),\n 'int8': _random_integers(size, np.int8),\n 'int16': _random_integers(size, np.int16),\n 'int32': _random_integers(size, np.int32),\n 'int64': _random_integers(size, np.int64),\n 'float32': np.random.randn(size).astype(np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'strings': [tm.rands(10) for i in range(size)],\n 'all_none': [None] * size,\n 'all_none_category': [None] * size\n })\n # TODO(PARQUET-1015)\n # df['all_none_category'] = df['all_none_category'].astype('category')\n return df\n\n\[email protected]\ndef test_pandas_parquet_native_file_roundtrip(tempdir):\n df = _test_dataframe(10000)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = _read_table(reader).to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_parquet_incremental_file_build(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n\n writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n\n frames.append(df.copy())\n\n writer.close()\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_read_pandas_column_subset(tempdir):\n df = _test_dataframe(10000)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()\n tm.assert_frame_equal(df[['strings', 'uint8']], df_read)\n\n\[email protected]\ndef test_pandas_parquet_empty_roundtrip(tempdir):\n df = _test_dataframe(0)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = _read_table(reader).to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_pyfile_roundtrip(tempdir):\n filename = tempdir / 'pandas_pyfile_roundtrip.parquet'\n size = 5\n df = pd.DataFrame({\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'strings': ['foo', 'bar', None, 'baz', 'qux']\n })\n\n arrow_table = pa.Table.from_pandas(df)\n\n with filename.open('wb') as f:\n _write_table(arrow_table, f, version=\"1.0\")\n\n data = io.BytesIO(filename.read_bytes())\n\n table_read = _read_table(data)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_configuration_options(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': 
np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n\n for use_dictionary in [True, False]:\n _write_table(arrow_table, filename, version='2.0',\n use_dictionary=use_dictionary)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n for write_statistics in [True, False]:\n _write_table(arrow_table, filename, version='2.0',\n write_statistics=write_statistics)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:\n _write_table(arrow_table, filename, version='2.0',\n compression=compression)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\ndef make_sample_file(table_or_df):\n if isinstance(table_or_df, pa.Table):\n a_table = table_or_df\n else:\n a_table = pa.Table.from_pandas(table_or_df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, compression='SNAPPY', version='2.0',\n coerce_timestamps='ms')\n\n buf.seek(0)\n return pq.ParquetFile(buf)\n\n\ndef test_compression_level():\n arr = pa.array(list(map(int, range(1000))))\n data = [arr, arr]\n table = pa.Table.from_arrays(data, names=['a', 'b'])\n\n # Check one compression level.\n _check_roundtrip(table, expected=table, compression=\"gzip\",\n compression_level=1)\n\n # Check another one to make sure that compression_level=1 does not\n # coincide with the default one in Arrow.\n _check_roundtrip(table, expected=table, compression=\"gzip\",\n compression_level=5)\n\n # Check that the user can provide a compression level per column\n _check_roundtrip(table, expected=table, compression=\"gzip\",\n compression_level=[{'a': 2, 'b': 3}])\n\n # Check that specifying a compression level for a codec which does allow\n # specifying one, results into an error.\n # Uncompressed, snappy, lz4 and lzo do not support specifying a compression\n # level.\n # GZIP (zlib) allows for specifying a compression level but as of up\n # to version 1.2.11 the valid range is [-1, 9].\n invalid_combinations = [(\"snappy\", 4), (\"lz4\", 5), (\"gzip\", -1337),\n (\"None\", 444), (\"lzo\", 14)]\n buf = io.BytesIO()\n for (codec, level) in invalid_combinations:\n with pytest.raises(IOError):\n _write_table(table, buf, compression=codec,\n compression_level=level)\n\n\[email protected]\ndef test_parquet_metadata_api():\n df = alltypes_sample(size=10000)\n df = df.reindex(columns=sorted(df.columns))\n df.index = np.random.randint(0, 1000000, size=len(df))\n\n fileh = make_sample_file(df)\n ncols = len(df.columns)\n\n # Series of sniff tests\n meta = fileh.metadata\n repr(meta)\n assert meta.num_rows == len(df)\n assert meta.num_columns == ncols + 1 # +1 for index\n assert meta.num_row_groups == 1\n assert meta.format_version == '2.0'\n assert 'parquet-cpp' in meta.created_by\n assert isinstance(meta.serialized_size, int)\n assert isinstance(meta.metadata, dict)\n\n # Schema\n schema = fileh.schema\n assert meta.schema is schema\n assert len(schema) == ncols + 1 # +1 for index\n repr(schema)\n\n col = 
schema[0]\n repr(col)\n assert col.name == df.columns[0]\n assert col.max_definition_level == 1\n assert col.max_repetition_level == 0\n assert col.max_repetition_level == 0\n\n assert col.physical_type == 'BOOLEAN'\n assert col.converted_type == 'NONE'\n\n with pytest.raises(IndexError):\n schema[ncols + 1] # +1 for index\n\n with pytest.raises(IndexError):\n schema[-1]\n\n # Row group\n for rg in range(meta.num_row_groups):\n rg_meta = meta.row_group(rg)\n assert isinstance(rg_meta, pq.RowGroupMetaData)\n repr(rg_meta)\n\n for col in range(rg_meta.num_columns):\n col_meta = rg_meta.column(col)\n assert isinstance(col_meta, pq.ColumnChunkMetaData)\n repr(col_meta)\n\n with pytest.raises(IndexError):\n meta.row_group(-1)\n\n with pytest.raises(IndexError):\n meta.row_group(meta.num_row_groups + 1)\n\n rg_meta = meta.row_group(0)\n assert rg_meta.num_rows == len(df)\n assert rg_meta.num_columns == ncols + 1 # +1 for index\n assert rg_meta.total_byte_size > 0\n\n with pytest.raises(IndexError):\n col_meta = rg_meta.column(-1)\n\n with pytest.raises(IndexError):\n col_meta = rg_meta.column(ncols + 2)\n\n col_meta = rg_meta.column(0)\n assert col_meta.file_offset > 0\n assert col_meta.file_path == '' # created from BytesIO\n assert col_meta.physical_type == 'BOOLEAN'\n assert col_meta.num_values == 10000\n assert col_meta.path_in_schema == 'bool'\n assert col_meta.is_stats_set is True\n assert isinstance(col_meta.statistics, pq.Statistics)\n assert col_meta.compression == 'SNAPPY'\n assert col_meta.encodings == ('PLAIN', 'RLE')\n assert col_meta.has_dictionary_page is False\n assert col_meta.dictionary_page_offset is None\n assert col_meta.data_page_offset > 0\n assert col_meta.total_compressed_size > 0\n assert col_meta.total_uncompressed_size > 0\n with pytest.raises(NotImplementedError):\n col_meta.has_index_page\n with pytest.raises(NotImplementedError):\n col_meta.index_page_offset\n\n\ndef test_parquet_metadata_lifetime(tempdir):\n # ARROW-6642 - ensure that chained access keeps parent objects alive\n table = pa.table({'a': [1, 2, 3]})\n pq.write_table(table, tempdir / 'test_metadata_segfault.parquet')\n dataset = pq.ParquetDataset(tempdir / 'test_metadata_segfault.parquet')\n dataset.pieces[0].get_metadata().row_group(0).column(0).statistics\n\n\[email protected]\[email protected](\n (\n 'data',\n 'type',\n 'physical_type',\n 'min_value',\n 'max_value',\n 'null_count',\n 'num_values',\n 'distinct_count'\n ),\n [\n ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),\n (\n [-1.1, 2.2, 2.3, None, 4.4], pa.float32(),\n 'FLOAT', -1.1, 4.4, 1, 4, 0\n ),\n (\n [-1.1, 2.2, 2.3, None, 4.4], pa.float64(),\n 'DOUBLE', -1.1, 4.4, 1, 4, 0\n ),\n (\n [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),\n 'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0\n ),\n (\n [True, False, False, True, True], pa.bool_(),\n 'BOOLEAN', False, True, 0, 5, 0\n ),\n (\n [b'\\x00', b'b', b'12', None, b'aaa'], pa.binary(),\n 'BYTE_ARRAY', b'\\x00', b'b', 1, 4, 0\n ),\n ]\n)\ndef test_parquet_column_statistics_api(data, type, physical_type, min_value,\n max_value, null_count, 
num_values,\n distinct_count):\n df = pd.DataFrame({'data': data})\n schema = pa.schema([pa.field('data', type)])\n table = pa.Table.from_pandas(df, schema=schema, safe=False)\n fileh = make_sample_file(table)\n\n meta = fileh.metadata\n\n rg_meta = meta.row_group(0)\n col_meta = rg_meta.column(0)\n\n stat = col_meta.statistics\n assert stat.has_min_max\n assert _close(type, stat.min, min_value)\n assert _close(type, stat.max, max_value)\n assert stat.null_count == null_count\n assert stat.num_values == num_values\n # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount\n # method, missing distinct_count is represented as zero instead of None\n assert stat.distinct_count == distinct_count\n assert stat.physical_type == physical_type\n\n\n# ARROW-6339\[email protected]\ndef test_parquet_raise_on_unset_statistics():\n df = pd.DataFrame({\"t\": pd.Series([pd.NaT], dtype=\"datetime64[ns]\")})\n meta = make_sample_file(pa.Table.from_pandas(df)).metadata\n\n assert not meta.row_group(0).column(0).statistics.has_min_max\n assert meta.row_group(0).column(0).statistics.max is None\n\n\ndef _close(type, left, right):\n if type == pa.float32():\n return abs(left - right) < 1E-7\n elif type == pa.float64():\n return abs(left - right) < 1E-13\n else:\n return left == right\n\n\ndef test_statistics_convert_logical_types(tempdir):\n # ARROW-5166, ARROW-4139\n\n # (min, max, type)\n cases = [(10, 11164359321221007157, pa.uint64()),\n (10, 4294967295, pa.uint32()),\n (u\"ähnlich\", u\"öffentlich\", pa.utf8()),\n (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),\n pa.time32('ms')),\n (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),\n pa.time64('us')),\n (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),\n datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),\n pa.timestamp('ms')),\n (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),\n datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),\n pa.timestamp('us'))]\n\n for i, (min_val, max_val, typ) in enumerate(cases):\n t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],\n ['col'])\n path = str(tempdir / ('example{}.parquet'.format(i)))\n pq.write_table(t, path, version='2.0')\n pf = pq.ParquetFile(path)\n stats = pf.metadata.row_group(0).column(0).statistics\n assert stats.min == min_val\n assert stats.max == max_val\n\n\ndef test_parquet_write_disable_statistics(tempdir):\n table = pa.Table.from_pydict(\n {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})\n _write_table(table, tempdir / 'data.parquet')\n meta = pq.read_metadata(tempdir / 'data.parquet')\n for col in [0, 1]:\n cc = meta.row_group(0).column(col)\n assert cc.is_stats_set is True\n assert cc.statistics is not None\n\n _write_table(table, tempdir / 'data2.parquet', write_statistics=False)\n meta = pq.read_metadata(tempdir / 'data2.parquet')\n for col in [0, 1]:\n cc = meta.row_group(0).column(col)\n assert cc.is_stats_set is False\n assert cc.statistics is None\n\n _write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])\n meta = pq.read_metadata(tempdir / 'data3.parquet')\n cc_a = meta.row_group(0).column(0)\n assert cc_a.is_stats_set is True\n assert cc_a.statistics is not None\n cc_b = meta.row_group(0).column(1)\n assert cc_b.is_stats_set is False\n assert cc_b.statistics is None\n\n\[email protected]\ndef test_compare_schemas():\n df = alltypes_sample(size=10000)\n\n fileh = make_sample_file(df)\n fileh2 = make_sample_file(df)\n fileh3 = make_sample_file(df[df.columns[::2]])\n\n # ParquetSchema\n assert isinstance(fileh.schema, 
pq.ParquetSchema)\n assert fileh.schema.equals(fileh.schema)\n assert fileh.schema == fileh.schema\n assert fileh.schema.equals(fileh2.schema)\n assert fileh.schema == fileh2.schema\n assert fileh.schema != 'arbitrary object'\n assert not fileh.schema.equals(fileh3.schema)\n assert fileh.schema != fileh3.schema\n\n # ColumnSchema\n assert isinstance(fileh.schema[0], pq.ColumnSchema)\n assert fileh.schema[0].equals(fileh.schema[0])\n assert fileh.schema[0] == fileh.schema[0]\n assert not fileh.schema[0].equals(fileh.schema[1])\n assert fileh.schema[0] != fileh.schema[1]\n assert fileh.schema[0] != 'arbitrary object'\n\n\ndef test_validate_schema_write_table(tempdir):\n # ARROW-2926\n simple_fields = [\n pa.field('POS', pa.uint32()),\n pa.field('desc', pa.string())\n ]\n\n simple_schema = pa.schema(simple_fields)\n\n # simple_table schema does not match simple_schema\n simple_from_array = [pa.array([1]), pa.array(['bla'])]\n simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])\n\n path = tempdir / 'simple_validate_schema.parquet'\n\n with pq.ParquetWriter(path, simple_schema,\n version='2.0',\n compression='snappy', flavor='spark') as w:\n with pytest.raises(ValueError):\n w.write_table(simple_table)\n\n\[email protected]\ndef test_column_of_arrays(tempdir):\n df, schema = dataframe_with_arrays()\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n _write_table(arrow_table, filename, version=\"2.0\", coerce_timestamps='ms')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_coerce_timestamps(tempdir):\n from collections import OrderedDict\n # ARROW-622\n arrays = OrderedDict()\n fields = [pa.field('datetime64',\n pa.list_(pa.timestamp('ms')))]\n arrays['datetime64'] = [\n np.array(['2007-07-13T01:23:34.123456789',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ms]'),\n None,\n None,\n np.array(['2007-07-13T02',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ms]'),\n ]\n\n df = pd.DataFrame(arrays)\n schema = pa.schema(fields)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n\n _write_table(arrow_table, filename, version=\"2.0\", coerce_timestamps='us')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n df_expected = df.copy()\n for i, x in enumerate(df_expected['datetime64']):\n if isinstance(x, np.ndarray):\n df_expected['datetime64'][i] = x.astype('M8[us]')\n\n tm.assert_frame_equal(df_expected, df_read)\n\n with pytest.raises(ValueError):\n _write_table(arrow_table, filename, version='2.0',\n coerce_timestamps='unknown')\n\n\[email protected]\ndef test_coerce_timestamps_truncated(tempdir):\n \"\"\"\n ARROW-2555: Test that we can truncate timestamps when coercing if\n explicitly allowed.\n \"\"\"\n dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,\n second=1, microsecond=1)\n dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,\n second=1)\n\n fields_us = [pa.field('datetime64', pa.timestamp('us'))]\n arrays_us = {'datetime64': [dt_us, dt_ms]}\n\n df_us = pd.DataFrame(arrays_us)\n schema_us = pa.schema(fields_us)\n\n filename = tempdir / 'pandas_truncated.parquet'\n table_us = pa.Table.from_pandas(df_us, schema=schema_us)\n\n _write_table(table_us, filename, version=\"2.0\", coerce_timestamps='ms',\n allow_truncated_timestamps=True)\n table_ms = _read_table(filename)\n 
df_ms = table_ms.to_pandas()\n\n arrays_expected = {'datetime64': [dt_ms, dt_ms]}\n df_expected = pd.DataFrame(arrays_expected)\n tm.assert_frame_equal(df_expected, df_ms)\n\n\[email protected]\ndef test_column_of_lists(tempdir):\n df, schema = dataframe_with_lists(parquet_compatible=True)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n _write_table(arrow_table, filename, version='2.0')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n if PY2:\n # assert_frame_equal fails when comparing datetime.date and\n # np.datetime64, even with check_datetimelike_compat=True so\n # convert the values to np.datetime64 instead\n for col in ['date32[day]_list', 'date64[ms]_list']:\n df[col] = df[col].apply(\n lambda x: list(map(np.datetime64, x)) if x else x\n )\n\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_date_time_types(tempdir):\n t1 = pa.date32()\n data1 = np.array([17259, 17260, 17261], dtype='int32')\n a1 = pa.array(data1, type=t1)\n\n t2 = pa.date64()\n data2 = data1.astype('int64') * 86400000\n a2 = pa.array(data2, type=t2)\n\n t3 = pa.timestamp('us')\n start = pd.Timestamp('2001-01-01').value / 1000\n data3 = np.array([start, start + 1, start + 2], dtype='int64')\n a3 = pa.array(data3, type=t3)\n\n t4 = pa.time32('ms')\n data4 = np.arange(3, dtype='i4')\n a4 = pa.array(data4, type=t4)\n\n t5 = pa.time64('us')\n a5 = pa.array(data4.astype('int64'), type=t5)\n\n t6 = pa.time32('s')\n a6 = pa.array(data4, type=t6)\n\n ex_t6 = pa.time32('ms')\n ex_a6 = pa.array(data4 * 1000, type=ex_t6)\n\n t7 = pa.timestamp('ns')\n start = pd.Timestamp('2001-01-01').value\n data7 = np.array([start, start + 1000, start + 2000],\n dtype='int64')\n a7 = pa.array(data7, type=t7)\n\n table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],\n ['date32', 'date64', 'timestamp[us]',\n 'time32[s]', 'time64[us]',\n 'time32_from64[s]',\n 'timestamp[ns]'])\n\n # date64 as date32\n # time32[s] to time32[ms]\n expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],\n ['date32', 'date64', 'timestamp[us]',\n 'time32[s]', 'time64[us]',\n 'time32_from64[s]',\n 'timestamp[ns]'])\n\n _check_roundtrip(table, expected=expected, version='2.0')\n\n t0 = pa.timestamp('ms')\n data0 = np.arange(4, dtype='int64')\n a0 = pa.array(data0, type=t0)\n\n t1 = pa.timestamp('us')\n data1 = np.arange(4, dtype='int64')\n a1 = pa.array(data1, type=t1)\n\n t2 = pa.timestamp('ns')\n data2 = np.arange(4, dtype='int64')\n a2 = pa.array(data2, type=t2)\n\n table = pa.Table.from_arrays([a0, a1, a2],\n ['ts[ms]', 'ts[us]', 'ts[ns]'])\n expected = pa.Table.from_arrays([a0, a1, a2],\n ['ts[ms]', 'ts[us]', 'ts[ns]'])\n\n # int64 for all timestamps supported by default\n filename = tempdir / 'int64_timestamps.parquet'\n _write_table(table, filename, version='2.0')\n parquet_schema = pq.ParquetFile(filename).schema\n for i in range(3):\n assert parquet_schema.column(i).physical_type == 'INT64'\n read_table = _read_table(filename)\n assert read_table.equals(expected)\n\n t0_ns = pa.timestamp('ns')\n data0_ns = np.array(data0 * 1000000, dtype='int64')\n a0_ns = pa.array(data0_ns, type=t0_ns)\n\n t1_ns = pa.timestamp('ns')\n data1_ns = np.array(data1 * 1000, dtype='int64')\n a1_ns = pa.array(data1_ns, type=t1_ns)\n\n expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],\n ['ts[ms]', 'ts[us]', 'ts[ns]'])\n\n # int96 nanosecond timestamps produced upon request\n filename = tempdir / 'explicit_int96_timestamps.parquet'\n _write_table(table, filename, 
version='2.0',\n use_deprecated_int96_timestamps=True)\n parquet_schema = pq.ParquetFile(filename).schema\n for i in range(3):\n assert parquet_schema.column(i).physical_type == 'INT96'\n read_table = _read_table(filename)\n assert read_table.equals(expected)\n\n # int96 nanosecond timestamps implied by flavor 'spark'\n filename = tempdir / 'spark_int96_timestamps.parquet'\n _write_table(table, filename, version='2.0',\n flavor='spark')\n parquet_schema = pq.ParquetFile(filename).schema\n for i in range(3):\n assert parquet_schema.column(i).physical_type == 'INT96'\n read_table = _read_table(filename)\n assert read_table.equals(expected)\n\n\ndef test_timestamp_restore_timezone():\n # ARROW-5888, restore timezone from serialized metadata\n ty = pa.timestamp('ms', tz='America/New_York')\n arr = pa.array([1, 2, 3], type=ty)\n t = pa.table([arr], names=['f0'])\n _check_roundtrip(t)\n\n\[email protected]\ndef test_list_of_datetime_time_roundtrip():\n # ARROW-4135\n times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',\n '11:30', '12:00'])\n df = pd.DataFrame({'time': [times.time]})\n _roundtrip_pandas_dataframe(df, write_kwargs={})\n\n\[email protected]\ndef test_parquet_version_timestamp_differences():\n i_s = pd.Timestamp('2010-01-01').value / 1000000000 # := 1262304000\n\n d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')\n d_ms = d_s * 1000\n d_us = d_ms * 1000\n d_ns = d_us * 1000\n\n a_s = pa.array(d_s, type=pa.timestamp('s'))\n a_ms = pa.array(d_ms, type=pa.timestamp('ms'))\n a_us = pa.array(d_us, type=pa.timestamp('us'))\n a_ns = pa.array(d_ns, type=pa.timestamp('ns'))\n\n names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']\n table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)\n\n # Using Parquet version 1.0, seconds should be coerced to milliseconds\n # and nanoseconds should be coerced to microseconds by default\n expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)\n _check_roundtrip(table, expected)\n\n # Using Parquet version 2.0, seconds should be coerced to milliseconds\n # and nanoseconds should be retained by default\n expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)\n _check_roundtrip(table, expected, version='2.0')\n\n # Using Parquet version 1.0, coercing to milliseconds or microseconds\n # is allowed\n expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)\n _check_roundtrip(table, expected, coerce_timestamps='ms')\n\n # Using Parquet version 2.0, coercing to milliseconds or microseconds\n # is allowed\n expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)\n _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')\n\n # TODO: after pyarrow allows coerce_timestamps='ns', tests like the\n # following should pass ...\n\n # Using Parquet version 1.0, coercing to nanoseconds is not allowed\n # expected = None\n # with pytest.raises(NotImplementedError):\n # _roundtrip_table(table, coerce_timestamps='ns')\n\n # Using Parquet version 2.0, coercing to nanoseconds is allowed\n # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)\n # _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns')\n\n # For either Parquet version, coercing to nanoseconds is allowed\n # if Int96 storage is used\n expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)\n _check_roundtrip(table, expected,\n use_deprecated_int96_timestamps=True)\n _check_roundtrip(table, expected, version='2.0',\n use_deprecated_int96_timestamps=True)\n\n\ndef test_large_list_records():\n # This was fixed in 
PARQUET-1100\n\n list_lengths = np.random.randint(0, 500, size=50)\n list_lengths[::10] = 0\n\n list_values = [list(map(int, np.random.randint(0, 100, size=x)))\n if i % 8 else None\n for i, x in enumerate(list_lengths)]\n\n a1 = pa.array(list_values)\n\n table = pa.Table.from_arrays([a1], ['int_lists'])\n _check_roundtrip(table)\n\n\ndef test_sanitized_spark_field_names():\n a0 = pa.array([0, 1, 2, 3, 4])\n name = 'prohib; ,\\t{}'\n table = pa.Table.from_arrays([a0], [name])\n\n result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})\n\n expected_name = 'prohib______'\n assert result.schema[0].name == expected_name\n\n\[email protected]\ndef test_spark_flavor_preserves_pandas_metadata():\n df = _test_dataframe(size=100)\n df.index = np.arange(0, 10 * len(df), 10)\n df.index.name = 'foo'\n\n result = _roundtrip_pandas_dataframe(df, {'version': '2.0',\n 'flavor': 'spark'})\n tm.assert_frame_equal(result, df)\n\n\ndef test_fixed_size_binary():\n t0 = pa.binary(10)\n data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']\n a0 = pa.array(data, type=t0)\n\n table = pa.Table.from_arrays([a0],\n ['binary[10]'])\n _check_roundtrip(table)\n\n\[email protected]\ndef test_multithreaded_read():\n df = alltypes_sample(size=10000)\n\n table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(table, buf, compression='SNAPPY', version='2.0')\n\n buf.seek(0)\n table1 = _read_table(buf, use_threads=True)\n\n buf.seek(0)\n table2 = _read_table(buf, use_threads=False)\n\n assert table1.equals(table2)\n\n\[email protected]\ndef test_min_chunksize():\n data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])\n table = pa.Table.from_pandas(data.reset_index())\n\n buf = io.BytesIO()\n _write_table(table, buf, chunk_size=-1)\n\n buf.seek(0)\n result = _read_table(buf)\n\n assert result.equals(table)\n\n with pytest.raises(ValueError):\n _write_table(table, buf, chunk_size=0)\n\n\[email protected]\ndef test_pass_separate_metadata():\n # ARROW-471\n df = alltypes_sample(size=10000)\n\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, compression='snappy', version='2.0')\n\n buf.seek(0)\n metadata = pq.read_metadata(buf)\n\n buf.seek(0)\n\n fileh = pq.ParquetFile(buf, metadata=metadata)\n\n tm.assert_frame_equal(df, fileh.read().to_pandas())\n\n\[email protected]\ndef test_read_single_row_group():\n # ARROW-471\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n\n pf = pq.ParquetFile(buf)\n\n assert pf.num_row_groups == K\n\n row_groups = [pf.read_row_group(i) for i in range(K)]\n result = pa.concat_tables(row_groups)\n tm.assert_frame_equal(df, result.to_pandas())\n\n\[email protected]\ndef test_read_single_row_group_with_column_subset():\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n pf = pq.ParquetFile(buf)\n\n cols = list(df.columns[:2])\n row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]\n result = pa.concat_tables(row_groups)\n tm.assert_frame_equal(df[cols], result.to_pandas())\n\n # ARROW-4267: Selection of duplicate columns still leads to these columns\n # being read uniquely.\n row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]\n result = 
pa.concat_tables(row_groups)\n tm.assert_frame_equal(df[cols], result.to_pandas())\n\n\[email protected]\ndef test_read_multiple_row_groups():\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n\n pf = pq.ParquetFile(buf)\n\n assert pf.num_row_groups == K\n\n result = pf.read_row_groups(range(K))\n tm.assert_frame_equal(df, result.to_pandas())\n\n\[email protected]\ndef test_read_multiple_row_groups_with_column_subset():\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n pf = pq.ParquetFile(buf)\n\n cols = list(df.columns[:2])\n result = pf.read_row_groups(range(K), columns=cols)\n tm.assert_frame_equal(df[cols], result.to_pandas())\n\n # ARROW-4267: Selection of duplicate columns still leads to these columns\n # being read uniquely.\n result = pf.read_row_groups(range(K), columns=cols + cols)\n tm.assert_frame_equal(df[cols], result.to_pandas())\n\n\[email protected]\ndef test_scan_contents():\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n pf = pq.ParquetFile(buf)\n\n assert pf.scan_contents() == 10000\n assert pf.scan_contents(df.columns[:4]) == 10000\n\n\[email protected]\ndef test_parquet_piece_read(tempdir):\n df = _test_dataframe(1000)\n table = pa.Table.from_pandas(df)\n\n path = tempdir / 'parquet_piece_read.parquet'\n _write_table(table, path, version='2.0')\n\n piece1 = pq.ParquetDatasetPiece(path)\n\n result = piece1.read()\n assert result.equals(table)\n\n\[email protected]\ndef test_parquet_piece_open_and_get_metadata(tempdir):\n df = _test_dataframe(100)\n table = pa.Table.from_pandas(df)\n\n path = tempdir / 'parquet_piece_read.parquet'\n _write_table(table, path, version='2.0')\n\n piece = pq.ParquetDatasetPiece(path)\n table1 = piece.read()\n assert isinstance(table1, pa.Table)\n meta1 = piece.get_metadata()\n assert isinstance(meta1, pq.FileMetaData)\n\n assert table == table1\n\n\ndef test_parquet_piece_basics():\n path = '/baz.parq'\n\n piece1 = pq.ParquetDatasetPiece(path)\n piece2 = pq.ParquetDatasetPiece(path, row_group=1)\n piece3 = pq.ParquetDatasetPiece(\n path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])\n\n assert str(piece1) == path\n assert str(piece2) == '/baz.parq | row_group=1'\n assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'\n\n assert piece1 == piece1\n assert piece2 == piece2\n assert piece3 == piece3\n assert piece1 != piece3\n\n\ndef test_partition_set_dictionary_type():\n set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])\n set2 = pq.PartitionSet('key2', [2007, 2008, 2009])\n\n assert isinstance(set1.dictionary, pa.StringArray)\n assert isinstance(set2.dictionary, pa.IntegerArray)\n\n set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])\n with pytest.raises(TypeError):\n set3.dictionary\n\n\[email protected]\ndef test_read_partitioned_directory(tempdir):\n fs = LocalFileSystem.get_instance()\n _partition_test_for_filesystem(fs, tempdir)\n\n\[email protected]\ndef test_create_parquet_dataset_multi_threaded(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n 
_partition_test_for_filesystem(fs, base_path)\n\n manifest = pq.ParquetManifest(base_path, filesystem=fs,\n metadata_nthreads=1)\n dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)\n assert len(dataset.pieces) > 0\n partitions = dataset.partitions\n assert len(partitions.partition_names) > 0\n assert partitions.partition_names == manifest.partitions.partition_names\n assert len(partitions.levels) == len(manifest.partitions.levels)\n\n\[email protected]\ndef test_equivalency(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1]\n string_keys = ['a', 'b', 'c']\n boolean_keys = [True, False]\n partition_spec = [\n ['integer', integer_keys],\n ['string', string_keys],\n ['boolean', boolean_keys]\n ]\n\n df = pd.DataFrame({\n 'integer': np.array(integer_keys, dtype='i4').repeat(15),\n 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),\n 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),\n 3),\n }, columns=['integer', 'string', 'boolean'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n # Old filters syntax:\n # integer == 1 AND string != b AND boolean == True\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[('integer', '=', 1), ('string', '!=', 'b'),\n ('boolean', '==', True)]\n )\n table = dataset.read()\n result_df = (table.to_pandas().reset_index(drop=True))\n\n assert 0 not in result_df['integer'].values\n assert 'b' not in result_df['string'].values\n assert False not in result_df['boolean'].values\n\n # filters in disjunctive normal form:\n # (integer == 1 AND string != b AND boolean == True) OR\n # (integer == 2 AND boolean == False)\n # TODO(ARROW-3388): boolean columns are reconstructed as string\n filters = [\n [\n ('integer', '=', 1),\n ('string', '!=', 'b'),\n ('boolean', '==', 'True')\n ],\n [('integer', '=', 0), ('boolean', '==', 'False')]\n ]\n dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters)\n table = dataset.read()\n result_df = table.to_pandas().reset_index(drop=True)\n\n # Check that all rows in the DF fulfill the filter\n # Pandas 0.23.x has problems with indexing constant memoryviews in\n # categoricals. Thus we need to make an explicity copy here with np.array.\n df_filter_1 = (np.array(result_df['integer']) == 1) \\\n & (np.array(result_df['string']) != 'b') \\\n & (np.array(result_df['boolean']) == 'True')\n df_filter_2 = (np.array(result_df['integer']) == 0) \\\n & (np.array(result_df['boolean']) == 'False')\n assert df_filter_1.sum() > 0\n assert df_filter_2.sum() > 0\n assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())\n\n # Check for \\0 in predicate values. 
Until they are correctly implemented\n # in ARROW-3391, they would otherwise lead to weird results with the\n # current code.\n with pytest.raises(NotImplementedError):\n filters = [[('string', '==', b'1\\0a')]]\n pq.ParquetDataset(base_path, filesystem=fs, filters=filters)\n with pytest.raises(NotImplementedError):\n filters = [[('string', '==', u'1\\0a')]]\n pq.ParquetDataset(base_path, filesystem=fs, filters=filters)\n\n\[email protected]\ndef test_cutoff_exclusive_integer(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[\n ('integers', '<', 4),\n ('integers', '>', 1),\n ]\n )\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n result_list = [x for x in map(int, result_df['integers'].values)]\n assert result_list == [2, 3]\n\n\[email protected]\[email protected](\n raises=TypeError,\n reason='Loss of type information in creation of categoricals.'\n)\ndef test_cutoff_exclusive_datetime(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n date_keys = [\n datetime.date(2018, 4, 9),\n datetime.date(2018, 4, 10),\n datetime.date(2018, 4, 11),\n datetime.date(2018, 4, 12),\n datetime.date(2018, 4, 13)\n ]\n partition_spec = [\n ['dates', date_keys]\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'dates': np.array(date_keys, dtype='datetime64'),\n }, columns=['index', 'dates'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[\n ('dates', '<', \"2018-04-12\"),\n ('dates', '>', \"2018-04-10\")\n ]\n )\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n expected = pd.Categorical(\n np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),\n categories=np.array(date_keys, dtype='datetime64'))\n\n assert result_df['dates'].values == expected\n\n\[email protected]\ndef test_inclusive_integer(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[\n ('integers', '<=', 3),\n ('integers', '>=', 2),\n ]\n )\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n result_list = [int(x) for x in map(int, result_df['integers'].values)]\n assert result_list == [2, 3]\n\n\[email protected]\ndef test_inclusive_set(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1]\n string_keys = ['a', 'b', 'c']\n boolean_keys = [True, False]\n partition_spec = [\n ['integer', integer_keys],\n ['string', string_keys],\n ['boolean', boolean_keys]\n ]\n\n df = pd.DataFrame({\n 'integer': np.array(integer_keys, dtype='i4').repeat(15),\n 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 
5), 2),\n 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),\n 3),\n }, columns=['integer', 'string', 'boolean'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),\n ('boolean', 'in', {True})]\n )\n table = dataset.read()\n result_df = (table.to_pandas().reset_index(drop=True))\n\n assert 0 not in result_df['integer'].values\n assert 'c' not in result_df['string'].values\n assert False not in result_df['boolean'].values\n\n\[email protected]\ndef test_invalid_pred_op(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(base_path,\n filesystem=fs,\n filters=[\n ('integers', '=<', 3),\n ])\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(base_path,\n filesystem=fs,\n filters=[\n ('integers', 'in', set()),\n ])\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(base_path,\n filesystem=fs,\n filters=[\n ('integers', '!=', {3}),\n ])\n\n\[email protected]\ndef test_filters_read_table(tempdir):\n # test that filters keyword is passed through in read_table\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n table = pq.read_table(\n base_path, filesystem=fs, filters=[('integers', '<', 3)])\n assert table.num_rows == 3\n\n table = pq.read_table(\n base_path, filesystem=fs, filters=[[('integers', '<', 3)]])\n assert table.num_rows == 3\n\n table = pq.read_pandas(\n base_path, filters=[('integers', '<', 3)])\n assert table.num_rows == 3\n\n\[email protected]\ndef s3_bucket(request, minio_server):\n boto3 = pytest.importorskip('boto3')\n botocore = pytest.importorskip('botocore')\n\n address, access_key, secret_key = minio_server\n s3 = boto3.resource(\n 's3',\n endpoint_url='http://{}'.format(address),\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n config=botocore.client.Config(signature_version='s3v4'),\n region_name='us-east-1'\n )\n bucket = s3.Bucket('test-s3fs')\n bucket.create()\n return 'test-s3fs'\n\n\[email protected]\ndef s3_example(minio_server, s3_bucket):\n s3fs = pytest.importorskip('s3fs')\n\n address, access_key, secret_key = minio_server\n fs = s3fs.S3FileSystem(\n key=access_key,\n secret=secret_key,\n client_kwargs={\n 'endpoint_url': 'http://{}'.format(address)\n }\n )\n\n test_dir = guid()\n bucket_uri = 's3://{0}/{1}'.format(s3_bucket, test_dir)\n\n fs.mkdir(bucket_uri)\n yield fs, bucket_uri\n fs.rm(bucket_uri, recursive=True)\n\n\[email protected]\[email protected]\ndef test_read_partitioned_directory_s3fs(s3_example):\n from pyarrow.filesystem import S3FSWrapper\n\n fs, bucket_uri = s3_example\n wrapper = S3FSWrapper(fs)\n _partition_test_for_filesystem(wrapper, bucket_uri)\n\n # Check that we can auto-wrap\n dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)\n dataset.read()\n\n\ndef 
_partition_test_for_filesystem(fs, base_path):\n foo_keys = [0, 1]\n bar_keys = ['a', 'b', 'c']\n partition_spec = [\n ['foo', foo_keys],\n ['bar', bar_keys]\n ]\n N = 30\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'foo': np.array(foo_keys, dtype='i4').repeat(15),\n 'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),\n 'values': np.random.randn(N)\n }, columns=['index', 'foo', 'bar', 'values'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(base_path, filesystem=fs)\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n expected_df = (df.sort_values(by='index')\n .reset_index(drop=True)\n .reindex(columns=result_df.columns))\n expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)\n expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)\n\n assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()\n\n tm.assert_frame_equal(result_df, expected_df)\n\n\ndef _generate_partition_directories(fs, base_dir, partition_spec, df):\n # partition_spec : list of lists, e.g. [['foo', [0, 1, 2],\n # ['bar', ['a', 'b', 'c']]\n # part_table : a pyarrow.Table to write to each partition\n DEPTH = len(partition_spec)\n\n def _visit_level(base_dir, level, part_keys):\n name, values = partition_spec[level]\n for value in values:\n this_part_keys = part_keys + [(name, value)]\n\n level_dir = fs._path_join(\n str(base_dir),\n '{0}={1}'.format(name, value)\n )\n fs.mkdir(level_dir)\n\n if level == DEPTH - 1:\n # Generate example data\n file_path = fs._path_join(level_dir, guid())\n filtered_df = _filter_partition(df, this_part_keys)\n part_table = pa.Table.from_pandas(filtered_df)\n with fs.open(file_path, 'wb') as f:\n _write_table(part_table, f)\n assert fs.exists(file_path)\n\n file_success = fs._path_join(level_dir, '_SUCCESS')\n with fs.open(file_success, 'wb') as f:\n pass\n else:\n _visit_level(level_dir, level + 1, this_part_keys)\n file_success = fs._path_join(level_dir, '_SUCCESS')\n with fs.open(file_success, 'wb') as f:\n pass\n\n _visit_level(base_dir, 0, [])\n\n\ndef _test_read_common_metadata_files(fs, base_path):\n N = 100\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n\n base_path = str(base_path)\n data_path = os.path.join(base_path, 'data.parquet')\n\n table = pa.Table.from_pandas(df)\n\n with fs.open(data_path, 'wb') as f:\n _write_table(table, f)\n\n metadata_path = os.path.join(base_path, '_common_metadata')\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(base_path, filesystem=fs)\n assert dataset.common_metadata_path == str(metadata_path)\n\n with fs.open(data_path) as f:\n common_schema = pq.read_metadata(f).schema\n assert dataset.schema.equals(common_schema)\n\n # handle list of one directory\n dataset2 = pq.ParquetDataset([base_path], filesystem=fs)\n assert dataset2.schema.equals(dataset.schema)\n\n\[email protected]\ndef test_read_common_metadata_files(tempdir):\n fs = LocalFileSystem.get_instance()\n _test_read_common_metadata_files(fs, tempdir)\n\n\[email protected]\ndef test_read_metadata_files(tempdir):\n fs = LocalFileSystem.get_instance()\n\n N = 100\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n\n data_path = tempdir / 'data.parquet'\n\n table = pa.Table.from_pandas(df)\n\n with fs.open(data_path, 'wb') as f:\n 
_write_table(table, f)\n\n metadata_path = tempdir / '_metadata'\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(tempdir, filesystem=fs)\n assert dataset.metadata_path == str(metadata_path)\n\n with fs.open(data_path) as f:\n metadata_schema = pq.read_metadata(f).schema\n assert dataset.schema.equals(metadata_schema)\n\n\[email protected]\ndef test_read_schema(tempdir):\n N = 100\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n\n data_path = tempdir / 'test.parquet'\n\n table = pa.Table.from_pandas(df)\n _write_table(table, data_path)\n\n read1 = pq.read_schema(data_path)\n read2 = pq.read_schema(data_path, memory_map=True)\n assert table.schema.equals(read1, check_metadata=False)\n assert table.schema.equals(read2, check_metadata=False)\n\n assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas']\n\n\ndef _filter_partition(df, part_keys):\n predicate = np.ones(len(df), dtype=bool)\n\n to_drop = []\n for name, value in part_keys:\n to_drop.append(name)\n\n # to avoid pandas warning\n if isinstance(value, (datetime.date, datetime.datetime)):\n value = pd.Timestamp(value)\n\n predicate &= df[name] == value\n\n return df[predicate].drop(to_drop, axis=1)\n\n\[email protected]\ndef test_read_multiple_files(tempdir):\n nfiles = 10\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n\n # Hack so that we don't have a dtype cast in v1 files\n df['uint32'] = df['uint32'].astype(np.int64)\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df)\n _write_table(table, path)\n\n test_data.append(table)\n paths.append(path)\n\n # Write a _SUCCESS.crc file\n (dirpath / '_SUCCESS.crc').touch()\n\n def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):\n dataset = pq.ParquetDataset(paths, **kwargs)\n return dataset.read(columns=columns, use_threads=use_threads)\n\n result = read_multiple_files(paths)\n expected = pa.concat_tables(test_data)\n\n assert result.equals(expected)\n\n # Read with provided metadata\n metadata = pq.read_metadata(paths[0])\n\n result2 = read_multiple_files(paths, metadata=metadata)\n assert result2.equals(expected)\n\n result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)\n assert result3.equals(expected)\n\n # Read column subset\n to_read = [0, 2, 6, result.num_columns - 1]\n\n col_names = [result.field(i).name for i in to_read]\n out = pa.localfs.read_parquet(dirpath, columns=col_names)\n expected = pa.Table.from_arrays([result.column(i) for i in to_read],\n names=col_names,\n metadata=result.schema.metadata)\n assert out.equals(expected)\n\n # Read with multiple threads\n pa.localfs.read_parquet(dirpath, use_threads=True)\n\n # Test failure modes with non-uniform metadata\n bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]\n bad_apple_path = tempdir / '{}.parquet'.format(guid())\n\n t = pa.Table.from_pandas(bad_apple)\n _write_table(t, bad_apple_path)\n\n bad_meta = pq.read_metadata(bad_apple_path)\n\n with pytest.raises(ValueError):\n read_multiple_files(paths + [bad_apple_path])\n\n with pytest.raises(ValueError):\n read_multiple_files(paths, metadata=bad_meta)\n\n mixed_paths = [bad_apple_path, paths[0]]\n\n with pytest.raises(ValueError):\n read_multiple_files(mixed_paths, schema=bad_meta.schema)\n\n with pytest.raises(ValueError):\n read_multiple_files(mixed_paths)\n\n\[email 
protected]\ndef test_dataset_read_pandas(tempdir):\n nfiles = 5\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n frames = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n df.index = np.arange(i * size, (i + 1) * size)\n df.index.name = 'index'\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df)\n _write_table(table, path)\n test_data.append(table)\n frames.append(df)\n paths.append(path)\n\n dataset = pq.ParquetDataset(dirpath)\n columns = ['uint8', 'strings']\n result = dataset.read_pandas(columns=columns).to_pandas()\n expected = pd.concat([x[columns] for x in frames])\n\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_dataset_memory_map(tempdir):\n # ARROW-2627: Check that we can use ParquetDataset with memory-mapping\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n df = _test_dataframe(10, seed=0)\n path = dirpath / '{}.parquet'.format(0)\n table = pa.Table.from_pandas(df)\n _write_table(table, path, version='2.0')\n\n dataset = pq.ParquetDataset(dirpath, memory_map=True)\n assert dataset.pieces[0].read().equals(table)\n\n\[email protected]\ndef test_dataset_enable_buffered_stream(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n df = _test_dataframe(10, seed=0)\n path = dirpath / '{}.parquet'.format(0)\n table = pa.Table.from_pandas(df)\n _write_table(table, path, version='2.0')\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(dirpath, buffer_size=-64)\n\n for buffer_size in [128, 1024]:\n dataset = pq.ParquetDataset(dirpath, buffer_size=buffer_size)\n assert dataset.pieces[0].read().equals(table)\n\n\[email protected]\[email protected]('preserve_index', [True, False, None])\ndef test_dataset_read_pandas_common_metadata(tempdir, preserve_index):\n # ARROW-1103\n nfiles = 5\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n frames = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df, preserve_index=preserve_index)\n\n # Obliterate metadata\n table = table.replace_schema_metadata(None)\n assert table.schema.metadata is None\n\n _write_table(table, path)\n test_data.append(table)\n frames.append(df)\n paths.append(path)\n\n # Write _metadata common file\n table_for_metadata = pa.Table.from_pandas(\n df, preserve_index=preserve_index\n )\n pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')\n\n dataset = pq.ParquetDataset(dirpath)\n columns = ['uint8', 'strings']\n result = dataset.read_pandas(columns=columns).to_pandas()\n expected = pd.concat([x[columns] for x in frames])\n expected.index.name = (\n df.index.name if preserve_index is not False else None)\n tm.assert_frame_equal(result, expected)\n\n\ndef _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):\n test_data = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(file_nrows, seed=i)\n path = base_path / '{}.parquet'.format(i)\n\n test_data.append(_write_table(df, path))\n paths.append(path)\n return paths\n\n\[email protected]\ndef test_ignore_private_directories(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n # private directory\n (dirpath / '_impala_staging').mkdir()\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == 
set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_ignore_hidden_files_dot(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n with (dirpath / '.DS_Store').open('wb') as f:\n f.write(b'gibberish')\n\n with (dirpath / '.private').open('wb') as f:\n f.write(b'gibberish')\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_ignore_hidden_files_underscore(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n with (dirpath / '_committed_123').open('wb') as f:\n f.write(b'abcd')\n\n with (dirpath / '_started_321').open('wb') as f:\n f.write(b'abcd')\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_multiindex_duplicate_values(tempdir):\n num_rows = 3\n numbers = list(range(num_rows))\n index = pd.MultiIndex.from_arrays(\n [['foo', 'foo', 'bar'], numbers],\n names=['foobar', 'some_numbers'],\n )\n\n df = pd.DataFrame({'numbers': numbers}, index=index)\n table = pa.Table.from_pandas(df)\n\n filename = tempdir / 'dup_multi_index_levels.parquet'\n\n _write_table(table, filename)\n result_table = _read_table(filename)\n assert table.equals(result_table)\n\n result_df = result_table.to_pandas()\n tm.assert_frame_equal(result_df, df)\n\n\[email protected]\ndef test_write_error_deletes_incomplete_file(tempdir):\n # ARROW-1285\n df = pd.DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.Categorical(list('abc')),\n 'g': pd.date_range('20130101', periods=3),\n 'h': pd.date_range('20130101', periods=3,\n tz='US/Eastern'),\n 'i': pd.date_range('20130101', periods=3, freq='ns')})\n\n pdf = pa.Table.from_pandas(df)\n\n filename = tempdir / 'tmp_file'\n try:\n _write_table(pdf, filename)\n except pa.ArrowException:\n pass\n\n assert not filename.exists()\n\n\[email protected]\ndef test_noncoerced_nanoseconds_written_without_exception(tempdir):\n # ARROW-1957: the Parquet version 2.0 writer preserves Arrow\n # nanosecond timestamps by default\n n = 9\n df = pd.DataFrame({'x': range(n)},\n index=pd.date_range('2017-01-01', freq='1n', periods=n))\n tb = pa.Table.from_pandas(df)\n\n filename = tempdir / 'written.parquet'\n try:\n pq.write_table(tb, filename, version='2.0')\n except Exception:\n pass\n assert filename.exists()\n\n recovered_table = pq.read_table(filename)\n assert tb.equals(recovered_table)\n\n # Loss of data thru coercion (without explicit override) still an error\n filename = tempdir / 'not_written.parquet'\n with pytest.raises(ValueError):\n pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')\n\n\ndef test_read_non_existent_file(tempdir):\n path = 'non-existent-file.parquet'\n try:\n pq.read_table(path)\n except Exception as e:\n assert path in e.args[0]\n\n\ndef test_read_table_doesnt_warn(datadir):\n with pytest.warns(None) as record:\n pq.read_table(datadir / 'v0.7.1.parquet')\n\n assert len(record) == 0\n\n\ndef _test_write_to_dataset_with_partitions(base_path,\n filesystem=None,\n schema=None,\n index_name=None):\n # ARROW-1400\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'nan': [pd.np.nan] * 10,\n 'date': 
np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n cols = output_df.columns.tolist()\n partition_by = ['group1', 'group2']\n output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,\n preserve_index=False)\n pq.write_to_dataset(output_table, base_path, partition_by,\n filesystem=filesystem)\n\n metadata_path = os.path.join(base_path, '_common_metadata')\n\n if filesystem is not None:\n with filesystem.open(metadata_path, 'wb') as f:\n pq.write_metadata(output_table.schema, f)\n else:\n pq.write_metadata(output_table.schema, metadata_path)\n\n # ARROW-2891: Ensure the output_schema is preserved when writing a\n # partitioned dataset\n dataset = pq.ParquetDataset(base_path,\n filesystem=filesystem,\n validate_schema=True)\n # ARROW-2209: Ensure the dataset schema also includes the partition columns\n dataset_cols = set(dataset.schema.to_arrow_schema().names)\n assert dataset_cols == set(output_table.schema.names)\n\n input_table = dataset.read()\n input_df = input_table.to_pandas()\n\n # Read data back in and compare with original DataFrame\n # Partitioned columns added to the end of the DataFrame when read\n input_df_cols = input_df.columns.tolist()\n assert partition_by == input_df_cols[-1 * len(partition_by):]\n\n # Partitioned columns become 'categorical' dtypes\n input_df = input_df[cols]\n for col in partition_by:\n output_df[col] = output_df[col].astype('category')\n assert output_df.equals(input_df)\n\n\ndef _test_write_to_dataset_no_partitions(base_path, filesystem=None):\n # ARROW-1400\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n cols = output_df.columns.tolist()\n output_table = pa.Table.from_pandas(output_df)\n\n if filesystem is None:\n filesystem = LocalFileSystem.get_instance()\n\n # Without partitions, append files to root_path\n n = 5\n for i in range(n):\n pq.write_to_dataset(output_table, base_path,\n filesystem=filesystem)\n output_files = [file for file in filesystem.ls(base_path)\n if file.endswith(\".parquet\")]\n assert len(output_files) == n\n\n # Deduplicated incoming DataFrame should match\n # original outgoing Dataframe\n input_table = pq.ParquetDataset(base_path,\n filesystem=filesystem).read()\n input_df = input_table.to_pandas()\n input_df = input_df.drop_duplicates()\n input_df = input_df[cols]\n assert output_df.equals(input_df)\n\n\[email protected]\ndef test_write_to_dataset_with_partitions(tempdir):\n _test_write_to_dataset_with_partitions(str(tempdir))\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_schema(tempdir):\n schema = pa.schema([pa.field('group1', type=pa.string()),\n pa.field('group2', type=pa.string()),\n pa.field('num', type=pa.int64()),\n pa.field('nan', type=pa.int32()),\n pa.field('date', type=pa.timestamp(unit='us'))])\n _test_write_to_dataset_with_partitions(str(tempdir), schema=schema)\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_index_name(tempdir):\n _test_write_to_dataset_with_partitions(str(tempdir),\n index_name='index_name')\n\n\[email protected]\ndef test_write_to_dataset_no_partitions(tempdir):\n _test_write_to_dataset_no_partitions(str(tempdir))\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'nan': [pd.np.nan] * 10,\n 'date': 
np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n partition_by = ['group1', 'group2']\n output_table = pa.Table.from_pandas(output_df)\n path = str(tempdir)\n\n def partition_filename_callback(keys):\n return \"{0}-{1}.parquet\".format(*keys)\n\n pq.write_to_dataset(output_table, path,\n partition_by, partition_filename_callback)\n\n dataset = pq.ParquetDataset(path)\n\n # ARROW-3538: Ensure partition filenames match the given pattern\n # defined in the local function partition_filename_callback\n expected_basenames = [\n 'a-e.parquet', 'a-f.parquet',\n 'b-e.parquet', 'b-f.parquet',\n 'b-g.parquet', 'c-e.parquet'\n ]\n output_basenames = [os.path.basename(p.path) for p in dataset.pieces]\n\n assert sorted(expected_basenames) == sorted(output_basenames)\n\n\[email protected]_memory\ndef test_large_table_int32_overflow():\n size = np.iinfo('int32').max + 1\n\n arr = np.ones(size, dtype='uint8')\n\n parr = pa.array(arr, type=pa.uint8())\n\n table = pa.Table.from_arrays([parr], names=['one'])\n f = io.BytesIO()\n _write_table(table, f)\n\n\ndef _simple_table_roundtrip(table, **write_kwargs):\n stream = pa.BufferOutputStream()\n _write_table(table, stream, **write_kwargs)\n buf = stream.getvalue()\n return _read_table(buf)\n\n\[email protected]_memory\ndef test_byte_array_exactly_2gb():\n # Test edge case reported in ARROW-3762\n val = b'x' * (1 << 10)\n\n base = pa.array([val] * ((1 << 21) - 1))\n cases = [\n [b'x' * 1023], # 2^31 - 1\n [b'x' * 1024], # 2^31\n [b'x' * 1025] # 2^31 + 1\n ]\n for case in cases:\n values = pa.chunked_array([base, pa.array(case)])\n t = pa.table([values], names=['f0'])\n result = _simple_table_roundtrip(t, use_dictionary=False)\n assert t.equals(result)\n\n\[email protected]\[email protected]_memory\ndef test_binary_array_overflow_to_chunked():\n # ARROW-3762\n\n # 2^31 + 1 bytes\n values = [b'x'] + [\n b'x' * (1 << 20)\n ] * 2 * (1 << 10)\n df = pd.DataFrame({'byte_col': values})\n\n tbl = pa.Table.from_pandas(df, preserve_index=False)\n read_tbl = _simple_table_roundtrip(tbl)\n\n col0_data = read_tbl[0]\n assert isinstance(col0_data, pa.ChunkedArray)\n\n # Split up into 2GB chunks\n assert col0_data.num_chunks == 2\n\n assert tbl.equals(read_tbl)\n\n\[email protected]\[email protected]_memory\ndef test_list_of_binary_large_cell():\n # ARROW-4688\n data = []\n\n # TODO(wesm): handle chunked children\n # 2^31 - 1 bytes in a single cell\n # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])\n\n # A little under 2GB in cell each containing approximately 10MB each\n data.extend([[b'x' * 1000000] * 10] * 214)\n\n arr = pa.array(data)\n table = pa.Table.from_arrays([arr], ['chunky_cells'])\n read_table = _simple_table_roundtrip(table)\n assert table.equals(read_table)\n\n\[email protected]\ndef test_index_column_name_duplicate(tempdir):\n data = {\n 'close': {\n pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,\n pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,\n },\n 'time': {\n pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(\n '2017-06-30 01:31:00'\n ),\n pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(\n '2017-06-30 01:32:00'\n ),\n }\n }\n path = str(tempdir / 'data.parquet')\n dfx = pd.DataFrame(data).set_index('time', drop=False)\n tdfx = pa.Table.from_pandas(dfx)\n _write_table(tdfx, path)\n arrow_table = _read_table(path)\n result_df = arrow_table.to_pandas()\n tm.assert_frame_equal(result_df, dfx)\n\n\[email protected]\ndef test_parquet_nested_convenience(tempdir):\n # ARROW-1684\n df = pd.DataFrame({\n 'a': 
[[1, 2, 3], None, [4, 5], []],\n 'b': [[1.], None, None, [6., 7.]],\n })\n\n path = str(tempdir / 'nested_convenience.parquet')\n\n table = pa.Table.from_pandas(df, preserve_index=False)\n _write_table(table, path)\n\n read = pq.read_table(path, columns=['a'])\n tm.assert_frame_equal(read.to_pandas(), df[['a']])\n\n read = pq.read_table(path, columns=['a', 'b'])\n tm.assert_frame_equal(read.to_pandas(), df)\n\n\[email protected]\ndef test_backwards_compatible_index_naming(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\\s{2,}',\n index_col=None, header=0, engine='python')\n table = _read_table(datadir / 'v0.7.1.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_index_multi_level_named(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(\n io.BytesIO(expected_string), sep=r'\\s{2,}',\n index_col=['cut', 'color', 'clarity'],\n header=0, engine='python'\n ).sort_index()\n\n table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_index_multi_level_some_named(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(\n io.BytesIO(expected_string),\n sep=r'\\s{2,}', index_col=['cut', 'color', 'clarity'],\n header=0, engine='python'\n ).sort_index()\n expected.index = expected.index.set_names(['cut', None, 'clarity'])\n\n table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_column_metadata_handling(datadir):\n expected = pd.DataFrame(\n {'a': [1, 2, 3], 'b': [.1, .2, .3],\n 'c': pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')})\n 
expected.index = pd.MultiIndex.from_arrays(\n [['a', 'b', 'c'],\n pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')],\n names=['index', None])\n\n path = datadir / 'v0.7.1.column-metadata-handling.parquet'\n table = _read_table(path)\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n table = _read_table(path, columns=['a'])\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))\n\n\ndef _make_dataset_for_pickling(tempdir, N=100):\n path = tempdir / 'data.parquet'\n fs = LocalFileSystem.get_instance()\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n table = pa.Table.from_pandas(df)\n\n num_groups = 3\n with pq.ParquetWriter(path, table.schema) as writer:\n for i in range(num_groups):\n writer.write_table(table)\n\n reader = pq.ParquetFile(path)\n assert reader.metadata.num_row_groups == num_groups\n\n metadata_path = tempdir / '_metadata'\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(tempdir, filesystem=fs)\n assert dataset.metadata_path == str(metadata_path)\n\n return dataset\n\n\[email protected]\[email protected]('pickler', [\n pytest.param(pickle, id='builtin'),\n pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')\n])\ndef test_pickle_dataset(tempdir, datadir, pickler):\n def is_pickleable(obj):\n return obj == pickler.loads(pickler.dumps(obj))\n\n dataset = _make_dataset_for_pickling(tempdir)\n\n assert is_pickleable(dataset)\n assert is_pickleable(dataset.metadata)\n assert is_pickleable(dataset.metadata.schema)\n assert len(dataset.metadata.schema)\n for column in dataset.metadata.schema:\n assert is_pickleable(column)\n\n for piece in dataset.pieces:\n assert is_pickleable(piece)\n metadata = piece.get_metadata()\n assert metadata.num_row_groups\n for i in range(metadata.num_row_groups):\n assert is_pickleable(metadata.row_group(i))\n\n\[email protected]\ndef test_decimal_roundtrip(tempdir):\n num_values = 10\n\n columns = {}\n for precision in range(1, 39):\n for scale in range(0, precision + 1):\n with util.random_seed(0):\n random_decimal_values = [\n util.randdecimal(precision, scale)\n for _ in range(num_values)\n ]\n column_name = ('dec_precision_{:d}_scale_{:d}'\n .format(precision, scale))\n columns[column_name] = random_decimal_values\n\n expected = pd.DataFrame(columns)\n filename = tempdir / 'decimals.parquet'\n string_filename = str(filename)\n table = pa.Table.from_pandas(expected)\n _write_table(table, string_filename)\n result_table = _read_table(string_filename)\n result = result_table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\[email protected](\n raises=pa.ArrowException, reason='Parquet does not support negative scale'\n)\ndef test_decimal_roundtrip_negative_scale(tempdir):\n expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})\n filename = tempdir / 'decimals.parquet'\n string_filename = str(filename)\n t = pa.Table.from_pandas(expected)\n _write_table(t, string_filename)\n result_table = _read_table(string_filename)\n result = result_table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_parquet_writer_context_obj(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n\n with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:\n\n 
frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n\n frames.append(df.copy())\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_parquet_writer_context_obj_with_exception(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n error_text = 'Artificial Error'\n\n try:\n with pq.ParquetWriter(out,\n arrow_table.schema,\n version='2.0') as writer:\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n frames.append(df.copy())\n if i == 5:\n raise ValueError(error_text)\n except Exception as e:\n assert str(e) == error_text\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_zlib_compression_bug():\n # ARROW-3514: \"zlib deflate failed, output buffer too small\"\n table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])\n f = io.BytesIO()\n pq.write_table(table, f, compression='gzip')\n\n f.seek(0)\n roundtrip = pq.read_table(f)\n tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas())\n\n\[email protected]\ndef test_merging_parquet_tables_with_different_pandas_metadata(tempdir):\n # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch\n schema = pa.schema([\n pa.field('int', pa.int16()),\n pa.field('float', pa.float32()),\n pa.field('string', pa.string())\n ])\n df1 = pd.DataFrame({\n 'int': np.arange(3, dtype=np.uint8),\n 'float': np.arange(3, dtype=np.float32),\n 'string': ['ABBA', 'EDDA', 'ACDC']\n })\n df2 = pd.DataFrame({\n 'int': [4, 5],\n 'float': [1.1, None],\n 'string': [None, None]\n })\n table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)\n table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)\n\n assert not table1.schema.equals(table2.schema)\n assert table1.schema.equals(table2.schema, check_metadata=False)\n\n writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema)\n writer.write_table(table1)\n writer.write_table(table2)\n\n\ndef test_empty_row_groups(tempdir):\n # ARROW-3020\n table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])\n\n path = tempdir / 'empty_row_groups.parquet'\n\n num_groups = 3\n with pq.ParquetWriter(path, table.schema) as writer:\n for i in range(num_groups):\n writer.write_table(table)\n\n reader = pq.ParquetFile(path)\n assert reader.metadata.num_row_groups == num_groups\n\n for i in range(num_groups):\n assert reader.read_row_group(i).equals(table)\n\n\[email protected]\ndef test_parquet_writer_with_caller_provided_filesystem():\n out = pa.BufferOutputStream()\n\n class CustomFS(FileSystem):\n def __init__(self):\n self.path = None\n self.mode = None\n\n def open(self, path, mode='rb'):\n self.path = path\n self.mode = mode\n return out\n\n fs = CustomFS()\n fname = 'expected_fname.parquet'\n df = _test_dataframe(100)\n table = pa.Table.from_pandas(df, preserve_index=False)\n\n with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \\\n as writer:\n writer.write_table(table)\n\n assert fs.path == fname\n assert fs.mode == 
'wb'\n assert out.closed\n\n buf = out.getvalue()\n table_read = _read_table(pa.BufferReader(buf))\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df_read, df)\n\n # Should raise ValueError when filesystem is passed with file-like object\n with pytest.raises(ValueError) as err_info:\n pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)\n expected_msg = (\"filesystem passed but where is file-like, so\"\n \" there is nothing to open with filesystem.\")\n assert str(err_info) == expected_msg\n\n\ndef test_writing_empty_lists():\n # ARROW-2591: [Python] Segmentation fault issue in pq.write_table\n arr1 = pa.array([[], []], pa.list_(pa.int32()))\n table = pa.Table.from_arrays([arr1], ['list(int32)'])\n _check_roundtrip(table)\n\n\ndef test_write_nested_zero_length_array_chunk_failure():\n # Bug report in ARROW-3792\n cols = OrderedDict(\n int32=pa.int32(),\n list_string=pa.list_(pa.string())\n )\n data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]\n\n # This produces a table with a column like\n # <Column name='list_string' type=ListType(list<item: string>)>\n # [\n # [],\n # [\n # [\n # \"G\"\n # ]\n # ]\n # ]\n #\n # Each column is a ChunkedArray with 2 elements\n my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()\n for batch in data]\n my_batches = [pa.RecordBatch.from_arrays(batch, schema=pa.schema(cols))\n for batch in my_arrays]\n tbl = pa.Table.from_batches(my_batches, pa.schema(cols))\n _check_roundtrip(tbl)\n\n\[email protected]\ndef test_partitioned_dataset(tempdir):\n # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset\n # to a Parquet file\n path = tempdir / \"ARROW-3208\"\n df = pd.DataFrame({\n 'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],\n 'two': [-1, 10, 2, 100, 1000, 1, 11],\n 'three': [0, 0, 0, 0, 0, 0, 0]\n })\n table = pa.Table.from_pandas(df)\n pq.write_to_dataset(table, root_path=str(path),\n partition_cols=['one', 'two'])\n table = pq.ParquetDataset(path).read()\n pq.write_table(table, path / \"output.parquet\")\n\n\ndef test_read_column_invalid_index():\n table = pa.table([pa.array([4, 5]), pa.array([\"foo\", \"bar\"])],\n names=['ints', 'strs'])\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n f = pq.ParquetFile(bio.getvalue())\n assert f.reader.read_column(0).to_pylist() == [4, 5]\n assert f.reader.read_column(1).to_pylist() == [\"foo\", \"bar\"]\n for index in (-1, 2):\n with pytest.raises((ValueError, IndexError)):\n f.reader.read_column(index)\n\n\[email protected]\ndef test_direct_read_dictionary():\n # ARROW-3325\n repeats = 10\n nunique = 5\n\n data = [\n [tm.rands(10) for i in range(nunique)] * repeats,\n\n ]\n table = pa.table(data, names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n\n result = pq.read_table(pa.BufferReader(contents),\n read_dictionary=['f0'])\n\n # Compute dictionary-encoded subfield\n expected = pa.table([table[0].dictionary_encode()], names=['f0'])\n assert result.equals(expected)\n\n\[email protected]\ndef test_dataset_read_dictionary(tempdir):\n path = tempdir / \"ARROW-3325-dataset\"\n t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])\n t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])\n pq.write_to_dataset(t1, root_path=str(path))\n pq.write_to_dataset(t2, root_path=str(path))\n\n result = pq.ParquetDataset(path, read_dictionary=['f0']).read()\n\n # The order of the chunks is non-deterministic\n ex_chunks = [t1[0].chunk(0).dictionary_encode(),\n 
t2[0].chunk(0).dictionary_encode()]\n\n assert result[0].num_chunks == 2\n c0, c1 = result[0].chunk(0), result[0].chunk(1)\n if c0.equals(ex_chunks[0]):\n assert c1.equals(ex_chunks[1])\n else:\n assert c0.equals(ex_chunks[1])\n assert c1.equals(ex_chunks[0])\n\n\[email protected]\ndef test_direct_read_dictionary_subfield():\n repeats = 10\n nunique = 5\n\n data = [\n [[tm.rands(10)] for i in range(nunique)] * repeats,\n ]\n table = pa.table(data, names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n result = pq.read_table(pa.BufferReader(contents),\n read_dictionary=['f0.list.item'])\n\n arr = pa.array(data[0])\n values_as_dict = arr.values.dictionary_encode()\n\n inner_indices = values_as_dict.indices.cast('int32')\n new_values = pa.DictionaryArray.from_arrays(inner_indices,\n values_as_dict.dictionary)\n\n offsets = pa.array(range(51), type='int32')\n expected_arr = pa.ListArray.from_arrays(offsets, new_values)\n expected = pa.table([expected_arr], names=['f0'])\n\n assert result.equals(expected)\n assert result[0].num_chunks == 1\n\n\[email protected]\ndef test_dataset_metadata(tempdir):\n path = tempdir / \"ARROW-1983-dataset\"\n\n # create and write a test dataset\n df = pd.DataFrame({\n 'one': [1, 2, 3],\n 'two': [-1, -2, -3],\n 'three': [[1, 2], [2, 3], [3, 4]],\n })\n table = pa.Table.from_pandas(df)\n\n metadata_list = []\n pq.write_to_dataset(table, root_path=str(path),\n partition_cols=['one', 'two'],\n metadata_collector=metadata_list)\n\n # open the dataset and collect metadata from pieces:\n dataset = pq.ParquetDataset(path)\n metadata_list2 = [p.get_metadata() for p in dataset.pieces]\n\n # compare metadata list content:\n assert len(metadata_list) == len(metadata_list2)\n for md, md2 in zip(metadata_list, metadata_list2):\n d = md.to_dict()\n d2 = md2.to_dict()\n # serialized_size is initialized in the reader:\n assert d.pop('serialized_size') == 0\n assert d2.pop('serialized_size') > 0\n assert d == d2\n\n\ndef test_parquet_file_too_small(tempdir):\n path = str(tempdir / \"test.parquet\")\n with pytest.raises(pa.ArrowIOError,\n match='size is 0 bytes'):\n with open(path, 'wb') as f:\n pass\n pq.read_table(path)\n\n with pytest.raises(pa.ArrowIOError,\n match='size is 4 bytes'):\n with open(path, 'wb') as f:\n f.write(b'ffff')\n pq.read_table(path)\n\n\[email protected]\ndef test_categorical_index_survives_roundtrip():\n # ARROW-3652, addressed by ARROW-3246\n df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])\n df['c1'] = df['c1'].astype('category')\n df = df.set_index(['c1'])\n\n table = pa.Table.from_pandas(df)\n bos = pa.BufferOutputStream()\n pq.write_table(table, bos)\n ref_df = pq.read_pandas(bos.getvalue()).to_pandas()\n assert isinstance(ref_df.index, pd.CategoricalIndex)\n assert ref_df.index.equals(df.index)\n\n\[email protected]\ndef test_categorical_order_survives_roundtrip():\n # ARROW-6302\n df = pd.DataFrame({\"a\": pd.Categorical(\n [\"a\", \"b\", \"c\", \"a\"], categories=[\"b\", \"c\", \"d\"], ordered=True)})\n\n table = pa.Table.from_pandas(df)\n bos = pa.BufferOutputStream()\n pq.write_table(table, bos)\n\n contents = bos.getvalue()\n result = pq.read_pandas(contents).to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n\ndef test_dictionary_array_automatically_read():\n # ARROW-3246\n\n # Make a large dictionary, a little over 4MB of data\n dict_length = 4000\n dict_values = pa.array([('x' * 1000 + '_{}'.format(i))\n for i in range(dict_length)])\n\n num_chunks = 10\n chunk_size = 
100\n chunks = []\n for i in range(num_chunks):\n indices = np.random.randint(0, dict_length,\n size=chunk_size).astype(np.int32)\n chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),\n dict_values))\n\n table = pa.table([pa.chunked_array(chunks)], names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n result = pq.read_table(pa.BufferReader(contents))\n\n assert result.equals(table)\n\n # The only key in the metadata was the Arrow schema key\n assert result.schema.metadata is None\n\n\[email protected]\ndef test_pandas_categorical_na_type_row_groups():\n # ARROW-5085\n df = pd.DataFrame({\"col\": [None] * 100, \"int\": [1.0] * 100})\n df_category = df.astype({\"col\": \"category\", \"int\": \"category\"})\n table = pa.Table.from_pandas(df)\n table_cat = pa.Table.from_pandas(df_category)\n buf = pa.BufferOutputStream()\n\n # it works\n pq.write_table(table_cat, buf, version=\"2.0\", chunk_size=10)\n result = pq.read_table(buf.getvalue())\n\n # Result is non-categorical\n assert result[0].equals(table[0])\n assert result[1].equals(table[1])\n\n\[email protected]\ndef test_pandas_categorical_roundtrip():\n # ARROW-5480, this was enabled by ARROW-3246\n\n # Have one of the categories unobserved and include a null (-1)\n codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')\n categories = ['foo', 'bar', 'baz']\n df = pd.DataFrame({'x': pd.Categorical.from_codes(\n codes, categories=categories)})\n\n buf = pa.BufferOutputStream()\n pq.write_table(pa.table(df), buf)\n\n result = pq.read_table(buf.getvalue()).to_pandas()\n assert result.x.dtype == 'category'\n assert (result.x.cat.categories == categories).all()\n tm.assert_frame_equal(result, df)\n\n\[email protected]\ndef test_multi_dataset_metadata(tempdir):\n filenames = [\"ARROW-1983-dataset.0\", \"ARROW-1983-dataset.1\"]\n metapath = str(tempdir / \"_metadata\")\n\n # create a test dataset\n df = pd.DataFrame({\n 'one': [1, 2, 3],\n 'two': [-1, -2, -3],\n 'three': [[1, 2], [2, 3], [3, 4]],\n })\n table = pa.Table.from_pandas(df)\n\n # write dataset twice and collect/merge metadata\n _meta = None\n for filename in filenames:\n meta = []\n pq.write_table(table, str(tempdir / filename),\n metadata_collector=meta)\n meta[0].set_file_path(filename)\n if _meta is None:\n _meta = meta[0]\n else:\n _meta.append_row_groups(meta[0])\n\n # Write merged metadata-only file\n with open(metapath, \"wb\") as f:\n _meta.write_metadata_file(f)\n\n # Read back the metadata\n meta = pq.read_metadata(metapath)\n md = meta.to_dict()\n _md = _meta.to_dict()\n for key in _md:\n if key != 'serialized_size':\n assert _md[key] == md[key]\n assert _md['num_columns'] == 3\n assert _md['num_rows'] == 6\n assert _md['num_row_groups'] == 2\n assert _md['serialized_size'] == 0\n assert md['serialized_size'] > 0\n\n\[email protected]\ndef test_filter_before_validate_schema(tempdir):\n # ARROW-4076 apply filter before schema validation\n # to avoid checking unneeded schemas\n\n # create partitioned dataset with mismatching schemas which would\n # otherwise raise if first validation all schemas\n dir1 = tempdir / 'A=0'\n dir1.mkdir()\n table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))\n pq.write_table(table1, dir1 / 'data.parquet')\n\n dir2 = tempdir / 'A=1'\n dir2.mkdir()\n table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))\n pq.write_table(table2, dir2 / 'data.parquet')\n\n # read single file using filter\n table = pq.read_table(tempdir, filters=[[('A', '==', 0)]])\n assert 
table.column('B').equals(pa.chunked_array([[1, 2, 3]]))\n\n\[email protected]\[email protected]\[email protected](\"ignore:RangeIndex:DeprecationWarning\")\ndef test_fastparquet_cross_compatibility(tempdir):\n fp = pytest.importorskip('fastparquet')\n\n df = pd.DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"d\": [True, False, True],\n \"e\": pd.date_range(\"20130101\", periods=3),\n \"f\": pd.Categorical([\"a\", \"b\", \"a\"]),\n # fastparquet writes list as BYTE_ARRAY JSON, so no roundtrip\n # \"g\": [[1, 2], None, [1, 2, 3]],\n }\n )\n table = pa.table(df)\n\n # Arrow -> fastparquet\n file_arrow = str(tempdir / \"cross_compat_arrow.parquet\")\n pq.write_table(table, file_arrow, compression=None)\n\n fp_file = fp.ParquetFile(file_arrow)\n df_fp = fp_file.to_pandas()\n tm.assert_frame_equal(df, df_fp)\n\n # Fastparquet -> arrow\n file_fastparquet = str(tempdir / \"cross_compat_fastparquet.parquet\")\n fp.write(file_fastparquet, df)\n\n table_fp = pq.read_pandas(file_fastparquet)\n # for fastparquet written file, categoricals comes back as strings\n # (no arrow schema in parquet metadata)\n df['f'] = df['f'].astype(object)\n tm.assert_frame_equal(table_fp.to_pandas(), df)\n"
] |
[
[
"pandas.Categorical.from_codes",
"pandas.concat",
"pandas.to_datetime",
"pandas.Series",
"numpy.random.seed",
"pandas.Timestamp",
"numpy.arange",
"pandas.Categorical",
"pandas.DataFrame",
"pandas.MultiIndex.from_arrays",
"pandas.util.testing.assert_frame_equal",
"numpy.ones",
"numpy.random.randn",
"numpy.iinfo",
"pandas.util.testing.rands",
"pandas.date_range",
"numpy.array",
"numpy.random.randint"
]
] |
dddd1007/mne-python
|
[
"844d53c866bbea932dd6c89ab444bb7f882f0b6f"
] |
[
"mne/viz/_brain/_brain.py"
] |
[
"# Authors: Alexandre Gramfort <[email protected]>\n# Eric Larson <[email protected]>\n# Oleh Kozynets <[email protected]>\n# Guillaume Favelier <[email protected]>\n# jona-sassenhagen <[email protected]>\n# Joan Massich <[email protected]>\n#\n# License: Simplified BSD\n\nimport contextlib\nfrom functools import partial\nfrom io import BytesIO\nimport os\nimport os.path as op\nimport sys\nimport time\nimport copy\nimport traceback\nimport warnings\n\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom .colormap import calculate_lut\nfrom .surface import _Surface\nfrom .view import views_dicts, _lh_views_dict\nfrom .callback import (ShowView, TimeCallBack, SmartCallBack,\n UpdateLUT, UpdateColorbarScale)\n\nfrom ..utils import (_show_help_fig, _get_color_list, concatenate_images,\n _generate_default_filename, _save_ndarray_img)\nfrom .._3d import _process_clim, _handle_time, _check_views\n\nfrom ...externals.decorator import decorator\nfrom ...defaults import _handle_default\nfrom ...surface import mesh_edges\nfrom ...source_space import SourceSpaces, vertex_to_mni, read_talxfm\nfrom ...transforms import apply_trans, invert_transform\nfrom ...utils import (_check_option, logger, verbose, fill_doc, _validate_type,\n use_log_level, Bunch, _ReuseCycle, warn,\n get_subjects_dir)\n\n\n_ARROW_MOVE = 10 # degrees per press\n\n\n@decorator\ndef safe_event(fun, *args, **kwargs):\n \"\"\"Protect against PyQt5 exiting on event-handling errors.\"\"\"\n try:\n return fun(*args, **kwargs)\n except Exception:\n traceback.print_exc(file=sys.stderr)\n\n\nclass _Overlay(object):\n def __init__(self, scalars, colormap, rng, opacity, name):\n self._scalars = scalars\n self._colormap = colormap\n assert rng is not None\n self._rng = rng\n self._opacity = opacity\n self._name = name\n\n def to_colors(self):\n from .._3d import _get_cmap\n from matplotlib.colors import ListedColormap\n\n if isinstance(self._colormap, str):\n kind = self._colormap\n cmap = _get_cmap(self._colormap)\n else:\n cmap = ListedColormap(self._colormap / 255.)\n kind = str(type(self._colormap))\n logger.debug(\n f'Color mapping {repr(self._name)} with {kind} '\n f'colormap and range {self._rng}')\n\n rng = self._rng\n assert rng is not None\n scalars = _norm(self._scalars, rng)\n\n colors = cmap(scalars)\n if self._opacity is not None:\n colors[:, 3] *= self._opacity\n return colors\n\n\ndef _norm(x, rng):\n if rng[0] == rng[1]:\n factor = 1 if rng[0] == 0 else 1e-6 * rng[0]\n else:\n factor = rng[1] - rng[0]\n return (x - rng[0]) / factor\n\n\nclass _LayeredMesh(object):\n def __init__(self, renderer, vertices, triangles, normals):\n self._renderer = renderer\n self._vertices = vertices\n self._triangles = triangles\n self._normals = normals\n\n self._polydata = None\n self._actor = None\n self._is_mapped = False\n\n self._cache = None\n self._overlays = OrderedDict()\n\n self._default_scalars = np.ones(vertices.shape)\n self._default_scalars_name = 'Data'\n\n def map(self):\n kwargs = {\n \"color\": None,\n \"pickable\": True,\n \"rgba\": True,\n }\n mesh_data = self._renderer.mesh(\n x=self._vertices[:, 0],\n y=self._vertices[:, 1],\n z=self._vertices[:, 2],\n triangles=self._triangles,\n normals=self._normals,\n scalars=self._default_scalars,\n **kwargs\n )\n self._actor, self._polydata = mesh_data\n self._is_mapped = True\n\n def _compute_over(self, B, A):\n assert A.ndim == B.ndim == 2\n assert A.shape[1] == B.shape[1] == 4\n A_w = A[:, 3:] # * 1\n B_w = B[:, 3:] * (1 - A_w)\n C = A.copy()\n C[:, :3] *= A_w\n C[:, :3] 
+= B[:, :3] * B_w\n C[:, 3:] += B_w\n C[:, :3] /= C[:, 3:]\n return np.clip(C, 0, 1, out=C)\n\n def _compose_overlays(self):\n B = None\n for overlay in self._overlays.values():\n A = overlay.to_colors()\n if B is None:\n B = A\n else:\n B = self._compute_over(B, A)\n return B\n\n def add_overlay(self, scalars, colormap, rng, opacity, name):\n overlay = _Overlay(\n scalars=scalars,\n colormap=colormap,\n rng=rng,\n opacity=opacity,\n name=name,\n )\n self._overlays[name] = overlay\n colors = overlay.to_colors()\n\n # save colors in cache\n if self._cache is None:\n self._cache = colors\n else:\n self._cache = self._compute_over(self._cache, colors)\n\n # update the texture\n self._update()\n\n def remove_overlay(self, names):\n if not isinstance(names, list):\n names = [names]\n for name in names:\n if name in self._overlays:\n del self._overlays[name]\n self.update()\n\n def _update(self):\n if self._cache is None or self._renderer is None:\n return\n self._renderer._set_mesh_scalars(\n mesh=self._polydata,\n scalars=self._cache,\n name=self._default_scalars_name,\n )\n\n def update(self):\n self._cache = self._compose_overlays()\n self._update()\n\n def _clean(self):\n mapper = self._actor.GetMapper()\n mapper.SetLookupTable(None)\n self._actor.SetMapper(None)\n self._actor = None\n self._polydata = None\n self._renderer = None\n\n def update_overlay(self, name, scalars=None, colormap=None,\n opacity=None, rng=None):\n overlay = self._overlays.get(name, None)\n if overlay is None:\n return\n if scalars is not None:\n overlay._scalars = scalars\n if colormap is not None:\n overlay._colormap = colormap\n if opacity is not None:\n overlay._opacity = opacity\n if rng is not None:\n overlay._rng = rng\n self.update()\n\n\n@fill_doc\nclass Brain(object):\n \"\"\"Class for visualizing a brain.\n\n .. warning::\n The API for this class is not currently complete. We suggest using\n :meth:`mne.viz.plot_source_estimates` with the PyVista backend\n enabled to obtain a ``Brain`` instance.\n\n Parameters\n ----------\n subject_id : str\n Subject name in Freesurfer subjects dir.\n hemi : str\n Hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case\n of 'both', both hemispheres are shown in the same window.\n In the case of 'split' hemispheres are displayed side-by-side\n in different viewing panes.\n surf : str\n FreeSurfer surface mesh name (ie 'white', 'inflated', etc.).\n title : str\n Title for the window.\n cortex : str or None\n Specifies how the cortical surface is rendered.\n The name of one of the preset cortex styles can be:\n ``'classic'`` (default), ``'high_contrast'``,\n ``'low_contrast'``, or ``'bone'`` or a valid color name.\n Setting this to ``None`` is equivalent to ``(0.5, 0.5, 0.5)``.\n alpha : float in [0, 1]\n Alpha level to control opacity of the cortical surface.\n size : int | array-like, shape (2,)\n The size of the window, in pixels. can be one number to specify\n a square window, or a length-2 sequence to specify (width, height).\n background : tuple(int, int, int)\n The color definition of the background: (red, green, blue).\n foreground : matplotlib color\n Color of the foreground (will be used for colorbars and text).\n None (default) will use black or white depending on the value\n of ``background``.\n figure : list of Figure | None | int\n If None (default), a new window will be created with the appropriate\n views. 
For single view plots, the figure can be specified as int to\n retrieve the corresponding Mayavi window.\n subjects_dir : str | None\n If not None, this directory will be used as the subjects directory\n instead of the value set using the SUBJECTS_DIR environment\n variable.\n views : list | str\n The views to use.\n offset : bool | str\n If True, shifts the right- or left-most x coordinate of the left and\n right surfaces, respectively, to be at zero. This is useful for viewing\n inflated surface where hemispheres typically overlap. Can be \"auto\"\n (default) use True with inflated surfaces and False otherwise\n (Default: 'auto'). Only used when ``hemi='both'``.\n\n .. versionchanged:: 0.23\n Default changed to \"auto\".\n show_toolbar : bool\n If True, toolbars will be shown for each view.\n offscreen : bool\n If True, rendering will be done offscreen (not shown). Useful\n mostly for generating images or screenshots, but can be buggy.\n Use at your own risk.\n interaction : str\n Can be \"trackball\" (default) or \"terrain\", i.e. a turntable-style\n camera.\n units : str\n Can be 'm' or 'mm' (default).\n %(view_layout)s\n silhouette : dict | bool\n As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity\n and ``decimate`` (level of decimation between 0 and 1 or None) of the\n brain's silhouette to display. If True, the default values are used\n and if False, no silhouette will be displayed. Defaults to False.\n theme : str | path-like\n Can be \"auto\" (default), \"light\", or \"dark\" or a path-like to a\n custom stylesheet. For Dark-Mode and automatic Dark-Mode-Detection,\n :mod:`qdarkstyle` respectively and `darkdetect\n <https://github.com/albertosottile/darkdetect>`__ is required.\n show : bool\n Display the window as soon as it is ready. Defaults to True.\n\n Attributes\n ----------\n geo : dict\n A dictionary of pysurfer.Surface objects for each hemisphere.\n overlays : dict\n The overlays.\n\n Notes\n -----\n This table shows the capabilities of each Brain backend (\"✓\" for full\n support, and \"-\" for partial support):\n\n .. 
table::\n :widths: auto\n\n +---------------------------+--------------+---------------+\n | 3D function: | surfer.Brain | mne.viz.Brain |\n +===========================+==============+===============+\n | add_annotation | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | add_data | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | add_foci | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | add_label | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | add_text | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | close | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | data | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | foci | ✓ | |\n +---------------------------+--------------+---------------+\n | labels | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | remove_foci | ✓ | |\n +---------------------------+--------------+---------------+\n | remove_labels | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | remove_annotations | - | ✓ |\n +---------------------------+--------------+---------------+\n | scale_data_colormap | ✓ | |\n +---------------------------+--------------+---------------+\n | save_image | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | save_movie | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | screenshot | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | show_view | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | TimeViewer | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | enable_depth_peeling | | ✓ |\n +---------------------------+--------------+---------------+\n | get_picked_points | | ✓ |\n +---------------------------+--------------+---------------+\n | add_data(volume) | | ✓ |\n +---------------------------+--------------+---------------+\n | view_layout | | ✓ |\n +---------------------------+--------------+---------------+\n | flatmaps | | ✓ |\n +---------------------------+--------------+---------------+\n | vertex picking | | ✓ |\n +---------------------------+--------------+---------------+\n | label picking | | ✓ |\n +---------------------------+--------------+---------------+\n \"\"\"\n\n def __init__(self, subject_id, hemi, surf, title=None,\n cortex=\"classic\", alpha=1.0, size=800, background=\"black\",\n foreground=None, figure=None, subjects_dir=None,\n views='auto', offset='auto', show_toolbar=False,\n offscreen=False, interaction='trackball', units='mm',\n view_layout='vertical', silhouette=False, theme='auto',\n show=True):\n from ..backends.renderer import backend, _get_renderer\n from .._3d import _get_cmap\n from matplotlib.colors import colorConverter\n\n if hemi in ('both', 'split'):\n self._hemis = ('lh', 'rh')\n elif hemi in ('lh', 'rh'):\n self._hemis = (hemi, )\n else:\n raise KeyError('hemi has to be either \"lh\", \"rh\", \"split\", '\n 'or \"both\"')\n self._view_layout = _check_option('view_layout', view_layout,\n ('vertical', 'horizontal'))\n\n if figure is not None and not isinstance(figure, int):\n backend._check_3d_figure(figure)\n if title is None:\n self._title = subject_id\n else:\n self._title = title\n self._interaction = 'trackball'\n\n if isinstance(background, str):\n background = colorConverter.to_rgb(background)\n self._bg_color = background\n if foreground is 
None:\n foreground = 'w' if sum(self._bg_color) < 2 else 'k'\n if isinstance(foreground, str):\n foreground = colorConverter.to_rgb(foreground)\n self._fg_color = foreground\n\n if isinstance(views, str):\n views = [views]\n views = _check_views(surf, views, hemi)\n col_dict = dict(lh=1, rh=1, both=1, split=2)\n shape = (len(views), col_dict[hemi])\n if self._view_layout == 'horizontal':\n shape = shape[::-1]\n self._subplot_shape = shape\n\n size = tuple(np.atleast_1d(size).round(0).astype(int).flat)\n if len(size) not in (1, 2):\n raise ValueError('\"size\" parameter must be an int or length-2 '\n 'sequence of ints.')\n size = size if len(size) == 2 else size * 2 # 1-tuple to 2-tuple\n subjects_dir = get_subjects_dir(subjects_dir)\n\n self.theme = theme\n\n self.time_viewer = False\n self._hemi = hemi\n self._units = units\n self._alpha = float(alpha)\n self._subject_id = subject_id\n self._subjects_dir = subjects_dir\n self._views = views\n self._times = None\n self._vertex_to_label_id = dict()\n self._annotation_labels = dict()\n self._labels = {'lh': list(), 'rh': list()}\n self._unnamed_label_id = 0 # can only grow\n self._annots = {'lh': list(), 'rh': list()}\n self._layered_meshes = {}\n self._elevation_rng = [15, 165] # range of motion of camera on theta\n self._lut_locked = None\n # default values for silhouette\n self._silhouette = {\n 'color': self._bg_color,\n 'line_width': 2,\n 'alpha': alpha,\n 'decimate': 0.9,\n }\n _validate_type(silhouette, (dict, bool), 'silhouette')\n if isinstance(silhouette, dict):\n self._silhouette.update(silhouette)\n self.silhouette = True\n else:\n self.silhouette = silhouette\n self._scalar_bar = None\n # for now only one time label can be added\n # since it is the same for all figures\n self._time_label_added = False\n # array of data used by TimeViewer\n self._data = {}\n self.geo = {}\n self.set_time_interpolation('nearest')\n\n geo_kwargs = self._cortex_colormap(cortex)\n # evaluate at the midpoint of the used colormap\n val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin'])\n self._brain_color = _get_cmap(geo_kwargs['colormap'])(val)\n\n # load geometry for one or both hemispheres as necessary\n _validate_type(offset, (str, bool), 'offset')\n if isinstance(offset, str):\n _check_option('offset', offset, ('auto',), extra='when str')\n offset = (surf in ('inflated', 'flat'))\n offset = None if (not offset or hemi != 'both') else 0.0\n logger.debug(f'Hemi offset: {offset}')\n\n self._renderer = _get_renderer(name=self._title, size=size,\n bgcolor=background,\n shape=shape,\n fig=figure)\n self._renderer._window_close_connect(self._clean)\n self._renderer._window_set_theme(theme)\n self.plotter = self._renderer.plotter\n\n self._setup_canonical_rotation()\n for h in self._hemis:\n # Initialize a Surface object as the geometry\n geo = _Surface(subject_id, h, surf, subjects_dir, offset,\n units=self._units, x_dir=self._rigid[0, :3])\n # Load in the geometry and curvature\n geo.load_geometry()\n geo.load_curvature()\n self.geo[h] = geo\n for ri, ci, v in self._iter_views(h):\n self._renderer.subplot(ri, ci)\n if self._layered_meshes.get(h) is None:\n mesh = _LayeredMesh(\n renderer=self._renderer,\n vertices=self.geo[h].coords,\n triangles=self.geo[h].faces,\n normals=self.geo[h].nn,\n )\n mesh.map() # send to GPU\n mesh.add_overlay(\n scalars=self.geo[h].bin_curv,\n colormap=geo_kwargs[\"colormap\"],\n rng=[geo_kwargs[\"vmin\"], geo_kwargs[\"vmax\"]],\n opacity=alpha,\n name='curv',\n )\n self._layered_meshes[h] = mesh\n # add 
metadata to the mesh for picking\n mesh._polydata._hemi = h\n else:\n actor = self._layered_meshes[h]._actor\n self._renderer.plotter.add_actor(actor)\n if self.silhouette:\n mesh = self._layered_meshes[h]\n self._renderer._silhouette(\n mesh=mesh._polydata,\n color=self._silhouette[\"color\"],\n line_width=self._silhouette[\"line_width\"],\n alpha=self._silhouette[\"alpha\"],\n decimate=self._silhouette[\"decimate\"],\n )\n self._renderer.set_camera(**views_dicts[h][v])\n\n self.interaction = interaction\n self._closed = False\n if show:\n self.show()\n # update the views once the geometry is all set\n for h in self._hemis:\n for ri, ci, v in self._iter_views(h):\n self.show_view(v, row=ri, col=ci, hemi=h)\n\n if surf == 'flat':\n self._renderer.set_interaction(\"rubber_band_2d\")\n\n def _setup_canonical_rotation(self):\n from ...coreg import fit_matched_points, _trans_from_params\n self._rigid = np.eye(4)\n try:\n xfm = read_talxfm(self._subject_id, self._subjects_dir)\n except Exception:\n return\n # XYZ+origin + halfway\n pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5])\n pts_subj = apply_trans(invert_transform(xfm), pts_tal)\n # we fit with scaling enabled, but then discard it (we just need\n # the rigid-body components)\n params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params')\n self._rigid[:] = _trans_from_params((True, True, False), params[:6])\n\n def setup_time_viewer(self, time_viewer=True, show_traces=True):\n \"\"\"Configure the time viewer parameters.\n\n Parameters\n ----------\n time_viewer : bool\n If True, enable widgets interaction. Defaults to True.\n\n show_traces : bool\n If True, enable visualization of time traces. Defaults to True.\n\n Notes\n -----\n The keyboard shortcuts are the following:\n\n '?': Display help window\n 'i': Toggle interface\n 's': Apply auto-scaling\n 'r': Restore original clim\n 'c': Clear all traces\n 'n': Shift the time forward by the playback speed\n 'b': Shift the time backward by the playback speed\n 'Space': Start/Pause playback\n 'Up': Decrease camera elevation angle\n 'Down': Increase camera elevation angle\n 'Left': Decrease camera azimuth angle\n 'Right': Increase camera azimuth angle\n \"\"\"\n if self.time_viewer:\n return\n if not self._data:\n raise ValueError(\"No data to visualize. See ``add_data``.\")\n self.time_viewer = time_viewer\n self.orientation = list(_lh_views_dict.keys())\n self.default_smoothing_range = [0, 15]\n\n # Default configuration\n self.playback = False\n self.visibility = False\n self.refresh_rate_ms = max(int(round(1000. / 60.)), 1)\n self.default_scaling_range = [0.2, 2.0]\n self.default_playback_speed_range = [0.01, 1]\n self.default_playback_speed_value = 0.01\n self.default_status_bar_msg = \"Press ? 
for help\"\n self.default_label_extract_modes = {\n \"stc\": [\"mean\", \"max\"],\n \"src\": [\"mean_flip\", \"pca_flip\", \"auto\"],\n }\n self.default_trace_modes = ('vertex', 'label')\n self.annot = None\n self.label_extract_mode = None\n all_keys = ('lh', 'rh', 'vol')\n self.act_data_smooth = {key: (None, None) for key in all_keys}\n self.color_list = _get_color_list()\n # remove grey for better contrast on the brain\n self.color_list.remove(\"#7f7f7f\")\n self.color_cycle = _ReuseCycle(self.color_list)\n self.mpl_canvas = None\n self.help_canvas = None\n self.rms = None\n self.picked_patches = {key: list() for key in all_keys}\n self.picked_points = {key: list() for key in all_keys}\n self.pick_table = dict()\n self._spheres = list()\n self._mouse_no_mvt = -1\n self.callbacks = dict()\n self.widgets = dict()\n self.keys = ('fmin', 'fmid', 'fmax')\n\n # Derived parameters:\n self.playback_speed = self.default_playback_speed_value\n _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces')\n self.interactor_fraction = 0.25\n if isinstance(show_traces, str):\n self.show_traces = True\n self.separate_canvas = False\n self.traces_mode = 'vertex'\n if show_traces == 'separate':\n self.separate_canvas = True\n elif show_traces == 'label':\n self.traces_mode = 'label'\n else:\n assert show_traces == 'vertex' # guaranteed above\n else:\n if isinstance(show_traces, bool):\n self.show_traces = show_traces\n else:\n show_traces = float(show_traces)\n if not 0 < show_traces < 1:\n raise ValueError(\n 'show traces, if numeric, must be between 0 and 1, '\n f'got {show_traces}')\n self.show_traces = True\n self.interactor_fraction = show_traces\n self.traces_mode = 'vertex'\n self.separate_canvas = False\n del show_traces\n\n self._configure_time_label()\n self._configure_scalar_bar()\n self._configure_shortcuts()\n self._configure_picking()\n self._configure_tool_bar()\n self._configure_dock()\n self._configure_menu()\n self._configure_status_bar()\n self._configure_playback()\n self._configure_help()\n # show everything at the end\n self.toggle_interface()\n self._renderer.show()\n\n # sizes could change, update views\n for hemi in ('lh', 'rh'):\n for ri, ci, v in self._iter_views(hemi):\n self.show_view(view=v, row=ri, col=ci)\n self._renderer._process_events()\n\n self._renderer._update()\n # finally, show the MplCanvas\n if self.show_traces:\n self.mpl_canvas.show()\n\n @safe_event\n def _clean(self):\n # resolve the reference cycle\n self.clear_glyphs()\n self.remove_annotations()\n # clear init actors\n for hemi in self._hemis:\n self._layered_meshes[hemi]._clean()\n self._clear_callbacks()\n self._clear_widgets()\n if getattr(self, 'mpl_canvas', None) is not None:\n self.mpl_canvas.clear()\n if getattr(self, 'act_data_smooth', None) is not None:\n for key in list(self.act_data_smooth.keys()):\n self.act_data_smooth[key] = None\n # XXX this should be done in PyVista\n for renderer in self._renderer._all_renderers:\n renderer.RemoveAllLights()\n # app_window cannot be set to None because it is used in __del__\n for key in ('lighting', 'interactor', '_RenderWindow'):\n setattr(self.plotter, key, None)\n # Qt LeaveEvent requires _Iren so we use _FakeIren instead of None\n # to resolve the ref to vtkGenericRenderWindowInteractor\n self.plotter._Iren = _FakeIren()\n if getattr(self.plotter, 'picker', None) is not None:\n self.plotter.picker = None\n # XXX end PyVista\n for key in ('plotter', 'window', 'dock', 'tool_bar', 'menu_bar',\n 'interactor', 'mpl_canvas', 'time_actor',\n 
'picked_renderer', 'act_data_smooth', '_scalar_bar',\n 'actions', 'widgets', 'geo', '_data'):\n setattr(self, key, None)\n\n def toggle_interface(self, value=None):\n \"\"\"Toggle the interface.\n\n Parameters\n ----------\n value : bool | None\n If True, the widgets are shown and if False, they\n are hidden. If None, the state of the widgets is\n toggled. Defaults to None.\n \"\"\"\n if value is None:\n self.visibility = not self.visibility\n else:\n self.visibility = value\n\n # update tool bar and dock\n with self._renderer._window_ensure_minimum_sizes():\n if self.visibility:\n self._renderer._dock_show()\n self._renderer._tool_bar_update_button_icon(\n name=\"visibility\", icon_name=\"visibility_on\")\n else:\n self._renderer._dock_hide()\n self._renderer._tool_bar_update_button_icon(\n name=\"visibility\", icon_name=\"visibility_off\")\n\n self._renderer._update()\n\n def apply_auto_scaling(self):\n \"\"\"Detect automatically fitting scaling parameters.\"\"\"\n self._update_auto_scaling()\n\n def restore_user_scaling(self):\n \"\"\"Restore original scaling parameters.\"\"\"\n self._update_auto_scaling(restore=True)\n\n def toggle_playback(self, value=None):\n \"\"\"Toggle time playback.\n\n Parameters\n ----------\n value : bool | None\n If True, automatic time playback is enabled and if False,\n it's disabled. If None, the state of time playback is toggled.\n Defaults to None.\n \"\"\"\n if value is None:\n self.playback = not self.playback\n else:\n self.playback = value\n\n # update tool bar icon\n if self.playback:\n self._renderer._tool_bar_update_button_icon(\n name=\"play\", icon_name=\"pause\")\n else:\n self._renderer._tool_bar_update_button_icon(\n name=\"play\", icon_name=\"play\")\n\n if self.playback:\n time_data = self._data['time']\n max_time = np.max(time_data)\n if self._current_time == max_time: # start over\n self.set_time_point(0) # first index\n self._last_tick = time.time()\n\n def reset(self):\n \"\"\"Reset view and time step.\"\"\"\n self.reset_view()\n max_time = len(self._data['time']) - 1\n if max_time > 0:\n self.callbacks[\"time\"](\n self._data[\"initial_time_idx\"],\n update_widget=True,\n )\n self._renderer._update()\n\n def set_playback_speed(self, speed):\n \"\"\"Set the time playback speed.\n\n Parameters\n ----------\n speed : float\n The speed of the playback.\n \"\"\"\n self.playback_speed = speed\n\n @safe_event\n def _play(self):\n if self.playback:\n try:\n self._advance()\n except Exception:\n self.toggle_playback(value=False)\n raise\n\n def _advance(self):\n this_time = time.time()\n delta = this_time - self._last_tick\n self._last_tick = time.time()\n time_data = self._data['time']\n times = np.arange(self._n_times)\n time_shift = delta * self.playback_speed\n max_time = np.max(time_data)\n time_point = min(self._current_time + time_shift, max_time)\n # always use linear here -- this does not determine the data\n # interpolation mode, it just finds where we are (in time) in\n # terms of the time indices\n idx = np.interp(time_point, time_data, times)\n self.callbacks[\"time\"](idx, update_widget=True)\n if time_point == max_time:\n self.toggle_playback(value=False)\n\n def _configure_time_label(self):\n self.time_actor = self._data.get('time_actor')\n if self.time_actor is not None:\n self.time_actor.SetPosition(0.5, 0.03)\n self.time_actor.GetTextProperty().SetJustificationToCentered()\n self.time_actor.GetTextProperty().BoldOn()\n\n def _configure_scalar_bar(self):\n if self._scalar_bar is not None:\n 
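# reshape the colorbar into a slim vertical strip anchored near the lower-left corner (normalized viewport coordinates)\n 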
self._scalar_bar.SetOrientationToVertical()\n self._scalar_bar.SetHeight(0.6)\n self._scalar_bar.SetWidth(0.05)\n self._scalar_bar.SetPosition(0.02, 0.2)\n\n def _configure_dock_time_widget(self, layout=None):\n len_time = len(self._data['time']) - 1\n if len_time < 1:\n return\n layout = self._renderer.dock_layout if layout is None else layout\n hlayout = self._renderer._dock_add_layout(vertical=False)\n self.widgets[\"min_time\"] = self._renderer._dock_add_label(\n value=\"-\", layout=hlayout)\n self._renderer._dock_add_stretch(hlayout)\n self.widgets[\"current_time\"] = self._renderer._dock_add_label(\n value=\"x\", layout=hlayout)\n self._renderer._dock_add_stretch(hlayout)\n self.widgets[\"max_time\"] = self._renderer._dock_add_label(\n value=\"+\", layout=hlayout)\n self._renderer._layout_add_widget(layout, hlayout)\n min_time = float(self._data['time'][0])\n max_time = float(self._data['time'][-1])\n self.widgets[\"min_time\"].set_value(f\"{min_time: .3f}\")\n self.widgets[\"max_time\"].set_value(f\"{max_time: .3f}\")\n self.widgets[\"current_time\"].set_value(f\"{self._current_time: .3f}\")\n\n def _configure_dock_playback_widget(self, name):\n layout = self._renderer._dock_add_group_box(name)\n len_time = len(self._data['time']) - 1\n\n # Time widget\n if len_time < 1:\n self.callbacks[\"time\"] = None\n self.widgets[\"time\"] = None\n else:\n self.callbacks[\"time\"] = TimeCallBack(\n brain=self,\n callback=self.plot_time_line,\n )\n self.widgets[\"time\"] = self._renderer._dock_add_slider(\n name=\"Time (s)\",\n value=self._data['time_idx'],\n rng=[0, len_time],\n double=True,\n callback=self.callbacks[\"time\"],\n compact=False,\n layout=layout,\n )\n self.callbacks[\"time\"].widget = self.widgets[\"time\"]\n\n # Time labels\n if len_time < 1:\n self.widgets[\"min_time\"] = None\n self.widgets[\"max_time\"] = None\n self.widgets[\"current_time\"] = None\n else:\n self._configure_dock_time_widget(layout)\n self.callbacks[\"time\"].label = self.widgets[\"current_time\"]\n\n # Playback speed widget\n if len_time < 1:\n self.callbacks[\"playback_speed\"] = None\n self.widgets[\"playback_speed\"] = None\n else:\n self.callbacks[\"playback_speed\"] = SmartCallBack(\n callback=self.set_playback_speed,\n )\n self.widgets[\"playback_speed\"] = self._renderer._dock_add_spin_box(\n name=\"Speed\",\n value=self.default_playback_speed_value,\n rng=self.default_playback_speed_range,\n callback=self.callbacks[\"playback_speed\"],\n layout=layout,\n )\n self.callbacks[\"playback_speed\"].widget = \\\n self.widgets[\"playback_speed\"]\n\n # Time label\n current_time = self._current_time\n assert current_time is not None # should never be the case, float\n time_label = self._data['time_label']\n if callable(time_label):\n current_time = time_label(current_time)\n else:\n current_time = time_label\n if self.time_actor is not None:\n self.time_actor.SetInput(current_time)\n del current_time\n\n def _configure_dock_orientation_widget(self, name):\n layout = self._renderer._dock_add_group_box(name)\n # Renderer widget\n rends = [str(i) for i in range(len(self._renderer._all_renderers))]\n if len(rends) > 1:\n def select_renderer(idx):\n idx = int(idx)\n loc = self._renderer._index_to_loc(idx)\n self.plotter.subplot(*loc)\n\n self.callbacks[\"renderer\"] = SmartCallBack(\n callback=select_renderer,\n )\n self.widgets[\"renderer\"] = self._renderer._dock_add_combo_box(\n name=\"Renderer\",\n value=\"0\",\n rng=rends,\n callback=self.callbacks[\"renderer\"],\n layout=layout,\n )\n 
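# keep a back-reference so the callback can sync the combo box it drives\n 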
self.callbacks[\"renderer\"].widget = \\\n self.widgets[\"renderer\"]\n\n # Use 'lh' as a reference for orientation for 'both'\n if self._hemi == 'both':\n hemis_ref = ['lh']\n else:\n hemis_ref = self._hemis\n orientation_data = [None] * len(rends)\n for hemi in hemis_ref:\n for ri, ci, view in self._iter_views(hemi):\n idx = self._renderer._loc_to_index((ri, ci))\n if view == 'flat':\n _data = None\n else:\n _data = dict(default=view, hemi=hemi, row=ri, col=ci)\n orientation_data[idx] = _data\n self.callbacks[\"orientation\"] = ShowView(\n brain=self,\n data=orientation_data,\n )\n self.widgets[\"orientation\"] = self._renderer._dock_add_combo_box(\n name=None,\n value=self.orientation[0],\n rng=self.orientation,\n callback=self.callbacks[\"orientation\"],\n layout=layout,\n )\n\n def _configure_dock_colormap_widget(self, name):\n layout = self._renderer._dock_add_group_box(name)\n self._renderer._dock_add_label(\n value=\"min / mid / max\",\n align=True,\n layout=layout,\n )\n up = UpdateLUT(brain=self)\n for key in self.keys:\n hlayout = self._renderer._dock_add_layout(vertical=False)\n rng = _get_range(self)\n self.callbacks[key] = lambda value, key=key: up(**{key: value})\n self.widgets[key] = self._renderer._dock_add_slider(\n name=None,\n value=self._data[key],\n rng=rng,\n callback=self.callbacks[key],\n double=True,\n layout=hlayout,\n )\n self.widgets[f\"entry_{key}\"] = self._renderer._dock_add_spin_box(\n name=None,\n value=self._data[key],\n callback=self.callbacks[key],\n rng=rng,\n layout=hlayout,\n )\n up.widgets[key] = [self.widgets[key], self.widgets[f\"entry_{key}\"]]\n self._renderer._layout_add_widget(layout, hlayout)\n\n # reset / minus / plus\n hlayout = self._renderer._dock_add_layout(vertical=False)\n self._renderer._dock_add_label(\n value=\"Rescale\",\n align=True,\n layout=hlayout,\n )\n self.widgets[\"reset\"] = self._renderer._dock_add_button(\n name=\"↺\",\n callback=self.restore_user_scaling,\n layout=hlayout,\n )\n for key, char, val in ((\"fminus\", \"➖\", 1.2 ** -0.25),\n (\"fplus\", \"➕\", 1.2 ** 0.25)):\n self.callbacks[key] = UpdateColorbarScale(\n brain=self,\n factor=val,\n )\n self.widgets[key] = self._renderer._dock_add_button(\n name=char,\n callback=self.callbacks[key],\n layout=hlayout,\n )\n self._renderer._layout_add_widget(layout, hlayout)\n\n # register colorbar slider representations\n widgets = {key: self.widgets[key] for key in self.keys}\n for name in (\"fmin\", \"fmid\", \"fmax\", \"fminus\", \"fplus\"):\n self.callbacks[name].widgets = widgets\n\n def _configure_dock_trace_widget(self, name):\n if not self.show_traces:\n return\n # do not show trace mode for volumes\n if (self._data.get('src', None) is not None and\n self._data['src'].kind == 'volume'):\n self._configure_vertex_time_course()\n return\n\n layout = self._renderer._dock_add_group_box(name)\n\n # setup candidate annots\n def _set_annot(annot):\n self.clear_glyphs()\n self.remove_labels()\n self.remove_annotations()\n self.annot = annot\n\n if annot == 'None':\n self.traces_mode = 'vertex'\n self._configure_vertex_time_course()\n else:\n self.traces_mode = 'label'\n self._configure_label_time_course()\n self._renderer._update()\n\n # setup label extraction parameters\n def _set_label_mode(mode):\n if self.traces_mode != 'label':\n return\n glyphs = copy.deepcopy(self.picked_patches)\n self.label_extract_mode = mode\n self.clear_glyphs()\n for hemi in self._hemis:\n for label_id in glyphs[hemi]:\n label = self._annotation_labels[hemi][label_id]\n vertex_id = 
label.vertices[0]\n self._add_label_glyph(hemi, None, vertex_id)\n self.mpl_canvas.axes.relim()\n self.mpl_canvas.axes.autoscale_view()\n self.mpl_canvas.update_plot()\n self._renderer._update()\n\n from ...source_estimate import _get_allowed_label_modes\n from ...label import _read_annot_cands\n dir_name = op.join(self._subjects_dir, self._subject_id, 'label')\n cands = _read_annot_cands(dir_name, raise_error=False)\n cands = cands + ['None']\n self.annot = cands[0]\n stc = self._data[\"stc\"]\n modes = _get_allowed_label_modes(stc)\n if self._data[\"src\"] is None:\n modes = [m for m in modes if m not in\n self.default_label_extract_modes[\"src\"]]\n self.label_extract_mode = modes[-1]\n if self.traces_mode == 'vertex':\n _set_annot('None')\n else:\n _set_annot(self.annot)\n self.widgets[\"annotation\"] = self._renderer._dock_add_combo_box(\n name=\"Annotation\",\n value=self.annot,\n rng=cands,\n callback=_set_annot,\n layout=layout,\n )\n self.widgets[\"extract_mode\"] = self._renderer._dock_add_combo_box(\n name=\"Extract mode\",\n value=self.label_extract_mode,\n rng=modes,\n callback=_set_label_mode,\n layout=layout,\n )\n\n def _configure_dock(self):\n self._renderer._dock_initialize()\n self._configure_dock_playback_widget(name=\"Playback\")\n self._configure_dock_orientation_widget(name=\"Orientation\")\n self._configure_dock_colormap_widget(name=\"Color Limits\")\n self._configure_dock_trace_widget(name=\"Trace\")\n\n # Smoothing widget\n self.callbacks[\"smoothing\"] = SmartCallBack(\n callback=self.set_data_smoothing,\n )\n self.widgets[\"smoothing\"] = self._renderer._dock_add_spin_box(\n name=\"Smoothing\",\n value=self._data['smoothing_steps'],\n rng=self.default_smoothing_range,\n callback=self.callbacks[\"smoothing\"],\n double=False\n )\n self.callbacks[\"smoothing\"].widget = \\\n self.widgets[\"smoothing\"]\n\n self._renderer._dock_finalize()\n\n def _configure_playback(self):\n self._renderer._playback_initialize(\n func=self._play,\n timeout=self.refresh_rate_ms,\n value=self._data['time_idx'],\n rng=[0, len(self._data['time']) - 1],\n time_widget=self.widgets[\"time\"],\n play_widget=self.widgets[\"play\"],\n )\n\n def _configure_mplcanvas(self):\n # Get the fractional components for the brain and mpl\n self.mpl_canvas = self._renderer._window_get_mplcanvas(\n brain=self,\n interactor_fraction=self.interactor_fraction,\n show_traces=self.show_traces,\n separate_canvas=self.separate_canvas\n )\n xlim = [np.min(self._data['time']),\n np.max(self._data['time'])]\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n self.mpl_canvas.axes.set(xlim=xlim)\n if not self.separate_canvas:\n self._renderer._window_adjust_mplcanvas_layout()\n self.mpl_canvas.set_color(\n bg_color=self._bg_color,\n fg_color=self._fg_color,\n )\n\n def _configure_vertex_time_course(self):\n if not self.show_traces:\n return\n if self.mpl_canvas is None:\n self._configure_mplcanvas()\n else:\n self.clear_glyphs()\n\n # plot RMS of the activation\n y = np.concatenate(list(v[0] for v in self.act_data_smooth.values()\n if v[0] is not None))\n rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y))\n del y\n\n self.rms, = self.mpl_canvas.axes.plot(\n self._data['time'], rms,\n lw=3, label='RMS', zorder=3, color=self._fg_color,\n alpha=0.5, ls=':')\n\n # now plot the time line\n self.plot_time_line()\n\n # then the picked points\n for idx, hemi in enumerate(['lh', 'rh', 'vol']):\n act_data = self.act_data_smooth.get(hemi, [None])[0]\n if act_data is None:\n continue\n 
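# seed each hemisphere with a default glyph at the vertex of maximal absolute activation\n 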
hemi_data = self._data[hemi]\n vertices = hemi_data['vertices']\n\n # simulate a picked renderer\n if self._hemi in ('both', 'rh') or hemi == 'vol':\n idx = 0\n self.picked_renderer = self._renderer._all_renderers[idx]\n\n # initialize the default point\n if self._data['initial_time'] is not None:\n # pick at that time\n use_data = act_data[\n :, [np.round(self._data['time_idx']).astype(int)]]\n else:\n use_data = act_data\n ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None),\n use_data.shape)\n if hemi == 'vol':\n mesh = hemi_data['grid']\n else:\n mesh = self._layered_meshes[hemi]._polydata\n vertex_id = vertices[ind[0]]\n self._add_vertex_glyph(hemi, mesh, vertex_id)\n\n def _configure_picking(self):\n # get data for each hemi\n from scipy import sparse\n for idx, hemi in enumerate(['vol', 'lh', 'rh']):\n hemi_data = self._data.get(hemi)\n if hemi_data is not None:\n act_data = hemi_data['array']\n if act_data.ndim == 3:\n act_data = np.linalg.norm(act_data, axis=1)\n smooth_mat = hemi_data.get('smooth_mat')\n vertices = hemi_data['vertices']\n if hemi == 'vol':\n assert smooth_mat is None\n smooth_mat = sparse.csr_matrix(\n (np.ones(len(vertices)),\n (vertices, np.arange(len(vertices)))))\n self.act_data_smooth[hemi] = (act_data, smooth_mat)\n\n self._renderer._update_picking_callback(\n self._on_mouse_move,\n self._on_button_press,\n self._on_button_release,\n self._on_pick\n )\n\n def _configure_tool_bar(self):\n self._renderer._tool_bar_load_icons()\n self._renderer._tool_bar_set_theme(self.theme)\n self._renderer._tool_bar_initialize(name=\"Toolbar\")\n self._renderer._tool_bar_add_file_button(\n name=\"screenshot\",\n desc=\"Take a screenshot\",\n func=self.save_image,\n )\n self._renderer._tool_bar_add_file_button(\n name=\"movie\",\n desc=\"Save movie...\",\n func=self.save_movie,\n shortcut=\"ctrl+shift+s\",\n )\n self._renderer._tool_bar_add_button(\n name=\"visibility\",\n desc=\"Toggle Controls\",\n func=self.toggle_interface,\n icon_name=\"visibility_on\"\n )\n self.widgets[\"play\"] = self._renderer._tool_bar_add_play_button(\n name=\"play\",\n desc=\"Play/Pause\",\n func=self.toggle_playback,\n shortcut=\" \",\n )\n self._renderer._tool_bar_add_button(\n name=\"reset\",\n desc=\"Reset\",\n func=self.reset,\n )\n self._renderer._tool_bar_add_button(\n name=\"scale\",\n desc=\"Auto-Scale\",\n func=self.apply_auto_scaling,\n )\n self._renderer._tool_bar_add_button(\n name=\"clear\",\n desc=\"Clear traces\",\n func=self.clear_glyphs,\n )\n self._renderer._tool_bar_add_spacer()\n self._renderer._tool_bar_add_button(\n name=\"help\",\n desc=\"Help\",\n func=self.help,\n shortcut=\"?\",\n )\n\n def _shift_time(self, op):\n self.callbacks[\"time\"](\n value=(op(self._current_time, self.playback_speed)),\n time_as_index=False,\n update_widget=True,\n )\n\n def _rotate_azimuth(self, value):\n azimuth = (self._renderer.figure._azimuth + value) % 360\n self._renderer.set_camera(azimuth=azimuth, reset_camera=False)\n\n def _rotate_elevation(self, value):\n elevation = np.clip(\n self._renderer.figure._elevation + value,\n self._elevation_rng[0],\n self._elevation_rng[1],\n )\n self._renderer.set_camera(elevation=elevation, reset_camera=False)\n\n def _configure_shortcuts(self):\n # First, we remove the default bindings:\n self._clear_callbacks()\n # Then, we add our own:\n self.plotter.add_key_event(\"i\", self.toggle_interface)\n self.plotter.add_key_event(\"s\", self.apply_auto_scaling)\n self.plotter.add_key_event(\"r\", self.restore_user_scaling)\n 
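# "c" clears picked traces; "n"/"b" step the current time forward/backward by one playback increment\n 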
self.plotter.add_key_event(\"c\", self.clear_glyphs)\n self.plotter.add_key_event(\"n\", partial(self._shift_time,\n op=lambda x, y: x + y))\n self.plotter.add_key_event(\"b\", partial(self._shift_time,\n op=lambda x, y: x - y))\n for key, func, sign in ((\"Left\", self._rotate_azimuth, 1),\n (\"Right\", self._rotate_azimuth, -1),\n (\"Up\", self._rotate_elevation, 1),\n (\"Down\", self._rotate_elevation, -1)):\n self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE))\n\n def _configure_menu(self):\n self._renderer._menu_initialize()\n self._renderer._menu_add_submenu(\n name=\"help\",\n desc=\"Help\",\n )\n self._renderer._menu_add_button(\n menu_name=\"help\",\n name=\"help\",\n desc=\"Show MNE key bindings\\t?\",\n func=self.help,\n )\n\n def _configure_status_bar(self):\n self._renderer._status_bar_initialize()\n self.status_msg = self._renderer._status_bar_add_label(\n self.default_status_bar_msg, stretch=1)\n self.status_progress = self._renderer._status_bar_add_progress_bar()\n if self.status_progress is not None:\n self.status_progress.hide()\n\n def _on_mouse_move(self, vtk_picker, event):\n if self._mouse_no_mvt:\n self._mouse_no_mvt -= 1\n\n def _on_button_press(self, vtk_picker, event):\n self._mouse_no_mvt = 2\n\n def _on_button_release(self, vtk_picker, event):\n if self._mouse_no_mvt > 0:\n x, y = vtk_picker.GetEventPosition()\n # programmatically detect the picked renderer\n try:\n # pyvista<0.30.0\n self.picked_renderer = \\\n self.plotter.iren.FindPokedRenderer(x, y)\n except AttributeError:\n # pyvista>=0.30.0\n self.picked_renderer = \\\n self.plotter.iren.interactor.FindPokedRenderer(x, y)\n # trigger the pick\n self.plotter.picker.Pick(x, y, 0, self.picked_renderer)\n self._mouse_no_mvt = 0\n\n def _on_pick(self, vtk_picker, event):\n if not self.show_traces:\n return\n\n # vtk_picker is a vtkCellPicker\n cell_id = vtk_picker.GetCellId()\n mesh = vtk_picker.GetDataSet()\n\n if mesh is None or cell_id == -1 or not self._mouse_no_mvt:\n return # don't pick\n\n # 1) Check to see if there are any spheres along the ray\n if len(self._spheres):\n collection = vtk_picker.GetProp3Ds()\n found_sphere = None\n for ii in range(collection.GetNumberOfItems()):\n actor = collection.GetItemAsObject(ii)\n for sphere in self._spheres:\n if any(a is actor for a in sphere._actors):\n found_sphere = sphere\n break\n if found_sphere is not None:\n break\n if found_sphere is not None:\n assert found_sphere._is_glyph\n mesh = found_sphere\n\n # 2) Remove sphere if it's what we have\n if hasattr(mesh, \"_is_glyph\"):\n self._remove_vertex_glyph(mesh)\n return\n\n # 3) Otherwise, pick the objects in the scene\n try:\n hemi = mesh._hemi\n except AttributeError: # volume\n hemi = 'vol'\n else:\n assert hemi in ('lh', 'rh')\n if self.act_data_smooth[hemi][0] is None: # no data to add for hemi\n return\n pos = np.array(vtk_picker.GetPickPosition())\n if hemi == 'vol':\n # VTK will give us the point closest to the viewer in the vol.\n # We want to pick the point with the maximum value along the\n # camera-to-click array, which fortunately we can get \"just\"\n # by inspecting the points that are sufficiently close to the\n # ray.\n grid = mesh = self._data[hemi]['grid']\n vertices = self._data[hemi]['vertices']\n coords = self._data[hemi]['grid_coords'][vertices]\n scalars = grid.cell_arrays['values'][vertices]\n spacing = np.array(grid.GetSpacing())\n max_dist = np.linalg.norm(spacing) / 2.\n origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()\n ori = pos - origin\n ori 
/= np.linalg.norm(ori)\n # the magic formula: distance from a ray to a given point\n dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)\n assert dists.shape == (len(coords),)\n mask = dists <= max_dist\n idx = np.where(mask)[0]\n if len(idx) == 0:\n return # weird point on edge of volume?\n # useful for debugging the ray by mapping it into the volume:\n # dists = dists - dists.min()\n # dists = (1. - dists / dists.max()) * self._cmap_range[1]\n # grid.cell_arrays['values'][vertices] = dists * mask\n idx = idx[np.argmax(np.abs(scalars[idx]))]\n vertex_id = vertices[idx]\n # Naive way: convert pos directly to idx; i.e., apply mri_src_t\n # shape = self._data[hemi]['grid_shape']\n # taking into account the cell vs point difference (spacing/2)\n # shift = np.array(grid.GetOrigin()) + spacing / 2.\n # ijk = np.round((pos - shift) / spacing).astype(int)\n # vertex_id = np.ravel_multi_index(ijk, shape, order='F')\n else:\n vtk_cell = mesh.GetCell(cell_id)\n cell = [vtk_cell.GetPointId(point_id) for point_id\n in range(vtk_cell.GetNumberOfPoints())]\n vertices = mesh.points[cell]\n idx = np.argmin(abs(vertices - pos), axis=0)\n vertex_id = cell[idx[0]]\n\n if self.traces_mode == 'label':\n self._add_label_glyph(hemi, mesh, vertex_id)\n else:\n self._add_vertex_glyph(hemi, mesh, vertex_id)\n\n def _add_label_glyph(self, hemi, mesh, vertex_id):\n if hemi == 'vol':\n return\n label_id = self._vertex_to_label_id[hemi][vertex_id]\n label = self._annotation_labels[hemi][label_id]\n\n # remove the patch if already picked\n if label_id in self.picked_patches[hemi]:\n self._remove_label_glyph(hemi, label_id)\n return\n\n if hemi == label.hemi:\n self.add_label(label, borders=True, reset_camera=False)\n self.picked_patches[hemi].append(label_id)\n\n def _remove_label_glyph(self, hemi, label_id):\n label = self._annotation_labels[hemi][label_id]\n label._line.remove()\n self.color_cycle.restore(label._color)\n self.mpl_canvas.update_plot()\n self._layered_meshes[hemi].remove_overlay(label.name)\n self.picked_patches[hemi].remove(label_id)\n\n def _add_vertex_glyph(self, hemi, mesh, vertex_id):\n if vertex_id in self.picked_points[hemi]:\n return\n\n # skip if the wrong hemi is selected\n if self.act_data_smooth[hemi][0] is None:\n return\n color = next(self.color_cycle)\n line = self.plot_time_course(hemi, vertex_id, color)\n if hemi == 'vol':\n ijk = np.unravel_index(\n vertex_id, np.array(mesh.GetDimensions()) - 1, order='F')\n # should just be GetCentroid(center), but apparently it's VTK9+:\n # center = np.empty(3)\n # voxel.GetCentroid(center)\n voxel = mesh.GetCell(*ijk)\n pts = voxel.GetPoints()\n n_pts = pts.GetNumberOfPoints()\n center = np.empty((n_pts, 3))\n for ii in range(pts.GetNumberOfPoints()):\n pts.GetPoint(ii, center[ii])\n center = np.mean(center, axis=0)\n else:\n center = mesh.GetPoints().GetPoint(vertex_id)\n del mesh\n\n # from the picked renderer to the subplot coords\n try:\n lst = self._renderer._all_renderers._renderers\n except AttributeError:\n lst = self._renderer._all_renderers\n rindex = lst.index(self.picked_renderer)\n row, col = self._renderer._index_to_loc(rindex)\n\n actors = list()\n spheres = list()\n for ri, ci, _ in self._iter_views(hemi):\n self.plotter.subplot(ri, ci)\n # Using _sphere() instead of renderer.sphere() for 2 reasons:\n # 1) renderer.sphere() fails on Windows in a scenario where a lot\n # of picking requests are done in a short span of time (could be\n # mitigated with synchronization/delay?)\n # 2) the glyph filter is used in renderer.sphere() 
but only one\n # sphere is required in this function.\n actor, sphere = self._renderer._sphere(\n center=np.array(center),\n color=color,\n radius=4.0,\n )\n actors.append(actor)\n spheres.append(sphere)\n\n # add metadata for picking\n for sphere in spheres:\n sphere._is_glyph = True\n sphere._hemi = hemi\n sphere._line = line\n sphere._actors = actors\n sphere._color = color\n sphere._vertex_id = vertex_id\n\n self.picked_points[hemi].append(vertex_id)\n self._spheres.extend(spheres)\n self.pick_table[vertex_id] = spheres\n return sphere\n\n def _remove_vertex_glyph(self, mesh, render=True):\n vertex_id = mesh._vertex_id\n if vertex_id not in self.pick_table:\n return\n\n hemi = mesh._hemi\n color = mesh._color\n spheres = self.pick_table[vertex_id]\n spheres[0]._line.remove()\n self.mpl_canvas.update_plot()\n self.picked_points[hemi].remove(vertex_id)\n\n with warnings.catch_warnings(record=True):\n # We intentionally ignore these in case we have traversed the\n # entire color cycle\n warnings.simplefilter('ignore')\n self.color_cycle.restore(color)\n for sphere in spheres:\n # remove all actors\n self.plotter.remove_actor(sphere._actors, render=render)\n sphere._actors = None\n self._spheres.pop(self._spheres.index(sphere))\n self.pick_table.pop(vertex_id)\n\n def clear_glyphs(self):\n \"\"\"Clear the picking glyphs.\"\"\"\n if not self.time_viewer:\n return\n for sphere in list(self._spheres): # will remove itself, so copy\n self._remove_vertex_glyph(sphere, render=False)\n assert sum(len(v) for v in self.picked_points.values()) == 0\n assert len(self.pick_table) == 0\n assert len(self._spheres) == 0\n for hemi in self._hemis:\n for label_id in list(self.picked_patches[hemi]):\n self._remove_label_glyph(hemi, label_id)\n assert sum(len(v) for v in self.picked_patches.values()) == 0\n if self.rms is not None:\n self.rms.remove()\n self.rms = None\n self._renderer._update()\n\n def plot_time_course(self, hemi, vertex_id, color):\n \"\"\"Plot the vertex time course.\n\n Parameters\n ----------\n hemi : str\n The hemisphere id of the vertex.\n vertex_id : int\n The vertex identifier in the mesh.\n color : matplotlib color\n The color of the time course.\n\n Returns\n -------\n line : matplotlib object\n The time line object.\n \"\"\"\n if self.mpl_canvas is None:\n return\n time = self._data['time'].copy() # avoid circular ref\n mni = None\n if hemi == 'vol':\n hemi_str = 'V'\n xfm = read_talxfm(\n self._subject_id, self._subjects_dir)\n if self._units == 'mm':\n xfm['trans'][:3, 3] *= 1000.\n ijk = np.unravel_index(\n vertex_id, self._data[hemi]['grid_shape'], order='F')\n src_mri_t = self._data[hemi]['grid_src_mri_t']\n mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk)\n else:\n hemi_str = 'L' if hemi == 'lh' else 'R'\n try:\n mni = vertex_to_mni(\n vertices=vertex_id,\n hemis=0 if hemi == 'lh' else 1,\n subject=self._subject_id,\n subjects_dir=self._subjects_dir\n )\n except Exception:\n mni = None\n if mni is not None:\n mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni)\n else:\n mni = ''\n label = \"{}:{}{}\".format(hemi_str, str(vertex_id).ljust(6), mni)\n act_data, smooth = self.act_data_smooth[hemi]\n if smooth is not None:\n act_data = smooth[vertex_id].dot(act_data)[0]\n else:\n act_data = act_data[vertex_id].copy()\n line = self.mpl_canvas.plot(\n time,\n act_data,\n label=label,\n lw=1.,\n color=color,\n zorder=4,\n )\n return line\n\n def plot_time_line(self):\n \"\"\"Add the time line to the MPL widget.\"\"\"\n if self.mpl_canvas is None:\n return\n if 
isinstance(self.show_traces, bool) and self.show_traces:\n # add time information\n current_time = self._current_time\n if not hasattr(self, \"time_line\"):\n self.time_line = self.mpl_canvas.plot_time_line(\n x=current_time,\n label='time',\n color=self._fg_color,\n lw=1,\n )\n self.time_line.set_xdata(current_time)\n self.mpl_canvas.update_plot()\n\n def _configure_help(self):\n pairs = [\n ('?', 'Display help window'),\n ('i', 'Toggle interface'),\n ('s', 'Apply auto-scaling'),\n ('r', 'Restore original clim'),\n ('c', 'Clear all traces'),\n ('n', 'Shift the time forward by the playback speed'),\n ('b', 'Shift the time backward by the playback speed'),\n ('Space', 'Start/Pause playback'),\n ('Up', 'Decrease camera elevation angle'),\n ('Down', 'Increase camera elevation angle'),\n ('Left', 'Decrease camera azimuth angle'),\n ('Right', 'Increase camera azimuth angle'),\n ]\n text1, text2 = zip(*pairs)\n text1 = '\\n'.join(text1)\n text2 = '\\n'.join(text2)\n self.help_canvas = self._renderer._window_get_simple_canvas(\n width=5, height=2, dpi=80)\n _show_help_fig(\n col1=text1,\n col2=text2,\n fig_help=self.help_canvas.fig,\n ax=self.help_canvas.axes,\n show=False,\n )\n\n def help(self):\n \"\"\"Display the help window.\"\"\"\n self.help_canvas.show()\n\n def _clear_callbacks(self):\n if not hasattr(self, 'callbacks'):\n return\n for callback in self.callbacks.values():\n if callback is not None:\n for key in ('plotter', 'brain', 'callback',\n 'widget', 'widgets'):\n setattr(callback, key, None)\n self.callbacks.clear()\n # Remove the default key binding\n if getattr(self, \"iren\", None) is not None:\n self.plotter.iren.clear_key_event_callbacks()\n\n def _clear_widgets(self):\n if not hasattr(self, 'widgets'):\n return\n for widget in self.widgets.values():\n if widget is not None:\n for key in ('triggered', 'valueChanged'):\n setattr(widget, key, None)\n self.widgets.clear()\n\n @property\n def interaction(self):\n \"\"\"The interaction style.\"\"\"\n return self._interaction\n\n @interaction.setter\n def interaction(self, interaction):\n \"\"\"Set the interaction style.\"\"\"\n _validate_type(interaction, str, 'interaction')\n _check_option('interaction', interaction, ('trackball', 'terrain'))\n for ri, ci, _ in self._iter_views('vol'): # will traverse all\n self._renderer.subplot(ri, ci)\n self._renderer.set_interaction(interaction)\n\n def _cortex_colormap(self, cortex):\n \"\"\"Return the colormap corresponding to the cortex.\"\"\"\n colormap_map = dict(classic=dict(colormap=\"Greys\",\n vmin=-1, vmax=2),\n high_contrast=dict(colormap=\"Greys\",\n vmin=-.1, vmax=1.3),\n low_contrast=dict(colormap=\"Greys\",\n vmin=-5, vmax=5),\n bone=dict(colormap=\"bone_r\",\n vmin=-.2, vmax=2),\n )\n return colormap_map[cortex]\n\n @verbose\n def add_data(self, array, fmin=None, fmid=None, fmax=None,\n thresh=None, center=None, transparent=False, colormap=\"auto\",\n alpha=1, vertices=None, smoothing_steps=None, time=None,\n time_label=\"auto\", colorbar=True,\n hemi=None, remove_existing=None, time_label_size=None,\n initial_time=None, scale_factor=None, vector_alpha=None,\n clim=None, src=None, volume_options=0.4, colorbar_kwargs=None,\n verbose=None):\n \"\"\"Display data from a numpy array on the surface or volume.\n\n This provides a similar interface to\n :meth:`surfer.Brain.add_overlay`, but it displays\n it with a single colormap. 
It offers more flexibility over the\n colormap, and provides a way to display four-dimensional data\n (i.e., a timecourse) or five-dimensional data (i.e., a\n vector-valued timecourse).\n\n .. note:: ``fmin`` sets the low end of the colormap, and is separate\n from thresh (this is a different convention from\n :meth:`surfer.Brain.add_overlay`).\n\n Parameters\n ----------\n array : numpy array, shape (n_vertices[, 3][, n_times])\n Data array. For the data to be understood as vector-valued\n (3 values per vertex corresponding to X/Y/Z surface RAS),\n then ``array`` must be have all 3 dimensions.\n If vectors with no time dimension are desired, consider using a\n singleton (e.g., ``np.newaxis``) to create a \"time\" dimension\n and pass ``time_label=None`` (vector values are not supported).\n %(fmin_fmid_fmax)s\n %(thresh)s\n %(center)s\n %(transparent)s\n colormap : str, list of color, or array\n Name of matplotlib colormap to use, a list of matplotlib colors,\n or a custom look up table (an n x 4 array coded with RBGA values\n between 0 and 255), the default \"auto\" chooses a default divergent\n colormap, if \"center\" is given (currently \"icefire\"), otherwise a\n default sequential colormap (currently \"rocket\").\n alpha : float in [0, 1]\n Alpha level to control opacity of the overlay.\n vertices : numpy array\n Vertices for which the data is defined (needed if\n ``len(data) < nvtx``).\n smoothing_steps : int or None\n Number of smoothing steps (smoothing is used if len(data) < nvtx)\n The value 'nearest' can be used too. None (default) will use as\n many as necessary to fill the surface.\n time : numpy array\n Time points in the data array (if data is 2D or 3D).\n %(time_label)s\n colorbar : bool\n Whether to add a colorbar to the figure. Can also be a tuple\n to give the (row, col) index of where to put the colorbar.\n hemi : str | None\n If None, it is assumed to belong to the hemisphere being\n shown. If two hemispheres are being shown, an error will\n be thrown.\n remove_existing : bool\n Not supported yet.\n Remove surface added by previous \"add_data\" call. Useful for\n conserving memory when displaying different data in a loop.\n time_label_size : int\n Font size of the time label (default 14).\n initial_time : float | None\n Time initially shown in the plot. ``None`` to use the first time\n sample (default).\n scale_factor : float | None (default)\n The scale factor to use when displaying glyphs for vector-valued\n data.\n vector_alpha : float | None\n Alpha level to control opacity of the arrows. Only used for\n vector-valued data. If None (default), ``alpha`` is used.\n clim : dict\n Original clim arguments.\n %(src_volume_options)s\n colorbar_kwargs : dict | None\n Options to pass to :meth:`pyvista.BasePlotter.add_scalar_bar`\n (e.g., ``dict(title_font_size=10)``).\n %(verbose)s\n\n Notes\n -----\n If the data is defined for a subset of vertices (specified\n by the \"vertices\" parameter), a smoothing method is used to interpolate\n the data onto the high resolution surface. 
If the data is defined for\n subsampled version of the surface, smoothing_steps can be set to None,\n in which case only as many smoothing steps are applied until the whole\n surface is filled with non-zeros.\n\n Due to a Mayavi (or VTK) alpha rendering bug, ``vector_alpha`` is\n clamped to be strictly < 1.\n \"\"\"\n _validate_type(transparent, bool, 'transparent')\n _validate_type(vector_alpha, ('numeric', None), 'vector_alpha')\n _validate_type(scale_factor, ('numeric', None), 'scale_factor')\n\n # those parameters are not supported yet, only None is allowed\n _check_option('thresh', thresh, [None])\n _check_option('remove_existing', remove_existing, [None])\n _validate_type(time_label_size, (None, 'numeric'), 'time_label_size')\n if time_label_size is not None:\n time_label_size = float(time_label_size)\n if time_label_size < 0:\n raise ValueError('time_label_size must be positive, got '\n f'{time_label_size}')\n\n hemi = self._check_hemi(hemi, extras=['vol'])\n stc, array, vertices = self._check_stc(hemi, array, vertices)\n array = np.asarray(array)\n vector_alpha = alpha if vector_alpha is None else vector_alpha\n self._data['vector_alpha'] = vector_alpha\n self._data['scale_factor'] = scale_factor\n\n # Create time array and add label if > 1D\n if array.ndim <= 1:\n time_idx = 0\n else:\n # check time array\n if time is None:\n time = np.arange(array.shape[-1])\n else:\n time = np.asarray(time)\n if time.shape != (array.shape[-1],):\n raise ValueError('time has shape %s, but need shape %s '\n '(array.shape[-1])' %\n (time.shape, (array.shape[-1],)))\n self._data[\"time\"] = time\n\n if self._n_times is None:\n self._times = time\n elif len(time) != self._n_times:\n raise ValueError(\"New n_times is different from previous \"\n \"n_times\")\n elif not np.array_equal(time, self._times):\n raise ValueError(\"Not all time values are consistent with \"\n \"previously set times.\")\n\n # initial time\n if initial_time is None:\n time_idx = 0\n else:\n time_idx = self._to_time_index(initial_time)\n\n # time label\n time_label, _ = _handle_time(time_label, 's', time)\n y_txt = 0.05 + 0.1 * bool(colorbar)\n\n if array.ndim == 3:\n if array.shape[1] != 3:\n raise ValueError('If array has 3 dimensions, array.shape[1] '\n 'must equal 3, got %s' % (array.shape[1],))\n fmin, fmid, fmax = _update_limits(\n fmin, fmid, fmax, center, array\n )\n if colormap == 'auto':\n colormap = 'mne' if center is not None else 'hot'\n\n if smoothing_steps is None:\n smoothing_steps = 7\n elif smoothing_steps == 'nearest':\n smoothing_steps = 0\n elif isinstance(smoothing_steps, int):\n if smoothing_steps < 0:\n raise ValueError('Expected value of `smoothing_steps` is'\n ' positive but {} was given.'.format(\n smoothing_steps))\n else:\n raise TypeError('Expected type of `smoothing_steps` is int or'\n ' NoneType but {} was given.'.format(\n type(smoothing_steps)))\n\n self._data['stc'] = stc\n self._data['src'] = src\n self._data['smoothing_steps'] = smoothing_steps\n self._data['clim'] = clim\n self._data['time'] = time\n self._data['initial_time'] = initial_time\n self._data['time_label'] = time_label\n self._data['initial_time_idx'] = time_idx\n self._data['time_idx'] = time_idx\n self._data['transparent'] = transparent\n # data specific for a hemi\n self._data[hemi] = dict()\n self._data[hemi]['glyph_dataset'] = None\n self._data[hemi]['glyph_mapper'] = None\n self._data[hemi]['glyph_actor'] = None\n self._data[hemi]['array'] = array\n self._data[hemi]['vertices'] = vertices\n self._data['alpha'] = alpha\n 
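# cache colormap settings and limits; update_lut() below rebuilds the lookup table from them\n 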
self._data['colormap'] = colormap\n self._data['center'] = center\n self._data['fmin'] = fmin\n self._data['fmid'] = fmid\n self._data['fmax'] = fmax\n self.update_lut()\n\n # 1) add the surfaces first\n actor = None\n for ri, ci, _ in self._iter_views(hemi):\n self._renderer.subplot(ri, ci)\n if hemi in ('lh', 'rh'):\n actor = self._layered_meshes[hemi]._actor\n else:\n src_vol = src[2:] if src.kind == 'mixed' else src\n actor, _ = self._add_volume_data(hemi, src_vol, volume_options)\n assert actor is not None # should have added one\n\n # 2) update time and smoothing properties\n # set_data_smoothing calls \"set_time_point\" for us, which will set\n # _current_time\n self.set_time_interpolation(self.time_interpolation)\n self.set_data_smoothing(self._data['smoothing_steps'])\n\n # 3) add the other actors\n if colorbar is True:\n # botto left by default\n colorbar = (self._subplot_shape[0] - 1, 0)\n for ri, ci, v in self._iter_views(hemi):\n self._renderer.subplot(ri, ci)\n # Add the time label to the bottommost view\n do = (ri, ci) == colorbar\n if not self._time_label_added and time_label is not None and do:\n time_actor = self._renderer.text2d(\n x_window=0.95, y_window=y_txt,\n color=self._fg_color,\n size=time_label_size,\n text=time_label(self._current_time),\n justification='right'\n )\n self._data['time_actor'] = time_actor\n self._time_label_added = True\n if colorbar and self._scalar_bar is None and do:\n kwargs = dict(source=actor, n_labels=8, color=self._fg_color,\n bgcolor=self._brain_color[:3])\n kwargs.update(colorbar_kwargs or {})\n self._scalar_bar = self._renderer.scalarbar(**kwargs)\n self._renderer.set_camera(**views_dicts[hemi][v])\n\n # 4) update the scalar bar and opacity\n self.update_lut(alpha=alpha)\n\n def _iter_views(self, hemi):\n # which rows and columns each type of visual needs to be added to\n if self._hemi == 'split':\n hemi_dict = dict(lh=[0], rh=[1], vol=[0, 1])\n else:\n hemi_dict = dict(lh=[0], rh=[0], vol=[0])\n for vi, view in enumerate(self._views):\n if self._hemi == 'split':\n view_dict = dict(lh=[vi], rh=[vi], vol=[vi, vi])\n else:\n view_dict = dict(lh=[vi], rh=[vi], vol=[vi])\n if self._view_layout == 'vertical':\n rows = view_dict # views are rows\n cols = hemi_dict # hemis are columns\n else:\n rows = hemi_dict # hemis are rows\n cols = view_dict # views are columns\n for ri, ci in zip(rows[hemi], cols[hemi]):\n yield ri, ci, view\n\n def remove_labels(self):\n \"\"\"Remove all the ROI labels from the image.\"\"\"\n for hemi in self._hemis:\n mesh = self._layered_meshes[hemi]\n for label in self._labels[hemi]:\n mesh.remove_overlay(label.name)\n self._labels[hemi].clear()\n self._renderer._update()\n\n def remove_annotations(self):\n \"\"\"Remove all annotations from the image.\"\"\"\n for hemi in self._hemis:\n mesh = self._layered_meshes[hemi]\n mesh.remove_overlay(self._annots[hemi])\n self._annots[hemi].clear()\n self._renderer._update()\n\n def _add_volume_data(self, hemi, src, volume_options):\n _validate_type(src, SourceSpaces, 'src')\n _check_option('src.kind', src.kind, ('volume',))\n _validate_type(\n volume_options, (dict, 'numeric', None), 'volume_options')\n assert hemi == 'vol'\n if not isinstance(volume_options, dict):\n volume_options = dict(\n resolution=float(volume_options) if volume_options is not None\n else None)\n volume_options = _handle_default('volume_options', volume_options)\n allowed_types = (\n ['resolution', (None, 'numeric')],\n ['blending', (str,)],\n ['alpha', ('numeric', None)],\n ['surface_alpha', (None, 
'numeric')],\n ['silhouette_alpha', (None, 'numeric')],\n ['silhouette_linewidth', ('numeric',)],\n )\n for key, types in allowed_types:\n _validate_type(volume_options[key], types,\n f'volume_options[{repr(key)}]')\n extra_keys = set(volume_options) - set(a[0] for a in allowed_types)\n if len(extra_keys):\n raise ValueError(\n f'volume_options got unknown keys {sorted(extra_keys)}')\n blending = _check_option('volume_options[\"blending\"]',\n volume_options['blending'],\n ('composite', 'mip'))\n alpha = volume_options['alpha']\n if alpha is None:\n alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1.\n alpha = np.clip(float(alpha), 0., 1.)\n resolution = volume_options['resolution']\n surface_alpha = volume_options['surface_alpha']\n if surface_alpha is None:\n surface_alpha = min(alpha / 2., 0.1)\n silhouette_alpha = volume_options['silhouette_alpha']\n if silhouette_alpha is None:\n silhouette_alpha = surface_alpha / 4.\n silhouette_linewidth = volume_options['silhouette_linewidth']\n del volume_options\n volume_pos = self._data[hemi].get('grid_volume_pos')\n volume_neg = self._data[hemi].get('grid_volume_neg')\n center = self._data['center']\n if volume_pos is None:\n xyz = np.meshgrid(\n *[np.arange(s) for s in src[0]['shape']], indexing='ij')\n dimensions = np.array(src[0]['shape'], int)\n mult = 1000 if self._units == 'mm' else 1\n src_mri_t = src[0]['src_mri_t']['trans'].copy()\n src_mri_t[:3] *= mult\n if resolution is not None:\n resolution = resolution * mult / 1000. # to mm\n del src, mult\n coords = np.array([c.ravel(order='F') for c in xyz]).T\n coords = apply_trans(src_mri_t, coords)\n self.geo[hemi] = Bunch(coords=coords)\n vertices = self._data[hemi]['vertices']\n assert self._data[hemi]['array'].shape[0] == len(vertices)\n # MNE constructs the source space on a uniform grid in MRI space,\n # but mne coreg can change it to be non-uniform, so we need to\n # use all three elements here\n assert np.allclose(\n src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3]))\n spacing = np.diag(src_mri_t)[:3]\n origin = src_mri_t[:3, 3] - spacing / 2.\n scalars = np.zeros(np.prod(dimensions))\n scalars[vertices] = 1. 
# for the outer mesh\n grid, grid_mesh, volume_pos, volume_neg = \\\n self._renderer._volume(dimensions, origin, spacing, scalars,\n surface_alpha, resolution, blending,\n center)\n self._data[hemi]['alpha'] = alpha # incorrectly set earlier\n self._data[hemi]['grid'] = grid\n self._data[hemi]['grid_mesh'] = grid_mesh\n self._data[hemi]['grid_coords'] = coords\n self._data[hemi]['grid_src_mri_t'] = src_mri_t\n self._data[hemi]['grid_shape'] = dimensions\n self._data[hemi]['grid_volume_pos'] = volume_pos\n self._data[hemi]['grid_volume_neg'] = volume_neg\n actor_pos, _ = self._renderer.plotter.add_actor(\n volume_pos, reset_camera=False, name=None, culling=False)\n if volume_neg is not None:\n actor_neg, _ = self._renderer.plotter.add_actor(\n volume_neg, reset_camera=False, name=None, culling=False)\n else:\n actor_neg = None\n grid_mesh = self._data[hemi]['grid_mesh']\n if grid_mesh is not None:\n _, prop = self._renderer.plotter.add_actor(\n grid_mesh, reset_camera=False, name=None, culling=False,\n pickable=False)\n prop.SetColor(*self._brain_color[:3])\n prop.SetOpacity(surface_alpha)\n if silhouette_alpha > 0 and silhouette_linewidth > 0:\n for ri, ci, v in self._iter_views('vol'):\n self._renderer.subplot(ri, ci)\n self._renderer._silhouette(\n mesh=grid_mesh.GetInput(),\n color=self._brain_color[:3],\n line_width=silhouette_linewidth,\n alpha=silhouette_alpha,\n )\n\n return actor_pos, actor_neg\n\n def add_label(self, label, color=None, alpha=1, scalar_thresh=None,\n borders=False, hemi=None, subdir=None,\n reset_camera=True):\n \"\"\"Add an ROI label to the image.\n\n Parameters\n ----------\n label : str | instance of Label\n Label filepath or name. Can also be an instance of\n an object with attributes \"hemi\", \"vertices\", \"name\", and\n optionally \"color\" and \"values\" (if scalar_thresh is not None).\n color : matplotlib-style color | None\n Anything matplotlib accepts: string, RGB, hex, etc. (default\n \"crimson\").\n alpha : float in [0, 1]\n Alpha level to control opacity.\n scalar_thresh : None | float\n Threshold the label ids using this value in the label\n file's scalar field (i.e. label only vertices with\n scalar >= thresh).\n borders : bool | int\n Show only label borders. If int, specify the number of steps\n (away from the true border) along the cortical mesh to include\n as part of the border definition.\n hemi : str | None\n If None, it is assumed to belong to the hemipshere being\n shown.\n subdir : None | str\n If a label is specified as name, subdir can be used to indicate\n that the label file is in a sub-directory of the subject's\n label directory rather than in the label directory itself (e.g.\n for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label``\n ``brain.add_label('cuneus', subdir='aparc')``).\n reset_camera : bool\n If True, reset the camera view after adding the label. 
Defaults\n to True.\n\n Notes\n -----\n To remove previously added labels, run Brain.remove_labels().\n \"\"\"\n from matplotlib.colors import colorConverter\n from ...label import read_label\n if isinstance(label, str):\n if color is None:\n color = \"crimson\"\n\n if os.path.isfile(label):\n filepath = label\n label = read_label(filepath)\n hemi = label.hemi\n label_name = os.path.basename(filepath).split('.')[1]\n else:\n hemi = self._check_hemi(hemi)\n label_name = label\n label_fname = \".\".join([hemi, label_name, 'label'])\n if subdir is None:\n filepath = op.join(self._subjects_dir, self._subject_id,\n 'label', label_fname)\n else:\n filepath = op.join(self._subjects_dir, self._subject_id,\n 'label', subdir, label_fname)\n if not os.path.exists(filepath):\n raise ValueError('Label file %s does not exist'\n % filepath)\n label = read_label(filepath)\n ids = label.vertices\n scalars = label.values\n else:\n # try to extract parameters from label instance\n try:\n hemi = label.hemi\n ids = label.vertices\n if label.name is None:\n label.name = 'unnamed' + str(self._unnamed_label_id)\n self._unnamed_label_id += 1\n label_name = str(label.name)\n\n if color is None:\n if hasattr(label, 'color') and label.color is not None:\n color = label.color\n else:\n color = \"crimson\"\n\n if scalar_thresh is not None:\n scalars = label.values\n except Exception:\n raise ValueError('Label was not a filename (str), and could '\n 'not be understood as a class. The class '\n 'must have attributes \"hemi\", \"vertices\", '\n '\"name\", and (if scalar_thresh is not None)'\n '\"values\"')\n hemi = self._check_hemi(hemi)\n\n if scalar_thresh is not None:\n ids = ids[scalars >= scalar_thresh]\n\n scalars = np.zeros(self.geo[hemi].coords.shape[0])\n scalars[ids] = 1\n\n if self.time_viewer and self.show_traces \\\n and self.traces_mode == 'label':\n stc = self._data[\"stc\"]\n src = self._data[\"src\"]\n tc = stc.extract_label_time_course(label, src=src,\n mode=self.label_extract_mode)\n tc = tc[0] if tc.ndim == 2 else tc[0, 0, :]\n color = next(self.color_cycle)\n line = self.mpl_canvas.plot(\n self._data['time'], tc, label=label_name,\n color=color)\n else:\n line = None\n\n orig_color = color\n color = colorConverter.to_rgba(color, alpha)\n cmap = np.array([(0, 0, 0, 0,), color])\n ctable = np.round(cmap * 255).astype(np.uint8)\n\n for ri, ci, v in self._iter_views(hemi):\n self._renderer.subplot(ri, ci)\n if borders:\n n_vertices = scalars.size\n edges = mesh_edges(self.geo[hemi].faces)\n edges = edges.tocoo()\n border_edges = scalars[edges.row] != scalars[edges.col]\n show = np.zeros(n_vertices, dtype=np.int64)\n keep_idx = np.unique(edges.row[border_edges])\n if isinstance(borders, int):\n for _ in range(borders):\n keep_idx = np.in1d(\n self.geo[hemi].faces.ravel(), keep_idx)\n keep_idx.shape = self.geo[hemi].faces.shape\n keep_idx = self.geo[hemi].faces[np.any(\n keep_idx, axis=1)]\n keep_idx = np.unique(keep_idx)\n show[keep_idx] = 1\n scalars *= show\n\n mesh = self._layered_meshes[hemi]\n mesh.add_overlay(\n scalars=scalars,\n colormap=ctable,\n rng=[np.min(scalars), np.max(scalars)],\n opacity=alpha,\n name=label_name,\n )\n if reset_camera:\n self._renderer.set_camera(**views_dicts[hemi][v])\n if self.time_viewer and self.show_traces \\\n and self.traces_mode == 'label':\n label._color = orig_color\n label._line = line\n self._labels[hemi].append(label)\n self._renderer._update()\n\n def add_foci(self, coords, coords_as_verts=False, map_surface=None,\n scale_factor=1, color=\"white\", alpha=1, 
name=None,\n hemi=None, resolution=50):\n \"\"\"Add spherical foci, possibly mapping to displayed surf.\n\n The foci spheres can be displayed at the coordinates given, or\n mapped through a surface geometry. In other words, coordinates\n from a volume-based analysis in MNI space can be displayed on an\n inflated average surface by finding the closest vertex on the\n white surface and mapping to that vertex on the inflated mesh.\n\n Parameters\n ----------\n coords : ndarray, shape (n_coords, 3)\n Coordinates in stereotaxic space (default) or array of\n vertex ids (with ``coord_as_verts=True``).\n coords_as_verts : bool\n Whether the coords parameter should be interpreted as vertex ids.\n map_surface : None\n Surface to map coordinates through, or None to use raw coords.\n scale_factor : float\n Controls the size of the foci spheres (relative to 1cm).\n color : matplotlib color code\n HTML name, RBG tuple, or hex code.\n alpha : float in [0, 1]\n Opacity of focus gylphs.\n name : str\n Internal name to use.\n hemi : str | None\n If None, it is assumed to belong to the hemipshere being\n shown. If two hemispheres are being shown, an error will\n be thrown.\n resolution : int\n The resolution of the spheres.\n \"\"\"\n from matplotlib.colors import colorConverter\n hemi = self._check_hemi(hemi, extras=['vol'])\n\n # those parameters are not supported yet, only None is allowed\n _check_option('map_surface', map_surface, [None])\n\n # Figure out how to interpret the first parameter\n if coords_as_verts:\n coords = self.geo[hemi].coords[coords]\n\n # Convert the color code\n if not isinstance(color, tuple):\n color = colorConverter.to_rgb(color)\n\n if self._units == 'm':\n scale_factor = scale_factor / 1000.\n for ri, ci, v in self._iter_views(hemi):\n self._renderer.subplot(ri, ci)\n self._renderer.sphere(center=coords, color=color,\n scale=(10. * scale_factor),\n opacity=alpha, resolution=resolution)\n self._renderer.set_camera(**views_dicts[hemi][v])\n\n def add_text(self, x, y, text, name=None, color=None, opacity=1.0,\n row=-1, col=-1, font_size=None, justification=None):\n \"\"\"Add a text to the visualization.\n\n Parameters\n ----------\n x : float\n X coordinate.\n y : float\n Y coordinate.\n text : str\n Text to add.\n name : str\n Name of the text (text label can be updated using update_text()).\n color : tuple\n Color of the text. 
Default is the foreground color set during\n initialization (default is black or white depending on the\n background color).\n opacity : float\n Opacity of the text (default 1.0).\n row : int\n Row index of which brain to use.\n col : int\n Column index of which brain to use.\n font_size : float | None\n The font size to use.\n justification : str | None\n The text justification.\n \"\"\"\n # XXX: support `name` should be added when update_text/remove_text\n # are implemented\n # _check_option('name', name, [None])\n\n self._renderer.text2d(x_window=x, y_window=y, text=text, color=color,\n size=font_size, justification=justification)\n\n def _configure_label_time_course(self):\n from ...label import read_labels_from_annot\n if not self.show_traces:\n return\n if self.mpl_canvas is None:\n self._configure_mplcanvas()\n else:\n self.clear_glyphs()\n self.traces_mode = 'label'\n self.add_annotation(self.annot, color=\"w\", alpha=0.75)\n\n # now plot the time line\n self.plot_time_line()\n self.mpl_canvas.update_plot()\n\n for hemi in self._hemis:\n labels = read_labels_from_annot(\n subject=self._subject_id,\n parc=self.annot,\n hemi=hemi,\n subjects_dir=self._subjects_dir\n )\n self._vertex_to_label_id[hemi] = np.full(\n self.geo[hemi].coords.shape[0], -1)\n self._annotation_labels[hemi] = labels\n for idx, label in enumerate(labels):\n self._vertex_to_label_id[hemi][label.vertices] = idx\n\n def add_annotation(self, annot, borders=True, alpha=1, hemi=None,\n remove_existing=True, color=None, **kwargs):\n \"\"\"Add an annotation file.\n\n Parameters\n ----------\n annot : str | tuple\n Either path to annotation file or annotation name. Alternatively,\n the annotation can be specified as a ``(labels, ctab)`` tuple per\n hemisphere, i.e. ``annot=(labels, ctab)`` for a single hemisphere\n or ``annot=((lh_labels, lh_ctab), (rh_labels, rh_ctab))`` for both\n hemispheres. ``labels`` and ``ctab`` should be arrays as returned\n by :func:`nibabel.freesurfer.io.read_annot`.\n borders : bool | int\n Show only label borders. If int, specify the number of steps\n (away from the true border) along the cortical mesh to include\n as part of the border definition.\n alpha : float in [0, 1]\n Alpha level to control opacity.\n hemi : str | None\n If None, it is assumed to belong to the hemipshere being\n shown. 
If two hemispheres are being shown, data must exist\n for both hemispheres.\n remove_existing : bool\n If True (default), remove old annotations.\n color : matplotlib-style color code\n If used, show all annotations in the same (specified) color.\n Probably useful only when showing annotation borders.\n **kwargs : dict\n These are passed to the underlying\n ``mayavi.mlab.pipeline.surface`` call.\n \"\"\"\n from ...label import _read_annot\n hemis = self._check_hemis(hemi)\n\n # Figure out where the data is coming from\n if isinstance(annot, str):\n if os.path.isfile(annot):\n filepath = annot\n path = os.path.split(filepath)[0]\n file_hemi, annot = os.path.basename(filepath).split('.')[:2]\n if len(hemis) > 1:\n if annot[:2] == 'lh.':\n filepaths = [filepath, op.join(path, 'rh' + annot[2:])]\n elif annot[:2] == 'rh.':\n filepaths = [op.join(path, 'lh' + annot[2:], filepath)]\n else:\n raise RuntimeError('To add both hemispheres '\n 'simultaneously, filename must '\n 'begin with \"lh.\" or \"rh.\"')\n else:\n filepaths = [filepath]\n else:\n filepaths = []\n for hemi in hemis:\n filepath = op.join(self._subjects_dir,\n self._subject_id,\n 'label',\n \".\".join([hemi, annot, 'annot']))\n if not os.path.exists(filepath):\n raise ValueError('Annotation file %s does not exist'\n % filepath)\n filepaths += [filepath]\n annots = []\n for hemi, filepath in zip(hemis, filepaths):\n # Read in the data\n labels, cmap, _ = _read_annot(filepath)\n annots.append((labels, cmap))\n else:\n annots = [annot] if len(hemis) == 1 else annot\n annot = 'annotation'\n\n for hemi, (labels, cmap) in zip(hemis, annots):\n # Maybe zero-out the non-border vertices\n self._to_borders(labels, hemi, borders)\n\n # Handle null labels properly\n cmap[:, 3] = 255\n bgcolor = np.round(np.array(self._brain_color) * 255).astype(int)\n bgcolor[-1] = 0\n cmap[cmap[:, 4] < 0, 4] += 2 ** 24 # wrap to positive\n cmap[cmap[:, 4] <= 0, :4] = bgcolor\n if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0):\n cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]])))\n\n # Set label ids sensibly\n order = np.argsort(cmap[:, -1])\n cmap = cmap[order]\n ids = np.searchsorted(cmap[:, -1], labels)\n cmap = cmap[:, :4]\n\n # Set the alpha level\n alpha_vec = cmap[:, 3]\n alpha_vec[alpha_vec > 0] = alpha * 255\n\n # Override the cmap when a single color is used\n if color is not None:\n from matplotlib.colors import colorConverter\n rgb = np.round(np.multiply(colorConverter.to_rgb(color), 255))\n cmap[:, :3] = rgb.astype(cmap.dtype)\n\n ctable = cmap.astype(np.float64)\n for ri, ci, _ in self._iter_views(hemi):\n self._renderer.subplot(ri, ci)\n mesh = self._layered_meshes[hemi]\n mesh.add_overlay(\n scalars=ids,\n colormap=ctable,\n rng=[np.min(ids), np.max(ids)],\n opacity=alpha,\n name=annot,\n )\n self._annots[hemi].append(annot)\n if not self.time_viewer or self.traces_mode == 'vertex':\n self._renderer._set_colormap_range(\n mesh._actor, cmap.astype(np.uint8), None)\n\n self._renderer._update()\n\n def close(self):\n \"\"\"Close all figures and cleanup data structure.\"\"\"\n self._closed = True\n self._renderer.close()\n\n def show(self):\n \"\"\"Display the window.\"\"\"\n self._renderer.show()\n\n def show_view(self, view=None, roll=None, distance=None, row=0, col=0,\n hemi=None, align=True):\n \"\"\"Orient camera to display view.\n\n Parameters\n ----------\n view : str | dict\n String view, or a dict with azimuth and elevation.\n roll : float | None\n The roll.\n distance : float | None\n The distance.\n row : int\n The row to 
set.\n col : int\n The column to set.\n hemi : str\n Which hemi to use for string lookup (when in \"both\" mode).\n align : bool\n If True, consider view arguments relative to canonical MRI\n directions (closest to MNI for the subject) rather than native MRI\n space. This helps when MRIs are not in standard orientation (e.g.,\n have large rotations).\n \"\"\"\n hemi = self._hemi if hemi is None else hemi\n if hemi == 'split':\n if (self._view_layout == 'vertical' and col == 1 or\n self._view_layout == 'horizontal' and row == 1):\n hemi = 'rh'\n else:\n hemi = 'lh'\n if isinstance(view, str):\n view = views_dicts[hemi].get(view)\n view = view.copy()\n if roll is not None:\n view.update(roll=roll)\n if distance is not None:\n view.update(distance=distance)\n self._renderer.subplot(row, col)\n xfm = self._rigid if align else None\n self._renderer.set_camera(**view, reset_camera=False, rigid=xfm)\n self._renderer._update()\n\n def reset_view(self):\n \"\"\"Reset the camera.\"\"\"\n for h in self._hemis:\n for ri, ci, v in self._iter_views(h):\n self._renderer.subplot(ri, ci)\n self._renderer.set_camera(**views_dicts[h][v],\n reset_camera=False)\n\n def save_image(self, filename=None, mode='rgb'):\n \"\"\"Save view from all panels to disk.\n\n Parameters\n ----------\n filename : str\n Path to new image file.\n mode : str\n Either 'rgb' or 'rgba' for values to return.\n \"\"\"\n if filename is None:\n filename = _generate_default_filename(\".png\")\n _save_ndarray_img(\n filename, self.screenshot(mode=mode, time_viewer=True))\n\n @fill_doc\n def screenshot(self, mode='rgb', time_viewer=False):\n \"\"\"Generate a screenshot of current view.\n\n Parameters\n ----------\n mode : str\n Either 'rgb' or 'rgba' for values to return.\n %(brain_screenshot_time_viewer)s\n\n Returns\n -------\n screenshot : array\n Image pixel values.\n \"\"\"\n img = self._renderer.screenshot(mode)\n logger.debug(f'Got screenshot of size {img.shape}')\n if time_viewer and self.time_viewer and \\\n self.show_traces and \\\n not self.separate_canvas:\n from matplotlib.image import imread\n canvas = self.mpl_canvas.fig.canvas\n canvas.draw_idle()\n fig = self.mpl_canvas.fig\n with BytesIO() as output:\n # Need to pass dpi here so it uses the physical (HiDPI) DPI\n # rather than logical DPI when saving in most cases.\n # But when matplotlib uses HiDPI and VTK doesn't\n # (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work,\n # so let's just calculate the DPI we need to get\n # the correct size output based on the widths being equal\n size_in = fig.get_size_inches()\n dpi = fig.get_dpi()\n want_size = tuple(x * dpi for x in size_in)\n n_pix = want_size[0] * want_size[1]\n logger.debug(\n f'Saving figure of size {size_in} @ {dpi} DPI '\n f'({want_size} = {n_pix} pixels)')\n # Sometimes there can be off-by-one errors here (e.g.,\n # if in mpl int() rather than int(round()) is used to\n # compute the number of pixels) so rather than use \"raw\"\n # format and try to reshape ourselves, just write to PNG\n # and read it, which has the dimensions encoded for us.\n fig.savefig(output, dpi=dpi, format='png',\n facecolor=self._bg_color, edgecolor='none')\n output.seek(0)\n trace_img = imread(output, format='png')[:, :, :3]\n trace_img = np.clip(\n np.round(trace_img * 255), 0, 255).astype(np.uint8)\n bgcolor = np.array(self._brain_color[:3]) / 255\n img = concatenate_images([img, trace_img], bgcolor=bgcolor)\n return img\n\n @contextlib.contextmanager\n def _no_lut_update(self, why):\n orig = self._lut_locked\n self._lut_locked = 
why\n try:\n yield\n finally:\n self._lut_locked = orig\n\n @fill_doc\n def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None):\n \"\"\"Update color map.\n\n Parameters\n ----------\n %(fmin_fmid_fmax)s\n alpha : float | None\n Alpha to use in the update.\n \"\"\"\n args = f'{fmin}, {fmid}, {fmax}, {alpha}'\n if self._lut_locked is not None:\n logger.debug(f'LUT update postponed with {args}')\n return\n logger.debug(f'Updating LUT with {args}')\n center = self._data['center']\n colormap = self._data['colormap']\n transparent = self._data['transparent']\n lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')}\n _update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax)\n assert all(val is not None for val in lims.values())\n\n self._data.update(lims)\n self._data['ctable'] = np.round(\n calculate_lut(colormap, alpha=1., center=center,\n transparent=transparent, **lims) *\n 255).astype(np.uint8)\n # update our values\n rng = self._cmap_range\n ctable = self._data['ctable']\n for hemi in ['lh', 'rh', 'vol']:\n hemi_data = self._data.get(hemi)\n if hemi_data is not None:\n if hemi in self._layered_meshes:\n mesh = self._layered_meshes[hemi]\n mesh.update_overlay(name='data',\n colormap=self._data['ctable'],\n opacity=alpha,\n rng=rng)\n self._renderer._set_colormap_range(\n mesh._actor, ctable, self._scalar_bar, rng,\n self._brain_color)\n\n grid_volume_pos = hemi_data.get('grid_volume_pos')\n grid_volume_neg = hemi_data.get('grid_volume_neg')\n for grid_volume in (grid_volume_pos, grid_volume_neg):\n if grid_volume is not None:\n self._renderer._set_volume_range(\n grid_volume, ctable, hemi_data['alpha'],\n self._scalar_bar, rng)\n\n glyph_actor = hemi_data.get('glyph_actor')\n if glyph_actor is not None:\n for glyph_actor_ in glyph_actor:\n self._renderer._set_colormap_range(\n glyph_actor_, ctable, self._scalar_bar, rng)\n if self.time_viewer:\n with self._no_lut_update(f'update_lut {args}'):\n for key in ('fmin', 'fmid', 'fmax'):\n self.callbacks[key](lims[key])\n self._renderer._update()\n\n def set_data_smoothing(self, n_steps):\n \"\"\"Set the number of smoothing steps.\n\n Parameters\n ----------\n n_steps : int\n Number of smoothing steps.\n \"\"\"\n from scipy import sparse\n from ...morph import _hemi_morph\n for hemi in ['lh', 'rh']:\n hemi_data = self._data.get(hemi)\n if hemi_data is not None:\n if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]:\n continue\n vertices = hemi_data['vertices']\n if vertices is None:\n raise ValueError(\n 'len(data) < nvtx (%s < %s): the vertices '\n 'parameter must not be None'\n % (len(hemi_data), self.geo[hemi].x.shape[0]))\n morph_n_steps = 'nearest' if n_steps == 0 else n_steps\n maps = sparse.eye(len(self.geo[hemi].coords), format='csr')\n with use_log_level(False):\n smooth_mat = _hemi_morph(\n self.geo[hemi].orig_faces,\n np.arange(len(self.geo[hemi].coords)),\n vertices, morph_n_steps, maps, warn=False)\n self._data[hemi]['smooth_mat'] = smooth_mat\n self.set_time_point(self._data['time_idx'])\n self._data['smoothing_steps'] = n_steps\n\n @property\n def _n_times(self):\n return len(self._times) if self._times is not None else None\n\n @property\n def time_interpolation(self):\n \"\"\"The interpolation mode.\"\"\"\n return self._time_interpolation\n\n @fill_doc\n def set_time_interpolation(self, interpolation):\n \"\"\"Set the interpolation mode.\n\n Parameters\n ----------\n %(brain_time_interpolation)s\n \"\"\"\n self._time_interpolation = _check_option(\n 'interpolation',\n interpolation,\n ('linear', 'nearest', 
'zero', 'slinear', 'quadratic', 'cubic')\n )\n self._time_interp_funcs = dict()\n self._time_interp_inv = None\n if self._times is not None:\n idx = np.arange(self._n_times)\n for hemi in ['lh', 'rh', 'vol']:\n hemi_data = self._data.get(hemi)\n if hemi_data is not None:\n array = hemi_data['array']\n self._time_interp_funcs[hemi] = _safe_interp1d(\n idx, array, self._time_interpolation, axis=-1,\n assume_sorted=True)\n self._time_interp_inv = _safe_interp1d(idx, self._times)\n\n def set_time_point(self, time_idx):\n \"\"\"Set the time point shown (can be a float to interpolate).\n\n Parameters\n ----------\n time_idx : int | float\n The time index to use. Can be a float to use interpolation\n between indices.\n \"\"\"\n self._current_act_data = dict()\n time_actor = self._data.get('time_actor', None)\n time_label = self._data.get('time_label', None)\n for hemi in ['lh', 'rh', 'vol']:\n hemi_data = self._data.get(hemi)\n if hemi_data is not None:\n array = hemi_data['array']\n # interpolate in time\n vectors = None\n if array.ndim == 1:\n act_data = array\n self._current_time = 0\n else:\n act_data = self._time_interp_funcs[hemi](time_idx)\n self._current_time = self._time_interp_inv(time_idx)\n if array.ndim == 3:\n vectors = act_data\n act_data = np.linalg.norm(act_data, axis=1)\n self._current_time = self._time_interp_inv(time_idx)\n self._current_act_data[hemi] = act_data\n if time_actor is not None and time_label is not None:\n time_actor.SetInput(time_label(self._current_time))\n\n # update the volume interpolation\n grid = hemi_data.get('grid')\n if grid is not None:\n vertices = self._data['vol']['vertices']\n values = self._current_act_data['vol']\n rng = self._cmap_range\n fill = 0 if self._data['center'] is not None else rng[0]\n grid.cell_arrays['values'].fill(fill)\n # XXX for sided data, we probably actually need two\n # volumes as composite/MIP needs to look at two\n # extremes... for now just use abs. 
Eventually we can add\n # two volumes if we want.\n grid.cell_arrays['values'][vertices] = values\n\n # interpolate in space\n smooth_mat = hemi_data.get('smooth_mat')\n if smooth_mat is not None:\n act_data = smooth_mat.dot(act_data)\n\n # update the mesh scalar values\n if hemi in self._layered_meshes:\n mesh = self._layered_meshes[hemi]\n if 'data' in mesh._overlays:\n mesh.update_overlay(name='data', scalars=act_data)\n else:\n mesh.add_overlay(\n scalars=act_data,\n colormap=self._data['ctable'],\n rng=self._cmap_range,\n opacity=None,\n name='data',\n )\n\n # update the glyphs\n if vectors is not None:\n self._update_glyphs(hemi, vectors)\n\n self._data['time_idx'] = time_idx\n self._renderer._update()\n\n def set_time(self, time):\n \"\"\"Set the time to display (in seconds).\n\n Parameters\n ----------\n time : float\n The time to show, in seconds.\n \"\"\"\n if self._times is None:\n raise ValueError(\n 'Cannot set time when brain has no defined times.')\n elif min(self._times) <= time <= max(self._times):\n self.set_time_point(np.interp(float(time), self._times,\n np.arange(self._n_times)))\n else:\n raise ValueError(\n f'Requested time ({time} s) is outside the range of '\n f'available times ({min(self._times)}-{max(self._times)} s).')\n\n def _update_glyphs(self, hemi, vectors):\n hemi_data = self._data.get(hemi)\n assert hemi_data is not None\n vertices = hemi_data['vertices']\n vector_alpha = self._data['vector_alpha']\n scale_factor = self._data['scale_factor']\n vertices = slice(None) if vertices is None else vertices\n x, y, z = np.array(self.geo[hemi].coords)[vertices].T\n\n if hemi_data['glyph_actor'] is None:\n add = True\n hemi_data['glyph_actor'] = list()\n else:\n add = False\n count = 0\n for ri, ci, _ in self._iter_views(hemi):\n self._renderer.subplot(ri, ci)\n if hemi_data['glyph_dataset'] is None:\n glyph_mapper, glyph_dataset = self._renderer.quiver3d(\n x, y, z,\n vectors[:, 0], vectors[:, 1], vectors[:, 2],\n color=None,\n mode='2darrow',\n scale_mode='vector',\n scale=scale_factor,\n opacity=vector_alpha,\n name=str(hemi) + \"_glyph\"\n )\n hemi_data['glyph_dataset'] = glyph_dataset\n hemi_data['glyph_mapper'] = glyph_mapper\n else:\n glyph_dataset = hemi_data['glyph_dataset']\n glyph_dataset.point_arrays['vec'] = vectors\n glyph_mapper = hemi_data['glyph_mapper']\n if add:\n glyph_actor = self._renderer._actor(glyph_mapper)\n prop = glyph_actor.GetProperty()\n prop.SetLineWidth(2.)\n prop.SetOpacity(vector_alpha)\n self._renderer.plotter.add_actor(glyph_actor)\n hemi_data['glyph_actor'].append(glyph_actor)\n else:\n glyph_actor = hemi_data['glyph_actor'][count]\n count += 1\n self._renderer._set_colormap_range(\n actor=glyph_actor,\n ctable=self._data['ctable'],\n scalar_bar=None,\n rng=self._cmap_range,\n )\n\n @property\n def _cmap_range(self):\n dt_max = self._data['fmax']\n if self._data['center'] is None:\n dt_min = self._data['fmin']\n else:\n dt_min = -1 * dt_max\n rng = [dt_min, dt_max]\n return rng\n\n def _update_fscale(self, fscale):\n \"\"\"Scale the colorbar points.\"\"\"\n fmin = self._data['fmin'] * fscale\n fmid = self._data['fmid'] * fscale\n fmax = self._data['fmax'] * fscale\n self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)\n\n def _update_auto_scaling(self, restore=False):\n user_clim = self._data['clim']\n if user_clim is not None and 'lims' in user_clim:\n allow_pos_lims = False\n else:\n allow_pos_lims = True\n if user_clim is not None and restore:\n clim = user_clim\n else:\n clim = 'auto'\n colormap = self._data['colormap']\n 
transparent = self._data['transparent']\n mapdata = _process_clim(\n clim, colormap, transparent,\n np.concatenate(list(self._current_act_data.values())),\n allow_pos_lims)\n diverging = 'pos_lims' in mapdata['clim']\n colormap = mapdata['colormap']\n scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']\n transparent = mapdata['transparent']\n del mapdata\n fmin, fmid, fmax = scale_pts\n center = 0. if diverging else None\n self._data['center'] = center\n self._data['colormap'] = colormap\n self._data['transparent'] = transparent\n self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)\n\n def _to_time_index(self, value):\n \"\"\"Return the interpolated time index of the given time value.\"\"\"\n time = self._data['time']\n value = np.interp(value, time, np.arange(len(time)))\n return value\n\n @property\n def data(self):\n \"\"\"Data used by time viewer and color bar widgets.\"\"\"\n return self._data\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def views(self):\n return self._views\n\n @property\n def hemis(self):\n return self._hemis\n\n def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,\n framerate=24, interpolation=None, codec=None,\n bitrate=None, callback=None, time_viewer=False, **kwargs):\n import imageio\n with self._renderer._disabled_interaction():\n images = self._make_movie_frames(\n time_dilation, tmin, tmax, framerate, interpolation, callback,\n time_viewer)\n # find imageio FFMPEG parameters\n if 'fps' not in kwargs:\n kwargs['fps'] = framerate\n if codec is not None:\n kwargs['codec'] = codec\n if bitrate is not None:\n kwargs['bitrate'] = bitrate\n imageio.mimwrite(filename, images, **kwargs)\n\n def _save_movie_tv(self, filename, time_dilation=4., tmin=None, tmax=None,\n framerate=24, interpolation=None, codec=None,\n bitrate=None, callback=None, time_viewer=False,\n **kwargs):\n def frame_callback(frame, n_frames):\n if frame == n_frames:\n # On the ImageIO step\n self.status_msg.set_value(\n \"Saving with ImageIO: %s\"\n % filename\n )\n self.status_msg.show()\n self.status_progress.hide()\n self._renderer._status_bar_update()\n else:\n self.status_msg.set_value(\n \"Rendering images (frame %d / %d) ...\"\n % (frame + 1, n_frames)\n )\n self.status_msg.show()\n self.status_progress.show()\n self.status_progress.set_range([0, n_frames - 1])\n self.status_progress.set_value(frame)\n self.status_progress.update()\n self.status_msg.update()\n self._renderer._status_bar_update()\n\n # set cursor to busy\n default_cursor = self._renderer._window_get_cursor()\n self._renderer._window_set_cursor(\n self._renderer._window_new_cursor(\"WaitCursor\"))\n\n try:\n self._save_movie(\n filename=filename,\n time_dilation=(1. / self.playback_speed),\n callback=frame_callback,\n **kwargs\n )\n except (Exception, KeyboardInterrupt):\n warn('Movie saving aborted:\\n' + traceback.format_exc())\n finally:\n self._renderer._window_set_cursor(default_cursor)\n\n @fill_doc\n def save_movie(self, filename=None, time_dilation=4., tmin=None, tmax=None,\n framerate=24, interpolation=None, codec=None,\n bitrate=None, callback=None, time_viewer=False, **kwargs):\n \"\"\"Save a movie (for data with a time axis).\n\n The movie is created through the :mod:`imageio` module. The format is\n determined by the extension, and additional options can be specified\n through keyword arguments that depend on the format. 
For available\n formats and corresponding parameters see the imageio documentation:\n http://imageio.readthedocs.io/en/latest/formats.html#multiple-images\n\n .. Warning::\n This method assumes that time is specified in seconds when adding\n data. If time is specified in milliseconds this will result in\n movies 1000 times longer than expected.\n\n Parameters\n ----------\n filename : str\n Path at which to save the movie. The extension determines the\n format (e.g., ``'*.mov'``, ``'*.gif'``, ...; see the :mod:`imageio`\n documentation for available formats).\n time_dilation : float\n Factor by which to stretch time (default 4). For example, an epoch\n from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this\n would result in a 2.8 s long movie.\n tmin : float\n First time point to include (default: all data).\n tmax : float\n Last time point to include (default: all data).\n framerate : float\n Framerate of the movie (frames per second, default 24).\n %(brain_time_interpolation)s\n If None, it uses the current ``brain.interpolation``,\n which defaults to ``'nearest'``. Defaults to None.\n codec : str | None\n The codec to use.\n bitrate : float | None\n The bitrate to use.\n callback : callable | None\n A function to call on each iteration. Useful for status message\n updates. It will be passed keyword arguments ``frame`` and\n ``n_frames``.\n %(brain_screenshot_time_viewer)s\n **kwargs : dict\n Specify additional options for :mod:`imageio`.\n\n Returns\n -------\n dialog : object\n The opened dialog is returned for testing purpose only.\n \"\"\"\n if filename is None:\n filename = _generate_default_filename(\".mp4\")\n func = self._save_movie_tv if self.time_viewer else self._save_movie\n func(filename, time_dilation, tmin, tmax,\n framerate, interpolation, codec,\n bitrate, callback, time_viewer, **kwargs)\n\n def _make_movie_frames(self, time_dilation, tmin, tmax, framerate,\n interpolation, callback, time_viewer):\n from math import floor\n\n # find tmin\n if tmin is None:\n tmin = self._times[0]\n elif tmin < self._times[0]:\n raise ValueError(\"tmin=%r is smaller than the first time point \"\n \"(%r)\" % (tmin, self._times[0]))\n\n # find indexes at which to create frames\n if tmax is None:\n tmax = self._times[-1]\n elif tmax > self._times[-1]:\n raise ValueError(\"tmax=%r is greater than the latest time point \"\n \"(%r)\" % (tmax, self._times[-1]))\n n_frames = floor((tmax - tmin) * time_dilation * framerate)\n times = np.arange(n_frames, dtype=float)\n times /= framerate * time_dilation\n times += tmin\n time_idx = np.interp(times, self._times, np.arange(self._n_times))\n\n n_times = len(time_idx)\n if n_times == 0:\n raise ValueError(\"No time points selected\")\n\n logger.debug(\"Save movie for time points/samples\\n%s\\n%s\"\n % (times, time_idx))\n # Sometimes the first screenshot is rendered with a different\n # resolution on OS X\n self.screenshot(time_viewer=time_viewer)\n old_mode = self.time_interpolation\n if interpolation is not None:\n self.set_time_interpolation(interpolation)\n try:\n images = [\n self.screenshot(time_viewer=time_viewer)\n for _ in self._iter_time(time_idx, callback)]\n finally:\n self.set_time_interpolation(old_mode)\n if callback is not None:\n callback(frame=len(time_idx), n_frames=len(time_idx))\n return images\n\n def _iter_time(self, time_idx, callback):\n \"\"\"Iterate through time points, then reset to current time.\n\n Parameters\n ----------\n time_idx : array_like\n Time point indexes through which to iterate.\n callback : 
callable | None\n Callback to call before yielding each frame.\n\n Yields\n ------\n idx : int | float\n Current index.\n\n Notes\n -----\n Used by movie and image sequence saving functions.\n \"\"\"\n if self.time_viewer:\n func = partial(self.callbacks[\"time\"],\n update_widget=True)\n else:\n func = self.set_time_point\n current_time_idx = self._data[\"time_idx\"]\n for ii, idx in enumerate(time_idx):\n func(idx)\n if callback is not None:\n callback(frame=ii, n_frames=len(time_idx))\n yield idx\n\n # Restore original time index\n func(current_time_idx)\n\n def _check_stc(self, hemi, array, vertices):\n from ...source_estimate import (\n _BaseSourceEstimate, _BaseSurfaceSourceEstimate,\n _BaseMixedSourceEstimate, _BaseVolSourceEstimate\n )\n if isinstance(array, _BaseSourceEstimate):\n stc = array\n stc_surf = stc_vol = None\n if isinstance(stc, _BaseSurfaceSourceEstimate):\n stc_surf = stc\n elif isinstance(stc, _BaseMixedSourceEstimate):\n stc_surf = stc.surface() if hemi != 'vol' else None\n stc_vol = stc.volume() if hemi == 'vol' else None\n elif isinstance(stc, _BaseVolSourceEstimate):\n stc_vol = stc if hemi == 'vol' else None\n else:\n raise TypeError(\"stc not supported\")\n\n if stc_surf is None and stc_vol is None:\n raise ValueError(\"No data to be added\")\n if stc_surf is not None:\n array = getattr(stc_surf, hemi + '_data')\n vertices = stc_surf.vertices[0 if hemi == 'lh' else 1]\n if stc_vol is not None:\n array = stc_vol.data\n vertices = np.concatenate(stc_vol.vertices)\n else:\n stc = None\n return stc, array, vertices\n\n def _check_hemi(self, hemi, extras=()):\n \"\"\"Check for safe single-hemi input, returns str.\"\"\"\n if hemi is None:\n if self._hemi not in ['lh', 'rh']:\n raise ValueError('hemi must not be None when both '\n 'hemispheres are displayed')\n else:\n hemi = self._hemi\n elif hemi not in ['lh', 'rh'] + list(extras):\n extra = ' or None' if self._hemi in ['lh', 'rh'] else ''\n raise ValueError('hemi must be either \"lh\" or \"rh\"' +\n extra + \", got \" + str(hemi))\n return hemi\n\n def _check_hemis(self, hemi):\n \"\"\"Check for safe dual or single-hemi input, returns list.\"\"\"\n if hemi is None:\n if self._hemi not in ['lh', 'rh']:\n hemi = ['lh', 'rh']\n else:\n hemi = [self._hemi]\n elif hemi not in ['lh', 'rh']:\n extra = ' or None' if self._hemi in ['lh', 'rh'] else ''\n raise ValueError('hemi must be either \"lh\" or \"rh\"' + extra)\n else:\n hemi = [hemi]\n return hemi\n\n def _to_borders(self, label, hemi, borders, restrict_idx=None):\n \"\"\"Convert a label/parc to borders.\"\"\"\n if not isinstance(borders, (bool, int)) or borders < 0:\n raise ValueError('borders must be a bool or positive integer')\n if borders:\n n_vertices = label.size\n edges = mesh_edges(self.geo[hemi].orig_faces)\n edges = edges.tocoo()\n border_edges = label[edges.row] != label[edges.col]\n show = np.zeros(n_vertices, dtype=np.int64)\n keep_idx = np.unique(edges.row[border_edges])\n if isinstance(borders, int):\n for _ in range(borders):\n keep_idx = np.in1d(\n self.geo[hemi].orig_faces.ravel(), keep_idx)\n keep_idx.shape = self.geo[hemi].orig_faces.shape\n keep_idx = self.geo[hemi].orig_faces[\n np.any(keep_idx, axis=1)]\n keep_idx = np.unique(keep_idx)\n if restrict_idx is not None:\n keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)]\n show[keep_idx] = 1\n label *= show\n\n def enable_depth_peeling(self):\n \"\"\"Enable depth peeling.\"\"\"\n self._renderer.enable_depth_peeling()\n\n def get_picked_points(self):\n \"\"\"Return the vertices of the picked 
points.\n\n Returns\n -------\n points : list of int | None\n The vertices picked by the time viewer.\n \"\"\"\n if hasattr(self, \"time_viewer\"):\n return self.picked_points\n\n def __hash__(self):\n \"\"\"Hash the object.\"\"\"\n raise NotImplementedError\n\n\ndef _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False):\n \"\"\"Work around interp1d not liking singleton dimensions.\"\"\"\n from scipy.interpolate import interp1d\n if y.shape[axis] == 1:\n def func(x):\n return np.take(y, np.zeros(np.asarray(x).shape, int), axis=axis)\n return func\n else:\n return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted)\n\n\ndef _update_limits(fmin, fmid, fmax, center, array):\n if center is None:\n if fmin is None:\n fmin = array.min() if array.size > 0 else 0\n if fmax is None:\n fmax = array.max() if array.size > 0 else 1\n else:\n if fmin is None:\n fmin = 0\n if fmax is None:\n fmax = np.abs(center - array).max() if array.size > 0 else 1\n if fmid is None:\n fmid = (fmin + fmax) / 2.\n\n if fmin >= fmid:\n raise RuntimeError('min must be < mid, got %0.4g >= %0.4g'\n % (fmin, fmid))\n if fmid >= fmax:\n raise RuntimeError('mid must be < max, got %0.4g >= %0.4g'\n % (fmid, fmax))\n\n return fmin, fmid, fmax\n\n\ndef _update_monotonic(lims, fmin, fmid, fmax):\n if fmin is not None:\n lims['fmin'] = fmin\n if lims['fmax'] < fmin:\n logger.debug(f' Bumping fmax = {lims[\"fmax\"]} to {fmin}')\n lims['fmax'] = fmin\n if lims['fmid'] < fmin:\n logger.debug(f' Bumping fmid = {lims[\"fmid\"]} to {fmin}')\n lims['fmid'] = fmin\n assert lims['fmin'] <= lims['fmid'] <= lims['fmax']\n if fmid is not None:\n lims['fmid'] = fmid\n if lims['fmin'] > fmid:\n logger.debug(f' Bumping fmin = {lims[\"fmin\"]} to {fmid}')\n lims['fmin'] = fmid\n if lims['fmax'] < fmid:\n logger.debug(f' Bumping fmax = {lims[\"fmax\"]} to {fmid}')\n lims['fmax'] = fmid\n assert lims['fmin'] <= lims['fmid'] <= lims['fmax']\n if fmax is not None:\n lims['fmax'] = fmax\n if lims['fmin'] > fmax:\n logger.debug(f' Bumping fmin = {lims[\"fmin\"]} to {fmax}')\n lims['fmin'] = fmax\n if lims['fmid'] > fmax:\n logger.debug(f' Bumping fmid = {lims[\"fmid\"]} to {fmax}')\n lims['fmid'] = fmax\n assert lims['fmin'] <= lims['fmid'] <= lims['fmax']\n\n\ndef _get_range(brain):\n val = np.abs(np.concatenate(list(brain._current_act_data.values())))\n return [np.min(val), np.max(val)]\n\n\nclass _FakeIren():\n def EnterEvent(self):\n pass\n\n def MouseMoveEvent(self):\n pass\n\n def LeaveEvent(self):\n pass\n\n def SetEventInformation(self, *args, **kwargs):\n pass\n\n def CharEvent(self):\n pass\n\n def KeyPressEvent(self, *args, **kwargs):\n pass\n\n def KeyReleaseEvent(self, *args, **kwargs):\n pass\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.asarray",
"numpy.in1d",
"numpy.round",
"numpy.max",
"numpy.concatenate",
"matplotlib.image.imread",
"numpy.mean",
"numpy.any",
"numpy.searchsorted",
"numpy.cross",
"numpy.where",
"numpy.clip",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"numpy.full",
"numpy.atleast_1d",
"scipy.interpolate.interp1d",
"numpy.interp",
"matplotlib.colors.colorConverter.to_rgba",
"numpy.unravel_index",
"numpy.zeros",
"numpy.min",
"matplotlib.colors.colorConverter.to_rgb",
"matplotlib.colors.ListedColormap",
"numpy.argsort",
"numpy.array",
"numpy.abs",
"numpy.array_equal",
"numpy.linalg.norm",
"numpy.ones",
"numpy.prod",
"numpy.empty"
]
] |
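The `_brain.py` code cell above documents its behaviour mostly through docstrings; one piece of logic that is easy to isolate is how `update_lut` keeps the colormap limits ordered via `_update_monotonic` (near the end of the cell). The sketch below restates that rule in plain Python with made-up values; it does not use MNE itself.

```python
# Plain-Python restatement of the limit bumping done by _update_monotonic in
# the code cell above: whichever limit is set, the others are pushed so that
# fmin <= fmid <= fmax always holds. The starting values are made up.
def set_fmin(lims, fmin):
    lims["fmin"] = fmin
    lims["fmid"] = max(lims["fmid"], fmin)  # bump fmid up if it fell below fmin
    lims["fmax"] = max(lims["fmax"], fmin)  # bump fmax up if it fell below fmin
    return lims

lims = {"fmin": 1.0, "fmid": 2.0, "fmax": 3.0}
print(set_fmin(lims, 2.5))  # {'fmin': 2.5, 'fmid': 2.5, 'fmax': 3.0}
```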
tuq820/efficientdet_with_landmark
|
[
"438d92168e5bb91d9ea9bd4b5c743ef41f936fde"
] |
[
"train.py"
] |
[
"import os\nimport argparse\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom src.dataset import CocoDataset, Resizer, Normalizer, Augmenter, collater, MaJiaDataset\nfrom src.model import EfficientDet\nfrom tensorboardX import SummaryWriter\nimport shutil\nimport numpy as np\nfrom tqdm.autonotebook import tqdm\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n \"EfficientDet: Scalable and Efficient Object Detection implementation by Signatrix GmbH\")\n parser.add_argument(\"--image_size\", type=int, default=512, help=\"The common width and height for all images\")\n parser.add_argument(\"--batch_size\", type=int, default=8, help=\"The number of images per batch\")\n parser.add_argument(\"--lr\", type=float, default=1e-4)\n parser.add_argument('--alpha', type=float, default=0.25)\n parser.add_argument('--gamma', type=float, default=1.5)\n parser.add_argument(\"--num_epochs\", type=int, default=50)\n parser.add_argument(\"--test_interval\", type=int, default=1, help=\"Number of epoches between testing phases\")\n parser.add_argument(\"--es_min_delta\", type=float, default=0.0,\n help=\"Early stopping's parameter: minimum change loss to qualify as an improvement\")\n parser.add_argument(\"--es_patience\", type=int, default=0,\n help=\"Early stopping's parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.\")\n # parser.add_argument(\"--data_path\", type=str, default=\"/disk4t/data/coco/data/coco\", help=\"the root folder of dataset\")\n parser.add_argument(\"--data_path\", type=str, default=\"/home/pc/work/data/majia\",\n help=\"the root folder of dataset\")\n parser.add_argument(\"--label_txt\", type=str, default=\"/home/pc/work/data/majia/data_01.txt\",\n help=\"the root folder of dataset\")\n parser.add_argument(\"--log_path\", type=str, default=\"tensorboard/signatrix_efficientdet_coco\")\n parser.add_argument(\"--saved_path\", type=str, default=\"trained_models1\")\n # parser.add_argument(\"--resume\", type=str, default=\"trained_models/signatrix_efficientdet_majia_30.pth\")\n parser.add_argument(\"--resume\", type=str, default=None)\n\n args = parser.parse_args()\n return args\n\n\ndef train(opt):\n num_gpus = 1\n if torch.cuda.is_available():\n num_gpus = torch.cuda.device_count()\n torch.cuda.manual_seed(123)\n else:\n torch.manual_seed(123)\n\n training_params = {\"batch_size\": opt.batch_size * num_gpus,\n \"shuffle\": True,\n \"drop_last\": True,\n \"collate_fn\": collater,\n \"num_workers\": 12}\n\n test_params = {\"batch_size\": opt.batch_size,\n \"shuffle\": False,\n \"drop_last\": False,\n \"collate_fn\": collater,\n \"num_workers\": 12}\n\n # training_set = CocoDataset(root_dir=opt.data_path, set=\"train2017\",\n # transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))\n training_set = MaJiaDataset(root_dir=opt.data_path, label_txt=opt.label_txt,\n transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))\n training_generator = DataLoader(training_set, **training_params)\n\n # test_set = CocoDataset(root_dir=opt.data_path, set=\"val2017\",\n # transform=transforms.Compose([Normalizer(), Resizer()]))\n test_set = MaJiaDataset(root_dir=opt.data_path, label_txt=opt.label_txt,\n transform=transforms.Compose([Normalizer(), Resizer()]))\n test_generator = DataLoader(test_set, **test_params)\n\n if opt.resume is not None:\n model = torch.load('trained_models/signatrix_efficientdet_majia_30.pth')\n if 
isinstance(model, torch.nn.DataParallel):\n model = model.module\n else:\n model = EfficientDet(num_classes=training_set.num_classes())\n\n if os.path.isdir(opt.log_path):\n shutil.rmtree(opt.log_path)\n os.makedirs(opt.log_path)\n\n if not os.path.isdir(opt.saved_path):\n os.makedirs(opt.saved_path)\n\n writer = SummaryWriter(opt.log_path)\n if torch.cuda.is_available():\n model = model.cuda()\n model = nn.DataParallel(model)\n\n optimizer = torch.optim.Adam(model.parameters(), opt.lr)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)\n\n best_loss = 1e5\n best_epoch = 0\n model.train()\n\n num_iter_per_epoch = len(training_generator)\n for epoch in range(opt.num_epochs):\n model.train()\n # if torch.cuda.is_available():\n # model.module.freeze_bn()\n # else:\n # model.freeze_bn()\n epoch_loss = []\n progress_bar = tqdm(training_generator)\n for iter, data in enumerate(progress_bar):\n try:\n optimizer.zero_grad()\n if torch.cuda.is_available():\n cls_loss, reg_loss, ldm_loss = model([data['img'].cuda().float(), data['annot'].cuda()])\n else:\n cls_loss, reg_loss, ldm_loss = model([data['img'].float(), data['annot']])\n\n cls_loss = cls_loss.mean()\n reg_loss = reg_loss.mean()\n ldm_loss = ldm_loss.mean()\n loss = cls_loss + reg_loss + ldm_loss\n if loss == 0:\n continue\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)\n optimizer.step()\n epoch_loss.append(float(loss))\n total_loss = np.mean(epoch_loss)\n\n progress_bar.set_description(\n 'Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Ldm loss: {:.5f}, Batch loss: {:.5f} Total loss: {:.5f}'.format(\n epoch + 1, opt.num_epochs, iter + 1, num_iter_per_epoch, cls_loss, reg_loss, ldm_loss, loss,\n total_loss))\n writer.add_scalar('Train/Total_loss', total_loss, epoch * num_iter_per_epoch + iter)\n writer.add_scalar('Train/Regression_loss', reg_loss, epoch * num_iter_per_epoch + iter)\n writer.add_scalar('Train/Landmark_loss', ldm_loss, epoch * num_iter_per_epoch + iter)\n writer.add_scalar('Train/Classfication_loss (focal loss)', cls_loss, epoch * num_iter_per_epoch + iter)\n\n except Exception as e:\n print(e)\n continue\n scheduler.step(np.mean(epoch_loss))\n\n if epoch % opt.test_interval == 0:\n model.eval()\n loss_regression_ls = []\n loss_classification_ls = []\n loss_landmark_ls = []\n for iter, data in enumerate(test_generator):\n with torch.no_grad():\n if torch.cuda.is_available():\n cls_loss, reg_loss, ldm_loss = model([data['img'].cuda().float(), data['annot'].cuda()])\n else:\n cls_loss, reg_loss, ldm_loss = model([data['img'].float(), data['annot']])\n\n cls_loss = cls_loss.mean()\n reg_loss = reg_loss.mean()\n ldm_loss = ldm_loss.mean()\n\n loss_classification_ls.append(float(cls_loss))\n loss_regression_ls.append(float(reg_loss))\n loss_landmark_ls.append(float(ldm_loss))\n\n cls_loss = np.mean(loss_classification_ls)\n reg_loss = np.mean(loss_regression_ls)\n ldm_loss = np.mean(loss_landmark_ls)\n loss = cls_loss + reg_loss + ldm_loss\n\n print(\n 'Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. 
Landmark loss: {:.5f}, Total loss: {:1.5f}'.format(\n epoch + 1, opt.num_epochs, cls_loss, reg_loss, ldm_loss,\n np.mean(loss)))\n writer.add_scalar('Test/Total_loss', loss, epoch)\n writer.add_scalar('Test/Regression_loss', reg_loss, epoch)\n writer.add_scalar('Test/Landmark_loss', ldm_loss, epoch)\n writer.add_scalar('Test/Classfication_loss (focal loss)', cls_loss, epoch)\n\n if loss + opt.es_min_delta < best_loss:\n best_loss = loss\n best_epoch = epoch\n torch.save(model, os.path.join(opt.saved_path, \"signatrix_efficientdet_majia.pth\"))\n\n dummy_input = torch.rand(opt.batch_size, 3, 512, 512)\n if torch.cuda.is_available():\n dummy_input = dummy_input.cuda()\n if isinstance(model, nn.DataParallel):\n model.module.backbone_net.model.set_swish(memory_efficient=False)\n\n torch.onnx.export(model.module, dummy_input,\n os.path.join(opt.saved_path, \"signatrix_efficientdet_majia.onnx\"),\n verbose=False)\n model.module.backbone_net.model.set_swish(memory_efficient=True)\n else:\n model.backbone_net.model.set_swish(memory_efficient=False)\n\n torch.onnx.export(model, dummy_input,\n os.path.join(opt.saved_path, \"signatrix_efficientdet_majia.onnx\"),\n verbose=False)\n model.backbone_net.model.set_swish(memory_efficient=True)\n\n # Early stopping\n if epoch - best_epoch > opt.es_patience > 0:\n print(\"Stop training at epoch {}. The lowest loss achieved is {}\".format(epoch, loss))\n break\n writer.close()\n\n\nif __name__ == \"__main__\":\n opt = get_args()\n train(opt)\n\n # 直接执行python3 train.py 即可训练码架\n # python3 test_video.py 即可测试码架\n"
] |
[
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.cuda.manual_seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel",
"numpy.mean",
"torch.rand",
"torch.cuda.is_available",
"torch.no_grad",
"torch.cuda.device_count"
]
] |
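The `train.py` cell above combines `ReduceLROnPlateau` scheduling with a hand-rolled early-stopping rule. The snippet below isolates just that rule, using the same chained comparison as the script (`epoch - best_epoch > es_patience > 0`, which disables the check when `es_patience` is 0); the loss values are invented for illustration.

```python
# Minimal restatement of the early-stopping rule from the train.py cell above.
# Loss values are made up; in the script they come from the validation pass.
es_patience, es_min_delta = 3, 0.0
best_loss, best_epoch = float("inf"), 0

for epoch, loss in enumerate([0.9, 0.7, 0.71, 0.72, 0.73, 0.74]):
    if loss + es_min_delta < best_loss:
        best_loss, best_epoch = loss, epoch       # new best checkpoint
    if epoch - best_epoch > es_patience > 0:      # no improvement for too long
        print(f"stop at epoch {epoch}, best was epoch {best_epoch}")
        break
```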
maneeshdisodia/skills-ml
|
[
"194b262aa5bad1af381d1f63f8b327cf96523950"
] |
[
"skills_ml/algorithms/sampling/methods.py"
] |
[
"\"\"\"Generic sampling methods\"\"\"\nimport numpy as np\nimport heapq as hq\nimport random\n\ndef reservoir(it, k):\n \"\"\"Reservoir sampling with Random Sort from a job posting iterator\n\n Randomly choosing a sample of k items from a streaming iterator. Using random sort to implement the algorithm.\n Basically, it's assigning random number as keys to each item and maintain k items with minimum value for keys,\n which equals to assigning a random number to each item as key and sort items using these keys and take top k items.\n\n Args:\n it (iterator): Job posting iterator to sample from\n k (int): Sample size\n\n Returns:\n generator: The result sample of k items.\n \"\"\"\n it = iter(it)\n result = []\n for i, datum in enumerate(it):\n if i < k:\n result.append(datum)\n else:\n j = random.randint(0, i-1)\n if j < k:\n result[j] = datum\n while len(result) > 0:\n yield result.pop()\n\n\ndef reservoir_weighted(it, k, weights):\n \"\"\"Weighted reservoir Sampling from job posting iterator\n\n Randomly choosing a sample of k items from a streaming iterator based on the weights.\n\n\n Args:\n it (iterator): Job posting iterator to sample from. The format should be (job_posting, label)\n k (int): Sample size\n weights (dict): a dictionary that has key-value pairs as label-weighting pairs. It expects every\n label in the iterator to be present as a key in the weights dictionary For example,\n weights = {'11': 2, '13', 1}. In this case, the label/key is the occupation major\n group and the value is the weight you want to sample with.\n\n Returns:\n generator: The result sample of k items from weighted reservori sampling.\n\n \"\"\"\n heap = []\n hkey = lambda w: np.power(np.random.uniform(0.0, 1.0), 1.0 / w)\n for i, datum in enumerate(it):\n weight = weights[datum[1]]\n score = hkey(weight)\n if len(heap) < k:\n hq.heappush(heap, (hkey(weight), datum))\n elif score > heap[0][0]:\n hq.heapreplace(heap, (score, datum))\n while len(heap) > 0:\n yield hq.heappop(heap)[1]\n"
] |
[
[
"numpy.random.uniform"
]
] |
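The docstrings in `skills_ml/algorithms/sampling/methods.py` above describe both plain and weighted reservoir sampling (random keys `u**(1/w)` kept in a min-heap). A minimal usage sketch follows, assuming the package is installed so the module path shown in the row resolves; the toy postings are made up, and the weights mirror the `{'11': 2, '13': 1}` example from the docstring.

```python
# Illustrative use of the sampling generators from the skills-ml entry above.
# Assumes skills_ml is importable; the postings below are fabricated examples.
from skills_ml.algorithms.sampling.methods import reservoir, reservoir_weighted

postings = [
    ("posting_a", "11"), ("posting_b", "13"), ("posting_c", "11"),
    ("posting_d", "13"), ("posting_e", "11"), ("posting_f", "13"),
]

# Unweighted: every item has the same chance of ending up in the sample.
plain_sample = list(reservoir((p for p, _ in postings), k=3))

# Weighted: label '11' is favoured 2:1 over label '13'; every label in the
# stream must appear as a key in the weights dict, as the docstring requires.
weighted_sample = list(reservoir_weighted(iter(postings), k=3,
                                          weights={"11": 2, "13": 1}))

print(plain_sample)     # 3 posting ids, order arbitrary
print(weighted_sample)  # 3 (posting, label) tuples in heap-pop order
```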
bird-house/blackswan
|
[
"5f1f20423874315f5e8eea2cf7302f9d0c05adae"
] |
[
"blackswan/pythonanattribution.py"
] |
[
"# import logging\n# LOGGER = logging.getLogger(\"PYWPS\")\n\nimport pandas\nimport random\nimport numpy as np\n\n\ndef analogs_generator(anafile, yfile, nsim=20):\n \"\"\"\n Simulates nsim values of the variable y using analogues for all the dates present in the file anafile\n \n :param anafile: path to a file with the results of the analogues\n :param yfile: path to the file containing the data. The file should have two columns:\n - the first with the date with the following for format yyyymmdd\n - the second with the variable of interest y, columns are separated by spaces and are supposed to have headers\n :param nsim: number of simulations of the variablle y to generate with the analogues\n\n :retun str: simulated variable\n \"\"\"\n\n def weight_analogues(date):\n dist = disttable.loc[[date], :].transpose()\n date = anatable.loc[[date], :].transpose()\n weights = pandas.concat([date.reset_index(drop=True), dist.reset_index(drop=True)], axis=1)\n weights.columns = ['date', 'dist']\n weights = weights.set_index('date')\n return weights\n\n def select_y_analogues(date):\n bidx = ytable.index.isin(anatable.loc[date, :])\n return ytable.iloc[bidx, 0]\n\n def generate_cond_ymean(date, nsim=20):\n weights = weight_analogues(date)\n ys = select_y_analogues(date)\n dat = pandas.concat([ys, weights], axis=1, join=\"inner\")\n weights = np.random.multinomial(nsim, dat.dist / sum(dat.dist))\n return random.sample(list(np.repeat(dat.iloc[:, 0], weights)), nsim)\n\n ytable = pandas.read_table(yfile, sep=\" \", skipinitialspace=True)\n anatable = pandas.read_table(anafile, sep=\" \", skipinitialspace=True)\n nanalogs = len([s for s in anatable.columns if \"dis\" in s])\n disttable = anatable.iloc[:, [0] + list(range(nanalogs + 1, 2 * nanalogs + 1))].copy()\n cortable = anatable.iloc[:, [0] + list(range(2 * nanalogs + 1, 3 * nanalogs + 1))].copy()\n anatable = anatable.iloc[:, 0:(nanalogs + 1)].copy()\n ytable = ytable.set_index('date')\n disttable = disttable.set_index('date')\n cortable = cortable.set_index('date')\n anatable = anatable.set_index('date')\n condys = list(map(generate_cond_ymean, anatable.index, np.repeat(nsim, len(anatable.index))))\n condys = pandas.DataFrame(condys)\n condys = condys.transpose()\n # condys = [x.reset_index(drop=True) for x in condys]\n # condys = pandas.concat(condys, axis = 1)\n condys.columns = anatable.index\n return condys\n # condyms = condys.mean(axis=1)\n # return condyms\n"
] |
[
[
"pandas.read_table",
"pandas.concat",
"numpy.repeat",
"pandas.DataFrame"
]
] |
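The `analogs_generator` cell above conditions simulated `y` values on analogue dates by drawing multinomial counts from the analogue distance weights and repeating the corresponding `y` values. The standalone sketch below reproduces only that resampling step with invented numbers, so it runs without the `anafile`/`yfile` inputs the real function needs.

```python
# Standalone sketch of the resampling step inside generate_cond_ymean above:
# analogue scores act as multinomial weights, and the matching y values are
# repeated according to the drawn counts. All numbers here are made up.
import random
import numpy as np

nsim = 20
y_analogs = np.array([3.1, 2.7, 4.0, 3.5])   # y values of the selected analogue dates
scores = np.array([0.9, 0.4, 0.2, 0.5])      # analogue scores used as weights

counts = np.random.multinomial(nsim, scores / scores.sum())
simulated = random.sample(list(np.repeat(y_analogs, counts)), nsim)
print(simulated)  # nsim draws of y, conditioned on the analogue weights
```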
PrateekMunjal/TorchAL
|
[
"ec60b093333c66e4c8862d128d81680060fddf36",
"ec60b093333c66e4c8862d128d81680060fddf36"
] |
[
"al_utils/vae_sampling.py",
"pycls/models/resnet_style/shake_shake_function.py"
] |
[
"import torch\nimport os\nimport math\nimport numpy as np\nfrom copy import deepcopy\nfrom pycls.core.config import cfg\nimport pycls.utils.distributed as du\nfrom tqdm import tqdm\n\n\nclass AdversarySampler:\n def __init__(self, budget):\n self.budget = budget\n self.cuda_id = torch.cuda.current_device()\n\n def compute_dists(self, X, X_train):\n dists = (\n -2 * np.dot(X, X_train.T)\n + np.sum(X_train**2, axis=1)\n + np.sum(X**2, axis=1)[:, np.newaxis]\n )\n return dists\n\n def greedy_k_center(self, labeled, unlabeled):\n greedy_indices = []\n\n # get the minimum distances between the labeled and unlabeled examples (iteratively, to avoid memory issues):\n min_dist = np.min(\n self.compute_dists(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled),\n axis=0,\n )\n min_dist = min_dist.reshape((1, min_dist.shape[0]))\n temp_range = 1000\n for j in range(1, labeled.shape[0], temp_range):\n if j + temp_range < labeled.shape[0]:\n dist = self.compute_dists(labeled[j : j + temp_range, :], unlabeled)\n else:\n # for last iteration only :)\n dist = self.compute_dists(labeled[j:, :], unlabeled)\n # dist = pairwise_distances(labeled[j:, :], unlabeled,metric='euclidean')\n min_dist = np.vstack(\n (min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1])))\n )\n min_dist = np.min(min_dist, axis=0)\n min_dist = min_dist.reshape((1, min_dist.shape[0]))\n\n # iteratively insert the farthest index and recalculate the minimum distances:\n farthest = np.argmax(min_dist)\n greedy_indices.append(farthest)\n\n amount = cfg.ACTIVE_LEARNING.BUDGET_SIZE - 1\n for i in range(amount):\n if i is not 0 and i % 500 == 0:\n print(\"{} Sampled out of {}\".format(i, amount + 1))\n # dist = pairwise_distances(unlabeled[greedy_indices[-1], :].reshape((1,unlabeled.shape[1])), unlabeled, metric='euclidean')\n dist = self.compute_dists(\n unlabeled[greedy_indices[-1], :].reshape((1, unlabeled.shape[1])),\n unlabeled,\n )\n min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))\n min_dist = np.min(min_dist, axis=0)\n min_dist = min_dist.reshape((1, min_dist.shape[0]))\n farthest = np.argmax(min_dist)\n greedy_indices.append(farthest)\n\n remainSet = set(np.arange(unlabeled.shape[0])) - set(greedy_indices)\n remainSet = np.array(list(remainSet))\n return greedy_indices, remainSet\n\n def get_vae_activations(self, vae, dataLoader):\n acts = []\n vae.eval()\n\n temp_max_iter = len(dataLoader)\n print(\"len(dataloader): {}\".format(temp_max_iter))\n temp_iter = 0\n for x, y in dataLoader:\n x = x.type(torch.cuda.FloatTensor)\n x = x.cuda(self.cuda_id)\n _, _, mu, _ = vae(x)\n acts.append(mu.cpu().numpy())\n if temp_iter % 100 == 0:\n print(f\"Iteration [{temp_iter}/{temp_max_iter}] Done!!\")\n\n temp_iter += 1\n\n acts = np.concatenate(acts, axis=0)\n return acts\n\n def get_predictions(self, vae, discriminator, data, cuda):\n all_preds = []\n all_indices = []\n\n assert vae.training == False, \"Expected vae model to be in eval mode\"\n assert (\n discriminator.training == False\n ), \"Expected discriminator model to be in eval mode\"\n\n temp_idx = 0\n for images, _ in data:\n if cuda:\n images = images.cuda()\n\n with torch.no_grad():\n _, _, mu, _ = vae(images)\n preds = discriminator(mu)\n\n preds = preds.cpu().data\n all_preds.extend(preds)\n temp_idx += images.shape[0]\n\n all_indices = np.arange(temp_idx)\n all_preds = torch.stack(all_preds)\n all_preds = all_preds.view(-1)\n all_preds = all_preds.cpu().numpy()\n return all_preds\n\n def gpu_compute_dists(self, M1, M2):\n \"\"\"\n Computes L2 
norm square on gpu\n Assume\n M1: M x D matrix\n M2: N x D matrix\n\n output: M x N matrix\n \"\"\"\n # print(f\"Function call to gpu_compute dists; M1: {M1.shape} and M2: {M2.shape}\")\n M1_norm = (M1**2).sum(1).reshape(-1, 1)\n\n M2_t = torch.transpose(M2, 0, 1)\n M2_norm = (M2**2).sum(1).reshape(1, -1)\n dists = M1_norm + M2_norm - 2.0 * torch.mm(M1, M2_t)\n return dists\n\n def efficient_compute_dists(self, labeled, unlabeled):\n \"\"\" \"\"\"\n N_L = labeled.shape[0]\n N_U = unlabeled.shape[0]\n dist_matrix = None\n\n temp_range = 1000\n\n unlabeled = torch.from_numpy(unlabeled).cuda(self.cuda_id)\n temp_dist_matrix = np.empty((N_U, temp_range))\n # for i in range(0, N_L, temp_range):\n for i in tqdm(range(0, N_L, temp_range), desc=\"Computing Distance Matrix\"):\n end_index = i + temp_range if i + temp_range < N_L else N_L\n temp_labeled = labeled[i:end_index, :]\n temp_labeled = torch.from_numpy(temp_labeled).cuda(self.cuda_id)\n temp_dist_matrix = self.gpu_compute_dists(unlabeled, temp_labeled)\n temp_dist_matrix = torch.min(temp_dist_matrix, dim=1)[0]\n temp_dist_matrix = torch.reshape(\n temp_dist_matrix, (temp_dist_matrix.shape[0], 1)\n )\n if dist_matrix is None:\n dist_matrix = temp_dist_matrix\n else:\n dist_matrix = torch.cat((dist_matrix, temp_dist_matrix), dim=1)\n dist_matrix = torch.min(dist_matrix, dim=1)[0]\n dist_matrix = torch.reshape(dist_matrix, (dist_matrix.shape[0], 1))\n\n return dist_matrix.cpu().numpy()\n\n @torch.no_grad()\n def vae_sample_for_labeling(\n self, vae, uSet, lSet, unlabeled_dataloader, lSetLoader\n ):\n\n vae.eval()\n print(\"Computing activattions for uset....\")\n u_scores = self.get_vae_activations(vae, unlabeled_dataloader)\n print(\"Computing activattions for lset....\")\n l_scores = self.get_vae_activations(vae, lSetLoader)\n\n print(\"l_scores.shape: \", l_scores.shape)\n print(\"u_scores.shape: \", u_scores.shape)\n\n # dist_matrix = self.compute_dists(u_scores, l_scores)\n dist_matrix = self.efficient_compute_dists(l_scores, u_scores)\n print(\"Dist_matrix.shape: \", dist_matrix.shape)\n\n min_scores = np.min(dist_matrix, axis=1)\n sorted_idx = np.argsort(min_scores)[::-1]\n\n activeSet = uSet[sorted_idx[0 : self.budget]]\n remainSet = uSet[sorted_idx[self.budget :]]\n\n return activeSet, remainSet\n\n def sample_vaal_plus(self, vae, disc_task, data, cuda):\n all_preds = []\n all_indices = []\n\n assert vae.training == False, \"Expected vae model to be in eval mode\"\n assert (\n disc_task.training == False\n ), \"Expected disc_task model to be in eval mode\"\n\n temp_idx = 0\n for images, _ in data:\n if cuda:\n images = images.cuda()\n\n with torch.no_grad():\n _, _, mu, _ = vae(images)\n preds, _ = disc_task(mu)\n\n preds = preds.cpu().data\n all_preds.extend(preds)\n temp_idx += images.shape[0]\n\n all_indices = np.arange(temp_idx)\n all_preds = torch.stack(all_preds)\n all_preds = all_preds.view(-1)\n # need to multiply by -1 to be able to use torch.topk\n all_preds *= -1\n\n # select the points which the discriminator things are the most likely to be unlabeled\n _, querry_indices = torch.topk(all_preds, int(self.budget))\n querry_indices = querry_indices.numpy()\n remain_indices = np.asarray(list(set(all_indices) - set(querry_indices)))\n assert len(remain_indices) + len(querry_indices) == len(\n all_indices\n ), \" Indices are overlapped between activeSet and uSet\"\n activeSet = all_indices[querry_indices]\n uSet = all_indices[remain_indices]\n return activeSet, uSet\n\n def sample(self, vae, discriminator, data, uSet, cfg):\n 
all_preds = []\n all_indices = []\n\n assert vae.training == False, \"Expected vae model to be in eval mode\"\n assert (\n discriminator.training == False\n ), \"Expected discriminator model to be in eval mode\"\n\n temp_idx = 0\n for images, _ in tqdm(data, desc=\"Constructing VAE ActiveSet\"):\n images = images.type(torch.cuda.FloatTensor)\n images = images.cuda()\n\n with torch.no_grad():\n _, _, mu, _ = vae(images)\n preds = discriminator(mu)\n\n preds = preds.cpu().data\n all_preds.extend(preds)\n temp_idx += images.shape[0]\n\n all_indices = np.arange(temp_idx)\n all_preds = torch.stack(all_preds)\n all_preds = all_preds.view(-1)\n\n scores_save_path = cfg.OUT_DIR\n os.makedirs(scores_save_path, exist_ok=True) # just to be safe\n with open(os.path.join(scores_save_path, \"actualScores.txt\"), \"w\") as fpw:\n for temp_idx, temp_rank in zip(uSet, all_preds):\n fpw.write(f\"{temp_idx}\\t{temp_rank:.6f}\\n\")\n\n fpw.close()\n\n # need to multiply by -1 to be able to use torch.topk\n all_preds *= -1\n\n # select the points which the discriminator things are the most likely to be unlabeled\n _, querry_indices = torch.topk(all_preds, int(self.budget))\n querry_indices = querry_indices.numpy()\n remain_indices = np.asarray(list(set(all_indices) - set(querry_indices)))\n assert len(remain_indices) + len(querry_indices) == len(\n all_indices\n ), \" Indices are overlapped between activeSet and uSet\"\n activeSet = all_indices[querry_indices]\n uSet = all_indices[remain_indices]\n return activeSet, uSet\n\n # def sample_for_labeling(self, cfg, uSetPath, lSetPath, dataObj, noAugDataset):\n # \"\"\"\n # Picks samples from uSet to form activeSet.\n\n # INPUT\n # ------\n # vae: object of model VAE\n\n # discriminator: object of model discriminator\n\n # unlabeled_dataloader: Sequential dataloader iterating over uSet\n\n # uSet: Collection of unlabelled datapoints\n\n # NOTE: Please pass the unlabelled dataloader as sequential dataloader else the\n # results won't be appropriate.\n\n # OUTPUT\n # -------\n\n # Returns activeSet, [remaining]uSet\n # \"\"\"\n\n # current_device = torch.cuda.current_device()\n\n # #Load vae -- out_dir/vae.pyth\n # vae_dir = os.path.join(cfg.OUT_DIR, \"vae/vae.pyth\")\n\n # #Load disc -- out_dir/disc.pyth\n # disc_dir = os.path.join(cfg.OUT_DIR, \"disc/disc.pyth\")\n\n # #Get uSet form uSetPath\n # uSet = np.load(uSetPath, allow_pickle=True)\n\n # #Get uSetLoader from uSet\n # uSetLoader = dataObj.getSequentialDataLoader(indexes=uSet,batch_size=int(cfg.TRAIN.BATCH_SIZE/cfg.NUM_GPUS),\\\n # data=noAugDataset)\n\n # #load vae from vae_dir\n # vae_checkpoint = None#load from vae_dir\n # vae = torch.load(vae_checkpoint['model'], map_location='cpu')\n # vae.cuda(current_device)\n\n # #load disc from disc_dir\n # disc_checkpoint = None\n # disc = torch.load(disc_checkpoint['model'], map_location='cpu')\n # disc.cuda(current_device)\n\n # sampler = AdversarySampler(cfg.ACTIVE_LEARNING.BUDGET_SIZE)\n # activeSet, remainSet = sampler.sample(vae, disc, uSetLoader)\n\n # activeSet = uSet[activeSet]\n # remainSet = uSet[remainSet]\n # return activeSet, remainSet\n\n @torch.no_grad()\n def sample_for_labeling(self, vae, discriminator, unlabeled_dataloader, uSet, cfg):\n \"\"\"\n Picks samples from uSet to form activeSet.\n\n INPUT\n ------\n vae: object of model VAE\n\n discriminator: object of model discriminator\n\n unlabeled_dataloader: Sequential dataloader iterating over uSet\n\n uSet: Collection of unlabelled datapoints\n\n NOTE: Please pass the unlabelled dataloader as 
sequential dataloader else the\n results won't be appropriate.\n\n OUTPUT\n -------\n\n Returns activeSet, [remaining]uSet\n \"\"\"\n print(\"Sampling....\")\n activeSet, remainSet = self.sample(\n vae,\n discriminator,\n unlabeled_dataloader,\n uSet,\n cfg,\n )\n\n activeSet = uSet[activeSet]\n remainSet = uSet[remainSet]\n return activeSet, remainSet\n\n # def vaal_sampling(self, cfg, uSetPath, lSetPath, dataObj, noAugDataset):\n\n # lSet = np.load(lSetPath, allow_pickle=True)\n # uSet = np.load(uSetPath, allow_pickle=True)\n\n # activeSet, remainSet = self.sample_for_labeling(cfg, uSetPath, lSetPath, dataObj, noAugDataset)\n\n # lSet = np.append(lSet, activeSet)\n # uSet = remainSet\n\n # #save all sets\n # np.save(os.path.join(cfg.OUT_DIR, \"lSet.npy\"), lSet)\n # np.save(os.path.join(cfg.OUT_DIR, \"uSet.npy\"), uSet)\n # np.save(os.path.join(cfg.OUT_DIR, \"activeSet.npy\"), activeSet)\n",
"import torch\nfrom torch.autograd import Function\n\nclass ShakeFunction(Function):\n @staticmethod\n def forward(ctx, x1, x2, alpha, beta):\n ctx.save_for_backward(x1, x2, alpha, beta)\n\n y = x1 * alpha + x2 * (1 - alpha)\n return y\n\n @staticmethod\n def backward(ctx, grad_output):\n x1, x2, alpha, beta = ctx.saved_variables\n grad_x1 = grad_x2 = grad_alpha = grad_beta = None\n\n if ctx.needs_input_grad[0]:\n grad_x1 = grad_output * beta\n if ctx.needs_input_grad[1]:\n grad_x2 = grad_output * (1 - beta)\n\n return grad_x1, grad_x2, grad_alpha, grad_beta\n\n\nshake_function = ShakeFunction.apply\n\n\ndef get_alpha_beta(batch_size, shake_config, device):\n forward_shake, backward_shake, shake_image = shake_config\n\n if forward_shake and not shake_image:\n alpha = torch.rand(1)\n elif forward_shake and shake_image:\n alpha = torch.rand(batch_size).view(batch_size, 1, 1, 1)\n else:\n alpha = torch.FloatTensor([0.5])\n\n if backward_shake and not shake_image:\n beta = torch.rand(1)\n elif backward_shake and shake_image:\n beta = torch.rand(batch_size).view(batch_size, 1, 1, 1)\n else:\n beta = torch.FloatTensor([0.5])\n\n alpha = alpha.to(device)\n beta = beta.to(device)\n\n return alpha, beta"
] |
[
[
"numpy.dot",
"torch.transpose",
"torch.mm",
"torch.cuda.current_device",
"numpy.min",
"numpy.arange",
"torch.reshape",
"torch.min",
"torch.cat",
"torch.from_numpy",
"numpy.concatenate",
"numpy.argmax",
"torch.no_grad",
"torch.stack",
"numpy.argsort",
"numpy.sum",
"numpy.empty"
],
[
"torch.FloatTensor",
"torch.rand"
]
] |
SchernHe/FairML
|
[
"ebd32df6dec1d5232e05e18c88e89179a420659e"
] |
[
"fairml/metrics/utils.py"
] |
[
"\"\"\"Provides useful functions to calculate fairness metrics\"\"\"\nfrom sklearn.metrics import confusion_matrix, accuracy_score, precision_score\nimport numpy as np\nimport pandas as pd\n\n\ndef calculate_precision(df, target_variable, prediction_variable):\n \"\"\"Calculate precision / positive predictive value PPV\"\"\"\n tn, fp, fn, tp = confusion_matrix(\n df[target_variable], df[prediction_variable]\n ).ravel()\n if (tp + fp) != 0:\n return (tp / (tp + fp)) * 100\n else:\n return np.nan\n\n\ndef calculate_recall(df, target_variable, prediction_variable):\n \"\"\"Calculate recall / true positive rate TPR / sensitivity\"\"\"\n tn, fp, fn, tp = confusion_matrix(\n df[target_variable], df[prediction_variable]\n ).ravel()\n if (tp + fn) != 0:\n return (tp / (tp + fn)) * 100\n else:\n return np.nan\n\n\ndef calculate_fpr(df, target_variable, prediction_variable):\n \"\"\"Calculate false positive rate FPR / false alarm ratio\"\"\"\n tn, fp, fn, tp = confusion_matrix(\n df[target_variable], df[prediction_variable]\n ).ravel()\n if (fp + tn) != 0:\n return (fp / (fp + tn)) * 100\n else:\n return np.nan\n\n\ndef _get_nn_idx(row, neigh, radius, columns):\n \"\"\"Retrieve the NN of a sample within a specified radius.\n\n Parameters\n ----------\n row : pd.Series\n neigh : sklearn.NearestNeighbors\n radius : float\n columns : list\n\n Returns\n -------\n list\n Nearest Neighbors of given sample within radius\n \"\"\"\n neigh_dist, neigh_idx = neigh.radius_neighbors([row[columns]], radius)\n return neigh_idx[0], len(neigh_idx[0])\n\n\ndef get_nn_idx(df, neigh, informative_variables, radius):\n \"\"\"Assign each sample the indizes of NN.\n\n Parameters\n ----------\n df : pd.DataFrame\n neigh : sklearn.NearestNeighbors\n informative_variables : list\n radius : float\n\n Returns\n -------\n list\n Score values: Consistency, Accuracy and Precision\n \"\"\"\n\n series = df.apply(\n lambda row: _get_nn_idx(row, neigh, radius, informative_variables), axis=1\n )\n\n df[[\"KNN_IDX\", \"Num_NN\"]] = pd.DataFrame(series.tolist(), index=series.index)\n return df\n\n\ndef calculate_performance_scores(df, target_variable, min_tau, max_tau, step_size):\n\n accuracy_scores = []\n precision_scores = []\n\n for tau in np.arange(min_tau, max_tau + step_size, step_size):\n\n model_col = \"Y_\" + str(int(tau * 100))\n df[model_col] = df[\"Y_SCORE\"].apply(lambda row: 1 if row >= tau else 0)\n\n accuracy_scores.extend([accuracy_score(df[target_variable], df[model_col])])\n precision_scores.extend([precision_score(df[target_variable], df[model_col])])\n\n return np.array(accuracy_scores), np.array(precision_scores)\n"
] |
[
[
"numpy.arange",
"sklearn.metrics.precision_score",
"sklearn.metrics.confusion_matrix",
"numpy.array",
"sklearn.metrics.accuracy_score"
]
] |
mpiecka/metalcode
|
[
"b0306dc9d8de53d797c946254fa63fa8b3fbf093"
] |
[
"metalcode_v1_0.py"
] |
[
"from numpy import *\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport os\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\", category=VisibleDeprecationWarning)\r\n\r\n# from metalcode_calib_metal import metal_transf\r\nfrom metalcode_calib_tempe import Teff\r\nfrom metalcode_calib_tempe import BolCorBV\r\nfrom metalcode_calib_absmg import absmag\r\nfrom metalcode_calib_clrex import clrexc_multiplier\r\nfrom metalcode_calc_lstsqr import LstSqr\r\n\r\n\r\n\r\n\r\n#-------------------------------------------------------\r\n#---------------------INITIALISATION--------------------\r\n#-------------------------------------------------------\r\ndef initialise():\r\n # input values - photometric system\r\n print('Initialisation ...')\r\n inputing=True\r\n list_vals=['G','J','2']\r\n while inputing:\r\n photosystem=input(' -- Pick photometric system (G,2,J): ')\r\n if (photosystem in list_vals):\r\n inputing=False\r\n # input values - grid spacing (age)\r\n inputing=True\r\n list_vals=[0.1,0.2]\r\n while inputing:\r\n try:\r\n age_step=float(input(' -- Pick grid spacing, age (0.1,0.2): '))\r\n if (age_step in list_vals):\r\n inputing=False\r\n except ValueError:\r\n pass\r\n # input values - grid spacing (Z)\r\n inputing=True\r\n list_vals=[0.005]\r\n while inputing:\r\n try:\r\n z_step=float(input(' -- Pick grid spacing, Z (0.005): '))\r\n if (z_step in list_vals):\r\n inputing=False\r\n except ValueError:\r\n pass\r\n # input values - Nredd\r\n inputing=True\r\n while inputing:\r\n try:\r\n Nredd=int(input(' -- Nredd (3): '))\r\n if ((Nredd == 0) or (Nredd % 2 == 1)):\r\n inputing=False\r\n except ValueError:\r\n pass\r\n # input values - reddening range\r\n inputing=True\r\n list_vals=[0.005]\r\n while inputing:\r\n try:\r\n redAdj=float(input(' -- Reddening range (0.0 .. 
1.0): '))\r\n if (redAdj>0.0 and redAdj<1.0):\r\n inputing=False\r\n except ValueError:\r\n pass\r\n # input values - Niter\r\n inputing=True\r\n while inputing:\r\n try:\r\n Niter=int(input(' -- Niter (6): '))\r\n if (Niter >= 3):\r\n inputing=False\r\n except ValueError:\r\n pass\r\n return (photosystem,age_step,z_step,Nredd,Niter,redAdj)\r\n#-------------------------------------------------------\r\n#-------------------END INITIALISATION------------------\r\n#-------------------------------------------------------\r\n\r\n\r\n\r\n\r\n\r\n#-------------------------------------------------------\r\n#-----------------------CALCULATIONS--------------------\r\n#-------------------------------------------------------\r\n# systematic corrections to the temperature calibration of observed data\r\n#\r\n# what you need is only q3 and q4 collections for all [Age,Z] in the grid,\r\n# then you can correct for the systematics, works very well for less bright stars,\r\n# but also slightly improves the situation for giants (especially for higher ages)\r\ndef sys_temp(sys_age,sys_z,sys_photosystem):\r\n global isochronesLTN\r\n global isochronesCMD\r\n global age_values\r\n global z_values\r\n\r\n sys_q1=[]\r\n sys_q2=[]\r\n sys_i=age_values.index(round(sys_age,1))\r\n sys_j=z_values.index(round(sys_z,3))\r\n sys_b,sys_a=isochronesLTN[sys_i][sys_j]\r\n sys_y,sys_x=isochronesCMD[sys_i][sys_j]\r\n sys_xx=[]\r\n sys_yy=[]\r\n sys_aa=[]\r\n sys_bb=[]\r\n for sys_k in range(len(sys_x)):\r\n sys_y0=logL( sys_y[sys_k] , BolCorBV(sys_x[sys_k],sys_y[sys_k],z_values[sys_j],sys_photosystem) )\r\n sys_x0=logTN( Teff(sys_x[sys_k],sys_y[sys_k],z_values[sys_j],sys_photosystem) , sys_y0 )\r\n if (sys_x0>-3 and sys_x0<3 and sys_y0>-5 and sys_y0<5):\r\n sys_yy.append(sys_y0)\r\n sys_xx.append(sys_x0)\r\n sys_aa.append(sys_a[sys_k])\r\n sys_bb.append(sys_b[sys_k])\r\n sys_q2.append(sys_a[sys_k]-sys_x0)\r\n sys_q1.append(sys_y0)\r\n\r\n sys_qx=sorted(sys_q1)\r\n sys_qy=[]\r\n for sys_i in range(len(sys_qx)):\r\n sys_qy.append(sys_q2[sys_q1.index(sys_qx[sys_i])])\r\n sys_q3=[]\r\n sys_q4=[]\r\n for sys_j in range(35):\r\n sys_qq=[]\r\n for sys_i in range(len(sys_qx)):\r\n if (sys_qx[sys_i]>(-1.0+sys_j*0.2) and sys_qx[sys_i]<=(-1.0+(sys_j+1)*0.2)):\r\n sys_qq.append(sys_qy[sys_i])\r\n if (len(sys_qq)>0):\r\n sys_q3.append((-1.0+(sys_j+0.5)*0.2))\r\n sys_q4.append(mean(sys_qq))\r\n\r\n return [sys_q3,sys_q4]\r\n\r\n\r\ndef funfind(funv,funq3,funq4):\r\n funw=0.0\r\n funt=True\r\n funi=0\r\n while funt:\r\n if (funq3[funi]<=funv and funq3[funi+1]>=funv):\r\n funt=False\r\n funqa=(funq4[funi]-funq4[funi+1])/(funq3[funi]-funq3[funi+1])\r\n funqb=funq4[funi]-funqa*funq3[funi]\r\n funw=funqa*funv+funqb\r\n else:\r\n funi+=1\r\n if (funi==len(funq3)-1):\r\n funt=False\r\n return funw\r\n\r\n\r\ndef logL(logL_V,logL_BC):\r\n return (1.896-0.4*(logL_V+logL_BC))\r\n\r\n\r\ndef logTN(TN_Teff,TN_logL):\r\n global ZAMS\r\n # TN_Teff must be in absolute value, not log\r\n logTZAMS=-9999.9\r\n ZAMS_T=list(ZAMS[:,0])\r\n ZAMS_L=list(ZAMS[:,1])\r\n \r\n if (TN_logL>=ZAMS_L[0] and TN_logL<=ZAMS_L[-1]):\r\n TN_i=0\r\n TN_found=0\r\n while (TN_found==0): \r\n if ((TN_logL>=ZAMS_L[TN_i]) and (TN_logL<=ZAMS_L[TN_i+1])):\r\n logTZAMS=TN_logL*(ZAMS_T[TN_i+1]-ZAMS_T[TN_i])/(ZAMS_L[TN_i+1]-ZAMS_L[TN_i])+ZAMS_T[TN_i]-(ZAMS_T[TN_i+1]-ZAMS_T[TN_i])/(ZAMS_L[TN_i+1]-ZAMS_L[TN_i])*ZAMS_L[TN_i]\r\n #logTZAMS=ZAMS_T[TN_i]\r\n TN_found=1\r\n elif (TN_i<len(ZAMS_T)-1):\r\n TN_i+=1\r\n else:\r\n TN_found=1\r\n elif (TN_logL<ZAMS_L[0]):\r\n 
logTZAMS=TN_logL*(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])+ZAMS_T[0]-(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])*ZAMS_L[0]\r\n else:\r\n logTZAMS=TN_logL*(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])+ZAMS_T[-2]-(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])*ZAMS_L[-2]\r\n \r\n return log10(TN_Teff)-logTZAMS\r\n\r\n\r\ndef logTZ(TN_Teff,TN_logL):\r\n global ZAMS\r\n # TN_Teff must be in absolute value, not log\r\n logTZAMS=-9999.9\r\n ZAMS_T=list(ZAMS[:,0])\r\n ZAMS_L=list(ZAMS[:,1])\r\n \r\n if (TN_logL>=ZAMS_L[0] and TN_logL<=ZAMS_L[-1]):\r\n TN_i=0\r\n TN_found=0\r\n while (TN_found==0): \r\n if ((TN_logL>=ZAMS_L[TN_i]) and (TN_logL<=ZAMS_L[TN_i+1])):\r\n logTZAMS=TN_logL*(ZAMS_T[TN_i+1]-ZAMS_T[TN_i])/(ZAMS_L[TN_i+1]-ZAMS_L[TN_i])+ZAMS_T[TN_i]-(ZAMS_T[TN_i+1]-ZAMS_T[TN_i])/(ZAMS_L[TN_i+1]-ZAMS_L[TN_i])*ZAMS_L[TN_i]\r\n #logTZAMS=ZAMS_T[TN_i]\r\n TN_found=1\r\n elif (TN_i<len(ZAMS_T)-1):\r\n TN_i+=1\r\n else:\r\n TN_found=1\r\n elif (TN_logL<ZAMS_L[0]):\r\n logTZAMS=TN_logL*(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])+ZAMS_T[0]-(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])*ZAMS_L[0]\r\n else:\r\n logTZAMS=TN_logL*(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])+ZAMS_T[-2]-(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])*ZAMS_L[-2]\r\n \r\n return logTZAMS\r\n\r\n\r\ndef isochrone_grid(grid_age,grid_z,grid_v):\r\n global isochronesLTN\r\n global isochronesCMD\r\n global isochronesZMS\r\n global isochronesTEF\r\n global age_values\r\n global z_values\r\n if (grid_v=='LTN'):\r\n grid_iso=isochronesLTN[age_values.index(grid_age)][z_values.index(grid_z)]\r\n grid_TN=[]\r\n for grid_i in range(len(array(grid_iso)[:,1])):\r\n grid_TN.append(logTN(10.0**array(grid_iso)[grid_i,1] , array(grid_iso)[grid_i,0]))\r\n return [array(grid_iso)[:,0] , array(grid_TN)]\r\n elif (grid_v=='CMD'):\r\n grid_iso=isochronesCMD[age_values.index(grid_age)][z_values.index(grid_z)]\r\n return [array(grid_iso)[:,0] , array(grid_iso)[:,1]]\r\n elif (grid_v=='ZMS'):\r\n grid_iso=isochronesZMS[age_values.index(grid_age)][z_values.index(grid_z)]\r\n grid_TZ=[]\r\n for grid_i in range(len(array(grid_iso)[:,1])):\r\n grid_TZ.append(logTZ(10.0**array(grid_iso)[grid_i,1] , array(grid_iso)[grid_i,0]))\r\n return [array(grid_iso)[:,0] , array(grid_TZ)]\r\n elif (grid_v=='TEF'):\r\n grid_iso=isochronesTEF[age_values.index(grid_age)][z_values.index(grid_z)]\r\n return [array(grid_iso)[:,0] , array(grid_iso)[:,1]]\r\n#-------------------------------------------------------\r\n#-------------------END CALCULATIONS--------------------\r\n#-------------------------------------------------------\r\n\r\n\r\n\r\n\r\n\r\n#-------------------------------------------------------\r\n#---------------------BINARY CHECK----------------------\r\n#------------------------------------------------------- \r\n# DO NOT USE !\r\n# DO NOT USE !\r\n# DO NOT USE !\r\ndef BinaryJob(dist,clrexc,metal,doname,filt,b,expcor):\r\n # expcor should be either 0 or 1\r\n if (expcor==0):\r\n pass\r\n else:\r\n expcor=1\r\n\r\n # transformation from Z to Fe/H\r\n # metal=metal_transf(0,metal)\r\n # not necessary anymore\r\n\r\n # transformation factor between E(B-V) and the chosen photometric system colour\r\n factor_clrexc=clrexc_multiplier(dist,b,filt,expcor)\r\n\r\n # input stellar data\r\n fdata=loadtxt('clusters/'+str(doname)+'.txt', skiprows=1)\r\n for i in range(len(fdata)):\r\n fdata[i][1]=fdata[i][1]-clrexc*factor_clrexc\r\n\r\n lumin0=[]\r\n tempe0=[]\r\n ident0=[]\r\n for i in range(len(fdata)):\r\n lumintest=( logL( 
absmag(clrexc,fdata[i][0],dist,filt,b,expcor) , BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) ) )\r\n tempetest=( logTN( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ) )\r\n if (tempetest>-3.0 and tempetest<3.0):\r\n lumin0.append( lumintest )\r\n tempe0.append( tempetest )\r\n ident0.append(i)\r\n \r\n tempeB=[]\r\n luminB=[]\r\n identB=[]\r\n for i in range(len(tempe0)):\r\n if (lumin0[i]<1.0 and lumin0[i]>-1.5):\r\n tempeB.append(tempe0[i])\r\n luminB.append(lumin0[i])\r\n identB.append(ident0[i])\r\n\r\n if (len(luminB)>5):\r\n binlow=polyfit(luminB,tempeB,3)\r\n tempeZ=[]\r\n for i in range(len(tempeB)):\r\n tempez=0.0\r\n for j in range(len(binlow)):\r\n tempez+=binlow[j]*luminB[i]**(len(binlow)-1-j)\r\n tempeZ.append(tempeB[i]-tempez)\r\n # skew was supposed to be a measure of the binarity, but it does not work\r\n #print(skew(tempeZ))\r\n \r\n saveh=histogram(tempeZ,bins=10)\r\n savea=[]\r\n saveb=[]\r\n for i in range(len(saveh[0])):\r\n savea.append(0.5*(saveh[1][i]+saveh[1][i+1]))\r\n saveb.append(saveh[0][i])\r\n limrng=savea[saveb.index(max(saveb))]-0.04*0.5\r\n\r\n saveid=[]\r\n for i in range(len(tempeZ)):\r\n # if smaller than possibly a binary our an outlier\r\n if (tempeZ[i]>limrng):\r\n saveid.append(identB[i])\r\n for i in range(len(tempe0)):\r\n if (lumin0[i]>=1.0 or lumin0[i]<=-1.5):\r\n saveid.append(ident0[i])\r\n \r\n return [saveid,0.0]\r\n else:\r\n return [[],0.0]\r\n#-------------------------------------------------------\r\n#-----------------END BINARY CHECK----------------------\r\n#------------------------------------------------------- \r\n\r\n\r\n\r\n\r\n\r\n#-------------------------------------------------------\r\n#---------------------ISOCHR CHECK----------------------\r\n#------------------------------------------------------- \r\ndef DoJob(dist,clrexc,metal,doname,filt,b,expcor,bincor,binlist):\r\n # isochrone information from the main body\r\n global isochronesLTN\r\n global isochronesCMD\r\n global isochronesZMS\r\n global isochronesTEF\r\n global age_values\r\n global z_values\r\n\r\n # expcor and bincor should be either 0 or 1 (1 only when starting from ext.maps)\r\n if (expcor==0):\r\n pass\r\n else:\r\n expcor=1\r\n if (bincor==0):\r\n pass\r\n else:\r\n bincor=1\r\n\r\n # transformation from Z to Fe/H, based on Pöhnl & Paunzen (2010)\r\n # metal=metal_transf(0,metal)\r\n\r\n # transformation factor between E(B-V) and the chosen photometric system colour\r\n factor_clrexc=clrexc_multiplier(dist,b,filt,expcor)\r\n\r\n # input stellar data and transform from CMD space to logL-TN space\r\n # if bincor=1 then the list of indices will be used for fdata instead of the whole list\r\n # only lumin0, tempe0 and count0 are necessary for the procedure, the rest is for debugging\r\n if (bincor==0):\r\n fdata=loadtxt('clusters/'+str(doname)+'.txt', skiprows=1)\r\n BpRp=[]\r\n for i in range(len(fdata)): \r\n BpRp.append(0.0+fdata[i][1])\r\n fdata[i][1]=fdata[i][1]-clrexc*factor_clrexc\r\n lumin0=[]\r\n tempe0=[]\r\n count0=[]\r\n Gmag0=[]\r\n Gmag1=[]\r\n Gmag2=[]\r\n BpRp0=[]\r\n BpRp1=[]\r\n AuxBC=[]\r\n AuxTT=[]\r\n AuxTZ=[]\r\n for i in range(len(fdata)):\r\n lumintest=( logL( absmag(clrexc,fdata[i][0],dist,filt,b,expcor) , BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) ) )\r\n tempetest=( logTN( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ) )\r\n if (tempetest>-3.0 and tempetest<3.0 and lumintest>-5.0 and 
lumintest<5.0):\r\n lumin0.append( lumintest )\r\n tempe0.append( tempetest )\r\n count0.append( 1.0 )\r\n Gmag0.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor))\r\n Gmag1.append(fdata[i][0])\r\n Gmag2.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor)+5*log10(dist)-5.0)\r\n BpRp0.append(fdata[i][1])\r\n BpRp1.append(BpRp[i])\r\n AuxBC.append(BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))\r\n AuxTT.append(Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))\r\n AuxTZ.append(logTZ( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ))\r\n else:\r\n fdata=loadtxt(str(doname)+'.txt', skiprows=1)\r\n BpRp=[]\r\n for i in binlist: \r\n BpRp.append(0.0+fdata[i][1])\r\n fdata[i][1]=fdata[i][1]-clrexc*factor_clrexc\r\n lumin0=[]\r\n tempe0=[]\r\n count0=[]\r\n Gmag0=[]\r\n Gmag1=[]\r\n Gmag2=[]\r\n BpRp0=[]\r\n BpRp1=[]\r\n AuxBC=[]\r\n AuxTT=[]\r\n AuxTZ=[]\r\n for i in binlist:\r\n lumintest=( logL( absmag(clrexc,fdata[i][0],dist,filt,b,expcor) , BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) ) )\r\n tempetest=( logTN( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ) )\r\n if (tempetest>-3.0 and tempetest<3.0 and lumintest>-5.0 and lumintest<5.0):\r\n lumin0.append( lumintest )\r\n tempe0.append( tempetest )\r\n count0.append( 1.0 )\r\n Gmag0.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor))\r\n Gmag1.append(fdata[i][0])\r\n Gmag2.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor)+5*log10(dist)-5.0)\r\n BpRp0.append(fdata[i][1])\r\n BpRp1.append(BpRp[i])\r\n AuxBC.append(BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))\r\n AuxTT.append(Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))\r\n AuxTZ.append(logTZ( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ))\r\n\r\n # finding the best fit in isochrone grid\r\n fitI=-1\r\n fitJ=-1\r\n fitX=1.0e16\r\n for i in range(len(age_values)):\r\n for j in range(len(z_values)):\r\n tempe1=[]\r\n # apply TN systematic corrections, fitting section\r\n for l in range(len(tempe0)):\r\n tempe1.append(tempe0[l]+funfind(lumin0[l],temp_corr_grid[i][j][0],temp_corr_grid[i][j][1]))\r\n fitvalues=LstSqr(lumin0,tempe1,count0,isochronesLTN[i][j][0],isochronesLTN[i][j][1])\r\n if (fitvalues<fitX):\r\n fitX=fitvalues\r\n fitI=0+i\r\n fitJ=0+j\r\n\r\n # apply TN systematic corrections, results section\r\n AuxSY=[]\r\n for l in range(len(tempe0)):\r\n AuxSY.append(funfind(lumin0[l],temp_corr_grid[fitI][fitJ][0],temp_corr_grid[fitI][fitJ][1]))\r\n tempe0[l]+=funfind(lumin0[l],temp_corr_grid[fitI][fitJ][0],temp_corr_grid[fitI][fitJ][1])\r\n\r\n return([age_values[fitI],z_values[fitJ],fitX,[tempe0,lumin0,isochronesLTN[fitI][fitJ][1],isochronesLTN[fitI][fitJ][0],AuxSY,isochronesZMS[fitI][fitJ][1],isochronesTEF[fitI][fitJ][1]],[BpRp0,Gmag0,isochronesCMD[fitI][fitJ][1],isochronesCMD[fitI][fitJ][0],BpRp1,Gmag1,Gmag2,AuxBC,AuxTT,AuxTZ]])\r\n#-------------------------------------------------------\r\n#-----------------END ISOCHR CHECK----------------------\r\n#------------------------------------------------------- \r\n\r\n\r\n\r\n\r\n#<<<<---------------------- ------------------- ---------------------->>>>\r\n#<<<<---------------------- ------------------- ---------------------->>>>\r\n#<<<<---------------------- PROGRAM STARTS HERE ---------------------->>>>\r\n#<<<<---------------------- ------------------- 
---------------------->>>>\r\n#<<<<---------------------- ------------------- ---------------------->>>>\r\n\r\n# only for testing\r\ndebugTest=False\r\n\r\n\r\n# initialisation, loads photometric system, grid spacing and iteration numbers\r\nphotosystem,age_step,z_step,Nredd,Niter,redAdj=initialise()\r\n\r\n\r\nprint('Loading list of clusters ...')\r\ntry:\r\n pathfile=os.getcwd()\r\n os.chdir(pathfile)\r\n with open(\"clusters/_complete.txt\",\"r\") as f_data:\r\n all_data=[x.split() for x in f_data.readlines()]\r\n dataini=array([list(map(str,x)) for x in all_data[1:]])\r\nexcept FileNotFoundError:\r\n pathfile=os.path.dirname(__file__)\r\n os.chdir(pathfile)\r\n with open(\"clusters/_complete.txt\",\"r\") as f_data:\r\n all_data=[x.split() for x in f_data.readlines()]\r\n dataini=array([list(map(str,x)) for x in all_data[1:]])\r\nclust_list=[]\r\nini_D_list=[]\r\nini_E_list=[]\r\nexp_A_list=[]\r\nexp_Z_list=[]\r\npar_b_list=[]\r\nfor i in range(len(dataini)):\r\n clust_list.append(dataini[i][0])\r\n ini_D_list.append(float(dataini[i][3]))\r\n ini_E_list.append(float(dataini[i][4]))\r\n par_b_list.append(float(dataini[i][1]))\r\n exp_A_list.append(-999)\r\n exp_Z_list.append(-999)\r\n\r\nprint('Preparing isochrone grid ...')\r\n\r\n# load ZAMS for the calculations\r\nwith open(\"ZAMS_014.txt\",\"r\") as f_data:\r\n all_data=[x.split() for x in f_data.readlines()]\r\n ZAMS=array([list(map(float,x)) for x in all_data[0:]])\r\n\r\n# preparing isochrone grid\r\nwith open(\"isochrones\"+photosystem+\".txt\",\"r\") as f_data:\r\n all_data=[x.split() for x in f_data.readlines()]\r\n isochrones_complete=array([list(map(str,x)) for x in all_data[13:]])\r\nf_data=[]\r\nall_data=[]\r\n\r\n# first, create a list of ages and metallicities available\r\nage_values=[6.6]\r\n# age_step=0.2\r\nage_last=10.0\r\nwhile (age_values[-1]!=age_last):\r\n age_values.append(round(age_values[-1]+age_step , 1))\r\nz_values=[0.005]\r\n# z_step=0.005\r\nz_last=0.040\r\nwhile (z_values[-1]!=z_last):\r\n z_values.append(round(z_values[-1]+z_step , 3))\r\n\r\n# create the grid, using age for rows and metallicity for columns\r\nisochronesLTN=[]\r\nisochronesCMD=[]\r\nisochronesZMS=[]\r\nisochronesTEF=[]\r\nfor i in range(len(age_values)):\r\n isohelp1=[]\r\n isohelp2=[]\r\n isohelp3=[]\r\n isohelp4=[]\r\n for j in range(len(z_values)):\r\n isohelp1.append([])\r\n isohelp2.append([])\r\n isohelp3.append([])\r\n isohelp4.append([])\r\n isochronesLTN.append(isohelp1)\r\n isochronesCMD.append(isohelp2)\r\n isochronesZMS.append(isohelp3)\r\n isochronesTEF.append(isohelp4)\r\n \r\n# fill in the grid from the loaded data (isochrones_complete)\r\nfor i in range(len(isochrones_complete)):\r\n if (age_values.count(round(float(isochrones_complete[i][2]),1))==1 and z_values.count(round(float(isochrones_complete[i][0]),3))==1):\r\n age_idx=age_values.index(round(float(isochrones_complete[i][2]),1))\r\n z_idx=z_values.index(round(float(isochrones_complete[i][0]),3))\r\n isochronesLTN[age_idx][z_idx].append([float(isochrones_complete[i][6]) , float(isochrones_complete[i][7])])\r\n isochronesZMS[age_idx][z_idx].append([float(isochrones_complete[i][6]) , float(isochrones_complete[i][7])])\r\n isochronesTEF[age_idx][z_idx].append([float(isochrones_complete[i][6]) , float(isochrones_complete[i][7])])\r\n if (photosystem=='G'):\r\n isochronesCMD[age_idx][z_idx].append([float(isochrones_complete[i][28]) , (float(isochrones_complete[i][29])-float(isochrones_complete[i][30]))])\r\n elif (photosystem=='2'):\r\n 
isochronesCMD[age_idx][z_idx].append([float(isochrones_complete[i][28]) , (float(isochrones_complete[i][28])-float(isochrones_complete[i][30]))])\r\n elif (photosystem=='J'):\r\n isochronesCMD[age_idx][z_idx].append([float(isochrones_complete[i][30]) , (float(isochrones_complete[i][29])-float(isochrones_complete[i][30]))])\r\nisochrones_complete=[]\r\n\r\n# transform isochrones to the normalised grid\r\nfor main_i in range(len(age_values)):\r\n for main_j in range(len(z_values)):\r\n isochronesLTN[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'LTN')\r\n isochronesCMD[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'CMD')\r\n isochronesZMS[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'ZMS')\r\n isochronesTEF[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'TEF')\r\n\r\n# prepare grid for corrected systematics for temperature calibration\r\ntemp_corr_grid=[]\r\nfor main_i in range(len(age_values)):\r\n temp_corr_help=[]\r\n for main_j in range(len(z_values)):\r\n temp_corr_help.append(sys_temp(age_values[main_i],z_values[main_j],photosystem))\r\n temp_corr_grid.append(temp_corr_help)\r\n\r\nprint('Starting main procedure ...\\n')\r\nstart_time = time.time()\r\nfor cc in range(len(clust_list)):\r\n try:\r\n start_time_1 = time.time()\r\n # starting values (colour excess is E(B-V), this is recalculated to whatever system\r\n # using the factor_clrexc)\r\n iniD=ini_D_list[cc]\r\n iniE=ini_E_list[cc]\r\n iniZ=0.014\r\n clustname=clust_list[cc]+'_'+photosystem\r\n\r\n # numbX = number of iterations of given quantity\r\n # rangX = range of itereations centred around the ini value\r\n # valX = current value in iteration\r\n numbD=1\r\n rangD=0.8*iniD\r\n if (iniE<0.0 or Nredd==0):\r\n numbE=10\r\n rangE=[0.010,0.040,0.080,0.125,0.250,0.500,0.750,1.000,1.500,2.000]\r\n elif (Nredd==1):\r\n numbE=0+Nredd\r\n rangE=[iniE]\r\n else:\r\n numbE=0+Nredd\r\n rangE=[]\r\n for main_i in range(numbE):\r\n rangE.append(iniE-redAdj*iniE+2*redAdj*iniE*main_i/float(numbE-1))\r\n\r\n # start calculating fits for the grid of parameters, assume no binaries\r\n valD=iniD-0.0*rangD\r\n result_grid=[]\r\n for main_i in range(numbD):\r\n for main_j in range(numbE):\r\n valE=rangE[main_j]\r\n final_z=0.000+iniZ\r\n res_ini=0.000\r\n check_limit=0\r\n while ((round(res_ini,3)!=round(final_z,3)) and check_limit<Niter):\r\n res_ini=0.000+final_z\r\n final_age,final_z,final_fit,final_data,final_data2 = DoJob(valD,valE,res_ini,clustname,photosystem,par_b_list[cc],0,0,[])\r\n check_limit+=1\r\n result_grid.append([valD,valE,final_age,final_z,final_fit,final_data,final_data2,check_limit])\r\n print(check_limit)\r\n valD+=rangD/float(numbD-1+1)\r\n\r\n # results for all reddening values are sorted are written in a logfile\r\n print(clust_list[cc])\r\n result_grid=array(result_grid)\r\n sorted_result_grid=result_grid[result_grid[:,4].argsort()]\r\n print('\\n')\r\n fsave = open('finished/_logfile.txt', \"a+\")\r\n fsave.write('Cluster: %s \\n' % (clust_list[cc]))\r\n fsave.write('Inputs: %s ; %f ; %f ; %d ; %f ; %d \\n' % (photosystem,age_step,z_step,Nredd,redAdj,Niter))\r\n fsave.write('------------------------------------------------------------------------ \\n')\r\n for main_i in range(len(sorted_result_grid)):\r\n fsave.write('Parameters: %d %.3f %.1f %.3f ..... 
fit:%.7f iter:%d \\n' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4],sorted_result_grid[main_i][7]))\r\n fsave.write('# FINISHED (%.3f min) \\n\\n' % ((time.time() - start_time_1)/60.0))\r\n fsave.close()\r\n \r\n # the three best results are plotted in CMD and LTN diagrams\r\n # if debug mode is on, additional data for individual points are returned for the three fits\r\n fitcurves=['r-','g--','b:']\r\n fitpoints=['ko','ko','ko']\r\n plt.figure(figsize=(12,6))\r\n\r\n for main_i in range(len(fitcurves)):\r\n try:\r\n plt.subplot(2,3,main_i+1)\r\n plt.plot(sorted_result_grid[main_i][5][0],sorted_result_grid[main_i][5][1],fitpoints[main_i],ms=4.0,alpha=0.2)\r\n plt.plot(sorted_result_grid[main_i][5][2],sorted_result_grid[main_i][5][3],fitcurves[main_i],alpha=0.6)\r\n if (main_i==0):\r\n plt.xlabel('TN')\r\n plt.ylabel('log L')\r\n else:\r\n plt.xlabel('TN')\r\n #plt.xlim(-0.8,0.2)\r\n #plt.ylim(-2.5,4.0)\r\n plt.xlim( min(sorted_result_grid[main_i][5][0]) - 0.25*(max(sorted_result_grid[main_i][5][0])-min(sorted_result_grid[main_i][5][0])) , max(sorted_result_grid[main_i][5][0]) + 0.25*(max(sorted_result_grid[main_i][5][0])-min(sorted_result_grid[main_i][5][0])) )\r\n plt.ylim( min(sorted_result_grid[main_i][5][1]) - 0.05*(max(sorted_result_grid[main_i][5][1])-min(sorted_result_grid[main_i][5][1])) , max(sorted_result_grid[main_i][5][1]) + 0.05*(max(sorted_result_grid[main_i][5][1])-min(sorted_result_grid[main_i][5][1])) )\r\n plt.title('%d ; %.3f ; %.1f ; %.3f ... %.6f' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))\r\n plt.locator_params(axis='x',nbins=7)\r\n\r\n if (debugTest):\r\n fsave = open('finished/'+str(clustname)+'_aux_R'+str(main_i+1)+'_isoch.txt', \"w+\")\r\n fsave.write('Cluster: %s \\n' % (clustname))\r\n fsave.write('Parameters: %d ; %.3f ; %.1f ; %.3f ... 
%.6f \\n' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))\r\n fsave.write('Inputs: %s ; %f ; %f ; %d ; %f ; %d \\n' % (photosystem,age_step,z_step,Nredd,redAdj,Niter))\r\n fsave.write('---------------------------------------------------------------- \\n')\r\n fsave.write('%7s %7s %7s %7s \\n' % ('T_eff','T_ZAMS','T_N','logL'))\r\n for main_j in range(len(sorted_result_grid[main_i][5][2])):\r\n fsave.write('%7.4f %7.4f %7.4f %7.3f \\n' % (sorted_result_grid[main_i][5][6][main_j],sorted_result_grid[main_i][5][5][main_j],sorted_result_grid[main_i][5][2][main_j],sorted_result_grid[main_i][5][3][main_j]))\r\n fsave.close()\r\n except IndexError:\r\n if ((Nredd==1 and main_i>0)==False):\r\n print('err')\r\n\r\n try:\r\n plt.subplot(2,3,main_i+4)\r\n plt.plot(sorted_result_grid[main_i][6][0],sorted_result_grid[main_i][6][1],fitpoints[main_i],ms=4.0,alpha=0.2)\r\n plt.plot(sorted_result_grid[main_i][6][2],sorted_result_grid[main_i][6][3],fitcurves[main_i],alpha=0.6)\r\n if (main_i==0):\r\n if (photosystem=='G'):\r\n plt.xlabel('(BP-RP)_0 [mag]')\r\n plt.ylabel('M_G [mag]')\r\n elif (photosystem=='2'):\r\n plt.xlabel('(J-Ks)_0 [mag]')\r\n plt.ylabel('M_J [mag]')\r\n elif (photosystem=='J'):\r\n plt.xlabel('(B-V)_0 [mag]')\r\n plt.ylabel('M_V [mag]')\r\n else:\r\n if (photosystem=='G'):\r\n plt.xlabel('(BP-RP)_0 [mag]')\r\n elif (photosystem=='2'):\r\n plt.xlabel('(J-Ks)_0 [mag]')\r\n elif (photosystem=='J'):\r\n plt.xlabel('(B-V)_0 [mag]')\r\n # plt.xlim(-0.8,4.0)\r\n # plt.ylim(15,-5)\r\n plt.xlim( min(sorted_result_grid[main_i][6][0]) - 0.10*(max(sorted_result_grid[main_i][6][0])-min(sorted_result_grid[main_i][6][0])) , max(sorted_result_grid[main_i][6][0]) + 0.10*(max(sorted_result_grid[main_i][6][0])-min(sorted_result_grid[main_i][6][0])) )\r\n plt.ylim( max(sorted_result_grid[main_i][6][1]) + 0.10*(max(sorted_result_grid[main_i][6][1])-min(sorted_result_grid[main_i][6][1])) , min(sorted_result_grid[main_i][6][1]) - 0.10*(max(sorted_result_grid[main_i][6][1])-min(sorted_result_grid[main_i][6][1])) )\r\n #plt.gca().invert_yaxis()\r\n #plt.title('%d ; %.3f ; %.1f ; %.3f ... %.6f' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))\r\n\r\n if (debugTest):\r\n fsave = open('finished/'+str(clustname)+'_aux_R'+str(main_i+1)+'_clust.txt', \"w+\")\r\n fsave.write('Cluster: %s \\n' % (clustname))\r\n fsave.write('Parameters: %d ; %.3f ; %.1f ; %.3f ... 
%.6f \\n' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))\r\n fsave.write('Inputs: %s ; %f ; %f ; %d ; %f ; %d \\n' % (photosystem,age_step,z_step,Nredd,redAdj,Niter))\r\n fsave.write('---------------------------------------------------------------- \\n')\r\n fsave.write('%6s %6s %6s %6s %6s %6s %7s %7s %7s %7s %7s \\n' % ('Color','Color0','Mag','Mag0','AbsMag','BC_Mag','T_eff','T_ZAMS','T_corr','T_N','logL'))\r\n for main_j in range(len(sorted_result_grid[main_i][5][0])):\r\n fsave.write('%6.3f %6.3f %6.3f %6.3f %6.3f %6.3f %7.4f %7.4f %7.4f %7.4f %7.3f \\n' % (sorted_result_grid[main_i][6][4][main_j],sorted_result_grid[main_i][6][0][main_j],sorted_result_grid[main_i][6][5][main_j],sorted_result_grid[main_i][6][6][main_j],sorted_result_grid[main_i][6][1][main_j],sorted_result_grid[main_i][6][7][main_j],log10(sorted_result_grid[main_i][6][8][main_j]),sorted_result_grid[main_i][6][9][main_j],sorted_result_grid[main_i][5][4][main_j],sorted_result_grid[main_i][5][0][main_j],sorted_result_grid[main_i][5][1][main_j]))\r\n fsave.close()\r\n except IndexError:\r\n if ((Nredd==1 and main_i>0)==False):\r\n print('err')\r\n\r\n plt.tight_layout()\r\n plt.savefig('finished/'+str(clustname)+'.png',dpi=300,bbox_inches=\"tight\")\r\n plt.close()\r\n\r\n except OSError:\r\n # exception can be encountered if the list of clusters does not match the provided data files\r\n # names of the clusters should coincide with the names of the data files (up to the photo.system designation)\r\n print('no file: %s' % (clustname))\r\n\r\nisochrones=[]\r\nprint('\\n')\r\nprint('Finished!')\r\nprint(\"Runtime: %s min\" % ((time.time() - start_time)/60.0))"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.locator_params",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] |
haoyu0831/Covid-Mobility-Network-Analysis
|
[
"8464b0a25db03585219c1fc6d8e257a9ed826628"
] |
[
"model.py"
] |
[
"import networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n'''\nThis function is to generate a graph with data produced by read_file.py\n'''\n\n\ndef generate_d_network(dest_cbgs):\n G = nx.DiGraph()\n # add edges\n for i in dest_cbgs:\n G.add_edge(*i, weight=dest_cbgs[i])\n\n return G\n\n\n'''\nThis function has almost same function as above but generate a undirected Graph\n'''\n\n\ndef generate_network(dest_cbgs):\n G = nx.Graph()\n # add edges\n for i, j in dest_cbgs.keys():\n if (i, j) not in G.edges:\n G.add_edge(i, j, weight=dest_cbgs[i, j])\n else:\n weight = dest_cbgs[i, j] + G.edges[i, j]['weight']\n G.add_edge(i, j, weight=weight)\n\n return G\n\n\n'''\nthis function is to generate percolation step of undirected network with threshold\n'''\n\n\ndef generate_network_threshold(g, threshold=0):\n new_g = nx.Graph()\n new_g.add_nodes_from(g.nodes)\n\n edge_list = list(g.edges)\n for i, j in edge_list:\n weight = g.edges[i, j]['weight']\n if weight >= threshold:\n new_g.add_edge(i, j, weight=weight)\n\n return new_g\n\n\n'''\nthis function is to generate percolation step of directed network with threshold\n'''\n\n\ndef generate_d_network_threshold(g, threshold=0):\n new_g = nx.Graph()\n\n edge_list = list(g.edges)\n for i, j in edge_list:\n if g.edges[i, j]['weight'] >= threshold:\n new_g.add_edge(i, j)\n\n return new_g\n\n\n'''\nThis function is find max weight of a graph\n'''\n\n\ndef max_weight(g):\n m_weight = 0\n for i in g.edges:\n weight = g.edges[i[0], i[1]]['weight']\n if weight > m_weight:\n m_weight = weight\n\n return m_weight\n\n\n'''\nThis function is to return the number of elements in the largest and second largest SCC\n'''\n\n\ndef num_g_sg(scc):\n\n len_scc = list(map(len, scc))\n len_scc.sort()\n len_scc.reverse()\n\n if len(len_scc) == 0:\n return 0, 0\n elif len(len_scc) == 1:\n return len_scc[0], 0\n else:\n return len_scc[0], len_scc[1]\n\n\n'''\nThis function finds the largest and second largest before the largest value\n'''\n\n\ndef l_sl_value(li):\n if len(li) == 0:\n return 0,0\n l = [i for i, j in enumerate(li) if j == max(li)][0]\n sublist = li[:l]\n if l == 0:\n sl = 0\n else:\n sl = [i for i, j in enumerate(sublist) if j == max(sublist)][0]\n\n return l, sl\n\n\n'''\nThis function is to calculate the number of elements in largest and second largest SCC changing with thresholds\n'''\n\n\ndef calc_g_sg(g, start, interval, d1=None, final=100):\n node_size = len(g.nodes())\n tmp_g = node_size\n\n tmp_t = start\n thresholds = []\n num_g = []\n num_sg = []\n dev_g = []\n dev_sg = []\n num_rest = []\n lens=[]\n\n while tmp_g > node_size/final and tmp_g != 1:\n\n tmp_n = generate_network_threshold(g, tmp_t)\n lens.append(len(tmp_n.edges))\n scc = sorted(list(nx.connected_components(tmp_n)), key=len, reverse=True)\n tmp_g, tmp_sg = num_g_sg(scc)\n num_g.append(tmp_g)\n num_sg.append(tmp_sg)\n if len(scc) < 2:\n num_rest.append(0)\n else:\n num_rest.append(sum(map(len, scc[1:]))/(len(scc)-1))\n\n thresholds.append(tmp_t)\n if final != 100 and tmp_t > 20:\n break\n\n if interval > 1 and tmp_t < 100:\n tmp_t += 1\n else:\n tmp_t += interval\n\n if d1 is None:\n continue\n if len(scc) != 0:\n dev_g.append(sum_device(scc[0], d1))\n if len(scc) == 1:\n dev_sg.append(0)\n else:\n dev_sg.append(sum_device(scc[1], d1))\n else:\n dev_sg.append(0)\n dev_g.append(0)\n\n return np.array(thresholds), np.array(num_g), np.array(num_sg), np.array(num_rest), np.array(dev_g), np.array(dev_sg), np.array(lens)\n\n\n'''\nThis function calculate the sum of device 
in GC and SGC\n'''\n\n\ndef sum_device(nodes, d1):\n s = 0\n for i in nodes:\n if i in d1.keys():\n s += d1[i]\n\n return s\n\n\n'''\nThis function is to find the bottleneck by analyzing the threshold around when the second SCC is the largest\n'''\n\n\ndef calc_bottleneck(g, thresholds, num_sg):\n max_index = [i for i, j in enumerate(num_sg) if j == max(num_sg)][0]\n bn_weight_b = thresholds[max_index]\n interval = thresholds[1] - thresholds[0]\n bn = []\n\n G_sg_largest = generate_network_threshold(g, bn_weight_b)\n\n if type(G_sg_largest) == nx.classes.digraph.DiGraph:\n scc = list(nx.strongly_connected_components(G_sg_largest))\n else:\n scc = list(nx.connected_components(G_sg_largest))\n\n scc.sort(key=len)\n scc_sg_largest = scc[-1]\n scc_sg_s_largest = scc[-2]\n\n for i, j in g.edges():\n if bn_weight_b - interval < g.edges[(i, j)]['weight'] <= bn_weight_b:\n if (i in scc_sg_largest and j in scc_sg_s_largest) or (j in scc_sg_largest and i in scc_sg_s_largest):\n bn.append((i, j))\n\n return bn, bn_weight_b\n\n\n'''\nThis function is to find the bottleneck by analyzing the threshold around when the second SCC is the largest\n'''\n\n\ndef calc_bottleneck_c(g, thresholds, qc):\n interval = thresholds[1] - thresholds[0]\n bn = set()\n\n G_sg_largest = generate_network_threshold(g, qc)\n\n if type(G_sg_largest) == nx.classes.digraph.DiGraph:\n scc = list(nx.strongly_connected_components(G_sg_largest))\n else:\n scc = list(nx.connected_components(G_sg_largest))\n\n scc.sort(key=len)\n scc_sg_largest = scc[-1]\n if len(scc) == 1:\n return set()\n scc_sg_s_largest = scc[-2]\n\n for i, j in g.edges():\n if qc - interval < g.edges[(i, j)]['weight'] <= qc:\n if (i in scc_sg_largest and j in scc_sg_s_largest) or (j in scc_sg_largest and i in scc_sg_s_largest):\n bn.add((i, j))\n\n return bn\n\n\n'''\nThis function calculates the total flux of a graph\n'''\n\n\ndef total_flux(g):\n flux = 0\n for i in g.edges():\n flux += g.edges[i]['weight']\n\n return flux\n\n\n'''\nThis function returns latitude and longitude of a point\n'''\n\n\ndef get_xy(pt):\n return [pt.x, pt.y]\n\n\n# file = 'data/01/01/2020-01-01-social-distancing.csv.gz'\n# G = generate_network(*read_file(file, 25), 10)\n# print(num_g_sg(G))\n\n# def bottlenecks(self):\n# g_perco_b = generate_network_threshold(self.g, self.qc - .25)\n# s_cc = sorted(list(nx.connected_components(self.g_perco)), key=len, reverse=True)[1]\n# l_cc = sorted(list(nx.connected_components(g_perco_b)), key=len, reverse=True)[0]\n# l_cc = l_cc.difference(s_cc)\n#\n# bc = set()\n#\n# for i, j in g_perco_b.edges():\n# if self.qc - .25 <= g_perco_b.edges[i, j]['weight'] < self.qc:\n# if (i in s_cc and j in l_cc) or (i in l_cc and j in s_cc):\n# bc.add((i, j))\n#\n# return bc\n\ndef calc_bn_set_diff(g_b, g):\n bn = set()\n g_b_1 = g_b.subgraph(sorted(list(nx.connected_components(g_b)), key=len, reverse=True)[0])\n g_b_link = set(g_b_1.edges())\n tmp = sorted(list(nx.connected_components(g)), key=len, reverse=True)\n g_1, g_2 = tmp[0], tmp[1]\n tmp_0 = set()\n tmp_1 = set()\n\n for i, j in g_b_link.difference(set(g.edges())):\n if (i in g_1 and j in g_1) or (i in g_2 and j in g_2):\n continue\n\n if g_b_1.degree(i) == 1 or g_b_1.degree(j) == 1:\n continue\n\n if (i in g_2 and j in g_1) or (i in g_2 and j in g_1):\n bn.add((i,j))\n continue\n\n if (i in g_1) or (j in g_1):\n tmp_0.add((i,j))\n\n if (i in g_2) or (j in g_2):\n tmp_1.add((i,j))\n\n for i,j in tmp_0:\n for k in tmp_1:\n if i in k or j in k:\n bn.add((i,j))\n bn.add(k)\n\n return bn\n\n\ndef select(dic, 
num):\n tmp = set()\n for i in dic.keys():\n if dic[i] > num:\n tmp.add(str(i))\n\n return tmp\n"
] |
[
[
"numpy.array"
]
] |
cperreault11/scikit-learn
|
[
"0b78cb00e69109f498c326ad84953954e349d11f"
] |
[
"sklearn/dummy.py"
] |
[
"# Author: Mathieu Blondel <[email protected]>\n# Arnaud Joly <[email protected]>\n# Maheshakya Wijewardena <[email protected]>\n# License: BSD 3 clause\n\nimport warnings\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom .base import BaseEstimator, ClassifierMixin, RegressorMixin\nfrom .base import MultiOutputMixin\nfrom .utils import check_random_state\nfrom .utils import deprecated\nfrom .utils.validation import _num_samples\nfrom .utils.validation import check_array\nfrom .utils.validation import check_consistent_length\nfrom .utils.validation import check_is_fitted, _check_sample_weight\nfrom .utils.random import _random_choice_csc\nfrom .utils.stats import _weighted_percentile\nfrom .utils.multiclass import class_distribution\n\n\nclass DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator):\n \"\"\"DummyClassifier makes predictions that ignore the input features.\n\n This classifier serves as a simple baseline to compare against other more\n complex classifiers.\n\n The specific behavior of the baseline is selected with the `strategy`\n parameter.\n\n All strategies make predictions that ignore the input feature values passed\n as the `X` argument to `fit` and `predict`. The predictions, however,\n typically depend on values observed in the `y` parameter passed to `fit`.\n\n Note that the \"stratified\" and \"uniform\" strategies lead to\n non-deterministic predictions that can be rendered deterministic by setting\n the `random_state` parameter if needed. The other strategies are naturally\n deterministic and, once fit, always return a the same constant prediction\n for any value of `X`.\n\n Read more in the :ref:`User Guide <dummy_estimators>`.\n\n .. versionadded:: 0.13\n\n Parameters\n ----------\n strategy : {\"most_frequent\", \"prior\", \"stratified\", \"uniform\", \\\n \"constant\"}, default=\"prior\"\n Strategy to use to generate predictions.\n\n * \"most_frequent\": the `predict` method always returns the most\n frequent class label in the observed `y` argument passed to `fit`.\n The `predict_proba` method returns the matching one-hot encoded\n vector.\n * \"prior\": the `predict` method always returns the most frequent\n class label in the observed `y` argument passed to `fit` (like\n \"most_frequent\"). ``predict_proba`` always returns the empirical\n class distribution of `y` also known as the empirical class prior\n distribution.\n * \"stratified\": the `predict_proba` method randomly samples one-hot\n vectors from a multinomial distribution parametrized by the empirical\n class prior probabilities.\n The `predict` method returns the class label which got probability\n one in the one-hot vector of `predict_proba`.\n Each sampled row of both methods is therefore independent and\n identically distributed.\n * \"uniform\": generates predictions uniformly at random from the list\n of unique classes observed in `y`, i.e. each class has equal\n probability.\n * \"constant\": always predicts a constant label that is provided by\n the user. This is useful for metrics that evaluate a non-majority\n class.\n\n .. 
versionchanged:: 0.24\n The default value of `strategy` has changed to \"prior\" in version\n 0.24.\n\n random_state : int, RandomState instance or None, default=None\n Controls the randomness to generate the predictions when\n ``strategy='stratified'`` or ``strategy='uniform'``.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n constant : int or str or array-like of shape (n_outputs,), default=None\n The explicit constant as predicted by the \"constant\" strategy. This\n parameter is useful only for the \"constant\" strategy.\n\n Attributes\n ----------\n classes_ : ndarray of shape (n_classes,) or list of such arrays\n Unique class labels observed in `y`. For multi-output classification\n problems, this attribute is a list of arrays as each output has an\n independent set of possible classes.\n\n n_classes_ : int or list of int\n Number of label for each output.\n\n class_prior_ : ndarray of shape (n_classes,) or list of such arrays\n Frequency of each class observed in `y`. For multioutput classification\n problems, this is computed independently for each output.\n\n n_outputs_ : int\n Number of outputs.\n\n n_features_in_ : `None`\n Always set to `None`.\n\n .. versionadded:: 0.24\n .. deprecated:: 1.0\n Will be removed in 1.0\n\n sparse_output_ : bool\n True if the array returned from predict is to be in sparse CSC format.\n Is automatically set to True if the input `y` is passed in sparse\n format.\n\n See Also\n --------\n DummyRegressor : Regressor that makes predictions using simple rules.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.dummy import DummyClassifier\n >>> X = np.array([-1, 1, 1, 1])\n >>> y = np.array([0, 1, 1, 1])\n >>> dummy_clf = DummyClassifier(strategy=\"most_frequent\")\n >>> dummy_clf.fit(X, y)\n DummyClassifier(strategy='most_frequent')\n >>> dummy_clf.predict(X)\n array([1, 1, 1, 1])\n >>> dummy_clf.score(X, y)\n 0.75\n \"\"\"\n\n def __init__(self, *, strategy=\"prior\", random_state=None, constant=None):\n self.strategy = strategy\n self.random_state = random_state\n self.constant = constant\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the baseline classifier.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n allowed_strategies = (\n \"most_frequent\",\n \"stratified\",\n \"uniform\",\n \"constant\",\n \"prior\",\n )\n\n if self.strategy not in allowed_strategies:\n raise ValueError(\n \"Unknown strategy type: %s, expected one of %s.\"\n % (self.strategy, allowed_strategies)\n )\n\n self._strategy = self.strategy\n\n if self._strategy == \"uniform\" and sp.issparse(y):\n y = y.toarray()\n warnings.warn(\n \"A local copy of the target data has been converted \"\n \"to a numpy array. 
Predicting on sparse target data \"\n \"with the uniform strategy would not save memory \"\n \"and would be slower.\",\n UserWarning,\n )\n\n self.sparse_output_ = sp.issparse(y)\n\n if not self.sparse_output_:\n y = np.asarray(y)\n y = np.atleast_1d(y)\n\n if y.ndim == 1:\n y = np.reshape(y, (-1, 1))\n\n self.n_outputs_ = y.shape[1]\n\n check_consistent_length(X, y)\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X)\n\n if self._strategy == \"constant\":\n if self.constant is None:\n raise ValueError(\n \"Constant target value has to be specified \"\n \"when the constant strategy is used.\"\n )\n else:\n constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))\n if constant.shape[0] != self.n_outputs_:\n raise ValueError(\n \"Constant target value should have shape (%d, 1).\"\n % self.n_outputs_\n )\n\n (self.classes_, self.n_classes_, self.class_prior_) = class_distribution(\n y, sample_weight\n )\n\n if self._strategy == \"constant\":\n for k in range(self.n_outputs_):\n if not any(constant[k][0] == c for c in self.classes_[k]):\n # Checking in case of constant strategy if the constant\n # provided by the user is in y.\n err_msg = (\n \"The constant target value must be present in \"\n \"the training data. You provided constant={}. \"\n \"Possible values are: {}.\".format(\n self.constant, list(self.classes_[k])\n )\n )\n raise ValueError(err_msg)\n\n if self.n_outputs_ == 1:\n self.n_classes_ = self.n_classes_[0]\n self.classes_ = self.classes_[0]\n self.class_prior_ = self.class_prior_[0]\n\n return self\n\n def predict(self, X):\n \"\"\"Perform classification on test vectors X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test data.\n\n Returns\n -------\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n Predicted target values for X.\n \"\"\"\n check_is_fitted(self)\n\n # numpy random_state expects Python int and not long as size argument\n # under Windows\n n_samples = _num_samples(X)\n rs = check_random_state(self.random_state)\n\n n_classes_ = self.n_classes_\n classes_ = self.classes_\n class_prior_ = self.class_prior_\n constant = self.constant\n if self.n_outputs_ == 1:\n # Get same type even for self.n_outputs_ == 1\n n_classes_ = [n_classes_]\n classes_ = [classes_]\n class_prior_ = [class_prior_]\n constant = [constant]\n # Compute probability only once\n if self._strategy == \"stratified\":\n proba = self.predict_proba(X)\n if self.n_outputs_ == 1:\n proba = [proba]\n\n if self.sparse_output_:\n class_prob = None\n if self._strategy in (\"most_frequent\", \"prior\"):\n classes_ = [np.array([cp.argmax()]) for cp in class_prior_]\n\n elif self._strategy == \"stratified\":\n class_prob = class_prior_\n\n elif self._strategy == \"uniform\":\n raise ValueError(\n \"Sparse target prediction is not \"\n \"supported with the uniform strategy\"\n )\n\n elif self._strategy == \"constant\":\n classes_ = [np.array([c]) for c in constant]\n\n y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state)\n else:\n if self._strategy in (\"most_frequent\", \"prior\"):\n y = np.tile(\n [\n classes_[k][class_prior_[k].argmax()]\n for k in range(self.n_outputs_)\n ],\n [n_samples, 1],\n )\n\n elif self._strategy == \"stratified\":\n y = np.vstack(\n [\n classes_[k][proba[k].argmax(axis=1)]\n for k in range(self.n_outputs_)\n ]\n ).T\n\n elif self._strategy == \"uniform\":\n ret = [\n classes_[k][rs.randint(n_classes_[k], size=n_samples)]\n for k in range(self.n_outputs_)\n ]\n y = 
np.vstack(ret).T\n\n elif self._strategy == \"constant\":\n y = np.tile(self.constant, (n_samples, 1))\n\n if self.n_outputs_ == 1:\n y = np.ravel(y)\n\n return y\n\n def predict_proba(self, X, uncertainty=[]):\n \"\"\"\n Return probability estimates for the test vectors X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test data.\n\n Returns\n -------\n P : ndarray of shape (n_samples, n_classes) or list of such arrays\n Returns the probability of the sample for each class in\n the model, where classes are ordered arithmetically, for each\n output.\n \"\"\"\n check_is_fitted(self)\n\n # numpy random_state expects Python int and not long as size argument\n # under Windows\n n_samples = _num_samples(X)\n rs = check_random_state(self.random_state)\n\n n_classes_ = self.n_classes_\n classes_ = self.classes_\n class_prior_ = self.class_prior_\n constant = self.constant\n if self.n_outputs_ == 1:\n # Get same type even for self.n_outputs_ == 1\n n_classes_ = [n_classes_]\n classes_ = [classes_]\n class_prior_ = [class_prior_]\n constant = [constant]\n\n P = []\n for k in range(self.n_outputs_):\n if self._strategy == \"most_frequent\":\n ind = class_prior_[k].argmax()\n out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)\n out[:, ind] = 1.0\n elif self._strategy == \"prior\":\n out = np.ones((n_samples, 1)) * class_prior_[k]\n\n elif self._strategy == \"stratified\":\n out = rs.multinomial(1, class_prior_[k], size=n_samples)\n out = out.astype(np.float64)\n\n elif self._strategy == \"uniform\":\n out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)\n out /= n_classes_[k]\n\n elif self._strategy == \"constant\":\n ind = np.where(classes_[k] == constant[k])\n out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)\n out[:, ind] = 1.0\n\n P.append(out)\n\n if self.n_outputs_ == 1:\n P = P[0]\n\n return P\n\n def predict_log_proba(self, X):\n \"\"\"\n Return log probability estimates for the test vectors X.\n\n Parameters\n ----------\n X : {array-like, object with finite length or shape}\n Training data.\n\n Returns\n -------\n P : ndarray of shape (n_samples, n_classes) or list of such arrays\n Returns the log probability of the sample for each class in\n the model, where classes are ordered arithmetically for each\n output.\n \"\"\"\n proba = self.predict_proba(X)\n if self.n_outputs_ == 1:\n return np.log(proba)\n else:\n return [np.log(p) for p in proba]\n\n def _more_tags(self):\n return {\n \"poor_score\": True,\n \"no_validation\": True,\n \"_xfail_checks\": {\n \"check_methods_subset_invariance\": \"fails for the predict method\",\n \"check_methods_sample_order_invariance\": \"fails for the predict method\",\n },\n }\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Return the mean accuracy on the given test data and labels.\n\n In multi-label classification, this is the subset accuracy\n which is a harsh metric since you require for each sample that\n each label set be correctly predicted.\n\n Parameters\n ----------\n X : None or array-like of shape (n_samples, n_features)\n Test samples. Passing None as test samples gives the same result\n as passing real test samples, since DummyClassifier\n operates independently of the sampled observations.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n True labels for X.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n Mean accuracy of self.predict(X) wrt. 
y.\n \"\"\"\n if X is None:\n X = np.zeros(shape=(len(y), 1))\n return super().score(X, y, sample_weight)\n\n # TODO: Remove in 1.2\n # mypy error: Decorated property not supported\n @deprecated( # type: ignore\n \"`n_features_in_` is deprecated in 1.0 and will be removed in 1.2.\"\n )\n @property\n def n_features_in_(self):\n check_is_fitted(self)\n return None\n\n\nclass DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):\n \"\"\"Regressor that makes predictions using simple rules.\n\n This regressor is useful as a simple baseline to compare with other\n (real) regressors. Do not use it for real problems.\n\n Read more in the :ref:`User Guide <dummy_estimators>`.\n\n .. versionadded:: 0.13\n\n Parameters\n ----------\n strategy : {\"mean\", \"median\", \"quantile\", \"constant\"}, default=\"mean\"\n Strategy to use to generate predictions.\n\n * \"mean\": always predicts the mean of the training set\n * \"median\": always predicts the median of the training set\n * \"quantile\": always predicts a specified quantile of the training set,\n provided with the quantile parameter.\n * \"constant\": always predicts a constant value that is provided by\n the user.\n\n constant : int or float or array-like of shape (n_outputs,), default=None\n The explicit constant as predicted by the \"constant\" strategy. This\n parameter is useful only for the \"constant\" strategy.\n\n quantile : float in [0.0, 1.0], default=None\n The quantile to predict using the \"quantile\" strategy. A quantile of\n 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the\n maximum.\n\n Attributes\n ----------\n constant_ : ndarray of shape (1, n_outputs)\n Mean or median or quantile of the training targets or constant value\n given by the user.\n\n n_features_in_ : `None`\n Always set to `None`.\n\n .. versionadded:: 0.24\n .. 
deprecated:: 1.0\n Will be removed in 1.0\n\n n_outputs_ : int\n Number of outputs.\n\n See Also\n --------\n DummyClassifier: Classifier that makes predictions using simple rules.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.dummy import DummyRegressor\n >>> X = np.array([1.0, 2.0, 3.0, 4.0])\n >>> y = np.array([2.0, 3.0, 5.0, 10.0])\n >>> dummy_regr = DummyRegressor(strategy=\"mean\")\n >>> dummy_regr.fit(X, y)\n DummyRegressor()\n >>> dummy_regr.predict(X)\n array([5., 5., 5., 5.])\n >>> dummy_regr.score(X, y)\n 0.0\n \"\"\"\n\n def __init__(self, *, strategy=\"mean\", constant=None, quantile=None):\n self.strategy = strategy\n self.constant = constant\n self.quantile = quantile\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the random regressor.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n allowed_strategies = (\"mean\", \"median\", \"quantile\", \"constant\")\n if self.strategy not in allowed_strategies:\n raise ValueError(\n \"Unknown strategy type: %s, expected one of %s.\"\n % (self.strategy, allowed_strategies)\n )\n\n y = check_array(y, ensure_2d=False, input_name=\"y\")\n if len(y) == 0:\n raise ValueError(\"y must not be empty.\")\n\n if y.ndim == 1:\n y = np.reshape(y, (-1, 1))\n self.n_outputs_ = y.shape[1]\n\n check_consistent_length(X, y, sample_weight)\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X)\n\n if self.strategy == \"mean\":\n self.constant_ = np.average(y, axis=0, weights=sample_weight)\n\n elif self.strategy == \"median\":\n if sample_weight is None:\n self.constant_ = np.median(y, axis=0)\n else:\n self.constant_ = [\n _weighted_percentile(y[:, k], sample_weight, percentile=50.0)\n for k in range(self.n_outputs_)\n ]\n\n elif self.strategy == \"quantile\":\n if self.quantile is None or not np.isscalar(self.quantile):\n raise ValueError(\n \"Quantile must be a scalar in the range [0.0, 1.0], but got %s.\"\n % self.quantile\n )\n\n percentile = self.quantile * 100.0\n if sample_weight is None:\n self.constant_ = np.percentile(y, axis=0, q=percentile)\n else:\n self.constant_ = [\n _weighted_percentile(y[:, k], sample_weight, percentile=percentile)\n for k in range(self.n_outputs_)\n ]\n\n elif self.strategy == \"constant\":\n if self.constant is None:\n raise TypeError(\n \"Constant target value has to be specified \"\n \"when the constant strategy is used.\"\n )\n\n self.constant = check_array(\n self.constant,\n accept_sparse=[\"csr\", \"csc\", \"coo\"],\n ensure_2d=False,\n ensure_min_samples=0,\n )\n\n if self.n_outputs_ != 1 and self.constant.shape[0] != y.shape[1]:\n raise ValueError(\n \"Constant target value should have shape (%d, 1).\" % y.shape[1]\n )\n\n self.constant_ = self.constant\n\n self.constant_ = np.reshape(self.constant_, (1, -1))\n return self\n\n def predict(self, X, return_std=False):\n \"\"\"Perform classification on test vectors X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test data.\n\n return_std : bool, default=False\n Whether to return the standard deviation of posterior prediction.\n All zeros in this case.\n\n .. 
versionadded:: 0.20\n\n Returns\n -------\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n Predicted target values for X.\n\n y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)\n Standard deviation of predictive distribution of query points.\n \"\"\"\n check_is_fitted(self)\n n_samples = _num_samples(X)\n\n y = np.full(\n (n_samples, self.n_outputs_),\n self.constant_,\n dtype=np.array(self.constant_).dtype,\n )\n y_std = np.zeros((n_samples, self.n_outputs_))\n\n if self.n_outputs_ == 1:\n y = np.ravel(y)\n y_std = np.ravel(y_std)\n\n return (y, y_std) if return_std else y\n\n def _more_tags(self):\n return {\"poor_score\": True, \"no_validation\": True}\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Return the coefficient of determination R^2 of the prediction.\n\n The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the\n residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the\n total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best\n possible score is 1.0 and it can be negative (because the model can be\n arbitrarily worse). A constant model that always predicts the expected\n value of y, disregarding the input features, would get a R^2 score of\n 0.0.\n\n Parameters\n ----------\n X : None or array-like of shape (n_samples, n_features)\n Test samples. Passing None as test samples gives the same result\n as passing real test samples, since `DummyRegressor`\n operates independently of the sampled observations.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n True values for X.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n R^2 of `self.predict(X)` wrt. y.\n \"\"\"\n if X is None:\n X = np.zeros(shape=(len(y), 1))\n return super().score(X, y, sample_weight)\n\n # TODO: Remove in 1.2\n # mypy error: Decorated property not supported\n @deprecated( # type: ignore\n \"`n_features_in_` is deprecated in 1.0 and will be removed in 1.2.\"\n )\n @property\n def n_features_in_(self):\n check_is_fitted(self)\n return None\n"
] |
[
[
"numpy.log",
"scipy.sparse.issparse",
"numpy.reshape",
"numpy.asarray",
"numpy.median",
"numpy.tile",
"numpy.percentile",
"numpy.ones",
"numpy.atleast_1d",
"numpy.isscalar",
"numpy.ravel",
"numpy.array",
"numpy.average",
"numpy.zeros",
"numpy.where",
"numpy.vstack"
]
] |
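As a quick, hedged illustration of the baseline estimators defined in the sklearn dummy.py record above, the following minimal sketch reuses the values from that file's own docstring examples; the only change is giving X an explicit 2-D shape.

import numpy as np
from sklearn.dummy import DummyClassifier, DummyRegressor

X = np.array([[-1], [1], [1], [1]])      # 2-D features; the dummy estimators ignore X's values
y = np.array([0, 1, 1, 1])

clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
print(clf.predict(X))                    # [1 1 1 1] -- always the most frequent class
print(clf.score(X, y))                   # 0.75

X_r = np.array([[1.0], [2.0], [3.0], [4.0]])
y_r = np.array([2.0, 3.0, 5.0, 10.0])
reg = DummyRegressor(strategy="mean")
reg.fit(X_r, y_r)
print(reg.predict(X_r))                  # [5. 5. 5. 5.] -- always the training mean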
ceausuveronica/OpenCV-projects
|
[
"fe9a95eead97212de5afeb59b31a8996ce7fb116"
] |
[
"colored-led-counter/app.py"
] |
[
"# import the necessary packages\nimport argparse\nimport numpy as np\nfrom pprint import pprint\n\n\ntry:\n from cv2 import cv2\n\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", required=True,\n help=\"path to the input image\")\n ap.add_argument(\"-c\", \"--color\",\n default=\"red\",\n help=\"red, green, blue, yellow\")\n args = vars(ap.parse_args())\n\n img_path = 'leds/' + args['image']\n img = cv2.imread(img_path) # type: numpy.ndarray\n\n #scale\n max_dimension = max(img.shape)\n scale = 816/max_dimension\n\n # reval, threshold = cv2.threshold(img, 85, 220, cv2.THRESH_BINARY)\n\n img = cv2.resize(img, None, fx=scale, fy=scale)\n\n color = args['color']\n\n if color == 'yellow':\n alpha = 2\n beta = 30\n else:\n alpha = 2\n beta = 30\n\n img =cv2.addWeighted(img, alpha, np.zeros(img.shape, img.dtype), 0, beta)\n\n img = cv2.GaussianBlur(img, (7,7), 0)\n\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n lower_green = np.array([55, 100, 50])\n upper_green = np.array([85, 255, 255])\n\n lower_yellow = np.array([30, 20, 50])\n upper_yellow = np.array([60, 255, 255])\n\n lower_blue = np.array([90, 100, 50])\n upper_blue = np.array([150, 255, 255])\n\n lower_red = np.array([0, 50, 50])\n upper_red = np.array([20, 255, 255])\n\n lower_cold_red = np.array([160, 50, 50])\n upper_cold_red = np.array([255, 255, 255])\n\n if color == 'red':\n mask = cv2.inRange(hsv, lower_red, upper_red)\n mask2 = cv2.inRange(hsv, lower_cold_red, upper_cold_red)\n mask = cv2.bitwise_or(mask, mask2)\n\n if color == 'green':\n mask = cv2.inRange(hsv, lower_green, upper_green)\n\n if color == 'blue':\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\n\n if color == 'yellow':\n mask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n\n res = cv2.bitwise_and(img, img, mask=mask)\n\n\n cv2.imshow('img', img)\n cv2.imshow('res', res)\n\n new_res = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n new_res = cv2.bitwise_and(new_res, new_res, mask=mask)\n new_res = cv2.cvtColor(new_res, cv2.COLOR_BGR2GRAY)\n reval, new_res = cv2.threshold(new_res, 10, 220, cv2.THRESH_BINARY)\n\n cv2.imshow('new_res', new_res)\n\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))\n mask_closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n mask_clean = cv2.morphologyEx(mask_closed, cv2.MORPH_OPEN, kernel)\n\n image, contours, hierarchy = cv2.findContours(new_res.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n\n contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]\n\n led_contours = list(filter(lambda x: x[0] > 20 and x[0] < 300 , contour_sizes))\n\n nr_leds = len(led_contours)\n\n\n print(str(nr_leds) +' LEDs' )\n print('Press escape')\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\nexcept ImportError:\n print(\"Ooops..\")"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
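The colored-led-counter/app.py record above masks a color range in HSV and counts contour blobs. The sketch below shows that same pattern on a synthetic image; the image, thresholds, and area cutoff are made up for illustration, and a small guard handles the differing cv2.findContours return shape across OpenCV 3.x/4.x.

import numpy as np
import cv2

# Synthetic BGR frame: black background with two pure-red squares standing in for LEDs.
img = np.zeros((200, 200, 3), dtype=np.uint8)
img[30:50, 30:50] = (0, 0, 255)
img[120:150, 120:150] = (0, 0, 255)

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([0, 50, 50]), np.array([20, 255, 255]))  # "warm" red range

# OpenCV 4.x returns (contours, hierarchy); 3.x returns (image, contours, hierarchy).
found = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = found[0] if len(found) == 2 else found[1]

blobs = [c for c in contours if cv2.contourArea(c) > 20]                  # drop tiny specks
print(len(blobs), "LED-like blobs")                                       # expected: 2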
jtmccr1/variant_pipeline
|
[
"b4a26398a5707814884cbf80a2e22032476e6479"
] |
[
"scripts/position_data.py"
] |
[
"from __future__ import division\nimport pysam\nimport numpy as np\nimport yaml \nfrom Bio import SeqIO \nfrom Bio.Seq import Seq\nfrom Bio.Alphabet import generic_dna\n#from scripts.seq_classes import locus, segment, tally, allele # %%\nfrom seq_classes import locus, segment, tally, allele\n#from scripts.trim_to_regions import ReadFASTA #%%\nfrom trim_to_regions import ReadFASTA\nimport argparse\nimport os\nimport json\n\ndef main():\n parser = argparse.ArgumentParser(description='This scipts takes a bam file \\\n and identifies variants and according to a consensus file.',\n usage =\"python position_data.py sample.bed reference.fa sample.bam sample.json -maxDepth 1000 \")\n \n parser.add_argument('bed_json', metavar='bed_json', nargs='+',\n help='a json bed like file with regions to compare')\n\n parser.add_argument('reference_fa', metavar='ref',nargs='+',\n help = 'The sample consensus file which will be used to call nonsynonymous and synonymous mutations')\n\n parser.add_argument('bam', metavar='bam', nargs='+',\n help='The bam file of the sample. For naming purposes we expect it to be sample_name.removed.bam')\n \n parser.add_argument('output', metavar='output', nargs='+',\n help='The json file to hold the output')\n \n parser.add_argument('--maxDepth', metavar='maxDepth', type=int,\n help='the max depth to use for pileup default is 1000')\n \n #parser.add_argument('-mqc','--quality_metrics',action= 'store_true',dest = 'mqc',default = False)\n\n\n args = parser.parse_args()\n sample_name = args.bam[0].split(\".removed.bam\")[0].split(\"/\")[-1]\n if args.maxDepth==None:\n maxDepth = 1000\n else:\n maxDepth=args.maxDepth\n\n # get bam file\n bam = pysam.AlignmentFile(args.bam[0],\"rb\")\n # set up reference dictions with key for each segment and value of [0,length]\n ref_genome_main={}\n # this is to maintain the order for concatenated pos\n chr_order = []\n chr_length = []\n\n # This needs to be changed to account for the new bed file format \n # it should be from the min of all start codons for each ORF to the max end \n \n with open(args.bed_json[0],\"r\") as f: \n regions=json.load(f)\n for segment in regions[\"genome\"]:\n start = []\n stop = []\n chr_order.append(segment[\"seg\"])\n chr_length.append(segment[\"size\"])\n for orf in segment[\"ORF\"]:\n for reading in orf[\"regions\"]:\n start.append(reading[\"start\"])\n stop.append(reading[\"stop\"])\n ref_genome_main.update({segment[\"seg\"]: [min(start),max(stop)]})\n \n chr_cumsum = [0] + list(np.cumsum(chr_length))\n\n # tally up base counts for each segement\n sample_genome={}\n for seg in ref_genome_main:\n sample_genome.update({seg: tally(bamfile=bam,chr=seg,\\\n start = ref_genome_main[seg][0],stop = ref_genome_main[seg][1],maxDepth=maxDepth)})\n #makes sure the frequencies are up to date\n # probably don't need it now\n for seg in sample_genome: \n sample_genome[seg].consensus()\n \n\n # Here we will classify the variants \n ref_file = ReadFASTA(args.reference_fa[0])\n for seg in sample_genome:\n for ref_seg in regions[\"genome\"]:\n if seg == ref_seg[\"seg\"]:\n consensus_sequence = [s.seq for s in ref_file if s.id==seg]# the regions are relative to this sequence\n if len(consensus_sequence)==0:\n raise ValueError(\"Segment %s not found in the reference fasta file\" % seg)\n elif len(consensus_sequence)<1:\n raise ValueError(\"Segment %s found in the reference fasta file %d times \" % seg,len(consensus_sequence))\n else:\n consensus_sequence = consensus_sequence[0]\n for orf in ref_seg[\"ORF\"]:\n for l in 
sample_genome[seg].seq:\n for nucleotide in l.alleles:\n l.alleles[nucleotide].classifyVar(consensus_sequence,orf,l.pos) \n\n\n\n# set concatpos\n for seg in sample_genome:\n for pos in sample_genome[seg].seq:\n # set concatpos\n pos.concat_pos = pos.pos + chr_cumsum[chr_order.index(seg)]\n\n with open(args.output[0],'w') as f:\n out_data={\"Sample\": sample_name,\"genome\" :[]}\n for seg in sample_genome:\n out_data[\"genome\"].append(sample_genome[seg].reprJSON())\n \n json.dump(out_data,f,sort_keys=True,indent=4)\n\n \"\"\" \n if args.mqc: \n # check if mqc dir exists if not make it\n if not os.path.exists(\"./mqc_position_stats\"):\n os.makedirs(\"./mqc_position_stats\")\n # get sample name\n basename = \"./mqc_position_stats/\"+os.path.splitext(os.path.basename(args.bam[0]))[0]\n concat_cov=[] \n concat_pos = [] \n i = 1 \n for loci in sample_genome[seg].seq:\n concat_cov.append(loci.coverage) \n concat_pos.append(loci.concat_pos) \n i+=1 \n with open(basename+\"_mqc.cov.csv\",\"w\") as covfile: \n i = 0 \n while i<len(concat_cov): \n covfile.write(\"%d,%d\\n\" %(concat_pos[i],concat_cov[i])) \n i+=1 \n \n # Frequencies\n concat_pos = [] \n max_pos = 0 \n freqs = [] \n for seg in sample_genome: \n seg_freq=[] \n pos = [] \n for loci in sample_genome[seg].seq: \n for k,v in loci.freqs.items(): \n if v >0 and k!=loci.consensus: \n freqs.append(v) \n seg_freq.append(v) \n concat_pos.append(loci.concat_pos) \n\n\n seg_freq = np.sort(seg_freq) \n cdf = np.array(range(len(seg_freq)))/float(len(seg_freq)) \n with open(basename+ \"-\"+seg+\"_mqc.cdf.csv\",\"w\") as cdfile: \n i = 0 \n while i<len(seg_freq): \n cdfile.write(\"%f,%f\\n\" %(np.log10(seg_freq[i]),cdf[i])) \n i+=1 \n \n with open(basename+\"_mqc.frequencies.csv\",\"w\") as freqfile: \n i = 0 \n while i<len(freqs): \n freqfile.write(\"%d,%f\\n\" %(concat_pos[i],np.log10(freqs[i]))) \n i+=1 \n \"\"\"\n \nif __name__ == '__main__':\n main()\n \n \n"
] |
[
[
"numpy.cumsum"
]
] |
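The only numpy call recorded for scripts/position_data.py is numpy.cumsum, used to map per-segment positions onto a concatenated genome coordinate. A minimal sketch of that bookkeeping, with made-up segment names and lengths, looks like this:

import numpy as np

chr_order = ["PB2", "PB1", "PA"]          # illustrative segment names
chr_length = [2341, 2341, 2233]           # illustrative segment lengths
chr_cumsum = [0] + list(np.cumsum(chr_length))

def concat_pos(seg, pos):
    """Position within `seg` mapped onto the concatenated genome coordinate."""
    return pos + chr_cumsum[chr_order.index(seg)]

print(concat_pos("PB2", 10))  # 10
print(concat_pos("PB1", 10))  # 2351
print(concat_pos("PA", 10))   # 4692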
willzhang100/diora
|
[
"40b3533e0a181c23cb3d17a65fca528c72813cfb"
] |
[
"pytorch/diora/net/outside_index.py"
] |
[
"import torch\nfrom diora.net.offset_cache import get_offset_cache\n\n\nclass OutsideIndex(object):\n def get_pairs(self, level, i, n):\n \"\"\"\n Returns all (parent, sibling) coordinate pairs that\n are used to construct a node at coordinates\n (level, i) where there n leaf nodes.\n\n \"\"\"\n pairs = []\n\n for level_ in range(level + 1, i + 1):\n p_level = level_\n p_i = i\n s_level = level_ - level - 1\n s_i = i - level - 1\n\n pairs.append([(p_level, p_i), (s_level, s_i)])\n\n for i_ in range(i + 1, n):\n p_level = level + i_ - i\n p_i = i_\n s_level = i_ - i - 1\n s_i = i_\n\n pairs.append([(p_level, p_i), (s_level, s_i)])\n\n return pairs\n\n def xget_all_pairs(self, level, n):\n pairs = []\n for i in range(level, n):\n pairs += self.get_pairs(level, i, n)\n return pairs\n\n def get_all_pairs(self, level, n):\n L = n - level\n N = L - 1\n\n pairs = []\n\n for i in range(N):\n jseen = 0\n for j in range(L):\n if j < N - i:\n s_level = n - i - 1\n s_i = N - i - j - 1\n p_level = s_level\n p_i = s_level - j\n else:\n s_level = j - 1\n s_i = jseen\n p_level = n - (N - s_level)\n p_i = n - (N - s_i)\n jseen += 1\n pair = [(p_i, p_level), (s_i, s_level)]\n pairs.append(pair)\n\n return pairs\n\n\nclass OutsideIndexCheck(object):\n def __init__(self, length, spans, siblings):\n sib_map = {}\n for x, y, n in siblings:\n sib_map[x] = (y, n)\n sib_map[y] = (x, n)\n\n check = {}\n for sibling, (target, name) in sib_map.items():\n xlength = target[1] - target[0]\n xlevel = xlength - 1\n xpos = target[0]\n tgt = (xlevel, xpos)\n\n slength = sibling[1] - sibling[0]\n slevel = slength - 1\n spos = sibling[0]\n sis = (slevel, spos)\n\n par = (sis[0] + tgt[0] + 1, min(sis[1], tgt[1]))\n\n check[(par, sis)] = True\n self.check = check\n\n def is_valid(self, par, sis):\n return (par, sis) in self.check\n\n\ndef get_outside_index(length, level, offset_cache=None, cuda=False):\n if offset_cache is None:\n offset_cache = get_offset_cache(length)\n index = OutsideIndex()\n pairs = index.get_all_pairs(level, length)\n\n par_lvl, par_pos = [], []\n sis_lvl, sis_pos = [], []\n\n for pair in pairs:\n par, sis = pair\n par_lvl.append(par[0])\n par_pos.append(par[1] - par[0])\n sis_lvl.append(sis[0])\n sis_pos.append(sis[1] - sis[0])\n\n device = torch.cuda.current_device() if cuda else None\n\n # Parent\n index = []\n for lvl, pos in zip(par_lvl, par_pos):\n offset = offset_cache[lvl]\n idx = offset + pos\n index.append(idx)\n par_index = torch.tensor(index, dtype=torch.int64, device=device)\n\n # Sibling\n index = []\n for lvl, pos in zip(sis_lvl, sis_pos):\n offset = offset_cache[lvl]\n idx = offset + pos\n index.append(idx)\n sis_index = torch.tensor(index, dtype=torch.int64, device=device)\n\n return par_index, sis_index\n\n"
] |
[
[
"torch.cuda.current_device",
"torch.tensor"
]
] |
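get_outside_index in the diora record above turns (level, position) pairs into flat chart indices via a per-level offset and packs them into torch.tensor for gathering. The sketch below reproduces that idea with offsets computed inline rather than via diora's get_offset_cache, whose exact layout is assumed here.

import torch

def offsets(length):
    # Level L holds (length - L) cells; levels are laid out back to back in one flat chart.
    off, total = {}, 0
    for lvl in range(length):
        off[lvl] = total
        total += length - lvl
    return off

length = 5
off = offsets(length)
pairs = [(0, 3), (1, 2), (2, 0)]                         # (level, position) cells to gather
index = torch.tensor([off[lvl] + pos for lvl, pos in pairs], dtype=torch.int64)

n_cells = sum(length - lvl for lvl in range(length))     # 15 cells for length 5
chart = torch.arange(n_cells, dtype=torch.float32)
print(chart.index_select(0, index))                      # tensor([3., 7., 9.])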
Geolem/tensorflow
|
[
"cfc9d3e1ba3b50dc66f1b81eaea537772ab16024"
] |
[
"tensorflow/python/distribute/collective_all_reduce_strategy.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class CollectiveAllReduceStrategy implementing DistributionStrategy.\"\"\"\n\nimport copy\nimport threading\nimport time\nimport weakref\n\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.core.protobuf import tensorflow_server_pb2\nfrom tensorflow.python.distribute import collective_util\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import cross_device_utils\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import ClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import collective_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.tpu import tpu_strategy_util\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# pylint: disable=line-too-long\n@tf_export(\"distribute.MultiWorkerMirroredStrategy\", v1=[])\nclass CollectiveAllReduceStrategy(distribute_lib.Strategy):\n \"\"\"A distribution strategy for synchronous training on multiple workers.\n\n This strategy implements synchronous distributed training across multiple\n workers, each with potentially multiple GPUs. Similar to\n `tf.distribute.MirroredStrategy`, it replicates all variables and computations\n to each local device. The difference is that it uses a distributed collective\n implementation (e.g. all-reduce), so that multiple workers can work together.\n\n You need to launch your program on each worker and configure\n `cluster_resolver` correctly. For example, if you are using\n `tf.distribute.cluster_resolver.TFConfigClusterResolver`, each worker needs to\n have its corresponding `task_type` and `task_id` set in the `TF_CONFIG`\n environment variable. 
An example TF_CONFIG on worker-0 of a two worker cluster\n is:\n\n ```\n TF_CONFIG = '{\"cluster\": {\"worker\": [\"localhost:12345\", \"localhost:23456\"]}, \"task\": {\"type\": \"worker\", \"index\": 0} }'\n ```\n\n Your program runs on each worker as-is. Note that collectives require each\n worker to participate. All `tf.distribute` and non `tf.distribute` API may use\n collectives internally, e.g. checkpointing and saving since reading a\n `tf.Variable` with `tf.VariableSynchronization.ON_READ` all-reduces the value.\n Therefore it's recommended to run exactly the same program on each worker.\n Dispatching based on `task_type` or `task_id` of the worker is error-prone.\n\n `cluster_resolver.num_accelerators()` determines the number of GPUs the\n strategy uses. If it's zero, the strategy uses the CPU. All workers need to\n use the same number of devices, otherwise the behavior is undefined.\n\n This strategy is not intended for TPU. Use `tf.distribute.TPUStrategy`\n instead.\n\n After setting up TF_CONFIG, using this strategy is similar to using\n `tf.distribute.MirroredStrategy` and `tf.distribute.TPUStrategy`.\n\n ```\n strategy = tf.distribute.MultiWorkerMirroredStrategy()\n\n with strategy.scope():\n model = tf.keras.Sequential([\n tf.keras.layers.Dense(2, input_shape=(5,)),\n ])\n optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)\n\n def dataset_fn(ctx):\n x = np.random.random((2, 5)).astype(np.float32)\n y = np.random.randint(2, size=(2, 1))\n dataset = tf.data.Dataset.from_tensor_slices((x, y))\n return dataset.repeat().batch(1, drop_remainder=True)\n dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)\n\n model.compile()\n model.fit(dist_dataset)\n ```\n\n You can also write your own training loop:\n\n ```\n @tf.function\n def train_step(iterator):\n\n def step_fn(inputs):\n features, labels = inputs\n with tf.GradientTape() as tape:\n logits = model(features, training=True)\n loss = tf.keras.losses.sparse_categorical_crossentropy(\n labels, logits)\n\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n strategy.run(step_fn, args=(next(iterator),))\n\n for _ in range(NUM_STEP):\n train_step(iterator)\n ```\n\n See\n [Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras)\n for a detailed tutorial.\n\n __Saving__\n\n You need to save and checkpoint on all workers instead of just one. This is\n because variables whose synchronization=ON_READ triggers aggregation during\n saving. It's recommended to save to a different path on each worker to avoid\n race conditions. Each worker saves the same thing. See\n [Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras#model_saving_and_loading)\n tutorial for examples.\n\n __Known Issues__\n\n * `tf.distribute.cluster_resolver.TFConfigClusterResolver` does not return the\n correct number of accelerators. The strategy uses all available GPUs if\n `cluster_resolver` is `tf.distribute.cluster_resolver.TFConfigClusterResolver`\n or `None`.\n * In eager mode, the strategy needs to be created before calling any other\n Tensorflow API.\n\n \"\"\"\n # pylint: enable=line-too-long\n\n # TODO(anjalisridhar): Update our guides with examples showing how we can use\n # the cluster_resolver argument.\n\n # The starting number for collective keys. 
This should only be set in tests.\n _collective_key_base = 0\n\n def __init__(self,\n cluster_resolver=None,\n communication_options=None):\n \"\"\"Creates the strategy.\n\n Args:\n cluster_resolver: optional\n `tf.distribute.cluster_resolver.ClusterResolver`. If `None`,\n `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\n communication_options: optional\n `tf.distribute.experimental.CommunicationOptions`. This configures the\n default options for cross device communications. It can be overridden by\n options provided to the communication APIs like\n `tf.distribute.ReplicaContext.all_reduce`. See\n `tf.distribute.experimental.CommunicationOptions` for details.\n \"\"\"\n if communication_options is None:\n communication_options = collective_util.Options()\n super(CollectiveAllReduceStrategy, self).__init__(\n CollectiveAllReduceExtended(\n self,\n cluster_resolver=cluster_resolver,\n communication_options=communication_options))\n\n distribute_lib.distribution_strategy_gauge.get_cell(\"V2\").set(\n \"MultiWorkerMirroredStrategy\")\n # pylint: disable=protected-access\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_replicas_per_worker\").set(self.extended._num_devices_per_worker)\n\n @classmethod\n def _from_local_devices(cls, devices, communication_options=None):\n \"\"\"A convenience method to create an object with a list of devices.\"\"\"\n obj = cls(communication_options=communication_options)\n obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access\n return obj\n\n @property\n def cluster_resolver(self):\n \"\"\"Returns the cluster resolver associated with this strategy.\n\n As a multi-worker strategy, `tf.distribute.MultiWorkerMirroredStrategy`\n provides the associated `tf.distribute.cluster_resolver.ClusterResolver`. If\n the user provides one in `__init__`, that instance is returned; if the user\n does not, a default `TFConfigClusterResolver` is provided.\n \"\"\"\n return self.extended._cluster_resolver # pylint: disable=protected-access\n\n\nclass _CollectiveAllReduceStrategyExperimentalMeta(type):\n\n @classmethod\n def __instancecheck__(cls, instance):\n # This is to make isinstance(tf.distribute.MultiWorkerMirroredStrategy(),\n # tf.distribute.experimental.MultiWorkerMirroredStrategy). Some libraries is\n # performing such check.\n return isinstance(instance, CollectiveAllReduceStrategy)\n\n\n@tf_export(\"distribute.experimental.MultiWorkerMirroredStrategy\", v1=[])\nclass _CollectiveAllReduceStrategyExperimental(\n CollectiveAllReduceStrategy,\n metaclass=_CollectiveAllReduceStrategyExperimentalMeta):\n\n __doc__ = CollectiveAllReduceStrategy.__doc__\n\n @deprecation.deprecated(\n None, \"use distribute.MultiWorkerMirroredStrategy instead\")\n def __init__(self,\n communication=collective_util.CommunicationImplementation.AUTO,\n cluster_resolver=None):\n \"\"\"Creates the strategy.\n\n Args:\n communication: optional\n `tf.distribute.experimental.CommunicationImplementation`. This is a hint\n on the preferred collective communication implementation. Possible\n values include `AUTO`, `RING`, and `NCCL`.\n cluster_resolver: optional\n `tf.distribute.cluster_resolver.ClusterResolver`. 
If `None`,\n `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\n \"\"\"\n communication_options = collective_util.Options(\n implementation=communication)\n super(_CollectiveAllReduceStrategyExperimental,\n self).__init__(cluster_resolver, communication_options)\n\n @classmethod\n def _from_local_devices(\n cls,\n devices,\n communication=collective_util.CommunicationImplementation.AUTO):\n \"\"\"A convenience method to create an object with a list of devices.\"\"\"\n obj = cls(communication)\n obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access\n return obj\n\n\n_CollectiveAllReduceStrategyExperimental.__name__ = CollectiveAllReduceStrategy.__name__\n\n\n@tf_export(v1=[\"distribute.experimental.MultiWorkerMirroredStrategy\"]) # pylint: disable=missing-docstring\nclass CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):\n\n __doc__ = CollectiveAllReduceStrategy.__doc__\n\n # The starting number for collective keys. This should only be set in tests.\n _collective_key_base = 0\n\n def __init__(self,\n communication=collective_util.CommunicationImplementation.AUTO,\n cluster_resolver=None):\n \"\"\"Initializes the object.\"\"\"\n communication_options = collective_util.Options(\n implementation=communication)\n super(CollectiveAllReduceStrategyV1, self).__init__(\n CollectiveAllReduceExtended(\n self,\n cluster_resolver=cluster_resolver,\n communication_options=communication_options))\n distribute_lib.distribution_strategy_gauge.get_cell(\"V1\").set(\n \"MultiWorkerMirroredStrategy\")\n # pylint: disable=protected-access\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_gpu_per_worker\").set(\n self.extended._num_devices_per_worker\n if self.extended._local_device_type == \"GPU\"\n else 0)\n\n\nclass CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):\n \"\"\"Implementation of CollectiveAllReduceStrategy.\"\"\"\n\n # Whether to perdically check the health of the cluster. If any worker is not\n # reachable, collectives are aborted and the user program should get a\n # tf.errors.UnavailableError. It's required to restart in order to recover.\n _enable_check_health = True\n # Check health interval in seconds.\n _check_health_interval = 30\n # Timeout in seconds for the first check health. 
The first check health needs\n # to wait for cluster, which may make a longer time.\n _check_health_initial_timeout = 0\n # Times to retry before considering the peer is down.\n _check_health_retry_limit = 3\n # Timeout in seconds the each check health.\n _check_health_timeout = 10\n\n def __init__(self, container_strategy, cluster_resolver,\n communication_options):\n if not isinstance(communication_options, collective_util.Options):\n raise ValueError(\"communication_options must be an instance of \"\n \"tf.distribute.experimental.CommunicationOptions\")\n self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()\n if not isinstance(self._cluster_resolver, ClusterResolver):\n raise ValueError(\"cluster_resolver must be an instance of \"\n \"tf.distribute.cluster_resolver.ClusterResolver\")\n distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)\n self._communication_options = communication_options\n self._collective_key_base = container_strategy._collective_key_base # pylint: disable=protected-access\n self._initialize_strategy(self._cluster_resolver)\n self._cfer_fn_cache = weakref.WeakKeyDictionary()\n self.experimental_enable_get_next_as_optional = True\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n def _use_merge_call(self):\n \"\"\"XLA is not supported for multi-worker strategy.\"\"\"\n return True\n\n def _initialize_strategy(self, cluster_resolver):\n if cluster_resolver.cluster_spec().as_dict():\n self._initialize_multi_worker(cluster_resolver)\n else:\n self._initialize_local(cluster_resolver)\n\n def _initialize_local_devices(self, cluster_resolver, worker_device):\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n num_tpus = 0\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n num_tpus = cluster_resolver.num_accelerators().get(\"TPU\", 0)\n\n if num_gpus:\n local_device_type = \"GPU\"\n num_local_devices = num_gpus\n elif num_tpus:\n local_device_type = \"TPU\"\n num_local_devices = num_tpus\n else:\n local_device_type = \"CPU\"\n num_local_devices = 1\n local_devices = tuple(\n f\"{worker_device}/device:{local_device_type}:{i}\"\n for i in range(num_local_devices))\n return local_devices, local_device_type\n\n def _initialize_local(self, cluster_resolver, devices=None):\n \"\"\"Initializes the object for local training.\"\"\"\n self._is_chief = True\n self._num_workers = 1\n\n if ops.executing_eagerly_outside_functions():\n try:\n context.context().configure_collective_ops(\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",))\n except RuntimeError:\n logging.warning(\"Collective ops is not configured at program startup. 
\"\n \"Some performance features may not be enabled.\")\n self._collective_ops_configured = True\n\n if devices:\n local_devices = devices\n if \"GPU\" in devices[0]:\n local_device_type = \"GPU\"\n elif \"TPU\" in devices[0]:\n local_device_type = \"TPU\"\n else:\n local_device_type = \"CPU\"\n else:\n local_devices, local_device_type = self._initialize_local_devices(\n cluster_resolver, worker_device=\"\")\n\n self._worker_device = device_util.canonicalize(\"/device:CPU:0\")\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n self._collective_keys = cross_device_utils.CollectiveKeys(\n group_key_start=1 + self._collective_key_base)\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices),\n collective_keys=self._collective_keys)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys)\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n\n self._cluster_spec = None\n self._task_type = None\n self._task_id = None\n self._id_in_cluster = 0\n\n # This is a mark to tell whether we are running with standalone client or\n # independent worker. Right now with standalone client, strategy object is\n # created as local strategy and then turn into multi-worker strategy via\n # configure call.\n self._local_or_standalone_client_mode = True\n\n # Save the num_devices_per_worker and rpc_layer for configure method.\n self._num_devices_per_worker = len(local_devices)\n self._local_device_type = local_device_type\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n logging.info(\n \"Single-worker MultiWorkerMirroredStrategy with local_devices \"\n \"= %r, communication = %s\", local_devices,\n self._communication_options.implementation)\n\n def _initialize_multi_worker(self, cluster_resolver):\n \"\"\"Initializes the object for multi-worker training.\"\"\"\n cluster_spec = multi_worker_util.normalize_cluster_spec(\n cluster_resolver.cluster_spec())\n task_type = cluster_resolver.task_type\n task_id = cluster_resolver.task_id\n if task_type is None or task_id is None:\n raise ValueError(\"When `cluster_spec` is given, you must also specify \"\n \"`task_type` and `task_id`.\")\n self._cluster_spec = cluster_spec\n self._task_type = task_type\n self._task_id = task_id\n self._id_in_cluster = multi_worker_util.id_in_cluster(\n self._cluster_spec, self._task_type, self._task_id)\n\n self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)\n if not self._num_workers:\n raise ValueError(\"No `worker`, `chief` or `evaluator` tasks can be found \"\n \"in `cluster_spec`.\")\n\n self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,\n task_id)\n\n self._worker_device = \"/job:%s/task:%d\" % (task_type, task_id)\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n if (ops.executing_eagerly_outside_functions() and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n context.context().configure_collective_ops(\n collective_leader=multi_worker_util.collective_leader(\n cluster_spec, task_type, task_id),\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",),\n device_filters=(\"/job:%s/task:%d\" % (task_type, task_id),))\n self._collective_ops_configured = True\n\n # Starting a std server in eager mode and in independent worker 
mode.\n if (context.executing_eagerly() and\n not getattr(self, \"_std_server_started\", False) and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n # Checking _local_or_standalone_client_mode as well because we should not\n # create the std server in standalone client mode.\n config_proto = copy.deepcopy(context.context().config)\n config_proto = self._update_config_proto(config_proto)\n\n # If coordination service is enabled, use its internal heartbeat to detect\n # peer failures instead of the Python-level health check.\n if config_proto.experimental.coordination_config.service_type:\n self._enable_check_health = False\n\n if hasattr(cluster_resolver, \"port\"):\n port = cluster_resolver.port\n else:\n port = 0\n server_def = tensorflow_server_pb2.ServerDef(\n cluster=cluster_spec.as_cluster_def(),\n default_session_config=config_proto,\n job_name=task_type,\n task_index=task_id,\n protocol=cluster_resolver.rpc_layer or \"grpc\",\n port=port)\n context.context().enable_collective_ops(server_def)\n self._std_server_started = True\n # The `ensure_initialized` is needed before calling\n # `context.context().devices()`.\n context.context().ensure_initialized()\n logging.info(\n \"Enabled multi-worker collective ops with available devices: %r\",\n context.context().devices())\n\n # TODO(yuefengz): The `num_gpus` is only for this particular task. It\n # assumes all workers have the same number of GPUs. We should remove this\n # assumption by querying all tasks for their numbers of GPUs.\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n local_devices, local_device_type = self._initialize_local_devices(\n cluster_resolver, self._worker_device)\n if local_device_type == \"TPU\":\n tpu_strategy_util.initialize_tpu_system()\n\n self._collective_keys = cross_device_utils.CollectiveKeys(\n group_key_start=1 + self._collective_key_base)\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices) * self._num_workers,\n collective_keys=self._collective_keys)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys)\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n\n # Add a default device so that ops without specified devices will not end up\n # on other workers.\n self._default_device = \"/job:%s/task:%d\" % (task_type, task_id)\n\n # Save the num_devices_per_worker and rpc_layer for configure method.\n self._num_devices_per_worker = len(local_devices)\n self._local_device_type = local_device_type\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n if self._enable_check_health and context.executing_eagerly():\n self._start_check_health_thread()\n else:\n logging.info(\"Check health not enabled.\")\n\n logging.info(\n \"MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, \"\n \"task_id = %r, num_workers = %r, local_devices = %r, \"\n \"communication = %s\", cluster_spec.as_dict(), task_type, task_id,\n self._num_workers, local_devices,\n self._communication_options.implementation)\n\n def __del__(self):\n self._stop_check_health_thread()\n\n def _input_workers_with_options(self, options=None):\n host_device = device_util.get_host_for_device(self._worker_device)\n if not options or options.experimental_fetch_to_device:\n return 
input_lib.InputWorkers([(host_device, self.worker_devices)])\n else:\n return input_lib.InputWorkers([(\n host_device,\n [device_util.get_host_for_device(worker) for worker in\n self.worker_devices])])\n\n @property\n def _input_workers(self):\n return self._input_workers_with_options()\n\n def _get_variable_creator_initial_value(self,\n replica_id,\n device,\n primary_var,\n **kwargs):\n if replica_id == 0: # First replica on each worker.\n assert device is not None\n assert primary_var is None\n\n def initial_value_fn(): # pylint: disable=g-missing-docstring\n # Only the first device participates in the broadcast of initial values.\n group_key = self._collective_keys.get_group_key([device])\n group_size = self._num_workers\n collective_instance_key = (\n self._collective_keys.get_instance_key(group_key, device))\n\n with ops.device(device):\n initial_value = kwargs[\"initial_value\"]\n if callable(initial_value):\n initial_value = initial_value()\n if isinstance(initial_value, base.CheckpointInitialValue):\n initial_value = initial_value.wrapped_value\n assert not callable(initial_value)\n initial_value = ops.convert_to_tensor(\n initial_value, dtype=kwargs.get(\"dtype\", None))\n\n if self._num_workers > 1:\n if self._is_chief:\n bcast_send = collective_ops.broadcast_send(\n initial_value, initial_value.shape, initial_value.dtype,\n group_size, group_key, collective_instance_key)\n with ops.control_dependencies([bcast_send]):\n return array_ops.identity(initial_value)\n else:\n return collective_ops.broadcast_recv(initial_value.shape,\n initial_value.dtype,\n group_size, group_key,\n collective_instance_key)\n return initial_value\n\n return initial_value_fn\n else:\n return super(CollectiveAllReduceExtended,\n self)._get_variable_creator_initial_value(\n replica_id=replica_id,\n device=device,\n primary_var=primary_var,\n **kwargs)\n\n def _make_input_context(self):\n input_context = distribute_lib.InputContext(\n num_input_pipelines=self._num_workers,\n input_pipeline_id=self._id_in_cluster,\n num_replicas_in_sync=self._num_replicas_in_sync)\n return input_context\n\n def _experimental_distribute_dataset(self, dataset, options):\n if (options and options.experimental_replication_mode ==\n distribute_lib.InputReplicationMode.PER_REPLICA):\n raise NotImplementedError(\n \"InputReplicationMode.PER_REPLICA \"\n \"is only supported in \"\n \"`distribute_datasets_from_function` \"\n \"of tf.distribute.MirroredStrategy\"\n )\n input_context = self._make_input_context()\n return input_lib.get_distributed_dataset(\n dataset,\n self._input_workers_with_options(options),\n self._container_strategy(),\n num_replicas_in_sync=self._num_replicas_in_sync,\n input_context=input_context,\n options=options)\n\n def _distribute_datasets_from_function(self, dataset_fn, options):\n if (options and options.experimental_replication_mode ==\n distribute_lib.InputReplicationMode.PER_REPLICA):\n raise NotImplementedError(\n \"InputReplicationMode.PER_REPLICA \"\n \"is only supported in \"\n \"`distribute_datasets_from_function` \"\n \"of tf.distribute.MirroredStrategy\")\n input_context = self._make_input_context()\n return input_lib.get_distributed_datasets_from_function(\n dataset_fn=dataset_fn,\n input_workers=self._input_workers_with_options(options),\n input_contexts=[input_context],\n strategy=self._container_strategy(),\n options=options)\n\n def _experimental_distribute_values_from_function(self, value_fn):\n per_replica_values = []\n num_local_replicas = len(self.worker_devices)\n for local_replica_id 
in range(num_local_replicas):\n replica_id = (self._id_in_cluster * num_local_replicas +\n local_replica_id)\n value_context = distribute_lib.ValueContext(\n replica_id, self._num_replicas_in_sync)\n per_replica_values.append(value_fn(value_context))\n return distribute_utils.regroup(per_replica_values, always_wrap=True)\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Distributes the dataset to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.DatasetIterator(\n dataset,\n self._input_workers,\n self._container_strategy(),\n num_replicas_in_sync=self._num_replicas_in_sync,\n input_context=input_context)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n \"\"\"Distributes the input function to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.InputFunctionIterator(input_fn, self._input_workers,\n [input_context],\n self._container_strategy())\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n \"\"\"Configures the object.\n\n Args:\n session_config: a `tf.compat.v1.ConfigProto`\n cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the\n cluster configurations.\n task_type: the current task type, such as \"worker\".\n task_id: the current task id.\n\n Raises:\n ValueError: if `task_type` is not in the `cluster_spec`.\n \"\"\"\n if cluster_spec:\n cluster_resolver = SimpleClusterResolver(\n cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),\n task_type=task_type,\n task_id=task_id,\n num_accelerators={\n self._local_device_type: self._num_devices_per_worker},\n rpc_layer=self._rpc_layer)\n self._initialize_multi_worker(cluster_resolver)\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))\n\n def _update_config_proto(self, config_proto):\n updated_config = copy.deepcopy(config_proto)\n # Enable the scoped allocator optimization for CollectiveOps. This\n # optimization converts many small all-reduces into fewer larger\n # all-reduces.\n rewrite_options = updated_config.graph_options.rewrite_options\n rewrite_options.scoped_allocator_optimization = (\n rewriter_config_pb2.RewriterConfig.ON)\n # We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =\n # [\"CollectiveReduce\"]. 
Since we can't assign to a repeated proto field, we\n # clear and then append.\n del rewrite_options.scoped_allocator_opts.enable_op[:]\n rewrite_options.scoped_allocator_opts.enable_op.append(\"CollectiveReduce\")\n\n if (not ops.executing_eagerly_outside_functions() and\n self._communication_options.implementation ==\n collective_util.CommunicationImplementation.NCCL):\n updated_config.experimental.collective_nccl = True\n\n if not self._cluster_spec:\n return updated_config\n\n assert self._task_type\n assert self._task_id is not None\n\n # Collective group leader is needed for collective ops to coordinate\n # workers.\n updated_config.experimental.collective_group_leader = (\n multi_worker_util.collective_leader(self._cluster_spec, self._task_type,\n self._task_id))\n\n # The device filters prevent communication between workers.\n del updated_config.device_filters[:]\n updated_config.device_filters.append(\n \"/job:%s/task:%d\" % (self._task_type, self._task_id))\n\n return updated_config\n\n def _get_cross_device_ops(self, value):\n # CollectiveAllReduce works on a predefined set of devices. In most cases\n # they should be the compute devices, but certain use cases may reduce host\n # tensors as well (e.g. early stopping). We infer the cross_device_ops to\n # use based on the number of devices, since inputs don't always have device\n # annotations. The compute devices one is preferred since we can potentially\n # leverage NCCL.\n if isinstance(value, values.DistributedValues):\n num_devices = len(value._values) # pylint: disable=protected-access\n else:\n num_devices = 1\n if num_devices == len(self.worker_devices):\n return self._cross_device_ops\n else:\n return self._host_cross_device_ops\n\n def _gather_to_implementation(self, value, destinations, axis, options):\n return self._get_cross_device_ops(value)._gather( # pylint: disable=protected-access\n value,\n destinations=destinations,\n axis=axis,\n options=options)\n\n def _reduce_to(self, reduce_op, value, destinations, options):\n if (isinstance(value, values.Mirrored) and\n reduce_op == reduce_util.ReduceOp.MEAN):\n return value\n assert not isinstance(value, values.Mirrored)\n\n if (isinstance(value, values.DistributedValues) and\n len(self.worker_devices) == 1):\n value = value.values[0]\n\n # When there are multiple workers, we need to reduce across workers using\n # collective ops.\n if (not isinstance(value, values.DistributedValues) and\n self._num_workers == 1):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return cross_device_ops_lib.reduce_non_distributed_value(\n reduce_op, value, destinations, len(self.worker_devices))\n return self._get_cross_device_ops(value).reduce(\n reduce_op,\n value,\n destinations=destinations,\n options=self._communication_options.merge(options))\n\n def _replica_ctx_all_reduce(self, reduce_op, value, options=None):\n \"\"\"Implements `StrategyExtendedV2._replica_ctx_all_reduce`.\"\"\"\n # This implementation avoids using `merge_call` and just launches collective\n # ops in one replica.\n if options is None:\n options = collective_util.Options()\n\n if context.executing_eagerly():\n # In eager mode, falls back to the default implemenation that uses\n # `merge_call`. 
Replica functions are running sequentially in eager mode,\n # and due to the blocking nature of collective ops, execution will hang if\n # collective ops are to be launched sequentially.\n return super()._replica_ctx_all_reduce(reduce_op, value, options)\n\n replica_context = ds_context.get_replica_context()\n assert replica_context, (\n \"`StrategyExtended._replica_ctx_all_reduce` must be called in a \"\n \"replica context\")\n return self._cross_device_ops._all_reduce( # pylint: disable=protected-access\n reduce_op,\n value,\n replica_context._replica_id, # pylint: disable=protected-access\n options)\n\n def _check_health(self):\n while True:\n if self._check_health_thread_should_stop.is_set():\n return\n for job in self._cluster_spec.jobs:\n for task_id in range(self._cluster_spec.num_tasks(job)):\n peer = \"/job:{}/replica:0/task:{}\".format(job, task_id)\n attempts = 0\n while True:\n attempts += 1\n try:\n context.context().check_collective_ops_peer_health(\n peer, timeout_in_ms=self._check_health_timeout * 1000)\n # If check_collective_ops_peer_health doesn't raise an Exception,\n # the peer is healthy.\n break\n except (errors.UnavailableError, errors.FailedPreconditionError,\n errors.DeadlineExceededError) as e:\n # TODO(b/151232436): Always raise UnavailableError when a peer\n # fails. Now there could be many kinds of errors:\n # - Unavailable: when the peer is not reachable, e.g. it's down.\n # - FailedPrecondition: when the peer has restarted.\n if attempts < self._check_health_retry_limit:\n logging.warning(\"%s seems down, retrying %d/%d\", peer, attempts,\n self._check_health_retry_limit)\n continue\n logging.error(\n \"Cluster check alive failed, %s is down, \"\n \"aborting collectives: %s\", peer, e)\n context.context().abort_collective_ops(\n errors.UNAVAILABLE,\n \"cluster check alive failed, {} is down\".format(peer))\n return\n except Exception as e: # pylint: disable=broad-except\n logging.error(\"Unexpected exception in check alive: %s\", e)\n context.context().abort_collective_ops(\n errors.INTERNAL,\n \"unexecpted exception in check alive: %s\" % e)\n return\n time.sleep(self._check_health_interval)\n\n def _start_check_health_thread(self):\n # Use a dummy all-reduce as a barrier to wait for all workers to be up,\n # otherwise the check health may fail immediately.\n\n # Use array_ops.identity to create the dummy tensor so that we have a new\n # Tensor. 
If we use constant it may be a cached from on a /job:localhost\n # device, which will cause some code that relies on tensor.device to error.\n #\n # TODO(b/151232436): change to an explicit barrier if we have it.\n dummy_value = array_ops.identity([])\n logging.info(\"Waiting for the cluster, timeout = %s\",\n self._check_health_initial_timeout or \"inf\")\n try:\n self._host_cross_device_ops.reduce(\n reduce_util.ReduceOp.SUM,\n dummy_value,\n dummy_value,\n options=collective_util.Options(\n timeout_seconds=self._check_health_initial_timeout,\n implementation=collective_util.CommunicationImplementation.RING))\n if context.is_async():\n context.async_wait()\n except errors.DeadlineExceededError:\n raise RuntimeError(\n \"Timeout waiting for the cluster, timeout is %d seconds\" %\n self._check_health_initial_timeout)\n logging.info(\"Cluster is ready.\")\n self._check_health_thread_should_stop = threading.Event()\n # Start the thread as daemon to avoid it blocking the program from exiting.\n # We try best to shutdown the thread but __del__ is not guaranteed to be\n # called when program exists.\n self._check_health_thread = threading.Thread(\n target=self._check_health,\n daemon=True)\n self._check_health_thread.start()\n\n def _stop_check_health_thread(self):\n if getattr(self, \"_check_health_thread\", None):\n logging.info(\"stopping check health thread\")\n self._check_health_thread_should_stop.set()\n self._check_health_thread.join()\n self._check_health_thread = None\n logging.info(\"check health thread stopped\")\n\n def _warn_nccl_no_gpu(self):\n if ((self._communication_options.implementation ==\n collective_util.CommunicationImplementation.NCCL) and\n self._local_device_type != \"GPU\"):\n logging.warning(\"Enabled NCCL communication but no GPUs detected/\"\n \"specified.\")\n\n def _in_multi_worker_mode(self):\n \"\"\"Whether this strategy indicates working in multi-worker settings.\"\"\"\n return self._num_workers > 1\n\n @property\n def experimental_between_graph(self):\n return True\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return self._is_chief\n\n @property\n def should_save_summary(self):\n return self._is_chief\n\n @property\n def _num_replicas_in_sync(self):\n return len(self.worker_devices) * self._num_workers\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n `make_input_fn_iterator` assumes per-replica batching.\n\n Returns:\n Boolean.\n \"\"\"\n return True\n\n def _get_replica_id_in_sync_group(self, replica_id):\n return self._id_in_cluster * len(self.worker_devices) + replica_id\n\n def _get_local_replica_id(self, replica_id_in_sync_group):\n return (replica_id_in_sync_group -\n self._id_in_cluster * len(self.worker_devices))\n\n def __deepcopy__(self, memo):\n # We check the check health thread instead of whether we are in eager mode\n # to limit the backward incompatibility.\n if hasattr(self, \"_check_health_thread\"):\n raise ValueError(\n \"MultiWorkerMirroredStrategy cannot be deep copied in eager mode. 
\"\n \"If you're using Estimator and see this error message, call \"\n \"tf.compat.v1.disable_eager_execution() at the beginning of your \"\n \"program\")\n # Otherwise, do a regular deepcopy.\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n setattr(result, k, copy.deepcopy(v, memo))\n return result\n"
] |
[
[
"tensorflow.python.distribute.distribute_lib.StrategyExtendedV1.__init__",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.eager.context.async_wait",
"tensorflow.python.distribute.numpy_dataset.SingleDevice",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.distribute.multi_worker_util.id_in_cluster",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_gauge.get_cell",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.is_async",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.context",
"tensorflow.python.distribute.distribute_lib.ValueContext",
"tensorflow.python.distribute.device_util.get_host_for_device",
"tensorflow.python.tpu.tpu_strategy_util.initialize_tpu_system",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.distribute_lib.InputContext",
"tensorflow.python.distribute.multi_worker_util.worker_count",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_replica_gauge.get_cell",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.distribute.distribute_utils.regroup",
"tensorflow.python.distribute.collective_util.Options",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.ops.collective_ops.broadcast_recv",
"tensorflow.python.distribute.cross_device_ops.CollectiveAllReduce",
"tensorflow.python.distribute.multi_worker_util.is_chief",
"tensorflow.python.distribute.cross_device_utils.CollectiveKeys",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.distribute.multi_worker_util.collective_leader",
"tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver",
"tensorflow.python.ops.collective_ops.broadcast_send",
"tensorflow.python.distribute.input_lib.InputWorkers"
]
] |
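The file above implements the collective-ops internals behind TensorFlow's MultiWorkerMirroredStrategy. A minimal usage sketch, assuming the TensorFlow 2.x public API and a TF_CONFIG environment variable that describes the cluster (both are assumptions, not taken from the row itself); the NCCL option mirrors the communication implementation checked in _warn_nccl_no_gpu above:

import tensorflow as tf

# NCCL collectives are only useful when GPUs are present (see _warn_nccl_no_gpu above).
options = tf.distribute.experimental.CommunicationOptions(
    implementation=tf.distribute.experimental.CommunicationImplementation.NCCL)
strategy = tf.distribute.MultiWorkerMirroredStrategy(communication_options=options)

with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    model.compile(optimizer="sgd", loss="mse")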
a07458666/UncertaintyFlow
|
[
"cef2512901d4e27bb22fc3997522cd47c03b569c"
] |
[
"module/dun_datasets/additional_gap_loader.py"
] |
[
"import zipfile\r\nimport pickle\r\ntry:\r\n import urllib\r\n from urllib import urlretrieve\r\nexcept Exception:\r\n import urllib.request as urllib\r\nfrom os import path\r\n\r\nimport numpy as np\r\nfrom numpy.random import uniform, randn\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom .utils import mkdir\r\n\r\n\r\ndef load_axis(base_dir='./dun_datasets/data/'):\r\n\r\n if not path.exists(base_dir + '/gap_classification'):\r\n urllib.urlretrieve('https://javierantoran.github.io/assets/datasets/gap_classification.zip',\r\n filename=base_dir + '/gap_classification.zip')\r\n with zipfile.ZipFile(base_dir + '/gap_classification.zip', 'r') as zip_ref:\r\n zip_ref.extractall(base_dir)\r\n\r\n file1 = base_dir + '/gap_classification/axis.pkl'\r\n\r\n with open(file1, 'rb') as f:\r\n axis_tupple = pickle.load(f)\r\n axis_x = axis_tupple[0].astype(np.float32)\r\n axis_y = axis_tupple[1].astype(np.float32)[:, np.newaxis]\r\n\r\n x_means, x_stds = axis_x.mean(axis=0), axis_x.std(axis=0)\r\n y_means, y_stds = axis_y.mean(axis=0), axis_y.std(axis=0)\r\n\r\n X = ((axis_x - x_means) / x_stds).astype(np.float32)\r\n Y = ((axis_y - y_means) / y_stds).astype(np.float32)\r\n return X, Y\r\n\r\n\r\ndef load_origin(base_dir='./dun_datasets/data/'):\r\n\r\n if not path.exists(base_dir + '/gap_classification'):\r\n urllib.urlretrieve('https://javierantoran.github.io/assets/datasets/gap_classification.zip',\r\n filename=base_dir + '/gap_classification.zip')\r\n with zipfile.ZipFile(base_dir + '/gap_classification.zip', 'r') as zip_ref:\r\n zip_ref.extractall(base_dir)\r\n\r\n file2 = base_dir + '/gap_classification/origin.pkl'\r\n\r\n with open(file2, 'rb') as f:\r\n origin_tupple = pickle.load(f)\r\n origin_x = origin_tupple[0].astype(np.float32)\r\n origin_y = origin_tupple[1].astype(np.float32)[:, np.newaxis]\r\n\r\n x_means, x_stds = origin_x.mean(axis=0), origin_x.std(axis=0)\r\n y_means, y_stds = origin_y.mean(axis=0), origin_y.std(axis=0)\r\n\r\n X = ((origin_x - x_means) / x_stds).astype(np.float32)\r\n Y = ((origin_y - y_means) / y_stds).astype(np.float32)\r\n\r\n return X, Y\r\n\r\n\r\ndef load_agw_1d(base_dir='./dun_datasets/data/', get_feats=False):\r\n if not path.exists(base_dir + '/agw_data'):\r\n mkdir(base_dir + '/agw_data')\r\n urllib.urlretrieve('https://raw.githubusercontent.com/wjmaddox/drbayes/master/experiments/synthetic_regression/ckpts/data.npy',\r\n filename=base_dir + '/agw_data/data.npy')\r\n\r\n def features(x):\r\n return np.hstack([x[:, None] / 2.0, (x[:, None] / 2.0) ** 2])\r\n\r\n data = np.load(base_dir + '/agw_data/data.npy')\r\n x, y = data[:, 0], data[:, 1]\r\n y = y[:, None]\r\n f = features(x)\r\n\r\n x_means, x_stds = x.mean(axis=0), x.std(axis=0)\r\n y_means, y_stds = y.mean(axis=0), y.std(axis=0)\r\n f_means, f_stds = f.mean(axis=0), f.std(axis=0)\r\n\r\n X = ((x - x_means) / x_stds).astype(np.float32)\r\n Y = ((y - y_means) / y_stds).astype(np.float32)\r\n F = ((f - f_means) / f_stds).astype(np.float32)\r\n\r\n if get_feats:\r\n return F, Y\r\n\r\n return X[:, None], Y\r\n\r\n\r\ndef load_andrew_1d(base_dir='./dun_datasets/data/'):\r\n if not path.exists(base_dir + '/andrew_1d'):\r\n print('base_dir does not point to data directory')\r\n\r\n with open(base_dir + '/andrew_1d/1d_cosine_separated.pkl', 'rb') as f:\r\n data = pickle.load(f)\r\n x = data[:, 0]\r\n x = x[:, None]\r\n y = data[:, 1]\r\n y = y[:, None]\r\n\r\n x_means, x_stds = x.mean(axis=0), x.std(axis=0)\r\n y_means, y_stds = y.mean(axis=0), 
y.std(axis=0)\r\n\r\n X = ((x - x_means) / x_stds).astype(np.float32)\r\n Y = ((y - y_means) / y_stds).astype(np.float32)\r\n\r\n return X, Y\r\n\r\n\r\ndef load_matern_1d(base_dir='./dun_datasets/data/'):\r\n if not path.exists(base_dir + '/matern_data/'):\r\n mkdir(base_dir + '/matern_data/')\r\n\r\n def gen_1d_matern_data():\r\n from GPy.kern.src.sde_matern import Matern32\r\n np.random.seed(4)\r\n\r\n lengthscale = 0.5\r\n variance = 1.0\r\n sig_noise = 0.15\r\n\r\n n1_points = 200\r\n x1 = np.random.uniform(-2, -1, n1_points)[:, None]\r\n\r\n n2_points = 200\r\n x2 = np.random.uniform(0.5, 2.5, n2_points)[:, None]\r\n\r\n no_points = n1_points + n2_points\r\n x = np.concatenate([x1, x2], axis=0)\r\n x.sort(axis=0)\r\n\r\n k = Matern32(input_dim=1, variance=variance, lengthscale=lengthscale)\r\n C = k.K(x, x) + np.eye(no_points) * sig_noise ** 2\r\n\r\n y = np.random.multivariate_normal(np.zeros((no_points)), C)[:, None]\r\n\r\n x_means, x_stds = x.mean(axis=0), x.std(axis=0)\r\n y_means, y_stds = y.mean(axis=0), y.std(axis=0)\r\n\r\n X = ((x - x_means) / x_stds).astype(np.float32)\r\n Y = ((y - y_means) / y_stds).astype(np.float32)\r\n\r\n return X, Y\r\n\r\n x, y = gen_1d_matern_data()\r\n xy = np.concatenate([x, y], axis=1)\r\n np.save(base_dir + '/matern_data/matern_1d.npy', xy)\r\n return x, y\r\n else:\r\n xy = np.load(base_dir + '/matern_data/matern_1d.npy')\r\n x = xy[:, 0]\r\n x = x[:, None]\r\n y = xy[:, 1]\r\n y = y[:, None]\r\n return x, y\r\n\r\n\r\ndef load_my_1d(base_dir='./dun_datasets/data/'):\r\n if not path.exists(base_dir + '/my_1d_data/'):\r\n mkdir(base_dir + '/my_1d_data/')\r\n\r\n def gen_my_1d(hetero=False):\r\n\r\n np.random.seed(0)\r\n Npoints = 1002\r\n x0 = uniform(-1, 0, size=int(Npoints / 3))\r\n x1 = uniform(1.7, 2.5, size=int(Npoints / 3))\r\n x2 = uniform(4, 5, size=int(Npoints / 3))\r\n x = np.concatenate([x0, x1, x2])\r\n\r\n def function(x):\r\n return x - 0.1 * x ** 2 + np.cos(np.pi * x / 2)\r\n\r\n y = function(x)\r\n\r\n homo_noise_std = 0.25\r\n homo_noise = randn(*x.shape) * homo_noise_std\r\n y_homo = y + homo_noise\r\n\r\n hetero_noise_std = np.abs(0.1 * np.abs(x) ** 1.5)\r\n hetero_noise = randn(*x.shape) * hetero_noise_std\r\n y_hetero = y + hetero_noise\r\n\r\n X = x[:, np.newaxis]\r\n y_joint = np.stack([y_homo, y_hetero], axis=1)\r\n\r\n X_train, X_test, y_joint_train, y_joint_test = train_test_split(X, y_joint, test_size=0.5, random_state=42)\r\n y_hetero_train, y_hetero_test = y_joint_train[:, 1, np.newaxis], y_joint_test[:, 1, np.newaxis]\r\n y_homo_train, y_homo_test = y_joint_train[:, 0, np.newaxis], y_joint_test[:, 0, np.newaxis]\r\n\r\n x_means, x_stds = X_train.mean(axis=0), X_train.std(axis=0)\r\n y_hetero_means, y_hetero_stds = y_hetero_train.mean(axis=0), y_hetero_train.std(axis=0)\r\n y_homo_means, y_homo_stds = y_homo_test.mean(axis=0), y_homo_test.std(axis=0)\r\n\r\n X_train = ((X_train - x_means) / x_stds).astype(np.float32)\r\n X_test = ((X_test - x_means) / x_stds).astype(np.float32)\r\n\r\n y_hetero_train = ((y_hetero_train - y_hetero_means) / y_hetero_stds).astype(np.float32)\r\n y_hetero_test = ((y_hetero_test - y_hetero_means) / y_hetero_stds).astype(np.float32)\r\n\r\n y_homo_train = ((y_homo_train - y_homo_means) / y_homo_stds).astype(np.float32)\r\n y_homo_test = ((y_homo_test - y_homo_means) / y_homo_stds).astype(np.float32)\r\n\r\n if hetero:\r\n return X_train, y_hetero_train, X_test, y_hetero_test\r\n else:\r\n return X_train, y_homo_train, X_test, y_homo_test\r\n\r\n X_train, y_homo_train, X_test, y_homo_test 
= gen_my_1d()\r\n xy = np.concatenate([X_train, y_homo_train, X_test, y_homo_test], axis=1)\r\n np.save(base_dir + '/my_1d_data/my_1d_data.npy', xy)\r\n return X_train, y_homo_train, X_test, y_homo_test\r\n\r\n xy = np.load(base_dir + '/my_1d_data/my_1d_data.npy')\r\n X_train = xy[:, 0, None].astype(np.float32)\r\n y_homo_train = xy[:, 1, None].astype(np.float32)\r\n X_test = xy[:, 2, None].astype(np.float32)\r\n y_homo_test = xy[:, 3, None].astype(np.float32)\r\n\r\n return X_train, y_homo_train, X_test, y_homo_test\r\n\r\n\r\ndef load_wiggle_1d():\r\n\r\n np.random.seed(0)\r\n Npoints = 300\r\n x = randn(Npoints) * 2.5 + 5 # uniform(0, 10, size=Npoints)\r\n\r\n def function(x):\r\n return np.sin(np.pi * x) + 0.2 * np.cos(np.pi * x * 4) - 0.3 * x\r\n\r\n y = function(x)\r\n\r\n homo_noise_std = 0.25\r\n homo_noise = randn(*x.shape) * homo_noise_std\r\n y = y + homo_noise\r\n\r\n x = x[:, None]\r\n y = y[:, None]\r\n\r\n x_means, x_stds = x.mean(axis=0), x.std(axis=0)\r\n y_means, y_stds = y.mean(axis=0), y.std(axis=0)\r\n\r\n X = ((x - x_means) / x_stds).astype(np.float32)\r\n Y = ((y - y_means) / y_stds).astype(np.float32)\r\n\r\n return X, Y\r\n\r\n"
] |
[
[
"numpy.hstack",
"numpy.abs",
"numpy.random.seed",
"numpy.eye",
"sklearn.model_selection.train_test_split",
"numpy.save",
"numpy.stack",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.sin",
"numpy.cos",
"numpy.random.randn",
"numpy.load",
"numpy.zeros"
]
] |
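Every loader in the file above ends with the same zero-mean / unit-variance standardization before returning X and Y. A self-contained sketch of that step, with random data standing in for the downloaded pickles:

import numpy as np

def standardize(x, y):
    # Column-wise zero-mean / unit-std scaling, as in load_axis, load_origin, etc.
    x_means, x_stds = x.mean(axis=0), x.std(axis=0)
    y_means, y_stds = y.mean(axis=0), y.std(axis=0)
    X = ((x - x_means) / x_stds).astype(np.float32)
    Y = ((y - y_means) / y_stds).astype(np.float32)
    return X, Y

X, Y = standardize(np.random.randn(200, 2), np.random.randn(200, 1))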
sumanbanerjee1/Code-Mixed-Dialog
|
[
"9df1d4dc800548a883f8bc1a9ce4116c77aebc02"
] |
[
"code/train_seq2seq.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport json\nimport pickle\nimport os\nimport subprocess\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nfrom tqdm import tqdm\nfrom seq2seq.model import Seq2seqModel\nfrom seq2seq.data_utils import pad, replace_token_no, get_len\n\nflags = tf.app.flags\nflags.DEFINE_string(\"config_id\",'135',\"Hyperparam config id\")\nflags.DEFINE_string(\"data_dir\", \"../data/tamil\", \"Data directory \")\nflags.DEFINE_string(\"infer_data\", \"test\", \"[train, dev or test] Dataset on which you want to perform inference\")\nflags.DEFINE_string(\"logs_dir\", \"logs\", \"Logging directory \")\nflags.DEFINE_string(\"checkpoint_dir\", 'checkpoints', \"Checkpoint directory\")\nflags.DEFINE_string(\"rnn_unit\", 'gru', \"GRU or LSTM\")\nflags.DEFINE_float(\"learning_rate\", 0.0004, \"Learning rate for Adam Optimizer\")\nflags.DEFINE_integer(\"batch_size\",32, \"Batch size for training\")\nflags.DEFINE_integer(\"epochs\",50, \"Number of epochs to train for\")\nflags.DEFINE_integer(\"max_gradient_norm\",5, \"Max grad norm. 0 for no clipping\")\nflags.DEFINE_float(\"dropout\", 0.75, \"keep probability for keeping unit\")\nflags.DEFINE_integer(\"num_layers\", 1, \"No of layers of stacking in RNN\")\nflags.DEFINE_integer(\"word_emb_dim\",300, \"hidden dimensions of the word embeddings.\")\nflags.DEFINE_integer(\"hidden_units\",350, \"hidden dimensions of the encoder-decoder units.\")\nflags.DEFINE_integer(\"eval_interval\", 1, \"After how many epochs do you want to eval\")\nflags.DEFINE_integer(\"patience\",5, \"Patience parameter\")\nflags.DEFINE_boolean(\"train\",True,\"Train or Infer\")\nflags.DEFINE_boolean(\"debug\",True,\"Debug mode with small dataset\")\nFLAGS = flags.FLAGS\n\ndef arrayfy(data, stats, vocab):\n \"\"\"\n Create data-arrays from the nested-list form of data\n data: The data in nested list form\n stats: The stats file dumped from preprocessing\n vocab: The vocab file dumped from preprocessing\n \"\"\"\n context_len,dec_ip_len,dec_op_len=get_len(data)\n pad(data,stats)\n replace_token_no(data,vocab)\n context=np.asarray(data[0])\n dec_ip_arr=np.asarray(data[1])\n dec_op_arr=np.asarray(data[2])\n context_len_arr=np.asarray(context_len)\n dec_ip_len_arr=np.asarray(dec_ip_len)\n target_l_arr=[]\n for i in range(len(data[2])):\n fill=list(np.zeros(stats[2],dtype=int))\n for j in range(dec_op_len[i]):\n fill[j]=1\n target_l_arr.append(fill)\n \n target_len_arr=np.asarray(target_l_arr)\n return [context,dec_ip_arr,dec_op_arr,context_len_arr,dec_ip_len_arr,target_len_arr]\n\ndef read_data(directory):\n \"\"\"\n Read the data and associated files from the \n data directory and return it in the form of arrays\n \n args:\n directory: The data directory specified by FLAGS.data_dir\n \"\"\"\n \n \n if not os.path.exists(FLAGS.logs_dir+FLAGS.config_id+'/'):\n os.mkdir(FLAGS.logs_dir+FLAGS.config_id+'/')\n \n with open(directory+'/p-dialog-dstc2-train.json','r') as fp:\n train_data=json.load(fp)\n with open(directory+'/p-dialog-dstc2-test.json','r') as fp:\n test_data=json.load(fp)\n with open(directory+'/p-dialog-dstc2-dev.json','r') as fp:\n dev_data=json.load(fp)\n with open(directory+'/p-dialog-dstc2-stats.json','r') as fp:\n stats=json.load(fp)\n with open(directory+'/p-dialog-dstc2-vocab.json','r') as fp:\n vocab=json.load(fp)\n \n params_dict=FLAGS.__flags\n params_dict['max_enc_size']=stats[0]\n params_dict['max_sent_size']=stats[1]\n params_dict['vocab_size']=len(vocab)\n\n train=arrayfy(train_data,stats,vocab)\n 
test=arrayfy(test_data,stats,vocab)\n dev=arrayfy(dev_data,stats,vocab)\n \n return train,test,dev\n\ndef create_model(sess,FLAGS):\n \"\"\"\n Create a new model if there are no checkpoints\n otherwise restore the model from the existing\n checkpoint\n \n args:\n sess: The active Session\n FLAGS: The configuration FLAGS\n \"\"\"\n print(\"Creating/Restoring Seq2seq Model.....\")\n model = Seq2seqModel(sess,FLAGS)\n \n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir+FLAGS.config_id)\n if ckpt:\n print(\"Restoring model parameters from %s\" %\n ckpt.model_checkpoint_path)\n model.saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n print(\"Created new model!\")\n sess.run(tf.global_variables_initializer())\n\n return model\n \n\ndef save_model(sess,model):\n \"\"\"\n Save the model in the checkpoint directory\n \n args:\n sess: The active Session\n model: The model object which is created or restored\n \"\"\"\n if not os.path.exists(FLAGS.checkpoint_dir+FLAGS.config_id):\n os.makedirs(FLAGS.checkpoint_dir+FLAGS.config_id)\n save_path = model.saver.save(sess, os.path.join(FLAGS.checkpoint_dir+FLAGS.config_id, \"model.ckpt\"))\n print(\"Model saved in file: %s\" % save_path)\n \n \ndef get_words_from_ids(ids):\n \"\"\"\n Convert token ids to the corresponding words by looking up\n from the vocabulary file. It breaks the generated sentence\n at the first '<EOS>' token.\n \n arg:\n ids: The predicted ids obtained from argmax over the Vocab softmax values\n \"\"\"\n \n ids_list=ids.tolist()\n with open(FLAGS.data_dir+'/p-dialog-dstc2-vocab.json','r') as fp:\n vocab=json.load(fp)\n \n invert_vocab= dict([v,k] for k,v in vocab.items())\n \n r=[]\n for i in ids_list:\n c=''\n for j in i:\n c=c+' '+invert_vocab[j]\n if invert_vocab[j]=='<EOS>':\n break\n r.append(c.strip())\n \n return r\n\n\ndef eval_(data,sess,model,FLAGS,epoch):\n \"\"\"\n Run one pass over the validation set\n to get the average validation loss\n \n args:\n data: The whole data in the array format\n sess: The active Session\n model: The model object which has been created or restored\n FLAGS: The configuration FLAGS\n epoch: The current epoch number\n \"\"\"\n\n batch_size = FLAGS.batch_size\n num_ex = data[0].shape[0]\n batches = zip(range(0, num_ex, batch_size), range(batch_size, num_ex+batch_size, batch_size))\n batches = [(start, end) for start, end in batches]\n\n #Start forward pass on the dev batches\n losses=[]\n preds_all=np.zeros(FLAGS.max_sent_size)\n for i,j in batches:\n batch_data=[data[k][i:j] for k in range(len(data))]\n pred,loss,_ =model.step(sess,FLAGS,batch_data,True,1.0)\n preds_all = np.row_stack((preds_all,pred))\n losses.append(loss)\n \n avg_loss=np.mean(losses)\n return avg_loss\n\n\n\ndef train():\n \"\"\"\n Set up batches of the data and run training on them.\n Also collects the validation losses after \n FLAGS.eval_interval number of epochs. 
Logs them in the FLAGS.logs_dir\n \"\"\"\n \n \n print(\"Reading Dataset....\")\n dir_=FLAGS.data_dir\n train_examples, test_examples, dev_examples = read_data(dir_)\n \n # If in debugging mode then run the training of 2 epochs with a smaller data of 67 examples only\n if FLAGS.debug==True:\n train_examples=[train_examples[k][0:67] for k in range(len(train_examples))]\n dev_examples=[dev_examples[k][0:67] for k in range(len(dev_examples))]\n FLAGS.epochs=2\n \n print(\"Finished Reading Dataset!\")\n \n #setup data batch indices\n batch_size = FLAGS.batch_size\n num_train = train_examples[0].shape[0]\n batches = zip(range(0, num_train, batch_size), range(batch_size, num_train+batch_size, batch_size))\n batches = [(start, end) for start, end in batches]\n fp=open(FLAGS.logs_dir+FLAGS.config_id+'/logs.log','w+')\n \n \n with tf.Session() as sess:\n #Create or Restore Model\n model=create_model(sess,FLAGS)\n try:\n #Run Training\n for epoch in range(1,FLAGS.epochs+1):\n train_loss=[]\n \n for i,j in tqdm(batches):\n batch_train =[train_examples[k][i:j] for k in range(len(train_examples))]\n ypred, loss,_ =model.step(sess,FLAGS,batch_train,False,FLAGS.dropout)\n train_loss.append(loss)\n fp.write(\"Epoch:\"+str(epoch)+\" batch train loss: \"+str(loss)+'\\n')\n \n print(\"Epoch: \",epoch,\" Train loss: \",np.mean(train_loss))\n if epoch>0 and epoch % FLAGS.eval_interval==0:\n val_loss=eval_(dev_examples,sess,model,FLAGS,epoch)\n print(\"Val Loss: \"+str(val_loss)+\" Train loss: \"+str(np.mean(train_loss)))\n fp.write(\"Val Loss: \"+str(val_loss)+\"Train loss: \"+str(np.mean(train_loss))+'\\n\\n\\n\\n')\n \n print('Saving Model...')\n fp.write(\"Saving Model\\n\")\n save_model(sess,model)\n \n except KeyboardInterrupt:\n print(\"Keyboard Interrupt\")\n finally:\n fp.close()\n\n\ndef infer(data_infer):\n \"\"\"\n Run inference on the dataset specified.\n It dumps the generated sentences and the ground truth sentences.\n \n args:\n data_infer: The dataset on which inference is going to be performed.\n \"\"\"\n \n dir_=FLAGS.data_dir\n train_examples, test_examples, dev_examples = read_data(dir_)\n \n if data_infer=='test':\n data=test_examples\n elif data_infer=='dev':\n data=dev_examples\n elif data_infer=='train':\n data=train_examples\n else:\n print(\"Invalid Choice!!\")\n return\n \n # If debugging mode is on then run inference only on a smaller dataset of 67 examples\n if FLAGS.debug:\n data = [data[k][0:67] for k in range(len(data))] \n\n #set up batch indices \n batch_size = FLAGS.batch_size\n num_ex = data[0].shape[0]\n batches = zip(range(0, num_ex, batch_size), range(batch_size, num_ex+batch_size, batch_size))\n batches = [(start, end) for start, end in batches]\n \n with tf.Session(graph=tf.Graph()) as sess:\n model=create_model(sess,FLAGS)\n all_wts=[]\n preds_all=np.zeros(FLAGS.max_sent_size)\n for i,j in tqdm(batches):\n batch_data=[data[k][i:j] for k in range(len(data))]\n pred,loss,wts =model.step(sess,FLAGS,batch_data,True,1.0)\n all_wts.append(wts)\n preds_all = np.row_stack((preds_all,pred))\n \n preds_ids=np.delete(preds_all,0,0)\n \n preds_test=get_words_from_ids(preds_ids)\n labels_test=get_words_from_ids(data[2])\n \n os.makedirs(\"Results\")\n fp1 =open('Results/predictions'+str(FLAGS.config_id)+'.txt','w+')\n \n for item in preds_test:\n fp1.write(\"%s\\n\"%item)\n fp1.close()\n \n fp2 =open('Results/labels'+str(FLAGS.config_id)+'.txt','w+')\n for item in labels_test:\n fp2.write(\"%s\\n\"%item)\n fp2.close()\n\ndef get_words_from_ids_context(ids):\n 
ids_list=ids.tolist()\n with open(FLAGS.data_dir+'/p-dialog-dstc2-vocab.json','r') as fp:\n vocab=json.load(fp)\n \n invert_vocab= dict([v,k] for k,v in vocab.items())\n \n r=[]\n for i in ids_list:\n c=''\n for j in i:\n c=c+' '+invert_vocab[j]\n \n r.append(c.strip())\n \n return r\n\ndef get_words_from_ids_context_kb(ids):\n ids_list=ids.tolist()\n with open(FLAGS.data_dir+'/p-dialog-dstc2-vocab.json','r') as fp:\n vocab=json.load(fp)\n \n invert_vocab= dict([v,k] for k,v in vocab.items())\n \n ind=ids_list[0].index([0,0,0])\n ids_list[0]=ids_list[0][0:ind]\n r=[]\n for i in ids_list[0]:\n c=''\n for j in i:\n c=c+' '+invert_vocab[j]\n \n r.append(c.strip())\n \n return r\n \ndef showAttention(input_sentence, output_words, attentions,c):\n fig = plt.figure(figsize=(30,10))\n ax = fig.add_subplot(1,1,1)\n \n \n cax = ax.matshow(attentions, cmap='Blues')\n fig.colorbar(cax)\n\n ax.set_xticklabels([''] + input_sentence +\n ['<EOS>'], rotation=90,fontsize=5)\n ax.set_yticklabels([''] + output_words,fontsize=6.5)\n \n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n plt.savefig('attnplots'+c+'.pdf')\n plt.show()\n \n\ndef give_wts(w,op_words,c_words,index):\n ww=[]\n \n for i in w:\n ww.append(i[index:index+1,0:len(c_words)])\n \n ww=np.asarray(ww)\n ww=np.squeeze(ww,1)\n ww=ww[0:len(op_words)]\n \n return ww\n\n\ndef give_wts_theta(w,op_words,index):\n ww1=[]\n \n for i in w:\n ww1.append(i[index:index+1,0:1])\n \n ww1=np.asarray(ww1)\n ww1=np.squeeze(ww1,1)\n ww1=ww1[0:len(op_words)]\n \n return ww1\n\ndef attention_wts(preds_test,data,all_wts,index):\n \n ind=index%32\n all_wts1=all_wts[int(index/32)]\n \n pre=data[0][index:index+1]\n op_words=preds_test[index].split(' ')\n \n pre_words=get_words_from_ids_context(pre)[0].split(' ')\n ww_pre=give_wts(all_wts1,op_words,pre_words,ind)\n showAttention(pre_words,op_words,ww_pre,'pre')\n \ndef main():\n if FLAGS.train:\n train()\n FLAGS.train=False\n infer(FLAGS.infer_data)\n else:\n infer(FLAGS.infer_data)\n\nif __name__=='__main__':\n main()\n"
] |
[
[
"tensorflow.train.get_checkpoint_state",
"matplotlib.ticker.MultipleLocator",
"tensorflow.Graph",
"numpy.asarray",
"numpy.squeeze",
"matplotlib.pyplot.savefig",
"tensorflow.global_variables_initializer",
"numpy.delete",
"numpy.mean",
"numpy.row_stack",
"tensorflow.Session",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
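Both train() and eval_() in the file above build batch index pairs with the same zip-of-ranges idiom. A small sketch showing what it produces (slicing past the end of a NumPy array is safe, so the final batch is simply shorter):

num_examples, batch_size = 103, 32
batches = list(zip(range(0, num_examples, batch_size),
                   range(batch_size, num_examples + batch_size, batch_size)))
print(batches)  # [(0, 32), (32, 64), (64, 96), (96, 128)]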
LaplaceKorea/Data
|
[
"8c16fc8c4cc29c1dc42d340ba1452c7fbc222bd3"
] |
[
"FeaturesSP500.py"
] |
[
"\nfrom typing import Any, Dict, List, Tuple\nfrom Config import cfg\nfrom Features import *\nimport numpy as np\n\nDELAYED_BID = 66\nDELAYED_ASK = 67\nDELAYED_LAST = 68\nDELAYED_BID_SIZE = 69\nDELAYED_ASK_SIZE = 70\nDELAYED_LAST_SIZE = 71\nDELAYED_HIGH = 72\nDELAYED_LOW = 73\nDELAYED_VOLUME = 74\nDELAYED_CLOSE = 75\nDELAYED_OPEN = 76\nTIMESTAMP = 88\n\ntranslate = {\n DELAYED_BID: \"DELAYED_BID\",\n DELAYED_ASK: \"DELAYED_ASK\",\n DELAYED_LAST: \"DELAYED_LAST\",\n DELAYED_BID_SIZE: \"DELAYED_BID_SIZE\",\n DELAYED_ASK_SIZE: \"DELAYED_ASK_SIZE\",\n DELAYED_LAST_SIZE: \"DELAYED_LAST_SIZE\",\n DELAYED_HIGH: \"DELAYED_HIGH\",\n DELAYED_LOW: \"DELAYED_LOW\",\n DELAYED_VOLUME: \"DELAYED_VOLUME\",\n DELAYED_CLOSE: \"DELAYED_CLOSE\",\n DELAYED_OPEN: \"DELAYED_OPEN\",\n TIMESTAMP: \"TIMESTAMP\"\n}\n\ntaboo = {\n \"Symbol\": True,\n \"American Airlines Group\": True,\n \"0\": True\n}\n\nstocks: Dict[str, int] = {\n # ex: \"AAPL\": 0, \"ADBE\":1\n}\n\nstockNames: Dict[str, str] = {}\n\nimport csv\n\nccount = 0\nwith open(\"constituents.csv\") as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if row[0] in taboo:\n pass\n else:\n print(row[0], \" => \", row[1])\n stocks[row[0]] = ccount\n stockNames[row[0]] = row[1]\n ccount = ccount+1\n\nfields = {\n DELAYED_OPEN:0,\n DELAYED_HIGH:1,\n DELAYED_LOW:2,\n DELAYED_CLOSE:3\n}\n\ndef loadSP500File(fn: str):\n raw_data: Dict[str,List[np.ndarray]] = {}\n\n # Z|#|field|size\n # P|#|field|price\n # S|#|type(88)|value(timestamp)\n\n # delayed-1623246971\n c = 1\n\n with open(fn) as infile:\n for line in infile:\n c = c + len(line)\n if line[0] == 'Z':\n pass\n if line[0] == 'P':\n elts = line.split(\"|\")\n ticker = elts[1]\n if ticker in stocks:\n field = int(elts[2])\n price = float(elts[3])\n if field in fields:\n print(ticker, field, price)\n if ticker in raw_data:\n pass\n else:\n raw_data[ticker] = [] \n rd = raw_data[ticker]\n try:\n rd[len(rd)-1][fields[field]+1] = price\n except:\n pass\n if line[0] == 'S':\n elts = line.split(\"|\")\n tickers = elts[1]\n if ticker in stocks:\n field = int(elts[2])\n ts = int(elts[3])\n if field == TIMESTAMP:\n print(ticker, \"time=\", ts)\n if ticker in raw_data:\n pass\n else:\n raw_data[ticker] = []\n rd = raw_data[ticker]\n a = np.zeros((len(fields)+1,), dtype=np.float32)\n a[0] = ts\n rd.append(a)\n \n # print(c) \n # print(raw_data)\n\n finallist: List[np.ndarray] = []\n indices: Dict[str, int] = {}\n for k in stocks:\n indices[k] = 0\n ndone = 0\n farfuture = 1e37\n while ndone < len(indices):\n next = farfuture # whatever big\n selected = \"\"\n for k in indices:\n i = indices[k]\n try:\n d = raw_data[k]\n if i < len(d):\n ts = d[i][0]\n if ts < next:\n next = ts\n selected = k\n except:\n pass\n nextLine = np.zeros((len(stocks) * (len(fields)+1),), dtype=np.float32)\n # print(nextLine.shape)\n if selected == \"\":\n break\n for k in indices:\n i = indices[k]\n try:\n d = raw_data[k]\n if i < len(d):\n ts = d[i][0]\n if abs(ts-next) < 1e-12:\n idx = stocks[k]\n nextLine[(idx *(len(fields)+1)):(idx*(len(fields)+1)+(len(fields)+1))] = d[i][:]\n indices[k] = i+1\n except:\n pass\n finallist.append(nextLine)\n\n f = np.vstack(finallist)\n print(f.shape)\n return f\n\n# description\n# List[Tuple[str,Dict[str,Any]]]\n# feature size ~\n\nfeatureSize = len(stocks) * (1 + len(fields))\nprint(\"featureSize=\", featureSize)\nfeatureDesc: List[Tuple[str,Dict[str,Any]]] = []\n\nrstocks: Dict[int, str] = {}\nrfields: Dict[int, int] = {}\nfor k in stocks:\n rstocks[stocks[k]] = k\nfor kk in fields:\n 
rfields[fields[kk]] = kk\n\nccount = 0\nfor i in range(len(stocks)):\n featureDesc.append((str(ccount), { \"stock\": rstocks[i], \"field\": \"time\"} ))\n ccount = ccount+1\n for j in range(len(fields)):\n featureDesc.append((str(ccount), { \"stock\": rstocks[i], \"field\": translate[rfields[j]] }))\n ccount = ccount+1\n\nprint(featureDesc[0:100])\n\nsp500Prefix = cfg[\"features\"][\"sp500Prefix\"]\n\n# \"/crypto/esoteric/tws/delayed-1623246971\"\n#import glob \n#lst = [f for f in glob.glob(\"/crypto/esoteric/tws/delayed-*\")]\n#lst.sort()\n#print(lst)\n#\n#for f in lst:\n# try:\n# v = loadSP500File(f)\n# except:\n# pass\n# loadSP500File(\"/crypto/esoteric/tws/delayed-1623246971\")\n\nclass FeaturesSP500(Features):\n def getDescription(self)->List[Tuple[str,Dict[str,Any]]]:\n return featureDesc\n def getFeatureSize(self) -> int:\n return featureSize\n def getFeature(self, fromDT: datetime, toDT: datetime, timestep: timedelta) -> np.ndarray:\n import glob \n lst = [f for f in glob.glob(sp500Prefix+\"*\")]\n lst.sort()\n print(lst)\n print(sp500Prefix)\n\n rv: List[np.ndarray] = []\n\n for f in lst:\n try:\n v = loadSP500File(f)\n rv.append(v)\n except:\n pass\n return np.vstack(rv)\n\ndef FeaturesSP500Builder(confg: Dict[str,Any])->Features:\n return FeaturesSP500()\n\nregisterFeatureBuilder(\"SP500\", FeaturesSP500Builder)\n\n"
] |
[
[
"numpy.vstack"
]
] |
Chen-Yifan/weaver
|
[
"431a1427a185fa6357e40b729b8adcf263c349d2"
] |
[
"utils/data/config.py"
] |
[
"import numpy as np\nimport yaml\nimport copy\n\nfrom ..logger import _logger\nfrom .tools import _get_variable_names\n\n\ndef _as_list(x):\n if x is None:\n return None\n elif isinstance(x, (list, tuple)):\n return x\n else:\n return [x]\n\n\ndef _md5(fname):\n '''https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file'''\n import hashlib\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n\nclass DataConfig(object):\n r\"\"\"Data loading configuration.\n \"\"\"\n\n def __init__(self, print_info=True, **kwargs):\n\n opts = {\n 'treename': None,\n 'selection': None,\n 'test_time_selection': None,\n 'preprocess': {'method': 'manual', 'data_fraction': 0.1, 'params': None},\n 'new_variables': {},\n 'inputs': {},\n 'labels': {},\n 'observers': [],\n 'weights': None,\n }\n for k, v in kwargs.items():\n if v is not None:\n if isinstance(opts[k], dict):\n opts[k].update(v)\n else:\n opts[k] = v\n # only information in ``self.options'' will be persisted when exporting to YAML\n self.options = opts\n if print_info:\n _logger.debug(opts)\n\n self.selection = opts['selection']\n self.test_time_selection = opts['test_time_selection'] if opts['test_time_selection'] else self.selection\n self.var_funcs = opts['new_variables']\n # preprocessing config\n self.preprocess = opts['preprocess']\n self._auto_standardization = opts['preprocess']['method'].lower().startswith('auto')\n self._missing_standardization_info = False\n self.preprocess_params = opts['preprocess']['params'] if opts['preprocess']['params'] is not None else {}\n # inputs\n self.input_names = tuple(opts['inputs'].keys())\n self.input_dicts = {k: [] for k in self.input_names}\n self.input_shapes = {}\n for k, o in opts['inputs'].items():\n self.input_shapes[k] = (-1, len(o['vars']), o['length'])\n for v in o['vars']:\n v = _as_list(v)\n self.input_dicts[k].append(v[0])\n\n if opts['preprocess']['params'] is None:\n\n def _get(idx, default):\n try:\n return v[idx]\n except IndexError:\n return default\n\n params = {'length': o['length'], 'center': _get(1, 'auto' if self._auto_standardization else None),\n 'scale': _get(2, 1), 'min': _get(3, -5), 'max': _get(4, 5), 'pad_value': _get(5, 0)}\n if v[0] in self.preprocess_params and params != self.preprocess_params[v[0]]:\n raise RuntimeError('Incompatible info for variable %s, had: \\n %s\\nnow got:\\n %s' % (v[0], str(self.preprocess_params[k]), str(params)))\n if params['center'] == 'auto':\n self._missing_standardization_info = True\n self.preprocess_params[v[0]] = params\n # labels\n self.label_type = opts['labels']['type']\n self.label_value = opts['labels']['value']\n if self.label_type == 'simple':\n assert(isinstance(self.label_value, list))\n self.label_names = ('label',)\n self.var_funcs['label'] = 'np.stack([%s], axis=1).argmax(1)' % (','.join(self.label_value))\n else:\n self.label_names = tuple(self.label_value.keys())\n self.var_funcs.update(self.label_value)\n # weights: TODO\n self.weight_name = None\n if opts['weights'] is not None:\n self.weight_name = 'weight'\n self.use_precomputed_weights = opts['weights']['use_precomputed_weights']\n if self.use_precomputed_weights:\n self.var_funcs[self.weight_name] = '*'.join(opts['weights']['weight_branches'])\n else:\n self.reweight_method = opts['weights']['reweight_method']\n self.reweight_branches = tuple(opts['weights']['reweight_vars'].keys())\n self.reweight_bins = 
tuple(opts['weights']['reweight_vars'].values())\n self.reweight_classes = tuple(opts['weights']['reweight_classes'])\n self.class_weights = opts['weights'].get('class_weights', None)\n if self.class_weights is None:\n self.class_weights = np.ones(len(self.reweight_classes))\n self.reweight_threshold = opts['weights'].get('reweight_threshold', 10)\n self.reweight_hists = opts['weights'].get('reweight_hists', None)\n if self.reweight_hists is not None:\n for k, v in self.reweight_hists.items():\n self.reweight_hists[k] = np.array(v, dtype='float32')\n # observers\n self.observer_names = tuple(opts['observers'])\n\n # remove self mapping from var_funcs\n for k, v in self.var_funcs.items():\n if k == v:\n del self.var_funcs[k]\n\n if print_info:\n _logger.info('preprocess config: %s', str(self.preprocess))\n _logger.info('selection: %s', str(self.selection))\n _logger.info('test_time_selection: %s', str(self.test_time_selection))\n _logger.info('var_funcs:\\n - %s', '\\n - '.join(str(it) for it in self.var_funcs.items()))\n _logger.info('input_names: %s', str(self.input_names))\n _logger.info('input_dicts:\\n - %s', '\\n - '.join(str(it) for it in self.input_dicts.items()))\n _logger.info('input_shapes:\\n - %s', '\\n - '.join(str(it) for it in self.input_shapes.items()))\n _logger.info('preprocess_params:\\n - %s', '\\n - '.join(str(it) for it in self.preprocess_params.items()))\n _logger.info('label_names: %s', str(self.label_names))\n _logger.info('observer_names: %s', str(self.observer_names))\n\n # parse config\n self.keep_branches = set()\n aux_branches = set()\n # selection\n if self.selection:\n aux_branches.update(_get_variable_names(self.selection))\n # test time selection\n if self.test_time_selection:\n aux_branches.update(_get_variable_names(self.test_time_selection))\n # var_funcs\n self.keep_branches.update(self.var_funcs.keys())\n for expr in self.var_funcs.values():\n aux_branches.update(_get_variable_names(expr))\n # inputs\n for names in self.input_dicts.values():\n self.keep_branches.update(names)\n # labels\n self.keep_branches.update(self.label_names)\n # weight\n if self.weight_name:\n self.keep_branches.add(self.weight_name)\n if not self.use_precomputed_weights:\n aux_branches.update(self.reweight_branches)\n aux_branches.update(self.reweight_classes)\n # observers\n self.keep_branches.update(self.observer_names)\n # keep and drop\n self.drop_branches = (aux_branches - self.keep_branches)\n self.load_branches = (aux_branches | self.keep_branches) - set(self.var_funcs.keys()) - {self.weight_name, }\n if print_info:\n _logger.debug('drop_branches:\\n %s', ','.join(self.drop_branches))\n _logger.debug('load_branches:\\n %s', ','.join(self.load_branches))\n\n def __getattr__(self, name):\n return self.options[name]\n\n def dump(self, fp):\n with open(fp, 'w') as f:\n yaml.safe_dump(self.options, f, sort_keys=False)\n\n @classmethod\n def load(cls, fp, load_observers=True):\n with open(fp) as f:\n options = yaml.safe_load(f)\n if not load_observers:\n options['observers'] = None\n return cls(**options)\n\n def copy(self):\n return self.__class__(print_info=False, **copy.deepcopy(self.options))\n\n def __copy__(self):\n return self.copy()\n\n def __deepcopy__(self, memo):\n return self.copy()\n\n def export_json(self, fp):\n import json\n j = {'output_names':self.label_value, 'input_names':self.input_names}\n for k, v in self.input_dicts.items():\n j[k] = {'var_names':v, 'var_infos':{}}\n for var_name in v:\n j[k]['var_length'] = self.preprocess_params[var_name]['length']\n 
info = self.preprocess_params[var_name]\n j[k]['var_infos'][var_name] = {\n 'median': 0 if info['center'] is None else info['center'],\n 'norm_factor': info['scale'],\n 'replace_inf_value': 0,\n 'lower_bound': -1e32 if info['center'] is None else info['min'],\n 'upper_bound': 1e32 if info['center'] is None else info['max'],\n 'pad': info['pad_value']\n }\n with open(fp, 'w') as f:\n json.dump(j, f, indent=2)\n"
] |
[
[
"numpy.array"
]
] |
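DataConfig above is driven entirely by a YAML file whose keys mirror the opts dictionary in __init__. A hedged sketch of the loading step; the concrete field values below are illustrative assumptions, not taken from the repository:

import yaml

yaml_text = """
treename: Events
selection: null
inputs: {}
labels: {type: simple, value: [label_sig, label_bkg]}
observers: []
"""
options = yaml.safe_load(yaml_text)
# DataConfig.load() does essentially this, then calls DataConfig(**options),
# which derives keep_branches / drop_branches / load_branches from the options.
print(options["labels"])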
developmentseed/label-maker-dask
|
[
"700d2fa224ac674171fd8c4f8709a67f862eac36"
] |
[
"label_maker_dask/utils.py"
] |
[
"# pylint: disable=unused-argument\n\"\"\"Provide utility functions\"\"\"\nimport os\nfrom io import BytesIO\nfrom urllib.parse import parse_qs\n\nimport numpy as np\nimport rasterio\nimport requests # type: ignore\nfrom mercantile import Tile, bounds\nfrom PIL import Image, ImageColor\nfrom rasterio.crs import CRS\nfrom rasterio.warp import transform_bounds\nfrom rio_tiler.io import COGReader\n\nWGS84_CRS = CRS.from_epsg(4326)\n\n\nclass SafeDict(dict):\n \"\"\"dictionary for replacing missing url properties\"\"\"\n\n def __missing__(self, key):\n \"\"\"replace missing url properties\"\"\"\n return \"{\" + key + \"}\"\n\n\ndef url(tile: Tile, imagery):\n \"\"\"Return a tile url provided an imagery template and a tile\"\"\"\n return imagery.format(x=tile.x, y=tile.y, z=tile.z)\n\n\ndef class_match(ml_type, label, i):\n \"\"\"Determine if a label matches a given class index\"\"\"\n if ml_type == \"classification\":\n return label[i] > 0\n elif ml_type == \"object-detection\":\n return len(list(filter(lambda bb: bb[4] == i, label)))\n elif ml_type == \"segmentation\":\n return np.count_nonzero(label == i)\n return None\n\n\ndef download_tile_tms(tile: Tile, imagery):\n \"\"\"Download a satellite image tile from a tms endpoint\"\"\"\n\n if os.environ.get(\"ACCESS_TOKEN\"):\n token = os.environ.get(\"ACCESS_TOKEN\")\n imagery = imagery.format_map(SafeDict(ACCESS_TOKEN=token))\n\n r = requests.get(url(tile, imagery))\n\n return np.array(Image.open(BytesIO(r.content)))\n\n\ndef get_tile_tif(tile, imagery):\n \"\"\"\n Read a GeoTIFF with a window corresponding to a TMS tile\n \"\"\"\n with COGReader(imagery) as image:\n img = image.tile(*tile)\n\n return np.moveaxis(img.data, 0, 2)\n\n\ndef get_tile_wms(tile, imagery):\n \"\"\"\n Read a WMS endpoint with query parameters corresponding to a TMS tile\n\n Converts the tile boundaries to the spatial/coordinate reference system\n (SRS or CRS) specified by the WMS query parameter.\n \"\"\"\n # retrieve the necessary parameters from the query string\n query_dict = parse_qs(imagery.lower())\n wms_version = query_dict.get(\"version\")[0]\n if wms_version == \"1.3.0\":\n wms_srs = query_dict.get(\"crs\")[0]\n else:\n wms_srs = query_dict.get(\"srs\")[0]\n\n # find our tile bounding box\n bound = bounds(*[int(t) for t in tile])\n xmin, ymin, xmax, ymax = transform_bounds(\n WGS84_CRS, CRS.from_string(wms_srs), *bound, densify_pts=21\n )\n\n # project the tile bounding box from lat/lng to WMS SRS\n bbox = (\n [ymin, xmin, ymax, xmax] if wms_version == \"1.3.0\" else [xmin, ymin, xmax, ymax]\n )\n\n # request the image with the transformed bounding box and save\n wms_url = imagery.replace(\"{bbox}\", \",\".join([str(b) for b in bbox]))\n r = requests.get(wms_url)\n\n return np.array(Image.open(BytesIO(r.content)))\n\n\ndef is_tif(imagery):\n \"\"\"Determine if an imagery path leads to a valid tif\"\"\"\n valid_drivers = [\"GTiff\", \"VRT\"]\n try:\n with rasterio.open(imagery) as test_ds:\n if test_ds.meta[\"driver\"] not in valid_drivers:\n # rasterio can open path, but it is not a tif\n valid_tif = False\n else:\n valid_tif = True\n except rasterio.errors.RasterioIOError:\n # rasterio cannot open the path. 
this is the case for a\n # tile service\n valid_tif = False\n\n return valid_tif\n\n\ndef is_wms(imagery):\n \"\"\"Determine if an imagery path is a WMS endpoint\"\"\"\n return \"{bbox}\" in imagery\n\n\ndef get_image_function(imagery):\n \"\"\"Return the correct image downloading function based on the imagery string\"\"\"\n if is_tif(imagery):\n return get_tile_tif\n if is_wms(imagery):\n return get_tile_wms\n return download_tile_tms\n\n\n# Taken from https://github.com/CartoDB/CartoColor/blob/master/cartocolor.js#L1633-L1733\ncolors = [\"#DDCC77\", \"#CC6677\", \"#117733\", \"#332288\", \"#AA4499\", \"#88CCEE\"]\n\n\ndef class_color(c):\n \"\"\"Return 3-element tuple containing rgb values for a given class\"\"\"\n if c == 0:\n return (0, 0, 0) # background class\n return ImageColor.getrgb(colors[c % len(colors)])\n"
] |
[
[
"numpy.count_nonzero",
"numpy.moveaxis"
]
] |
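The url() helper above fills a TMS template with a mercantile tile's x/y/z. A minimal sketch, with a hypothetical endpoint standing in for the real imagery string:

from mercantile import Tile

template = "https://tiles.example.com/{z}/{x}/{y}.png"   # hypothetical endpoint
tile = Tile(x=1204, y=1539, z=12)
print(template.format(x=tile.x, y=tile.y, z=tile.z))
# https://tiles.example.com/12/1204/1539.png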
DanielCalimayor/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
[
"d1479de0badb674daf9cbb8b5f738d214de831ed"
] |
[
"Iris_Deploy.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#pip install pickle-mixin\n\n\n# In[1]:\n\n\n#import the basics but important libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n#Sklearn Preprocessing\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom sklearn.tree import DecisionTreeClassifier\n\n#import pickle\nimport pickle\nimport requests\nimport json\n\n\n# In[2]:\n\n\n#import\n\n\n\n\n\niris = pd.read_csv(\"https://gist.githubusercontent.com/curran/a08a1080b88344b0c8a7/raw/d546eaee765268bf2f487608c537c05e22e4b221/iris.csv\")\n\n\n\n\n# In[3]:\n\n\niris.head()\n\n\n# In[4]:\n\n\niris.dtypes\n\n\n# In[5]:\n\n\n#check dist of y\nplt.hist(iris['species'])\nplt.show()\n\n\n# In[6]:\n\n\n#encode\nle = LabelEncoder()\nle.fit(iris['species'])\n\n\n# In[7]:\n\n\niris['species'] = le.transform(iris['species'])\n\n\n# In[8]:\n\n\n#Features\nx = iris.iloc[: , 0:4 ]\nx.head()\n\n\n# In[9]:\n\n\ny = iris.iloc[:,4]\ny.head()\n\n\n# In[10]:\n\n\n#split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = .25, random_state = 123)\n\n\n# In[11]:\n\n\n#model\nalgo = DecisionTreeClassifier()\nmodel = algo.fit(x_train, y_train)\n\n\n# In[12]:\n\n\n#Predict\ny_pred = model.predict(x_test)\n\n\n# In[13]:\n\n\nprint(accuracy_score(y_test, y_pred))\n\n\n# In[14]:\n\n\nprint(classification_report(y_test, y_pred))\n\n\n# In[19]:\n\n\n#pickle\npickle.dump(model, open('iris_model.pkl', 'wb'))\n\n\n# In[20]:\n\n\nmy_model = pickle.load(open('iris_model.pkl', 'rb'))\n\n\n# In[21]:\n\n\nurl = \"https://localhost:9000/api\"\n\n\n# In[22]:\n\n\ndata = json.dumps({'sepal_width': 2.8, 'sepal_legnth': 6.3,'petal_width': 1.8, 'petal_legnth' : 5.5})\n\n\n# In[24]:\n\n\nsend = requests.post(url, data)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"sklearn.metrics.accuracy_score"
]
] |
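The notebook above pickles the fitted classifier and immediately reloads it before posting a JSON payload. A local-only sketch of the pickle round trip, with a toy dataset and no Flask endpoint assumed:

import pickle
from sklearn.tree import DecisionTreeClassifier

model = DecisionTreeClassifier().fit([[0, 0], [1, 1]], [0, 1])
with open("iris_model.pkl", "wb") as f:
    pickle.dump(model, f)
with open("iris_model.pkl", "rb") as f:
    restored = pickle.load(f)
print(restored.predict([[1, 1]]))  # [1]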
cnheider/MinkowskiEngine
|
[
"ae6db31203ba012df2f695576e2d3819d49bf2d7"
] |
[
"examples/indoor.py"
] |
[
"# Copyright (c) Chris Choy ([email protected]).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part\n# of the code.\nimport os\nimport argparse\nimport numpy as np\nfrom urllib.request import urlretrieve\ntry:\n import open3d as o3d\nexcept ImportError:\n raise ImportError('Please install open3d with `pip install open3d`.')\n\nimport torch\nimport MinkowskiEngine as ME\nfrom examples.minkunet import MinkUNet34C\nfrom examples.common import Timer\n\n# Check if the weights and file exist and download\nif not os.path.isfile('weights.pth'):\n print('Downloading weights and a room ply file...')\n urlretrieve(\"http://cvgl.stanford.edu/data2/minkowskiengine/weights.pth\",\n 'weights.pth')\n urlretrieve(\"http://cvgl.stanford.edu/data2/minkowskiengine/1.ply\", '1.ply')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--file_name', type=str, default='1.ply')\nparser.add_argument('--weights', type=str, default='weights.pth')\nparser.add_argument('--use_cpu', action='store_true')\n\nCLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',\n 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',\n 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink',\n 'bathtub', 'otherfurniture')\n\nVALID_CLASS_IDS = [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39\n]\n\nSCANNET_COLOR_MAP = {\n 0: (0., 0., 0.),\n 1: (174., 199., 232.),\n 2: (152., 223., 138.),\n 3: (31., 119., 180.),\n 4: (255., 187., 120.),\n 5: (188., 189., 34.),\n 6: (140., 86., 75.),\n 7: (255., 152., 150.),\n 8: (214., 39., 40.),\n 9: (197., 176., 213.),\n 10: (148., 103., 189.),\n 11: (196., 156., 148.),\n 12: (23., 190., 207.),\n 14: (247., 182., 210.),\n 15: (66., 188., 102.),\n 16: (219., 219., 141.),\n 17: (140., 57., 197.),\n 18: (202., 185., 52.),\n 19: (51., 176., 203.),\n 20: (200., 54., 131.),\n 21: (92., 193., 61.),\n 22: (78., 71., 183.),\n 23: (172., 114., 82.),\n 24: (255., 127., 14.),\n 25: (91., 163., 138.),\n 26: (153., 98., 156.),\n 27: (140., 153., 101.),\n 28: (158., 218., 229.),\n 29: (100., 125., 154.),\n 30: (178., 127., 135.),\n 32: (146., 111., 194.),\n 33: (44., 160., 44.),\n 34: (112., 128., 144.),\n 35: (96., 207., 209.),\n 36: (227., 119., 194.),\n 37: (213., 92., 176.),\n 38: (94., 106., 211.),\n 39: (82., 84., 163.),\n 40: (100., 85., 144.),\n}\n\n\ndef load_file(file_name):\n pcd 
= o3d.io.read_point_cloud(file_name)\n coords = np.array(pcd.points)\n colors = np.array(pcd.colors)\n return coords, colors, pcd\n\n\nif __name__ == '__main__':\n config = parser.parse_args()\n device = torch.device('cuda' if (\n torch.cuda.is_available() and not config.use_cpu) else 'cpu')\n print(f\"Using {device}\")\n # Define a model and load the weights\n model = MinkUNet34C(3, 20).to(device)\n model_dict = torch.load(config.weights)\n model.load_state_dict(model_dict)\n model.eval()\n\n coords, colors, pcd = load_file(config.file_name)\n # Measure time\n with torch.no_grad():\n voxel_size = 0.02\n # Feed-forward pass and get the prediction\n in_field = ME.TensorField(\n features=torch.from_numpy(colors).float(),\n coordinates=ME.utils.batched_coordinates([coords / voxel_size], dtype=torch.float32),\n quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,\n minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED,\n device=device,\n )\n # Convert to a sparse tensor\n sinput = in_field.sparse()\n # Output sparse tensor\n soutput = model(sinput)\n # get the prediction on the input tensor field\n out_field = soutput.slice(in_field)\n logits = out_field.F\n\n _, pred = logits.max(1)\n pred = pred.cpu().numpy()\n\n # Create a point cloud file\n pred_pcd = o3d.geometry.PointCloud()\n # Map color\n colors = np.array([SCANNET_COLOR_MAP[VALID_CLASS_IDS[l]] for l in pred])\n pred_pcd.points = o3d.utility.Vector3dVector(coords)\n pred_pcd.colors = o3d.utility.Vector3dVector(colors / 255)\n pred_pcd.estimate_normals()\n\n # Move the original point cloud\n pcd.points = o3d.utility.Vector3dVector(\n np.array(pcd.points) + np.array([0, 5, 0]))\n\n # Visualize the input point cloud and the prediction\n o3d.visualization.draw_geometries([pcd, pred_pcd])\n"
] |
[
[
"torch.load",
"torch.from_numpy",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.array"
]
] |
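The example above feeds coords / voxel_size into a MinkowskiEngine TensorField, i.e. points are binned into 2 cm voxels before the sparse forward pass. A NumPy-only sketch of that quantization idea (a simplification of what the library does internally):

import numpy as np

coords = np.random.rand(1000, 3) * 5.0           # point cloud in metres
voxel_size = 0.02
voxel_coords = np.floor(coords / voxel_size).astype(np.int32)
unique_voxels = np.unique(voxel_coords, axis=0)  # duplicate points collapse into one voxel
print(unique_voxels.shape)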
AcodeC/video
|
[
"5d4497621cba764dc51ae22b10e1196c6e9e28bc"
] |
[
"dataset/transform.py"
] |
[
"import math\nimport re\nimport string\n\nimport numpy as np\nimport torch\n\n\nclass UniformSample:\n def __init__(self, n_sample):\n self.n_sample = n_sample\n\n def __call__(self, frames):\n n_frames = len(frames)\n if n_frames < self.n_sample:\n return frames\n\n sample_indices = [ int(i) for i in np.linspace(0, n_frames-1, self.n_sample) ]\n samples = [ frames[i] for i in sample_indices ]\n return samples\n\n\nclass RandomSample:\n def __init__(self, n_sample):\n self.n_sample = n_sample\n\n def __call__(self, frames):\n n_frames = len(frames)\n if n_frames < self.n_sample:\n return frames\n \n sample_indices = sorted(np.random.choice(n_frames, self.n_sample, replace=False))\n samples = [ frames[i] for i in sample_indices ]\n return samples\n\n\nclass UniformJitterSample:\n def __init__(self, n_sample):\n self.n_sample = n_sample\n\n def __call__(self, frames):\n n_frames = len(frames)\n if n_frames < self.n_sample:\n return frames\n\n jitter_std = int(math.sqrt(n_frames / self.n_sample / 2 / 2))\n\n sample_indices = [ int(i) for i in np.linspace(0, n_frames-1, self.n_sample) ]\n sample_indices = [ int(i + np.random.normal(0, jitter_std)) for i in sample_indices ]\n sample_indices = [ min(max(0, i), n_frames-1) for i in sample_indices ]\n sample_indices = sorted(sample_indices)\n samples = [ frames[i] for i in sample_indices ]\n return samples\n\n\nclass ZeroPadIfLessThan:\n def __init__(self, n):\n self.n = n\n\n def __call__(self, frames):\n while len(frames) < self.n:\n frames.append(np.zeros_like(frames[0]))\n return frames\n\n\nclass ToTensor:\n def __init__(self, dtype=None):\n self.dtype = dtype\n \n def __call__(self, array):\n np_array = np.asarray(array)\n t = torch.from_numpy(np_array)\n if self.dtype:\n t = t.type(self.dtype)\n return t\n\n\nclass TrimExceptAscii:\n\n def __call__(self, sentence):\n return sentence.decode('ascii', 'ignore').encode('ascii')\n\n\nclass RemovePunctuation:\n def __init__(self):\n self.regex = re.compile('[%s]' % re.escape(string.punctuation))\n\n def __call__(self, sentence):\n return self.regex.sub('', sentence)\n\n\nclass Lowercase:\n\n def __call__(self, sentence):\n return sentence.lower()\n\n\nclass SplitWithWhiteSpace:\n\n def __call__(self, sentence):\n return sentence.split()\n\n\nclass Truncate:\n def __init__(self, n_word):\n self.n_word = n_word\n\n def __call__(self, words):\n return words[:self.n_word]\n\n\nclass PadFirst:\n def __init__(self, token):\n self.token = token\n\n def __call__(self, words):\n return [ self.token ] + words\n\n\nclass PadLast:\n def __init__(self, token):\n self.token = token\n\n def __call__(self, words):\n return words + [ self.token ]\n\n\nclass PadToLength:\n def __init__(self, token, length):\n self.token = token\n self.length = length\n\n def __call__(self, words):\n n_pads = self.length - len(words)\n return words + [ self.token ] * n_pads\n\n\nclass ToIndex:\n def __init__(self, word2idx):\n self.word2idx = word2idx\n\n def __call__(self, words): # Ignore unknown (or trimmed) words.\n return [ self.word2idx[word] for word in words if word in self.word2idx ]\n\n"
] |
[
[
"numpy.linspace",
"numpy.random.choice",
"numpy.asarray",
"torch.from_numpy",
"numpy.random.normal",
"numpy.zeros_like"
]
] |
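UniformSample above picks evenly spaced frame indices with np.linspace and falls back to the full clip when it is shorter than n_sample. A standalone sketch of that index selection:

import numpy as np

def uniform_sample_indices(n_frames, n_sample):
    if n_frames < n_sample:
        return list(range(n_frames))
    return [int(i) for i in np.linspace(0, n_frames - 1, n_sample)]

print(uniform_sample_indices(120, 8))  # [0, 17, 34, 51, 68, 85, 102, 119]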
isomorphicdude/Highlighted-Text-OCR
|
[
"07557bf77172b5e411f83352d8c4a6ba1b46fe6a"
] |
[
"detect_mask_and_ocr.py"
] |
[
"import cv2\r\nimport numpy as np\r\nimport torch\r\nimport easyocr\r\n\r\n# Reading the image\r\n\r\nimg = cv2.imread('image.jpg')\r\n\r\n#define kernel size \r\nkernel = np.ones((7,7),np.uint8)\r\n\r\n\r\n# convert to hsv colorspace \r\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n\r\n# lower bound and upper bound for Green color \r\n# lower_bound = np.array([50, 20, 20]) \r\n# upper_bound = np.array([100, 255, 255])\r\n\r\n# lower bound and upper bound for Yellow color \r\nlower_bound = np.array([20, 80, 80]) \r\nupper_bound = np.array([30, 255, 255])\r\n\r\n# find the colors within the boundaries\r\nmask = cv2.inRange(hsv, lower_bound, upper_bound)\r\n\r\n# Remove unnecessary noise from mask\r\nmask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\r\nmask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\r\n\r\n# Segment only the detected region\r\nsegmented_img = cv2.bitwise_and(img, img, mask=mask)\r\n\r\noutput = cv2.resize(segmented_img, (960, 540))\r\n\r\ncv2.imwrite('modified',output)\r\n\r\nreader = easyocr.Reader(['de', 'en'])\r\n\r\nresult = reader.readtext('modified.jpg')"
] |
[
[
"numpy.array",
"numpy.ones"
]
] |
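The script above isolates yellow highlighter with an HSV range of [20, 80, 80]–[30, 255, 255] before OCR. A quick sanity-check sketch for those hue bounds (OpenCV hues run 0–179, so pure yellow lands at 30):

import cv2
import numpy as np

yellow_bgr = np.uint8([[[0, 255, 255]]])            # OpenCV images are BGR
yellow_hsv = cv2.cvtColor(yellow_bgr, cv2.COLOR_BGR2HSV)
print(yellow_hsv)  # [[[ 30 255 255]]] -- inside the inclusive inRange bounds above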
OSUmageed/pyHeatTransfer
|
[
"b1db8ca7594a657826a1ccfb38a4e4eb102cce55"
] |
[
"pyHeatTransfer/conduction.py"
] |
[
"#I suppose the idea is that it gets these values from somewhere and takes off.\r\n#Regardless of where those values come from.\r\n\r\nimport os\r\nimport os.path as op\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport CoolProp.CoolProp as cp\r\nimport collections\r\nimport time\r\nimport random\r\nfrom deco import concurrent, synchronized\r\n\r\nsourcepath = op.abspath(op.dirname(__file__))\r\ngitpath = op.dirname(sourcepath) #Top level of git repo\r\nos.chdir(sourcepath)\r\nsys.path.append(gitpath)\r\n\r\nimport geometry as geo\r\nimport SolidProp.PropertySI as sp\r\nimport convection as cv\r\n\r\nshape_func = { \r\n 'Brick': lambda z, ht: True,\r\n 'Ziggurat' : lambda z, ht: (z%ht)\r\n}\r\n\r\nthispath = op.abspath(op.dirname(__file__))\r\ntoK = 273.15\r\n\r\ntag = geo.tags\r\n\r\ndef contourmaker(Tg, XX, yspot):\r\n npE = np.zeros_like(XX)\r\n for key in Tg.keys():\r\n x,y,z = key\r\n if y != yspot:\r\n continue\r\n \r\n npE[z,x] = Tg[key]\r\n\r\n return npE\r\n\r\ndef randT(T):\r\n return 0.1*random.random() + T\r\n\r\n#Doesn't do the ziggurat!\r\ndef make_grid(xi, xf, yi, yf, z, Ti, zFlag=\"\"):\r\n\r\n xFlag = [\"E\", \"W\"]\r\n yFlag = [\"S\", \"N\"]\r\n typr = dict()\r\n Tmake = dict()\r\n\r\n #First x row\r\n gr = (xi, yi, z)\r\n typr[gr] = xFlag[0]+yFlag[0]+zFlag\r\n Tmake[gr] = randT(Ti)\r\n\r\n for y in range(yi+1,yf):\r\n gr = (xi, y, z)\r\n typr[gr] = xFlag[0]+zFlag\r\n Tmake[gr] = randT(Ti)\r\n\r\n gr = (xi, yf, z)\r\n typr[gr] = xFlag[0]+yFlag[1]+zFlag\r\n Tmake[gr] = randT(Ti)\r\n\r\n # All central x rows\r\n for x in range(xi+1,xf):\r\n gr = (x, yi, z)\r\n typr[gr] = yFlag[0]+zFlag\r\n Tmake[gr] = randT(Ti)\r\n\r\n for y in range(yi+1,yf):\r\n gr = (x, y, z)\r\n typr[gr] = zFlag\r\n Tmake[gr] = randT(Ti)\r\n\r\n gr = (x,yf,z)\r\n typr[gr] = yFlag[1]+zFlag\r\n Tmake[gr] = randT(Ti)\r\n\r\n #Last x row\r\n gr = (xf, yi, z)\r\n typr[gr] = xFlag[1]+yFlag[0]+zFlag\r\n Tmake[gr] = randT(Ti)\r\n\r\n for y in range(yi+1,yf):\r\n gr = (xf, y, z)\r\n typr[gr] = xFlag[1]+zFlag\r\n Tmake[gr] = randT(Ti)\r\n\r\n gr = (xf,yf,z)\r\n typr[gr] = xFlag[1]+yFlag[1]+zFlag\r\n Tmake[gr] = randT(Ti)\r\n \r\n return Tmake, typr\r\n\r\ndef step_forwardf(Tg_in, ky, Pg, typD, V, A, dt, ds, Ta, h, ep, qVol):\r\n ty = tag[typD]\r\n cond_coefficient = Pg['A']/(V*ds*ty['Vc'])\r\n\r\n cs = np.array(ky) + np.array(ty['Stencil'])\r\n ck = []\r\n for c in cs:\r\n ck.append(Tg_in[tuple(c)])\r\n\r\n conduction = cond_coefficient * (sum([ci*Ai*A for ci, Ai in list(zip(ty['Acond'],ck))]) - \r\n Tg_in[ky]*A*sum(ty['Acond']))\r\n\r\n cv_radiant = cv.ambientQ(Tg_in[ky], Ta, ty['Aconv'][0]*A, h, ep)/(V*ty['Vc']*Pg['D']*Pg['CP'])\r\n \r\n return Tg_in[ky] + dt*(conduction + cv_radiant + qVol/Pg['D']*Pg['CP'])\r\n\r\n\r\ndef forward_call(Tg_in, Pg, typD, dt, ds, Ta, h, ep, qVol=0.0):\r\n A = ds**2\r\n V = ds**3\r\n Tg_out = dict()\r\n for key in Tg_in.keys():\r\n Tg_out[key] = step_forwardf(Tg_in, key, Pg[key], typD[key], V, \r\n A, dt, ds, Ta, h, ep, qVol)\r\n\r\n return Tg_out\r\n\r\n#could use scipy interpolate to do a spline. 
This is limited to just linear.\r\nclass SolidProperties(object):\r\n def __init__(self, mat, Tgrid={}):\r\n self.props = sp.get_props(mat)\r\n self.pGrid = collections.defaultdict(dict)\r\n self.update_props(Tgrid)\r\n \r\n #Should accommodate lists in order to \r\n def update_props(self, Tgrid):\r\n for pt in Tgrid.keys():\r\n for prop in self.props.keys():\r\n self.pGrid[pt][prop] = np.interp(Tgrid[pt], self.props[prop][0, :], self.props[prop][1, :])\r\n\r\n def query_props(self, Temp):\r\n Tget = Temp if isinstance(Temp, list) else [Temp]\r\n out = collections.defaultdict(dict)\r\n for T in Tget:\r\n for prop in self.props.keys():\r\n out[T][prop] = np.interp(T, self.props[prop][0, :], self.props[prop][1, :])\r\n\r\n return out\r\n\r\n#Using Dict\r\nclass HeatSimulation(object):\r\n def __init__(self, specificDict):\r\n self.parameter_dict = specificDict\r\n self.mat = specificDict['mat']\r\n self.ds = specificDict['ds']\r\n self.dt = specificDict['dt']\r\n self.Ti = specificDict['Ti'] + toK\r\n self.Ta = specificDict['Ta'] + toK\r\n self.h = specificDict['h']\r\n self.ep = specificDict['ep']\r\n self.Lx = specificDict['Lx'] \r\n self.Ly = specificDict['Ly'] \r\n self.Lz = specificDict['Lz'] \r\n self.tF = specificDict['tFinal']\r\n self.qVol = specificDict['qVol']\r\n self.tNow = 0.0 \r\n self.Nx = int(self.Lx/self.ds)+1 \r\n self.Ny = int(self.Ly/self.ds)+1 \r\n self.Nz = int(self.Lz/self.ds)+1\r\n self.A = self.ds**2\r\n self.V = self.ds**3\r\n self.xrng = np.arange(0, self.Lx + 2.0*self.ds, self.ds)\r\n self.yrng = np.arange(0, self.Lz + 2.0*self.ds, self.ds)\r\n self.Gx, self.Gz = np.meshgrid(self.xrng, self.yrng) \r\n self.pPlot = np.zeros_like(self.Gx)\r\n self.Tgrid, self.fGrid = self.__instantiate_grid()\r\n self.mProps = SolidProperties(self.mat, self.Tgrid)\r\n\r\n def __instantiate_grid(self):\r\n xf, yf = self.Nx, self.Ny\r\n xi, yi = 0, 0\r\n Tuno, fGrid = make_grid(xi, xf, yi, yf, 0, self.Ti, zFlag=\"B\")\r\n\r\n cD = self.parameter_dict['stepD']\r\n stepFunction = shape_func[self.parameter_dict['shape']]\r\n\r\n for z in range(1,self.Nz):\r\n if not stepFunction(z, self.parameter_dict['stepH']):\r\n xi += cD\r\n xf -= cD\r\n yi += cD\r\n yf -= cD\r\n \r\n Tt, ft = make_grid(xi, xf, yi, yf, z, self.Ti)\r\n Tuno.update(Tt)\r\n fGrid.update(ft)\r\n \r\n Tt, ft = make_grid(xi, xf, yi, yf, self.Nz, self.Ti, zFlag=\"U\")\r\n Tuno.update(Tt)\r\n fGrid.update(ft)\r\n return Tuno, fGrid\r\n\r\n\r\n def step_forward(self):\r\n Tg_out = dict()\r\n \r\n for key in self.Tgrid.keys():\r\n Tg_out[key] = self.__take_step(key)\r\n\r\n self.tNow += self.dt\r\n self.Tgrid = Tg_out\r\n\r\n\r\n def __take_step(self, key):\r\n ty = tag[self.fGrid[key]]\r\n pG = self.mProps.pGrid[key]\r\n\r\n #Alpha/(V*ds*Vcoeff)\r\n cond_coefficient = pG['A']/(self.V * self.ds * ty['Vc'])\r\n cs = np.array(key) + np.array(ty['Stencil'])\r\n ck = []\r\n\r\n for c in cs:\r\n ck.append(self.Tgrid[tuple(c)])\r\n\r\n conduction = cond_coefficient * (sum([ci*Ai*self.A for ci, Ai in list(zip(ty['Acond'],ck))]) - \r\n self.Tgrid[key]*self.A*sum(ty['Acond']))\r\n\r\n cv_radiant = cv.ambientQ(self.Tgrid[key], self.Ta, ty['Aconv'][0]*self.A, self.h, self.ep)/(self.V * ty['Vc'] * pG['D'] * pG['CP'])\r\n \r\n return self.Tgrid[key] + self.dt*(conduction + cv_radiant + self.qVol/(pG['D']*pG['CP'])) \r\n\r\n\r\n def plot_step(self, ySpot):\r\n\r\n for key in self.Tgrid.keys(): \r\n x,y,z = key\r\n if y != ySpot:\r\n continue\r\n \r\n self.pPlot[z,x] = self.Tgrid[key]\r\n \r\n\r\n#Make a run function for the class-based 
version.\r\ndef initialize_class(specificDict):\r\n\r\n hsim = HeatSimulation(specificDict)\r\n Gsize = hsim.Gx.shape\r\n t = [time.time()]\r\n\r\n while hsim.tNow < hsim.tF:\r\n hsim.step_forward()\r\n t.append(time.time())\r\n print(hsim.tNow, t[-1]-t[-2])\r\n\r\n hsim.plot_step(Gsize[1]//2)\r\n CS = plt.contour(hsim.Gx, hsim.Gz, hsim.pPlot-toK, 5)\r\n plt.title(\"yaxis = {:.3f}, t = {:.3f} s\".format(yval, tnow))\r\n plt.ylabel('Z axis')\r\n plt.xlabel('X axis')\r\n plt.clabel(CS, inline=1, fontsize=10)\r\n plt.grid(True)\r\n plt.show()\r\n\r\n \r\n#Called by calling conduction without interface.\r\ndef initialize(specificDict):\r\n ds = specificDict['ds']\r\n Lx, Ly, Lz = specificDict['Lx'], specificDict['Ly'], specificDict['Lz'] \r\n Nx, Ny, Nz = int(Lx/ds)+1, int(Ly/ds)+1, int(Lz/ds)+1\r\n Gx, Gz = np.meshgrid(np.arange(0,Lx+2.0*ds,ds), np.arange(0,Lz+2.0*ds,ds))\r\n\r\n dt = specificDict['dt']\r\n Ti = specificDict['Ti'] + toK\r\n Ta, h, ep = specificDict['Ta'] + toK, specificDict['h'], specificDict['ep']\r\n xf, yf = Nx, Ny\r\n xi, yi = 0, 0\r\n Tuno, fGrid = make_grid(xi, xf, yi, yf, 0, Ti, zFlag=\"B\")\r\n\r\n cD = specificDict['stepD']\r\n stepFunction = shape_func[specificDict['shape']]\r\n\r\n for z in range(1,Nz):\r\n if not stepFunction(z, specificDict['stepH']):\r\n xi += cD\r\n xf -= cD\r\n yi += cD\r\n yf -= cD\r\n \r\n Tt, ft = make_grid(xi, xf, yi, yf, z, Ti)\r\n Tuno.update(Tt)\r\n fGrid.update(ft)\r\n \r\n Tt, ft = make_grid(xi, xf, yi, yf, Nz, Ti, zFlag=\"U\")\r\n Tuno.update(Tt)\r\n fGrid.update(ft)\r\n\r\n tnow = 0.0\r\n yval = Ly/2\r\n Gsize = Gx.shape\r\n yplace = Gsize[1]//2\r\n \r\n matProps = SolidProperties(specificDict['mat'], Tuno)\r\n t = [time.time()]\r\n print(Gsize, len(Tuno.keys()))\r\n\r\n\r\n while tnow < specificDict['tFinal']:\r\n Tdos = forward_call(Tuno, matProps.pGrid, fGrid, dt, ds, Ta, h, ep)\r\n matProps.update_props(Tdos)\r\n Tuno = forward_call(Tdos, matProps.pGrid, fGrid, dt, ds, Ta, h, ep)\r\n\r\n matProps.update_props(Tuno)\r\n\r\n tnow += dt*2.0\r\n t.append(time.time())\r\n print(tnow, t[-1]-t[-2])\r\n\r\n Zv = contourmaker(Tuno, Gx, yplace)\r\n CS = plt.contour(Gx, Gz, Zv-toK, 5)\r\n plt.title(\"yaxis = {:.3f}, t = {:.3f} s\".format(yval, tnow))\r\n plt.ylabel('Z axis')\r\n plt.xlabel('X axis')\r\n plt.clabel(CS, inline=1, fontsize=10)\r\n plt.grid(True)\r\n plt.show()\r\n\r\n return 'Yay'\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n import examples as ex\r\n print(\"You have chosen to run a predefined example: \")\r\n choice = bool(int(input(\"Enter 1 for ziggurat, 0 for brick: \")))\r\n\r\n param = ex.zigg if choice else ex.bricky\r\n param['tFinal'] = 10.0\r\n initialize_class(param)\r\n #initialize(param)"
] |
[
[
"matplotlib.pyplot.clabel",
"numpy.meshgrid",
"numpy.arange",
"matplotlib.pyplot.contour",
"numpy.zeros_like",
"matplotlib.pyplot.grid",
"numpy.interp",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
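One pattern worth calling out from the conduction.py row above is the numpy.interp lookup behind SolidProperties: each material property is stored as a 2 x N table whose first row is temperature and whose second row is the property value, and queries interpolate linearly between samples. A minimal sketch with invented numbers:

    import numpy as np

    # 2 x N property table: row 0 = temperature [K], row 1 = property value
    prop_table = np.array([[300.0, 400.0, 500.0, 600.0],
                           [16.0,  18.5,  21.0,  23.5]])

    def query_prop(temperature, table):
        # Linear interpolation; temperatures outside the table clamp to the end values.
        return np.interp(temperature, table[0, :], table[1, :])

    print(query_prop(450.0, prop_table))   # 19.75, halfway between 18.5 and 21.0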
pranavsb/RL_smart_grid
|
[
"b23b407d3c873171d9a2af6d5a0104a7bcadc6cd"
] |
[
"model/DynamicPricing.py"
] |
[
"from Environment import Environment\nfrom QTableAgent import QTableAgent\nimport time, os\nimport numpy as np\nimport pickle\nfrom Utils import get_battery_reward_factor\n\nSOURCE_DEMAND_STATE = 'demand'\nSOURCE_SMART_LOADS = True\nSOURCE_LEARNING_RATE = 0.03\nSOURCE_DISCOUNT_FACTOR = 0.95\nSOURCE_NUM_LOADS = 10\nSOURCE_MODE = 'vanilla'\n\nLOAD_RANDOMIZE_BATTERY = 'rb'#True\nLOAD_MODE = 'mode'\nLOAD_DAY = 'day'#199999\nLOAD_NUM_LOADS = 'ndl'\nLOAD_LEARNING_RATE = 'lr'\nLOAD_DISCOUNT_FACTOR = 'df'\nLOAD_BATTERY_STATE = 'battery'\nLOAD_PRICE_STATE = 'price'\n\n\n\n\n\nMODEL_PATH = os.getcwd()#+'/basic_qlearning_models/dumloads'+str(NUM_DUM_LOADS)+'/df'+str(DISCOUNT_FACTOR)+'/lr'+str(LEARNING_RATE)\nMODEL_PATH+='/dynamic_pricing_models'\nif not os.path.isdir(MODEL_PATH):\n os.makedirs(MODEL_PATH)\nif SOURCE_SMART_LOADS:\n MODEL_PATH += '/smart'\nelse:\n MODEL_PATH+='/dumb'\nif not os.path.isdir(MODEL_PATH):\n os.makedirs(MODEL_PATH)\nMODEL_PATH+='/df'+str(SOURCE_DISCOUNT_FACTOR)\nif not os.path.isdir(MODEL_PATH):\n os.makedirs(MODEL_PATH)\nMODEL_PATH+='/lr'+str(SOURCE_LEARNING_RATE)\nif not os.path.isdir(MODEL_PATH):\n os.makedirs(MODEL_PATH)\n\nload_agent_params = {\n LOAD_RANDOMIZE_BATTERY:True,\n LOAD_LEARNING_RATE: 0.03,\n LOAD_DISCOUNT_FACTOR: 0.9,\n LOAD_NUM_LOADS:999,\n LOAD_DAY:99999,\n LOAD_MODE:'vanilla'\n }\nLOAD_MODEL_PATH = os.getcwd()\nLOAD_MODEL_PATH += '/basic_qlearning_models'\nif load_agent_params[LOAD_RANDOMIZE_BATTERY]:\n LOAD_MODEL_PATH+='/randomize_battery'\nelse:\n LOAD_MODEL_PATH+='/continuous_battery'\nLOAD_MODEL_PATH+= '/dumloads'+str(load_agent_params[LOAD_NUM_LOADS]) +'/df' + str(load_agent_params[LOAD_DISCOUNT_FACTOR]) + '/lr'+str(load_agent_params[LOAD_LEARNING_RATE])\n\n\n\ndef setup():\n env = Environment()\n # env.add_connections({0:[0]})\n load_agent = None\n if SOURCE_SMART_LOADS:\n with open(\n LOAD_MODEL_PATH + '/' + load_agent_params[LOAD_MODE] + '_agent_' + str(load_agent_params[LOAD_DAY]) + '.pickle',\n 'rb') as f:\n load_agent = pickle.load(f)\n env.add_connections({0:list(range(SOURCE_NUM_LOADS))})\n else:\n env.add_dumb_loads(0,SOURCE_NUM_LOADS)\n env.set_environment_ready()\n env.reset(True)\n source_agent_dict = {0:QTableAgent(env.get_source_action_space(),\n {SOURCE_DEMAND_STATE:env.get_overall_demand_bounds(0)},\n {SOURCE_DEMAND_STATE:20},\n default_action=1,\n discount_factor=SOURCE_DISCOUNT_FACTOR\n )}\n source_agent_dict[0].set_learning_rate(SOURCE_LEARNING_RATE)\n return env, source_agent_dict, load_agent\n\ndef train(startday=0, endday=200000):\n start=time.time()\n load_actions = {}\n for day in range(startday, endday):\n states = []\n actions = []\n max_change = 0\n max_change_state_action = []\n response = env.reset(True)\n next_state = {SOURCE_DEMAND_STATE: response[0][0][0][0]}\n source_agent_dict[0].update_state(next_state)\n next_action = source_agent_dict[0].take_action()\n\n for step in range(env.get_max_timestep()+1):\n # print(env.get_current_timestep(),step)\n current_state = next_state\n current_action = next_action\n actions.append(current_action)\n if SOURCE_SMART_LOADS:\n for i in range(SOURCE_NUM_LOADS):\n load_actions[i] = load_agent.get_action(\n {LOAD_BATTERY_STATE: response[1][i][0][0], LOAD_PRICE_STATE: response[1][i][0][1][-1]})\n\n response = env.step(sourceActionDict={0:current_action}, loadActionDict=load_actions)\n next_state = {SOURCE_DEMAND_STATE:response[0][0][0][0]}\n states.append(current_state)\n source_agent_dict[0].update_state(next_state)\n\n if SOURCE_MODE is 'vanilla':\n max_change = max(abs(\n 
source_agent_dict[0].update_qtable(\n current_state=current_state, current_action=current_action,\n reward = response[0][0][1],\n mode=SOURCE_MODE, next_state = next_state\n )), max_change) #response should be negative\n next_action = source_agent_dict[0].take_action()\n\n elif SOURCE_MODE is 'sarsa':\n next_action = source_agent_dict[0].take_action()\n max_change = max(abs(\n source_agent_dict[0].update_qtable(\n current_state=current_state, current_action=current_action,\n reward = response[0][0][1],\n next_state=next_state, next_action=next_action, mode=SOURCE_MODE, #clip=[-25,25] # clip the increments to a certain range\n )), max_change) # response should be negative\n\n\n max_change_state_action = [source_agent_dict[0].state,current_action]\n print(day,':',source_agent_dict[0].get_explore_rate(day),':',max_change,':',max_change_state_action,':',np.mean(source_agent_dict[0].qtable))\n if max_change<0.001:\n break\n source_agent_dict[0].set_explore_rate(source_agent_dict[0].get_explore_rate(day))\n # load_agent_dict[0].set_learning_rate(load_agent_dict[0].get_learning_rate(day))\n if (day+1)%500==0:\n source_agent_dict[0].update_policy()\n # np.save(MODEL_PATH+'/qtable_'+str(day),load_agent_dict[0].qtable)\n # np.save(MODEL_PATH+'/visitcounts_'+str(day),load_agent_dict[0].visit_counts)\n # np.save(MODEL_PATH+'/policy_'+str(day),load_agent_dict[0].policy)\n with open(MODEL_PATH+'/'+SOURCE_MODE+'_agent_'+str(day)+'.pickle', 'wb') as f:\n pickle.dump(source_agent_dict[0], f)\n\n end = time.time()\n return end-start\n\nenv, source_agent_dict, load_agent = setup()\n\ntimetaken = train(0,10000)\n"
] |
[
[
"numpy.mean"
]
] |
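The single catalogued call for DynamicPricing.py is numpy.mean, used only to print the mean of the agent's Q-table as a rough convergence indicator each day. A tiny sketch with placeholder shapes:

    import numpy as np

    qtable = np.zeros((20, 3))      # e.g. 20 discretised demand states x 3 price actions
    qtable[5, 1] = -12.0            # pretend one update has happened
    print('mean Q value:', np.mean(qtable))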
ssriblo/ionic-smarthome-test-1
|
[
"060bc247e0b8295d6cd869d90b364756515cfc19"
] |
[
"python-opcua/timetable_parser.py"
] |
[
"import numpy as np\nimport sys\nimport json\n\nclass DataParserTT(object):\n def __init__(self):\n pass\n # Let convert from string to json: \n # \n\n def timetableParser(self, str):\n# ob = self.obj\n a = [None]*16\n print(f\"\\n\\nstr type\", type(str), str) # магическим образом этот принт нужен - без него type(str)=dict и все падает\n ob = json.loads(str)\n# print(f\"OBJECT\", ob, ob['mode'])\n try:\n if (ob['mode'] == \"TimeTable\"):\n a[0] = 1\n else:\n a[0] = 0\n a[1] = ob['comf_0']\n a[2] = ob['comf_1']\n a[3] = ob['econ_0']\n a[4] = ob['econ_1']\n a[5] = 0 # Reserved\n a[6] = 0 # Reserved\n a[7] = ob['tt_vals'][0]['start']\n a[8] = ob['tt_vals'][0]['end']\n a[9] = ob['tt_vals'][1]['start']\n a[10] = ob['tt_vals'][1]['end']\n a[11] = ob['tt_vals'][2]['start']\n a[12] = ob['tt_vals'][2]['end']\n for i in range(0,3):\n array = []\n for j in range(0,6):\n array.append(ob['tt_days'][i][j])\n nda = np.packbits(array, bitorder='little')\n a[i+13] = int(nda[0])\n except:\n e = sys.exc_info()\n print( \"EXCEPTION3: \", e[0], e[1])\n return a\n\n"
] |
[
[
"numpy.packbits"
]
] |
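The one numpy call recorded for timetable_parser.py, numpy.packbits with bitorder='little', packs the per-day booleans of a timetable entry into a single byte, with the first flag landing in the least significant bit. A small sketch with an invented flag pattern:

    import numpy as np

    day_flags = [1, 0, 1, 1, 0, 0]                     # six per-day flags, first flag = bit 0
    packed = np.packbits(day_flags, bitorder='little')
    print(int(packed[0]))                              # 0b001101 -> 13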
QuMuLab/PDKB-Planning
|
[
"61a96c006b606aa051b2c7c9b5bfc9b6473d2a4d"
] |
[
"pdkb/test/aamas.py"
] |
[
"\nimport sys, random, time\n\nfrom pdkb.kd45 import *\nfrom pdkb.indexed_kd45 import *\nfrom pdkb.pinf import *\nfrom pdkb.rml import *\nfrom pdkb.test.utils import random_pdkb, random_rml, write_file, append_file\n\nTYPE = 'normal'\nNUM_PDKBS = 10\nQUERIES_PER_PDKB = 10\n\nif 'small' == TYPE:\n AGENTS = (2,3)\n DEPTH = (2,3)\n FLUENTS = list(map(Literal, 'pqr'))\n FLUENT_RANGE = (2,3)\n RMLS = (3,8)\n\nelif 'normal' == TYPE:\n AGENTS = (3,6)\n DEPTH = (4,7)\n FLUENTS = list(map(Literal, 'pqrst'))\n FLUENT_RANGE = (3,5)\n RMLS = (13,39)\n\nelif 'big' == TYPE:\n AGENTS = (3,10)\n DEPTH = (3,10)\n FLUENTS = list(map(Literal, 'pqrstvwxyz'))\n FLUENT_RANGE = (5,10)\n RMLS = (50,150)\n\nelse:\n assert False, \"Bad experiment type: %s\" % TYPE\n\ndef now():\n return time.time()\n\n\ndef doit():\n\n skip_ag = 0\n skip_dep = 0\n\n if skip_ag == 0 and skip_dep == 0:\n write_file('aamas.csv', 'agents,depth,fluents,inf-size,closed-size,reduced-size,inf-query,closed-query,reduced-query,inf-update,closed-update,reduced-update')\n\n for ag in range(AGENTS[0], AGENTS[1]+1):\n for dep in range(DEPTH[0], DEPTH[1]+1):\n\n if ag < skip_ag:\n continue\n elif ag == skip_ag and dep < skip_dep:\n continue\n\n print()\n print(\"--------------\")\n print(\" %d x %d\" % (ag, dep))\n print(\"--------------\")\n (times, sizes) = get_size_and_time(ag, dep, FLUENTS)\n print()\n print(\"-------------------------\")\n\n append_file('aamas.csv', \"\\n%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\" % (ag, dep, len(FLUENTS), sizes[0], sizes[1], sizes[2], times[0], times[1], times[2], times[3], times[4], times[5]))\n\n #csv = ['agents,depth,fluents,reduced-rmls,closed-rmls,inf-size,closed-size,reduced-size,inf-pre,closed-pre,inf-query,closed-query,reduced-query,result']\n #csv_yes = [csv[0]]\n #csv_no = [csv[0]]\n\n #kbs.append(random_pdkb(random.randint(DEPTH[0], DEPTH[1]),\n # random.randint(AGENTS[0], AGENTS[1]),\n # FLUENTS[:random.randint(FLUENT_RANGE[0], FLUENT_RANGE[1])],\n # random.randint(RMLS[0], RMLS[1]),\n # False))\n\n #write_file('aamas.csv', csv)\n #write_file('aamas-no.csv', csv_no)\n #write_file('aamas-yes.csv', csv_yes)\n #write_file('aamas-all.csv', csv_yes + csv_no[1:])\n\ndef get_size_and_time(num_agents, depth, fluents):\n\n agents = list(range(1, num_agents + 1))\n\n def generate_kbs():\n numRMLs = num_agents * depth * 2\n closed_kb = PDKB(depth, agents, fluents)\n indexed_kb = IndexedPDKB(depth, agents, fluents)\n count = 0\n while count < numRMLs:\n next_rml = random_rml(depth, agents, fluents)\n if not closed_kb.query(neg(next_rml)):\n closed_kb.add_rml(next_rml)\n closed_kb.logically_close()\n indexed_kb.expand(set([next_rml]))\n count += 1\n\n inf_kb = INF.PDKB2INF(closed_kb)\n return (inf_kb, closed_kb, indexed_kb)\n \n '''\n print\n print \"Generating %d PDKBs...\" % NUM_PDKBS\n kbs = []\n infs = []\n indexed_kbs = []\n progress = 10\n trial = 1\n\n for i in range(NUM_PDKBS):\n if trial > progress:\n print \"%d%%\" % progress\n progress += 10\n trial += 1\n\n (inf_kb, closed_kb, indexed_kb) = generate_kbs()\n kbs.append(closed_kb)\n indexed_kbs.append(indexed_kb)\n infs.append(inf_kb)\n\n print\n\n print \"Closing PDKBs...\"\n closed_kbs = [kb.copy() for kb in kbs]\n closure_time = []\n progress = 10\n trial = 1\n for kb in closed_kbs:\n if trial > progress:\n print \"%d%%\" % progress\n progress += 10\n trial += 1\n start = now()\n kb.logically_close()\n assert kb.is_consistent()\n closure_time.append(now() - start)\n print\n\n print \"Computing INFs...\"\n for kb in kbs:\n start = now()\n 
infs.append(INF.PDKB2INF(kb))\n inf_time.append(now() - start)\n '''\n\n def run_queries(index, rml, infs_kb, closed_kb, indexed_kb):\n start = now()\n ans1 = infs_kb.query(rml)\n inf_query = now() - start\n start = now()\n #ans2 = rml in closed_kbs[index].rmls\n ans2 = closed_kb.query(rml)\n closed_query = now() - start\n start = now()\n ans3 = indexed_kb.query(rml)\n unclosed_query = now() - start\n\n assert ans1 == ans2\n assert ans2 == ans3\n\n # Copy the KBs to run update commands without changing the original KBs\n copy_kb = closed_kb.copy()\n copy_indexed_kb = indexed_kb.copy()\n\n #start = now()\n # INF update is not yet implemented...\n inf_update = 0.0 #now() - start\n start = now()\n copy_kb.update(set([rml]))\n closed_update = now() - start\n start = now()\n copy_indexed_kb.update(set([rml]))\n unclosed_update = now() - start\n\n return (ans1, ans2, ans3, inf_query, closed_query, unclosed_query, inf_update, closed_update, unclosed_update)\n\n #print \"Performing random misc queries...\"\n for i in range(NUM_PDKBS):\n #for i in range(0):\n (infs_kb, closed_kb, unclosed_kb) = generate_kbs()\n for j in range(QUERIES_PER_PDKB):\n\n rml = random_rml(closed_kb.depth, closed_kb.agents, closed_kb.props)\n (ans1, ans2, ans3, inf_query, closed_query, unclosed_query, inf_update, closed_update, unclosed_update) = run_queries(i, rml, infs_kb, closed_kb, unclosed_kb)\n #(inf_update, closed_update, unclosed_update) =\n\n #csv.append(\"%d,%d,%d,%d,%d,%d,%d,%d,%f,%f,%f,%f,%f,%s\" %\n # (len(kbs[i].agents), kbs[i].depth, len(kbs[i].props), len(kbs[i].rmls), len(closed_kbs[i].rmls), infs[i].size(), closed_kbs[i].size(), kbs[i].size(),\n # inf_time[i], closure_time[i], inf_query, closed_query, unclosed_query, str(ans1)))\n\n print(\"Performing random successful queries...\")\n times = [0.0,0.0,0.0,0.0,0.0,0.0]\n for i in range(NUM_PDKBS):\n\n (infs_kb, closed_kb, unclosed_kb) = generate_kbs()\n\n for j in range(QUERIES_PER_PDKB):\n\n # Get a random RML from the PDKB\n rml = random.choice(list(closed_kb.rmls))\n # Get the closed set\n #entailed = list(kd_closure(rml))\n # Pick a random element\n #rml = random.choice(entailed)\n #(infs_kb, closed_kb, unclosed_kb) = generate_kbs()\n (ans1, ans2, ans3, inf_query, closed_query, unclosed_query, inf_update, closed_update, unclosed_update) = run_queries(i, rml, infs_kb, closed_kb, unclosed_kb)\n\n assert ans1 == ans2\n assert ans2 == ans3\n\n times[0] += inf_query\n times[1] += closed_query\n times[2] += unclosed_query\n times[3] += inf_update\n times[4] += closed_update\n times[5] += unclosed_update\n\n #csv_yes.append(\"%d,%d,%d,%d,%d,%d,%d,%d,%f,%f,%f,%f,%f,%s\" %\n # (len(kbs[i].agents), kbs[i].depth, len(kbs[i].props), len(kbs[i].rmls), len(closed_kbs[i].rmls), infs[i].size(), closed_kbs[i].size(), kbs[i].size(),\n # inf_time[i], closure_time[i], inf_query, closed_query, unclosed_query, str(ans1)))\n\n sizes = [0.0, 0.0, 0.0]\n print(\"Performing random unsuccessful queries...\")\n for i in range(NUM_PDKBS):\n\n (infs_kb, closed_kb, unclosed_kb) = generate_kbs()\n\n sizes[0] += infs_kb.size()\n sizes[1] += closed_kb.size()\n sizes[2] += unclosed_kb.size()\n\n for j in range(QUERIES_PER_PDKB):\n\n\n going = True\n while going:\n rml = random_rml(closed_kb.depth, closed_kb.agents, closed_kb.props)\n if rml not in closed_kb.rmls:\n going = False\n (ans1, ans2, ans3, inf_query, closed_query, unclosed_query, inf_update, closed_update, unclosed_update) = run_queries(i, rml, infs_kb, closed_kb, unclosed_kb)\n\n assert ans1 == ans2\n assert ans2 == ans3\n\n 
times[0] += inf_query\n times[1] += closed_query\n times[2] += unclosed_query\n times[3] += inf_update\n times[4] += closed_update\n times[5] += unclosed_update\n\n\n #csv_no.append(\"%d,%d,%d,%d,%d,%d,%d,%d,%f,%f,%f,%f,%f,%s\" %\n # (len(kbs[i].agents), kbs[i].depth, len(kbs[i].props), len(kbs[i].rmls), len(closed_kbs[i].rmls), infs[i].size(), closed_kbs[i].size(), kbs[i].size(),\n # inf_time[i], closure_time[i], inf_query, closed_query, unclosed_query, str(ans1)))\n\n\n times[0] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)\n times[1] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)\n times[2] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)\n times[3] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)\n times[4] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)\n times[5] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)\n\n #sizes.append(float(sum([inf.size() for inf in infs])) / float(NUM_PDKBS))\n #sizes.append(float(sum([kb.size() for kb in kbs])) / float(NUM_PDKBS))\n #sizes.append(float(sum([kb.size() for kb in indexed_kbs])) / float(NUM_PDKBS))\n sizes[0] /= float(NUM_PDKBS)\n sizes[1] /= float(NUM_PDKBS)\n sizes[2] /= float(NUM_PDKBS)\n\n print(\"\\nDone!\\n\")\n\n return (times, sizes)\n\n\ndef checkit(filename):\n\n data = load_CSV(filename)[1:]\n\n for row in data:\n for i in range(len(row)):\n if row[i] == '0.000000':\n row[i] = '0.000001'\n\n def plot_data(data, inds, labs, cols, zlabel, fname):\n\n data_map = {}\n for ag in range(AGENTS[0], AGENTS[1]+1):\n data_map[ag] = {}\n for dep in range(DEPTH[0], DEPTH[1]+1):\n data_map[ag][dep] = {}\n\n for row in data:\n data_map[int(row[0])][int(row[1])][inds[0]] = float(row[inds[0]])\n data_map[int(row[0])][int(row[1])][inds[1]] = float(row[inds[1]])\n data_map[int(row[0])][int(row[1])][inds[2]] = float(row[inds[2]])\n\n from mpl_toolkits.mplot3d import axes3d\n import matplotlib.pyplot as plt\n import matplotlib\n import numpy as np\n\n X, Y = np.meshgrid(np.arange(AGENTS[0], AGENTS[1]+1), np.arange(DEPTH[0], DEPTH[1]+1))\n\n #zs0 = np.array([1 for x,y in zip(np.ravel(X), np.ravel(Y))])\n #zs1 = np.array([data_map[x][y][ind1] / data_map[x][y][indnorm] for x,y in zip(np.ravel(X), np.ravel(Y))])\n #zs2 = np.array([data_map[x][y][ind2] / data_map[x][y][indnorm] for x,y in zip(np.ravel(X), np.ravel(Y))])\n\n zs0 = np.array([data_map[x][y][inds[0]] for x,y in zip(np.ravel(X), np.ravel(Y))])\n zs1 = np.array([data_map[x][y][inds[1]] for x,y in zip(np.ravel(X), np.ravel(Y))])\n zs2 = np.array([data_map[x][y][inds[2]] for x,y in zip(np.ravel(X), np.ravel(Y))])\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n if 'Query Time ($log_e(sec)$)' == zlabel or 'Update Time ($log_e(sec)$)' == zlabel:\n print(\"za = \" + str(zs0))\n Z0 = np.log(zs0).reshape(X.shape)\n print(\"Z0 = \" + str(Z0))\n Z1 = np.log(zs1).reshape(X.shape)\n Z2 = np.log(zs2).reshape(X.shape)\n else:\n #ax.set_zticks([])\n Z0 = (zs0 / 1000).reshape(X.shape)\n Z1 = (zs1 / 1000).reshape(X.shape)\n Z2 = (zs2 / 1000).reshape(X.shape)\n\n #ax.plot_wireframe(X, Y, Z0, color='0.75')\n ax.plot_wireframe(X, Y, Z0, color=cols[0])\n ax.plot_wireframe(X, Y, Z1, color=cols[1])\n ax.plot_wireframe(X, Y, Z2, color=cols[2])\n\n #cset = ax.contourf(X, Y, Z0, zdir='z', offset=-100, cmap=matplotlib.cm.coolwarm)\n #cset = ax.contourf(X, Y, Z0, zdir='x', offset=0, cmap=matplotlib.cm.coolwarm)\n #cset = ax.contourf(X, Y, Z0, zdir='z', offset=0, cmap=matplotlib.cm.coolwarm)\n #cset = ax.contourf(X, Y, Z0, zdir='y', offset=40, cmap=cm.coolwarm)\n\n ax.set_xlabel('# of Agents')\n ax.set_ylabel('Maximum Depth')\n 
ax.set_zlabel(zlabel)\n\n scatter1_proxy = matplotlib.lines.Line2D([0],[0], linestyle=\"none\", c=cols[0], marker = 's')\n scatter2_proxy = matplotlib.lines.Line2D([0],[0], linestyle=\"none\", c=cols[1], marker = 's')\n scatter3_proxy = matplotlib.lines.Line2D([0],[0], linestyle=\"none\", c=cols[2], marker = 's')\n ax.legend([scatter1_proxy, scatter2_proxy, scatter3_proxy], [labs[0], labs[1], labs[2]], numpoints = 1)\n\n ax.get_xaxis().set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))\n ax.get_yaxis().set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))\n\n plt.show()\n\n\n col1 = '#1b9e77'\n col2 = '#d95f02'\n col3 = '#7570b3'\n\n print(\"Plotting query time...\")\n plot_data(data, [6, 8, 7], ['INF', '$V_{RML}$', 'Closure'], [col1, col3, col2], 'Query Time ($log_e(sec)$)', 'time.eps')\n\n print(\"Plotting size...\")\n plot_data(data, [4, 3, 5], ['Closure', 'INF', '$V_{RML}$'], [col2, col1, col3], 'Size (x1000)', 'size.eps')\n\n print(\"Plotting update time...\")\n plot_data(data, [9, 11, 10], ['INF', '$V_{RML}$', 'Closure'], [col1, col3, col2], 'Update Time ($log_e(sec)$)', 'update_time.eps')\n\n\n\n"
] |
[
[
"numpy.log",
"numpy.arange",
"matplotlib.lines.Line2D",
"matplotlib.ticker.MaxNLocator",
"numpy.ravel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
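The matplotlib entries recorded for aamas.py above come from its 3D wireframe plots, which rely on two idioms that are easy to miss: proxy matplotlib.lines.Line2D artists to give plot_wireframe a legend (wireframes return no legend handle of their own), and matplotlib.ticker.MaxNLocator(integer=True) to force integer agent/depth ticks. A condensed sketch with an invented surface:

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.lines import Line2D
    from matplotlib.ticker import MaxNLocator
    from mpl_toolkits.mplot3d import axes3d  # noqa: F401  (needed on older matplotlib)

    X, Y = np.meshgrid(np.arange(2, 7), np.arange(3, 8))
    Z = np.log(np.array([x * y for x, y in zip(np.ravel(X), np.ravel(Y))])).reshape(X.shape)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_wireframe(X, Y, Z, color='#1b9e77')

    # register a proxy artist so the wireframe shows up in the legend
    proxy = Line2D([0], [0], linestyle='none', c='#1b9e77', marker='s')
    ax.legend([proxy], ['$log_e(x y)$'], numpoints=1)

    ax.get_xaxis().set_major_locator(MaxNLocator(integer=True))
    ax.get_yaxis().set_major_locator(MaxNLocator(integer=True))
    plt.show()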
PKUxxz/simp-detr
|
[
"c83846b1b6fc0e396e268dcfef278e162cf231c5"
] |
[
"models/main.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport argparse\nimport datetime\nimport json\nimport random\nimport sys\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, DistributedSampler\n\nsys.path.append(\"../../../lib\")\nimport datasets\nimport util.misc as utils\nfrom datasets import build_dataset, get_coco_api_from_dataset\nfrom detr import build\nfrom engine import evaluate, train_one_epoch\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('Set transformer detector', add_help=False)\n parser.add_argument('--lr', default=1e-4, type=float)\n parser.add_argument('--lr_backbone', default=1e-5, type=float)\n parser.add_argument('--batch_size', default=2, type=int)\n parser.add_argument('--weight_decay', default=1e-4, type=float)\n parser.add_argument('--epochs', default=50, type=int)\n parser.add_argument('--lr_drop', default=45, type=int)\n parser.add_argument('--clip_max_norm', default=0.1, type=float,\n help='gradient clipping max norm')\n\n # Model parameters\n parser.add_argument('--frozen_weights', type=str, default=None,\n help=\"Path to the pretrained model. If set, only the mask head will be trained\")\n # * Backbone\n parser.add_argument('--backbone', default='resnet50', type=str,\n help=\"Name of the convolutional backbone to use\")\n parser.add_argument('--dilation', action='store_true',\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n\n # * Transformer\n parser.add_argument('--enc_layers', default=6, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=6, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--dim_feedforward', default=2048, type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n parser.add_argument('--hidden_dim', default=256, type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n parser.add_argument('--nheads', default=8, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=100, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--pre_norm', action='store_true')\n\n # * Segmentation\n parser.add_argument('--masks', action='store_true',\n help=\"Train segmentation head if the flag is provided\")\n\n # Loss\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n # * Matcher\n parser.add_argument('--set_cost_class', default=1, type=float,\n help=\"Class coefficient in the matching cost\")\n parser.add_argument('--set_cost_bbox', default=5, type=float,\n help=\"L1 box coefficient in the matching cost\")\n parser.add_argument('--set_cost_giou', default=2, type=float,\n help=\"giou box coefficient in the matching cost\")\n # * Loss coefficients\n parser.add_argument('--mask_loss_coef', default=1, type=float)\n parser.add_argument('--dice_loss_coef', default=1, type=float)\n parser.add_argument('--bbox_loss_coef', default=5, type=float)\n parser.add_argument('--giou_loss_coef', default=2, 
type=float)\n parser.add_argument('--eos_coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n\n # dataset parameters\n parser.add_argument('--dataset_file', default='coco')\n parser.add_argument('--coco_path', type=str)\n parser.add_argument('--coco_panoptic_path', type=str)\n parser.add_argument('--remove_difficult', action='store_true')\n\n parser.add_argument('--output_dir', default='train_log',\n help='path where to save, empty for no saving')\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--seed', default=42, type=int)\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch')\n parser.add_argument('--eval', action='store_true')\n parser.add_argument('--num_workers', default=2, type=int)\n\n # distributed training parameters\n parser.add_argument('--world_size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\n return parser\n\n\ndef main(args):\n utils.init_distributed_mode(args)\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n\n if args.frozen_weights is not None:\n assert args.masks, \"Frozen training is meant for segmentation only\"\n print(args)\n\n device = torch.device(args.device)\n\n # fix the seed for reproducibility\n seed = args.seed + utils.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n model, criterion, postprocessors = build(args)\n model.to(device)\n\n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print('number of params:', n_parameters)\n\n param_dicts = [\n {\"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" not in n and p.requires_grad]},\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" in n and p.requires_grad],\n \"lr\": args.lr_backbone,\n },\n ]\n optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,\n weight_decay=args.weight_decay)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)\n\n dataset_train = build_dataset(image_set='train', args=args)\n dataset_val = build_dataset(image_set='val', args=args)\n\n if args.distributed:\n sampler_train = DistributedSampler(dataset_train)\n sampler_val = DistributedSampler(dataset_val, shuffle=False)\n else:\n sampler_train = torch.utils.data.RandomSampler(dataset_train)\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\n\n batch_sampler_train = torch.utils.data.BatchSampler(\n sampler_train, args.batch_size, drop_last=True)\n\n data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,\n collate_fn=utils.collate_fn, num_workers=args.num_workers)\n data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,\n drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)\n\n if args.dataset_file == \"coco_panoptic\":\n # We also evaluate AP during panoptic training, on original coco DS\n coco_val = datasets.coco.build(\"val\", args)\n base_ds = get_coco_api_from_dataset(coco_val)\n else:\n base_ds = get_coco_api_from_dataset(dataset_val)\n\n if args.frozen_weights is not None:\n 
checkpoint = torch.load(args.frozen_weights, map_location='cpu')\n model_without_ddp.detr.load_state_dict(checkpoint['model'])\n\n if args.output_dir == \"train_log\":\n local_dir = Path.cwd()\n *_, user, experiment = local_dir.parts\n output_dir = local_dir.parents[2] / \"output\" / user / experiment\n else:\n output_dir = Path(args.output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n link_dir = Path(\"train_log\")\n if utils.is_main_process():\n if link_dir.is_symlink():\n link_dir.unlink()\n link_dir.symlink_to(output_dir)\n \n if args.resume:\n if args.resume.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n args.resume, map_location='cpu', check_hash=True)\n else:\n checkpoint = torch.load(args.resume, map_location='cpu')\n model_without_ddp.load_state_dict(checkpoint['model'])\n if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n args.start_epoch = checkpoint['epoch'] + 1\n\n if args.eval:\n test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,\n data_loader_val, base_ds, device, args.output_dir)\n if args.output_dir:\n utils.save_on_master(coco_evaluator.coco_eval[\"bbox\"].eval, output_dir / \"eval.pth\")\n return\n\n print(\"Start training\")\n start_time = time.time()\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n sampler_train.set_epoch(epoch)\n train_stats = train_one_epoch(\n model, criterion, data_loader_train, optimizer, device, epoch,\n args.clip_max_norm)\n lr_scheduler.step()\n if args.output_dir:\n checkpoint_paths = [output_dir / 'checkpoint.pth']\n # extra checkpoint before LR drop and every 100 epochs\n if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:\n checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')\n for checkpoint_path in checkpoint_paths:\n utils.save_on_master({\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch,\n 'args': args,\n }, checkpoint_path)\n\n test_stats, coco_evaluator = evaluate(\n model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir\n )\n\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\n **{f'test_{k}': v for k, v in test_stats.items()},\n 'epoch': epoch,\n 'n_parameters': n_parameters}\n\n if args.output_dir and utils.is_main_process():\n with (output_dir / \"log.txt\").open(\"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n\n # for evaluation logs\n if coco_evaluator is not None:\n (output_dir / 'eval').mkdir(exist_ok=True)\n if \"bbox\" in coco_evaluator.coco_eval:\n filenames = ['latest.pth']\n # if epoch % 50 == 0:\n # filenames.append(f'{epoch:03}.pth')\n filenames.append(f'{epoch:03}.pth')\n for name in filenames:\n torch.save(coco_evaluator.coco_eval[\"bbox\"].eval,\n output_dir / \"eval\" / name)\n\n result_file = (output_dir / \"eval\" / \"result.txt\")\n if epoch == 0 and result_file.exists():\n result_file.unlink()\n eval_result = utils.get_summary(coco_evaluator.coco_eval[\"bbox\"])\n with result_file.open(\"a\") as f:\n f.write(f\"Evaluation result of Epoch {epoch:03}:\\n\")\n f.write(f\"-------------------------------------------------------------------------------\\n\")\n f.write(eval_result)\n f.write(f\"-------------------------------------------------------------------------------\\n\\n\")\n\n total_time = 
time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])\n args = parser.parse_args()\n\n main(args)\n"
] |
[
[
"torch.utils.data.DistributedSampler",
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler",
"torch.optim.AdamW",
"torch.save",
"torch.nn.parallel.DistributedDataParallel",
"torch.device",
"torch.hub.load_state_dict_from_url",
"torch.utils.data.BatchSampler",
"torch.optim.lr_scheduler.StepLR"
]
] |
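Several of the torch entries recorded for models/main.py above belong to one wiring pattern: a RandomSampler wrapped in a BatchSampler with drop_last=True and handed to DataLoader via batch_sampler, plus an AdamW over two parameter groups so the backbone trains at a lower learning rate, stepped once per epoch by a StepLR. A self-contained sketch with a toy dataset and model standing in for the DETR pieces:

    import torch
    from torch.utils.data import BatchSampler, DataLoader, RandomSampler, TensorDataset

    # toy stand-ins for the COCO dataset and the DETR model
    dataset = TensorDataset(torch.randn(100, 8), torch.randint(0, 2, (100,)))
    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 2))

    sampler = RandomSampler(dataset)
    batch_sampler = BatchSampler(sampler, batch_size=2, drop_last=True)
    loader = DataLoader(dataset, batch_sampler=batch_sampler, num_workers=0)

    param_dicts = [
        {"params": model[1].parameters()},              # "head" group, default lr
        {"params": model[0].parameters(), "lr": 1e-5},  # "backbone" group, lower lr
    ]
    optimizer = torch.optim.AdamW(param_dicts, lr=1e-4, weight_decay=1e-4)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=45)

    for inputs, targets in loader:
        loss = torch.nn.functional.cross_entropy(model(inputs), targets)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        break                                           # one step is enough for the sketch
    lr_scheduler.step()                                 # called once per epoch in the script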
goitom/eemeter
|
[
"bb05d5b776546858f8f3a8d3a95bec202728d9f0"
] |
[
"tests/test_segmentation.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\n Copyright 2018 Open Energy Efficiency, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\nimport json\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom eemeter.segmentation import (\n CalTRACKSegmentModel,\n SegmentedModel,\n segment_time_series,\n iterate_segmented_dataset,\n)\n\n\[email protected]\ndef index_8760():\n return pd.date_range(\"2017-01-01\", periods=365 * 24, freq=\"H\", tz=\"UTC\")\n\n\ndef test_segment_time_series_invalid_type(index_8760):\n with pytest.raises(ValueError):\n segment_time_series(index_8760, segment_type=\"unknown\")\n\n\ndef test_segment_time_series_single(index_8760):\n weights = segment_time_series(index_8760, segment_type=\"single\")\n assert list(weights.columns) == [\"all\"]\n assert weights.shape == (8760, 1)\n assert weights.sum().sum() == 8760.0\n\n\ndef test_segment_time_series_one_month(index_8760):\n weights = segment_time_series(index_8760, segment_type=\"one_month\")\n assert list(weights.columns) == [\n \"jan\",\n \"feb\",\n \"mar\",\n \"apr\",\n \"may\",\n \"jun\",\n \"jul\",\n \"aug\",\n \"sep\",\n \"oct\",\n \"nov\",\n \"dec\",\n ]\n assert weights.shape == (8760, 12)\n assert weights.sum().sum() == 8760.0\n\n\ndef test_segment_time_series_three_month(index_8760):\n weights = segment_time_series(index_8760, segment_type=\"three_month\")\n assert list(weights.columns) == [\n \"dec-jan-feb\",\n \"jan-feb-mar\",\n \"feb-mar-apr\",\n \"mar-apr-may\",\n \"apr-may-jun\",\n \"may-jun-jul\",\n \"jun-jul-aug\",\n \"jul-aug-sep\",\n \"aug-sep-oct\",\n \"sep-oct-nov\",\n \"oct-nov-dec\",\n \"nov-dec-jan\",\n ]\n assert weights.shape == (8760, 12)\n assert weights.sum().sum() == 26280.0\n\n\ndef test_segment_time_series_three_month_weighted(index_8760):\n weights = segment_time_series(index_8760, segment_type=\"three_month_weighted\")\n assert list(weights.columns) == [\n \"dec-jan-feb-weighted\",\n \"jan-feb-mar-weighted\",\n \"feb-mar-apr-weighted\",\n \"mar-apr-may-weighted\",\n \"apr-may-jun-weighted\",\n \"may-jun-jul-weighted\",\n \"jun-jul-aug-weighted\",\n \"jul-aug-sep-weighted\",\n \"aug-sep-oct-weighted\",\n \"sep-oct-nov-weighted\",\n \"oct-nov-dec-weighted\",\n \"nov-dec-jan-weighted\",\n ]\n assert weights.shape == (8760, 12)\n assert weights.sum().sum() == 17520.0\n\n\ndef test_segment_time_series_drop_zero_weight_segments(index_8760):\n weights = segment_time_series(\n index_8760[:100], segment_type=\"one_month\", drop_zero_weight_segments=True\n )\n assert list(weights.columns) == [\"jan\"]\n assert weights.shape == (100, 1)\n assert weights.sum().sum() == 100.0\n\n\[email protected]\ndef dataset():\n index = pd.date_range(\"2017-01-01\", periods=1000, freq=\"H\", tz=\"UTC\")\n return pd.DataFrame({\"a\": 1, \"b\": 2}, index=index, columns=[\"a\", \"b\"])\n\n\ndef test_iterate_segmented_dataset_no_segmentation(dataset):\n iterator = iterate_segmented_dataset(dataset, segmentation=None)\n segment_name, data = next(iterator)\n assert segment_name is None\n assert 
list(data.columns) == [\"a\", \"b\", \"weight\"]\n assert data.shape == (1000, 3)\n assert data.sum().sum() == 4000\n\n with pytest.raises(StopIteration):\n next(iterator)\n\n\[email protected]\ndef segmentation(dataset):\n return segment_time_series(dataset.index, segment_type=\"one_month\")\n\n\ndef test_iterate_segmented_dataset_with_segmentation(dataset, segmentation):\n iterator = iterate_segmented_dataset(dataset, segmentation=segmentation)\n segment_name, data = next(iterator)\n assert segment_name == \"jan\"\n assert list(data.columns) == [\"a\", \"b\", \"weight\"]\n assert data.shape == (744, 3)\n assert data.sum().sum() == 2976.0\n\n segment_name, data = next(iterator)\n assert segment_name == \"feb\"\n assert list(data.columns) == [\"a\", \"b\", \"weight\"]\n assert data.shape == (256, 3)\n assert data.sum().sum() == 1024.0\n\n segment_name, data = next(iterator)\n assert segment_name == \"mar\"\n assert list(data.columns) == [\"a\", \"b\", \"weight\"]\n assert data.shape == (0, 3)\n assert data.sum().sum() == 0.0\n\n\ndef test_iterate_segmented_dataset_with_processor(dataset, segmentation):\n feature_processor_segment_names = []\n\n def feature_processor(\n segment_name, dataset, column_mapping=None\n ): # rename some columns\n feature_processor_segment_names.append(segment_name)\n return dataset.rename(columns=column_mapping).assign(weight=1)\n\n iterator = iterate_segmented_dataset(\n dataset,\n segmentation=segmentation,\n feature_processor=feature_processor,\n feature_processor_kwargs={\"column_mapping\": {\"a\": \"c\", \"b\": \"d\"}},\n feature_processor_segment_name_mapping={\"jan\": \"jan2\", \"feb\": \"feb2\"},\n )\n segment_name, data = next(iterator)\n assert feature_processor_segment_names == [\"jan2\"]\n assert segment_name == \"jan\"\n assert list(data.columns) == [\"c\", \"d\", \"weight\"]\n assert data.shape == (1000, 3)\n assert data.sum().sum() == 4000.0\n\n segment_name, data = next(iterator)\n assert feature_processor_segment_names == [\"jan2\", \"feb2\"]\n assert segment_name == \"feb\"\n assert list(data.columns) == [\"c\", \"d\", \"weight\"]\n assert data.shape == (1000, 3)\n assert data.sum().sum() == 4000.0\n\n\ndef test_segment_model():\n segment_model = CalTRACKSegmentModel(\n segment_name=\"segment\",\n model=None,\n formula=\"meter_value ~ C(hour_of_week) + a - 1\",\n model_params={\"C(hour_of_week)[1]\": 1, \"a\": 1},\n warnings=None,\n )\n index = pd.date_range(\"2017-01-01\", periods=2, freq=\"H\", tz=\"UTC\")\n data = pd.DataFrame({\"a\": [1, 1], \"hour_of_week\": [1, 1]}, index=index)\n prediction = segment_model.predict(data)\n assert prediction.sum() == 4\n\n\ndef test_segmented_model():\n segment_model = CalTRACKSegmentModel(\n segment_name=\"jan\",\n model=None,\n formula=\"meter_value ~ C(hour_of_week) + a- 1\",\n model_params={\"C(hour_of_week)[1]\": 1, \"a\": 1},\n warnings=None,\n )\n\n def fake_feature_processor(segment_name, segment_data):\n return pd.DataFrame(\n {\"hour_of_week\": 1, \"a\": 1, \"weight\": segment_data.weight},\n index=segment_data.index,\n )\n\n segmented_model = SegmentedModel(\n segment_models=[segment_model],\n prediction_segment_type=\"one_month\",\n prediction_segment_name_mapping=None,\n prediction_feature_processor=fake_feature_processor,\n prediction_feature_processor_kwargs=None,\n )\n\n # make this cover jan and feb but only supply jan model\n index = pd.date_range(\"2017-01-01\", periods=24 * 50, freq=\"H\", tz=\"UTC\")\n temps = pd.Series(np.linspace(0, 100, 24 * 50), index=index)\n prediction = 
segmented_model.predict(temps.index, temps).result.predicted_usage\n assert prediction.sum() == 1488.0\n\n\ndef test_segment_model_serialized():\n segment_model = CalTRACKSegmentModel(\n segment_name=\"jan\",\n model=None,\n formula=\"meter_value ~ a + b - 1\",\n model_params={\"a\": 1, \"b\": 1},\n warnings=None,\n )\n assert segment_model.json()[\"formula\"] == \"meter_value ~ a + b - 1\"\n assert segment_model.json()[\"model_params\"] == {\"a\": 1, \"b\": 1}\n assert segment_model.json()[\"warnings\"] == []\n assert json.dumps(segment_model.json())\n\n\ndef test_segmented_model_serialized():\n segment_model = CalTRACKSegmentModel(\n segment_name=\"jan\",\n model=None,\n formula=\"meter_value ~ a + b - 1\",\n model_params={\"a\": 1, \"b\": 1},\n warnings=None,\n )\n\n def fake_feature_processor(segment_name, segment_data): # pragma: no cover\n return pd.DataFrame(\n {\"a\": 1, \"b\": 1, \"weight\": segment_data.weight}, index=segment_data.index\n )\n\n segmented_model = SegmentedModel(\n segment_models=[segment_model],\n prediction_segment_type=\"one_month\",\n prediction_segment_name_mapping=None,\n prediction_feature_processor=fake_feature_processor,\n prediction_feature_processor_kwargs=None,\n )\n assert segmented_model.json()[\"prediction_segment_type\"] == \"one_month\"\n assert (\n segmented_model.json()[\"prediction_feature_processor\"]\n == \"fake_feature_processor\"\n )\n assert json.dumps(segmented_model.json())\n"
] |
[
[
"numpy.linspace",
"pandas.DataFrame",
"pandas.date_range"
]
] |
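The pandas and numpy calls recorded for test_segmentation.py above all come from its fixtures: pandas.date_range builds an hourly, UTC DatetimeIndex, pandas.DataFrame the toy dataset, and numpy.linspace the synthetic temperature series. A compact sketch of the pattern (period counts here are arbitrary):

    import numpy as np
    import pandas as pd

    index_8760 = pd.date_range("2017-01-01", periods=365 * 24, freq="H", tz="UTC")
    dataset = pd.DataFrame({"a": 1, "b": 2}, index=index_8760, columns=["a", "b"])
    temps = pd.Series(np.linspace(0, 100, len(index_8760)), index=index_8760)
    print(len(index_8760), dataset.shape, temps.iloc[-1])   # 8760 (8760, 2) 100.0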
Alhassan20/mealpy
|
[
"7ed365c5c495ad1c1e066662c90159b3d5e9b8e3"
] |
[
"mealpy/bio_based/VCS.py"
] |
[
"#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 22:07, 11/04/2020 %\n# %\n# Email: [email protected] %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieu1995 %\n#-------------------------------------------------------------------------------------------------------%\n\nfrom numpy import sum, log1p, array, mean, prod, abs, where\nfrom numpy.random import uniform, normal, choice\nfrom copy import deepcopy\nfrom mealpy.optimizer import Root\n\n\nclass BaseVCS(Root):\n \"\"\"\n My version of: Virus Colony Search (VCS)\n A Novel Nature-inspired Algorithm For Optimization: Virus Colony Search\n Link:\n https://doi.org/10.1016/j.advengsoft.2015.11.004\n Notes:\n + Remove all third loop, make algrithm 10 times faster than original\n + In Immune response process, updating whole position instead of updating each variable in position\n + Drop batch-size idea to 3 main process of this algorithm, make it more robust\n \"\"\"\n\n def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, lamda=0.5, xichma=0.3, **kwargs):\n super().__init__(obj_func, lb, ub, verbose, kwargs)\n self.epoch = epoch\n self.pop_size = pop_size\n self.xichma = xichma # Weight factor\n self.lamda = lamda # Number of the best will keep\n if lamda < 1:\n self.n_best = int(lamda * self.pop_size)\n else:\n self.n_best = int(lamda)\n\n def train(self):\n pop = [self.create_solution() for _ in range(self.pop_size)]\n pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)\n pos_list = [item[self.ID_POS] for item in pop]\n x_mean = mean(pos_list, axis=0)\n\n for epoch in range(self.epoch):\n ## Viruses diffusion\n for i in range(0, self.pop_size):\n xichma = (log1p(epoch + 1) / self.epoch) * (pop[i][self.ID_POS] - g_best[self.ID_POS])\n gauss = normal(normal(g_best[self.ID_POS], abs(xichma)))\n pos_new = gauss + uniform() * g_best[self.ID_POS] - uniform() * pop[i][self.ID_POS]\n pos_new = self.amend_position_random(pos_new)\n fit = self.get_fitness_position(pos_new)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [pos_new, fit]\n\n # Batch-size idea\n if self.batch_idea:\n if (i + 1) % self.batch_size == 0:\n g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n else:\n if (i + 1) % self.pop_size == 0:\n g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n\n ## Host cells infection\n xichma = self.xichma * (1 - (epoch + 1) / self.epoch)\n for i in range(0, self.pop_size):\n pos_new = x_mean + xichma * normal(0, 1, self.problem_size) ## Basic / simple version, not the original version in the paper\n pos_new = self.amend_position_random(pos_new)\n fit = self.get_fitness_position(pos_new)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [pos_new, fit]\n\n # Batch-size idea\n if self.batch_idea:\n if (i + 1) % self.batch_size == 0:\n g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n else:\n if (i + 1) % self.pop_size == 0:\n g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n\n ## Calculate the weighted mean of the λ best individuals by\n pop = sorted(pop, key=lambda item: item[self.ID_FIT])\n pos_list = [item[self.ID_POS] for item in pop[:self.n_best]]\n\n factor_down = self.n_best * log1p(self.n_best + 1) - log1p(prod(range(1, self.n_best + 1)))\n weight = log1p(self.n_best + 1) / factor_down\n weight = weight / 
self.n_best\n x_mean = weight * sum(pos_list, axis=0)\n\n ## Immune response\n for i in range(0, self.pop_size):\n pr = (self.problem_size - i + 1) / self.problem_size\n\n id1, id2 = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)\n temp = pop[id1][self.ID_POS] - (pop[id2][self.ID_POS] - pop[i][self.ID_POS]) * uniform()\n pos_new = deepcopy(pop[i][self.ID_POS])\n pos_new = where(uniform(0, 1, self.problem_size) < pr, pos_new, temp)\n\n fit = self.get_fitness_position(pos_new)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [pos_new, fit]\n\n # Batch-size idea\n if self.batch_idea:\n if (i + 1) % self.batch_size == 0:\n g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n else:\n if (i + 1) % self.pop_size == 0:\n g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n\n ## Update elite if a bower becomes fitter than the elite\n pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n self.loss_train.append(g_best[self.ID_FIT])\n if self.verbose:\n print(\"> Epoch: {}, Best fit: {}\".format(epoch + 1, g_best[self.ID_FIT]))\n self.solution = g_best\n return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train\n\n\nclass OriginalVCS(Root):\n \"\"\"\n The original version of: Virus Colony Search (VCS)\n A Novel Nature-inspired Algorithm For Optimization: Virus Colony Search\n - This is basic version, not the full version of the paper\n Link:\n https://doi.org/10.1016/j.advengsoft.2015.11.004\n \"\"\"\n\n def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, lamda=0.5, xichma=0.3, **kwargs):\n super().__init__(obj_func, lb, ub, verbose, kwargs)\n self.epoch = epoch\n self.pop_size = pop_size\n self.xichma = xichma # Weight factor\n self.lamda = lamda # Number of the best will keep\n if lamda < 1:\n self.n_best = int(lamda * self.pop_size)\n else:\n self.n_best = int(lamda)\n\n def train(self):\n pop = [self.create_solution() for _ in range(self.pop_size)]\n pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)\n pos_list = [item[self.ID_POS] for item in pop]\n x_mean = mean(pos_list, axis=0)\n\n for epoch in range(self.epoch):\n ## Viruses diffusion\n for i in range(0, self.pop_size):\n xichma = (log1p(epoch + 1) / self.epoch) * (pop[i][self.ID_POS] - g_best[self.ID_POS])\n gauss = array([normal(g_best[self.ID_POS][idx], abs(xichma[idx])) for idx in range(0, self.problem_size)])\n pos_new = gauss + uniform() * g_best[self.ID_POS] - uniform() * pop[i][self.ID_POS]\n pos_new = self.amend_position_random(pos_new)\n fit = self.get_fitness_position(pos_new)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [pos_new, fit]\n\n ## Host cells infection\n xichma = self.xichma * (1 - (epoch+1)/self.epoch)\n for i in range(0, self.pop_size):\n pos_new = x_mean + xichma * normal(0, 1, self.problem_size) ## Basic / simple version, not the original version in the paper\n pos_new = self.amend_position_random(pos_new)\n fit = self.get_fitness_position(pos_new)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [pos_new, fit]\n\n ## Calculate the weighted mean of the λ best individuals by\n pop = sorted(pop, key=lambda item: item[self.ID_FIT])\n pos_list = [item[self.ID_POS] for item in pop[:self.n_best]]\n\n factor_down = self.n_best * log1p(self.n_best + 1) - log1p(prod(range(1, self.n_best + 1)))\n weight = log1p(self.n_best + 1) / factor_down\n weight = weight / self.n_best\n x_mean = weight * sum(pos_list, axis=0)\n\n ## Immune response\n for 
i in range(0, self.pop_size):\n pr = (self.problem_size - i + 1) / self.problem_size\n pos_new = pop[i][self.ID_POS]\n for j in range(0, self.problem_size):\n if uniform() > pr:\n id1, id2 = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)\n pos_new[j] = pop[id1][self.ID_POS][j] - (pop[id2][self.ID_POS][j] - pop[i][self.ID_POS][j]) * uniform()\n fit = self.get_fitness_position(pos_new)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [pos_new, fit]\n\n ## Update elite if a bower becomes fitter than the elite\n pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n self.loss_train.append(g_best[self.ID_FIT])\n if self.verbose:\n print(\"> Epoch: {}, Best fit: {}\".format(epoch + 1, g_best[self.ID_FIT]))\n self.solution = g_best\n return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train\n"
] |
[
[
"numpy.abs",
"numpy.random.uniform",
"numpy.random.normal",
"numpy.mean",
"numpy.log1p",
"numpy.sum"
]
] |
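A step worth highlighting from VCS.py above, since it uses several of the catalogued numpy calls (log1p, sum), is the weighted mean of the λ best individuals: log1p-based weights over the n_best best positions followed by a weighted sum. A small sketch with a random placeholder population:

    import numpy as np

    n_best, dim = 5, 10
    best_positions = np.random.uniform(-1.0, 1.0, (n_best, dim))   # assumed already sorted by fitness

    factor_down = n_best * np.log1p(n_best + 1) - np.log1p(np.prod(range(1, n_best + 1)))
    weight = np.log1p(n_best + 1) / factor_down / n_best
    x_mean = weight * np.sum(best_positions, axis=0)
    print(x_mean.shape)   # (10,)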
patrickltobing/shallow-wavenet
|
[
"a7348805825e47a24e3ad0e759cecfe85284ba9f"
] |
[
"src/bin/feature_extract.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Patrick Lumban Tobing (Nagoya University)\n# based on PyTorch implementation for WaveNet vocoder by Tomoki Hayashi (Nagoya University)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport multiprocessing as mp\nimport os\nimport sys\nfrom distutils.util import strtobool\n\nimport logging\nimport numpy as np\nfrom numpy.matlib import repmat\nfrom scipy.interpolate import interp1d\n#from scipy.io import wavfile\nfrom scipy.signal import firwin\nfrom scipy.signal import lfilter\n\nfrom utils import find_files\nfrom utils import read_txt\nfrom utils import write_hdf5, read_hdf5\n\nfrom multiprocessing import Array\n\nimport pysptk as ps\nimport pyworld as pw\n#import librosa\nimport soundfile as sf\n\nnp.set_printoptions(threshold=np.inf)\n\nFS = 22050\nFS = 24000\n#FS = 44100\n#FS = 48000\nSHIFTMS = 5.0\nMINF0 = 40\nMAXF0 = 700\n#MCEP_DIM = 34\nMCEP_DIM = 49\nMCEP_ALPHA = 0.455\nMCEP_ALPHA = 0.466\n#MCEP_ALPHA = 0.544\n#MCEP_ALPHA = 0.554\nFFTL = 1024\nLOWPASS_CUTOFF = 20\nHIGHPASS_CUTOFF = 70\nOVERWRITE = True\n\n\ndef low_cut_filter(x, fs, cutoff=HIGHPASS_CUTOFF):\n \"\"\"FUNCTION TO APPLY LOW CUT FILTER\n\n Args:\n x (ndarray): Waveform sequence\n fs (int): Sampling frequency\n cutoff (float): Cutoff frequency of low cut filter\n\n Return:\n (ndarray): Low cut filtered waveform sequence\n \"\"\"\n\n nyquist = fs // 2\n norm_cutoff = cutoff / nyquist\n\n # low cut filter\n fil = firwin(255, norm_cutoff, pass_zero=False)\n lcf_x = lfilter(fil, 1, x)\n\n return lcf_x\n\n\ndef analyze(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):\n #f0_flr = pw.get_cheaptrick_f0_floor(fs, fftl)\n #logging.info(f0_flr)\n #fft_size = pw.get_cheaptrick_fft_size(fs, f0_flr)\n #logging.info(fft_size)\n #f0_flr = pw.get_cheaptrick_f0_floor(fs, fft_size)\n #logging.info(f0_flr)\n if f0 is None or time_axis is None:\n _f0, time_axis = pw.harvest(wav, fs, f0_floor=60.0, frame_period=fperiod)\n f0 = pw.stonemask(wav, _f0, time_axis, fs)\n sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)\n ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)\n\n return time_axis, f0, sp, ap\n\n\ndef analyze_range(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):\n if f0 is None or time_axis is None:\n #logging.info(\"%lf %lf %lf %lf\" % (minf0, maxf0, fperiod, fftl))\n #logging.info(\"1\")\n _f0, time_axis = pw.harvest(wav, fs, f0_floor=minf0, f0_ceil=maxf0, frame_period=fperiod)\n #_f0, time_axis = pw.harvest(wav, fs, f0_floor=60, f0_ceil=maxf0, frame_period=fperiod)\n #_f0, time_axis = pw.harvest(wav, fs, f0_floor=60, frame_period=fperiod)\n #_f0, time_axis = pw.harvest(wav, fs, f0_floor=minf0, frame_period=fperiod)\n #_f0, time_axis = pw.harvest(wav, fs, f0_floor=minf0, frame_period=fperiod)\n #logging.info(\"2\")\n f0 = pw.stonemask(wav, _f0, time_axis, fs)\n #logging.info(\"3\")\n #f0, time_axis = pw.harvest(wav, fs, f0_floor=minf0, f0_ceil=maxf0, frame_period=fperiod)\n sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)\n #logging.info(\"4\")\n ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)\n #logging.info(\"5\")\n\n return time_axis, f0, sp, ap\n\n\n#def read_wav(wav_file, cutoff=HIGHPASS_CUTOFF, fftl_ns=None):\ndef read_wav(wav_file, cutoff=HIGHPASS_CUTOFF):\n #fs, x = wavfile.read(wav_file)\n #x = librosa.util.fix_length(x, len(x) + fftl_ns // 2)\n x, 
fs = sf.read(wav_file)\n #x = np.array(x, dtype=np.float64)\n if cutoff != 0:\n x = low_cut_filter(x, fs, cutoff)\n\n return fs, x\n\n\ndef low_pass_filter(x, fs, cutoff=LOWPASS_CUTOFF, padding=True):\n \"\"\"FUNCTION TO APPLY LOW PASS FILTER\n\n Args:\n x (ndarray): Waveform sequence\n fs (int): Sampling frequency\n cutoff (float): Cutoff frequency of low pass filter\n\n Return:\n (ndarray): Low pass filtered waveform sequence\n \"\"\"\n\n nyquist = fs // 2\n norm_cutoff = cutoff / nyquist\n\n # low cut filter\n numtaps = 255\n fil = firwin(numtaps, norm_cutoff)\n x_pad = np.pad(x, (numtaps, numtaps), 'edge')\n lpf_x = lfilter(fil, 1, x_pad)\n lpf_x = lpf_x[numtaps + numtaps // 2: -numtaps // 2]\n\n return lpf_x\n\n\ndef convert_continuos_f0(f0):\n \"\"\"CONVERT F0 TO CONTINUOUS F0\n\n Args:\n f0 (ndarray): original f0 sequence with the shape (T)\n\n Return:\n (ndarray): continuous f0 with the shape (T)\n \"\"\"\n # get uv information as binary\n uv = np.float32(f0 != 0)\n\n # get start and end of f0\n start_f0 = f0[f0 != 0][0]\n end_f0 = f0[f0 != 0][-1]\n\n # padding start and end of f0 sequence\n start_idx = np.where(f0 == start_f0)[0][0]\n end_idx = np.where(f0 == end_f0)[0][-1]\n f0[:start_idx] = start_f0\n f0[end_idx:] = end_f0\n\n # get non-zero frame index\n nz_frames = np.where(f0 != 0)[0]\n\n # perform linear interpolation\n f = interp1d(nz_frames, f0[nz_frames])\n cont_f0 = f(np.arange(0, f0.shape[0]))\n\n return uv, cont_f0\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"making feature file argsurations.\")\n\n parser.add_argument(\"--expdir\", required=True,\n type=str, help=\"directory to save the log\")\n parser.add_argument(\n \"--waveforms\", default=None,\n help=\"directory or list of filename of input wavfile\")\n parser.add_argument(\n \"--hdf5dir\", default=None,\n help=\"directory to save hdf5\")\n parser.add_argument(\n \"--wavdir\", default=None,\n help=\"directory to save of preprocessed wav file\")\n parser.add_argument(\n \"--wavanasyndir\", default=None,\n help=\"directory to save of preprocessed wav file\")\n parser.add_argument(\n \"--fs\", default=FS,\n type=int, help=\"Sampling frequency\")\n parser.add_argument(\n \"--shiftms\", default=SHIFTMS,\n type=float, help=\"Frame shift in msec\")\n parser.add_argument(\n \"--minf0\", default=MINF0,\n type=int, help=\"minimum f0\")\n parser.add_argument(\n \"--maxf0\", default=MAXF0,\n type=int, help=\"maximum f0\")\n parser.add_argument(\n \"--mcep_dim\", default=MCEP_DIM,\n type=int, help=\"Dimension of mel cepstrum\")\n parser.add_argument(\n \"--mcep_alpha\", default=MCEP_ALPHA,\n type=float, help=\"Alpha of mel cepstrum\")\n parser.add_argument(\n \"--fftl\", default=FFTL,\n type=int, help=\"FFT length\")\n parser.add_argument(\n \"--fftl_ns\", default=None,\n type=int, help=\"FFT length for noise shaped waveforms\")\n parser.add_argument(\n \"--highpass_cutoff\", default=HIGHPASS_CUTOFF,\n type=int, help=\"Cut off frequency in lowpass filter\")\n parser.add_argument(\"--init\", default=False,\n type=strtobool, help=\"flag for computing stats of dtw-ed feature\")\n parser.add_argument(\n \"--n_jobs\", default=10,\n type=int, help=\"number of parallel jobs\")\n parser.add_argument(\n \"--verbose\", default=1,\n type=int, help=\"log message level\")\n\n args = parser.parse_args()\n\n # set log level\n if args.verbose == 1:\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S',\n filename=args.expdir + 
\"/feature_extract.log\")\n logging.getLogger().addHandler(logging.StreamHandler())\n elif args.verbose > 1:\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S',\n filename=args.expdir + \"/feature_extract.log\")\n logging.getLogger().addHandler(logging.StreamHandler())\n else:\n logging.basicConfig(level=logging.WARN,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S',\n filename=args.expdir + \"/feature_extract.log\")\n logging.getLogger().addHandler(logging.StreamHandler())\n logging.warn(\"logging is disabled.\")\n\n # read list\n if os.path.isdir(args.waveforms):\n file_list = sorted(find_files(args.waveforms, \"*.wav\"))\n else:\n file_list = read_txt(args.waveforms)\n\n # check directory existence\n if (args.wavdir is not None) and (not os.path.exists(args.wavdir)):\n os.makedirs(args.wavdir)\n if (args.wavanasyndir is not None) and (not os.path.exists(args.wavanasyndir)):\n os.makedirs(args.wavanasyndir)\n if not os.path.exists(args.hdf5dir):\n os.makedirs(args.hdf5dir)\n\n def feature_extract(wav_list, arr):\n n_wav = len(wav_list)\n n_sample = 0\n n_frame = 0\n count = 1\n max_frame = 0\n for wav_name in wav_list:\n # load wavfile and highpass-filter\n fs, x = read_wav(wav_name, cutoff=args.highpass_cutoff)\n n_sample += x.shape[0]\n logging.info(wav_name+\" \"+str(x.shape[0])+\" \"+str(n_sample)+\" \"+str(count))\n\n # check sampling frequency\n if not fs == args.fs:\n logging.debug(\"ERROR: sampling frequency is not matched.\")\n sys.exit(1)\n\n hdf5name = args.hdf5dir + \"/\" + os.path.basename(wav_name).replace(\".wav\", \".h5\")\n logging.info(hdf5name)\n\n if not args.init:\n _, f0, spc, ap = analyze_range(x, fs=fs, minf0=args.minf0, maxf0=args.maxf0, \\\n fperiod=args.shiftms, fftl=args.fftl)\n # concatenate\n uv, cont_f0 = convert_continuos_f0(np.array(f0))\n cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)\n codeap = pw.code_aperiodicity(ap, fs)\n #logging.info(codeap)\n logging.info(codeap.shape)\n mcep = ps.sp2mc(spc, args.mcep_dim, args.mcep_alpha)\n cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)\n uv = np.expand_dims(uv, axis=-1)\n log_contf0_lpf = np.log(cont_f0_lpf)\n feats_lf0 = np.concatenate([uv, log_contf0_lpf, codeap, mcep], axis=1)\n logging.info(feats_lf0.shape)\n\n write_hdf5(hdf5name, \"/feat_org_lf0\", feats_lf0)\n n_frame += feats_lf0.shape[0]\n if max_frame < feats_lf0.shape[0]:\n max_frame = feats_lf0.shape[0]\n\n # overwrite wav file\n if args.highpass_cutoff != 0:\n #wavfile.write(args.wavdir + \"/\" + os.path.basename(wav_name), fs, np.int16(x))\n sf.write(args.wavdir + \"/\" + os.path.basename(wav_name), x, fs, 'PCM_16')\n wavpath = args.wavanasyndir + \"/\" + os.path.basename(wav_name)\n logging.info(wavpath)\n sp_rec = ps.mc2sp(mcep, args.mcep_alpha, args.fftl)\n #wav = np.clip(pw.synthesize(f0, sp_rec, ap, fs, frame_period=args.shiftms), -32768, 32767)\n wav = np.clip(pw.synthesize(f0, sp_rec, ap, fs, frame_period=args.shiftms), -1, 1)\n #wavfile.write(wavpath, fs, np.int16(wav))\n sf.write(wavpath, wav, fs, 'PCM_16')\n else:\n _, f0, _, _ = analyze(x, fs=fs, fperiod=args.shiftms, fftl=args.fftl)\n write_hdf5(hdf5name, \"/f0\", f0)\n n_frame += f0.shape[0]\n if max_frame < f0.shape[0]:\n max_frame = f0.shape[0]\n\n count += 1\n arr[0] += n_wav\n arr[1] += n_sample\n arr[2] += n_frame\n if (n_wav > 0):\n logging.info(str(arr[0])+\" \"+str(n_wav)+\" 
\"+str(arr[1])+\" \"+str(n_sample/n_wav)+\" \"+str(arr[2])\\\n +\" \"+str(n_frame/n_wav)+\" max_frame = \"+str(max_frame))\n\n # divie list\n file_lists = np.array_split(file_list, args.n_jobs)\n file_lists = [f_list.tolist() for f_list in file_lists]\n\n # multi processing\n processes = []\n arr = mp.Array('d', 3)\n #logging.info(arr[:])\n for f in file_lists:\n p = mp.Process(target=feature_extract, args=(f,arr))\n p.start()\n processes.append(p)\n\n # wait for all process\n for p in processes:\n p.join()\n\n logging.info(str(arr[0])+\" \"+str(arr[1])+\" \"+str(arr[1]/arr[0])+\" \"+str(arr[2])+\" \"+str(arr[2]/arr[0]))\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.log",
"numpy.expand_dims",
"numpy.pad",
"numpy.arange",
"numpy.set_printoptions",
"numpy.concatenate",
"scipy.interpolate.interp1d",
"numpy.float32",
"scipy.signal.lfilter",
"numpy.array",
"numpy.array_split",
"numpy.where",
"scipy.signal.firwin"
]
] |
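The record above embeds a WORLD/pysptk feature-extraction script. The following is a minimal, self-contained sketch of two of its building blocks (the low-cut FIR filter and the continuous-F0 interpolation), assuming only numpy and scipy are installed; the test waveform and F0 values are made up for illustration.

    import numpy as np
    from scipy.signal import firwin, lfilter
    from scipy.interpolate import interp1d

    def low_cut_filter(x, fs, cutoff=70):
        # 255-tap FIR high-pass filter; pass_zero=False removes the band below cutoff
        norm_cutoff = cutoff / (fs // 2)
        fil = firwin(255, norm_cutoff, pass_zero=False)
        return lfilter(fil, 1, x)

    def continuous_f0(f0):
        # mark voiced/unvoiced frames, then fill unvoiced gaps by linear interpolation
        f0 = f0.copy()
        uv = np.float32(f0 != 0)
        nz = np.where(f0 != 0)[0]
        f0[:nz[0]] = f0[nz[0]]          # pad leading/trailing unvoiced frames
        f0[nz[-1]:] = f0[nz[-1]]
        nz = np.where(f0 != 0)[0]
        cont = interp1d(nz, f0[nz])(np.arange(len(f0)))
        return uv, cont

    fs = 24000
    x = np.random.randn(fs)             # one second of noise as a stand-in waveform
    print(low_cut_filter(x, fs).shape)
    uv, cont = continuous_f0(np.array([0.0, 0.0, 120.0, 0.0, 130.0, 125.0, 0.0]))
    print(uv, cont)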
yfletberliac/ray
|
[
"a77d9c228a6891b304e789ba2758a4cbfdb75ec0"
] |
[
"python/ray/tune/test/trial_scheduler_test.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport unittest\nimport numpy as np\n\nimport ray\nfrom ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,\n PopulationBasedTraining, MedianStoppingRule,\n TrialScheduler)\nfrom ray.tune.schedulers.pbt import explore\nfrom ray.tune.trial import Trial, Resources, Checkpoint\nfrom ray.tune.trial_executor import TrialExecutor\n\nfrom ray.rllib import _register_all\n_register_all()\n\n\ndef result(t, rew):\n return dict(\n time_total_s=t, episode_reward_mean=rew, training_iteration=int(t))\n\n\nclass EarlyStoppingSuite(unittest.TestCase):\n def setUp(self):\n ray.init()\n\n def tearDown(self):\n ray.shutdown()\n _register_all() # re-register the evicted objects\n\n def basicSetup(self, rule):\n t1 = Trial(\"PPO\") # mean is 450, max 900, t_max=10\n t2 = Trial(\"PPO\") # mean is 450, max 450, t_max=5\n for i in range(10):\n self.assertEqual(\n rule.on_trial_result(None, t1, result(i, i * 100)),\n TrialScheduler.CONTINUE)\n for i in range(5):\n self.assertEqual(\n rule.on_trial_result(None, t2, result(i, 450)),\n TrialScheduler.CONTINUE)\n return t1, t2\n\n def testMedianStoppingConstantPerf(self):\n rule = MedianStoppingRule(grace_period=0, min_samples_required=1)\n t1, t2 = self.basicSetup(rule)\n rule.on_trial_complete(None, t1, result(10, 1000))\n self.assertEqual(\n rule.on_trial_result(None, t2, result(5, 450)),\n TrialScheduler.CONTINUE)\n self.assertEqual(\n rule.on_trial_result(None, t2, result(6, 0)),\n TrialScheduler.CONTINUE)\n self.assertEqual(\n rule.on_trial_result(None, t2, result(10, 450)),\n TrialScheduler.STOP)\n\n def testMedianStoppingOnCompleteOnly(self):\n rule = MedianStoppingRule(grace_period=0, min_samples_required=1)\n t1, t2 = self.basicSetup(rule)\n self.assertEqual(\n rule.on_trial_result(None, t2, result(100, 0)),\n TrialScheduler.CONTINUE)\n rule.on_trial_complete(None, t1, result(10, 1000))\n self.assertEqual(\n rule.on_trial_result(None, t2, result(101, 0)),\n TrialScheduler.STOP)\n\n def testMedianStoppingGracePeriod(self):\n rule = MedianStoppingRule(grace_period=2.5, min_samples_required=1)\n t1, t2 = self.basicSetup(rule)\n rule.on_trial_complete(None, t1, result(10, 1000))\n rule.on_trial_complete(None, t2, result(10, 1000))\n t3 = Trial(\"PPO\")\n self.assertEqual(\n rule.on_trial_result(None, t3, result(1, 10)),\n TrialScheduler.CONTINUE)\n self.assertEqual(\n rule.on_trial_result(None, t3, result(2, 10)),\n TrialScheduler.CONTINUE)\n self.assertEqual(\n rule.on_trial_result(None, t3, result(3, 10)), TrialScheduler.STOP)\n\n def testMedianStoppingMinSamples(self):\n rule = MedianStoppingRule(grace_period=0, min_samples_required=2)\n t1, t2 = self.basicSetup(rule)\n rule.on_trial_complete(None, t1, result(10, 1000))\n t3 = Trial(\"PPO\")\n self.assertEqual(\n rule.on_trial_result(None, t3, result(3, 10)),\n TrialScheduler.CONTINUE)\n rule.on_trial_complete(None, t2, result(10, 1000))\n self.assertEqual(\n rule.on_trial_result(None, t3, result(3, 10)), TrialScheduler.STOP)\n\n def testMedianStoppingUsesMedian(self):\n rule = MedianStoppingRule(grace_period=0, min_samples_required=1)\n t1, t2 = self.basicSetup(rule)\n rule.on_trial_complete(None, t1, result(10, 1000))\n rule.on_trial_complete(None, t2, result(10, 1000))\n t3 = Trial(\"PPO\")\n self.assertEqual(\n rule.on_trial_result(None, t3, result(1, 260)),\n TrialScheduler.CONTINUE)\n self.assertEqual(\n rule.on_trial_result(None, t3, result(2, 260)),\n 
TrialScheduler.STOP)\n\n def testMedianStoppingSoftStop(self):\n rule = MedianStoppingRule(\n grace_period=0, min_samples_required=1, hard_stop=False)\n t1, t2 = self.basicSetup(rule)\n rule.on_trial_complete(None, t1, result(10, 1000))\n rule.on_trial_complete(None, t2, result(10, 1000))\n t3 = Trial(\"PPO\")\n self.assertEqual(\n rule.on_trial_result(None, t3, result(1, 260)),\n TrialScheduler.CONTINUE)\n self.assertEqual(\n rule.on_trial_result(None, t3, result(2, 260)),\n TrialScheduler.PAUSE)\n\n def testAlternateMetrics(self):\n def result2(t, rew):\n return dict(training_iteration=t, neg_mean_loss=rew)\n\n rule = MedianStoppingRule(\n grace_period=0,\n min_samples_required=1,\n time_attr='training_iteration',\n reward_attr='neg_mean_loss')\n t1 = Trial(\"PPO\") # mean is 450, max 900, t_max=10\n t2 = Trial(\"PPO\") # mean is 450, max 450, t_max=5\n for i in range(10):\n self.assertEqual(\n rule.on_trial_result(None, t1, result2(i, i * 100)),\n TrialScheduler.CONTINUE)\n for i in range(5):\n self.assertEqual(\n rule.on_trial_result(None, t2, result2(i, 450)),\n TrialScheduler.CONTINUE)\n rule.on_trial_complete(None, t1, result2(10, 1000))\n self.assertEqual(\n rule.on_trial_result(None, t2, result2(5, 450)),\n TrialScheduler.CONTINUE)\n self.assertEqual(\n rule.on_trial_result(None, t2, result2(6, 0)),\n TrialScheduler.CONTINUE)\n\n\nclass _MockTrialExecutor(TrialExecutor):\n def start_trial(self, trial, checkpoint_obj=None):\n trial.logger_running = True\n trial.restored_checkpoint = checkpoint_obj.value\n trial.status = Trial.RUNNING\n\n def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):\n trial.status = Trial.ERROR if error else Trial.TERMINATED\n if stop_logger:\n trial.logger_running = False\n\n def restore(self, trial, checkpoint=None):\n pass\n\n def save(self, trial, type=Checkpoint.DISK):\n return trial.trainable_name\n\n def reset_trial(self, trial, new_config, new_experiment_tag):\n return False\n\n\nclass _MockTrialRunner():\n def __init__(self, scheduler):\n self._scheduler_alg = scheduler\n self.trials = []\n self.trial_executor = _MockTrialExecutor()\n\n def process_action(self, trial, action):\n if action == TrialScheduler.CONTINUE:\n pass\n elif action == TrialScheduler.PAUSE:\n self._pause_trial(trial)\n elif action == TrialScheduler.STOP:\n self.trial_executor.stop_trial(trial)\n\n def stop_trial(self, trial):\n if trial.status in [Trial.ERROR, Trial.TERMINATED]:\n return\n elif trial.status in [Trial.PENDING, Trial.PAUSED]:\n self._scheduler_alg.on_trial_remove(self, trial)\n else:\n self._scheduler_alg.on_trial_complete(self, trial, result(100, 10))\n\n def add_trial(self, trial):\n self.trials.append(trial)\n self._scheduler_alg.on_trial_add(self, trial)\n\n def get_trials(self):\n return self.trials\n\n def has_resources(self, resources):\n return True\n\n def _pause_trial(self, trial):\n trial.status = Trial.PAUSED\n\n def _launch_trial(self, trial):\n trial.status = Trial.RUNNING\n\n\nclass HyperbandSuite(unittest.TestCase):\n def setUp(self):\n ray.init()\n\n def tearDown(self):\n ray.shutdown()\n _register_all() # re-register the evicted objects\n\n def schedulerSetup(self, num_trials, max_t=81):\n \"\"\"Setup a scheduler and Runner with max Iter = 9.\n\n Bracketing is placed as follows:\n (5, 81);\n (8, 27) -> (3, 54);\n (15, 9) -> (5, 27) -> (2, 45);\n (34, 3) -> (12, 9) -> (4, 27) -> (2, 42);\n (81, 1) -> (27, 3) -> (9, 9) -> (3, 27) -> (1, 41);\"\"\"\n sched = HyperBandScheduler(max_t=max_t)\n for i in range(num_trials):\n t = 
Trial(\"__fake\")\n sched.on_trial_add(None, t)\n runner = _MockTrialRunner(sched)\n return sched, runner\n\n def default_statistics(self):\n \"\"\"Default statistics for HyperBand.\"\"\"\n sched = HyperBandScheduler()\n res = {\n str(s): {\n \"n\": sched._get_n0(s),\n \"r\": sched._get_r0(s)\n }\n for s in range(sched._s_max_1)\n }\n res[\"max_trials\"] = sum(v[\"n\"] for v in res.values())\n res[\"brack_count\"] = sched._s_max_1\n res[\"s_max\"] = sched._s_max_1 - 1\n return res\n\n def downscale(self, n, sched):\n return int(np.ceil(n / sched._eta))\n\n def basicSetup(self):\n \"\"\"Setup and verify full band.\"\"\"\n\n stats = self.default_statistics()\n sched, _ = self.schedulerSetup(stats[\"max_trials\"])\n\n self.assertEqual(len(sched._hyperbands), 1)\n self.assertEqual(sched._cur_band_filled(), True)\n\n filled_band = sched._hyperbands[0]\n for bracket in filled_band:\n self.assertEqual(bracket.filled(), True)\n return sched\n\n def advancedSetup(self):\n sched = self.basicSetup()\n for i in range(4):\n t = Trial(\"__fake\")\n sched.on_trial_add(None, t)\n\n self.assertEqual(sched._cur_band_filled(), False)\n\n unfilled_band = sched._hyperbands[-1]\n self.assertEqual(len(unfilled_band), 2)\n bracket = unfilled_band[-1]\n self.assertEqual(bracket.filled(), False)\n self.assertEqual(len(bracket.current_trials()), 7)\n\n return sched\n\n def testConfigSameEta(self):\n sched = HyperBandScheduler()\n i = 0\n while not sched._cur_band_filled():\n t = Trial(\"__fake\")\n sched.on_trial_add(None, t)\n i += 1\n self.assertEqual(len(sched._hyperbands[0]), 5)\n self.assertEqual(sched._hyperbands[0][0]._n, 5)\n self.assertEqual(sched._hyperbands[0][0]._r, 81)\n self.assertEqual(sched._hyperbands[0][-1]._n, 81)\n self.assertEqual(sched._hyperbands[0][-1]._r, 1)\n\n sched = HyperBandScheduler(max_t=810)\n i = 0\n while not sched._cur_band_filled():\n t = Trial(\"__fake\")\n sched.on_trial_add(None, t)\n i += 1\n self.assertEqual(len(sched._hyperbands[0]), 5)\n self.assertEqual(sched._hyperbands[0][0]._n, 5)\n self.assertEqual(sched._hyperbands[0][0]._r, 810)\n self.assertEqual(sched._hyperbands[0][-1]._n, 81)\n self.assertEqual(sched._hyperbands[0][-1]._r, 10)\n\n def testConfigSameEtaSmall(self):\n sched = HyperBandScheduler(max_t=1)\n i = 0\n while len(sched._hyperbands) < 2:\n t = Trial(\"__fake\")\n sched.on_trial_add(None, t)\n i += 1\n self.assertEqual(len(sched._hyperbands[0]), 5)\n self.assertTrue(all(v is None for v in sched._hyperbands[0][1:]))\n\n def testSuccessiveHalving(self):\n \"\"\"Setup full band, then iterate through last bracket (n=81)\n to make sure successive halving is correct.\"\"\"\n\n stats = self.default_statistics()\n sched, mock_runner = self.schedulerSetup(stats[\"max_trials\"])\n big_bracket = sched._state[\"bracket\"]\n cur_units = stats[str(stats[\"s_max\"])][\"r\"]\n # The last bracket will downscale 4 times\n for x in range(stats[\"brack_count\"] - 1):\n trials = big_bracket.current_trials()\n current_length = len(trials)\n for trl in trials:\n mock_runner._launch_trial(trl)\n\n # Provides results from 0 to 8 in order, keeping last one running\n for i, trl in enumerate(trials):\n action = sched.on_trial_result(mock_runner, trl,\n result(cur_units, i))\n if i < current_length - 1:\n self.assertEqual(action, TrialScheduler.PAUSE)\n mock_runner.process_action(trl, action)\n\n self.assertEqual(action, TrialScheduler.CONTINUE)\n new_length = len(big_bracket.current_trials())\n self.assertEqual(new_length, self.downscale(current_length, sched))\n cur_units += 
int(cur_units * sched._eta)\n self.assertEqual(len(big_bracket.current_trials()), 1)\n\n def testHalvingStop(self):\n stats = self.default_statistics()\n num_trials = stats[str(0)][\"n\"] + stats[str(1)][\"n\"]\n sched, mock_runner = self.schedulerSetup(num_trials)\n big_bracket = sched._state[\"bracket\"]\n for trl in big_bracket.current_trials():\n mock_runner._launch_trial(trl)\n\n # # Provides result in reverse order, killing the last one\n cur_units = stats[str(1)][\"r\"]\n for i, trl in reversed(list(enumerate(big_bracket.current_trials()))):\n action = sched.on_trial_result(mock_runner, trl,\n result(cur_units, i))\n mock_runner.process_action(trl, action)\n\n self.assertEqual(action, TrialScheduler.STOP)\n\n def testStopsLastOne(self):\n stats = self.default_statistics()\n num_trials = stats[str(0)][\"n\"] # setup one bracket\n sched, mock_runner = self.schedulerSetup(num_trials)\n big_bracket = sched._state[\"bracket\"]\n for trl in big_bracket.current_trials():\n mock_runner._launch_trial(trl)\n\n # # Provides result in reverse order, killing the last one\n cur_units = stats[str(0)][\"r\"]\n for i, trl in enumerate(big_bracket.current_trials()):\n action = sched.on_trial_result(mock_runner, trl,\n result(cur_units, i))\n mock_runner.process_action(trl, action)\n\n self.assertEqual(action, TrialScheduler.STOP)\n\n def testTrialErrored(self):\n \"\"\"If a trial errored, make sure successive halving still happens\"\"\"\n\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + 3\n sched, mock_runner = self.schedulerSetup(trial_count)\n t1, t2, t3 = sched._state[\"bracket\"].current_trials()\n for t in [t1, t2, t3]:\n mock_runner._launch_trial(t)\n\n sched.on_trial_error(mock_runner, t3)\n self.assertEqual(\n TrialScheduler.PAUSE,\n sched.on_trial_result(mock_runner, t1,\n result(stats[str(1)][\"r\"], 10)))\n self.assertEqual(\n TrialScheduler.CONTINUE,\n sched.on_trial_result(mock_runner, t2,\n result(stats[str(1)][\"r\"], 10)))\n\n def testTrialErrored2(self):\n \"\"\"Check successive halving happened even when last trial failed\"\"\"\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + stats[str(1)][\"n\"]\n sched, mock_runner = self.schedulerSetup(trial_count)\n trials = sched._state[\"bracket\"].current_trials()\n for t in trials[:-1]:\n mock_runner._launch_trial(t)\n sched.on_trial_result(mock_runner, t, result(\n stats[str(1)][\"r\"], 10))\n\n mock_runner._launch_trial(trials[-1])\n sched.on_trial_error(mock_runner, trials[-1])\n self.assertEqual(\n len(sched._state[\"bracket\"].current_trials()),\n self.downscale(stats[str(1)][\"n\"], sched))\n\n def testTrialEndedEarly(self):\n \"\"\"Check successive halving happened even when one trial failed\"\"\"\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + 3\n sched, mock_runner = self.schedulerSetup(trial_count)\n\n t1, t2, t3 = sched._state[\"bracket\"].current_trials()\n for t in [t1, t2, t3]:\n mock_runner._launch_trial(t)\n\n sched.on_trial_complete(mock_runner, t3, result(1, 12))\n self.assertEqual(\n TrialScheduler.PAUSE,\n sched.on_trial_result(mock_runner, t1,\n result(stats[str(1)][\"r\"], 10)))\n self.assertEqual(\n TrialScheduler.CONTINUE,\n sched.on_trial_result(mock_runner, t2,\n result(stats[str(1)][\"r\"], 10)))\n\n def testTrialEndedEarly2(self):\n \"\"\"Check successive halving happened even when last trial failed\"\"\"\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + stats[str(1)][\"n\"]\n sched, mock_runner = 
self.schedulerSetup(trial_count)\n trials = sched._state[\"bracket\"].current_trials()\n for t in trials[:-1]:\n mock_runner._launch_trial(t)\n sched.on_trial_result(mock_runner, t, result(\n stats[str(1)][\"r\"], 10))\n\n mock_runner._launch_trial(trials[-1])\n sched.on_trial_complete(mock_runner, trials[-1], result(100, 12))\n self.assertEqual(\n len(sched._state[\"bracket\"].current_trials()),\n self.downscale(stats[str(1)][\"n\"], sched))\n\n def testAddAfterHalving(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + 1\n sched, mock_runner = self.schedulerSetup(trial_count)\n bracket_trials = sched._state[\"bracket\"].current_trials()\n init_units = stats[str(1)][\"r\"]\n\n for t in bracket_trials:\n mock_runner._launch_trial(t)\n\n for i, t in enumerate(bracket_trials):\n action = sched.on_trial_result(mock_runner, t, result(\n init_units, i))\n self.assertEqual(action, TrialScheduler.CONTINUE)\n t = Trial(\"__fake\")\n sched.on_trial_add(None, t)\n mock_runner._launch_trial(t)\n self.assertEqual(len(sched._state[\"bracket\"].current_trials()), 2)\n\n # Make sure that newly added trial gets fair computation (not just 1)\n self.assertEqual(\n TrialScheduler.CONTINUE,\n sched.on_trial_result(mock_runner, t, result(init_units, 12)))\n new_units = init_units + int(init_units * sched._eta)\n self.assertEqual(\n TrialScheduler.PAUSE,\n sched.on_trial_result(mock_runner, t, result(new_units, 12)))\n\n def testAlternateMetrics(self):\n \"\"\"Checking that alternate metrics will pass.\"\"\"\n\n def result2(t, rew):\n return dict(time_total_s=t, neg_mean_loss=rew)\n\n sched = HyperBandScheduler(\n time_attr='time_total_s', reward_attr='neg_mean_loss')\n stats = self.default_statistics()\n\n for i in range(stats[\"max_trials\"]):\n t = Trial(\"__fake\")\n sched.on_trial_add(None, t)\n runner = _MockTrialRunner(sched)\n\n big_bracket = sched._hyperbands[0][-1]\n\n for trl in big_bracket.current_trials():\n runner._launch_trial(trl)\n current_length = len(big_bracket.current_trials())\n\n # Provides results from 0 to 8 in order, keeping the last one running\n for i, trl in enumerate(big_bracket.current_trials()):\n action = sched.on_trial_result(runner, trl, result2(1, i))\n runner.process_action(trl, action)\n\n new_length = len(big_bracket.current_trials())\n self.assertEqual(action, TrialScheduler.CONTINUE)\n self.assertEqual(new_length, self.downscale(current_length, sched))\n\n def testJumpingTime(self):\n sched, mock_runner = self.schedulerSetup(81)\n big_bracket = sched._hyperbands[0][-1]\n\n for trl in big_bracket.current_trials():\n mock_runner._launch_trial(trl)\n\n # Provides results from 0 to 8 in order, keeping the last one running\n main_trials = big_bracket.current_trials()[:-1]\n jump = big_bracket.current_trials()[-1]\n for i, trl in enumerate(main_trials):\n action = sched.on_trial_result(mock_runner, trl, result(1, i))\n mock_runner.process_action(trl, action)\n\n action = sched.on_trial_result(mock_runner, jump, result(4, i))\n self.assertEqual(action, TrialScheduler.PAUSE)\n\n current_length = len(big_bracket.current_trials())\n self.assertLess(current_length, 27)\n\n def testRemove(self):\n \"\"\"Test with 4: start 1, remove 1 pending, add 2, remove 1 pending.\"\"\"\n sched, runner = self.schedulerSetup(4)\n trials = sorted(list(sched._trial_info), key=lambda t: t.trial_id)\n runner._launch_trial(trials[0])\n sched.on_trial_result(runner, trials[0], result(1, 5))\n self.assertEqual(trials[0].status, Trial.RUNNING)\n 
self.assertEqual(trials[1].status, Trial.PENDING)\n\n bracket, _ = sched._trial_info[trials[1]]\n self.assertTrue(trials[1] in bracket._live_trials)\n sched.on_trial_remove(runner, trials[1])\n self.assertFalse(trials[1] in bracket._live_trials)\n\n for i in range(2):\n trial = Trial(\"__fake\")\n sched.on_trial_add(None, trial)\n\n bracket, _ = sched._trial_info[trial]\n self.assertTrue(trial in bracket._live_trials)\n sched.on_trial_remove(runner, trial) # where trial is not running\n self.assertFalse(trial in bracket._live_trials)\n\n def testFilterNoneBracket(self):\n sched, runner = self.schedulerSetup(100, 20)\n # `sched' should contains None brackets\n non_brackets = [\n b for hyperband in sched._hyperbands for b in hyperband\n if b is None\n ]\n self.assertTrue(non_brackets)\n # Make sure `choose_trial_to_run' still works\n trial = sched.choose_trial_to_run(runner)\n self.assertIsNotNone(trial)\n\n\nclass _MockTrial(Trial):\n def __init__(self, i, config):\n self.trainable_name = \"trial_{}\".format(i)\n self.config = config\n self.experiment_tag = \"tag\"\n self.logger_running = False\n self.restored_checkpoint = None\n self.resources = Resources(1, 0)\n\n\nclass PopulationBasedTestingSuite(unittest.TestCase):\n def setUp(self):\n ray.init()\n\n def tearDown(self):\n ray.shutdown()\n _register_all() # re-register the evicted objects\n\n def basicSetup(self, resample_prob=0.0, explore=None):\n pbt = PopulationBasedTraining(\n time_attr=\"training_iteration\",\n perturbation_interval=10,\n resample_probability=resample_prob,\n hyperparam_mutations={\n \"id_factor\": [100],\n \"float_factor\": lambda: 100.0,\n \"int_factor\": lambda: 10,\n },\n custom_explore_fn=explore)\n runner = _MockTrialRunner(pbt)\n for i in range(5):\n trial = _MockTrial(\n i, {\n \"id_factor\": i,\n \"float_factor\": 2.0,\n \"const_factor\": 3,\n \"int_factor\": 10\n })\n runner.add_trial(trial)\n trial.status = Trial.RUNNING\n self.assertEqual(\n pbt.on_trial_result(runner, trial, result(10, 50 * i)),\n TrialScheduler.CONTINUE)\n pbt.reset_stats()\n return pbt, runner\n\n def testCheckpointsMostPromisingTrials(self):\n pbt, runner = self.basicSetup()\n trials = runner.get_trials()\n\n # no checkpoint: haven't hit next perturbation interval yet\n self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])\n self.assertEqual(\n pbt.on_trial_result(runner, trials[0], result(15, 200)),\n TrialScheduler.CONTINUE)\n self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])\n self.assertEqual(pbt._num_checkpoints, 0)\n\n # checkpoint: both past interval and upper quantile\n self.assertEqual(\n pbt.on_trial_result(runner, trials[0], result(20, 200)),\n TrialScheduler.CONTINUE)\n self.assertEqual(pbt.last_scores(trials), [200, 50, 100, 150, 200])\n self.assertEqual(pbt._num_checkpoints, 1)\n self.assertEqual(\n pbt.on_trial_result(runner, trials[1], result(30, 201)),\n TrialScheduler.CONTINUE)\n self.assertEqual(pbt.last_scores(trials), [200, 201, 100, 150, 200])\n self.assertEqual(pbt._num_checkpoints, 2)\n\n # not upper quantile any more\n self.assertEqual(\n pbt.on_trial_result(runner, trials[4], result(30, 199)),\n TrialScheduler.CONTINUE)\n self.assertEqual(pbt._num_checkpoints, 2)\n self.assertEqual(pbt._num_perturbations, 0)\n\n def testPerturbsLowPerformingTrials(self):\n pbt, runner = self.basicSetup()\n trials = runner.get_trials()\n\n # no perturbation: haven't hit next perturbation interval\n self.assertEqual(\n pbt.on_trial_result(runner, trials[0], result(15, -100)),\n 
TrialScheduler.CONTINUE)\n self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])\n self.assertTrue(\"@perturbed\" not in trials[0].experiment_tag)\n self.assertEqual(pbt._num_perturbations, 0)\n\n # perturb since it's lower quantile\n self.assertEqual(\n pbt.on_trial_result(runner, trials[0], result(20, -100)),\n TrialScheduler.CONTINUE)\n self.assertEqual(pbt.last_scores(trials), [-100, 50, 100, 150, 200])\n self.assertTrue(\"@perturbed\" in trials[0].experiment_tag)\n self.assertIn(trials[0].restored_checkpoint, [\"trial_3\", \"trial_4\"])\n self.assertEqual(pbt._num_perturbations, 1)\n\n # also perturbed\n self.assertEqual(\n pbt.on_trial_result(runner, trials[2], result(20, 40)),\n TrialScheduler.CONTINUE)\n self.assertEqual(pbt.last_scores(trials), [-100, 50, 40, 150, 200])\n self.assertEqual(pbt._num_perturbations, 2)\n self.assertIn(trials[0].restored_checkpoint, [\"trial_3\", \"trial_4\"])\n self.assertTrue(\"@perturbed\" in trials[2].experiment_tag)\n\n def testPerturbWithoutResample(self):\n pbt, runner = self.basicSetup(resample_prob=0.0)\n trials = runner.get_trials()\n self.assertEqual(\n pbt.on_trial_result(runner, trials[0], result(20, -100)),\n TrialScheduler.CONTINUE)\n self.assertIn(trials[0].restored_checkpoint, [\"trial_3\", \"trial_4\"])\n self.assertIn(trials[0].config[\"id_factor\"], [100])\n self.assertIn(trials[0].config[\"float_factor\"], [2.4, 1.6])\n self.assertEqual(type(trials[0].config[\"float_factor\"]), float)\n self.assertIn(trials[0].config[\"int_factor\"], [8, 12])\n self.assertEqual(type(trials[0].config[\"int_factor\"]), int)\n self.assertEqual(trials[0].config[\"const_factor\"], 3)\n\n def testPerturbWithResample(self):\n pbt, runner = self.basicSetup(resample_prob=1.0)\n trials = runner.get_trials()\n self.assertEqual(\n pbt.on_trial_result(runner, trials[0], result(20, -100)),\n TrialScheduler.CONTINUE)\n self.assertIn(trials[0].restored_checkpoint, [\"trial_3\", \"trial_4\"])\n self.assertEqual(trials[0].config[\"id_factor\"], 100)\n self.assertEqual(trials[0].config[\"float_factor\"], 100.0)\n self.assertEqual(type(trials[0].config[\"float_factor\"]), float)\n self.assertEqual(trials[0].config[\"int_factor\"], 10)\n self.assertEqual(type(trials[0].config[\"int_factor\"]), int)\n self.assertEqual(trials[0].config[\"const_factor\"], 3)\n\n def testPerturbationValues(self):\n def assertProduces(fn, values):\n random.seed(0)\n seen = set()\n for _ in range(100):\n seen.add(fn()[\"v\"])\n self.assertEqual(seen, values)\n\n # Categorical case\n assertProduces(\n lambda: explore({\"v\": 4}, {\"v\": [3, 4, 8, 10]}, 0.0, lambda x: x),\n {3, 8})\n assertProduces(\n lambda: explore({\"v\": 3}, {\"v\": [3, 4, 8, 10]}, 0.0, lambda x: x),\n {3, 4})\n assertProduces(\n lambda: explore({\"v\": 10}, {\"v\": [3, 4, 8, 10]}, 0.0, lambda x: x),\n {8, 10})\n assertProduces(\n lambda: explore({\"v\": 7}, {\"v\": [3, 4, 8, 10]}, 0.0, lambda x: x),\n {3, 4, 8, 10})\n assertProduces(\n lambda: explore({\"v\": 4}, {\"v\": [3, 4, 8, 10]}, 1.0, lambda x: x),\n {3, 4, 8, 10})\n\n # Continuous case\n assertProduces(\n lambda: explore(\n {\"v\": 100}, {\"v\": lambda: random.choice([10, 100])}, 0.0,\n lambda x: x),\n {80, 120})\n assertProduces(\n lambda: explore(\n {\"v\": 100.0}, {\"v\": lambda: random.choice([10, 100])}, 0.0,\n lambda x: x),\n {80.0, 120.0})\n assertProduces(\n lambda: explore(\n {\"v\": 100.0}, {\"v\": lambda: random.choice([10, 100])}, 1.0,\n lambda x: x),\n {10.0, 100.0})\n\n def testYieldsTimeToOtherTrials(self):\n pbt, runner = 
self.basicSetup()\n trials = runner.get_trials()\n trials[0].status = Trial.PENDING # simulate not enough resources\n\n self.assertEqual(\n pbt.on_trial_result(runner, trials[1], result(20, 1000)),\n TrialScheduler.PAUSE)\n self.assertEqual(pbt.last_scores(trials), [0, 1000, 100, 150, 200])\n self.assertEqual(pbt.choose_trial_to_run(runner), trials[0])\n\n def testSchedulesMostBehindTrialToRun(self):\n pbt, runner = self.basicSetup()\n trials = runner.get_trials()\n pbt.on_trial_result(runner, trials[0], result(800, 1000))\n pbt.on_trial_result(runner, trials[1], result(700, 1001))\n pbt.on_trial_result(runner, trials[2], result(600, 1002))\n pbt.on_trial_result(runner, trials[3], result(500, 1003))\n pbt.on_trial_result(runner, trials[4], result(700, 1004))\n self.assertEqual(pbt.choose_trial_to_run(runner), None)\n for i in range(5):\n trials[i].status = Trial.PENDING\n self.assertEqual(pbt.choose_trial_to_run(runner), trials[3])\n\n def testPerturbationResetsLastPerturbTime(self):\n pbt, runner = self.basicSetup()\n trials = runner.get_trials()\n pbt.on_trial_result(runner, trials[0], result(10000, 1005))\n pbt.on_trial_result(runner, trials[1], result(10000, 1004))\n pbt.on_trial_result(runner, trials[2], result(600, 1003))\n self.assertEqual(pbt._num_perturbations, 0)\n pbt.on_trial_result(runner, trials[3], result(500, 1002))\n self.assertEqual(pbt._num_perturbations, 1)\n pbt.on_trial_result(runner, trials[3], result(600, 100))\n self.assertEqual(pbt._num_perturbations, 1)\n pbt.on_trial_result(runner, trials[3], result(11000, 100))\n self.assertEqual(pbt._num_perturbations, 2)\n\n def testPostprocessingHook(self):\n def explore(new_config):\n new_config[\"id_factor\"] = 42\n new_config[\"float_factor\"] = 43\n return new_config\n\n pbt, runner = self.basicSetup(resample_prob=0.0, explore=explore)\n trials = runner.get_trials()\n self.assertEqual(\n pbt.on_trial_result(runner, trials[0], result(20, -100)),\n TrialScheduler.CONTINUE)\n self.assertEqual(trials[0].config[\"id_factor\"], 42)\n self.assertEqual(trials[0].config[\"float_factor\"], 43)\n\n\nclass AsyncHyperBandSuite(unittest.TestCase):\n def setUp(self):\n ray.init()\n\n def tearDown(self):\n ray.shutdown()\n _register_all() # re-register the evicted objects\n\n def basicSetup(self, scheduler):\n t1 = Trial(\"PPO\") # mean is 450, max 900, t_max=10\n t2 = Trial(\"PPO\") # mean is 450, max 450, t_max=5\n scheduler.on_trial_add(None, t1)\n scheduler.on_trial_add(None, t2)\n for i in range(10):\n self.assertEqual(\n scheduler.on_trial_result(None, t1, result(i, i * 100)),\n TrialScheduler.CONTINUE)\n for i in range(5):\n self.assertEqual(\n scheduler.on_trial_result(None, t2, result(i, 450)),\n TrialScheduler.CONTINUE)\n return t1, t2\n\n def testAsyncHBOnComplete(self):\n scheduler = AsyncHyperBandScheduler(max_t=10, brackets=1)\n t1, t2 = self.basicSetup(scheduler)\n t3 = Trial(\"PPO\")\n scheduler.on_trial_add(None, t3)\n scheduler.on_trial_complete(None, t3, result(10, 1000))\n self.assertEqual(\n scheduler.on_trial_result(None, t2, result(101, 0)),\n TrialScheduler.STOP)\n\n def testAsyncHBGracePeriod(self):\n scheduler = AsyncHyperBandScheduler(\n grace_period=2.5, reduction_factor=3, brackets=1)\n t1, t2 = self.basicSetup(scheduler)\n scheduler.on_trial_complete(None, t1, result(10, 1000))\n scheduler.on_trial_complete(None, t2, result(10, 1000))\n t3 = Trial(\"PPO\")\n scheduler.on_trial_add(None, t3)\n self.assertEqual(\n scheduler.on_trial_result(None, t3, result(1, 10)),\n TrialScheduler.CONTINUE)\n 
self.assertEqual(\n scheduler.on_trial_result(None, t3, result(2, 10)),\n TrialScheduler.CONTINUE)\n self.assertEqual(\n scheduler.on_trial_result(None, t3, result(3, 10)),\n TrialScheduler.STOP)\n\n def testAsyncHBAllCompletes(self):\n scheduler = AsyncHyperBandScheduler(max_t=10, brackets=10)\n trials = [Trial(\"PPO\") for i in range(10)]\n for t in trials:\n scheduler.on_trial_add(None, t)\n\n for t in trials:\n self.assertEqual(\n scheduler.on_trial_result(None, t, result(10, -2)),\n TrialScheduler.STOP)\n\n def testAsyncHBUsesPercentile(self):\n scheduler = AsyncHyperBandScheduler(\n grace_period=1, max_t=10, reduction_factor=2, brackets=1)\n t1, t2 = self.basicSetup(scheduler)\n scheduler.on_trial_complete(None, t1, result(10, 1000))\n scheduler.on_trial_complete(None, t2, result(10, 1000))\n t3 = Trial(\"PPO\")\n scheduler.on_trial_add(None, t3)\n self.assertEqual(\n scheduler.on_trial_result(None, t3, result(1, 260)),\n TrialScheduler.STOP)\n self.assertEqual(\n scheduler.on_trial_result(None, t3, result(2, 260)),\n TrialScheduler.STOP)\n\n def testAlternateMetrics(self):\n def result2(t, rew):\n return dict(training_iteration=t, neg_mean_loss=rew)\n\n scheduler = AsyncHyperBandScheduler(\n grace_period=1,\n time_attr='training_iteration',\n reward_attr='neg_mean_loss',\n brackets=1)\n t1 = Trial(\"PPO\") # mean is 450, max 900, t_max=10\n t2 = Trial(\"PPO\") # mean is 450, max 450, t_max=5\n scheduler.on_trial_add(None, t1)\n scheduler.on_trial_add(None, t2)\n for i in range(10):\n self.assertEqual(\n scheduler.on_trial_result(None, t1, result2(i, i * 100)),\n TrialScheduler.CONTINUE)\n for i in range(5):\n self.assertEqual(\n scheduler.on_trial_result(None, t2, result2(i, 450)),\n TrialScheduler.CONTINUE)\n scheduler.on_trial_complete(None, t1, result2(10, 1000))\n self.assertEqual(\n scheduler.on_trial_result(None, t2, result2(5, 450)),\n TrialScheduler.CONTINUE)\n self.assertEqual(\n scheduler.on_trial_result(None, t2, result2(6, 0)),\n TrialScheduler.CONTINUE)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n"
] |
[
[
"numpy.ceil"
]
] |
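The Ray Tune test file above exercises HyperBand bracket bookkeeping; its only numpy call is numpy.ceil, used in the downscale helper. Below is a textbook sketch of the same arithmetic, not Ray's implementation, which reproduces the initial (n, r) pairs listed in the schedulerSetup docstring; later rungs in that docstring differ slightly because Ray caps the budget at max_t.

    import numpy as np

    def initial_bracket_sizes(max_t=81, eta=3):
        # s_max is the largest s with eta**s <= max_t (4 for max_t=81, eta=3)
        s_max = 0
        while eta ** (s_max + 1) <= max_t:
            s_max += 1
        sizes = []
        for s in range(s_max + 1):
            n0 = int(np.ceil((s_max + 1) * eta ** s / (s + 1)))  # initial trial count
            r0 = max_t // eta ** s                               # initial resource per trial
            sizes.append((n0, r0))
        return sizes

    def downscale(n, eta=3):
        # successive halving keeps roughly n / eta trials per rung, as in the tests
        return int(np.ceil(n / eta))

    print(initial_bracket_sizes())                     # [(5, 81), (8, 27), (15, 9), (34, 3), (81, 1)]
    print(downscale(81), downscale(27), downscale(9))  # 27 9 3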
cclauss/xmodaler
|
[
"3e128a816876988c5fb07d842fde4a140e699dde"
] |
[
"xmodaler/utils/colormap.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n\"\"\"\nAn awesome colormap for really neat visualizations.\nCopied from Detectron, and removed gray colors.\n\"\"\"\n\nimport numpy as np\n\n__all__ = [\"colormap\", \"random_color\"]\n\n# fmt: off\n# RGB:\n_COLORS = np.array(\n [\n 0.000, 0.447, 0.741,\n 0.850, 0.325, 0.098,\n 0.929, 0.694, 0.125,\n 0.494, 0.184, 0.556,\n 0.466, 0.674, 0.188,\n 0.301, 0.745, 0.933,\n 0.635, 0.078, 0.184,\n 0.300, 0.300, 0.300,\n 0.600, 0.600, 0.600,\n 1.000, 0.000, 0.000,\n 1.000, 0.500, 0.000,\n 0.749, 0.749, 0.000,\n 0.000, 1.000, 0.000,\n 0.000, 0.000, 1.000,\n 0.667, 0.000, 1.000,\n 0.333, 0.333, 0.000,\n 0.333, 0.667, 0.000,\n 0.333, 1.000, 0.000,\n 0.667, 0.333, 0.000,\n 0.667, 0.667, 0.000,\n 0.667, 1.000, 0.000,\n 1.000, 0.333, 0.000,\n 1.000, 0.667, 0.000,\n 1.000, 1.000, 0.000,\n 0.000, 0.333, 0.500,\n 0.000, 0.667, 0.500,\n 0.000, 1.000, 0.500,\n 0.333, 0.000, 0.500,\n 0.333, 0.333, 0.500,\n 0.333, 0.667, 0.500,\n 0.333, 1.000, 0.500,\n 0.667, 0.000, 0.500,\n 0.667, 0.333, 0.500,\n 0.667, 0.667, 0.500,\n 0.667, 1.000, 0.500,\n 1.000, 0.000, 0.500,\n 1.000, 0.333, 0.500,\n 1.000, 0.667, 0.500,\n 1.000, 1.000, 0.500,\n 0.000, 0.333, 1.000,\n 0.000, 0.667, 1.000,\n 0.000, 1.000, 1.000,\n 0.333, 0.000, 1.000,\n 0.333, 0.333, 1.000,\n 0.333, 0.667, 1.000,\n 0.333, 1.000, 1.000,\n 0.667, 0.000, 1.000,\n 0.667, 0.333, 1.000,\n 0.667, 0.667, 1.000,\n 0.667, 1.000, 1.000,\n 1.000, 0.000, 1.000,\n 1.000, 0.333, 1.000,\n 1.000, 0.667, 1.000,\n 0.333, 0.000, 0.000,\n 0.500, 0.000, 0.000,\n 0.667, 0.000, 0.000,\n 0.833, 0.000, 0.000,\n 1.000, 0.000, 0.000,\n 0.000, 0.167, 0.000,\n 0.000, 0.333, 0.000,\n 0.000, 0.500, 0.000,\n 0.000, 0.667, 0.000,\n 0.000, 0.833, 0.000,\n 0.000, 1.000, 0.000,\n 0.000, 0.000, 0.167,\n 0.000, 0.000, 0.333,\n 0.000, 0.000, 0.500,\n 0.000, 0.000, 0.667,\n 0.000, 0.000, 0.833,\n 0.000, 0.000, 1.000,\n 0.000, 0.000, 0.000,\n 0.143, 0.143, 0.143,\n 0.857, 0.857, 0.857,\n 1.000, 1.000, 1.000\n ]\n).astype(np.float32).reshape(-1, 3)\n# fmt: on\n\n\ndef colormap(rgb=False, maximum=255):\n \"\"\"\n Args:\n rgb (bool): whether to return RGB colors or BGR colors.\n maximum (int): either 255 or 1\n\n Returns:\n ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]\n \"\"\"\n assert maximum in [255, 1], maximum\n c = _COLORS * maximum\n if not rgb:\n c = c[:, ::-1]\n return c\n\n\ndef random_color(rgb=False, maximum=255):\n \"\"\"\n Args:\n rgb (bool): whether to return RGB colors or BGR colors.\n maximum (int): either 255 or 1\n\n Returns:\n ndarray: a vector of 3 numbers\n \"\"\"\n idx = np.random.randint(0, len(_COLORS))\n ret = _COLORS[idx] * maximum\n if not rgb:\n ret = ret[::-1]\n return ret"
] |
[
[
"numpy.array"
]
] |
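A short usage sketch for the colormap helpers defined in the record above; the import path mirrors the listed file_path, and the class count is an arbitrary stand-in.

    import numpy as np
    from xmodaler.utils.colormap import colormap, random_color

    num_classes = 7                                    # arbitrary for the example
    palette = colormap(rgb=True, maximum=255)          # (N, 3) float32 array
    class_colors = palette[np.arange(num_classes) % len(palette)]
    print(class_colors.astype(int))

    # random_color is handy when class identity does not matter:
    print(random_color(rgb=True, maximum=1))           # one RGB triple in [0, 1]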
Roxot/AEVNMT
|
[
"5ebc6ec76b2c891ab76a5584a15e735145fdc43b"
] |
[
"nmt/joint/csimplejoint.py"
] |
[
"\"\"\"\n:Authors: - Bryan Eikema\n\"\"\"\n\nimport tensorflow as tf\n\nimport nmt.utils.misc_utils as utils\n\nfrom . import DSimpleJointModel\nfrom nmt import model_helper\nfrom .utils import language_model\nfrom nmt.utils.gaussianhelper import GaussianHelper\nfrom nmt.utils.amt_utils import enrich_embeddings_with_positions, self_attention_layer, diagonal_attention_coefficients\n\nclass CSimpleJointModel(DSimpleJointModel):\n\n def __init__(self, hparams, mode, iterator, source_vocab_table,\n target_vocab_table, reverse_target_vocab_table=None,\n scope=None, extra_args=None, no_summaries=False):\n\n super(CSimpleJointModel, self).__init__(hparams=hparams, mode=mode,\n iterator=iterator, source_vocab_table=source_vocab_table,\n target_vocab_table=target_vocab_table,\n reverse_target_vocab_table=reverse_target_vocab_table,\n scope=scope, extra_args=extra_args, no_summaries=True)\n\n # Set model specific training summaries.\n if self.mode == tf.contrib.learn.ModeKeys.TRAIN and not no_summaries:\n self.bi_summary = tf.summary.merge([\n self._base_summaries,\n self._supervised_tm_accuracy_summary,\n tf.summary.scalar(\"supervised_ELBO\", self._elbo),\n tf.summary.scalar(\"supervised_tm_loss\", self._tm_loss),\n tf.summary.scalar(\"supervised_lm_loss\", self._lm_loss)])\n self.mono_summary = tf.summary.merge([\n self._base_summaries,\n tf.summary.scalar(\"semi_supervised_tm_accuracy\", self._tm_accuracy),\n tf.summary.scalar(\"semi_supervised_ELBO\", self._elbo),\n tf.summary.scalar(\"semi_supervised_tm_loss\", self._tm_loss),\n tf.summary.scalar(\"semi_supervised_lm_loss\", self._lm_loss),\n tf.summary.scalar(\"semi_supervised_entropy\", self._entropy)])\n\n # Overrides DSimpleJointModel._source_embedding\n # We use pre-trained embeddings, thus don't do an embedding lookup.\n def _source_embedding(self, source):\n return source\n\n # Overrides DSimpleJointModel._parse_iterator\n # Returns word embeddings instead of one hot vectors.\n def _parse_iterator(self, iterator, hparams, scope=None):\n dtype = tf.float32\n with tf.variable_scope(scope or \"dynamic_seq2seq\", dtype=dtype):\n self.src_embed_size = self.embedding_encoder.shape[1]\n self.initializer = iterator.initializer\n self.mono_initializer = iterator.mono_initializer\n self.mono_batch = iterator.mono_batch\n\n # Change the data depending on what type of batch we're training on.\n self.target_input, self.target_output, self.target_sequence_length = tf.cond(\n self.mono_batch,\n true_fn=lambda: (iterator.mono_text_input, iterator.mono_text_output,\n iterator.mono_text_length),\n false_fn=lambda: (iterator.target_input, iterator.target_output,\n iterator.target_sequence_length))\n\n if self.mode != tf.contrib.learn.ModeKeys.INFER:\n self.batch_size = tf.size(self.target_sequence_length)\n else:\n self.batch_size = tf.size(iterator.source_sequence_length)\n\n self.source, self.source_output, self.source_sequence_length = tf.cond(\n self.mono_batch,\n true_fn=lambda: self._infer_source(iterator, hparams, embeddings=True),\n false_fn=lambda: (tf.nn.embedding_lookup(self.embedding_encoder,\n iterator.source),\n tf.nn.embedding_lookup(self.embedding_encoder,\n iterator.source_output),\n iterator.source_sequence_length))\n\n # Builds a Gaussian language model with fixed diagonal unit variance.\n # If z_sample is given it will be used to initialize the RNNLM.\n def _build_language_model(self, hparams, z_sample=None):\n source = self.source\n if self.time_major:\n source = self._transpose_time_major(source)\n\n # Use embeddings as inputs.\n 
embeddings = self._source_embedding(source)\n\n # Run the RNNLM.\n lm_outputs = language_model(embeddings, self.source_sequence_length,\n hparams, self.mode, self.single_cell_fn, self.time_major,\n self.batch_size, z_sample=z_sample)\n\n # Put the RNN output through a projection layer to obtain a mean for the\n # Gaussians.\n mean = tf.layers.dense(\n lm_outputs.rnn_output,\n self.src_embed_size,\n name=\"output_projection\")\n\n stddev = tf.ones_like(mean)\n\n return tf.contrib.distributions.MultivariateNormalDiag(loc=mean,\n scale_diag=stddev)\n\n # Overrides DSimpleJointModel.build_graph\n def build_graph(self, hparams, scope=None):\n utils.print_out(\"# creating %s graph ...\" % self.mode)\n dtype = tf.float32\n\n with tf.variable_scope(scope or \"dynamic_seq2seq\", dtype=dtype):\n\n with tf.variable_scope(\"generative_model\", dtype=dtype):\n\n # P(x_1^m) language model\n gauss_observations = self._build_language_model(hparams)\n\n # P(y_1^n|x_1^m) encoder\n encoder_outputs, encoder_state = self._build_encoder(hparams)\n\n # P(y_1^n|x_1^m) decoder\n tm_logits, sample_id, final_context_state = self._build_decoder(\n encoder_outputs, encoder_state, hparams)\n\n # Loss\n if self.mode != tf.contrib.learn.ModeKeys.INFER:\n with tf.device(model_helper.get_device_str(self.num_encoder_layers - 1,\n self.num_gpus)):\n loss, components = self._compute_loss(tm_logits, gauss_observations)\n else:\n loss = None\n\n # Save for summaries.\n if self.mode == tf.contrib.learn.ModeKeys.TRAIN:\n self._tm_loss = components[0]\n self._lm_loss = components[1]\n self._entropy = components[2]\n self._elbo = -loss\n\n return tm_logits, loss, final_context_state, sample_id\n\n # Overrides DSimpleJointModel._diagonal_decoder\n # Predicts Gaussian X_i | x_<i, y_1^n.\n def _diagonal_decoder(self, encoder_outputs, target_length,\n predicted_source_length, hparams):\n\n # Tile encoder_outputs from [B x T_i x d] to [B x T_o x T_i x d]\n encoder_outputs = tf.expand_dims(encoder_outputs, axis=1)\n encoder_outputs = tf.tile(encoder_outputs,\n multiples=[1, tf.reduce_max(predicted_source_length), 1, 1])\n\n # Create source and target sequence masks.\n y_mask = tf.sequence_mask(target_length, dtype=tf.float32)\n x_mask = tf.sequence_mask(predicted_source_length,\n dtype=tf.float32)\n\n # Compute fixed decoder coefficients based only on the source and\n # target sentence length.\n attention_coefficients = diagonal_attention_coefficients(y_mask, x_mask,\n target_length, predicted_source_length)\n attention_coefficients = tf.expand_dims(attention_coefficients, axis=-1)\n attention_output = tf.reduce_sum(encoder_outputs * attention_coefficients,\n axis=2)\n\n # Use the attention output to predict a mean and a diagonal covariance\n # for X_i.\n mean = tf.layers.dense(attention_output, self.src_embed_size)\n\n if hparams.Qx_covariance == \"diagonal\":\n stddev = tf.layers.dense(attention_output, self.src_embed_size)\n self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,\n scale_diag=stddev)\n elif hparams.Qx_covariance == \"full\":\n\n # Predict the cholesky factor.\n cov_matrix_values = tf.layers.dense(attention_output,\n self.src_embed_size * self.src_embed_size)\n cov_matrix = tf.reshape(cov_matrix_values,\n [self.batch_size, tf.reduce_max(predicted_source_length),\n self.src_embed_size, self.src_embed_size])\n cholesky = tf.contrib.distributions.matrix_diag_transform(cov_matrix,\n transform=tf.nn.softplus)\n\n self.Qx = tf.contrib.distributions.MultivariateNormalTriL(\n loc=mean, scale_tril=cholesky)\n 
else:\n raise ValueError(\"Unknown value for Qx_covariance: %s\" % \\\n hparams.Qx_covariance)\n\n return self.Qx.sample()\n\n # Overrides dsimplejoint._deterministic_rnn_decoder_with_attention\n def _deterministic_rnn_decoder_with_attention(self, encoder_outputs, final_state,\n target_length, predicted_source_length, hparams):\n\n max_source_length = tf.reduce_max(predicted_source_length)\n encoder_output = tf.tile(tf.expand_dims(final_state, 1),\n [1, max_source_length, 1])\n inputs = enrich_embeddings_with_positions(encoder_output,\n hparams.num_units, \"positional_embeddings\")\n if self.time_major:\n inputs = self._transpose_time_major(inputs)\n\n attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(\n hparams.num_units, encoder_outputs,\n memory_sequence_length=target_length)\n\n cell = tf.contrib.rnn.GRUCell(hparams.num_units)\n cell = tf.contrib.seq2seq.AttentionWrapper(\n cell,\n attention_mechanism,\n attention_layer_size=hparams.num_units,\n alignment_history=False,\n output_attention=False,\n name=\"attention\")\n\n decoder_outputs, _ = tf.nn.dynamic_rnn(cell, inputs,\n sequence_length=predicted_source_length,\n time_major=self.time_major,\n dtype=inputs.dtype)\n\n # Return batch major.\n if self.time_major:\n decoder_outputs = self._transpose_time_major(decoder_outputs)\n\n mean = tf.layers.dense(decoder_outputs, self.src_embed_size)\n\n if hparams.Qx_covariance == \"diagonal\":\n stddev = tf.layers.dense(decoder_outputs, self.src_embed_size, activation=tf.nn.sotftplus)\n self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,\n scale_diag=stddev)\n elif hparams.Qx_covariance == \"full\":\n\n # Predict the cholesky factor.\n cov_matrix_values = tf.layers.dense(decoder_outputs,\n self.src_embed_size * self.src_embed_size)\n cov_matrix = tf.reshape(cov_matrix_values,\n [self.batch_size, tf.reduce_max(predicted_source_length),\n self.src_embed_size, self.src_embed_size])\n cholesky = tf.contrib.distributions.matrix_diag_transform(cov_matrix,\n transform=tf.nn.softplus)\n\n self.Qx = tf.contrib.distributions.MultivariateNormalTriL(\n loc=mean, scale_tril=cholesky)\n else:\n raise ValueError(\"Unknown value for Qx_covariance: %s\" % \\\n hparams.Qx_covariance)\n\n return self.Qx.sample()\n\n # Overrides dsimplejoint._deterministic_rnn_decoder\n def _deterministic_rnn_decoder(self, encoder_outputs, final_state,\n target_length, predicted_source_length, hparams):\n\n max_source_length = tf.reduce_max(predicted_source_length)\n inputs = tf.tile(tf.expand_dims(final_state, 1),\n [1, max_source_length, 1])\n inputs = enrich_embeddings_with_positions(inputs,\n hparams.num_units, \"positional_embeddings\")\n if self.time_major:\n inputs = self._transpose_time_major(inputs)\n\n cell = tf.contrib.rnn.GRUCell(hparams.num_units)\n decoder_outputs, _ = tf.nn.dynamic_rnn(cell, inputs,\n sequence_length=predicted_source_length,\n time_major=self.time_major,\n dtype=inputs.dtype)\n\n # Return batch major.\n if self.time_major:\n decoder_outputs = self._transpose_time_major(decoder_outputs)\n\n with tf.variable_scope(\"mean_inference_net\"):\n mean = tf.layers.dense(decoder_outputs, self.src_embed_size)\n\n if hparams.Qx_covariance == \"diagonal\":\n with tf.variable_scope(\"stddev_inference_net\"):\n stddev = tf.layers.dense(decoder_outputs, self.src_embed_size, activation=tf.nn.softplus)\n self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,\n scale_diag=stddev)\n elif hparams.Qx_covariance == \"full\":\n\n # Predict the cholesky factor.\n cov_matrix_values = 
tf.layers.dense(decoder_outputs,\n self.src_embed_size * self.src_embed_size)\n cov_matrix = tf.reshape(cov_matrix_values,\n [self.batch_size, tf.reduce_max(predicted_source_length),\n self.src_embed_size, self.src_embed_size])\n cholesky = tf.contrib.distributions.matrix_diag_transform(cov_matrix,\n transform=tf.nn.softplus)\n\n self.Qx = tf.contrib.distributions.MultivariateNormalTriL(\n loc=mean, scale_tril=cholesky)\n else:\n raise ValueError(\"Unknown value for Qx_covariance: %s\" % \\\n hparams.Qx_covariance)\n\n return self.Qx.sample()\n\n # Overrides DSimpleJointModel._rnn_decoder\n # Models X_i | y_1^n, x_<i as Gaussian variables.\n def _rnn_decoder(self, encoder_outputs, encoder_state, target_length,\n predicted_source_length, hparams):\n scope = tf.get_variable_scope()\n if self.time_major:\n encoder_outputs = self._transpose_time_major(encoder_outputs)\n\n # Create an identical cell to the forward NMT decoder, but disable\n # inference mode.\n cell, decoder_init_state = self._build_decoder_cell(hparams,\n encoder_outputs, encoder_state, target_length, no_infer=True)\n\n # Create the initial inputs for the decoder. Use the generative embedding\n # matrix but stop the gradients.\n src_sos_id = tf.cast(self.src_vocab_table.lookup(\n tf.constant(hparams.sos)), tf.int32)\n start_tokens = tf.fill([self.batch_size], src_sos_id)\n start_tokens = tf.nn.embedding_lookup(self.embedding_encoder, start_tokens)\n start_tokens = tf.stop_gradient(start_tokens)\n\n # Create the Gaussian helper to generate Gaussian samples.\n helper = GaussianHelper(\n start_tokens=start_tokens,\n decode_lengths=predicted_source_length,\n full_covariance=(hparams.Qx_covariance == \"full\"))\n utils.print_out(\" creating GaussianHelper\")\n\n # Create the decoder.\n if hparams.Qx_covariance == \"diagonal\":\n projection_layer_size = self.src_embed_size * 2\n elif hparams.Qx_covariance == \"full\":\n projection_layer_size = self.src_embed_size + \\\n self.src_embed_size * self.src_embed_size\n else:\n raise ValueError(\"Unknown value for Qx_covariance: %s\" % \\\n hparams.Qx_covariance)\n projection_layer = tf.layers.Dense(projection_layer_size)\n decoder = tf.contrib.seq2seq.BasicDecoder(cell, helper,\n decoder_init_state, output_layer=projection_layer)\n\n # Decode the Concrete source sentence.\n outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder,\n output_time_major=self.time_major,\n maximum_iterations=tf.reduce_max(predicted_source_length),\n swap_memory=True,\n scope=scope)\n inferred_source = outputs.sample_id\n\n mean = outputs.rnn_output[:, :, :self.src_embed_size]\n stddev = outputs.rnn_output[:, :, self.src_embed_size:]\n\n # Return in batch major.\n if self.time_major:\n inferred_source = self._transpose_time_major(inferred_source)\n mean = self._transpose_time_major(mean)\n stddev = self._transpose_time_major(stddev)\n\n if hparams.Qx_covariance == \"diagonal\":\n self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,\n scale_diag=stddev)\n else:\n\n # Full covariance.\n covariance = tf.reshape(stddev, [self.batch_size,\n tf.reduce_max(predicted_source_length), self.src_embed_size,\n self.src_embed_size])\n cholesky = tf.contrib.distributions.matrix_diag_transform(covariance,\n transform=tf.nn.softplus)\n self.Qx = tf.contrib.distributions.MultivariateNormalTriL(\n loc=mean, scale_tril=cholesky)\n\n return inferred_source\n\n # Gives the negative log-likelihood of given observations for a gaussian\n # variable.\n def _gaussian_nll(self, gauss_var, observations, 
observation_length):\n if self.time_major: observations = self._transpose_time_major(observations)\n log_prob = gauss_var.log_prob(observations)\n max_source_time = self.get_max_time(observations)\n mask = tf.sequence_mask(observation_length, max_source_time,\n dtype=log_prob.dtype)\n if self.time_major: mask = tf.transpose(mask)\n nll = -tf.reduce_sum(log_prob * mask) / tf.to_float(self.batch_size)\n return nll\n\n # Overrides DSimpleJointModel._compute_loss\n def _compute_loss(self, tm_logits, gauss_observations):\n\n # - log P(y_1^n)\n tm_loss = self._compute_categorical_loss(tm_logits,\n self.target_output, self.target_sequence_length)\n\n # - log p(x_1^m)\n lm_loss = self._gaussian_nll(gauss_observations, self.source_output,\n self.source_sequence_length)\n\n # H(X|y_1^n) -- keep in mind self.Qx is defined in batch major, as are all\n # data streams.\n entropy = tf.cond(self.mono_batch,\n true_fn=lambda: tf.reduce_mean(tf.reduce_sum(self.Qx.entropy(), axis=1)),\n false_fn=lambda: tf.constant(0.))\n\n return tm_loss + lm_loss - entropy, (tm_loss, lm_loss, entropy)\n"
] |
[
[
"tensorflow.cond",
"tensorflow.nn.dynamic_rnn",
"tensorflow.contrib.distributions.matrix_diag_transform",
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.reduce_sum",
"tensorflow.summary.scalar",
"tensorflow.contrib.distributions.MultivariateNormalDiag",
"tensorflow.layers.dense",
"tensorflow.stop_gradient",
"tensorflow.to_float",
"tensorflow.contrib.seq2seq.BasicDecoder",
"tensorflow.fill",
"tensorflow.layers.Dense",
"tensorflow.contrib.seq2seq.AttentionWrapper",
"tensorflow.sequence_mask",
"tensorflow.nn.embedding_lookup",
"tensorflow.size",
"tensorflow.reduce_max",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.contrib.distributions.MultivariateNormalTriL",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.contrib.seq2seq.BahdanauAttention",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
]
] |
asappinc/emergent_comms_negotiation
|
[
"19ad405dcb83a3a521b6e1752cec075b69aa164b"
] |
[
"nets.py"
] |
[
"import torch\nfrom torch import nn, autograd\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\nclass NumberSequenceEncoder(nn.Module):\n def __init__(self, num_values, embedding_size=100):\n \"\"\"\n eg for values 0,1,2,3,4,5, num_values will be: 6\n for 0,1,..,9 num_values will be: 10\n \"\"\"\n super().__init__()\n self.embedding_size = embedding_size\n self.num_values = num_values\n self.embedding = nn.Embedding(num_values, embedding_size)\n self.lstm = nn.LSTMCell(\n input_size=embedding_size,\n hidden_size=embedding_size)\n self.zero_state = None\n\n def forward(self, x):\n batch_size = x.size()[0]\n seq_len = x.size()[1]\n x = x.transpose(0, 1)\n x = self.embedding(x)\n type_constr = torch.cuda if x.is_cuda else torch\n state = (\n Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0)),\n Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0))\n )\n for s in range(seq_len):\n state = self.lstm(x[s], state)\n return state[0]\n\n\nclass CombinedNet(nn.Module):\n def __init__(self, num_sources=3, embedding_size=100):\n super().__init__()\n self.embedding_size = embedding_size\n self.h1 = nn.Linear(embedding_size * num_sources, embedding_size)\n\n def forward(self, x):\n x = self.h1(x)\n x = F.relu(x)\n return x\n\n\nclass TermPolicy(nn.Module):\n def __init__(self, embedding_size=100):\n super().__init__()\n self.h1 = nn.Linear(embedding_size, 1)\n\n def forward(self, thoughtvector, testing, eps=1e-8):\n logits = self.h1(thoughtvector)\n term_probs = F.sigmoid(logits)\n matches_argmax_count = 0\n\n res_greedy = (term_probs.data >= 0.5).view(-1, 1).float()\n\n log_g = None\n if not testing:\n a = torch.bernoulli(term_probs)\n g = a.detach() * term_probs + (1 - a.detach()) * (1 - term_probs)\n log_g = g.log()\n a = a.data\n else:\n a = res_greedy\n\n matches_greedy = res_greedy == a\n matches_greedy_count = matches_greedy.int().sum()\n term_probs = term_probs + eps\n entropy = - (term_probs * term_probs.log()).sum(1).sum()\n return term_probs, log_g, a.byte(), entropy, matches_greedy_count\n\n\nclass UtterancePolicy(nn.Module):\n def __init__(self, embedding_size=100, num_tokens=10, max_len=6):\n super().__init__()\n self.embedding_size = embedding_size\n self.num_tokens = num_tokens\n self.max_len = max_len\n self.embedding = nn.Embedding(num_tokens, embedding_size)\n self.lstm = nn.LSTMCell(\n input_size=embedding_size,\n hidden_size=embedding_size\n )\n self.h1 = nn.Linear(embedding_size, num_tokens)\n\n def forward(self, h_t, testing, eps=1e-8):\n batch_size = h_t.size()[0]\n\n type_constr = torch.cuda if h_t.is_cuda else torch\n h = h_t\n c = Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0))\n\n matches_argmax_count = 0\n last_token = type_constr.LongTensor(batch_size).fill_(0)\n utterance_nodes = []\n type_constr = torch.cuda if h_t.is_cuda else torch\n utterance = type_constr.LongTensor(batch_size, self.max_len).fill_(0)\n entropy = 0\n matches_argmax_count = 0\n stochastic_draws_count = 0\n for i in range(self.max_len):\n embedded = self.embedding(Variable(last_token))\n h, c = self.lstm(embedded, (h, c))\n logits = self.h1(h)\n probs = F.softmax(logits)\n\n _, res_greedy = probs.data.max(1)\n res_greedy = res_greedy.view(-1, 1).long()\n\n log_g = None\n if not testing:\n a = torch.multinomial(probs)\n g = torch.gather(probs, 1, Variable(a.data))\n log_g = g.log()\n a = a.data\n else:\n a = res_greedy\n\n matches_argmax = res_greedy == a\n matches_argmax_count += 
matches_argmax.int().sum()\n stochastic_draws_count += batch_size\n\n if log_g is not None:\n utterance_nodes.append(log_g)\n last_token = a.view(batch_size)\n utterance[:, i] = last_token\n probs = probs + eps\n entropy -= (probs * probs.log()).sum(1).sum()\n return utterance_nodes, utterance, entropy, matches_argmax_count, stochastic_draws_count\n\n\nclass ProposalPolicy(nn.Module):\n def __init__(self, embedding_size=100, num_counts=6, num_items=3):\n super().__init__()\n self.num_counts = num_counts\n self.num_items = num_items\n self.embedding_size = embedding_size\n self.fcs = []\n for i in range(num_items):\n fc = nn.Linear(embedding_size, num_counts)\n self.fcs.append(fc)\n self.__setattr__('h1_%s' % i, fc)\n\n def forward(self, x, testing, eps=1e-8):\n batch_size = x.size()[0]\n nodes = []\n entropy = 0\n matches_argmax_count = 0\n type_constr = torch.cuda if x.is_cuda else torch\n matches_argmax_count = 0\n stochastic_draws = 0\n proposal = type_constr.LongTensor(batch_size, self.num_items).fill_(0)\n for i in range(self.num_items):\n logits = self.fcs[i](x)\n probs = F.softmax(logits)\n\n _, res_greedy = probs.data.max(1)\n res_greedy = res_greedy.view(-1, 1).long()\n\n log_g = None\n if not testing:\n a = torch.multinomial(probs)\n g = torch.gather(probs, 1, Variable(a.data))\n log_g = g.log()\n a = a.data\n else:\n a = res_greedy\n\n matches_argmax = res_greedy == a\n matches_argmax_count += matches_argmax.int().sum()\n stochastic_draws += batch_size\n\n if log_g is not None:\n nodes.append(log_g)\n probs = probs + eps\n entropy += (- probs * probs.log()).sum(1).sum()\n proposal[:, i] = a\n\n return nodes, proposal, entropy, matches_argmax_count, stochastic_draws\n\n\nclass AgentModel(nn.Module):\n def __init__(\n self, enable_comms, enable_proposal,\n term_entropy_reg,\n utterance_entropy_reg,\n proposal_entropy_reg,\n embedding_size=100):\n super().__init__()\n self.term_entropy_reg = term_entropy_reg\n self.utterance_entropy_reg = utterance_entropy_reg\n self.proposal_entropy_reg = proposal_entropy_reg\n self.embedding_size = embedding_size\n self.enable_comms = enable_comms\n self.enable_proposal = enable_proposal\n self.context_net = NumberSequenceEncoder(num_values=6)\n self.utterance_net = NumberSequenceEncoder(num_values=10)\n self.proposal_net = NumberSequenceEncoder(num_values=6)\n self.proposal_net.embedding = self.context_net.embedding\n\n self.combined_net = CombinedNet()\n\n self.term_policy = TermPolicy()\n self.utterance_policy = UtterancePolicy()\n self.proposal_policy = ProposalPolicy()\n\n def forward(self, pool, utility, m_prev, prev_proposal, testing):\n \"\"\"\n setting testing to True disables stochasticity: always picks the argmax\n cannot use this when training\n \"\"\"\n batch_size = pool.size()[0]\n context = torch.cat([pool, utility], 1)\n c_h = self.context_net(context)\n type_constr = torch.cuda if context.is_cuda else torch\n if self.enable_comms:\n m_h = self.utterance_net(m_prev)\n else:\n m_h = Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0))\n p_h = self.proposal_net(prev_proposal)\n\n h_t = torch.cat([c_h, m_h, p_h], -1)\n h_t = self.combined_net(h_t)\n\n entropy_loss = 0\n nodes = []\n\n term_probs, term_node, term_a, entropy, term_matches_argmax_count = self.term_policy(h_t, testing=testing)\n nodes.append(term_node)\n entropy_loss -= entropy * self.term_entropy_reg\n\n utterance = None\n if self.enable_comms:\n utterance_nodes, utterance, utterance_entropy, utt_matches_argmax_count, utt_stochastic_draws = 
self.utterance_policy(\n h_t, testing=testing)\n nodes += utterance_nodes\n entropy_loss -= self.utterance_entropy_reg * utterance_entropy\n else:\n utt_matches_argmax_count = 0\n utt_stochastic_draws = 0\n utterance = type_constr.LongTensor(batch_size, 6).zero_() # hard-coding 6 here is a bit hacky...\n\n proposal_nodes, proposal, proposal_entropy, prop_matches_argmax_count, prop_stochastic_draws = self.proposal_policy(\n h_t, testing=testing)\n nodes += proposal_nodes\n entropy_loss -= self.proposal_entropy_reg * proposal_entropy\n\n return nodes, term_a, utterance, proposal, entropy_loss, \\\n term_matches_argmax_count, utt_matches_argmax_count, utt_stochastic_draws, prop_matches_argmax_count, prop_stochastic_draws\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.nn.Embedding",
"torch.multinomial",
"torch.nn.LSTMCell",
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.nn.functional.relu",
"torch.bernoulli",
"torch.autograd.Variable"
]
] |
varunsingh251/Indian-Sign-Language-Recognition
|
[
"40604e5c0a2a9b6310e26c0fa31b4f4f1c30de45"
] |
[
"Bag of Features/preprocessing_surf.py"
] |
[
"import numpy as np\nimport cv2\nimport os\nimport csv\nimport sklearn.metrics as sm\nfrom surf_image_processing import func,func2\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.svm import SVC\nfrom sklearn.grid_search import GridSearchCV\nimport random\nimport warnings\nimport pickle\nfrom sklearn.naive_bayes import GaussianNB as nb\nfrom sklearn.neighbors import KNeighborsClassifier as knn\nfrom sklearn.linear_model import LogisticRegression as lr\nfrom sklearn.neural_network import MLPClassifier as mlp\nimport numpy as np\nimport sklearn.metrics as sm\n\n#initialise\npath=\"train\"\nlabel=0\nimg_descs=[]\ny=[]\n\n#utility functions\ndef perform_data_split(X, y, training_idxs, test_idxs, val_idxs):\n \"\"\"\n Split X and y into train/test/val sets\n Parameters:\n -----------\n X : eg, use img_bow_hist\n y : corresponding labels for X\n training_idxs : list/array of integers used as indicies for training rows\n test_idxs : same\n val_idxs : same\n Returns:\n --------\n X_train, X_test, X_val, y_train, y_test, y_val\n \"\"\"\n X_train = X[training_idxs]\n X_test = X[test_idxs]\n X_val = X[val_idxs]\n\n y_train = y[training_idxs]\n y_test = y[test_idxs]\n y_val = y[val_idxs]\n\n return X_train, X_test, X_val, y_train, y_test, y_val\n\ndef train_test_val_split_idxs(total_rows, percent_test, percent_val):\n \"\"\"\n Get indexes for training, test, and validation rows, given a total number of rows.\n Assumes indexes are sequential integers starting at 0: eg [0,1,2,3,...N]\n Returns:\n --------\n training_idxs, test_idxs, val_idxs\n Both lists of integers\n \"\"\"\n if percent_test + percent_val >= 1.0:\n raise ValueError('percent_test and percent_val must sum to less than 1.0')\n\n row_range = range(total_rows)\n\n no_test_rows = int(total_rows*(percent_test))\n test_idxs = np.random.choice(row_range, size=no_test_rows, replace=False)\n # remove test indexes\n row_range = [idx for idx in row_range if idx not in test_idxs]\n\n no_val_rows = int(total_rows*(percent_val))\n val_idxs = np.random.choice(row_range, size=no_val_rows, replace=False)\n # remove validation indexes\n training_idxs = [idx for idx in row_range if idx not in val_idxs]\n\n print('Train-test-val split: %i training rows, %i test rows, %i validation rows' % (len(training_idxs), len(test_idxs), len(val_idxs)))\n\n return training_idxs, test_idxs, val_idxs\n\ndef cluster_features(img_descs, training_idxs, cluster_model):\n \"\"\"\n Cluster the training features using the cluster_model\n and convert each set of descriptors in img_descs\n to a Visual Bag of Words histogram.\n Parameters:\n -----------\n X : list of lists of SIFT descriptors (img_descs)\n training_idxs : array/list of integers\n Indicies for the training rows in img_descs\n cluster_model : clustering model (eg KMeans from scikit-learn)\n The model used to cluster the SIFT features\n Returns:\n --------\n X, cluster_model :\n X has K feature columns, each column corresponding to a visual word\n cluster_model has been fit to the training set\n \"\"\"\n n_clusters = cluster_model.n_clusters\n\n # # Generate the SIFT descriptor features\n # img_descs = gen_sift_features(labeled_img_paths)\n #\n # # Generate indexes of training rows\n # total_rows = len(img_descs)\n # training_idxs, test_idxs, val_idxs = train_test_val_split_idxs(total_rows, percent_test, percent_val)\n\n # Concatenate all descriptors in the training set together\n training_descs = [img_descs[i] for i in training_idxs]\n all_train_descriptors = [desc for desc_list in training_descs for desc 
in desc_list]\n all_train_descriptors = np.array(all_train_descriptors)\n\n\n print ('%i descriptors before clustering' % all_train_descriptors.shape[0])\n\n # Cluster descriptors to get codebook\n print ('Using clustering model %s...' % repr(cluster_model))\n print ('Clustering on training set to get codebook of %i words' % n_clusters)\n\n # train kmeans or other cluster model on those descriptors selected above\n cluster_model.fit(all_train_descriptors)\n print ('done clustering. Using clustering model to generate BoW histograms for each image.')\n\n # compute set of cluster-reduced words for each image\n img_clustered_words = [cluster_model.predict(raw_words) for raw_words in img_descs]\n\n # finally make a histogram of clustered word counts for each image. These are the final features.\n img_bow_hist = np.array(\n [np.bincount(clustered_words, minlength=n_clusters) for clustered_words in img_clustered_words])\n\n X = img_bow_hist\n print ('done generating BoW histograms.')\n\n return X, cluster_model\n\ndef calc_accuracy(method,label_test,pred):\n print(\"accuracy score for \",method,sm.accuracy_score(label_test,pred))\n print(\"precision_score for \",method,sm.precision_score(label_test,pred,average='micro'))\n print(\"f1 score for \",method,sm.f1_score(label_test,pred,average='micro'))\n print(\"recall score for \",method,sm.recall_score(label_test,pred,average='micro'))\n\ndef predict_svm(X_train, X_test, y_train, y_test):\n svc=SVC(kernel='linear') \n print(\"svm started\")\n svc.fit(X_train,y_train)\n y_pred=svc.predict(X_test)\n calc_accuracy(\"SVM\",y_test,y_pred)\n np.savetxt('submission_surf_svm.csv', np.c_[range(1,len(y_test)+1),y_pred,y_test], delimiter=',', header = 'ImageId,Label,TrueLabel', comments = '', fmt='%d')\n \n\ndef predict_lr(X_train, X_test, y_train, y_test):\n clf = lr()\n print(\"lr started\")\n clf.fit(X_train,y_train)\n y_pred=clf.predict(X_test)\n calc_accuracy(\"Logistic regression\",y_test,y_pred)\n np.savetxt('submission_surf_lr.csv', np.c_[range(1,len(y_test)+1),y_pred,y_test], delimiter=',', header = 'ImageId,Label,TrueLabel', comments = '', fmt='%d')\n \n\n\ndef predict_nb(X_train, X_test, y_train, y_test):\n clf = nb()\n print(\"nb started\")\n clf.fit(X_train,y_train)\n y_pred=clf.predict(X_test)\n calc_accuracy(\"Naive Bayes\",y_test,y_pred)\n np.savetxt('submission_surf_nb.csv', np.c_[range(1,len(y_test)+1),y_pred,y_test], delimiter=',', header = 'ImageId,Label,TrueLabel', comments = '', fmt='%d')\n \n\n\ndef predict_knn(X_train, X_test, y_train, y_test):\n clf=knn(n_neighbors=3)\n print(\"knn started\")\n clf.fit(X_train,y_train)\n y_pred=clf.predict(X_test)\n calc_accuracy(\"K nearest neighbours\",y_test,y_pred)\n np.savetxt('submission_surf_knn.csv', np.c_[range(1,len(y_test)+1),y_pred,y_test], delimiter=',', header = 'ImageId,Label,TrueLabel', comments = '', fmt='%d')\n \n\ndef predict_mlp(X_train, X_test, y_train, y_test):\n clf=mlp()\n print(\"mlp started\")\n clf.fit(X_train,y_train)\n y_pred=clf.predict(X_test)\n calc_accuracy(\"MLP classifier\",y_test,y_pred)\n\n#creating desc for each file with label\nfor (dirpath,dirnames,filenames) in os.walk(path):\n for dirname in dirnames:\n print(dirname)\n for(direcpath,direcnames,files) in os.walk(path+\"\\\\\"+dirname):\n for file in files:\n actual_path=path+\"\\\\\\\\\"+dirname+\"\\\\\\\\\"+file\n print(actual_path)\n des=func(actual_path)\n img_descs.append(des)\n y.append(label)\n label=label+1\n\n#finding indexes of test train and validate\ny=np.array(y)\ntraining_idxs, test_idxs, val_idxs 
= train_test_val_split_idxs(len(img_descs), 0.4, 0.0)\n\n#creating histogram using kmeans minibatch cluster model\nX, cluster_model = cluster_features(img_descs, training_idxs, MiniBatchKMeans(n_clusters=150))\n\n#splitting data into test, train, validate using the indexes\nX_train, X_test, X_val, y_train, y_test, y_val = perform_data_split(X, y, training_idxs, test_idxs, val_idxs)\n\n\n#using classification methods\npredict_knn(X_train, X_test,y_train, y_test)\n#predict_mlp(X_train, X_test,y_train, y_test)\npredict_svm(X_train, X_test,y_train, y_test)\n\npredict_lr(X_train, X_test,y_train, y_test)\npredict_nb(X_train, X_test,y_train, y_test)\n\n\n"
] |
[
[
"sklearn.neural_network.MLPClassifier",
"sklearn.naive_bayes.GaussianNB",
"sklearn.linear_model.LogisticRegression",
"numpy.random.choice",
"sklearn.metrics.precision_score",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.bincount",
"sklearn.svm.SVC",
"sklearn.metrics.f1_score",
"sklearn.cluster.MiniBatchKMeans",
"numpy.array",
"sklearn.metrics.recall_score",
"sklearn.metrics.accuracy_score"
]
] |
davidastephens/zipline
|
[
"1da206df936bb8125913bae9fc182fd4f611a691"
] |
[
"zipline/algorithm.py"
] |
[
"#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom copy import copy\n\nimport pytz\nimport pandas as pd\nimport numpy as np\n\nfrom datetime import datetime\n\nfrom itertools import groupby, ifilter\nfrom operator import attrgetter\n\nfrom zipline.errors import (\n UnsupportedSlippageModel,\n OverrideSlippagePostInit,\n UnsupportedCommissionModel,\n OverrideCommissionPostInit\n)\nfrom zipline.finance.performance import PerformanceTracker\nfrom zipline.sources import DataFrameSource, DataPanelSource\nfrom zipline.utils.factory import create_simulation_parameters\nfrom zipline.transforms.utils import StatefulTransform\nfrom zipline.finance.slippage import (\n VolumeShareSlippage,\n SlippageModel,\n transact_partial\n)\nfrom zipline.finance.commission import PerShare, PerTrade, PerDollar\nfrom zipline.finance.blotter import Blotter\nfrom zipline.finance.constants import ANNUALIZER\nfrom zipline.finance import trading\nimport zipline.protocol\nfrom zipline.protocol import Event\n\nfrom zipline.gens.composites import (\n date_sorted_sources,\n sequential_transforms,\n alias_dt\n)\nfrom zipline.gens.tradesimulation import AlgorithmSimulator\n\nDEFAULT_CAPITAL_BASE = float(\"1.0e5\")\n\n\nclass TradingAlgorithm(object):\n \"\"\"\n Base class for trading algorithms. 
Inherit and overload\n initialize() and handle_data(data).\n\n A new algorithm could look like this:\n ```\n class MyAlgo(TradingAlgorithm):\n def initialize(self, sids, amount):\n self.sids = sids\n self.amount = amount\n\n def handle_data(self, data):\n sid = self.sids[0]\n amount = self.amount\n self.order(sid, amount)\n ```\n To then to run this algorithm:\n\n my_algo = MyAlgo([0], 100) # first argument has to be list of sids\n stats = my_algo.run(data)\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize sids and other state variables.\n\n :Arguments:\n data_frequency : str (daily, hourly or minutely)\n The duration of the bars.\n annualizer : int <optional>\n Which constant to use for annualizing risk metrics.\n If not provided, will extract from data_frequency.\n capital_base : float <default: 1.0e5>\n How much capital to start with.\n instant_fill : bool <default: False>\n Whether to fill orders immediately or on next bar.\n \"\"\"\n self.datetime = None\n\n self.registered_transforms = {}\n self.transforms = []\n self.sources = []\n\n self._recorded_vars = {}\n\n self.logger = None\n\n self.benchmark_return_source = None\n self.perf_tracker = None\n\n # default components for transact\n self.slippage = VolumeShareSlippage()\n self.commission = PerShare()\n\n if 'data_frequency' in kwargs:\n self.set_data_frequency(kwargs.pop('data_frequency'))\n else:\n self.data_frequency = None\n\n self.instant_fill = kwargs.pop('instant_fill', False)\n\n # Override annualizer if set\n if 'annualizer' in kwargs:\n self.annualizer = kwargs['annualizer']\n\n # set the capital base\n self.capital_base = kwargs.pop('capital_base', DEFAULT_CAPITAL_BASE)\n\n self.sim_params = kwargs.pop('sim_params', None)\n if self.sim_params:\n self.sim_params.data_frequency = self.data_frequency\n self.perf_tracker = PerformanceTracker(self.sim_params)\n\n self.blotter = kwargs.pop('blotter', None)\n if not self.blotter:\n self.blotter = Blotter()\n\n self.portfolio_needs_update = True\n self._portfolio = None\n\n # an algorithm subclass needs to set initialized to True when\n # it is fully initialized.\n self.initialized = False\n\n # call to user-defined constructor method\n self.initialize(*args, **kwargs)\n\n def __repr__(self):\n \"\"\"\n N.B. 
this does not yet represent a string that can be used\n to instantiate an exact copy of an algorithm.\n\n However, it is getting close, and provides some value as something\n that can be inspected interactively.\n \"\"\"\n return \"\"\"\n{class_name}(\n capital_base={capital_base}\n sim_params={sim_params},\n initialized={initialized},\n slippage={slippage},\n commission={commission},\n blotter={blotter},\n recorded_vars={recorded_vars})\n\"\"\".strip().format(class_name=self.__class__.__name__,\n capital_base=self.capital_base,\n sim_params=repr(self.sim_params),\n initialized=self.initialized,\n slippage=repr(self.slippage),\n commission=repr(self.commission),\n blotter=repr(self.blotter),\n recorded_vars=repr(self.recorded_vars))\n\n def _create_data_generator(self, source_filter, sim_params):\n \"\"\"\n Create a merged data generator using the sources and\n transforms attached to this algorithm.\n\n ::source_filter:: is a method that receives events in date\n sorted order, and returns True for those events that should be\n processed by the zipline, and False for those that should be\n skipped.\n \"\"\"\n if self.benchmark_return_source is None:\n benchmark_return_source = [\n Event({'dt': dt,\n 'returns': ret,\n 'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,\n 'source_id': 'benchmarks'})\n for dt, ret in trading.environment.benchmark_returns.iterkv()\n if dt.date() >= sim_params.period_start.date()\n and dt.date() <= sim_params.period_end.date()\n ]\n else:\n benchmark_return_source = self.benchmark_return_source\n\n date_sorted = date_sorted_sources(*self.sources)\n\n if source_filter:\n date_sorted = ifilter(source_filter, date_sorted)\n\n with_tnfms = sequential_transforms(date_sorted,\n *self.transforms)\n with_alias_dt = alias_dt(with_tnfms)\n\n with_benchmarks = date_sorted_sources(benchmark_return_source,\n with_alias_dt)\n\n # Group together events with the same dt field. This depends on the\n # events already being sorted.\n return groupby(with_benchmarks, attrgetter('dt'))\n\n def _create_generator(self, sim_params, source_filter=None):\n \"\"\"\n Create a basic generator setup using the sources and\n transforms attached to this algorithm.\n\n ::source_filter:: is a method that receives events in date\n sorted order, and returns True for those events that should be\n processed by the zipline, and False for those that should be\n skipped.\n \"\"\"\n sim_params.data_frequency = self.data_frequency\n\n # perf_tracker will be instantiated in __init__ if a sim_params\n # is passed to the constructor. If not, we instantiate here.\n if self.perf_tracker is None:\n self.perf_tracker = PerformanceTracker(sim_params)\n\n self.data_gen = self._create_data_generator(source_filter,\n sim_params)\n\n self.trading_client = AlgorithmSimulator(self, sim_params)\n\n transact_method = transact_partial(self.slippage, self.commission)\n self.set_transact(transact_method)\n\n return self.trading_client.transform(self.data_gen)\n\n def get_generator(self):\n \"\"\"\n Override this method to add new logic to the construction\n of the generator. Overrides can use the _create_generator\n method to get a standard construction generator.\n \"\"\"\n return self._create_generator(self.sim_params)\n\n def initialize(self, *args, **kwargs):\n pass\n\n # TODO: make a new subclass, e.g. 
BatchAlgorithm, and move\n # the run method to the subclass, and refactor to put the\n # generator creation logic into get_generator.\n def run(self, source, sim_params=None, benchmark_return_source=None):\n \"\"\"Run the algorithm.\n\n :Arguments:\n source : can be either:\n - pandas.DataFrame\n - zipline source\n - list of zipline sources\n\n If pandas.DataFrame is provided, it must have the\n following structure:\n * column names must consist of ints representing the\n different sids\n * index must be DatetimeIndex\n * array contents should be price info.\n\n :Returns:\n daily_stats : pandas.DataFrame\n Daily performance metrics such as returns, alpha etc.\n\n \"\"\"\n if isinstance(source, (list, tuple)):\n assert self.sim_params is not None or sim_params is not None, \\\n \"\"\"When providing a list of sources, \\\n sim_params have to be specified as a parameter\n or in the constructor.\"\"\"\n elif isinstance(source, pd.DataFrame):\n # if DataFrame provided, wrap in DataFrameSource\n source = DataFrameSource(source)\n elif isinstance(source, pd.Panel):\n source = DataPanelSource(source)\n\n if not isinstance(source, (list, tuple)):\n self.sources = [source]\n else:\n self.sources = source\n\n # Check for override of sim_params.\n # If it isn't passed to this function,\n # use the default params set with the algorithm.\n # Else, we create simulation parameters using the start and end of the\n # source provided.\n if not sim_params:\n if not self.sim_params:\n start = source.start\n end = source.end\n\n sim_params = create_simulation_parameters(\n start=start,\n end=end,\n capital_base=self.capital_base\n )\n else:\n sim_params = self.sim_params\n\n # Create transforms by wrapping them into StatefulTransforms\n self.transforms = []\n for namestring, trans_descr in self.registered_transforms.iteritems():\n sf = StatefulTransform(\n trans_descr['class'],\n *trans_descr['args'],\n **trans_descr['kwargs']\n )\n sf.namestring = namestring\n\n self.transforms.append(sf)\n\n # force a reset of the performance tracker, in case\n # this is a repeat run of the algorithm.\n self.perf_tracker = None\n\n # create transforms and zipline\n self.gen = self._create_generator(sim_params)\n\n # loop through simulated_trading, each iteration returns a\n # perf dictionary\n perfs = []\n for perf in self.gen:\n perfs.append(perf)\n\n # convert perf dict to pandas dataframe\n daily_stats = self._create_daily_stats(perfs)\n\n return daily_stats\n\n def _create_daily_stats(self, perfs):\n # create daily and cumulative stats dataframe\n daily_perfs = []\n # TODO: the loop here could overwrite expected properties\n # of daily_perf. Could potentially raise or log a\n # warning.\n for perf in perfs:\n if 'daily_perf' in perf:\n\n perf['daily_perf'].update(\n perf['daily_perf'].pop('recorded_vars')\n )\n daily_perfs.append(perf['daily_perf'])\n else:\n self.risk_report = perf\n\n daily_dts = [np.datetime64(perf['period_close'], utc=True)\n for perf in daily_perfs]\n daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)\n\n return daily_stats\n\n def add_transform(self, transform_class, tag, *args, **kwargs):\n \"\"\"Add a single-sid, sequential transform to the model.\n\n :Arguments:\n transform_class : class\n Which transform to use. E.g. mavg.\n tag : str\n How to name the transform. 
Can later be access via:\n data[sid].tag()\n\n Extra args and kwargs will be forwarded to the transform\n instantiation.\n\n \"\"\"\n self.registered_transforms[tag] = {'class': transform_class,\n 'args': args,\n 'kwargs': kwargs}\n\n def record(self, **kwargs):\n \"\"\"\n Track and record local variable (i.e. attributes) each day.\n \"\"\"\n for name, value in kwargs.items():\n self._recorded_vars[name] = value\n\n def order(self, sid, amount, limit_price=None, stop_price=None):\n return self.blotter.order(sid, amount, limit_price, stop_price)\n\n def order_value(self, sid, value, limit_price=None, stop_price=None):\n \"\"\"\n Place an order by desired value rather than desired number of shares.\n If the requested sid is found in the universe, the requested value is\n divided by its price to imply the number of shares to transact.\n\n value > 0 :: Buy/Cover\n value < 0 :: Sell/Short\n Market order: order(sid, value)\n Limit order: order(sid, value, limit_price)\n Stop order: order(sid, value, None, stop_price)\n StopLimit order: order(sid, value, limit_price, stop_price)\n \"\"\"\n last_price = self.trading_client.current_data[sid].price\n if np.allclose(last_price, 0):\n zero_message = \"Price of 0 for {psid}; can't infer value\".format(\n psid=sid\n )\n self.logger.debug(zero_message)\n # Don't place any order\n return\n else:\n amount = value / last_price\n return self.order(sid, amount, limit_price, stop_price)\n\n @property\n def recorded_vars(self):\n return copy(self._recorded_vars)\n\n @property\n def portfolio(self):\n # internally this will cause a refresh of the\n # period performance calculations.\n return self.perf_tracker.get_portfolio()\n\n def updated_portfolio(self):\n # internally this will cause a refresh of the\n # period performance calculations.\n if self.portfolio_needs_update:\n self._portfolio = self.perf_tracker.get_portfolio()\n self.portfolio_needs_update = False\n return self._portfolio\n\n def set_logger(self, logger):\n self.logger = logger\n\n def set_datetime(self, dt):\n assert isinstance(dt, datetime), \\\n \"Attempt to set algorithm's current time with non-datetime\"\n assert dt.tzinfo == pytz.utc, \\\n \"Algorithm expects a utc datetime\"\n self.datetime = dt\n\n def get_datetime(self):\n \"\"\"\n Returns a copy of the datetime.\n \"\"\"\n date_copy = copy(self.datetime)\n assert date_copy.tzinfo == pytz.utc, \\\n \"Algorithm should have a utc datetime\"\n return date_copy\n\n def set_transact(self, transact):\n \"\"\"\n Set the method that will be called to create a\n transaction from open orders and trade events.\n \"\"\"\n self.blotter.transact = transact\n\n def set_slippage(self, slippage):\n if not isinstance(slippage, SlippageModel):\n raise UnsupportedSlippageModel()\n if self.initialized:\n raise OverrideSlippagePostInit()\n self.slippage = slippage\n\n def set_commission(self, commission):\n if not isinstance(commission, (PerShare, PerTrade, PerDollar)):\n raise UnsupportedCommissionModel()\n\n if self.initialized:\n raise OverrideCommissionPostInit()\n self.commission = commission\n\n def set_sources(self, sources):\n assert isinstance(sources, list)\n self.sources = sources\n\n def set_transforms(self, transforms):\n assert isinstance(transforms, list)\n self.transforms = transforms\n\n def set_data_frequency(self, data_frequency):\n assert data_frequency in ('daily', 'minute')\n self.data_frequency = data_frequency\n self.annualizer = ANNUALIZER[self.data_frequency]\n\n def order_percent(self, sid, percent, limit_price=None, 
stop_price=None):\n \"\"\"\n Place an order in the specified security corresponding to the given\n percent of the current portfolio value.\n\n Note that percent must expressed as a decimal (0.50 means 50\\%).\n \"\"\"\n value = self.portfolio.portfolio_value * percent\n return self.order_value(sid, value, limit_price, stop_price)\n\n def order_target(self, sid, target, limit_price=None, stop_price=None):\n \"\"\"\n Place an order to adjust a position to a target number of shares. If\n the position doesn't already exist, this is equivalent to placing a new\n order. If the position does exist, this is equivalent to placing an\n order for the difference between the target number of shares and the\n current number of shares.\n \"\"\"\n if sid in self.portfolio.positions:\n current_position = self.portfolio.positions[sid].amount\n req_shares = target - current_position\n return self.order(sid, req_shares, limit_price, stop_price)\n else:\n return self.order(sid, target, limit_price, stop_price)\n\n def order_target_value(self, sid, target, limit_price=None,\n stop_price=None):\n \"\"\"\n Place an order to adjust a position to a target value. If\n the position doesn't already exist, this is equivalent to placing a new\n order. If the position does exist, this is equivalent to placing an\n order for the difference between the target value and the\n current value.\n \"\"\"\n if sid in self.portfolio.positions:\n current_position = self.portfolio.positions[sid].amount\n current_price = self.portfolio.positions[sid].last_sale_price\n current_value = current_position * current_price\n req_value = target - current_value\n return self.order_value(sid, req_value, limit_price, stop_price)\n else:\n return self.order_value(sid, target, limit_price, stop_price)\n\n def order_target_percent(self, sid, target, limit_price=None,\n stop_price=None):\n \"\"\"\n Place an order to adjust a position to a target percent of the\n current portfolio value. If the position doesn't already exist, this is\n equivalent to placing a new order. If the position does exist, this is\n equivalent to placing an order for the difference between the target\n percent and the current percent.\n\n Note that target must expressed as a decimal (0.50 means 50\\%).\n \"\"\"\n if sid in self.portfolio.positions:\n current_position = self.portfolio.positions[sid].amount\n current_price = self.portfolio.positions[sid].last_sale_price\n current_value = current_position * current_price\n else:\n current_value = 0\n target_value = self.portfolio.portfolio_value * target\n\n req_value = target_value - current_value\n return self.order_value(sid, req_value, limit_price, stop_price)\n"
] |
[
[
"numpy.allclose",
"pandas.DataFrame",
"numpy.datetime64"
]
] |
redhood-97/sms.ai
|
[
"8b787e3f678003826e36e233d966c7108989354c"
] |
[
"spam.py"
] |
[
"\nimport os, csv, re, nltk\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.metrics import confusion_matrix\n\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\nimport snowballstemmer\n\n\nclass spam():\n\n\n def __init__(self):\n pass\n\n\n def data_input(self, loc, filename):\n try:\n os.chdir(loc)\n f = open(filename, 'r')\n file = csv.reader(f, delimiter = ',')\n\n df = pd.DataFrame(np.array(list(file)))\n df.columns = df.iloc[0]\n df = df[1:]\n\n le = preprocessing.LabelEncoder()\n le.fit(df['v1'])\n df['v1'] = le.transform(df['v1'])\n print (df.shape)\n\n self.df = df\n\n except IOError:\n print ('PROBLEM READING: ' + filename)\n\n\n def data_cleaning(self):\n stop = set(stopwords.words('english'))\n lmtzr = WordNetLemmatizer()\n stemmer = snowballstemmer.stemmer('english')\n c = np.array(self.df.v2)\n self.corpus = []\n for i in range(len(self.df.v2)):\n review = re.sub('[^a-zA-Z]', ' ', c[i])\n review = [i for i in review.lower().split() if i not in stop]\n l = [lmtzr.lemmatize(x) for x in review]\n s = stemmer.stemWords(l)\n review = ' '.join(s)\n self.corpus.append(review)\n print (self.corpus) \n\n \n def vectorizer(self):\n cv = CountVectorizer()\n self.X = cv.fit_transform(self.coupus).toarray()\n self.y = self.df['v1']\n\n\n def data_split(self):\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size = 0.20)\n\n\n def classifier(self):\n classifier = BernoulliNB()\n classifier.fit(self.X_train, self.y_train)\n\n\n\nif __name__ == '__main__':\n\n loc = os.getcwd() + '\\data'\n filename = 'spam.csv'\n s = spam()\n s.data_input(loc, filename)\n s.data_cleaning()\n s.vectorizer()\n s.data_split()\n s.classifier()\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.naive_bayes.BernoulliNB",
"numpy.array",
"sklearn.preprocessing.LabelEncoder"
]
] |
DrFargo/Starship-Simulation
|
[
"ff1c30cd8227c7041357a1e2da2fcb34ab06a757"
] |
[
"src/base.py"
] |
[
"## Author DrFargo\r\n## Created: 2021-02-07\r\n## Latest update: 2021-02-12\r\n\r\nimport matplotlib\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport numpy as np\r\nimport scipy.linalg as la\r\nimport matplotlib.pyplot as plt\r\n\r\nclass starshipSimulation:\r\n def parameters(self, g, lox, engines):\r\n gravity = g\r\n tlox = lox\r\n rapteng = engines*2.3\r\n m_fuel = 1.8\r\n m_ox = 2.2\r\n \r\n #def DragForce(self, v):\r\n \r\n\r\n def Render(self, filename):\r\n fig = plt.figure()\r\n ax = fig.gca(projection='3d') \r\n ax = self.XYZLabels(ax, 12000) \r\n plt.savefig(filename + \".png\")\r\n plt.show()\r\n \r\n def explode(self,t):\r\n ax.text(0, 0, 0, \"red\", color='red')\r\n return y[1]\r\n \r\n def XYZLabels(self, ax, Limit):\r\n TopAlt = np.max(Limit)\r\n Lim = TopAlt*1.1\r\n ax.set_zlim3d([0,2*Lim])\r\n ax.set_xlim3d([-Lim,Lim])\r\n ax.set_ylim3d([-Lim,Lim])\r\n ax.set_xlabel(\"Eastings\")\r\n ax.set_ylabel(\"Northings\")\r\n ax.set_zlabel(\"Altitude\")"
] |
[
[
"numpy.max",
"matplotlib.pyplot.show",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure"
]
] |
roshan19041/Causal-Discovery
|
[
"900cfc94d9fc3ff3d75366b00bda3acd044ed638",
"900cfc94d9fc3ff3d75366b00bda3acd044ed638"
] |
[
"src/Loss.py",
"src/OrientationModels.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 23 18:59:20 2019\n\n@author: roshanprakash\n\"\"\"\nimport tensorflow as tf\n\ndef compute_loss(generated_data, observed_data):\n \"\"\"\n Computes the Maximum Mean Discrepancy between generated data and observational data.\n \n PARAMETERS\n ----------\n - generated_data (numpy array) : the generated data of shape (N, D)\n - observed_data (numpy array) : the corresponding ground truth data of shape (N, D)\n \n RETURNS\n -------\n - the MMD loss.\n \n REFERENCE\n ---------\n [1.] Training generative neural networks via Maximum Mean Discrepancy optimization\n [2.] Link : https://arxiv.org/pdf/1505.03906.pdf\n \"\"\"\n N = tf.cast(tf.shape(observed_data)[0], dtype=tf.float32)\n GAMMA = tf.constant(0.01, dtype=tf.float32, name='gamma')\n MULTIPLIERS = tf.concat([tf.ones([N, 1])/N, tf.ones([N, 1])/-N], axis=0)\n X = tf.concat(values=[generated_data, observed_data], axis=0)\n DOTS = tf.matmul(X, tf.transpose(X))\n SQUARE_SUMS = tf.transpose(tf.reduce_sum(tf.square(X), axis=1, keepdims=True))\n EXPONENT_TERMS = tf.add_n([tf.scalar_mul(-2, DOTS), tf.broadcast_to(SQUARE_SUMS, tf.shape(DOTS)), \\\n tf.broadcast_to(tf.transpose(SQUARE_SUMS), tf.shape(DOTS))])\n MMDLoss = tf.reduce_sum(tf.multiply(MULTIPLIERS, tf.exp(tf.scalar_mul(-GAMMA, EXPONENT_TERMS))))\n return MMDLoss",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 23 11:41:26 2019\n\n@author: roshanprakash\n\"\"\"\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom scipy.stats import ttest_ind\nfrom sklearn.tree import DecisionTreeRegressor as DT\nfrom sklearn.preprocessing import scale\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.metrics import mean_squared_error as mse\nfrom joblib import Parallel, delayed\nfrom CausalGraphicalModel import Component\nfrom Loss import *\nimport multiprocessing\n \nclass BasicGNN(Component):\n \n \"\"\" A Basic function approximator ; Y = f(X,noise) \"\"\"\n \n def __init__(self, batch_size=256, lr=0.001, num_hidden_layers=1, nh_per_layer=[128], training_epochs=1000, test_epochs=100):\n \"\"\"\n Initializes the basic GNN.\n \n PARAMETERS\n ----------\n - batch_size (int, default=256) : the size of mini-batches used while training the network\n - lr (float, default=0.001) : the learning rate for this basic Generative Neural Network\n - num_hidden_layers (int, default=1) : the number of hidden layers to be used in the network\n - nh_per_layer (list, default=[128]) : the number of hidden units in each layer of the component networks \n (Requires : <num_hidden_layers>=len(<nh_per_layer>))\n - training_epochs (int, default=1000) : the number of training epochs\n - test_epochs (int, default=100) : the number of passes of data into the trained model to compute the score for a causal direction\n \n RETURNS\n -------\n - None\n \"\"\"\n #tf.reset_default_graph()\n DIMENSIONS = [1, num_hidden_layers, nh_per_layer, 1]\n super(BasicGNN, self).__init__(DIMENSIONS)\n self.batch_size = batch_size\n self.learning_rate = lr\n self.optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n self.training_epochs = training_epochs\n self.test_epochs = test_epochs\n self.X = tf.placeholder(dtype=tf.float32, shape=[None, 1])\n self.noise_inputs = tf.placeholder(dtype=tf.float32, shape=[None, 1])\n self.observed_data = tf.placeholder(dtype=tf.float32, shape=[None, 1])\n self.model_input = tf.concat([self.X, self.noise_inputs], axis=1)\n self.generated_data = self.forward_pass(self.model_input)\n self.loss = compute_loss(self.generated_data, self.observed_data)\n self.train_step = self.optimizer.minimize(self.loss)\n \n def run(self, input_data, ground_truth_data, sess, is_training=False):\n \"\"\" \n Computes a forward pass through the network ; (equivalent to generating a new data instance from noise.)\n \n PARAMETERS\n ----------\n - input_data (numpy array) : the input data, shape (N, 1)\n - ground_truth_data (numpy array) : the ground truth data, shape (N, 1)\n - sess : a tensorflow session\n - is_training (bool, default=False) : if True, updates weights if necessary\n \n RETURNS\n -------\n - a numpy array of generated data and the MMD loss.\n \"\"\"\n noise = np.random.normal(3, 1, ground_truth_data.shape[0])\n noise = noise[:, np.newaxis]\n if is_training:\n generated_data, loss, _ = sess.run([self.generated_data, self.loss, self.train_step], feed_dict={ \\\n self.X:input_data, self.observed_data:ground_truth_data, self.noise_inputs:noise})\n else:\n generated_data, loss = sess.run([self.generated_data, self.loss], feed_dict={self.X:input_data, \\\n self.observed_data:ground_truth_data, self.noise_inputs:noise})\n return generated_data, loss\n \n def _sample_batches(self, data):\n \"\"\" \n Samples batches of training data for an epoch.\n \n PARAMETERS\n ----------\n - data (numpy 
array) : the input data of shape (N, 2)\n \n RETURNS\n -------\n - a list containing batches wherein each batch in a numpy array of shape [<batch_size>, d] where d is the dimensions of the data.\n \"\"\"\n shuffled = np.random.permutation(data)\n return [shuffled[k*self.batch_size:k*self.batch_size+self.batch_size] for k in range(data.shape[0]//self.batch_size)]\n \n def compute_score(self, device_manager, data, direction='XY'):\n \"\"\"\n Trains the Causal Generative Network, evaluates it and saves the trained model.\n \n PARAMETERS\n ----------\n - data (numpy array) : the input data of shape (N, 2)\n \n RETURNS\n -------\n - the mean test loss after training the model.\n \"\"\"\n if direction=='XY':\n x_idx = 0\n y_idx = 1\n elif direction=='YX':\n x_idx = 1\n y_idx = 0\n else:\n raise ValueError('Invalid value for argument `direction`!')\n \n if device_manager.GPUs>0:\n device_count = {'GPU':device_manager.GPUs}\n else:\n device_count = {'CPU':device_manager.njobs}\n \n with tf.Session(config=tf.ConfigProto(device_count=device_count, allow_soft_placement=True)) as sess:\n sess.run(tf.global_variables_initializer())\n test_loss = 0.0\n for epoch in range(1, self.training_epochs+self.test_epochs+1):\n if epoch<=self.training_epochs:\n mini_batches = self._sample_batches(data)\n for iteration in range(1, len(mini_batches)+1):\n \n generated_data, loss = self.run(mini_batches[iteration-1][:, x_idx][:, np.newaxis], \\\n mini_batches[iteration-1][:, y_idx][:, np.newaxis], \\\n sess, is_training=True)\n else:\n shuffled_idxs = np.random.permutation(data.shape[0])\n generated_data, loss = self.run(data[:, x_idx][:, np.newaxis][shuffled_idxs], \\\n data[:, y_idx][:, np.newaxis][shuffled_idxs], \\\n sess, is_training=False)\n test_loss+=loss\n return test_loss/self.test_epochs\n \nclass OrientationNet: \n \n \"\"\" A Pairwise GNN used for inferring causal relationship between two nodes. 
\"\"\"\n \n def __init__(self, batch_size=256, lr=0.001, num_hidden_layers=1, nh_per_layer=[128], training_epochs=1000, \\\n test_epochs=100, max_iterations=3, runs_per_iteration=5, threshold=0.01):\n \"\"\"\n Initializes orientation network.\n \n PARAMETERS\n ----------\n - batch_size (int, default=256) : the size of mini-batches used while training the network\n - lr (float, default=0.001) : the learning rate for this basic Generative Neural Network\n - num_hidden_layers (int, default=1) : the number of hidden layers to be used in the network\n - nh_per_layer (list, default=[128]) : the number of hidden units in each layer of the component networks (Requires : <num_hidden_layers>=len(<nh_per_layer>))\n - training_epochs (int, default=1000) : the number of training epochs\n - test_epochs (int, default=100) : the number of passes of data into the trained model to compute the score for a causal direction\n - max_iterations (int, default=3) : the maximum number of iterations for each direction's network \n - runs_per_iteration (int, default=5) : the number of runs in each iteration for scoring each direction's network ;\n averaged results for unbiased estimates of scores\n - threshold (float, default=0.01) : the threshold for the p-value in the t-test\n \n RETURNS\n -------\n - None\n \"\"\"\n self.batch_size = batch_size\n self.learning_rate = lr\n self.num_hidden_layers = num_hidden_layers\n self.units_per_layer = nh_per_layer\n self.training_epochs = training_epochs\n self.test_epochs = test_epochs\n # TTest criterion specific initializations\n self.test_threshold = threshold\n self.pval = np.inf\n self.run_count = 0 # initialize number of runs \n self.runs_per_iteration = runs_per_iteration\n self.max_iterations = max_iterations\n self.XY_scores = []\n self.YX_scores = []\n \n def reset(self):\n \"\"\" Resets some testing characteristics \"\"\"\n self.pval = np.inf\n self.run_count = 0 # re-initialize number of runs \n self.XY_scores = []\n self.YX_scores = []\n \n def _check_stop_loop(self):\n \"\"\"\n Checks if the loop for scoring direction should stop.\n \n PARAMETERS\n ----------\n - None\n \n RETURNS\n -------\n - True or False.\n \"\"\"\n if self.run_count==0:\n return False\n t_statistic, self.pval = ttest_ind(self.XY_scores, self.YX_scores, equal_var=False)\n if self.run_count<self.runs_per_iteration*self.max_iterations and self.pval>self.test_threshold:\n return False\n else:\n return True\n \n def _compute_direction_score(self, data): # PARALLELIZE FUNCTION!\n \"\"\"\n Computes the scores for both directions, X-->Y and Y-->X, based on a t-test between results from fitting a Basic-GNN to pairwise data, multiple times.\n \n PARAMETERS\n ----------\n - data (numpy array) : the input data of shape (N, 2)\n \n RETURNS\n -------\n - a score between -1 and 1 ; if score<0, Y-->X else X-->Y.\n \"\"\"\n # setup device manager \n device_manager = DeviceManager(autoset=True)\n while self._check_stop_loop() is False:\n for run in range(self.runs_per_iteration):\n # compute model scores for X-->Y\n tf.reset_default_graph()\n GNN_XY = BasicGNN(batch_size=self.batch_size, lr=self.learning_rate, num_hidden_layers=self.num_hidden_layers, \\\n nh_per_layer=self.units_per_layer, training_epochs=self.training_epochs, test_epochs=self.test_epochs)\n self.XY_scores.append(GNN_XY.compute_score(device_manager, data, direction='XY'))\n # compute model scores for Y-->X\n tf.reset_default_graph()\n GNN_YX = BasicGNN(batch_size=self.batch_size, lr=self.learning_rate, 
num_hidden_layers=self.num_hidden_layers, \\\n nh_per_layer=self.units_per_layer, training_epochs=self.training_epochs, test_epochs=self.test_epochs)\n self.YX_scores.append(GNN_YX.compute_score( device_manager, data, direction='YX'))\n self.run_count+=self.runs_per_iteration\n XY_score = np.mean(self.XY_scores)\n YX_score = np.mean(self.YX_scores)\n return (YX_score-XY_score)/(YX_score+XY_score) \n \nclass OrientationTree:\n \n \"\"\" Decision Tree Regression Model for orienting edges. \"\"\"\n \n def __init__(self, test_size=0.25):\n \"\"\"\n Initialize the tree based regressor.\n \n PARAMETERS\n ----------\n - test_size (float, default=0.25) : the proportion of samples to be used as test data\n \n RETURNS\n -------\n - None\n \"\"\"\n self.test_size = test_size\n \n def _fit_score(self, x, y, model, seed):\n \"\"\"\n Fits a decision tree regressor to the data, for y=f(x)\n \n PARAMETERS\n ----------\n - x (numpy array) : the covariate(s), of shape (N, D) ; expected shape --> (N, 1)\n - y (numpy array) : the target(s), of shape (N, D) ; expected shape --> (N, 1)\n - model (sklearn.tree.DecisionTreeRegressor) : the model to fit to the data\n - run_idx (int) : the seed for the numpy.random ; used for reproducability\n \n RETURNS\n -------\n - the MSE on the test data, after training the model.\n \"\"\"\n state = np.random.get_state()\n np.random.seed(int(0.78*seed))\n shuffled_idxs = np.random.permutation(x.shape[0])\n np.random.set_state(state)\n model.fit(x[shuffled_idxs][int(x.shape[0]*self.test_size):], y[shuffled_idxs][int(y.shape[0]*self.test_size):])\n return mse(model.predict(x[shuffled_idxs][:int(x.shape[0]*self.test_size)]), y[shuffled_idxs][:int(y.shape[0]*self.test_size)])\n \n def _compute_direction_score(self, data, nruns=16):\n \"\"\"\n Fits a decision tree regressor to the data, for y=f(x)\n \n PARAMETERS\n ----------\n - data (numpy array) : the input data, of shape (N, 2) \n - nruns (int) : the number of runs for scoring each model\n \n RETURNS\n -------\n - a score between -1 and 1 ; if score<0, Y-->X else X-->Y.\n \"\"\"\n x, y = data[:, 0][:, np.newaxis], data[:, 1][:, np.newaxis]\n param_grid = {'max_features':['log2', 'sqrt', 0.5], \\\n 'min_samples_split':[2, 8, 64, 512, 1e-2, .2, .4]}\n # y = f(x)\n cv_splits = ShuffleSplit(n_splits=3, test_size=0.2)\n model0 = DT(**GridSearchCV(DT(random_state=1), param_grid=param_grid, n_jobs=-1, cv=cv_splits).fit(x, scale(y)).best_params_)\n time.sleep(1)\n model0_scores = Parallel(n_jobs=-1)(delayed(self._fit_score)(x, scale(y), model0, _) for _ in range(nruns))\n # x = f(y)\n cv_splits = ShuffleSplit(n_splits=3, test_size=0.2)\n model1 = DT(**GridSearchCV(DT(random_state=300), param_grid=param_grid, n_jobs=-1, cv=cv_splits).fit(y, scale(x)).best_params_)\n model1_scores = Parallel(n_jobs=-1)(delayed(self._fit_score)(y, scale(x), model1, _) for _ in range(nruns))\n # direction score\n a = np.array(model0_scores)-np.array(model1_scores)\n return np.mean(a)\n \nclass DeviceManager:\n \n \"\"\" A device context manager. 
\"\"\"\n \n def __init__(self, autoset=True):\n \"\"\" \n Initialize the manager\n \n PARAMETERS\n ----------\n autoset (bool, default=True) : Looks for the system's GPU and CPU capabilities and sets up worker characteristics automatically.\n \n RETURNS\n -------\n - None.\n \"\"\"\n # default characteristics\n self.njobs = multiprocessing.cpu_count()\n self.GPUs = 0\n if autoset:\n self.autoset()\n \n def autoset(self):\n \"\"\" Looks for the system's GPU and CPU capabilities and sets up worker characteristics automatically. \"\"\"\n try:\n # look for ID's of user-set GPUs\n devices = ast.literal_eval(os.environ['CUDA_VISIBLE_DEVICES']) # look for CUDA supported GPU\n if type(devices)!=list and type(devices)!=tuple:\n devices = [devices]\n self.njobs = len(devices)\n self.GPUs = len(devices)\n print('Detected {} CUDA supported device(s)!'.format(self.NJOBS)) \n except: # key error ; no environment variable called 'CUDA_VISIBLE_DEVICES'\n self.GPUs = len(GPUtil.getAvailable(order='first', limit=8, maxLoad=0.5,\\\n maxMemory=0.5, includeNan=False))\n if self.GPUs==0:\n print('No GPU devices found! Setting n_jobs to number of CPUs..')\n self.njobs = multiprocessing.cpu_count()\n else:\n print('GPU devices found! Setting n_jobs to number of available GPU devices ..')\n self.njobs = self.GPUs"
] |
[
[
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.scalar_mul",
"tensorflow.ones",
"tensorflow.square"
],
[
"numpy.random.get_state",
"tensorflow.concat",
"sklearn.model_selection.ShuffleSplit",
"sklearn.tree.DecisionTreeRegressor",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"numpy.random.normal",
"numpy.random.permutation",
"numpy.mean",
"numpy.random.set_state",
"tensorflow.train.AdamOptimizer",
"tensorflow.reset_default_graph",
"sklearn.preprocessing.scale",
"numpy.array",
"scipy.stats.ttest_ind"
]
] |
shivammehta007/NLPinEnglishLearning
|
[
"ae869d868e39df9b1787134ba6e964acd385dd2e"
] |
[
"classifier/train.py"
] |
[
"\"\"\"\nTraining script for the model\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tqdm.auto import tqdm\n\nfrom config.hyperparameters import (\n BATCH_SIZE,\n BIDIRECTION,\n DROPOUT,\n EMBEDDING_DIM,\n EPOCHS,\n FREEZE_EMBEDDINGS,\n HIDDEN_DIM,\n LR,\n N_LAYERS,\n WEIGHT_DECAY,\n CNN_N_FILTER,\n CNN_FILTER_SIZES,\n LINEAR_HIDDEN_DIM,\n)\nfrom config.root import (\n LOGGING_FORMAT,\n LOGGING_LEVEL,\n TRAINED_CLASSIFIER_FOLDER,\n TRAINED_CLASSIFIER_RNNHIDDEN,\n device,\n seed_all,\n SEED,\n)\nfrom datasetloader import GrammarDasetMultiTag, GrammarDasetAnswerTag\nfrom helperfunctions import evaluate, train, train_tag_model, evaluate_tag_model\nfrom model import (\n RNNHiddenClassifier,\n RNNMaxpoolClassifier,\n CNN2dClassifier,\n CNN1dClassifier,\n RNNFieldClassifer,\n CNN1dExtraLayerClassifier,\n)\nfrom utility import categorical_accuracy, epoch_time\n\n# Initialize logger for this file\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=LOGGING_LEVEL, format=LOGGING_FORMAT)\n\n\ndef count_parameters(model):\n \"\"\"Method to count the number of parameters\"\"\"\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef initialize_new_model(\n classifier_type,\n dataset,\n embedding_dim,\n hidden_dim,\n n_layers,\n bidirectional,\n dropout,\n freeze_embeddings,\n dataset_tag,\n linear_hidden_dim,\n):\n \"\"\"Method to initialise new model, takes in dataset object and hyperparameters as parameter\"\"\"\n logger.debug(\"Initializing Model\")\n if dataset_tag == \"multi\":\n VOCAB_SIZE = len(dataset.question.vocab)\n PAD_IDX = dataset.question.vocab.stoi[dataset.question.pad_token]\n pretrained_embeddings = dataset.question.vocab.vectors\n UNK_IDX = dataset.question.vocab.stoi[dataset.question.unk_token]\n else:\n VOCAB_SIZE = len(dataset.text.vocab)\n PAD_IDX = dataset.text.vocab.stoi[dataset.text.pad_token]\n pretrained_embeddings = dataset.text.vocab.vectors\n UNK_IDX = dataset.text.vocab.stoi[dataset.text.unk_token]\n\n OUTPUT_LAYERS = len(dataset.label.vocab)\n\n if classifier_type == \"RNNHiddenClassifier\":\n\n model = RNNHiddenClassifier(\n VOCAB_SIZE,\n embedding_dim,\n hidden_dim,\n OUTPUT_LAYERS,\n n_layers,\n bidirectional,\n dropout,\n PAD_IDX,\n )\n\n elif classifier_type == \"RNNMaxpoolClassifier\":\n model = RNNMaxpoolClassifier(\n VOCAB_SIZE,\n embedding_dim,\n hidden_dim,\n OUTPUT_LAYERS,\n n_layers,\n bidirectional,\n dropout,\n PAD_IDX,\n )\n elif classifier_type == \"CNN2dClassifier\":\n model = CNN2dClassifier(\n VOCAB_SIZE,\n embedding_dim,\n CNN_N_FILTER,\n CNN_FILTER_SIZES,\n OUTPUT_LAYERS,\n dropout,\n PAD_IDX,\n )\n elif classifier_type == \"CNN1dClassifier\":\n model = CNN1dClassifier(\n VOCAB_SIZE,\n embedding_dim,\n CNN_N_FILTER,\n CNN_FILTER_SIZES,\n OUTPUT_LAYERS,\n dropout,\n PAD_IDX,\n )\n elif classifier_type == \"RNNFieldClassifer\":\n model = RNNFieldClassifer(\n VOCAB_SIZE,\n embedding_dim,\n hidden_dim,\n OUTPUT_LAYERS,\n n_layers,\n bidirectional,\n dropout,\n PAD_IDX,\n dataset.tags,\n )\n elif classifier_type == \"CNN1dExtraLayerClassifier\":\n model = CNN1dExtraLayerClassifier(\n VOCAB_SIZE,\n embedding_dim,\n CNN_N_FILTER,\n CNN_FILTER_SIZES,\n linear_hidden_dim,\n OUTPUT_LAYERS,\n dropout,\n PAD_IDX,\n )\n else:\n raise TypeError(\"Invalid Classifier selected\")\n\n if freeze_embeddings:\n model.embedding.weight.requires_grad = False\n\n logger.debug(\n \"Freeze Embeddings Value {}: {}\".format(\n freeze_embeddings, 
model.embedding.weight.requires_grad\n )\n )\n\n logger.info(\n \"Model Initialized with {:,} trainiable parameters\".format(\n count_parameters(model)\n )\n )\n\n # Initialize pretrained word embeddings\n\n model.embedding.weight.data.copy_(pretrained_embeddings)\n\n # Initialize Padding and Unknown as 0\n model.embedding.weight.data[UNK_IDX] = torch.zeros(embedding_dim)\n model.embedding.weight.data[PAD_IDX] = torch.zeros(embedding_dim)\n\n logger.debug(\"Copied PreTrained Embeddings\")\n return model\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Utility to train the Model\")\n\n parser.add_argument(\n \"-s\",\n \"--seed\",\n default=SEED,\n help=\"Set custom seed for reproducibility\",\n type=int,\n )\n\n parser.add_argument(\n \"-loc\",\n \"--model-location\",\n default=None,\n help=\"Give an already trained model location to use and train more epochs on it\",\n )\n\n parser.add_argument(\n \"-b\",\n \"--bidirectional\",\n default=BIDIRECTION,\n help=\"Makes the model Bidirectional\",\n type=bool,\n )\n parser.add_argument(\n \"-d\",\n \"--dropout\",\n default=DROPOUT,\n help=\"Dropout count for the model\",\n type=float,\n )\n parser.add_argument(\n \"-e\",\n \"--embedding-dim\",\n default=EMBEDDING_DIM,\n help=\"Embedding Dimensions\",\n type=int,\n )\n parser.add_argument(\n \"-hd\",\n \"--hidden-dim\",\n default=HIDDEN_DIM,\n help=\"Hidden dimensions of the RNN\",\n type=int,\n )\n parser.add_argument(\n \"-l\", \"--n-layers\", default=N_LAYERS, help=\"Number of layers in RNN\", type=int\n )\n parser.add_argument(\n \"-lr\",\n \"--learning-rate\",\n default=LR,\n help=\"Learning rate of Adam Optimizer\",\n type=float,\n )\n parser.add_argument(\n \"-n\",\n \"--epochs\",\n default=EPOCHS,\n help=\"Number of Epochs to train model\",\n type=int,\n )\n parser.add_argument(\n \"-batch\",\n \"--batch_size\",\n default=BATCH_SIZE,\n help=\"Number of Epochs to train model\",\n type=int,\n )\n\n parser.add_argument(\n \"-f\",\n \"--freeze-embeddings\",\n default=FREEZE_EMBEDDINGS,\n help=\"Freeze Embeddings of Model\",\n type=int,\n )\n\n parser.add_argument(\n \"-t\",\n \"--tag\",\n default=\"answeronly\",\n choices=[\"multi\", \"answeronly\"],\n help=\"Use two different dataset type, multi type and Answer only\",\n )\n\n parser.add_argument(\n \"-l2\",\n \"--l2-regularization\",\n default=WEIGHT_DECAY,\n help=\"Value of alpha in l2 regularization 0 means no regularization \",\n type=float,\n )\n\n parser.add_argument(\n \"-m\",\n \"--model\",\n default=\"RNNHiddenClassifier\",\n choices=[\n \"RNNHiddenClassifier\",\n \"RNNMaxpoolClassifier\",\n \"RNNFieldClassifier\",\n \"CNN2dClassifier\",\n \"CNN1dClassifier\",\n \"RNNFieldClassifer\",\n \"CNN1dExtraLayerClassifier\",\n ],\n help=\"select the classifier to train on\",\n )\n\n parser.add_argument(\n \"-lhd\",\n \"--linear-hidden-dim\",\n default=LINEAR_HIDDEN_DIM,\n help=\"Freeze Embeddings of Model\",\n type=int,\n )\n\n args = parser.parse_args()\n\n seed_all(args.seed)\n logger.debug(args)\n logger.debug(\"Custom seed set with: {}\".format(args.seed))\n\n logger.info(\"Loading Dataset\")\n\n if args.tag == \"multi\":\n dataset = GrammarDasetMultiTag.get_iterators(args.batch_size)\n else:\n dataset = GrammarDasetAnswerTag.get_iterators(args.batch_size)\n\n logger.info(\"Dataset Loaded Successfully\")\n\n if args.model_location:\n model = torch.load(args.model_location)\n else:\n model = initialize_new_model(\n args.model,\n dataset,\n args.embedding_dim,\n args.hidden_dim,\n args.n_layers,\n 
args.bidirectional,\n args.dropout,\n args.freeze_embeddings,\n args.tag,\n args.linear_hidden_dim,\n )\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(\n model.parameters(), lr=LR, weight_decay=args.l2_regularization\n )\n\n model = model.to(device)\n criterion = criterion.to(device)\n\n logger.info(model)\n\n if not os.path.exists(TRAINED_CLASSIFIER_FOLDER):\n os.mkdir(TRAINED_CLASSIFIER_FOLDER)\n\n best_test_loss = float(\"inf\")\n\n for epoch in range(int(args.epochs)):\n\n start_time = time.time()\n if args.model == \"RNNFieldClassifer\":\n train_loss, train_acc = train_tag_model(\n model, dataset.train_iterator, optimizer, criterion, dataset.tags\n )\n test_loss, test_acc = evaluate_tag_model(\n model, dataset.test_iterator, criterion, dataset.tags\n )\n\n else:\n train_loss, train_acc = train(\n model, dataset.train_iterator, optimizer, criterion, args.tag\n )\n test_loss, test_acc = evaluate(\n model, dataset.test_iterator, criterion, args.tag\n )\n\n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if test_loss < best_test_loss:\n best_test_loss = test_loss\n torch.save(\n model,\n os.path.join(TRAINED_CLASSIFIER_FOLDER, TRAINED_CLASSIFIER_RNNHIDDEN),\n )\n\n print(f\"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s\")\n print(f\"\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%\")\n print(f\"\\t Val. Loss: {test_loss:.3f} | Val. Acc: {test_acc*100:.2f}%\")\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.zeros"
]
] |
JoshMend/prebotc-graph-model
|
[
"4002e51ab965be366b30c2a6d900ac288fa41245"
] |
[
"postprocessing_preBotBot/doPost.py"
] |
[
"#!/usr/bin/env python\n'''\nThis file does all the post processing for a given mat file at once. This includes: \n\n1) Deleting Transient\n2)Binning the spikes \n3)Filter spikes using gaussian distribution \n4)Using butterworth filter to remove high frequency signals to smooth \n5)Finds Phase Lag and Population Correlation\n'''\nimport sys\nimport numpy as np\nimport scipy.signal\nimport scipy.io\nimport argparse\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport cmath\nimport math\n\nmaxorder=20\neta_norm_pts = 10\n\n\ndef parse_args(argv):\n # defaults\n transient = 10000 # ms\n spike_thresh = -20 # mV\n f_sigma = 20 # ms\n butter_high = 4 # Hz\n butter_low = -np.inf # Hz\n bin_width = 20 # ms\n cutoff = 0.5\n peak_order = 30\n peak_percentile = 75\n eta_norm_pts=8\n op_abs_thresh=0.2\n # parsing\n parser = argparse.ArgumentParser(prog=\"doPost\",\n description=('Postprocessing of' \n ' model output'))\n parser.add_argument('sim', help='model output (.mat) file')\n parser.add_argument('output', help='output (.jpg) filename')\n parser.add_argument('--transient', '-t', \n help='transient time, ms (default: %(default)s)', \n type=float, default=transient)\n parser.add_argument('--sec', '-s', action='store_true',\n help='time units are in seconds (default: ms)')\n parser.add_argument('--volt', '-V', action='store_true',\n help=('file contains voltage traces '\n '(default: sparse spike trains)'))\n parser.add_argument('--thresh', \n help='spike threshold, mV (default: %(default)s)',\n type=float, default=spike_thresh) \n parser.add_argument('--fsig', '-f', \n help=('filter standard deviation, ms '\n '(default: %(default)s)'),\n type=float, default=f_sigma)\n parser.add_argument('--butter_high', \n help=('Butterworth filter upper cutoff frequency, Hz '\n '(default: %(default)s)'),\n type=float, default=butter_high)\n parser.add_argument('--butter_low', \n help=('Butterworth filter lower cutoff frequency, Hz '\n '(default: %(default)s)'),\n type=float, default=butter_low)\n parser.add_argument('--bin_width', '-b', \n help='bin width, ms (default: %(default)s)',\n type=float, default=bin_width)\n parser.add_argument('--cut', '-c', \n help='burst cutoff parameter (default: %(default)s)',\n type=float, default=cutoff)\n args = parser.parse_args(argv[1:])\n return args.sim, args.output, args.transient, args.sec, args.thresh, \\\n args.fsig, args.butter_low, args.butter_high, args.bin_width,\\\n args.cut, args.volt,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh,\n\n\n'''\n This method chops of the transient stage of the data for better processing\n\n parameters: data-Data being passed in to chop\n transient- time and which you want to chop till\n dt-the change in time of the model\n\n return: The modified data excluding transient stage \n'''\ndef chop_transient(data, transient, dt):\n \n firstIdx = int(np.ceil(transient / dt) - 1)\n return data[:,firstIdx:]\n\n'''\n Find spikes in voltage data by taking relative maxima\n\n parameters: data- self-explanatory\n threshhold- The voltage at which you start to count data as a spike\n return: new_indices-location of the maxima\n spike_mat-dense matrix containing 1 or 0 based on if a spike is present\n'''\ndef find_spikes(data, threshold):\n \n indices = scipy.signal.argrelmax(data, axis=1) # 1st and 2nd coords of maxima\n mask = np.where(data[indices] > threshold)\n new_indices = (indices[0][mask],\n indices[1][mask])\n spike_mat = np.zeros(np.shape(data), dtype=np.int) # dense format\n spike_mat[new_indices] = 1\n return new_indices, 
spike_mat\n\n'''\n Return time indices of spiking of a given neuron\n'''\ndef spikes_of_neuron(spikes, neuron):\n \n return spikes[1][np.where(spikes[0] == neuron)]\n\n'''\n Filter the spike timeseries. Returns both neuron-by-neuron timeseries\n filtered with a gaussian kernel and the population data filtered\n with a butterworth filter.\n\n Parameters\n ==========\n spike_mat: the numneuron x time matrix of spikes\n samp_freq: sample frequency\n f_sigma: variance of gaussian\n butter_freq: butterworth filter cutoff frequency(s)\n\n Returns\n =======\n spike_fil: gaussian filtered matrix, same shape as spike_mat\n int_signal: butterworth filtered population timeseries\n spike_fil_butter: butterworth filtered matrix, same shape as spike_mat\n'''\n\ndef spikes_filt(spike_mat, samp_freq, f_sigma, butter_freq):\n '''\n Filter the spike timeseries. Returns both neuron-by-neuron timeseries\n filtered with a gaussian kernel and the population data filtered\n with a butterworth filter.\n\n Parameters\n ==========\n spike_mat: the numneuron x time matrix of spikes\n samp_freq: period (in ms) between measurements in spike_mat\n f_sigma: variance of gaussian\n butter_freq: butterworth filter cutoff frequency(s)\n\n Returns\n =======\n spike_fil: gaussian filtered matrix, same shape as spike_mat\n int_signal: butterworth filtered population timeseries\n spike_fil_butter: butterworth filtered matrix, same shape as spike_mat\n '''\n def filt_window_gauss(samp_freq, std = 20, width = None, normalize = 1):\n if width is None:\n width = std*4+1\n width /= samp_freq\n std /= samp_freq\n w = scipy.signal.gaussian(width, std)\n if not normalize == 0:\n w = normalize * w / sum(w)\n return w\n def filt_gauss(spike_mat, samp_freq, f_sigma=20):\n w = filt_window_gauss(samp_freq, std=f_sigma, normalize=1)\n spike_fil = scipy.signal.fftconvolve(spike_mat, w[ np.newaxis, : ], \n mode='same')\n #spike_fil = scipy.signal.convolve(spike_mat, w[ np.newaxis, : ], \n # mode='same')\n return spike_fil\n def filt_butter(data, samp_freq, butter_freq, axis=-1):\n '''\n Filter data with a 2nd order butterworth filter.\n \n Parameters\n ==========\n data: ndarray\n samp_freq: sampling period (s)\n butter_freq: [cutoff_low, cutoff_high] (Hz), can be infinite\n axis (optional): axis along which to filter, default = -1\n Returns\n =======\n filtNs: filtered version of data\n '''\n order = 2\n ny = 0.5 / samp_freq # Nyquist frequency\n cof = butter_freq / ny # normalized cutoff freq\n if np.isneginf(cof[0]) and np.isfinite(cof[1]):\n # lowpass\n cof1 = cof[1]\n b, a = scipy.signal.butter(order, cof1, btype='low')\n filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)\n elif np.isfinite(cof[0]) and np.isinf(cof[1]):\n # highpass\n cof1 = cof[0]\n b, a = scipy.signal.butter(order, cof1, btype='high')\n filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)\n elif np.isfinite(cof[0]) and np.isfinite(cof[1]):\n # bandpass\n b, a = scipy.signal.butter(order, cof, btype='band')\n filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)\n else:\n raise Exception('filt_butter called with bad cutoff frequency')\n filtNs /= samp_freq # normalize to rate\n return filtNs\n spike_fil = filt_gauss(spike_mat, samp_freq, f_sigma=f_sigma) \n int_signal = filt_butter(np.mean(spike_mat, axis=0), \n samp_freq*1e-3, butter_freq)\n spike_fil_butter = filt_butter(spike_fil, samp_freq*1e-3, \n butter_freq, axis=1)\n return spike_fil, int_signal, spike_fil_butter\n\n'''\n Bin spikes\n\n Parameters\n ==========\n spike_mat: matrix of spikes, (num_neuron x 
num_time)\n bin_width: bin width in time units\n dt: sampling frequency in spike mat\n\n Returns\n =======\n bins: an array of the bin locations in time units\n binned_spikes: a new matrix (num_neuron x num_bins)\n '''\ndef bin_spikes(spike_mat, bin_width, dt):\n num_neurons= np.shape(spike_mat)[0]\n num_times = np.shape(spike_mat)[1]\n stride = int(np.ceil(bin_width / dt))\n bins = np.arange(0, num_times, stride, dtype=np.float)\n which_bins = np.digitize(range(0, num_times), bins)\n num_bins = len(bins)\n binned_spikes = np.zeros((num_neurons, num_bins), dtype=np.int)\n for i in range(num_bins):\n bin_mask = np.where(which_bins == i)[0] # mask data in bin i, tuple\n bin_data = spike_mat[:,bin_mask]\n binned_spikes[:,i] = np.sum(bin_data, axis=1).flatten()\n return bins, binned_spikes\n\n'''\n This is computes the cross correlation for two signals\n \n paramters:\n signal_1: first signal you want to use\n signal_2: second signal you want to use\n taolen: this number determines how much of the tao to use\n returns:\n values of the cross correlation\n'''\ndef xcorr(signal_1,signal_2):\n \n signal_1 = np.asarray(signal_1)\n signal_2 = np.asarray(signal_2)\n\n #Centering the data, giving it a zero mean to reduce variance and not worry about peak differences\n m1 = np.mean(signal_1)\n m2 = np.mean(signal_2)\n \n signal_1_centered = (signal_1 - m1) / (np.std(signal_1) * len(signal_1))\n signal_2_centered = (signal_2 - m2) / np.std(signal_2)\n\n xcorr = scipy.signal.correlate(signal_1_centered,signal_2_centered)\n return xcorr\n\n'''\n Gets info from the graph to be used for plotting\n'''\ndef get_graphinfo(graph_fn):\n graph = nx.read_gml(graph_fn)\n\n cells_inhib = np.array(nx.get_node_attributes(graph, 'inh').values(), \n dtype=np.int)\n graph_edges = nx.edges(graph)\n number_of_nodes = nx.number_of_nodes(graph)\n degree_histogram = nx.degree_histogram(graph)\n return cells_inhib, graph_edges,number_of_nodes,degree_histogram\n\n\n'''\n This method gets the time at which the peak occurs for a signal\n goes through the given peak_times and finds at which point the signal is the\n strongest\n\n peak_times: the times at which a peak in the signal occurs\n signal: The signal that you want to find the max of\n'''\ndef find_max_time(peak_times,signal):\n max_time = np.nan\n for t in peak_times:\n if np.isnan(max_time):\n max_time = t\n elif signal[t] > signal[max_time]:\n max_time = t\n return max_time\n\n'''\n This method finds the phase lag and population correlation for the data given. 
Use the max peak from the autocorrelation and then the cross correlation peak in the middle of those two \n \n input:\n xcorr-The cross correlation signal \n autocorr-An autocorrelations signal to be ran against\n \n output:\n phase-The phase lag/difference between two populations \n pop_corr-The correlation between both signals\n \n'''\ndef find_metrics(xcorr,autocorr):\n max_time_cross = np.nan;\n peak_auto = scipy.signal.argrelmax(autocorr)[0].tolist()\n peak_cross = scipy.signal.argrelmax(xcorr)[0].tolist()\n max_time = find_max_time(peak_auto,autocorr)\n for i in range(peak_auto.index(max_time)+1,len(peak_auto)):\n if autocorr[peak_auto[i]] > 0:\n max_time_next = peak_auto[i]\n break\n for x in peak_cross:\n if x > max_time and x < max_time_next and xcorr[x] > 0:\n max_time_cross = x\n break\n auto_period = max_time_next - max_time\n auto_cross_perioid = max_time_cross - max_time\n phase = float(auto_cross_perioid)/float(auto_period)\n return phase, xcorr[max_time_cross]\n\n'''\n This method finds the population burst peaks for a given signal, uses a percentile filter to elimnate finding noisy peaks \n \n input:\n signal-This is the signal you want to find the peaks of\n peak_order-The number of points of comparison for each peak on each side of the current value\n peak_percentile-The percentage threshold the peak must meet \n dt-The time step\n \n output:\n pop_burst_peak-Peaks of the signal that pass the given criteria for a peak\n'''\ndef burst_stats(signal,peak_order,peak_percentile,dt):\n pop_burst_peak=scipy.signal.argrelmax(signal, order=peak_order)[0]\n pop_burst_peak=pop_burst_peak[signal[pop_burst_peak] >\n np.percentile(signal,peak_percentile)]\n return pop_burst_peak\n\n'''\n This method is used to get the phi (phase differences) between signals, \n here we use a moving window to find the refrence perioid to calculate phi\n \n input:\n pop_burst_peak1(2)-This is the time for the peaks from signal 1 or signal 2 \n bins-This is the bin info for the signals after some post processing \n \n output: \n phis-List of phis for the signals\n'''\ndef get_phis(pop_burst_peak1,pop_burst_peak2,bins):\n phis = []\n windowStartIndex = 0\n windowEndIndex = 1\n while windowEndIndex < len(pop_burst_peak1):\n windowStart = pop_burst_peak1[windowStartIndex]\n windowEnd = pop_burst_peak1[windowEndIndex]\n peaksInWindow = [i for i in pop_burst_peak2 if i >= windowStart and i <= windowEnd]\n for peak in peaksInWindow:\n phi = (bins[peak] - bins[windowStart]) / (bins[windowEnd] - bins[windowStart])\n phis.append(phi)\n windowStartIndex = windowEndIndex\n windowEndIndex = windowEndIndex + 1\n return phis\n\n'''\n Map phi values to a circle to accuratley take mean and std of the values \n \n input:\n phis- Phi values that are in [0,1]\n output:\n phis- Phi values that are now mapped to [0,2pi] represents radians\n'''\ndef map_phi_to_complex(phis):\n complex = []\n for i in range(len(phis)):\n radians = 2*np.pi*phis[i]\n complex.append(cmath.rect(1,radians))\n return complex\n\n'''\n This will get the mean phi and variance using circular statistics \n \n input:\n complex_values- This is a list of complex values that are gotten from the phi values \n \n output:\n mean_angle- This is the mean angle of the phi values, represents what the average phase is (can be converted back)\n variance_circular- This is the variance of the angles, 0 represents all phi values are the same.\n'''\ndef get_circular_statistics(complex_values):\n mean_resultant = np.mean(complex_values)\n mean_angle = 
cmath.phase(mean_resultant)\n variance_circular = abs(mean_resultant)\n return mean_angle,variance_circular\n\n'''\n This converts the mean angle back to the standard phi values which lies in [0,1]\n \n input:\n mean_angle- This is the mean angle that was calculated from the list of phis\n \n output: \n This is the converted average phi values that now consisted with other metrics\n'''\ndef get_normalized_phi(mean_angle):\n if mean_angle < 0:\n return (2*math.pi + mean_angle) / (2*math.pi)\n else:\n return mean_angle / (2*math.pi)\n\ndef synchrony_stats(data, dt, maxlags=3000):\n '''\n Synchrony measures\n \n Parameters\n ==========\n data: numneuron x time\n dt: time spacing\n maxlags: maximal lag for autocorrelation, default=3000 ms\n \n Returns\n =======\n chi: synchrony measure\n autocorr: autocorrelation of population avg \\bar{data}(t)\n '''\n data_pop=np.mean(data, axis=0) # pop avg\n sigma_pop=np.mean(np.square(data_pop)) - np.square(np.mean(data_pop))\n sigma=np.mean(np.square(data), axis=1) - np.square(np.mean(data, axis=1))\n sigma_mean=np.mean(sigma)\n chisq=sigma_pop / sigma_mean\n chi=np.sqrt(chisq)\n mean_subtract=data_pop - np.mean(data_pop)\n autocorr=scipy.signal.correlate(mean_subtract, mean_subtract,\n mode='valid')\n return chi, autocorr\n\ndef order_param(eta_norm, eta_t_norm, op_abs_thresh):\n '''\n Compute the order parameter for the normalized (phase) ETAs.\n \n Parameters\n ==========\n eta_norm: normalized ETA array\n eta_t_norm: [-.5, .5] phases corresponding to second axis of array\n op_abs_thresh: float\n \n Returns\n =======\n ops: array of complex valued order parameters, np.nan if undefined\n op_abs: magnitudes\n op_angle: angles\n op_mask: mask of ops with magnitude above threshold\n op_angle_mean: mean angle of significant ops\n op_angle_std: standard deviation of significant ops\n '''\n assert op_abs_thresh < 0.5 and op_abs_thresh >= 0.0,\\\n 'op_abs_thresh out of range'\n num_neurons=eta_norm.shape[0]\n num_bins=eta_norm.shape[1]\n dtheta=np.min(np.diff(eta_t_norm))\n # below will generate NaNs if the normalization is 0\n density_eta=eta_norm/np.tile(np.sum(eta_norm, axis=1),(num_bins,1)).T\n ops=np.sum(density_eta*\n np.exp(1.0j*\n np.tile(eta_t_norm,(num_neurons,1))*\n (2*np.pi)),\n axis=1)\n op_angle=np.angle(ops)/(2*np.pi)\n op_abs=np.abs(ops)\n op_mask=op_abs > op_abs_thresh\n op_angle_mean=np.nanmean(op_angle[op_mask])\n op_angle_std=np.nanstd(op_angle[op_mask])\n return (ops,op_abs,op_angle,op_mask,op_angle_mean,op_angle_std)\n\ndef event_trig_avg(events, data, normalize=False, pts=10):\n '''\n Compute an event-triggered average.\n \n Parameters\n ==========\n events, ndarray\n Array of event indices.\n data, ndarray, ndim=2\n Array to be averaged along dim 1 relative to the events.\n normalize, bool, optional\n Whether to normalize to phase variable\n '''\n breakpts=np.array(\n np.hstack((0, (events[0:-1] + events[1:]) / 2., data.shape[1]-1)),\n dtype=np.int)\n if normalize:\n from scipy.interpolate import griddata\n max_interval=2*pts\n fullrange=np.linspace(-.5, .5, num=max_interval)\n xgrid1=fullrange[0:pts]\n xgrid2=fullrange[pts:]\n else:\n max_interval=2*np.max(np.hstack((events-breakpts[0:-1],\n breakpts[1:]-events)))\n midpt=int(np.floor(max_interval / 2))\n numevents=events.shape[0]-2 # don't use 1st and last due to boundary\n eta=np.zeros((data.shape[0], max_interval))\n for j in range(numevents):\n i=j+1\n timeidx=np.arange(int(breakpts[i]), int(breakpts[i+1]), dtype=np.int)\n thisevent=events[i]\n 
center=int(np.where(timeidx==thisevent)[0].astype(int))\n if normalize:\n xs1=np.array(timeidx[:center] - timeidx[center], dtype=np.float)\n xs1 /= xs1[0]*(-2.0)\n xs2=np.array(timeidx[center+1:] - timeidx[center], dtype=np.float)\n xs2 /= xs2[-1]*2.0\n xs=np.hstack((xs1, xs2))\n toadd=np.apply_along_axis(lambda x:\n scipy.interpolate.griddata(\n xs, x, fullrange),\n 1, data[:,timeidx])\n eta += toadd\n else:\n lpad=midpt - center\n rpad=max_interval - (len(timeidx)+lpad)\n eta += np.pad(data[:, timeidx], ((0,0), (lpad,rpad)), \n 'constant', constant_values=(0,0))\n eta /= float(numevents)\n eta[eta < 0] = 0\n return eta\n\n\n\n\n'''\n This method is adapted from the old main methods of the code, this method will do all the post processing \n and allow for it to be ran indpendently of main to allow for passing in of dictionaries withouth saving and loading them to the hard disc to avoid excess memory usage\n \n Output: \n mdict - The dictionary of final variables and results. Can either be saved or used as is.\n \n'''\ndef run(sim_output,trans,sec_flag,spike_thresh,f_sigma,butter_low,butter_high,bin_width,cutoff,are_volts,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh):\n\n butter_freq = np.array([butter_low,butter_high])\n if sec_flag:\n scalet=1e3\n else:\n scalet = 1\n\n graph_fn = ''\n if isinstance(sim_output['graphFn'],np.ndarray):\n graph_fn = str(sim_output['graphFn'][0])\n else:\n graph_fn = sim_output['graphFn']\n\n #Retrieve parameters from dictionary and data\n dt = float(sim_output['dt'])*scalet\n data = chop_transient(sim_output['Y'],trans,dt)\n num_neurons = np.shape(data)[0]\n tmax = np.shape(data)[1]\n\n #Generate spike trains from the data and bin the spikes\n if are_volts:\n spikes, spike_mat = find_spikes(data, spike_thresh)\n else:\n data = scipy.sparse.csc.csc_matrix(data)\n spike_mat= data.todense()\n spikes = data.nonzero()\n bins, spike_mat_bin = bin_spikes(spike_mat, bin_width, dt)\n\n #Get the different versions of the filtered data\n spike_fil_bin, butter_int_bin, spike_fil_butter = spikes_filt(spike_mat_bin[:num_neurons/2],\n dt*bin_width,\n f_sigma,\n butter_freq)\n spike_fil_bin2, butter_int_bin2, spike_fil_butter2 = spikes_filt(spike_mat_bin[num_neurons/2:],\n dt*bin_width,\n f_sigma,\n butter_freq)\n\n #Calculate Correlation Values\n cross_correlation = xcorr(butter_int_bin2,butter_int_bin)\n auto_cross_correlation1 = xcorr(butter_int_bin,butter_int_bin)\n auto_cross_correlation2 = xcorr(butter_int_bin2,butter_int_bin2)\n\n #phase_lag,pop_corr = find_metrics(cross_correlation,auto_cross_correlation1)\n\n #graph attributes\n cells_inhib,graph_edges,number_of_nodes,degree_histogram = get_graphinfo(graph_fn)\n\n #Calculating Values for Circle Map\n pop_burst_peak1 = burst_stats(butter_int_bin,peak_order,peak_percentile,dt*bin_width/1000.)\n pop_burst_peak2 = burst_stats(butter_int_bin2,peak_order,peak_percentile,dt*bin_width/1000.)\n phis = get_phis(pop_burst_peak1,pop_burst_peak2,bins)\n complex_phis = map_phi_to_complex(phis)\n mean_angle,variance_angle = get_circular_statistics(complex_phis)\n mean_phi = get_normalized_phi(mean_angle)\n #std_phi = np.std(phis)\n\n #Get Synchrony Values for each signal\n chi1,chi1_auto = synchrony_stats(spike_fil_bin,dt*bin_width/1000.)\n chi2,chi2_auto = synchrony_stats(spike_fil_bin2,dt*bin_width/1000.)\n\n '''##Compute event triggered averages and get individual cell statistics\n ##Population 1\n ##Normalize time to phase variable [-.5,.5]\n eta1_norm = 
event_trig_avg(pop_burst_peak1,spike_fil_bin,normalize=True,pts=eta_norm_pts)\n eta1_t_norm = np.linspace(-0.5, 0.5, 2*eta_norm_pts)\n ##Order Parameters\n (ops1,op_abs1,op_angle1,op_mask1,\n op_angle_mean1,op_angle_std1)=order_param(eta1_norm,eta1_t_norm,op_abs_thresh)\n ##Population 2\n ##Normalize time to phase variable [-.5,.5]\n eta2_norm = event_trig_avg(pop_burst_peak2,spike_fil_bin2,normalize=True,pts=eta_norm_pts)\n eta2_t_norm = np.linspace(-0.5, 0.5, 2*eta_norm_pts)\n ##Order Parameters\n (ops2,op_abs2,op_angle2,op_mask2,\n op_angle_mean2,op_angle_std2)=order_param(eta2_norm,eta2_t_norm,op_abs_thresh)'''\n \n\n\n mdict = {'bins':bins,\n 'spike_mat':spike_mat,\n 'spike_mat_bin':spike_mat_bin,\n 'spike_fil_bin':spike_fil_bin,\n 'spike_fil_bin':spike_fil_bin2,\n 'butter_int_bin': butter_int_bin,\n 'butter_int_bin2': butter_int_bin2,\n 'cross_correlation': cross_correlation,\n 'auto_cross_correlation1':auto_cross_correlation1,\n 'auto_cross_correlation2':auto_cross_correlation2,\n 'cells_inhib': cells_inhib,\n 'graph_edges':graph_edges,\n 'number_of_nodes':number_of_nodes,\n 'degree_histogram':degree_histogram,\n #'phase_lag': phase_lag,\n #'pop_correlation': pop_corr,\n 'time': sim_output['tf'],\n 'bin_width': bin_width,\n 'phis' : phis,\n 'mean_phi': mean_phi,\n 'variance_angle' : variance_angle,\n 'chi1' : chi1,\n 'chi2' : chi2,\n\t 'pop_burst_peak1': pop_burst_peak1,\n\t 'pop_burst_peak2': pop_burst_peak2\n #'op_abs1' : op_abs1,\n #'op_angle1' : op_angle1,\n #'op_angle_mean1' : op_angle_mean1,\n #'op_angle_std1' : op_angle_std1,\n #'op_abs2' : op_abs2,\n #'op_angle2' : op_angle2,\n #'op_angle_mean2' : op_angle_mean2,\n #'op_angle_std2' : op_angle_std2\n }\n\n return mdict\n\n\n\n\ndef main(argv=None):\n \n should_save = True\n if argv is None:\n argv = sys.argv\n else:\n \n should_save = False\n\n (simFn, outFn, trans, sec_flag, spike_thresh, f_sigma, butter_low,\n butter_high, bin_width, cutoff, are_volts,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh) = parse_args(argv)\n\n sim_output = scipy.io.loadmat(simFn)\n \n post_dict = run(sim_output,trans,sec_flag,spike_thresh,f_sigma,butter_low,butter_high,bin_width,cutoff,are_volts,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh)\n\n if should_save:\n scipy.io.savemat(outFn,post_dict,oned_as ='column')\n else:\n return post_dict\n\nif __name__ == '__main__':\n status = main()\n sys.exit(status) \n"
] |
[
[
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.isneginf",
"numpy.mean",
"numpy.nanmean",
"numpy.nanstd",
"numpy.where",
"numpy.square",
"numpy.hstack",
"numpy.pad",
"numpy.arange",
"numpy.ceil",
"numpy.std",
"numpy.diff",
"numpy.zeros",
"numpy.isnan",
"numpy.floor",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.isfinite",
"numpy.tile",
"numpy.percentile",
"numpy.shape",
"numpy.angle",
"numpy.isinf"
]
] |
RammySekham/Data-Engineering
|
[
"eec1020defe9c54403f6a80ba91fc071ed22b727"
] |
[
"DataModelling_PostgreSQL/etl.py"
] |
[
"import os\nimport glob\nimport psycopg2\nimport pandas as pd\nimport settings\nfrom sql_queries import *\n\n\ndef process_song_file(cur, filepath):\n \"\"\"\n process the json song file to insert song record into SQL table\n \"\"\"\n \n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert song record\n song_data = df[['song_id', 'title', 'artist_id', 'year', 'duration']].values[0]\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = df.loc[0, ['artist_id', 'artist_name', 'artist_location', 'artist_latitude','artist_longitude']].values.tolist()\n cur.execute(artist_table_insert, artist_data)\n\n\ndef process_log_file(cur, filepath):\n \"\"\"\n process the json log file to dump data into SQL table\n \"\"\"\n \n # open log file\n df = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action\n df = df[df.loc[:, 'page']==\"NextSong\"] \n\n # convert timestamp column to datetime\n t = pd.to_datetime((df.ts)/1000)\n \n # insert time data records\n time_data = {'t':t, 'hour':t.dt.hour, 'day':t.dt.day, 'week':t.dt.isocalendar().week, 'month':t.dt.month, 'year':t.dt.year, 'weekday':t.dt.weekday}\n column_labels = ['t', 'hour', 'day', 'week', 'month', 'year', 'weekday'] \n time_df = pd.DataFrame(data=time_data, columns=column_labels)\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n # load user table\n user_df = (df.loc[:, ['userId', 'firstName', 'lastName', 'gender', 'level']])\n\n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (pd.Timestamp(row.ts/1000, unit='s'), row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)\n\n\ndef process_data(cur, conn, filepath, func):\n \"\"\"\n process the file based on given func \n \"\"\"\n \n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))\n\n\ndef main():\n \"\"\"\n Calling process_data function to process the raw files to insert data into SQL tables\n \"\"\"\n \n conn = psycopg2.connect(host=settings.host, dbname=settings.new_db, user=settings.user, password=settings.password, port=settings.port)\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"pandas.to_datetime",
"pandas.Timestamp",
"pandas.read_json",
"pandas.DataFrame"
]
] |
dvhg/tvm
|
[
"288e9ef41d7884cea3d868d6d2bbb672c058757b"
] |
[
"python/tvm/testing.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=invalid-name,unnecessary-comprehension\n\"\"\" TVM testing utilities\n\nTesting Markers\n***************\n\nWe use pytest markers to specify the requirements of test functions. Currently\nthere is a single distinction that matters for our testing environment: does\nthe test require a gpu. For tests that require just a gpu or just a cpu, we\nhave the decorator :py:func:`requires_gpu` that enables the test when a gpu is\navailable. To avoid running tests that don't require a gpu on gpu nodes, this\ndecorator also sets the pytest marker `gpu` so we can use select the gpu subset\nof tests (using `pytest -m gpu`).\n\nUnfortunately, many tests are written like this:\n\n.. python::\n\n def test_something():\n for target in all_targets():\n do_something()\n\nThe test uses both gpu and cpu targets, so the test needs to be run on both cpu\nand gpu nodes. But we still want to only run the cpu targets on the cpu testing\nnode. The solution is to mark these tests with the gpu marker so they will be\nrun on the gpu nodes. But we also modify all_targets (renamed to\nenabled_targets) so that it only returns gpu targets on gpu nodes and cpu\ntargets on cpu nodes (using an environment variable).\n\nInstead of using the all_targets function, future tests that would like to\ntest against a variety of targets should use the\n:py:func:`tvm.testing.parametrize_targets` functionality. This allows us\ngreater control over which targets are run on which testing nodes.\n\nIf in the future we want to add a new type of testing node (for example\nfpgas), we need to add a new marker in `tests/python/pytest.ini` and a new\nfunction in this module. Then targets using this node should be added to the\n`TVM_TEST_TARGETS` environment variable in the CI.\n\"\"\"\nimport logging\nimport os\nimport sys\nimport time\nimport pytest\nimport numpy as np\nimport tvm\nimport tvm.arith\nimport tvm.tir\nimport tvm.te\nimport tvm._ffi\nfrom tvm.contrib import nvcc\n\n\ndef assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):\n \"\"\"Version of np.testing.assert_allclose with `atol` and `rtol` fields set\n in reasonable defaults.\n\n Arguments `actual` and `desired` are not interchangable, since the function\n compares the `abs(actual-desired)` with `atol+rtol*abs(desired)`. 
Since we\n often allow `desired` to be close to zero, we generally want non-zero `atol`.\n \"\"\"\n actual = np.asanyarray(actual)\n desired = np.asanyarray(desired)\n np.testing.assert_allclose(actual.shape, desired.shape)\n np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol, verbose=True)\n\n\ndef check_numerical_grads(\n function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1\n):\n \"\"\"A helper function that checks that numerical gradients of a function are\n equal to gradients computed in some different way (analytical gradients).\n\n Numerical gradients are computed using finite difference approximation. To\n reduce the number of function evaluations, the number of points used is\n gradually increased if the error value is too high (up to 5 points).\n\n Parameters\n ----------\n function\n A function that takes inputs either as positional or as keyword\n arguments (either `function(*input_values)` or `function(**input_values)`\n should be correct) and returns a scalar result. Should accept numpy\n ndarrays.\n\n input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]\n A list of values or a dict assigning values to variables. Represents the\n point at which gradients should be computed.\n\n grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]\n Gradients computed using a different method.\n\n function_value : float, optional\n Should be equal to `function(**input_values)`.\n\n delta : float, optional\n A small number used for numerical computation of partial derivatives.\n The default 1e-3 is a good choice for float32.\n\n atol : float, optional\n Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a\n gradient.\n\n rtol : float, optional\n Relative tolerance.\n \"\"\"\n # If input_values is a list then function accepts positional arguments\n # In this case transform it to a function taking kwargs of the form {\"0\": ..., \"1\": ...}\n if not isinstance(input_values, dict):\n input_len = len(input_values)\n input_values = {str(idx): val for idx, val in enumerate(input_values)}\n\n def _function(_input_len=input_len, _orig_function=function, **kwargs):\n return _orig_function(*(kwargs[str(i)] for i in range(input_len)))\n\n function = _function\n\n grad_values = {str(idx): val for idx, val in enumerate(grad_values)}\n\n if function_value is None:\n function_value = function(**input_values)\n\n # a helper to modify j-th element of val by a_delta\n def modify(val, j, a_delta):\n val = val.copy()\n val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta\n return val\n\n # numerically compute a partial derivative with respect to j-th element of the var `name`\n def derivative(x_name, j, a_delta):\n modified_values = {\n n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()\n }\n return (function(**modified_values) - function_value) / a_delta\n\n def compare_derivative(j, n_der, grad):\n der = grad.reshape(-1)[j]\n return np.abs(n_der - der) < atol + rtol * np.abs(n_der)\n\n for x_name, grad in grad_values.items():\n if grad.shape != input_values[x_name].shape:\n raise AssertionError(\n \"Gradient wrt '{}' has unexpected shape {}, expected {} \".format(\n x_name, grad.shape, input_values[x_name].shape\n )\n )\n\n ngrad = np.zeros_like(grad)\n\n wrong_positions = []\n\n # compute partial derivatives for each position in this variable\n for j in range(np.prod(grad.shape)):\n # forward difference approximation\n nder = derivative(x_name, j, delta)\n\n # if the derivative is not equal 
to the analytical one, try to use more\n # precise and expensive methods\n if not compare_derivative(j, nder, grad):\n # central difference approximation\n nder = (derivative(x_name, j, -delta) + nder) / 2\n\n if not compare_derivative(j, nder, grad):\n # central difference approximation using h = delta/2\n cnder2 = (\n derivative(x_name, j, delta / 2) + derivative(x_name, j, -delta / 2)\n ) / 2\n # five-point derivative\n nder = (4 * cnder2 - nder) / 3\n\n # if the derivatives still don't match, add this position to the\n # list of wrong positions\n if not compare_derivative(j, nder, grad):\n wrong_positions.append(np.unravel_index(j, grad.shape))\n\n ngrad.reshape(-1)[j] = nder\n\n wrong_percentage = int(100 * len(wrong_positions) / np.prod(grad.shape))\n\n dist = np.sqrt(np.sum((ngrad - grad) ** 2))\n grad_norm = np.sqrt(np.sum(ngrad ** 2))\n\n if not (np.isfinite(dist) and np.isfinite(grad_norm)):\n raise ValueError(\n \"NaN or infinity detected during numerical gradient checking wrt '{}'\\n\"\n \"analytical grad = {}\\n numerical grad = {}\\n\".format(x_name, grad, ngrad)\n )\n\n # we multiply atol by this number to make it more universal for different sizes\n sqrt_n = np.sqrt(float(np.prod(grad.shape)))\n\n if dist > atol * sqrt_n + rtol * grad_norm:\n raise AssertionError(\n \"Analytical and numerical grads wrt '{}' differ too much\\n\"\n \"analytical grad = {}\\n numerical grad = {}\\n\"\n \"{}% of elements differ, first 10 of wrong positions: {}\\n\"\n \"distance > atol*sqrt(n) + rtol*grad_norm\\n\"\n \"distance {} > {}*{} + {}*{}\".format(\n x_name,\n grad,\n ngrad,\n wrong_percentage,\n wrong_positions[:10],\n dist,\n atol,\n sqrt_n,\n rtol,\n grad_norm,\n )\n )\n\n max_diff = np.max(np.abs(ngrad - grad))\n avg_diff = np.mean(np.abs(ngrad - grad))\n logging.info(\n \"Numerical grad test wrt '%s' of shape %s passes, \"\n \"dist = %f, max_diff = %f, avg_diff = %f\",\n x_name,\n grad.shape,\n dist,\n max_diff,\n avg_diff,\n )\n\n\ndef assert_prim_expr_equal(lhs, rhs):\n \"\"\"Assert lhs and rhs equals to each iother.\n\n Parameters\n ----------\n lhs : tvm.tir.PrimExpr\n The left operand.\n\n rhs : tvm.tir.PrimExpr\n The left operand.\n \"\"\"\n ana = tvm.arith.Analyzer()\n res = ana.simplify(lhs - rhs)\n equal = isinstance(res, tvm.tir.IntImm) and res.value == 0\n if not equal:\n raise ValueError(\"{} and {} are not equal\".format(lhs, rhs))\n\n\ndef check_bool_expr_is_true(bool_expr, vranges, cond=None):\n \"\"\"Check that bool_expr holds given the condition cond\n for every value of free variables from vranges.\n\n for example, 2x > 4y solves to x > 2y given x in (0, 10) and y in (0, 10)\n here bool_expr is x > 2y, vranges is {x: (0, 10), y: (0, 10)}, cond is 2x > 4y\n We creates iterations to check,\n for x in range(10):\n for y in range(10):\n assert !(2x > 4y) || (x > 2y)\n\n Parameters\n ----------\n bool_expr : tvm.ir.PrimExpr\n Boolean expression to check\n vranges: Dict[tvm.tir.expr.Var, tvm.ir.Range]\n Free variables and their ranges\n cond: tvm.ir.PrimExpr\n extra conditions needs to be satisfied.\n \"\"\"\n if cond is not None:\n bool_expr = tvm.te.any(tvm.tir.Not(cond), bool_expr)\n\n def _run_expr(expr, vranges):\n \"\"\"Evaluate expr for every value of free variables\n given by vranges and return the tensor of results.\n \"\"\"\n\n def _compute_body(*us):\n vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}\n return tvm.tir.stmt_functor.substitute(expr, vmap)\n\n A = tvm.te.compute([r.extent.value for v, r in vranges.items()], _compute_body)\n args = 
[tvm.nd.empty(A.shape, A.dtype)]\n sch = tvm.te.create_schedule(A.op)\n mod = tvm.build(sch, [A])\n mod(*args)\n return args[0].numpy()\n\n res = _run_expr(bool_expr, vranges)\n if not np.all(res):\n indices = list(np.argwhere(res == 0)[0])\n counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]\n counterex = sorted(counterex, key=lambda x: x[0])\n counterex = \", \".join([v + \" = \" + str(i) for v, i in counterex])\n ana = tvm.arith.Analyzer()\n raise AssertionError(\n \"Expression {}\\nis not true on {}\\n\"\n \"Counterexample: {}\".format(ana.simplify(bool_expr), vranges, counterex)\n )\n\n\ndef check_int_constraints_trans_consistency(constraints_trans, vranges=None):\n \"\"\"Check IntConstraintsTransform is a bijective transformation.\n\n Parameters\n ----------\n constraints_trans : arith.IntConstraintsTransform\n Integer constraints transformation\n vranges: Dict[tvm.tir.Var, tvm.ir.Range]\n Free variables and their ranges\n \"\"\"\n if vranges is None:\n vranges = {}\n\n def _check_forward(constraints1, constraints2, varmap, backvarmap):\n ana = tvm.arith.Analyzer()\n all_vranges = vranges.copy()\n all_vranges.update({v: r for v, r in constraints1.ranges.items()})\n\n # Check that the transformation is injective\n cond_on_vars = tvm.tir.const(1, \"bool\")\n for v in constraints1.variables:\n if v in varmap:\n # variable mapping is consistent\n v_back = ana.simplify(tvm.tir.stmt_functor.substitute(varmap[v], backvarmap))\n cond_on_vars = tvm.te.all(cond_on_vars, v == v_back)\n # Also we have to check that the new relations are true when old relations are true\n cond_subst = tvm.tir.stmt_functor.substitute(\n tvm.te.all(tvm.tir.const(1, \"bool\"), *constraints2.relations), backvarmap\n )\n # We have to include relations from vranges too\n for v in constraints2.variables:\n if v in constraints2.ranges:\n r = constraints2.ranges[v]\n range_cond = tvm.te.all(v >= r.min, v < r.min + r.extent)\n range_cond = tvm.tir.stmt_functor.substitute(range_cond, backvarmap)\n cond_subst = tvm.te.all(cond_subst, range_cond)\n cond_subst = ana.simplify(cond_subst)\n check_bool_expr_is_true(\n tvm.te.all(cond_subst, cond_on_vars),\n all_vranges,\n cond=tvm.te.all(tvm.tir.const(1, \"bool\"), *constraints1.relations),\n )\n\n _check_forward(\n constraints_trans.src,\n constraints_trans.dst,\n constraints_trans.src_to_dst,\n constraints_trans.dst_to_src,\n )\n _check_forward(\n constraints_trans.dst,\n constraints_trans.src,\n constraints_trans.dst_to_src,\n constraints_trans.src_to_dst,\n )\n\n\ndef _get_targets():\n target_str = os.environ.get(\"TVM_TEST_TARGETS\", \"\")\n if len(target_str) == 0:\n target_str = DEFAULT_TEST_TARGETS\n targets = set()\n for dev in target_str.split(\";\"):\n if len(dev) == 0:\n continue\n target_kind = dev.split()[0]\n if tvm.runtime.enabled(target_kind) and tvm.device(target_kind, 0).exist:\n targets.add(dev)\n if len(targets) == 0:\n logging.warning(\n \"None of the following targets are supported by this build of TVM: %s.\"\n \" Try setting TVM_TEST_TARGETS to a supported target. 
Defaulting to llvm.\",\n target_str,\n )\n return {\"llvm\"}\n return targets\n\n\nDEFAULT_TEST_TARGETS = (\n \"llvm;cuda;opencl;metal;rocm;vulkan;nvptx;\"\n \"llvm -device=arm_cpu;opencl -device=mali,aocl_sw_emu\"\n)\n\n\ndef device_enabled(target):\n \"\"\"Check if a target should be used when testing.\n\n It is recommended that you use :py:func:`tvm.testing.parametrize_targets`\n instead of manually checking if a target is enabled.\n\n This allows the user to control which devices they are testing against. In\n tests, this should be used to check if a device should be used when said\n device is an optional part of the test.\n\n Parameters\n ----------\n target : str\n Target string to check against\n\n Returns\n -------\n bool\n Whether or not the device associated with this target is enabled.\n\n Example\n -------\n >>> @tvm.testing.uses_gpu\n >>> def test_mytest():\n >>> for target in [\"cuda\", \"llvm\"]:\n >>> if device_enabled(target):\n >>> test_body...\n\n Here, `test_body` will only be reached by with `target=\"cuda\"` on gpu test\n nodes and `target=\"llvm\"` on cpu test nodes.\n \"\"\"\n assert isinstance(target, str), \"device_enabled requires a target as a string\"\n target_kind = target.split(\" \")[\n 0\n ] # only check if device name is found, sometime there are extra flags\n return any([target_kind in test_target for test_target in _get_targets()])\n\n\ndef enabled_targets():\n \"\"\"Get all enabled targets with associated contexts.\n\n In most cases, you should use :py:func:`tvm.testing.parametrize_targets` instead of\n this function.\n\n In this context, enabled means that TVM was built with support for this\n target and the target name appears in the TVM_TEST_TARGETS environment\n variable. If TVM_TEST_TARGETS is not set, it defaults to variable\n DEFAULT_TEST_TARGETS in this module.\n\n If you use this function in a test, you **must** decorate the test with\n :py:func:`tvm.testing.uses_gpu` (otherwise it will never be run on the gpu).\n\n Returns\n -------\n targets: list\n A list of pairs of all enabled devices and the associated context\n \"\"\"\n return [(tgt, tvm.device(tgt)) for tgt in _get_targets()]\n\n\ndef _compose(args, decs):\n \"\"\"Helper to apply multiple markers\"\"\"\n if len(args) > 0:\n f = args[0]\n for d in reversed(decs):\n f = d(f)\n return f\n return decs\n\n\ndef uses_gpu(*args):\n \"\"\"Mark to differentiate tests that use the GPU in some capacity.\n\n These tests will be run on CPU-only test nodes and on test nodes with GPUs.\n To mark a test that must have a GPU present to run, use\n :py:func:`tvm.testing.requires_gpu`.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _uses_gpu = [pytest.mark.gpu]\n return _compose(args, _uses_gpu)\n\n\ndef requires_gpu(*args):\n \"\"\"Mark a test as requiring a GPU to run.\n\n Tests with this mark will not be run unless a gpu is present.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_gpu = [\n pytest.mark.skipif(\n not tvm.cuda().exist\n and not tvm.rocm().exist\n and not tvm.opencl().exist\n and not tvm.metal().exist\n and not tvm.vulkan().exist,\n reason=\"No GPU present\",\n ),\n *uses_gpu(),\n ]\n return _compose(args, _requires_gpu)\n\n\ndef requires_cuda(*args):\n \"\"\"Mark a test as requiring the CUDA runtime.\n\n This also marks the test as requiring a cuda gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_cuda = [\n pytest.mark.cuda,\n pytest.mark.skipif(not device_enabled(\"cuda\"), reason=\"CUDA 
support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_cuda)\n\n\ndef requires_cudagraph(*args):\n \"\"\"Mark a test as requiring the CUDA Graph Feature\n\n This also marks the test as requiring cuda\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_cudagraph = [\n pytest.mark.skipif(\n not nvcc.have_cudagraph(), reason=\"CUDA Graph is not supported in this environment\"\n ),\n *requires_cuda(),\n ]\n return _compose(args, _requires_cudagraph)\n\n\ndef requires_opencl(*args):\n \"\"\"Mark a test as requiring the OpenCL runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_opencl = [\n pytest.mark.opencl,\n pytest.mark.skipif(not device_enabled(\"opencl\"), reason=\"OpenCL support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_opencl)\n\n\ndef requires_rocm(*args):\n \"\"\"Mark a test as requiring the rocm runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_rocm = [\n pytest.mark.rocm,\n pytest.mark.skipif(not device_enabled(\"rocm\"), reason=\"rocm support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_rocm)\n\n\ndef requires_metal(*args):\n \"\"\"Mark a test as requiring the metal runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_metal = [\n pytest.mark.metal,\n pytest.mark.skipif(not device_enabled(\"metal\"), reason=\"metal support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_metal)\n\n\ndef requires_vulkan(*args):\n \"\"\"Mark a test as requiring the vulkan runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_vulkan = [\n pytest.mark.vulkan,\n pytest.mark.skipif(not device_enabled(\"vulkan\"), reason=\"vulkan support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_vulkan)\n\n\ndef requires_tensorcore(*args):\n \"\"\"Mark a test as requiring a tensorcore to run.\n\n Tests with this mark will not be run unless a tensorcore is present.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_tensorcore = [\n pytest.mark.tensorcore,\n pytest.mark.skipif(\n not tvm.cuda().exist or not nvcc.have_tensorcore(tvm.cuda(0).compute_version),\n reason=\"No tensorcore present\",\n ),\n *requires_gpu(),\n ]\n return _compose(args, _requires_tensorcore)\n\n\ndef requires_llvm(*args):\n \"\"\"Mark a test as requiring llvm to run.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_llvm = [\n pytest.mark.llvm,\n pytest.mark.skipif(not device_enabled(\"llvm\"), reason=\"LLVM support not enabled\"),\n ]\n return _compose(args, _requires_llvm)\n\n\ndef requires_micro(*args):\n \"\"\"Mark a test as requiring microTVM to run.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_micro = [\n pytest.mark.skipif(\n tvm.support.libinfo().get(\"USE_MICRO\", \"OFF\") != \"ON\",\n reason=\"MicroTVM support not enabled. 
Set USE_MICRO=ON in config.cmake to enable.\",\n )\n ]\n return _compose(args, _requires_micro)\n\n\ndef requires_rpc(*args):\n \"\"\"Mark a test as requiring rpc to run.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_rpc = [\n pytest.mark.skipif(\n tvm.support.libinfo().get(\"USE_RPC\", \"OFF\") != \"ON\",\n reason=\"RPC support not enabled. Set USE_RPC=ON in config.cmake to enable.\",\n )\n ]\n return _compose(args, _requires_rpc)\n\n\ndef _target_to_requirement(target):\n # mapping from target to decorator\n if target.startswith(\"cuda\"):\n return requires_cuda()\n if target.startswith(\"rocm\"):\n return requires_rocm()\n if target.startswith(\"vulkan\"):\n return requires_vulkan()\n if target.startswith(\"nvptx\"):\n return [*requires_llvm(), *requires_gpu()]\n if target.startswith(\"metal\"):\n return requires_metal()\n if target.startswith(\"opencl\"):\n return requires_opencl()\n if target.startswith(\"llvm\"):\n return requires_llvm()\n return []\n\n\ndef parametrize_targets(*args):\n \"\"\"Parametrize a test over all enabled targets.\n\n Use this decorator when you want your test to be run over a variety of\n targets and devices (including cpu and gpu devices).\n\n Parameters\n ----------\n f : function\n Function to parametrize. Must be of the form `def test_xxxxxxxxx(target, dev)`:,\n where `xxxxxxxxx` is any name.\n targets : list[str], optional\n Set of targets to run against. If not supplied,\n :py:func:`tvm.testing.enabled_targets` will be used.\n\n Example\n -------\n >>> @tvm.testing.parametrize\n >>> def test_mytest(target, dev):\n >>> ... # do something\n\n Or\n\n >>> @tvm.testing.parametrize(\"llvm\", \"cuda\")\n >>> def test_mytest(target, dev):\n >>> ... # do something\n \"\"\"\n\n def wrap(targets):\n def func(f):\n params = [\n pytest.param(target, tvm.device(target, 0), marks=_target_to_requirement(target))\n for target in targets\n ]\n return pytest.mark.parametrize(\"target,dev\", params)(f)\n\n return func\n\n if len(args) == 1 and callable(args[0]):\n targets = [t for t, _ in enabled_targets()]\n return wrap(targets)(args[0])\n return wrap(args)\n\n\ndef identity_after(x, sleep):\n \"\"\"Testing function to return identity after sleep\n\n Parameters\n ----------\n x : int\n The input value.\n\n sleep : float\n The amount of time to sleep\n\n Returns\n -------\n x : object\n The original value\n \"\"\"\n if sleep:\n time.sleep(sleep)\n return x\n\n\ndef terminate_self():\n \"\"\"Testing function to terminate the process.\"\"\"\n sys.exit(-1)\n\n\ntvm._ffi._init_api(\"testing\", __name__)\n"
] |
[
[
"numpy.abs",
"numpy.isfinite",
"numpy.argwhere",
"numpy.all",
"numpy.asanyarray",
"numpy.zeros_like",
"numpy.prod",
"numpy.testing.assert_allclose",
"numpy.unravel_index",
"numpy.sum"
]
] |
kyuhyoung/yolact
|
[
"98fc78e963264d2ec18cf1b85de7a328abcd6e96"
] |
[
"train.py"
] |
[
"from data import *\nfrom utils.augmentations import SSDAugmentation, BaseTransform\nfrom utils.functions import MovingAverage, SavePath\nfrom utils.logger import Log\nfrom utils import timer\nfrom layers.modules import MultiBoxLoss\nfrom yolact import Yolact\nimport os\nimport sys\nimport time\nimport math, random\nfrom pathlib import Path\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.utils.data as data\nimport numpy as np\nimport argparse\nimport datetime\n\n# Oof\nimport eval as eval_script\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Yolact Training Script')\nparser.add_argument('--batch_size', default=8, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str,\n help='Checkpoint state_dict file to resume training from. If this is \"interrupt\"'\\\n ', the model will resume training from the interrupt file.')\nparser.add_argument('--start_iter', default=-1, type=int,\n help='Resume training at this iter. If this is -1, the iteration will be'\\\n 'determined from the file name.')\nparser.add_argument('--num_workers', default=4, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning_rate', default=None, type=float,\n help='Initial learning rate. Leave as None to read this from the config.')\nparser.add_argument('--momentum', default=None, type=float,\n help='Momentum for SGD. Leave as None to read this from the config.')\nparser.add_argument('--decay', '--weight_decay', default=None, type=float,\n help='Weight decay for SGD. Leave as None to read this from the config.')\nparser.add_argument('--gamma', default=None, type=float,\n help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models.')\nparser.add_argument('--log_folder', default='logs/',\n help='Directory for saving logs.')\nparser.add_argument('--config', default=None,\n help='The config object to use.')\nparser.add_argument('--save_interval', default=10000, type=int,\n help='The number of iterations between saving the model.')\nparser.add_argument('--validation_size', default=5000, type=int,\n help='The number of images to use for validation.')\nparser.add_argument('--validation_epoch', default=2, type=int,\n help='Output validation information every n iterations. If -1, do no validation.')\nparser.add_argument('--keep_latest', dest='keep_latest', action='store_true',\n help='Only keep the latest checkpoint instead of each one.')\nparser.add_argument('--keep_latest_interval', default=100000, type=int,\n help='When --keep_latest is on, don\\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')\nparser.add_argument('--dataset', default=None, type=str,\n help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')\nparser.add_argument('--no_log', dest='log', action='store_false',\n help='Don\\'t log per iteration information into log_folder.')\nparser.add_argument('--log_gpu', dest='log_gpu', action='store_true',\n help='Include GPU information in the logs. 
Nvidia-smi tends to be slow, so set this with caution.')\nparser.add_argument('--no_interrupt', dest='interrupt', action='store_false',\n help='Don\\'t save an interrupt when KeyboardInterrupt is caught.')\nparser.add_argument('--batch_alloc', default=None, type=str,\n help='If using multiple GPUS, you can set this to be a comma separated list detailing which GPUs should get what local batch size (It should add up to your total batch size).')\nparser.add_argument('--no_autoscale', dest='autoscale', action='store_false',\n help='YOLACT will automatically scale the lr and the number of iterations depending on the batch size. Set this if you want to disable that.')\n\nparser.set_defaults(keep_latest=False, log=True, log_gpu=False, interrupt=True, autoscale=True)\nargs = parser.parse_args()\n\nif args.config is not None:\n set_cfg(args.config)\n\nif args.dataset is not None:\n set_dataset(args.dataset)\n\nif args.autoscale and args.batch_size != 8:\n factor = args.batch_size / 8\n print('Scaling parameters by %.2f to account for a batch size of %d.' % (factor, args.batch_size))\n\n cfg.lr *= factor\n cfg.max_iter //= factor\n cfg.lr_steps = [x // factor for x in cfg.lr_steps]\n\n# Update training parameters from the config if necessary\ndef replace(name):\n if getattr(args, name) == None: setattr(args, name, getattr(cfg, name))\nreplace('lr')\nreplace('decay')\nreplace('gamma')\nreplace('momentum')\n\n# This is managed by set_lr\ncur_lr = args.lr\n\nif torch.cuda.device_count() == 0:\n print('No GPUs detected. Exiting...')\n exit(-1)\n\nif args.batch_size // torch.cuda.device_count() < 6:\n print('Per-GPU batch size is less than the recommended limit for batch norm. Disabling batch norm.')\n cfg.freeze_bn = True\n\nloss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S', 'I']\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nclass NetLoss(nn.Module):\n \"\"\"\n A wrapper for running the network and computing the loss\n This is so we can more efficiently use DataParallel.\n \"\"\"\n \n def __init__(self, net:Yolact, criterion:MultiBoxLoss):\n super().__init__()\n\n self.net = net\n self.criterion = criterion\n \n def forward(self, images, targets, masks, num_crowds):\n preds = self.net(images)\n losses = self.criterion(self.net, preds, targets, masks, num_crowds)\n return losses\n\nclass CustomDataParallel(nn.DataParallel):\n \"\"\"\n This is a custom version of DataParallel that works better with our training data.\n It should also be faster than the general case.\n \"\"\"\n\n def scatter(self, inputs, kwargs, device_ids):\n # More like scatter and data prep at the same time. 
The point is we prep the data in such a way\n # that no scatter is necessary, and there's no need to shuffle stuff around different GPUs.\n devices = ['cuda:' + str(x) for x in device_ids]\n splits = prepare_data(inputs[0], devices, allocation=args.batch_alloc)\n\n return [[split[device_idx] for split in splits] for device_idx in range(len(devices))], \\\n [kwargs] * len(devices)\n\n def gather(self, outputs, output_device):\n out = {}\n\n for k in outputs[0]:\n out[k] = torch.stack([output[k].to(output_device) for output in outputs])\n \n return out\n\ndef train():\n if not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n dataset = COCODetection(image_path=cfg.dataset.train_images,\n info_file=cfg.dataset.train_info,\n transform=SSDAugmentation(MEANS))\n \n if args.validation_epoch > 0:\n setup_eval()\n val_dataset = COCODetection(image_path=cfg.dataset.valid_images,\n info_file=cfg.dataset.valid_info,\n transform=BaseTransform(MEANS))\n\n # Parallel wraps the underlying module, but when saving and loading we don't want that\n yolact_net = Yolact()\n net = yolact_net\n net.train()\n\n if args.log:\n log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),\n overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)\n\n # I don't use the timer during training (I use a different timing method).\n # Apparently there's a race condition with multiple GPUs, so disable it just to be safe.\n timer.disable_all()\n\n # Both of these can set args.resume to None, so do them before the check \n if args.resume == 'interrupt':\n args.resume = SavePath.get_interrupt(args.save_folder)\n elif args.resume == 'latest':\n args.resume = SavePath.get_latest(args.save_folder, cfg.name)\n\n if args.resume is not None:\n print('Resuming training, loading {}...'.format(args.resume))\n yolact_net.load_weights(args.resume)\n\n if args.start_iter == -1:\n args.start_iter = SavePath.from_str(args.resume).iteration\n else:\n print('Initializing weights...')\n yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)\n\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.decay)\n criterion = MultiBoxLoss(num_classes=cfg.num_classes,\n pos_threshold=cfg.positive_iou_threshold,\n neg_threshold=cfg.negative_iou_threshold,\n negpos_ratio=cfg.ohem_negpos_ratio)\n\n if args.batch_alloc is not None:\n args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]\n if sum(args.batch_alloc) != args.batch_size:\n print('Error: Batch allocation (%s) does not sum to batch size (%s).' % (args.batch_alloc, args.batch_size))\n exit(-1)\n\n net = CustomDataParallel(NetLoss(net, criterion))\n if args.cuda:\n net = net.cuda()\n \n # Initialize everything\n if not cfg.freeze_bn: yolact_net.freeze_bn() # Freeze bn so we don't kill our means\n yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())\n if not cfg.freeze_bn: yolact_net.freeze_bn(True)\n\n # loss counters\n loc_loss = 0\n conf_loss = 0\n iteration = max(args.start_iter, 0)\n last_time = time.time()\n\n epoch_size = len(dataset) // args.batch_size\n num_epochs = math.ceil(cfg.max_iter / epoch_size)\n \n # Which learning rate adjustment step are we on? 
lr' = lr * gamma ^ step_index\n step_index = 0\n\n data_loader = data.DataLoader(dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate,\n pin_memory=True)\n \n \n save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)\n time_avg = MovingAverage()\n\n global loss_types # Forms the print order\n loss_avgs = { k: MovingAverage(100) for k in loss_types }\n\n print('Begin training!')\n print()\n # try-except so you can use ctrl+c to save early and stop training\n try:\n for epoch in range(num_epochs):\n # Resume from start_iter\n if (epoch+1)*epoch_size < iteration:\n continue\n \n for datum in data_loader:\n # Stop if we've reached an epoch if we're resuming from start_iter\n if iteration == (epoch+1)*epoch_size:\n break\n\n # Stop at the configured number of iterations even if mid-epoch\n if iteration == cfg.max_iter:\n break\n\n # Change a config setting if we've reached the specified iteration\n changed = False\n for change in cfg.delayed_settings:\n if iteration >= change[0]:\n changed = True\n cfg.replace(change[1])\n\n # Reset the loss averages because things might have changed\n for avg in loss_avgs:\n avg.reset()\n \n # If a config setting was changed, remove it from the list so we don't keep checking\n if changed:\n cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]\n\n # Warm up by linearly interpolating the learning rate from some smaller value\n if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:\n set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)\n\n # Adjust the learning rate at the given iterations, but also if we resume from past that iteration\n while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:\n step_index += 1\n set_lr(optimizer, args.lr * (args.gamma ** step_index))\n \n # Zero the grad to get ready to compute gradients\n optimizer.zero_grad()\n\n # Forward Pass + Compute loss at the same time (see CustomDataParallel and NetLoss)\n losses = net(datum)\n \n losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel\n loss = sum([losses[k] for k in losses])\n \n # no_inf_mean removes some components from the loss, so make sure to backward through all of it\n # all_loss = sum([v.mean() for v in losses.values()])\n\n # Backprop\n loss.backward() # Do this to free up vram even if loss is not finite\n if torch.isfinite(loss).item():\n optimizer.step()\n \n # Add the loss to the moving average for bookkeeping\n for k in losses:\n loss_avgs[k].add(losses[k].item())\n\n cur_time = time.time()\n elapsed = cur_time - last_time\n last_time = cur_time\n\n # Exclude graph setup from the timing information\n if iteration != args.start_iter:\n time_avg.add(elapsed)\n\n if iteration % 10 == 0:\n eta_str = str(datetime.timedelta(seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]\n \n total = sum([loss_avgs[k].get_avg() for k in losses])\n loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])\n \n print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')\n % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)\n\n if args.log:\n precision = 5\n loss_info = {k: round(losses[k].item(), precision) for k in losses}\n loss_info['T'] = round(losses[k].item(), precision)\n\n if args.log_gpu:\n log.log_gpu_stats = (iteration % 10 == 0) # 
nvidia-smi is sloooow\n \n log.log('train', loss=loss_info, epoch=epoch, iter=iteration,\n lr=round(cur_lr, 10), elapsed=elapsed)\n\n log.log_gpu_stats = args.log_gpu\n \n iteration += 1\n\n if iteration % args.save_interval == 0 and iteration != args.start_iter:\n if args.keep_latest:\n latest = SavePath.get_latest(args.save_folder, cfg.name)\n\n print('Saving state, iter:', iteration)\n yolact_net.save_weights(save_path(epoch, iteration))\n\n if args.keep_latest and latest is not None:\n if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:\n print('Deleting old save...')\n os.remove(latest)\n \n # This is done per epoch\n if args.validation_epoch > 0:\n if epoch % args.validation_epoch == 0 and epoch > 0:\n compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)\n \n # Compute validation mAP after training is finished\n compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)\n except KeyboardInterrupt:\n if args.interrupt:\n print('Stopping early. Saving network...')\n \n # Delete previous copy of the interrupted network so we don't spam the weights folder\n SavePath.remove_interrupt(args.save_folder)\n \n yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))\n exit()\n\n yolact_net.save_weights(save_path(epoch, iteration))\n\n\ndef set_lr(optimizer, new_lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n \n global cur_lr\n cur_lr = new_lr\n\ndef gradinator(x):\n x.requires_grad = False\n return x\n\ndef prepare_data(datum, devices:list=None, allocation:list=None):\n with torch.no_grad():\n if devices is None:\n devices = ['cuda:0'] if args.cuda else ['cpu']\n if allocation is None:\n allocation = [args.batch_size // len(devices)] * (len(devices) - 1)\n allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less\n \n images, (targets, masks, num_crowds) = datum\n\n cur_idx = 0\n for device, alloc in zip(devices, allocation):\n for _ in range(alloc):\n images[cur_idx] = gradinator(images[cur_idx].to(device))\n targets[cur_idx] = gradinator(targets[cur_idx].to(device))\n masks[cur_idx] = gradinator(masks[cur_idx].to(device))\n cur_idx += 1\n\n if cfg.preserve_aspect_ratio:\n # Choose a random size from the batch\n _, h, w = images[random.randint(0, len(images)-1)].size()\n\n for idx, (image, target, mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):\n images[idx], targets[idx], masks[idx], num_crowds[idx] \\\n = enforce_size(image, target, mask, num_crowd, w, h)\n \n cur_idx = 0\n split_images, split_targets, split_masks, split_numcrowds \\\n = [[None for alloc in allocation] for _ in range(4)]\n\n for device_idx, alloc in enumerate(allocation):\n split_images[device_idx] = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)\n split_targets[device_idx] = targets[cur_idx:cur_idx+alloc]\n split_masks[device_idx] = masks[cur_idx:cur_idx+alloc]\n split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]\n\n cur_idx += alloc\n\n return split_images, split_targets, split_masks, split_numcrowds\n\ndef no_inf_mean(x:torch.Tensor):\n \"\"\"\n Computes the mean of a vector, throwing out all inf values.\n If there are no non-inf values, this will return inf (i.e., just the normal mean).\n \"\"\"\n\n no_inf = [a for a in x if torch.isfinite(a)]\n\n if len(no_inf) > 0:\n return sum(no_inf) / len(no_inf)\n else:\n return x.mean()\n\ndef compute_validation_loss(net, data_loader, 
criterion):\n global loss_types\n\n with torch.no_grad():\n losses = {}\n \n # Don't switch to eval mode because we want to get losses\n iterations = 0\n for datum in data_loader:\n images, targets, masks, num_crowds = prepare_data(datum)\n out = net(images)\n\n wrapper = ScatterWrapper(targets, masks, num_crowds)\n _losses = criterion(out, wrapper, wrapper.make_mask())\n \n for k, v in _losses.items():\n v = v.mean().item()\n if k in losses:\n losses[k] += v\n else:\n losses[k] = v\n\n iterations += 1\n if args.validation_size <= iterations * args.batch_size:\n break\n \n for k in losses:\n losses[k] /= iterations\n \n \n loss_labels = sum([[k, losses[k]] for k in loss_types if k in losses], [])\n print(('Validation ||' + (' %s: %.3f |' * len(losses)) + ')') % tuple(loss_labels), flush=True)\n\ndef compute_validation_map(epoch, iteration, yolact_net, dataset, log:Log=None):\n with torch.no_grad():\n yolact_net.eval()\n \n start = time.time()\n print()\n print(\"Computing validation mAP (this may take a while)...\", flush=True)\n val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)\n end = time.time()\n\n if log is not None:\n log.log('val', val_info, elapsed=(end - start), epoch=epoch, iter=iteration)\n\n yolact_net.train()\n\ndef setup_eval():\n eval_script.parse_args(['--no_bar', '--max_images='+str(args.validation_size)])\n\nif __name__ == '__main__':\n train()\n"
] |
[
[
"torch.set_default_tensor_type",
"torch.zeros",
"torch.utils.data.DataLoader",
"torch.isfinite",
"torch.no_grad",
"torch.cuda.is_available",
"torch.stack",
"torch.cuda.device_count"
]
] |
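Illustrative note (not part of the dataset row above): a minimal, hypothetical sketch of the core PyTorch pattern behind this train.py entry, choosing a device via torch.cuda.is_available / torch.cuda.device_count, batching with torch.stack, and skipping the optimizer step when the loss is not finite, matching the extracted API list. The tiny Conv2d model, tensor shapes, and learning rate are assumptions for illustration only.

import torch

device = "cuda" if torch.cuda.is_available() and torch.cuda.device_count() > 0 else "cpu"
images = torch.stack([torch.zeros(3, 8, 8) for _ in range(4)], dim=0).to(device)  # hypothetical 4-image batch
model = torch.nn.Conv2d(3, 1, kernel_size=3).to(device)                           # stand-in for the real network
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

optimizer.zero_grad()
loss = model(images).mean()
loss.backward()                   # backward unconditionally, as train.py does, to free graph memory
if torch.isfinite(loss).item():   # only update weights when the loss is finite
    optimizer.step()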
HenrikJantti/xFELTOR
|
[
"321fe73ce28fa590baedabc0aa13c5ba50a32dff"
] |
[
"xfeltor/load.py"
] |
[
"import xarray as xr\nimport numpy as np\nfrom typing import Union\nimport json\n\n\ndef open_feltordataset(\n datapath: str = \"./*.nc\",\n chunks: Union[int, dict] = None,\n restart_indices: bool = False,\n probes: bool = False,\n **kwargs: dict,\n) -> xr.Dataset:\n \"\"\"Loads FELTOR output into one xarray Dataset. Can load either a single\n output file or multiple coherent files for restarted simulations.\n\n Parameters\n ----------\n datapath : str or (list or tuple of xr.Dataset), optional\n Path to the data to open. Can point to either a set of one or more *nc\n files.\n\n chunks : dict, optional\n Dictionary with keys given by dimension names and values given by chunk sizes.\n By default, chunks will be chosen to load entire input files into memory at once.\n This has a major impact on performance: please see the full documentation for more details:\n http://xarray.pydata.org/en/stable/user-guide/dask.html#chunking-and-performance\n restart_indices: bool, optional\n if True, duplicate time steps from restared runs are kept\n Keyword arguments are passed down to `xarray.open_mfdataset`, which in\n turn passes extra kwargs down to `xarray.open_dataset`.\n probes: bool, optional\n if True, indicates that the dataset contains probes and associates values of the\n x and y possition for each probe with the coresponding probe_id.\n Also changes the combine option to \"by_coords\".\n \"\"\"\n if chunks is None:\n chunks = {}\n\n combine_opt = \"by_coords\" if probes else \"nested\"\n\n ds = xr.open_mfdataset(\n datapath,\n chunks=chunks,\n combine=combine_opt,\n concat_dim=\"time\",\n decode_times=False,\n join=\"outer\",\n **kwargs,\n )\n\n if restart_indices:\n return ds\n\n _, index = np.unique(ds[\"time\"], return_index=True)\n\n # store inputfile data in ds.attrs\n input_variables = json.loads(ds.attrs[\"inputfile\"])\n\n for i in input_variables:\n ds.attrs[i] = input_variables[i]\n\n if probes:\n x = np.unique(ds.px.values)\n y = np.unique(ds.py.values)\n ds = ds.assign_coords(\n dict(\n probe_x=x,\n probe_y=y,\n )\n )\n reshaped_prb = np.reshape(\n ds.electrons_prb.values, (y.size, x.size, ds.probe_time.values.size)\n )\n ds = ds.assign(\n electrons_prb=([\"probe_y\", \"probe_x\", \"probe_time\"], reshaped_prb)\n )\n reshaped_prb = np.reshape(\n ds.ions_prb.values, (y.size, x.size, ds.probe_time.values.size)\n )\n ds = ds.assign(ions_prb=([\"probe_y\", \"probe_x\", \"probe_time\"], reshaped_prb))\n reshaped_prb = np.reshape(\n ds.potential_prb.values, (y.size, x.size, ds.probe_time.values.size)\n )\n ds = ds.assign(\n potential_prb=([\"probe_y\", \"probe_x\", \"probe_time\"], reshaped_prb)\n )\n reshaped_prb = np.reshape(\n ds.vorticity_prb.values, (y.size, x.size, ds.probe_time.values.size)\n )\n ds = ds.assign(\n vorticity_prb=([\"probe_y\", \"probe_x\", \"probe_time\"], reshaped_prb)\n )\n ds = ds.drop_dims((\"probes\"))\n\n return ds.isel(time=index)\n"
] |
[
[
"numpy.reshape",
"numpy.unique"
]
] |
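Illustrative note (not part of the dataset row above): a small sketch of the two numpy calls extracted from xfeltor/load.py, deduplicating restart time steps with np.unique(return_index=True) and regrouping a flat probe signal with np.reshape. The array values and the (y, x, time) sizes are assumptions for illustration.

import numpy as np

time = np.array([0.0, 1.0, 2.0, 2.0, 3.0])   # duplicated step, as after a restarted run
_, index = np.unique(time, return_index=True)
print(time[index])                            # [0. 1. 2. 3.]

flat_probes = np.arange(2 * 3 * 4)            # hypothetical probe signal: y=2, x=3, time=4
probes = np.reshape(flat_probes, (2, 3, 4))
print(probes.shape)                           # (2, 3, 4)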
alexchungio/under-water-detect
|
[
"312672ccbe5e31ca21dffab26e1438ea190f3e5a"
] |
[
"docs/check_and_draw_box.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#------------------------------------------------------\n# @ File : check_and_draw_box.py\n# @ Description: \n# @ Author : Alex Chung\n# @ Contact : [email protected]\n# @ License : Copyright (c) 2017-2018\n# @ Time : 2021/1/26 下午2:43\n# @ Software : PyCharm\n#-------------------------------------------------------\nimport os\nimport json\nimport os.path as osp\nimport numpy as np\nfrom PIL import Image, ImageFont, ImageDraw\n\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nfrom tqdm import tqdm\n\n\ndef draw_box_with_pil(image, bbox, label, color_dict):\n \"\"\"\n\n :param image:\n :param bbox:\n :param label:\n :param color_dict:\n :return:\n \"\"\"\n\n img_w = image.size[0]\n img_h = image.size[1]\n\n bbox = np.array(bbox, dtype=np.int32).reshape(-1, 4)\n # print('image shape ({},{})'.format(img_w, img_h))\n # set font\n font = ImageFont.truetype(font=fm.findfont(fm.FontProperties()),\n size=np.floor(1.5e-2 * img_w ).astype(np.int32), encoding=\"unic\")\n\n # draw box\n draw = ImageDraw.Draw(image)\n for box, tag in zip(bbox, label):\n # get label size\n label_size = draw.textsize(tag, font)\n # get label start point\n text_origin = np.array([box[0], box[1] - label_size[1]])\n # draw bbox rectangle and label rectangle\n draw.rectangle([box[0], box[1], box[2], box[3]], outline=color_dict[tag], width=2)\n draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=color_dict[tag])\n draw.text(text_origin, str(tag), fill=(255, 255, 255), font=font)\n\n return image\n\n\ndef check_bbox_boundary(images_info, annotations_info, img_dir, box_img_dir, label_tag, color_dict):\n \"\"\"\n\n :return:\n \"\"\"\n\n for img in tqdm(images_info):\n img_name = img['file_name']\n img_id = img['id']\n img_w, img_h = img['width'], img['height']\n # get image bbox\n bboxs = []\n labels = []\n for anns in annotations_info:\n if anns['image_id'] == img['id']:\n x1, y1, w, h = anns['bbox']\n\n w, h = w -1, h - 1\n if anns['area'] < 0 or w < 0 or h < 0:\n print(anns['area'], w, h)\n continue\n # x1, y1, x2, y2 = x1, y1, x1 + w, y1 + h\n # restrict bbox to image area\n x1 = max(x1, 0)\n y1 = max(y1, 0)\n x2 = min(x1 + w, img_w)\n y2 = min(y1 + h, img_h)\n bboxs.append([x1, y1, x2, y2])\n labels.append(anns['category_id'])\n\n bboxs = np.array(bboxs, dtype=np.int32).reshape(-1, 4)\n # assert (bboxs[:, 2] >= 1).all(), \"Warning, {} bbox tag error in width aspect {}\".format(img_name, bboxs)\n # assert (bboxs[:, 3] >= 1).all(), \"Warning, {} bbox tag error in height aspect {}\".format(img_name, bboxs)\n\n # bboxs[:, 2:] = bboxs[:,:2] + bboxs[:, 2:]\n assert (bboxs[:, 0] >= 0).all() and (bboxs[:, 2] <= img_w).all(), \\\n \"Warning, {} bbox size out of range in width aspect {} {}\".format(img_name, bboxs, img_w)\n assert (bboxs[:, 1] >= 0).all() and ( bboxs[:, 3] <= img_h).all(), \\\n \"Warning, {} bbox size out of range in height aspect {} {}\".format(img_name, bboxs, img_h)\n\n # draw box on image\n label = [label_tag[label] for label in labels]\n\n image = Image.open(osp.join(img_dir, img_name))\n box_img = draw_box_with_pil(image, bboxs, label, color_dict)\n\n box_img.save(osp.join(box_img_dir, img_name))\n\n\ndef main():\n\n json_path = '/media/alex/80CA308ECA308288/alex_dataset/URPC-2020/train/annotation/voc_all.json'\n img_dir = '/media/alex/80CA308ECA308288/alex_dataset/URPC-2020/train/image'\n box_img_dir = '/media/alex/80CA308ECA308288/alex_dataset/URPC-2020/train/box_image'\n\n # load annotation\n with open(json_path) as 
f:\n all_data = json.load(f)\n images_info = all_data['images']\n annotations_info = []\n for ann in all_data['annotations']:\n ann.pop('id') # remove annotation id\n ann.pop('iscrowd')\n annotations_info.append(ann)\n category_dict = {x['name']: x['id'] for x in all_data['categories']}\n\n label_tag = {id:name for name, id in category_dict.items()}\n color_dict = {'echinus': 'red', 'starfish': 'green', 'holothurian': 'blue', 'scallop': 'purple'}\n os.makedirs(box_img_dir, exist_ok=True)\n check_bbox_boundary(images_info, annotations_info, img_dir=img_dir, box_img_dir=box_img_dir, label_tag=label_tag,\n color_dict=color_dict)\n print('Done')\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.array",
"matplotlib.font_manager.FontProperties",
"numpy.floor"
]
] |
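Illustrative note (not part of the dataset row above): a hypothetical sketch of the numpy side of check_bbox_boundary, converting COCO-style [x, y, w, h] boxes into corner coordinates clipped to the image, plus the np.floor font-size computation from draw_box_with_pil. The image size and box values are made up for the example.

import numpy as np

img_w, img_h = 640, 480
coco_boxes = [[10, 20, 100, 50], [600, 400, 80, 120]]    # hypothetical [x, y, w, h] annotations

corners = []
for x1, y1, w, h in coco_boxes:
    x2 = min(x1 + w - 1, img_w)                          # clamp the far corner to the image
    y2 = min(y1 + h - 1, img_h)
    corners.append([max(x1, 0), max(y1, 0), x2, y2])

bboxs = np.array(corners, dtype=np.int32).reshape(-1, 4)
assert (bboxs[:, 0] >= 0).all() and (bboxs[:, 2] <= img_w).all()
assert (bboxs[:, 1] >= 0).all() and (bboxs[:, 3] <= img_h).all()
font_size = int(np.floor(1.5e-2 * img_w))                # label font size, as in draw_box_with_pil
print(bboxs.tolist(), font_size)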
UKPLab/tacl2018-preference-convincing
|
[
"65eb1cd3bf76f8068889880e0f80178e790350ce"
] |
[
"python/analysis/habernal_comparison/analyse_features.py"
] |
[
"'''\nCreated on 1 Jun 2017\n\nLoad a set of feature lengthscales from a good run with 'both' types of features. \nSort them by lengthscale.\nPlot the distribution.\n\nIdentify which type of feature they are: add colours or markers to the plot.\n\nProvide a zoomed-in variant for the best 25 features.\n\n@author: simpson\n'''\n\nimport os, pickle\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom tests import load_embeddings, get_fold_data, TestRunner\nfrom data_loader import load_train_test_data, load_ling_features\nfrom matplotlib.ticker import MaxNLocator\n\nif __name__ == '__main__':\n\n # expt_folder_name = 'crowdsourcing_argumentation_opt/'\n expt_folder_name = 'crowdsourcing_argumentation_expts/'\n\n dataset = 'UKPConvArgStrict' # 'UKPConvArgAll_evalMACE'\n methods = ['SinglePrefGP_weaksprior'] # ['SinglePrefGP_weaksprior_1104']\n\n feature_type = 'both'\n embeddings_type = 'word_mean'\n di = 0.00\n\n selected_folds_all = [[0, 1, 6, 12, 13]]\n\n original_fold_order_file = './results/feature_analysis/foldorder_old.txt'\n o_fold_order = np.genfromtxt(os.path.expanduser(original_fold_order_file), dtype=str)\n\n mean_ls = None\n\n for m, method in enumerate(methods):\n\n data_root_dir = os.path.expanduser(\"~/data/personalised_argumentation/\")\n resultsfile_template = 'habernal_%s_%s_%s_%s_acc%.2f_di%.2f'\n\n resultsfile = data_root_dir + 'outputdata/' + expt_folder_name + \\\n resultsfile_template % (dataset, method,\n feature_type, embeddings_type, 1.0, di) + '_test.pkl'\n\n resultsdir = data_root_dir + 'outputdata/' + expt_folder_name + \\\n resultsfile_template % (dataset, method,\n feature_type, embeddings_type, 1.0, di)\n\n foldorderfile = None\n if foldorderfile is not None:\n fold_order = np.genfromtxt(os.path.expanduser(foldorderfile),\n dtype=str)\n elif os.path.isfile(resultsdir + '/foldorder.txt'):\n fold_order = np.genfromtxt(os.path.expanduser(resultsdir + '/foldorder.txt'),\n dtype=str)\n else:\n fold_order = None\n\n selected_folds = selected_folds_all[m]\n nFolds = len(selected_folds)\n\n if os.path.isfile(resultsfile):\n\n with open(resultsfile, 'r') as fh:\n data = pickle.load(fh)\n\n if nFolds < 1:\n nFolds = len(data[0])\n else:\n data = None\n\n min_folds = 0\n\n # Sort the features by their ID.\n # If we have discarded some features that were all 0s, the current index will not be the original feature idx.\n # How to map them back? Reload the original data and find out which features were discarded.\n\n folds, _, folds_regression, word_index_to_embeddings_map, word_to_indices_map, index_to_word_map = load_train_test_data(dataset)\n word_embeddings = load_embeddings(word_index_to_embeddings_map)\n ling_feat_spmatrix, docids = load_ling_features(dataset)\n\n #default_ls_value = compute_lengthscale_heuristic(feature_type, embeddings_type, word_embeddings,\n # ling_feat_spmatrix, docids, folds, index_to_word_map)\n\n\n for o_foldidx, o_fold in enumerate(o_fold_order):\n\n if o_foldidx not in selected_folds:\n continue\n\n if fold_order is None: # fall back to the order on the current machine\n foldidx = np.argwhere(np.array(list(folds.keys())) == o_fold)[0][0]\n fold = list(folds.keys())[foldidx]\n else:\n foldidx = np.argwhere(fold_order == o_fold)[0][0]\n fold = fold_order[foldidx]\n if fold[-2] == \"'\" and fold[0] == \"'\":\n fold = fold[1:-2]\n elif fold[-1] == \"'\" and fold[0] == \"'\":\n fold = fold[1:-1]\n fold_order[foldidx] = fold\n\n # look for new-style data in separate files for each fold. 
Prefer new-style if both are found.\n foldfile = resultsdir + '/fold%i.pkl' % foldidx\n if os.path.isfile(foldfile):\n with open(foldfile, 'rb') as fh:\n data_f = pickle.load(fh, encoding='latin1')\n else: # convert the old stuff to new stuff\n if data is None:\n min_folds = foldidx+1\n print('Skipping fold with no data %i' % foldidx)\n print(\"Skipping results for %s, %s, %s, %s\" % (method,\n dataset,\n feature_type,\n embeddings_type))\n print(\"Skipped filename was: %s, old-style results file would be %s\" % (foldfile,\n resultsfile))\n continue\n\n if not os.path.isdir(resultsdir):\n os.mkdir(resultsdir)\n data_f = []\n for thing in data:\n if foldidx in thing:\n data_f.append(thing[foldidx])\n else:\n data_f.append(thing)\n with open(foldfile, 'wb') as fh:\n pickle.dump(data_f, fh)\n\n trainids_a1, trainids_a2, prefs_train, personIDs_train, testids_a1, testids_a2, prefs_test, personIDs_test, \\\n X, uids, utexts, _ = get_fold_data(folds, fold, docids)\n\n # get the embedding values for the test data -- need to find embeddings of the whole piece of text\n runner = TestRunner('crowdsourcing_argumentation_expts_first_submission', [dataset], [feature_type],\n [embeddings_type], [method], 0)\n runner.embeddings = word_embeddings\n runner.X = X\n runner.ling_feat_spmatrix = ling_feat_spmatrix\n runner.load_features(feature_type, embeddings_type, trainids_a1, trainids_a2, uids)\n items_feat = runner.items_feat\n valid_feats = runner.valid_feats\n\n min_vals = np.min(items_feat, axis=0)\n max_vals = np.max(items_feat, axis=0)\n\n nfeats = len(valid_feats)\n # take the mean ls for each feature across the folds\n if mean_ls is None:\n mean_ls = np.zeros(nfeats, dtype=float)\n totals = np.zeros(nfeats, dtype=int)\n\n #print \"Warning: not computing means.\"\n learned_ls = data_f[7]\n initial_ls = data_f[5] #/ float(len(valid_feats)) # we want the data relative to the median -- the initial LS were also scaled by no. 
features\n mean_ls[valid_feats] += learned_ls / initial_ls # normalisation in original drafts\n norm_ls = learned_ls / (max_vals - min_vals)\n #mean_ls[valid_feats] += norm_ls\n\n print(\"Max normed l: %f\" % np.max(norm_ls))\n totals[valid_feats] += 1\n \n #mean_ls = mean_ls[valid_feats]\n #totals = totals[valid_feats]\n mean_ls[totals != 0] = mean_ls[totals != 0] / totals[totals != 0]\n \n if feature_type == 'debug':\n feat_cats = np.array(['one', 'two', 'three'])\n featnames = feat_cats\n col = np.array(['r', 'lightgreen', 'b'])\n marks = np.array(['2', 'p', '^'])\n nembeddings = 3\n else:\n # assign category labels to each feature\n feat_cats = np.empty(nfeats, dtype=object)\n nembeddings = word_embeddings.shape[1]\n feat_cats[:nembeddings] = \"embeddings\"\n \n catnames = np.array(['embeddings', '_pos_ngram', 'ProductionRule', 'Rate', 'CONTEXTUALITY_MEASURE_FN',\n 'ExclamationRatio', 'upperCaseRatio', 'Ratio', 'DependencyTreeDepth', 'Modal',\n 'sentiment', 'oovWordsCount', 'spell_skill', '_length', 'word_more', 'Ending', 'ner.type.', '_'])\n special_catnames = np.array(['flesch', 'coleman', 'ari'])\n\n marks = np.array(['2', 'p', '^', 'H', 'x', ',', 'D', '<', '>', 'v', ',', '8', '1', 'o', '*'])\n col = np.array(['r', 'lightgreen', 'b', 'y', 'purple', 'black', 'darkgoldenrod', 'magenta', 'darkgreen', 'darkblue',\n 'brown', 'darkgray', 'orange', 'dodgerblue', 'lightgray', 'cyan', ])\n \n with open(data_root_dir + \"/tempdata/feature_names_all3.txt\", 'r') as fh:\n lines = fh.readlines()\n \n featnames = lines[0].strip()\n featidxs = lines[1].strip()\n \n if featnames[-1] == ']':\n featnames = featnames[:-1]\n if featnames[0] == '[':\n featnames = featnames[1:]\n \n featidxs = np.fromstring(featidxs, dtype=int, sep=',') + nembeddings\n featnames = np.array(featnames.split(', '), dtype=str)\n \n for f, fname in enumerate(featnames):\n featnames[f] = featnames[f][2:] # skip the a1 bit at the start\n\n for catname in special_catnames:\n if catname == fname:\n print(\"%i, Recognised %s as special cat %s\" % (f, fname, catname))\n feat_cats[nembeddings + f] = catname\n\n for catname in catnames:\n if catname in fname:\n print(\"%i, Recognised %s as type %s\" % (f, fname, catname))\n feat_cats[nembeddings + f] = catname\n break\n if not feat_cats[nembeddings + f]:\n print(\"%i, Unrecognised language feature: %s\" % (f, fname))\n feat_cats[nembeddings + f] = 'ngram'\n\n\n for catname in catnames:\n print(\"No. features in category %s = %i\" % (catname, np.sum(feat_cats == catname)))\n\n feat_cats[feat_cats=='_'] = 'ngram'\n\n # readability\n feat_cats[feat_cats=='ari'] = 'vocab/surface'\n feat_cats[feat_cats=='coleman'] = 'vocab/surface'\n feat_cats[feat_cats=='flesch'] = 'vocab/surface'\n\n feat_cats[feat_cats=='Rate'] = 'other'\n feat_cats[feat_cats=='Ratio'] = 'other'\n feat_cats[feat_cats=='Modal'] = 'other'\n feat_cats[feat_cats=='CONTEXTUALITY_MEASURE_FN'] = 'other'\n feat_cats[feat_cats == 'Ending'] = 'other'\n\n feat_cats[feat_cats=='_pos_ngram'] = 'POS'\n\n feat_cats[feat_cats=='_length'] = 'other'\n feat_cats[feat_cats=='word_more'] = 'other'\n feat_cats[feat_cats=='upperCaseRatio'] = 'other'\n feat_cats[feat_cats=='oovWordsCount'] = 'other'\n feat_cats[feat_cats=='spell_skill'] = 'other'\n feat_cats[feat_cats=='ExclamationRatio'] = 'other'\n\n feat_cats[feat_cats=='DependencyTreeDepth'] = 'other'\n feat_cats[feat_cats=='ProductionRule'] = 'prod. 
rule'\n\n feat_cats[feat_cats=='ner.type.'] = 'other'\n\n feat_cats[feat_cats=='sentiment'] = 'other'\n\n # for f in range(len(feat_cats)):\n # feat_cats[f] = feat_cats[f].lower()\n\n print(\"After combining some categories.............................\")\n\n for catname in np.unique(feat_cats):\n print(\"No. features in category %s = %i\" % (catname, np.sum(feat_cats == catname)))\n\n # sort by length scale\n sorted_idxs = np.argsort(mean_ls)\n sorted_vals = mean_ls[sorted_idxs]\n \n # ignore those that were not valid\n sorted_vals = sorted_vals[totals[sorted_idxs]>0]\n sorted_idxs = sorted_idxs[totals[sorted_idxs]>0]\n\n sorted_cats = feat_cats[sorted_idxs]\n sorted_cats = sorted_cats[totals[sorted_idxs]>0]\n \n embeddingnames = np.empty(nembeddings, dtype=object)\n for e in range(nembeddings):\n embeddingnames[e] = 'Emb_dimension_%i' % e\n \n featnames = np.concatenate((embeddingnames, featnames))\n sorted_featnames = featnames[sorted_idxs]\n sorted_featnames = sorted_featnames[totals[sorted_idxs]>0]\n \n '''\n An alternative to plotting the distributions would be to list the top ten most important and least important features.\n '''\n figure_path = os.path.expanduser('./documents/pref_learning_for_convincingness/figures/features2/')\n \n np.savetxt(figure_path + '/feature_table.tex', np.concatenate((sorted_featnames[:, None], sorted_vals[:, None]), \n axis=1), fmt='%s & %.5f \\\\nonumber\\\\\\\\')\n\n cat_arr = []\n labels = []\n for c, cat in enumerate(np.unique(feat_cats)):\n clengthscales = sorted_vals[sorted_cats == cat]\n cat_arr.append(clengthscales)\n labels.append(cat)\n\n # # Try a histogram instead? For each length-scale band, how many features of each type are there?\n # plt.figure()\n #\n # plt.hist(cat_arr, label=labels, color=col[:len(labels)], histtype='bar',\n # bins=np.logspace(np.log10(1), np.log10(100000), 18), density=True) # density=True causes the values to be normalised\n # plt.xlabel('length-scale')\n # plt.ylabel('log_10 no. 
features')\n # plt.legend(loc='best')\n # plt.gca().set_xscale('log')\n #\n # plt.savefig(figure_path + 'hist.pdf')\n \n # produce content for a latex table\n\n matplotlib.rcParams.update({'font.size': 16})\n plt.figure(figsize=(10,3))\n\n meds = []\n low = []\n high = []\n mins = []\n maxs = []\n vals = []\n for c, cat in enumerate(np.unique(feat_cats)):\n clengthscales = sorted_vals[sorted_cats == cat]\n #print '%s & %s & %s' & (cat, np.median(clengthscales), np.percentile(clengthscales, 25), np.percentile(clengthscales, 75))\n #meds.append(np.median(clengthscales))\n #low.append(np.percentile(clengthscales, 25))\n #high.append(np.percentile(clengthscales, 75))\n #mins.append(np.min(clengthscales))\n #maxs.append(np.max(clengthscales))\n vals.append(clengthscales)\n\n\n ax = plt.subplot(1, len(np.unique(feat_cats)), c+1)\n\n #plt.xlim(0, 20)\n plt.hist(clengthscales, label=labels[c], color='blue', histtype='bar',\n #bins=np.logspace(np.log10(100), np.log10(100000), 24), density=False, orientation='horizontal')\n #bins = np.logspace(np.log10(5500), np.log10(34000), 24), density = False, orientation = 'horizontal')\n bins=np.arange(30) * 0.02 + 0.52, density=False, orientation='horizontal')\n\n # ax.set_yscale('log')\n #\n if c == 0:\n plt.ylabel('length-scale')# x10^3')\n #ax.get_yaxis().set_ticks([6e3, 1e4, 2e4, 3e4])\n #ax.get_yaxis().set_ticklabels(['6', '10', '20', '30'])\n else:\n ax.get_yaxis().set_ticks([])\n ax.get_yaxis().set_ticklabels([])\n\n #ax.get_xaxis().set_ticks([]) # write the x axis limits in the caption!!!\n plt.title(cat)\n\n #plt.gca().yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)\n\n # for i, v in enumerate(vals):\n # vals[i] = np.log10(v)\n\n #bp = plt.boxplot(vals, labels=labels, notch=0, whiskerprops={'linestyle':'solid'},\n # patch_artist=True)\n #plt.setp(bp['boxes'], color='black')\n #plt.setp(bp['whiskers'], color='black')\n #for patch in bp['boxes']:\n # patch.set_facecolor('tan')\n\n # yrange = np.arange(-2, 3)\n # plt.gca().set_yticks(yrange)\n # plt.gca().set_yticklabels(10.0**yrange)\n\n # plt.gca().set_axisbelow(True)\n\n #plt.ylim(0,3)\n\n plt.savefig(figure_path + 'boxplot.pdf')\n\n ############\n \n # plt.figure()\n #\n # rowsize = 5\n #\n # for c, cat in enumerate(np.unique(feat_cats)):\n # clengthscales = sorted_vals[sorted_cats == cat]\n # #plt.scatter(clengthscales, np.zeros(len(clengthscales)) + (1+c)*1000, marker=marks[c], color=col[c])\n # ax = plt.subplot(len(labels)/rowsize + 1, rowsize, c+1)\n # plt.plot(clengthscales, color=col[c], label=cat, marker=marks[c], linewidth=0)\n # plt.title(cat)\n # plt.ylim(np.min(sorted_vals), np.max(sorted_vals))\n #\n # frame1 = plt.gca()\n # if np.mod(c, rowsize):\n # frame1.axes.get_yaxis().set_ticks([])\n # else:\n # plt.ylabel('length-scale')\n # ax.xaxis.set_major_locator(MaxNLocator(nbins=2))\n #\n # plt.xlabel('features')\n # plt.show()\n \n output = np.concatenate((sorted_cats[:, None], featnames[sorted_idxs][:, None], sorted_vals[:, None]), axis=1)\n np.savetxt(\"./results/feature_analysis/features.tsv\", output, fmt='%s\\t%s\\t%s\\t', delimiter='\\t', header='category, feature_name, length-scale')\n\n # repeat this but make a separate sorted file by category\n for catname in np.unique(sorted_cats):\n catidxs = sorted_cats == catname\n output = np.concatenate((sorted_cats[catidxs, None], featnames[sorted_idxs][catidxs, None],\n sorted_vals[catidxs, None]), axis=1)\n np.savetxt(\"./results/feature_analysis/features_%s.tsv\" % catname, output, fmt='%s\\t%s\\t%s\\t', 
delimiter='\\t',\n header='category, feature_name, length-scale')\n\n\n print('all done.')"
] |
[
[
"matplotlib.pyplot.title",
"numpy.unique",
"numpy.min",
"numpy.arange",
"matplotlib.pyplot.savefig",
"numpy.argwhere",
"numpy.concatenate",
"numpy.max",
"matplotlib.pyplot.ylabel",
"numpy.fromstring",
"matplotlib.rcParams.update",
"numpy.savetxt",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] |
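Illustrative note (not part of the dataset row above): a small, assumption-laden sketch of the central numpy pattern in analyse_features.py, sorting features by their learned length-scale with np.argsort and writing one (category, feature name, length-scale) table per category via np.unique and np.savetxt. The feature names, categories, and length-scale values here are invented placeholders.

import numpy as np

featnames = np.array(["Emb_dimension_0", "upperCaseRatio", "DependencyTreeDepth"])
feat_cats = np.array(["embeddings", "other", "other"])
mean_ls = np.array([3.2, 0.7, 1.9])                      # hypothetical learned length-scales

order = np.argsort(mean_ls)                              # sort features by length-scale (ascending)
table = np.concatenate((feat_cats[order, None], featnames[order, None],
                        mean_ls[order, None].astype(str)), axis=1)
for cat in np.unique(feat_cats):
    rows = table[table[:, 0] == cat]
    np.savetxt("features_%s.tsv" % cat, rows, fmt="%s\t%s\t%s",
               header="category, feature_name, length-scale")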
bibekuchiha/dataquest
|
[
"c7d8a2966fe2eee864442a59d64309033ea9993e"
] |
[
"5. Probability and Statistics/statistics-intermediate/The Mode-307.py"
] |
[
"## 1. Introduction ##\n\nimport pandas as pd\n\nhouses = pd.read_table('AmesHousing_1.txt')\nprint(houses[['Land Slope','Roof Style','Kitchen AbvGr']])\n\nscale_land = 'ordinal'\n\nscale_roof = 'nominal'\n\nkitchen_variable = 'discrete'\n\n\n## 2. The Mode for Ordinal Variables ##\n\ndef mode(array):\n counts = {}\n \n for value in array:\n if value in counts:\n counts[value] += 1\n else:\n counts[value] = 1\n \n return max(counts, key = counts.get)\n\nmode_function = mode(houses['Land Slope'])\nmode_method = houses['Land Slope'].mode()\nsame = (mode_function == mode_method)\n\n## 3. The Mode for Nominal Variables ##\n\n# The function we wrote (you can copy-paste yours from the previous screen)\ndef mode(array):\n counts = {}\n \n for value in array:\n if value in counts:\n counts[value] += 1\n else:\n counts[value] = 1\n \n return max(counts, key = counts.get)\ndef mode(array):\n counts = {}\n \n for value in array:\n if value in counts:\n counts[value] += 1\n else:\n counts[value] = 1\n \n return (max(counts, key = counts.get),\n counts\n )\n\nmode, value_counts = mode(houses['Roof Style'])\n\n## 4. The Mode for Discrete Variables ##\n\nbedroom_variable = 'discrete'\nbedroom_mode = houses['Bedroom AbvGr'].mode()\n\nprice_variable = 'continuous'\n\n## 5. Special Cases ##\n\nintervals = pd.interval_range(start = 0, end = 800000, freq = 100000)\ngr_freq_table = pd.Series([0,0,0,0,0,0,0,0], index = intervals)\n\nfor value in houses['SalePrice']:\n for interval in intervals:\n if value in interval:\n gr_freq_table.loc[interval] += 1\n break\n\nprint(gr_freq_table)\nmode = 150000\nmean = houses['SalePrice'].mean()\nmedian = houses['SalePrice'].median()\n\nsentence_1 = True\nsentence_2 = True\n\n## 6. Skewed Distributions ##\n\ndistribution_1 = {'mean': 3021 , 'median': 3001, 'mode': 2947}\ndistribution_2 = {'median': 924 , 'mode': 832, 'mean': 962}\ndistribution_3 = {'mode': 202, 'mean': 143, 'median': 199}\nshape_1 = 'right skew'\nshape_2 = 'right skew'\nshape_3 = 'left skew'\n\n## 7. Symmetrical Distributions ##\n\nhouses['Mo Sold'].plot.kde(xlim = [1,12])\n\nimport matplotlib.pyplot as plt\nplt.axvline(houses['Mo Sold'].mode()[0], color = 'Green', label = 'Mode')\nplt.axvline(houses['Mo Sold'].median(), color = 'Orange', label = 'Median')\nplt.axvline(houses['Mo Sold'].mean(), color = 'Black', label = 'Mean')\nplt.legend()"
] |
[
[
"pandas.read_table",
"pandas.interval_range",
"pandas.Series",
"matplotlib.pyplot.legend"
]
] |
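Illustrative note (not part of the dataset row above): a sketch of the grouped frequency table idiom from "The Mode-307.py", binning a continuous column into fixed-width pd.interval_range intervals and counting values per interval in a pd.Series. The sample prices are made up; the real script reads AmesHousing_1.txt with pd.read_table.

import pandas as pd

prices = pd.Series([35000, 125000, 150000, 152000, 410000, 755000])  # hypothetical SalePrice values
intervals = pd.interval_range(start=0, end=800000, freq=100000)
gr_freq_table = pd.Series([0] * len(intervals), index=intervals)

for value in prices:
    for interval in intervals:
        if value in interval:                 # pd.Interval membership test
            gr_freq_table.loc[interval] += 1
            break

print(gr_freq_table)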
swkokr/FetchPickAndPlace_HER_DDPG
|
[
"8378b53dac922cffeff8e2bdabca69cf6fd8bd54"
] |
[
"baselines/her/ddpg.py"
] |
[
"from collections import OrderedDict\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.staging import StagingArea\n\nfrom baselines import logger\nfrom baselines.her.util import (\n import_function, store_args, flatten_grads, transitions_in_episode_batch, convert_episode_to_batch_major)\nfrom baselines.her.normalizer import Normalizer\nfrom baselines.her.replay_buffer import ReplayBuffer\nfrom baselines.common.mpi_adam import MpiAdam\nfrom baselines.common import tf_util\n\n\ndef dims_to_shapes(input_dims):\n return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}\n\n\nglobal DEMO_BUFFER #buffer for demonstrations\n\nclass DDPG(object):\n @store_args\n def __init__(self, input_dims, buffer_size, hidden, layers, network_class, polyak, batch_size,\n Q_lr, pi_lr, norm_eps, norm_clip, max_u, action_l2, clip_obs, scope, T,\n rollout_batch_size, subtract_goals, relative_goals, clip_pos_returns, clip_return,\n bc_loss, q_filter, num_demo, demo_batch_size, prm_loss_weight, aux_loss_weight,\n sample_transitions, gamma, reuse=False, **kwargs):\n \"\"\"Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER).\n Added functionality to use demonstrations for training to Overcome exploration problem.\n\n Args:\n input_dims (dict of ints): dimensions for the observation (o), the goal (g), and the\n actions (u)\n buffer_size (int): number of transitions that are stored in the replay buffer\n hidden (int): number of units in the hidden layers\n layers (int): number of hidden layers\n network_class (str): the network class that should be used (e.g. 'baselines.her.ActorCritic')\n polyak (float): coefficient for Polyak-averaging of the target network\n batch_size (int): batch size for training\n Q_lr (float): learning rate for the Q (critic) network\n pi_lr (float): learning rate for the pi (actor) network\n norm_eps (float): a small value used in the normalizer to avoid numerical instabilities\n norm_clip (float): normalized inputs are clipped to be in [-norm_clip, norm_clip]\n max_u (float): maximum action magnitude, i.e. 
actions are in [-max_u, max_u]\n action_l2 (float): coefficient for L2 penalty on the actions\n clip_obs (float): clip observations before normalization to be in [-clip_obs, clip_obs]\n scope (str): the scope used for the TensorFlow graph\n T (int): the time horizon for rollouts\n rollout_batch_size (int): number of parallel rollouts per DDPG agent\n subtract_goals (function): function that subtracts goals from each other\n relative_goals (boolean): whether or not relative goals should be fed into the network\n clip_pos_returns (boolean): whether or not positive returns should be clipped\n clip_return (float): clip returns to be in [-clip_return, clip_return]\n sample_transitions (function) function that samples from the replay buffer\n gamma (float): gamma used for Q learning updates\n reuse (boolean): whether or not the networks should be reused\n bc_loss: whether or not the behavior cloning loss should be used as an auxilliary loss\n q_filter: whether or not a filter on the q value update should be used when training with demonstartions\n num_demo: Number of episodes in to be used in the demonstration buffer\n demo_batch_size: number of samples to be used from the demonstrations buffer, per mpi thread\n prm_loss_weight: Weight corresponding to the primary loss\n aux_loss_weight: Weight corresponding to the auxilliary loss also called the cloning loss\n \"\"\"\n if self.clip_return is None:\n self.clip_return = np.inf\n\n self.create_actor_critic = import_function(self.network_class)\n\n input_shapes = dims_to_shapes(self.input_dims)\n self.dimo = self.input_dims['o']\n self.dimg = self.input_dims['g']\n self.dimu = self.input_dims['u']\n\n # Prepare staging area for feeding data to the model.\n stage_shapes = OrderedDict()\n for key in sorted(self.input_dims.keys()):\n if key.startswith('info_'):\n continue\n stage_shapes[key] = (None, *input_shapes[key])\n for key in ['o', 'g']:\n stage_shapes[key + '_2'] = stage_shapes[key]\n stage_shapes['r'] = (None,)\n self.stage_shapes = stage_shapes\n\n # Create network.\n with tf.variable_scope(self.scope):\n self.staging_tf = StagingArea(\n dtypes=[tf.float32 for _ in self.stage_shapes.keys()],\n shapes=list(self.stage_shapes.values()))\n self.buffer_ph_tf = [\n tf.placeholder(tf.float32, shape=shape) for shape in self.stage_shapes.values()]\n self.stage_op = self.staging_tf.put(self.buffer_ph_tf)\n\n self._create_network(reuse=reuse)\n\n # Configure the replay buffer.\n buffer_shapes = {key: (self.T-1 if key != 'o' else self.T, *input_shapes[key])\n for key, val in input_shapes.items()}\n buffer_shapes['g'] = (buffer_shapes['g'][0], self.dimg)\n buffer_shapes['ag'] = (self.T, self.dimg)\n\n buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_size\n self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)\n\n global DEMO_BUFFER\n DEMO_BUFFER = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions) #initialize the demo buffer; in the same way as the primary data buffer\n\n def _random_action(self, n):\n return np.random.uniform(low=-self.max_u, high=self.max_u, size=(n, self.dimu))\n\n def _preprocess_og(self, o, ag, g):\n if self.relative_goals:\n g_shape = g.shape\n g = g.reshape(-1, self.dimg)\n ag = ag.reshape(-1, self.dimg)\n g = self.subtract_goals(g, ag)\n g = g.reshape(*g_shape)\n o = np.clip(o, -self.clip_obs, self.clip_obs)\n g = np.clip(g, -self.clip_obs, self.clip_obs)\n return o, g\n\n def step(self, obs):\n actions = self.get_actions(obs['observation'], 
obs['achieved_goal'], obs['desired_goal'])\n return actions, None, None, None\n\n\n def get_actions(self, o, ag, g, noise_eps=0., random_eps=0., use_target_net=False,\n compute_Q=False):\n o, g = self._preprocess_og(o, ag, g)\n policy = self.target if use_target_net else self.main\n # values to compute\n vals = [policy.pi_tf]\n if compute_Q:\n vals += [policy.Q_pi_tf]\n # feed\n feed = {\n policy.o_tf: o.reshape(-1, self.dimo),\n policy.g_tf: g.reshape(-1, self.dimg),\n policy.u_tf: np.zeros((o.size // self.dimo, self.dimu), dtype=np.float32)\n }\n\n ret = self.sess.run(vals, feed_dict=feed)\n # action postprocessing\n u = ret[0]\n noise = noise_eps * self.max_u * np.random.randn(*u.shape) # gaussian noise\n u += noise\n u = np.clip(u, -self.max_u, self.max_u)\n u += np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1) * (self._random_action(u.shape[0]) - u) # eps-greedy\n if u.shape[0] == 1:\n u = u[0]\n u = u.copy()\n ret[0] = u\n\n if len(ret) == 1:\n return ret[0]\n else:\n return ret\n\n def init_demo_buffer(self, demoDataFile, update_stats=True): #function that initializes the demo buffer\n\n demoData = np.load(demoDataFile, allow_pickle=True) #load the demonstration data from data file\n info_keys = [key.replace('info_', '') for key in self.input_dims.keys() if key.startswith('info_')]\n info_values = [np.empty((self.T - 1, 1, self.input_dims['info_' + key]), np.float32) for key in info_keys]\n\n demo_data_obs = demoData['obs']\n demo_data_acs = demoData['acs']\n demo_data_info = demoData['info']\n\n for epsd in range(self.num_demo): # we initialize the whole demo buffer at the start of the training\n obs, acts, goals, achieved_goals = [], [] ,[] ,[]\n i = 0\n for transition in range(self.T - 1):\n obs.append([demo_data_obs[epsd][transition].get('observation')])\n acts.append([demo_data_acs[epsd][transition]])\n goals.append([demo_data_obs[epsd][transition].get('desired_goal')])\n achieved_goals.append([demo_data_obs[epsd][transition].get('achieved_goal')])\n for idx, key in enumerate(info_keys):\n info_values[idx][transition, i] = demo_data_info[epsd][transition][key]\n\n\n obs.append([demo_data_obs[epsd][self.T - 1].get('observation')])\n achieved_goals.append([demo_data_obs[epsd][self.T - 1].get('achieved_goal')])\n\n episode = dict(o=obs,\n u=acts,\n g=goals,\n ag=achieved_goals)\n for key, value in zip(info_keys, info_values):\n episode['info_{}'.format(key)] = value\n\n episode = convert_episode_to_batch_major(episode)\n global DEMO_BUFFER\n DEMO_BUFFER.store_episode(episode) # create the observation dict and append them into the demonstration buffer\n logger.debug(\"Demo buffer size currently \", DEMO_BUFFER.get_current_size()) #print out the demonstration buffer size\n\n if update_stats:\n # add transitions to normalizer to normalize the demo data as well\n episode['o_2'] = episode['o'][:, 1:, :]\n episode['ag_2'] = episode['ag'][:, 1:, :]\n num_normalizing_transitions = transitions_in_episode_batch(episode)\n transitions = self.sample_transitions(episode, num_normalizing_transitions)\n\n o, g, ag = transitions['o'], transitions['g'], transitions['ag']\n transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)\n # No need to preprocess the o_2 and g_2 since this is only used for stats\n\n self.o_stats.update(transitions['o'])\n self.g_stats.update(transitions['g'])\n\n self.o_stats.recompute_stats()\n self.g_stats.recompute_stats()\n episode.clear()\n\n logger.info(\"Demo buffer size: \", DEMO_BUFFER.get_current_size()) #print out the demonstration buffer 
size\n\n def store_episode(self, episode_batch, update_stats=True):\n \"\"\"\n episode_batch: array of batch_size x (T or T+1) x dim_key\n 'o' is of size T+1, others are of size T\n \"\"\"\n\n self.buffer.store_episode(episode_batch)\n\n if update_stats:\n # add transitions to normalizer\n episode_batch['o_2'] = episode_batch['o'][:, 1:, :]\n episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]\n num_normalizing_transitions = transitions_in_episode_batch(episode_batch)\n transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)\n\n o, g, ag = transitions['o'], transitions['g'], transitions['ag']\n transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)\n # No need to preprocess the o_2 and g_2 since this is only used for stats\n\n self.o_stats.update(transitions['o'])\n self.g_stats.update(transitions['g'])\n\n self.o_stats.recompute_stats()\n self.g_stats.recompute_stats()\n\n def get_current_buffer_size(self):\n return self.buffer.get_current_size()\n\n def _sync_optimizers(self):\n self.Q_adam.sync()\n self.pi_adam.sync()\n\n def _grads(self):\n # Avoid feed_dict here for performance!\n critic_loss, actor_loss, Q_grad, pi_grad = self.sess.run([\n self.Q_loss_tf,\n self.main.Q_pi_tf,\n self.Q_grad_tf,\n self.pi_grad_tf\n ])\n return critic_loss, actor_loss, Q_grad, pi_grad\n\n def _update(self, Q_grad, pi_grad):\n self.Q_adam.update(Q_grad, self.Q_lr)\n self.pi_adam.update(pi_grad, self.pi_lr)\n\n def sample_batch(self):\n if self.bc_loss: #use demonstration buffer to sample as well if bc_loss flag is set TRUE\n transitions = self.buffer.sample(self.batch_size - self.demo_batch_size)\n global DEMO_BUFFER\n transitions_demo = DEMO_BUFFER.sample(self.demo_batch_size) #sample from the demo buffer\n for k, values in transitions_demo.items():\n rolloutV = transitions[k].tolist()\n for v in values:\n rolloutV.append(v.tolist())\n transitions[k] = np.array(rolloutV)\n else:\n transitions = self.buffer.sample(self.batch_size) #otherwise only sample from primary buffer\n\n o, o_2, g = transitions['o'], transitions['o_2'], transitions['g']\n ag, ag_2 = transitions['ag'], transitions['ag_2']\n transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)\n transitions['o_2'], transitions['g_2'] = self._preprocess_og(o_2, ag_2, g)\n\n transitions_batch = [transitions[key] for key in self.stage_shapes.keys()]\n return transitions_batch\n\n def stage_batch(self, batch=None):\n if batch is None:\n batch = self.sample_batch()\n assert len(self.buffer_ph_tf) == len(batch)\n self.sess.run(self.stage_op, feed_dict=dict(zip(self.buffer_ph_tf, batch)))\n\n def train(self, stage=True):\n if stage:\n self.stage_batch()\n critic_loss, actor_loss, Q_grad, pi_grad = self._grads()\n self._update(Q_grad, pi_grad)\n return critic_loss, actor_loss\n\n def _init_target_net(self):\n self.sess.run(self.init_target_net_op)\n\n def update_target_net(self):\n self.sess.run(self.update_target_net_op)\n\n def clear_buffer(self):\n self.buffer.clear_buffer()\n\n def _vars(self, scope):\n res = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope + '/' + scope)\n assert len(res) > 0\n return res\n\n def _global_vars(self, scope):\n res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope + '/' + scope)\n return res\n\n def _create_network(self, reuse=False):\n logger.info(\"Creating a DDPG agent with action space %d x %s...\" % (self.dimu, self.max_u))\n self.sess = tf_util.get_session()\n\n # running averages\n with tf.variable_scope('o_stats') as vs:\n 
if reuse:\n vs.reuse_variables()\n self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip, sess=self.sess)\n with tf.variable_scope('g_stats') as vs:\n if reuse:\n vs.reuse_variables()\n self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip, sess=self.sess)\n\n # mini-batch sampling.\n batch = self.staging_tf.get()\n batch_tf = OrderedDict([(key, batch[i])\n for i, key in enumerate(self.stage_shapes.keys())])\n batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])\n\n #choose only the demo buffer samples\n mask = np.concatenate((np.zeros(self.batch_size - self.demo_batch_size), np.ones(self.demo_batch_size)), axis = 0)\n\n # networks\n with tf.variable_scope('main') as vs:\n if reuse:\n vs.reuse_variables()\n self.main = self.create_actor_critic(batch_tf, net_type='main', **self.__dict__)\n vs.reuse_variables()\n with tf.variable_scope('target') as vs:\n if reuse:\n vs.reuse_variables()\n target_batch_tf = batch_tf.copy()\n target_batch_tf['o'] = batch_tf['o_2']\n target_batch_tf['g'] = batch_tf['g_2']\n self.target = self.create_actor_critic(\n target_batch_tf, net_type='target', **self.__dict__)\n vs.reuse_variables()\n assert len(self._vars(\"main\")) == len(self._vars(\"target\"))\n\n # loss functions\n target_Q_pi_tf = self.target.Q_pi_tf\n clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)\n target_tf = tf.clip_by_value(batch_tf['r'] + self.gamma * target_Q_pi_tf, *clip_range)\n self.Q_loss_tf = tf.reduce_mean(tf.square(tf.stop_gradient(target_tf) - self.main.Q_tf))\n\n if self.bc_loss ==1 and self.q_filter == 1 : # train with demonstrations and use bc_loss and q_filter both\n maskMain = tf.reshape(tf.boolean_mask(self.main.Q_tf > self.main.Q_pi_tf, mask), [-1]) #where is the demonstrator action better than actor action according to the critic? 
choose those samples only\n #define the cloning loss on the actor's actions only on the samples which adhere to the above masks\n self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask(tf.boolean_mask((self.main.pi_tf), mask), maskMain, axis=0) - tf.boolean_mask(tf.boolean_mask((batch_tf['u']), mask), maskMain, axis=0)))\n self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf) #primary loss scaled by it's respective weight prm_loss_weight\n self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u)) #L2 loss on action values scaled by the same weight prm_loss_weight\n self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf #adding the cloning loss to the actor loss as an auxilliary loss scaled by its weight aux_loss_weight\n\n elif self.bc_loss == 1 and self.q_filter == 0: # train with demonstrations without q_filter\n self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask((self.main.pi_tf), mask) - tf.boolean_mask((batch_tf['u']), mask)))\n self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)\n self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))\n self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf\n\n else: #If not training with demonstrations\n self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)\n self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))\n\n Q_grads_tf = tf.gradients(self.Q_loss_tf, self._vars('main/Q'))\n pi_grads_tf = tf.gradients(self.pi_loss_tf, self._vars('main/pi'))\n assert len(self._vars('main/Q')) == len(Q_grads_tf)\n assert len(self._vars('main/pi')) == len(pi_grads_tf)\n self.Q_grads_vars_tf = zip(Q_grads_tf, self._vars('main/Q'))\n self.pi_grads_vars_tf = zip(pi_grads_tf, self._vars('main/pi'))\n self.Q_grad_tf = flatten_grads(grads=Q_grads_tf, var_list=self._vars('main/Q'))\n self.pi_grad_tf = flatten_grads(grads=pi_grads_tf, var_list=self._vars('main/pi'))\n\n # optimizers\n self.Q_adam = MpiAdam(self._vars('main/Q'), scale_grad_by_procs=False)\n self.pi_adam = MpiAdam(self._vars('main/pi'), scale_grad_by_procs=False)\n\n # polyak averaging\n self.main_vars = self._vars('main/Q') + self._vars('main/pi')\n self.target_vars = self._vars('target/Q') + self._vars('target/pi')\n self.stats_vars = self._global_vars('o_stats') + self._global_vars('g_stats')\n self.init_target_net_op = list(\n map(lambda v: v[0].assign(v[1]), zip(self.target_vars, self.main_vars)))\n self.update_target_net_op = list(\n map(lambda v: v[0].assign(self.polyak * v[0] + (1. 
- self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))\n\n # initialize all variables\n tf.variables_initializer(self._global_vars('')).run()\n self._sync_optimizers()\n self._init_target_net()\n\n def logs(self, prefix=''):\n logs = []\n logs += [('stats_o/mean', np.mean(self.sess.run([self.o_stats.mean])))]\n logs += [('stats_o/std', np.mean(self.sess.run([self.o_stats.std])))]\n logs += [('stats_g/mean', np.mean(self.sess.run([self.g_stats.mean])))]\n logs += [('stats_g/std', np.mean(self.sess.run([self.g_stats.std])))]\n\n if prefix != '' and not prefix.endswith('/'):\n return [(prefix + '/' + key, val) for key, val in logs]\n else:\n return logs\n\n def __getstate__(self):\n \"\"\"Our policies can be loaded from pkl, but after unpickling you cannot continue training.\n \"\"\"\n excluded_subnames = ['_tf', '_op', '_vars', '_adam', 'buffer', 'sess', '_stats',\n 'main', 'target', 'lock', 'env', 'sample_transitions',\n 'stage_shapes', 'create_actor_critic']\n\n state = {k: v for k, v in self.__dict__.items() if all([not subname in k for subname in excluded_subnames])}\n state['buffer_size'] = self.buffer_size\n state['tf'] = self.sess.run([x for x in self._global_vars('') if 'buffer' not in x.name])\n return state\n\n def __setstate__(self, state):\n if 'sample_transitions' not in state:\n # We don't need this for playing the policy.\n state['sample_transitions'] = None\n\n self.__init__(**state)\n # set up stats (they are overwritten in __init__)\n for k, v in state.items():\n if k[-6:] == '_stats':\n self.__dict__[k] = v\n # load TF variables\n vars = [x for x in self._global_vars('') if 'buffer' not in x.name]\n assert(len(vars) == len(state[\"tf\"]))\n node = [tf.assign(var, val) for var, val in zip(vars, state[\"tf\"])]\n self.sess.run(node)\n\n def save(self, save_path):\n tf_util.save_variables(save_path)\n\n"
] |
[
[
"numpy.random.randn",
"tensorflow.boolean_mask",
"numpy.clip",
"tensorflow.get_collection",
"tensorflow.stop_gradient",
"tensorflow.square",
"numpy.load",
"numpy.zeros",
"tensorflow.placeholder",
"numpy.random.binomial",
"numpy.array",
"tensorflow.clip_by_value",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.assign",
"numpy.ones",
"tensorflow.variable_scope",
"numpy.random.uniform",
"numpy.empty"
]
] |
devillove084/CollageDesign
|
[
"e2a85a8d15f82d1f72b754de04af78126eae9a1c"
] |
[
"MLCtr/machineLearning/decision_tree.py"
] |
[
"from __future__ import division, print_function\nimport numpy as np\nimport cupy\n\n\nfrom graduateutil import divide_on_feature, train_test_split, standardize, mean_squared_error\nfrom graduateutil import calculate_entropy, accuracy_score, calculate_variance\n\nclass DecisionNode():\n \"\"\"Class that represents a decision node or leaf in the decision tree\n\n Parameters:\n -----------\n feature_i: int\n Feature index which we want to use as the threshold measure.\n threshold: float\n The value that we will compare feature values at feature_i against to\n determine the prediction.\n value: float\n The class prediction if classification tree, or float value if regression tree.\n true_branch: DecisionNode\n Next decision node for samples where features value met the threshold.\n false_branch: DecisionNode\n Next decision node for samples where features value did not meet the threshold.\n \"\"\"\n def __init__(self, feature_i=None, threshold=None,\n value=None, true_branch=None, false_branch=None):\n self.feature_i = feature_i # Index for the feature that is tested\n self.threshold = threshold # Threshold value for feature\n self.value = value # Value if the node is a leaf in the tree\n self.true_branch = true_branch # 'Left' subtree\n self.false_branch = false_branch # 'Right' subtree\n\n\nclass DecisionTree(object):\n \"\"\"Super class of RegressionTree and ClassificationTree.\n\n Parameters:\n -----------\n min_samples_split: int\n The minimum number of samples needed to make a split when building a tree.\n min_impurity: float\n The minimum impurity required to split the tree further.\n max_depth: int\n The maximum depth of a tree.\n loss: function\n Loss function that is used for Gradient Boosting models to calculate impurity.\n \"\"\"\n def __init__(self, min_samples_split=2, min_impurity=1e-7,\n max_depth=float(\"inf\"), loss=None):\n self.root = None # Root node in dec. 
tree\n # Minimum n of samples to justify split\n self.min_samples_split = min_samples_split\n # The minimum impurity to justify split\n self.min_impurity = min_impurity\n # The maximum depth to grow the tree to\n self.max_depth = max_depth\n # Function to calculate impurity (classif.=>info gain, regr=>variance reduct.)\n self._impurity_calculation = None\n # Function to determine prediction of y at leaf\n self._leaf_value_calculation = None\n # If y is one-hot encoded (multi-dim) or not (one-dim)\n self.one_dim = None\n # If Gradient Boost\n self.loss = loss\n \n def fit(self, X, y, loss=None):\n \"\"\" Build decicion tree \"\"\"\n self.one_dim = len(y.shape) == 1\n self.root = self._build_tree(X,y)\n self.loss = None\n \n\n def _build_tree(self, X ,y, current_depth=0):\n \"\"\" Recursive method which builds out the decision tree and splits X and respective y\n on the feature of X which (based on impurity) best separates the data\"\"\"\n\n largest_impurity = 0\n best_criteria = None # Feature index and threshold\n best_sets = None # Subsets of the data\n\n if len(y.shape) == 1:\n y = np.expand_dims(y, axis=1)\n\n Xy = np.concatenate((X,y), axis=1)\n n_samples, n_features = X.shape\n\n if n_samples >= self.min_samples_split and current_depth <= self.max_depth:\n # Calculate the impurity for each feature\n for feature_i in range(n_features):\n # All values of feature_i\n feature_values = np.expand_dims(X[:, feature_i],axis = 1)\n unique_values = np.unique(feature_values)\n\n # Iterate through all unique values of feature column i and\n # calculate the impurity\n for threshold in unique_values:\n # Divide X and y depending on if the feature value of X at index feature_i\n # meets the threshold\n Xy1, Xy2 = divide_on_feature(Xy, feature_i, threshold)\n\n if len(Xy1) > 0 and len(Xy2) > 0:\n # Select the y-values of the two sets\n y1 = Xy1[:, n_features:]\n y2 = Xy2[:, n_features:]\n\n # Calculate impurity\n impurity = self._impurity_calculation(y, y1, y2)\n\n if impurity > largest_impurity:\n largest_impurity = impurity\n best_criteria = {\"feature_i\": feature_i, \"threshold\": threshold}\n best_sets = {\n \"leftX\": Xy1[:, :n_features], # X of left subtree\n \"lefty\": Xy1[:, n_features:], # y of left subtree\n \"rightX\": Xy2[:, :n_features], # X of right subtree\n \"righty\": Xy2[:, n_features:] # y of right subtree\n }\n\n if largest_impurity > self.min_impurity:\n # Build subtrees for the right and left branches\n true_branch = self._build_tree(best_sets[\"leftX\"], best_sets[\"lefty\"], current_depth + 1)\n false_branch = self._build_tree(best_sets[\"rightX\"], best_sets[\"righty\"], current_depth + 1)\n return DecisionNode(feature_i=best_criteria[\"feature_i\"], threshold=best_criteria[\n \"threshold\"], true_branch=true_branch, false_branch=false_branch)\n\n # We're at leaf => determine value\n leaf_value = self._leaf_value_calculation(y)\n\n return DecisionNode(value=leaf_value)\n \n def predict_value(self, x, tree=None):\n \"\"\" Do a recursive search down the tree and make a prediction of the data sample by the\n value of the leaf that we end up at \"\"\"\n\n if tree is None:\n tree = self.root\n\n # If we have a value (i.e we're at a leaf) => return value as the prediction\n if tree.value is not None:\n return tree.value\n\n # Choose the feature that we will test\n feature_value = x[tree.feature_i]\n\n # Determine if we will follow left or right branch\n branch = tree.false_branch\n if isinstance(feature_value, int) or isinstance(feature_value, float):\n if feature_value >= 
tree.threshold:\n branch = tree.true_branch\n elif feature_value == tree.threshold:\n branch = tree.true_branch\n\n # Test subtree\n return self.predict_value(x, branch)\n\n def predict(self, X):\n \"\"\" Classify samples one by one and return the set of labels \"\"\"\n y_pred = [self.predict_value(sample) for sample in X]\n return y_pred\n\n def print_tree(self, tree=None, indent=\" \"):\n \"\"\" Recursively print the decision tree \"\"\"\n if not tree:\n tree = self.root\n\n # If we're at leaf => print the label\n if tree.value is not None:\n print (tree.value)\n # Go deeper down the tree\n else:\n # Print test\n print (\"%s:%s? \" % (tree.feature_i, tree.threshold))\n # Print the true scenario\n print (\"%sT->\" % (indent), end=\"\")\n self.print_tree(tree.true_branch, indent + indent)\n # Print the false scenario\n print (\"%sF->\" % (indent), end=\"\")\n self.print_tree(tree.false_branch, indent + indent)\n \n\nclass XGBoostRegressionTree(DecisionTree):\n \"\"\"\n Regression tree for XGBoost\n - Reference -\n http://xgboost.readthedocs.io/en/latest/model.html\n \"\"\"\n\n def _split(self, y):\n \"\"\" y contains y_true in left half of the middle column and\n y_pred in the right half. Split and return the two matrices \"\"\"\n col = int(np.shape(y)[1]/2)\n y, y_pred = y[:, :col], y[:, col:]\n return y, y_pred\n\n def _gain(self, y, y_pred):\n nominator = np.power((y * self.loss.gradient(y, y_pred)).sum(), 2)\n denominator = self.loss.hess(y, y_pred).sum()\n return 0.5 * (nominator / denominator)\n\n def _gain_by_taylor(self, y, y1, y2):\n # Split\n y, y_pred = self._split(y)\n y1, y1_pred = self._split(y1)\n y2, y2_pred = self._split(y2)\n\n true_gain = self._gain(y1, y1_pred)\n false_gain = self._gain(y2, y2_pred)\n gain = self._gain(y, y_pred)\n return true_gain + false_gain - gain\n\n def _approximate_update(self, y):\n # y split into y, y_pred\n y, y_pred = self._split(y)\n # Newton's Method\n gradient = np.sum(y * self.loss.gradient(y, y_pred), axis=0)\n hessian = np.sum(self.loss.hess(y, y_pred), axis=0)\n update_approximation = gradient / hessian\n\n return update_approximation\n \n def fit(self, X, y):\n self._impurity_calculation = self._gain_by_taylor\n self._leaf_value_calculation = self._approximate_update\n super(XGBoostRegressionTree, self).fit(X, y)\n\nclass RegressionTree(DecisionTree):\n def _calculate_variance_reduction(self, y, y1, y2):\n var_tot = calculate_variance(y)\n var_1 = calculate_variance(y1)\n var_2 = calculate_variance(y2)\n frac_1 = len(y1) / len(y)\n frac_2 = len(y2) / len(y)\n\n # Calculate the variance reduction\n variance_reduction = var_tot - (frac_1 * var_1 + frac_2 * var_2)\n\n return sum(variance_reduction)\n\n def _mean_of_y(self, y):\n value = np.mean(y, axis=0)\n return value if len(value) > 1 else value[0]\n\n def fit(self, X, y):\n self._impurity_calculation = self._calculate_variance_reduction\n self._leaf_value_calculation = self._mean_of_y\n super(RegressionTree, self).fit(X, y)\n\n\nclass ClassificationTree(DecisionTree):\n def _calculate_information_gain(self, y, y1, y2):\n # Calculate information gain\n p = len(y1) / len(y)\n entropy = calculate_entropy(y)\n info_gain = entropy - p * \\\n calculate_entropy(y1) - (1 - p) * \\\n calculate_entropy(y2)\n\n return info_gain\n\n def _majority_vote(self, y):\n most_common = None\n max_count = 0\n for label in np.unique(y):\n # Count number of occurences of samples with label\n count = len(y[y == label])\n if count > max_count:\n most_common = label\n max_count = count\n return most_common\n\n 
def fit(self, X, y):\n self._impurity_calculation = self._calculate_information_gain\n self._leaf_value_calculation = self._majority_vote\n super(ClassificationTree, self).fit(X, y)"
] |
[
[
"numpy.expand_dims",
"numpy.unique",
"numpy.concatenate",
"numpy.mean",
"numpy.shape"
]
] |
jcboyd/cyclegan-roi
|
[
"f0c80c6122d17406f5282f58ea09abaf2b70c388"
] |
[
"src/utils.py"
] |
[
"import numpy as np\nimport torch\nfrom torch.nn import UpsamplingNearest2d, UpsamplingBilinear2d\n\nfrom rectpack import newPacker\n\n\ndef get_mnist_canvas(images, labels, nb_classes=10, dim=128):\n\n canvas = -torch.ones((dim, dim))\n noise_canvas = torch.zeros((nb_classes, dim, dim))\n condition_canvas = torch.zeros((nb_classes, dim, dim))\n\n num_objs, h, w = images.shape\n\n y, x = (torch.randint(0, dim - h, size=(num_objs, 1)),\n torch.randint(0, dim - w, size=(num_objs, 1)))\n\n bboxes = torch.cat([x, y, x + w, y + h], axis=1)\n\n for i, (x1, y1, x2, y2) in enumerate(bboxes):\n\n canvas[y1:y2, x1:x2] = torch.max(canvas[y1:y2, x1:x2],\n images[i].squeeze())\n\n z = torch.randn(1, 1, w // 4, h // 4)\n z = UpsamplingNearest2d(scale_factor=4)(z)\n\n noise_canvas[labels[i], y1:y2, x1:x2] = z.squeeze()\n\n condition_canvas[labels[i], y1:y2, x1:x2] = torch.ones((h, w))\n\n #bboxes = torch.cat([bboxes, labels[:, None]], axis=1)\n\n return canvas, noise_canvas, condition_canvas, bboxes\n\n\ndef get_mnist_knapsack(images, labels, nb_classes=10, dim=128):\n\n bboxes = []\n\n canvas = -torch.ones((dim, dim))\n noise_canvas = torch.zeros((nb_classes, dim, dim))\n condition_canvas = torch.zeros((nb_classes, dim, dim))\n\n hs, ws = 28 + 5 * np.random.randn(2, images.shape[0])\n hs = np.clip(hs, 14, 48).astype('int')\n ws = np.clip(ws, 14, 48).astype('int')\n\n rectangles = list(zip(hs, hs))\n bins = [(128, 128)]\n\n packer = newPacker()\n\n # Add the rectangles to packing queue\n for r in rectangles:\n packer.add_rect(*r)\n\n # Add the bins where the rectangles will be placed\n for b in bins:\n packer.add_bin(*b)\n\n # Start packing\n packer.pack()\n\n for i, rect in enumerate(packer.rect_list()):\n _, x, y, w, h, _ = rect\n\n scaled_crop = UpsamplingBilinear2d(size=(h, w))(images[i][None, None])\n canvas[y:y+h, x:x+w] = torch.max(canvas[y:y+h, x:x+w], scaled_crop)\n\n z = torch.randn(1, 1, 7, 7)\n z = UpsamplingNearest2d(size=(h, w))(z)\n noise_canvas[labels[i], y:y+h, x:x+w] = z\n\n condition_canvas[labels[i], y:y+h, x:x+w] = torch.ones((h, w))\n\n bboxes.append([x, y, x + w, y + h])\n\n return canvas, noise_canvas, condition_canvas, torch.Tensor(bboxes)\n\n\ndef mnist_canvas_generator(x_data, y_data, nb_batch, nb_obj, knapsack):\n\n f_canvas = get_mnist_knapsack if knapsack else get_mnist_canvas\n\n while True:\n\n batch_idx = torch.randint(x_data.shape[0], size=(nb_batch, nb_obj))\n data = [f_canvas(x_data[idx], y_data[idx]) for idx in batch_idx]\n\n canvas_batch = torch.cat([canvas[None, None] for canvas, _, _, _ in data])\n noise_batch = torch.cat([noise[None] for _, noise, _, _ in data])\n condition_batch = torch.cat([condition[None] for _, _, condition, _ in data])\n\n# bbox_batch = [torch.cat([i * torch.ones(nb_obj, 1), bboxes], axis=1)\n# for i, (_, _, _, bboxes) in enumerate(data)]\n\n bbox_batch = [torch.cat([i * torch.ones(bboxes.shape[0], 1), bboxes], axis=1)\n for i, (_, _, _, bboxes) in enumerate(data)]\n\n bbox_batch = torch.cat(bbox_batch, axis=0)\n\n yield canvas_batch, noise_batch, condition_batch, bbox_batch\n"
] |
[
[
"torch.randint",
"torch.ones",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.randn",
"torch.Tensor",
"numpy.clip",
"torch.nn.UpsamplingBilinear2d",
"numpy.random.randn",
"torch.nn.UpsamplingNearest2d"
]
] |
ireina7/gzsl-seg
|
[
"9aad220274b4a58b59f5da430f873b5dfc21e458"
] |
[
"src/util/logging.py"
] |
[
"import sys\nimport matplotlib.pyplot as plt\n\nfrom util.typing.basic import *\nfrom src.config import *\n\n\ndef show_figure_nonblocking() -> None:\n plt.show(block = False)\n plt.pause(0.001)\n #end show_figure_nonblocking\n\ndef show_figure_blocking() -> None:\n plt.show()\n #end show_figure_blocking\n\ndef show_if(EPOCH: int, LOOP: int):\n def f(epoch: int, loop: int, call_back) -> None:\n if epoch % EPOCH == 0 and loop % LOOP == 0:\n call_back()\n return f\n\nshow_figure_if = show_if(EPOCH_TO_SHOW_FIGURE, LOOP_TO_SHOW_FIGURE)\nshow_msg_if = show_if(EPOCH_TO_SHOW_MSG, LOOP_TO_SHOW_MSG )\n\n\ndef draw_sample(batch):\n imgs, msks = batch['image'], batch['label']\n fig, axs = plt.subplots(1, 3, figsize=(10, 3))\n axs[0].imshow(imgs[0].permute(1, 2, 0))\n axs[1].imshow(msks[0], cmap = 'tab20', vmin = 0, vmax = 21)\n #end draw_sample\n\n# def show_sample(batch):\n# draw_sample(batch)\n# log(\"Displaying image of {}\".format(batch['name']))\n# # plt.colorbar()\n# show_figure_nonblocking()\n# #end show_sample\n\n\ndef show_single_figure_result(batch, pred, mask):\n ans = pred[0].clone().detach().cpu().numpy()\n #x = np.where(ans == 0, 255, ans)\n x = ans\n y = mask.cpu()[0]\n x[y == 255] = 255\n draw_sample(batch)\n # pyplot.figure()\n plt.imshow(x, cmap = 'tab20', vmin = 0, vmax = 21)\n plt.colorbar()\n # show_figure_nonblocking()\n\n\n\n\n\n\n\nend = 0\n\n\ndefault_files = {\n 'msg': sys.stdout,\n 'err': sys.stderr,\n 'mod': './output',\n}\n\nclass Logger(object):\n \"\"\"\n The main Logger\n @param files: {\n msg: File | sys.stdout -- Message\n err: File | sys.stderr -- Error\n mod: File -- model\n } -- Determine where the logger should log.\n @param painter: Painter -- Determine how to log and show figures.\n \"\"\"\n def __init__(self, files=default_files):\n self.files = files\n # self.painter = painter\n end\n #end __init__\n\n def log(self, msg: str) -> None:\n \"\"\"\n Log messages\n \"\"\"\n log_msg = f'[info] {msg}'\n print(log_msg, file=self.files['msg'])\n #end log\n\n def debug(self, msg: str, description: str = \"\") -> None:\n \"\"\"\n For debugging.\n Should have the same output file as the `log` method.\n \"\"\"\n dbg_msg = f'[debug] {msg}'\n print(dbg_msg, file=self.files['msg'])\n #end debug\n\n def error(self, msg: str) -> None:\n \"\"\"\n Print error messages.\n \"\"\"\n err_msg = f'[error] {msg}'\n print(err_msg, file=self.files['err'])\n #end error\n\n def custom(self, tag: str):\n \"\"\"\n For custom logging.\n \"\"\"\n def custom_msg(msg: str) -> None:\n cus_msg = f'{tag} {msg}'\n print(cus_msg, file=self.files['msg'])\n #end custom_msg\n return custom_msg\n #end custom\n\n def blank_line(self, i: int=1) -> None:\n \"\"\"\n Only for log.\n Should not be used in error logging and others.\n \"\"\"\n print(\"\", file=self.files['msg'])\n #end blank_line\n\n#end class Logger\n\nlogger = Logger()\n\n\n\n\n\n\nclass Painter(object):\n def __init__(self, logger: Logger=logger):\n self.logger = logger\n\n def plot(self, xs: List[int], ys: List[int], style='.-') -> None:\n plt.figure()\n plt.plot(xs, ys, style)\n plt.grid()\n #end plot\n\n def draw_sample(self, batch):\n draw_sample(batch)\n\n def draw_seg_result(self, batch, pred, mask):\n show_single_figure_result(batch, pred, mask)\n\n def save_figure(self, path: str) -> None:\n try:\n plt.savefig(path)\n self.logger.log(f'saved figure {path}.')\n except IOError:\n self.logger.error(f'Trying to save figure {path} failed: {IOError}')\n #end save_figure\n #end save_figure\n\n#end class Painter\npainter = Painter()\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
]
] |
plenoi/EvoloPy
|
[
"7c943925b9a73ad671735493ce281b67d178dc7c"
] |
[
"PSO.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 15 22:37:00 2016\n\n@author: Hossam Faris\n\"\"\"\n\nimport random\nimport numpy\nfrom colorama import Fore, Back, Style\nfrom solution import solution\nimport time\n\n\n\n\n\n\ndef PSO(objf,lb,ub,dim,PopSize,iters):\n\n # PSO parameters\n \n# dim=30\n# iters=200\n Vmax=6\n# PopSize=50 #population size\n wMax=0.9\n wMin=0.2\n c1=2\n c2=2\n# lb=-10\n# ub=10\n# \n s=solution()\n if not isinstance(lb, list):\n lb = [lb] * dim\n if not isinstance(ub, list):\n ub = [ub] * dim\n \n \n ######################## Initializations\n \n vel=numpy.zeros((PopSize,dim))\n \n pBestScore=numpy.zeros(PopSize) \n pBestScore.fill(float(\"inf\"))\n \n pBest=numpy.zeros((PopSize,dim))\n gBest=numpy.zeros(dim)\n \n \n gBestScore=float(\"inf\")\n\n pos = numpy.zeros((PopSize, dim))\n for i in range(dim):\n pos[:, i] = numpy.random.uniform(0,1, PopSize) * (ub[i] - lb[i]) + lb[i]\n \n convergence_curve=numpy.zeros(iters)\n \n ############################################\n print(\"PSO is optimizing \\\"\"+objf.__name__+\"\\\"\") \n \n timerStart=time.time() \n s.startTime=time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n \n for l in range(0,iters):\n for i in range(0,PopSize):\n #pos[i,:]=checkBounds(pos[i,:],lb,ub)\n for j in range(dim):\n pos[i, j] = numpy.clip(pos[i,j], lb[j], ub[j])\n #Calculate objective function for each particle\n fitness=objf(pos[i,:])\n \n if(pBestScore[i]>fitness):\n pBestScore[i]=fitness\n pBest[i,:]=pos[i,:].copy()\n \n if(gBestScore>fitness):\n gBestScore=fitness\n gBest=pos[i,:].copy()\n \n #Update the W of PSO\n w=wMax-l*((wMax-wMin)/iters);\n \n for i in range(0,PopSize):\n for j in range (0,dim):\n r1=random.random()\n r2=random.random()\n vel[i,j]=w*vel[i,j]+c1*r1*(pBest[i,j]-pos[i,j])+c2*r2*(gBest[j]-pos[i,j])\n \n if(vel[i,j]>Vmax):\n vel[i,j]=Vmax\n \n if(vel[i,j]<-Vmax):\n vel[i,j]=-Vmax\n \n pos[i,j]=pos[i,j]+vel[i,j]\n \n convergence_curve[l]=gBestScore\n \n if (l%1==0):\n print(['At iteration '+ str(l+1)+ ' the best fitness is '+ str(gBestScore)]);\n timerEnd=time.time() \n s.endTime=time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n s.executionTime=timerEnd-timerStart\n s.convergence=convergence_curve\n s.optimizer=\"PSO\"\n s.objfname=objf.__name__\n\n return s\n \n \n"
] |
[
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.clip"
]
] |
esowc/DAAQS
|
[
"141b4d97edb319ab67d9f42a1aa54a4555829de2"
] |
[
"DAAQS/utils/preprocess.py"
] |
[
"import numpy as np\n\nfrom DAAQS.utils.misc import index_to_center\n\ndef temporal_average(c_data, o_data, index_lat, index_lon):\n\n ## Ideally CAMS data is time_step x 3 x 3\n ## And openaq_data is list of all stations in that 3x3 grid \n\n ## CAMS Data\n c_grid = c_data[:,index_lat-1:index_lat+2,index_lon-1:index_lon+2]\n \n cams_list = [[] for k in range(8)]\n\n for time in range(c_grid.shape[0]):\n index_time = time%8\n cams_list[index_time].append(np.ravel(c_grid[time,:,:]))\n cams_stack = np.stack(cams_list)\n cams_avg = np.mean(cams_stack, axis = 1)\n \n c_dict = dict()\n\n lat_0, lon_0 = index_to_center(index_lat-1,index_lon-1)\n lat_1, lon_1 = index_to_center(index_lat,index_lon) \n lat_2, lon_2 = index_to_center(index_lat+1,index_lon+1) \n\n coordinate_list = [(lat_0, lon_0), (lat_0, lon_1), (lat_0, lon_2), \n (lat_1, lon_0), (lat_1, lon_1), (lat_1, lon_2), \n (lat_2, lon_0), (lat_2, lon_1), (lat_2, lon_2),]\n\n lat_lon_list = [(index_lat-1, index_lon-1),(index_lat-1, index_lon), (index_lat-1, index_lon+1),\n (index_lat, index_lon-1), (index_lat, index_lon), (index_lat, index_lon+1), \n (index_lat+1, index_lon-1),(index_lat+1, index_lon), (index_lat+1, index_lon+1)]\n\n\n for grid in range(cams_avg.shape[1]): \n if \"grid_\"+str(grid) in c_dict:\n pass\n else: \n c_dict[\"grid_\"+str(grid)] = list(cams_avg[:,grid])\n c_dict[\"grid_\"+str(grid)].append({\"coordinates\":coordinate_list[grid]})\n c_dict[\"grid_\"+str(grid)].append({\"lat_lon_index\":lat_lon_list[grid]})\n c_dict[\"grid_\"+str(grid)].append({\"center_index\":(index_lat, index_lon)})\n\n # cams_avg is 8x9 values which is at each 9 location we have 1x8 different values \n\n ## OPENAQ Data\n \n o_dict = dict()\n for lat in range(index_lat-1,index_lat+2):\n for lon in range(index_lon-1, index_lon+2):\n for time in range(len(o_data)):\n for obs in o_data[time][lat][lon]:\n time_index = time%8\n if obs.location in o_dict:\n o_dict[obs.location][time_index].append(obs.value)\n else:\n o_dict[obs.location] = [[],[],[],[],[],[],[],[], {\"coordinates\":(obs.lat, obs.lon)}, {\"lat_lon_index\":(lat, lon)}, {\"center_index\":(index_lat, index_lon)}]\n\n for each in o_dict:\n for i in range(8):\n try:\n vals = o_dict[each][i]\n o_dict[each][i] = sum(vals)/len(vals)\n except:\n o_dict[each][i] = -1\n \n return c_dict, o_dict "
] |
[
[
"numpy.ravel",
"numpy.mean",
"numpy.stack"
]
] |
HolmesShuan/Location-aware-Upsampling-for-Semantic-Segmentation
|
[
"83822e86570bbff4ca721d80089b5d82f1958852"
] |
[
"LaU-reg/experiments/segmentation/option.py"
] |
[
"###########################################################################\n# Created by: Hang Zhang \n# Email: [email protected] \n# Copyright (c) 2017\n###########################################################################\nimport argparse\n\nimport torch\n\nclass Options():\n def __init__(self):\n parser = argparse.ArgumentParser(description='PyTorch \\\n Segmentation')\n # model and dataset \n parser.add_argument('--model', type=str, default='encnet',\n help='model name (default: encnet)')\n parser.add_argument('--backbone', type=str, default='resnet50',\n help='backbone name (default: resnet50)')\n parser.add_argument('--jpu', action='store_true', default=\n False, help='JPU')\n parser.add_argument('--dilated', action='store_true', default=\n False, help='dilation')\n parser.add_argument('--lateral', action='store_true', default=\n False, help='employ FPN')\n parser.add_argument('--dataset', type=str, default='ade20k',\n help='dataset name (default: pascal12)')\n parser.add_argument('--workers', type=int, default=8,\n metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=520,\n help='base image size')\n parser.add_argument('--crop-size', type=int, default=480,\n help='crop image size')\n parser.add_argument('--train-split', type=str, default='train',\n help='dataset train split (default: train)')\n # training hyper params\n parser.add_argument('--aux', action='store_true', default= False,\n help='Auxilary Loss')\n parser.add_argument('--aux-weight', type=float, default=0.2,\n help='Auxilary loss weight (default: 0.2)')\n parser.add_argument('--se-loss', action='store_true', default= False,\n help='Semantic Encoding Loss SE-loss')\n parser.add_argument('--se-weight', type=float, default=0.2,\n help='SE-loss weight (default: 0.2)')\n parser.add_argument('--epochs', type=int, default=None, metavar='N',\n help='number of epochs to train (default: auto)')\n parser.add_argument('--start_epoch', type=int, default=0,\n metavar='N', help='start epochs (default:0)')\n parser.add_argument('--batch-size', type=int, default=None,\n metavar='N', help='input batch size for \\\n training (default: auto)')\n parser.add_argument('--test-batch-size', type=int, default=None,\n metavar='N', help='input batch size for \\\n testing (default: same as batch size)')\n # LaU offset loss\n parser.add_argument('--offset-loss', action='store_true', default= True,\n help='Location-aware loss')\n parser.add_argument('--offset-weight', type=float, default=0.5,\n help='offset-loss weight (default: 0.5)')\n parser.add_argument('--location-weight', type=float, default=0.125,\n help='location regression weight (default: 0.125)')\n # optimizer params\n parser.add_argument('--lr', type=float, default=None, metavar='LR',\n help='learning rate (default: auto)')\n parser.add_argument('--lr-scheduler', type=str, default='poly',\n help='learning rate scheduler (default: poly)')\n parser.add_argument('--momentum', type=float, default=0.9,\n metavar='M', help='momentum (default: 0.9)')\n parser.add_argument('--weight-decay', type=float, default=1e-4,\n metavar='M', help='w-decay (default: 1e-4)')\n # cuda, seed and logging\n parser.add_argument('--no-cuda', action='store_true', default=\n False, help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n # checking point\n parser.add_argument('--resume', type=str, default=None,\n help='put the path to resuming file if needed')\n 
parser.add_argument('--checkname', type=str, default='default',\n help='set the checkpoint name')\n parser.add_argument('--model-zoo', type=str, default=None,\n help='evaluating on model zoo model')\n # finetuning pre-trained models\n parser.add_argument('--ft', action='store_true', default= False,\n help='finetuning on a different dataset')\n # evaluation option\n parser.add_argument('--split', default='val')\n parser.add_argument('--mode', default='testval')\n parser.add_argument('--ms', action='store_true', default=False,\n help='multi scale & flip')\n parser.add_argument('--no-val', action='store_true', default= False,\n help='skip validation during training')\n parser.add_argument('--save-folder', type=str, default='results',\n help = 'path to save images')\n # LaU option\n parser.add_argument('--batch-size-per-gpu', type=int, default=4,\n help='batch size per GPU')\n parser.add_argument('--up-factor', type=int, default=4,\n help='upsampling factor in LaU') \n parser.add_argument('--bottleneck-channel', type=int, default=64,\n help='reduce channel number to C')\n parser.add_argument('--offset-branch-input-channel', type=int, default=512,\n help='input channel number in LaU') \n parser.add_argument('--category', type=int, default=59,\n help='category number') \n parser.add_argument('--downsampled-input-size', type=int, default=60,\n help='downsampled input size')\n # the parser\n self.parser = parser\n\n def parse(self):\n args = self.parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n # default settings for epochs, batch_size and lr\n if args.epochs is None:\n epoches = {\n 'coco': 30,\n 'citys': 240,\n 'pascal_voc': 50,\n 'pascal_aug': 50,\n 'pcontext': 80,\n 'ade20k': 120,\n }\n args.epochs = epoches[args.dataset.lower()]\n if args.batch_size is None:\n args.batch_size = 16\n if args.test_batch_size is None:\n args.test_batch_size = args.batch_size\n if args.lr is None:\n lrs = {\n 'coco': 0.01,\n 'citys': 0.01,\n 'pascal_voc': 0.0001,\n 'pascal_aug': 0.001,\n 'pcontext': 0.001,\n 'ade20k': 0.004,\n }\n args.lr = lrs[args.dataset.lower()] / 16 * args.batch_size\n print(args)\n return args\n"
] |
[
[
"torch.cuda.is_available"
]
] |
andreea-zaharia/flower
|
[
"c576f0118e5c3d7a7d774dc156fb4b6db194655d",
"c576f0118e5c3d7a7d774dc156fb4b6db194655d"
] |
[
"examples/dp-sgd-mnist/server.py",
"baselines/flwr_baselines/publications/fedbn/convergence_rate/client.py"
] |
[
"import argparse\nimport os\n\nimport tensorflow as tf\n\nimport flwr as fl\n\nimport common\n\n# Make TensorFlow logs less verbose\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n\ndef get_eval_fn(model):\n \"\"\"Return an evaluation function for server-side evaluation.\"\"\"\n\n # Load test data here to avoid the overhead of doing it in `evaluate` itself\n _, test = tf.keras.datasets.mnist.load_data()\n test_data, test_labels = test\n\n # preprocessing\n test_data, test_labels = common.preprocess(test_data, test_labels)\n\n # The `evaluate` function will be called after every round\n def evaluate(weights: fl.common.Weights):\n model.set_weights(weights) # Update model with the latest parameters\n loss, accuracy = model.evaluate(test_data, test_labels)\n return loss, {\"accuracy\": accuracy}\n\n return evaluate\n\n\ndef main(args) -> None:\n model = common.create_cnn_model()\n loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\n model.compile(\"sgd\", loss=loss, metrics=[\"accuracy\"])\n strategy = fl.server.strategy.FedAvg(\n fraction_fit=args.fraction_fit,\n min_available_clients=args.num_clients,\n eval_fn=get_eval_fn(model),\n initial_parameters=fl.common.weights_to_parameters(model.get_weights()),\n )\n fl.server.start_server(\n strategy=strategy,\n config={\"num_rounds\": args.num_rounds},\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Server Script\")\n parser.add_argument(\"--num-clients\", default=2, type=int)\n parser.add_argument(\"--num-rounds\", default=1, type=int)\n parser.add_argument(\"--fraction-fit\", default=1.0, type=float)\n args = parser.parse_args()\n main(args)\n",
"\"\"\"FedBN client.\"\"\"\nimport argparse\nimport json\nfrom collections import OrderedDict\nfrom typing import Dict, List, Tuple\n\nimport flwr as fl\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torchvision import transforms\n\nfrom .utils import cnn_model, data_utils\n\nFL_ROUND = 0\n\neval_list = []\n\n\n# pylint: disable=no-member\nDEVICE: str = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# pylint: enable=no-member\n\n# mypy: allow-any-generics\n# pylint: disable= too-many-arguments, too-many-locals, global-statement\nclass FlowerClient(fl.client.NumPyClient):\n \"\"\"Flower client implementing image classification using PyTorch.\"\"\"\n\n def __init__(\n self,\n model: cnn_model.CNNModel,\n trainloader: torch.utils.data.DataLoader,\n testloader: torch.utils.data.DataLoader,\n num_examples: Dict,\n mode: str,\n ) -> None:\n self.model = model\n self.trainloader = trainloader\n self.testloader = testloader\n self.num_examples = num_examples\n self.mode = mode\n\n def get_parameters(self) -> List[np.ndarray]:\n \"\"\"\n Return model parameters as a list of NumPy ndarrays w or w/o using BN layers\n \"\"\"\n self.model.train()\n # pylint: disable = no-else-return\n if self.mode == \"fedbn\":\n # Excluding parameters of BN layers when using FedBN\n return [\n val.cpu().numpy()\n for name, val in self.model.state_dict().items()\n if \"bn\" not in name\n ]\n else:\n # Return all model parameters as a list of NumPy ndarrays\n return [val.cpu().numpy() for _, val in self.model.state_dict().items()]\n\n def set_parameters(self, parameters: List[np.ndarray]) -> None:\n \"\"\"\n Set model parameters from a list of NumPy ndarrays\n Exclude the bn layer if available\n \"\"\"\n self.model.train()\n # pylint: disable=not-callable\n if self.mode == \"fedbn\":\n keys = [k for k in self.model.state_dict().keys() if \"bn\" not in k]\n params_dict = zip(keys, parameters)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n self.model.load_state_dict(state_dict, strict=False)\n else:\n params_dict = zip(self.model.state_dict().keys(), parameters)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n self.model.load_state_dict(state_dict, strict=True)\n # pylint: enable=not-callable\n\n def fit(\n self, parameters: List[np.ndarray], config: Dict[str, str]\n ) -> Tuple[List[np.ndarray], int, Dict]:\n \"\"\"\n Set model parameters, train model, return updated model parameters\n \"\"\"\n self.set_parameters(parameters)\n test_loss, test_accuracy = test(\n self.model, self.num_examples[\"dataset\"], self.trainloader, device=DEVICE\n )\n test_dict = {\n \"dataset\": self.num_examples[\"dataset\"],\n \"fl_round\": FL_ROUND,\n \"strategy\": self.mode,\n \"train_loss\": test_loss,\n \"train_accuracy\": test_accuracy,\n }\n loss, accuracy = train(\n self.model,\n self.trainloader,\n self.num_examples[\"dataset\"],\n epochs=1,\n device=DEVICE,\n )\n eval_list.append(test_dict)\n return (\n self.get_parameters(),\n self.num_examples[\"trainset\"],\n {\"loss\": loss, \"accuracy\": accuracy},\n )\n\n def evaluate(\n self, parameters: List[np.ndarray], config: Dict[str, str]\n ) -> Tuple[float, int, Dict]:\n \"\"\"\n Set model parameters, evaluate model on local test dataset, return result\n \"\"\"\n self.set_parameters(parameters)\n global FL_ROUND\n print(f\"FL Round:{FL_ROUND}\")\n loss, accuracy = test(\n self.model, self.num_examples[\"dataset\"], self.testloader, device=DEVICE\n )\n test_dict = {\n \"dataset\": 
self.num_examples[\"dataset\"],\n \"fl_round\": FL_ROUND,\n \"strategy\": self.mode,\n \"test_loss\": loss,\n \"test_accuracy\": accuracy,\n }\n eval_list.append(test_dict)\n FL_ROUND += 1\n return (\n float(loss),\n self.num_examples[\"testset\"],\n {\"loss\": loss, \"accuracy\": accuracy},\n )\n\n\ndef load_partition(\n dataset: str,\n) -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, Dict]:\n \"\"\"\n Load 'MNIST', 'SVHN', 'USPS', 'SynthDigits', 'MNIST-M' for the training\n and test data to simulate a partition.\n \"\"\"\n\n if dataset == \"MNIST\":\n print(f\"Load {dataset} dataset\")\n\n transform = transforms.Compose(\n [\n transforms.Grayscale(num_output_channels=3),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n )\n\n trainset = data_utils.DigitsDataset(\n data_path=\"data/MNIST\",\n channels=1,\n percent=0.1,\n train=True,\n transform=transform,\n )\n testset = data_utils.DigitsDataset(\n data_path=\"data/MNIST\",\n channels=1,\n percent=0.1,\n train=False,\n transform=transform,\n )\n\n elif dataset == \"SVHN\":\n print(f\"Load {dataset} dataset\")\n\n transform = transforms.Compose(\n [\n transforms.Resize([28, 28]),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n )\n\n trainset = data_utils.DigitsDataset(\n data_path=\"data/SVHN\",\n channels=3,\n percent=0.1,\n train=True,\n transform=transform,\n )\n testset = data_utils.DigitsDataset(\n data_path=\"data/SVHN\",\n channels=3,\n percent=0.1,\n train=False,\n transform=transform,\n )\n\n elif dataset == \"USPS\":\n print(f\"Load {dataset} dataset\")\n\n transform = transforms.Compose(\n [\n transforms.Resize([28, 28]),\n transforms.Grayscale(num_output_channels=3),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n )\n\n trainset = data_utils.DigitsDataset(\n data_path=\"data/USPS\",\n channels=1,\n percent=0.1,\n train=True,\n transform=transform,\n )\n testset = data_utils.DigitsDataset(\n data_path=\"data/USPS\",\n channels=1,\n percent=0.1,\n train=False,\n transform=transform,\n )\n\n elif dataset == \"SynthDigits\":\n print(f\"Load {dataset} dataset\")\n\n transform = transforms.Compose(\n [\n transforms.Resize([28, 28]),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n )\n\n trainset = data_utils.DigitsDataset(\n data_path=\"data/SynthDigits/\",\n channels=3,\n percent=0.1,\n train=True,\n transform=transform,\n )\n testset = data_utils.DigitsDataset(\n data_path=\"data/SynthDigits/\",\n channels=3,\n percent=0.1,\n train=False,\n transform=transform,\n )\n\n elif dataset == \"MNIST-M\":\n print(f\"Load {dataset} dataset\")\n\n transform = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n )\n\n trainset = data_utils.DigitsDataset(\n data_path=\"data/MNIST_M/\",\n channels=3,\n percent=0.1,\n train=True,\n transform=transform,\n )\n testset = data_utils.DigitsDataset(\n data_path=\"data/MNIST_M/\",\n channels=3,\n percent=0.1,\n train=False,\n transform=transform,\n )\n\n else:\n print(\"No valid dataset available\")\n\n num_examples = {\n \"dataset\": dataset,\n \"trainset\": len(trainset),\n \"testset\": len(testset),\n }\n\n print(f\"Loaded {dataset} dataset with {num_examples} samples. 
Good Luck!\")\n\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True)\n testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)\n\n return trainloader, testloader, num_examples\n\n\ndef train(model, traindata, dataset, epochs, device) -> Tuple[float, float]:\n \"\"\"\n Train the network.\n \"\"\"\n # Define loss and optimizer\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)\n\n print(\n f\"Training {dataset} dataset with {epochs} local epoch(s) w/ {len(traindata)} batches each\"\n )\n\n # Train the network\n model.to(device)\n model.train()\n for epoch in range(epochs): # loop over the dataset multiple times\n running_loss = 0.0\n total = 0.0\n correct = 0\n for i, data in enumerate(traindata, 0):\n images, labels = data[0].to(device), data[1].to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1) # pylint: disable=no-member\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n loss = running_loss\n accuracy = correct / total\n if i == len(traindata) - 1: # print every 100 mini-batches\n accuracy = correct / total\n loss_batch = running_loss / len(traindata)\n print(\n f\"Train Dataset {dataset} with [{epoch+1}, {i+1}] \\\n loss: {loss_batch} accuracy: {accuracy}\"\n )\n running_loss = 0.0\n loss = loss / len(traindata)\n return loss, accuracy\n\n\ndef test(model, dataset, testdata, device) -> Tuple[float, float]:\n \"\"\"\n Validate the network on the entire test set.\n \"\"\"\n # Define loss and metrics\n criterion = nn.CrossEntropyLoss()\n correct = 0\n total = 0\n loss = 0.0\n\n # Evaluate the network\n model.to(device)\n model.eval()\n with torch.no_grad():\n for data in testdata:\n images, labels = data[0].to(device), data[1].to(device)\n outputs = model(images)\n loss += criterion(outputs, labels).item()\n _, predicted = torch.max(outputs.data, 1) # pylint: disable=no-member\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = correct / total\n loss = loss / len(testdata)\n print(f\"Dataset {dataset} with evaluation loss: {loss}\")\n return loss, accuracy\n\n\ndef main() -> None:\n \"\"\"Load data, start FlowerClient.\"\"\"\n\n # Parse command line argument `partition` (type of dataset) and `mode` (type of strategy)\n parser = argparse.ArgumentParser(description=\"Flower\")\n parser.add_argument(\n \"--partition\",\n type=str,\n choices=[\"MNIST\", \"SVHN\", \"USPS\", \"SynthDigits\", \"MNIST-M\"],\n required=True,\n )\n parser.add_argument(\n \"--mode\",\n type=str,\n choices=[\"fedbn\", \"fedavg\"],\n required=True,\n default=\"fedbn\",\n )\n args = parser.parse_args()\n\n # Load model\n model = cnn_model.CNNModel().to(DEVICE).train()\n\n # Load data\n trainloader, testloader, num_examples = load_partition(args.partition)\n\n # Perform a single forward pass to properly initialize BatchNorm\n _ = model(next(iter(trainloader))[0].to(DEVICE))\n\n # Start client\n client = FlowerClient(model, trainloader, testloader, num_examples, args.mode)\n print(\"Start client of dataset\", num_examples[\"dataset\"])\n fl.client.start_numpy_client(server_address=\"[::]:8000\", client=client)\n # Save train and evaluation loss and accuracy in json file\n with open(\n 
f\"results/{args.partition}_{args.mode}_results.json\", mode=\"r+\"\n ) as eval_file:\n json.dump(eval_list, eval_file)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.keras.datasets.mnist.load_data"
],
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
RPGroup-PBoC/mwc_mutants
|
[
"35581602c35793fc8ec42c8aff37b8305c5e54e1"
] |
[
"mut/flow.py"
] |
[
"import numpy as np\nimport fcsparser\nimport pandas as pd\nfrom ._fit_bivariate_normal_AstroML import fit_bivariate_normal\nimport scipy.stats\n\n# #######################\n# Automated Gating\n# #######################\ndef fit_2D_gaussian(df, x_val='FSC-H', y_val='SSC-H', log=False):\n '''\n This function hacks astroML fit_bivariate_normal to return the mean\n and covariance matrix when fitting a 2D gaussian fuction to the data\n contained in the x_val and y_val columns of the DataFrame df.\n\n Parameters\n ----------\n df : DataFrame.\n dataframe containing the data from which to fit the distribution\n x_val, y_val : str.\n name of the dataframe columns to be used in the function\n log : bool.\n indicate if the log of the data should be use for the fit or not\n\n Returns\n -------\n mu : tuple.\n (x, y) location of the best-fit bivariate normal\n cov : 2 x 2 array\n covariance matrix.\n cov[0, 0] = variance of the x_val column\n cov[1, 1] = variance of the y_val column\n cov[0, 1] = cov[1, 0] = covariance of the data\n '''\n if log:\n x = np.log10(df[x_val])\n y = np.log10(df[y_val])\n else:\n x = df[x_val]\n y = df[y_val]\n\n # Fit the 2D Gaussian distribution using atroML function\n mu, sigma_1, sigma_2, alpha = fit_bivariate_normal(x, y, robust=True)\n\n # compute covariance matrix from the standar deviations and the angle\n # that the fit_bivariate_normal function returns\n sigma_xx = ((sigma_1 * np.cos(alpha)) ** 2 +\n (sigma_2 * np.sin(alpha)) ** 2)\n sigma_yy = ((sigma_1 * np.sin(alpha)) ** 2 +\n (sigma_2 * np.cos(alpha)) ** 2)\n sigma_xy = (sigma_1 ** 2 - sigma_2 ** 2) * np.sin(alpha) * np.cos(alpha)\n\n # put elements of the covariance matrix into an actual matrix\n cov = np.array([[sigma_xx, sigma_xy], [sigma_xy, sigma_yy]])\n\n return mu, cov\n\n\n# #################\ndef gauss_interval(df, mu, cov, x_val='FSC-H', y_val='SSC-H', log=False):\n '''\n Computes the of the statistic\n\n (x - µx)'Σ(x - µx)\n\n for each of the elements in df columns x_val and y_val.\n\n Parameters\n ----------\n df : DataFrame.\n dataframe containing the data from which to fit the distribution\n mu : array-like.\n (x, y) location of bivariate normal\n cov : 2 x 2 array\n covariance matrix\n x_val, y_val : str.\n name of the dataframe columns to be used in the function\n log : bool.\n indicate if the log of the data should be use for the fit or not.\n\n Returns\n -------\n statistic_gauss : array-like.\n array containing the result of the linear algebra operation:\n (x - µx)'sum(x - µx)\n '''\n # Determine that the covariance matrix is not singular\n det = np.linalg.det(cov)\n if det == 0:\n raise NameError(\"The covariance matrix can't be singular\")\n\n # Compute the vector x defined as [[x - mu_x], [y - mu_y]]\n if log is True:\n x_vect = np.log10(np.array(df[[x_val, y_val]]))\n else:\n x_vect = np.array(df[[x_val, y_val]])\n\n x_vect[:, 0] = x_vect[:, 0] - mu[0]\n x_vect[:, 1] = x_vect[:, 1] - mu[1]\n\n # compute the inverse of the covariance matrix\n inv_sigma = np.linalg.inv(cov)\n\n # compute the operation\n interval_array = np.zeros(len(df))\n for i, x in enumerate(x_vect):\n interval_array[i] = np.dot(np.dot(x, inv_sigma), x.T)\n\n return interval_array\n\n\ndef gaussian_gate(df, alpha, x_val='FSC-A', y_val='SSC-A', log=True,\n verbose=False):\n '''\n Function that applies an \"unsupervised bivariate Gaussian gate\" to the data\n over the channels x_val and y_val.\n\n Parameters\n ----------\n df : DataFrame.\n dataframe containing the data from which to fit the distribution\n alpha : float. 
[0, 1]\n fraction of data aimed to keep. Used to compute the chi^2 quantile\n function\n x_val, y_val : str.\n name of the dataframe columns to be used in the function\n log : bool.\n indicate if the log of the data should be use for the fit or not\n verbose : bool.\n indicate if the percentage of data kept should be print\n\n Returns\n -------\n df_thresh : DataFrame\n Pandas data frame to which the automatic gate was applied.\n '''\n\n # Perform sanity checks.\n if alpha < 0 or alpha > 1:\n return RuntimeError(\"`alpha` must be a float between 0 and 1.\")\n\n data = df[[x_val, y_val]]\n # Fit the bivariate Gaussian distribution\n mu, cov = fit_2D_gaussian(data, log=log, x_val=x_val, y_val=y_val)\n\n # Compute the statistic for each of the pair of log scattering data\n interval_array = gauss_interval(data, mu, cov, log=log,\n x_val=x_val, y_val=y_val)\n\n # Find which data points fall inside the interval\n idx = interval_array <= scipy.stats.chi2.ppf(alpha, 2)\n\n # print the percentage of data kept\n if verbose:\n print('''\n with parameter alpha={0:0.2f}, percentage of data kept = {1:0.2f}\n '''.format(alpha, np.sum(idx) / len(df)))\n return df[idx]\n\n\n# #######################\n# File Parsing Utilities\n# #######################\n\ndef fcs_to_csv(path, file_name, save_metadata=True):\n R\"\"\"\n Reads in a Flow Cytometry Standard (FCS) file and exports all content\n directly to an easily parseable csv fie.\n\n Parameters\n ----------\n path : str\n Path to .fcs file\n file_name : str\n Path to save file to .csv\n save_metadata : bool\n If True, a metadata file will also be saved. It will have the name of\n `path` with `_metadata.csv`\n \"\"\"\n\n # Ensure provided file is actually .fcs\n if path.split('.')[-1] is not '.fcs':\n raise RuntimeError(\"`path` is not an FCS file.\")\n\n meta, data = fcsparser.parse(path)\n data.to_csv(file_name, index=False)\n\n if save_metadata:\n meta_df = pd.DataFrame(meta)\n meta_name = '{0}_metadata.csv'.format(path[:-4])\n meta_df.to_csv(meta_name, index=False)\n"
] |
[
[
"numpy.dot",
"numpy.linalg.inv",
"numpy.cos",
"pandas.DataFrame",
"numpy.sin",
"numpy.linalg.det",
"numpy.log10",
"numpy.array",
"numpy.sum"
]
] |
fkopsaf/OpenAeroStruct
|
[
"414bd76a7f14f1bd52d6dacc6694382d52e5fabc"
] |
[
"openaerostruct/geometry/utils.py"
] |
[
"from __future__ import print_function, division\r\nimport warnings\r\nimport numpy as np\r\nfrom numpy import cos, sin, tan\r\n\r\nfrom openaerostruct.geometry.CRM_definitions import get_crm_points\r\n\r\n\r\ndef rotate(mesh, theta_y, symmetry, rotate_x=True):\r\n \"\"\"\r\n Compute rotation matrices given mesh and rotation angles in degrees.\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface.\r\n theta_y[ny] : numpy array\r\n 1-D array of rotation angles about y-axis for each wing slice in degrees.\r\n symmetry : boolean\r\n Flag set to True if surface is reflected about y=0 plane.\r\n rotate_x : boolean\r\n Flag set to True if the user desires the twist variable to always be\r\n applied perpendicular to the wing (say, in the case of a winglet).\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the twisted aerodynamic surface.\r\n\r\n \"\"\"\r\n te = mesh[-1]\r\n le = mesh[ 0]\r\n quarter_chord = 0.25 * te + 0.75 * le\r\n\r\n nx, ny, _ = mesh.shape\r\n\r\n if rotate_x:\r\n # Compute spanwise z displacements along quarter chord\r\n if symmetry:\r\n dz_qc = quarter_chord[:-1,2] - quarter_chord[1:,2]\r\n dy_qc = quarter_chord[:-1,1] - quarter_chord[1:,1]\r\n theta_x = np.arctan(dz_qc/dy_qc)\r\n\r\n # Prepend with 0 so that root is not rotated\r\n rad_theta_x = np.append(theta_x, 0.0)\r\n else:\r\n root_index = int((ny - 1) / 2)\r\n dz_qc_left = quarter_chord[:root_index,2] - quarter_chord[1:root_index+1,2]\r\n dy_qc_left = quarter_chord[:root_index,1] - quarter_chord[1:root_index+1,1]\r\n theta_x_left = np.arctan(dz_qc_left/dy_qc_left)\r\n dz_qc_right = quarter_chord[root_index+1:,2] - quarter_chord[root_index:-1,2]\r\n dy_qc_right = quarter_chord[root_index+1:,1] - quarter_chord[root_index:-1,1]\r\n theta_x_right = np.arctan(dz_qc_right/dy_qc_right)\r\n\r\n # Concatenate thetas\r\n rad_theta_x = np.concatenate((theta_x_left, np.zeros(1), theta_x_right))\r\n\r\n else:\r\n rad_theta_x = 0.0\r\n\r\n rad_theta_y = theta_y * np.pi / 180.\r\n\r\n mats = np.zeros((ny, 3, 3), dtype=type(rad_theta_y[0]))\r\n\r\n cos_rtx = cos(rad_theta_x)\r\n cos_rty = cos(rad_theta_y)\r\n sin_rtx = sin(rad_theta_x)\r\n sin_rty = sin(rad_theta_y)\r\n\r\n mats[:, 0, 0] = cos_rty\r\n mats[:, 0, 2] = sin_rty\r\n mats[:, 1, 0] = sin_rtx * sin_rty\r\n mats[:, 1, 1] = cos_rtx\r\n mats[:, 1, 2] = -sin_rtx * cos_rty\r\n mats[:, 2, 0] = -cos_rtx * sin_rty\r\n mats[:, 2, 1] = sin_rtx\r\n mats[:, 2, 2] = cos_rtx*cos_rty\r\n\r\n mesh[:] = np.einsum(\"ikj, mij -> mik\", mats, mesh - quarter_chord) + quarter_chord\r\n\r\n\r\ndef scale_x(mesh, chord_dist):\r\n \"\"\"\r\n Modify the chords along the span of the wing by scaling only the x-coord.\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface.\r\n chord_dist[ny] : numpy array\r\n Chord length for each panel edge.\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh with the new chord lengths.\r\n \"\"\"\r\n te = mesh[-1]\r\n le = mesh[ 0]\r\n quarter_chord = 0.25 * te + 0.75 * le\r\n\r\n ny = mesh.shape[1]\r\n\r\n for i in range(ny):\r\n mesh[:, i, 0] = (mesh[:, i, 0] - quarter_chord[i, 0]) * chord_dist[i] + \\\r\n quarter_chord[i, 0]\r\n\r\ndef shear_x(mesh, xshear):\r\n \"\"\"\r\n Shear the wing in the x direction (distributed sweep).\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface.\r\n xshear[ny] : 
numpy array\r\n Distance to translate wing in x direction.\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh with the new chord lengths.\r\n \"\"\"\r\n mesh[:, :, 0] += xshear\r\n\r\ndef shear_y(mesh, yshear):\r\n \"\"\" Shear the wing in the y direction (distributed span).\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface.\r\n yshear[ny] : numpy array\r\n Distance to translate wing in y direction.\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh with the new span widths.\r\n \"\"\"\r\n mesh[:, :, 1] += yshear\r\n\r\ndef shear_z(mesh, zshear):\r\n \"\"\"\r\n Shear the wing in the z direction (distributed dihedral).\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface.\r\n zshear[ny] : numpy array\r\n Distance to translate wing in z direction.\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh with the new chord lengths.\r\n \"\"\"\r\n mesh[:, :, 2] += zshear\r\n\r\ndef sweep(mesh, sweep_angle, symmetry):\r\n \"\"\"\r\n Apply shearing sweep. Positive sweeps back.\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface.\r\n sweep_angle : float\r\n Shearing sweep angle in degrees.\r\n symmetry : boolean\r\n Flag set to true if surface is reflected about y=0 plane.\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the swept aerodynamic surface.\r\n\r\n \"\"\"\r\n\r\n # Get the mesh parameters and desired sweep angle\r\n num_x, num_y, _ = mesh.shape\r\n le = mesh[0]\r\n p180 = np.pi / 180\r\n tan_theta = tan(p180*sweep_angle)\r\n\r\n # If symmetric, simply vary the x-coord based on the distance from the\r\n # center of the wing\r\n if symmetry:\r\n y0 = le[-1, 1]\r\n dx = -(le[:, 1] - y0) * tan_theta\r\n\r\n # Else, vary the x-coord on either side of the wing\r\n else:\r\n ny2 = (num_y - 1) // 2\r\n y0 = le[ny2, 1]\r\n\r\n dx_right = (le[ny2:, 1] - y0) * tan_theta\r\n dx_left = -(le[:ny2, 1] - y0) * tan_theta\r\n dx = np.hstack((dx_left, dx_right))\r\n\r\n # dx added spanwise.\r\n mesh[:, :, 0] += dx\r\n\r\ndef dihedral(mesh, dihedral_angle, symmetry):\r\n \"\"\"\r\n Apply dihedral angle. 
Positive angles up.\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface.\r\n dihedral_angle : float\r\n Dihedral angle in degrees.\r\n symmetry : boolean\r\n Flag set to true if surface is reflected about y=0 plane.\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the aerodynamic surface with dihedral angle.\r\n\r\n \"\"\"\r\n\r\n # Get the mesh parameters and desired sweep angle\r\n num_x, num_y, _ = mesh.shape\r\n le = mesh[0]\r\n p180 = np.pi / 180\r\n tan_theta = tan(p180*dihedral_angle)\r\n\r\n # If symmetric, simply vary the z-coord based on the distance from the\r\n # center of the wing\r\n if symmetry:\r\n y0 = le[-1, 1]\r\n dz = -(le[:, 1] - y0) * tan_theta\r\n\r\n else:\r\n ny2 = (num_y-1) // 2\r\n y0 = le[ny2, 1]\r\n dz_right = (le[ny2:, 1] - y0) * tan_theta\r\n dz_left = -(le[:ny2, 1] - y0) * tan_theta\r\n dz = np.hstack((dz_left, dz_right))\r\n\r\n # dz added spanwise.\r\n mesh[:, :, 2] += dz\r\n\r\n\r\ndef stretch(mesh, span, symmetry):\r\n \"\"\"\r\n Stretch mesh in spanwise direction to reach specified span.\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface.\r\n span : float\r\n Relative stetch ratio in the spanwise direction.\r\n symmetry : boolean\r\n Flag set to true if surface is reflected about y=0 plane.\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the stretched aerodynamic surface.\r\n\r\n \"\"\"\r\n\r\n # Set the span along the quarter-chord line\r\n le = mesh[0]\r\n te = mesh[-1]\r\n quarter_chord = 0.25 * te + 0.75 * le\r\n\r\n # The user always deals with the full span, so if they input a specific\r\n # span value and have symmetry enabled, we divide this value by 2.\r\n if symmetry:\r\n span /= 2.\r\n\r\n # Compute the previous span and determine the scalar needed to reach the\r\n # desired span\r\n prev_span = quarter_chord[-1, 1] - quarter_chord[0, 1]\r\n s = quarter_chord[:,1] / prev_span\r\n mesh[:, :, 1] = s * span\r\n\r\ndef taper(mesh, taper_ratio, symmetry):\r\n \"\"\"\r\n Alter the spanwise chord linearly to produce a tapered wing. 
Note that\r\n we apply taper around the quarter-chord line.\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface.\r\n taper_ratio : float\r\n Taper ratio for the wing; 1 is untapered, 0 goes to a point.\r\n symmetry : boolean\r\n Flag set to true if surface is reflected about y=0 plane.\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the tapered aerodynamic surface.\r\n\r\n \"\"\"\r\n\r\n # Get mesh parameters and the quarter-chord\r\n le = mesh[0]\r\n te = mesh[-1]\r\n num_x, num_y, _ = mesh.shape\r\n quarter_chord = 0.25 * te + 0.75 * le\r\n x = quarter_chord[:, 1]\r\n span = x[-1] - x[0]\r\n\r\n # If symmetric, solve for the correct taper ratio, which is a linear\r\n # interpolation problem\r\n if symmetry:\r\n xp = np.array([-span, 0.])\r\n fp = np.array([taper_ratio, 1.])\r\n\r\n # Otherwise, we set up an interpolation problem for the entire wing, which\r\n # consists of two linear segments\r\n else:\r\n xp = np.array([-span/2, 0., span/2])\r\n fp = np.array([taper_ratio, 1., taper_ratio])\r\n\r\n taper = np.interp(x.real, xp.real, fp.real)\r\n\r\n # Modify the mesh based on the taper amount computed per spanwise section\r\n mesh[:] = np.einsum('ijk, j->ijk', mesh - quarter_chord, taper) + quarter_chord\r\n\r\n\r\ndef gen_rect_mesh(num_x, num_y, span, chord, span_cos_spacing=0., chord_cos_spacing=0.):\r\n \"\"\"\r\n Generate simple rectangular wing mesh.\r\n\r\n Parameters\r\n ----------\r\n num_x : float\r\n Desired number of chordwise node points for the final mesh.\r\n num_y : float\r\n Desired number of chordwise node points for the final mesh.\r\n span : float\r\n Total wingspan.\r\n chord : float\r\n Root chord.\r\n span_cos_spacing : float (optional)\r\n Blending ratio of uniform and cosine spacing in the spanwise direction.\r\n A value of 0. corresponds to uniform spacing and a value of 1.\r\n corresponds to regular cosine spacing. This increases the number of\r\n spanwise node points near the wingtips.\r\n chord_cos_spacing : float (optional)\r\n Blending ratio of uniform and cosine spacing in the chordwise direction.\r\n A value of 0. corresponds to uniform spacing and a value of 1.\r\n corresponds to regular cosine spacing. 
This increases the number of\r\n chordwise node points near the wingtips.\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Rectangular nodal mesh defining the final aerodynamic surface with the\r\n specified parameters.\r\n \"\"\"\r\n\r\n mesh = np.zeros((num_x, num_y, 3))\r\n ny2 = (num_y + 1) // 2\r\n\r\n # Hotfix a special case for spacing bunched at the root and tips\r\n if span_cos_spacing == 2.:\r\n beta = np.linspace(0, np.pi, ny2)\r\n\r\n # mixed spacing with span_cos_spacing as a weighting factor\r\n # this is for the spanwise spacing\r\n cosine = .25 * (1 - np.cos(beta)) # cosine spacing\r\n uniform = np.linspace(0, .5, ny2)[::-1] # uniform spacing\r\n half_wing = cosine[::-1] * span_cos_spacing + (1 - span_cos_spacing) * uniform\r\n full_wing = np.hstack((-half_wing[:-1], half_wing[::-1])) * span\r\n\r\n else:\r\n beta = np.linspace(0, np.pi/2, ny2)\r\n\r\n # mixed spacing with span_cos_spacing as a weighting factor\r\n # this is for the spanwise spacing\r\n cosine = .5 * np.cos(beta) # cosine spacing\r\n uniform = np.linspace(0, .5, ny2)[::-1] # uniform spacing\r\n half_wing = cosine * span_cos_spacing + (1 - span_cos_spacing) * uniform\r\n full_wing = np.hstack((-half_wing[:-1], half_wing[::-1])) * span\r\n\r\n nx2 = (num_x + 1) // 2\r\n beta = np.linspace(0, np.pi/2, nx2)\r\n\r\n # mixed spacing with span_cos_spacing as a weighting factor\r\n # this is for the chordwise spacing\r\n cosine = .5 * np.cos(beta) # cosine spacing\r\n uniform = np.linspace(0, .5, nx2)[::-1] # uniform spacing\r\n half_wing = cosine * chord_cos_spacing + (1 - chord_cos_spacing) * uniform\r\n full_wing_x = np.hstack((-half_wing[:-1], half_wing[::-1])) * chord\r\n\r\n # Special case if there are only 2 chordwise nodes\r\n if num_x <= 2:\r\n full_wing_x = np.array([0., chord])\r\n\r\n for ind_x in range(num_x):\r\n for ind_y in range(num_y):\r\n mesh[ind_x, ind_y, :] = [full_wing_x[ind_x], full_wing[ind_y], 0]\r\n\r\n return mesh\r\n\r\n\r\ndef gen_crm_mesh(num_x, num_y, span_cos_spacing=0., chord_cos_spacing=0., wing_type=\"CRM:jig\"):\r\n \"\"\"\r\n Generate Common Research Model wing mesh.\r\n\r\n Parameters\r\n ----------\r\n num_x : float\r\n Desired number of chordwise node points for the final mesh.\r\n num_y : float\r\n Desired number of chordwise node points for the final mesh.\r\n span : float\r\n Total wingspan.\r\n chord : float\r\n Root chord.\r\n span_cos_spacing : float (optional)\r\n Blending ratio of uniform and cosine spacing in the spanwise direction.\r\n A value of 0. corresponds to uniform spacing and a value of 1.\r\n corresponds to regular cosine spacing. This increases the number of\r\n spanwise node points near the wingtips.\r\n chord_cos_spacing : float (optional)\r\n Blending ratio of uniform and cosine spacing in the chordwise direction.\r\n A value of 0. corresponds to uniform spacing and a value of 1.\r\n corresponds to regular cosine spacing. This increases the number of\r\n chordwise node points near the wingtips.\r\n wing_type : string (optional)\r\n Describes the desired CRM shape. Current options are:\r\n \"CRM:jig\" (undeformed jig shape),\r\n \"CRM:alpha_2.75\" (shape from wind tunnel testing at a=2.75 from DPW6)\r\n\r\n Returns\r\n -------\r\n mesh[nx, ny, 3] : numpy array\r\n Rectangular nodal mesh defining the final aerodynamic surface with the\r\n specified parameters.\r\n eta : numpy array\r\n Spanwise locations of the airfoil slices. 
Later used in the\r\n interpolation function to obtain correct twist values at\r\n points along the span that are not aligned with these slices.\r\n twist : numpy array\r\n Twist along the span at the spanwise eta locations. We use these twists\r\n as training points for interpolation to obtain twist values at\r\n arbitrary points along the span.\r\n\r\n \"\"\"\r\n\r\n # Call an external function to get the data points for the specific CRM\r\n # type requested. See `CRM_definitions.py` for more information and the\r\n # raw data.\r\n raw_crm_points = get_crm_points(wing_type)\r\n\r\n # If this is a jig shape, remove all z-deflection to create a\r\n # poor person's version of the undeformed CRM.\r\n if 'jig' in wing_type or 'CRM' == wing_type:\r\n raw_crm_points[:, 3] = 0.\r\n\r\n # Get the leading edge of the raw crm points\r\n le = np.vstack((raw_crm_points[:,1],\r\n raw_crm_points[:,2],\r\n raw_crm_points[:,3]))\r\n\r\n # Get the chord, twist(in correct order), and eta values from the points\r\n chord = raw_crm_points[:, 5]\r\n twist = raw_crm_points[:, 4][::-1]\r\n eta = raw_crm_points[:, 0]\r\n\r\n # Get the trailing edge of the crm points, based on the chord + le distance.\r\n # Note that we do not account for twist here; instead we set that using\r\n # the twist design variable later in run_classes.py.\r\n te = np.vstack((raw_crm_points[:,1] + chord,\r\n raw_crm_points[:,2],\r\n raw_crm_points[:,3]))\r\n\r\n # Get the number of points that define this CRM shape and create a mesh\r\n # array based on this size\r\n n_raw_points = raw_crm_points.shape[0]\r\n mesh = np.empty((2, n_raw_points, 3))\r\n\r\n # Set the leading and trailing edges of the mesh matrix\r\n mesh[0, :, :] = le.T\r\n mesh[1, :, :] = te.T\r\n\r\n # Convert the mesh points to meters from inches.\r\n raw_mesh = mesh * 0.0254\r\n\r\n # Create the blended spacing using the user input for span_cos_spacing\r\n ny2 = (num_y + 1) // 2\r\n beta = np.linspace(0, np.pi/2, ny2)\r\n\r\n # Distribution for cosine spacing\r\n cosine = np.cos(beta)\r\n\r\n # Distribution for uniform spacing\r\n uniform = np.linspace(0, 1., ny2)[::-1]\r\n\r\n # Combine the two distrubtions using span_cos_spacing as the weighting factor.\r\n # span_cos_spacing == 1. is for fully cosine, 0. 
for uniform\r\n lins = cosine * span_cos_spacing + (1 - span_cos_spacing) * uniform\r\n\r\n # Populate a mesh object with the desired num_y dimension based on\r\n # interpolated values from the raw CRM points.\r\n mesh = np.empty((2, ny2, 3))\r\n for j in range(2):\r\n for i in range(3):\r\n mesh[j, :, i] = np.interp(lins[::-1], eta, raw_mesh[j, :, i].real)\r\n\r\n # That is just one half of the mesh and we later expect the full mesh,\r\n # even if we're using symmetry == True.\r\n # So here we mirror and stack the two halves of the wing.\r\n full_mesh = getFullMesh(right_mesh=mesh)\r\n\r\n # If we need to add chordwise panels, do so\r\n if num_x > 2:\r\n full_mesh = add_chordwise_panels(full_mesh, num_x, chord_cos_spacing)\r\n\r\n return full_mesh, eta, twist\r\n\r\n\r\ndef add_chordwise_panels(mesh, num_x, chord_cos_spacing):\r\n \"\"\"\r\n Generate a new mesh with multiple chordwise panels.\r\n\r\n Parameters\r\n ----------\r\n mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the initial aerodynamic surface with only\r\n the leading and trailing edges defined.\r\n num_x : float\r\n Desired number of chordwise node points for the final mesh.\r\n chord_cos_spacing : float\r\n Blending ratio of uniform and cosine spacing in the chordwise direction.\r\n A value of 0. corresponds to uniform spacing and a value of 1.\r\n corresponds to regular cosine spacing. This increases the number of\r\n chordwise node points near the wingtips.\r\n\r\n Returns\r\n -------\r\n new_mesh[nx, ny, 3] : numpy array\r\n Nodal mesh defining the final aerodynamic surface with the\r\n specified number of chordwise node points.\r\n\r\n \"\"\"\r\n\r\n # Obtain mesh and num properties\r\n num_y = mesh.shape[1]\r\n ny2 = (num_y + 1) // 2\r\n nx2 = (num_x + 1) // 2\r\n\r\n # Create beta, an array of linear sampling points to pi/2\r\n beta = np.linspace(0, np.pi/2, nx2)\r\n\r\n # Obtain the two spacings that we will use to blend\r\n cosine = .5 * np.cos(beta) # cosine spacing\r\n uniform = np.linspace(0, .5, nx2)[::-1] # uniform spacing\r\n\r\n # Create half of the wing in the chordwise direction\r\n half_wing = cosine * chord_cos_spacing + (1 - chord_cos_spacing) * uniform\r\n\r\n if chord_cos_spacing == 0.:\r\n full_wing_x = np.linspace(0, 1., num_x)\r\n\r\n else:\r\n # Mirror this half wing into a full wing; offset by 0.5 so it goes 0 to 1\r\n full_wing_x = np.hstack((-half_wing[:-1], half_wing[::-1])) + .5\r\n\r\n # Obtain the leading and trailing edges\r\n le = mesh[ 0, :, :]\r\n te = mesh[-1, :, :]\r\n\r\n # Create a new mesh with the desired num_x and set the leading and trailing edge values\r\n new_mesh = np.zeros((num_x, num_y, 3))\r\n new_mesh[ 0, :, :] = le\r\n new_mesh[-1, :, :] = te\r\n\r\n for i in range(1, num_x-1):\r\n w = full_wing_x[i]\r\n new_mesh[i, :, :] = (1 - w) * le + w * te\r\n\r\n return new_mesh\r\n\r\ndef get_default_geo_dict():\r\n \"\"\"\r\n Obtain the default settings for the surface descriptions. 
Note that\r\n these defaults are overwritten based on user input for each surface.\r\n Each dictionary describes one surface.\r\n\r\n Returns\r\n -------\r\n defaults : dict\r\n A python dict containing the default surface-level settings.\r\n \"\"\"\r\n\r\n defaults = {\r\n # Wing definition\r\n 'num_x' : 3, # number of chordwise points\r\n 'num_y' : 5, # number of spanwise points\r\n 'span_cos_spacing' : 0, # 0 for uniform spanwise panels\r\n # 1 for cosine-spaced panels\r\n # any value between 0 and 1 for\r\n # a mixed spacing\r\n 'chord_cos_spacing' : 0., # 0 for uniform chordwise panels\r\n # 1 for cosine-spaced panels\r\n # any value between 0 and 1 for\r\n # a mixed spacing\r\n 'wing_type' : 'rect', # initial shape of the wing\r\n # either 'CRM' or 'rect'\r\n # 'CRM' can have different options\r\n # after it, such as 'CRM:alpha_2.75'\r\n # for the CRM shape at alpha=2.75\r\n 'symmetry' : True, # if true, model one half of wing\r\n # reflected across the plane y = 0\r\n 'offset' : np.zeros((3)), # coordinates to offset\r\n # the surface from its default location\r\n\r\n # Simple Geometric Variables\r\n 'span' : 10., # full wingspan, even for symmetric cases\r\n 'root_chord' : 1., # root chord\r\n 'dihedral' : 0., # wing dihedral angle in degrees\r\n # positive is upward\r\n 'sweep' : 0., # wing sweep angle in degrees\r\n # positive sweeps back\r\n 'taper' : 1., # taper ratio; 1. is uniform chord\r\n }\r\n\r\n return defaults\r\n\r\ndef generate_mesh(input_dict):\r\n\r\n # Get defaults and update surface with the user-provided input\r\n surf_dict = get_default_geo_dict()\r\n surf_dict.update(input_dict)\r\n\r\n num_x = surf_dict['num_x']\r\n num_y = surf_dict['num_y']\r\n span = surf_dict['span']\r\n chord = surf_dict['root_chord']\r\n span_cos_spacing = surf_dict['span_cos_spacing']\r\n chord_cos_spacing = surf_dict['chord_cos_spacing']\r\n\r\n # Check to make sure that an odd number of spanwise points (num_y) was provided\r\n if not num_y % 2:\r\n raise ValueError('num_y must be an odd number.')\r\n\r\n # Check to make sure that an odd number of chordwise points (num_x) was provided\r\n if not num_x % 2 and not num_x==2:\r\n raise ValueError('num_x must be an odd number.')\r\n\r\n # Generate rectangular mesh\r\n if surf_dict['wing_type'] == 'rect':\r\n mesh = gen_rect_mesh(num_x, num_y, span, chord,\r\n span_cos_spacing, chord_cos_spacing)\r\n\r\n # Generate CRM mesh. Note that this outputs twist information\r\n # based on the data from the CRM definition paper, so we save\r\n # this twist information to the surf_dict.\r\n elif 'CRM' in surf_dict['wing_type']:\r\n mesh, eta, twist = gen_crm_mesh(num_x, num_y,\r\n span_cos_spacing, chord_cos_spacing, surf_dict['wing_type'])\r\n surf_dict['crm_twist'] = twist\r\n\r\n else:\r\n raise NameError('wing_type option not understood. 
Must be either a type of ' +\r\n '\"CRM\" or \"rect\".')\r\n\r\n # Chop the mesh in half if using symmetry during analysis.\r\n # Note that this means that the provided mesh should be the full mesh\r\n if surf_dict['symmetry']:\r\n num_y = int((num_y+1)/2)\r\n mesh = mesh[:, :num_y, :]\r\n\r\n # Apply the user-provided coordinate offset to position the mesh\r\n mesh = mesh + surf_dict['offset']\r\n\r\n # If CRM wing, then compute the jig twist values.\r\n # Interpolate the twist values from the CRM wing definition to the twist\r\n # control points.\r\n if 'CRM' in surf_dict['wing_type']:\r\n num_twist = surf_dict['num_twist_cp']\r\n\r\n # If the surface is symmetric, simply interpolate the initial\r\n # twist_cp values based on the mesh data\r\n if surf_dict['symmetry']:\r\n twist = np.interp(np.linspace(0, 1, num_twist), eta, surf_dict['crm_twist'])\r\n else:\r\n\r\n # If num_twist is odd, create the twist vector and mirror it\r\n # then stack the two together, but remove the duplicated twist\r\n # value.\r\n if num_twist % 2:\r\n twist = np.interp(np.linspace(0, 1, (num_twist+1)/2), eta, surf_dict['crm_twist'])\r\n twist = np.hstack((twist[:-1], twist[::-1]))\r\n\r\n # If num_twist is even, mirror the twist vector and stack\r\n # them together\r\n else:\r\n twist = np.interp(np.linspace(0, 1, num_twist/2), eta, surf_dict['crm_twist'])\r\n twist = np.hstack((twist, twist[::-1]))\r\n\r\n return mesh, twist\r\n\r\n else:\r\n\r\n return mesh\r\n\r\ndef write_FFD_file(surface, mx, my):\r\n\r\n mesh = surface['mesh']\r\n nx, ny = mesh.shape[:2]\r\n\r\n half_ffd = np.zeros((mx, my, 3))\r\n\r\n LE = mesh[0, :, :]\r\n TE = mesh[-1, :, :]\r\n\r\n half_ffd[0, :, 0] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), LE[:, 0])\r\n half_ffd[0, :, 1] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), LE[:, 1])\r\n half_ffd[0, :, 2] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), LE[:, 2])\r\n\r\n half_ffd[-1, :, 0] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), TE[:, 0])\r\n half_ffd[-1, :, 1] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), TE[:, 1])\r\n half_ffd[-1, :, 2] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), TE[:, 2])\r\n\r\n for i in range(my):\r\n half_ffd[:, i, 0] = np.linspace(half_ffd[0, i, 0], half_ffd[-1, i, 0], mx)\r\n half_ffd[:, i, 1] = np.linspace(half_ffd[0, i, 1], half_ffd[-1, i, 1], mx)\r\n half_ffd[:, i, 2] = np.linspace(half_ffd[0, i, 2], half_ffd[-1, i, 2], mx)\r\n\r\n cushion = .5\r\n\r\n half_ffd[0, :, 0] -= cushion\r\n half_ffd[-1, :, 0] += cushion\r\n half_ffd[:, 0, 1] -= cushion\r\n half_ffd[:, -1, 1] += cushion\r\n\r\n bottom_ffd = half_ffd.copy()\r\n bottom_ffd[:, :, 2] -= cushion\r\n\r\n top_ffd = half_ffd.copy()\r\n top_ffd[:, :, 2] += cushion\r\n\r\n ffd = np.vstack((bottom_ffd, top_ffd))\r\n\r\n if 0:\r\n import matplotlib.pyplot as plt\r\n from mpl_toolkits.mplot3d import Axes3D\r\n\r\n fig = plt.figure()\r\n axes = []\r\n\r\n axes.append(fig.add_subplot(221, projection='3d'))\r\n axes.append(fig.add_subplot(222, projection='3d'))\r\n axes.append(fig.add_subplot(223, projection='3d'))\r\n axes.append(fig.add_subplot(224, projection='3d'))\r\n\r\n for i, ax in enumerate(axes):\r\n xs = ffd[:, :, 0].flatten()\r\n ys = ffd[:, :, 1].flatten()\r\n zs = ffd[:, :, 2].flatten()\r\n\r\n ax.scatter(xs, ys, zs, c='red', alpha=1., clip_on=False)\r\n\r\n xs = ffd[:, :, 0].flatten()\r\n ys = ffd[:, :, 1].flatten()\r\n zs = ffd[:, :, 2].flatten()\r\n\r\n ax.scatter(xs, ys, zs, c='blue', alpha=1.)\r\n\r\n xs = mesh[:, :, 
0]\r\n ys = mesh[:, :, 1]\r\n zs = mesh[:, :, 2]\r\n\r\n ax.plot_wireframe(xs, ys, zs, color='k')\r\n\r\n ax.set_xlim([-5, 5])\r\n ax.set_ylim([-5, 5])\r\n ax.set_zlim([-5, 5])\r\n\r\n ax.set_xlim([20, 40])\r\n ax.set_ylim([-25, -5.])\r\n ax.set_zlim([-10, 10])\r\n\r\n ax.set_xlabel('x')\r\n ax.set_ylabel('y')\r\n ax.set_zlabel('z')\r\n\r\n ax.set_axis_off()\r\n\r\n ax.set_axis_off()\r\n\r\n if i == 0:\r\n ax.view_init(elev=0, azim=180)\r\n elif i == 1:\r\n ax.view_init(elev=0, azim=90)\r\n elif i == 2:\r\n ax.view_init(elev=100000, azim=0)\r\n else:\r\n ax.view_init(elev=40, azim=-30)\r\n\r\n plt.tight_layout()\r\n plt.subplots_adjust(wspace=0, hspace=0)\r\n\r\n plt.show()\r\n\r\n filename = surface['name'] + '_ffd.fmt'\r\n\r\n with open(filename, 'w') as f:\r\n f.write('1\\n')\r\n f.write('{} {} {}\\n'.format(mx, 2, my))\r\n x = np.array_str(ffd[:, :, 0].flatten(order='F'))[1:-1] + '\\n'\r\n y = np.array_str(ffd[:, :, 1].flatten(order='F'))[1:-1] + '\\n'\r\n z = np.array_str(ffd[:, :, 2].flatten(order='F'))[1:-1] + '\\n'\r\n\r\n f.write(x)\r\n f.write(y)\r\n f.write(z)\r\n\r\n return filename\r\n\r\ndef writeMesh(mesh,filename):\r\n \"\"\"\r\n Writes the OAS mesh in Tecplot .dat file format, for visualization and debugging purposes.\r\n\r\n Parameters\r\n ----------\r\n mesh[nx,ny,3] : numpy array\r\n The OAS mesh to be written.\r\n filename : str\r\n The file name including the .dat extension.\r\n \"\"\"\r\n num_y = mesh.shape[0]\r\n num_x = mesh.shape[1]\r\n f = open(filename, 'w')\r\n f.write('\\t\\t1\\n')\r\n f.write('\\t\\t%d\\t\\t%d\\t\\t%d\\n' % (num_y, num_x, 1))\r\n\r\n x = mesh[:, :, 0]\r\n y = mesh[:, :, 1]\r\n z = mesh[:, :, 2]\r\n\r\n for dim in [x, y, z]:\r\n for iy in range(num_x):\r\n row = dim[:, iy]\r\n for val in row:\r\n f.write('\\t{: 3.6f}'.format(val))\r\n f.write('\\n')\r\n f.close()\r\n\r\n\r\ndef getFullMesh(left_mesh=None, right_mesh=None):\r\n \"\"\"\r\n For a symmetric wing, OAS only keeps and does computation on the left half.\r\n This script mirros the OAS mesh and attaches it to the existing mesh to\r\n obtain the full mesh.\r\n\r\n Parameters\r\n ----------\r\n left_mesh[nx,ny,3] or right_mesh : numpy array\r\n The half mesh to be mirrored.\r\n\r\n Returns\r\n -------\r\n full_mesh[nx,2*ny-1,3] : numpy array\r\n The computed full mesh.\r\n \"\"\"\r\n if left_mesh is None and right_mesh is None:\r\n raise ValueError(\"Either the left or right mesh need to be supplied.\")\r\n elif left_mesh is not None and right_mesh is not None:\r\n raise ValueError(\"Please only provide either left or right mesh, not both.\")\r\n elif left_mesh is not None:\r\n right_mesh = np.flip(left_mesh,axis=1).copy()\r\n right_mesh[:,:,1] *= -1\r\n else:\r\n left_mesh = np.flip(right_mesh,axis=1).copy()\r\n left_mesh[:,:,1] *= -1\r\n full_mesh = np.concatenate((left_mesh,right_mesh[:,1:,:]),axis=1)\r\n return full_mesh\r\n"
] |
[
[
"numpy.linspace",
"numpy.einsum",
"numpy.arctan",
"numpy.vstack",
"numpy.concatenate",
"numpy.hstack",
"matplotlib.pyplot.tight_layout",
"numpy.sin",
"numpy.interp",
"matplotlib.pyplot.subplots_adjust",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.tan",
"numpy.append",
"matplotlib.pyplot.show",
"numpy.flip",
"numpy.array",
"numpy.cos",
"numpy.empty"
]
] |
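The geometry helpers recorded in the file above (generate_mesh, stretch, taper, writeMesh) document their mesh conventions but are never demonstrated there. A minimal usage sketch follows, assuming the file is saved locally as a module named mesh_utils (a hypothetical name, not part of the recorded repository) and that NumPy is installed:

    from mesh_utils import generate_mesh, stretch, taper, writeMesh  # hypothetical module name

    # Build a small symmetric rectangular wing; num_x and num_y must be odd.
    surface = {
        'num_x': 3,
        'num_y': 7,
        'wing_type': 'rect',
        'symmetry': True,
        'span': 10.,
        'root_chord': 1.,
    }
    mesh = generate_mesh(surface)                # half-wing nodal mesh, shape (3, 4, 3)

    # Both helpers modify the mesh array in place.
    stretch(mesh, span=12., symmetry=True)       # rescale the quarter-chord span
    taper(mesh, taper_ratio=0.5, symmetry=True)  # linear chord taper about the quarter chord

    writeMesh(mesh, 'wing.dat')                  # Tecplot-style .dat file for inspection

Because symmetry is True, generate_mesh returns only the left half of the wing; getFullMesh from the same file can mirror it back to a full span if needed.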
keforres/PreREISE
|
[
"fcc111fdccc0626d3d34f1749a14035e47991043"
] |
[
"prereise/gather/demanddata/eia/tests/test_get_eia_data.py"
] |
[
"import getpass\nimport os\nfrom datetime import datetime\n\nimport pandas as pd\nimport pytest\n\nfrom prereise.gather.demanddata.eia import get_eia_data\n\n\[email protected](reason=\"Need API key\")\ndef test_eia_download():\n \"\"\"Check data frame assembled from data download by API call from EIA. Test\n checks that the correct number of files are downloaded and correct\n number of columns are created.\n\n Token string can be obtained by registering\n `here <https://www.eia.gov/opendata/>`_.\n \"\"\"\n print(\n \"A API key is required for the API download. The key \"\n \"can be obtained by a user by registering at \"\n \"https://www.eia.gov/opendata/.\"\n )\n token = getpass.getpass(prompt=\"API key=\")\n\n offset = 3\n start = pd.to_datetime(\"2018-07-01 07:00:00\")\n end = datetime.today()\n\n demand_list = [\n \"EBA.BANC-ALL.D.H\",\n \"EBA.BPAT-ALL.D.H\",\n \"EBA.CHPD-ALL.D.H\",\n \"EBA.CISO-ALL.D.H\",\n ]\n this = get_eia_data.from_download(token, start, end, offset, demand_list)\n\n assert len(this.columns) == (len(demand_list))\n\n\ndef test_from_excel():\n \"\"\"Tests data frame assembled from Excel spreadsheets manually downloaded\n from EIA. Test checks that correct number of columns are created.\n \"\"\"\n\n dir1 = os.path.join(os.path.dirname(__file__), \"data\")\n\n start = pd.to_datetime(\"2018-07-01 07:00:00\")\n end = pd.to_datetime(\"2018-10-01 07:00:00\")\n ba_list = [\"BPAT\", \"CISO\", \"EPE\"]\n\n ba_from_excel = get_eia_data.from_excel(dir1, ba_list, start, end)\n assert len(ba_from_excel.columns) == len(ba_list)\n"
] |
[
[
"pandas.to_datetime"
]
] |
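The tests above exercise two ways of assembling hourly demand frames: an API download gated behind a token, and manually downloaded Excel files. A short sketch of the Excel path outside pytest, assuming a local data directory and that from_excel returns a pandas DataFrame with one column per balancing authority (which is what the test asserts):

    import pandas as pd
    from prereise.gather.demanddata.eia import get_eia_data

    data_dir = "path/to/excel/files"            # placeholder directory with the EIA spreadsheets
    ba_list = ["BPAT", "CISO", "EPE"]
    start = pd.to_datetime("2018-07-01 07:00:00")
    end = pd.to_datetime("2018-10-01 07:00:00")

    demand = get_eia_data.from_excel(data_dir, ba_list, start, end)
    assert len(demand.columns) == len(ba_list)  # same column-count check as the test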
zegerk/gym-micropolis
|
[
"554bf41e9c4001140cdba90c5bbb3cc6bacf4c65"
] |
[
"arguments.py"
] |
[
"import argparse\n\nimport torch\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='RL')\n parser.add_argument('--algo', default='a2c',\n help='algorithm to use: a2c | ppo | acktr')\n parser.add_argument('--lr', type=float, default=7e-4,\n help='learning rate (default: 7e-4)')\n parser.add_argument('--eps', type=float, default=1e-5,\n help='RMSprop optimizer epsilon (default: 1e-5)')\n parser.add_argument('--alpha', type=float, default=0.99,\n help='RMSprop optimizer apha (default: 0.99)')\n parser.add_argument('--gamma', type=float, default=0.99,\n help='discount factor for rewards (default: 0.99)')\n parser.add_argument('--use-gae', action='store_true', default=False,\n help='use generalized advantage estimation')\n parser.add_argument('--tau', type=float, default=0.95,\n help='gae parameter (default: 0.95)')\n parser.add_argument('--entropy-coef', type=float, default=0.01,\n help='entropy term coefficient (default: 0.01)')\n parser.add_argument('--value-loss-coef', type=float, default=0.5,\n help='value loss coefficient (default: 0.5)')\n parser.add_argument('--max-grad-norm', type=float, default=0.5,\n help='max norm of gradients (default: 0.5)')\n parser.add_argument('--seed', type=int, default=1,\n help='random seed (default: 1)')\n parser.add_argument('--num-processes', type=int, default=12,\n help='how many training CPU processes to use (default: 12)')\n parser.add_argument('--num-steps', type=int, default=5,\n help='number of forward steps in A2C (default: 5)')\n parser.add_argument('--ppo-epoch', type=int, default=4,\n help='number of ppo epochs (default: 4)')\n parser.add_argument('--num-mini-batch', type=int, default=32,\n help='number of batches for ppo (default: 32)')\n parser.add_argument('--clip-param', type=float, default=0.2,\n help='ppo clip parameter (default: 0.2)')\n parser.add_argument('--log-interval', type=int, default=10,\n help='log interval, one log per n updates (default: 10)')\n parser.add_argument('--save-interval', type=int, default=100,\n help='save interval, one save per n updates (default: 100)')\n parser.add_argument('--eval-interval', type=int, default=None,\n help='eval interval, one eval per n updates (default: None)')\n parser.add_argument('--vis-interval', type=int, default=100,\n help='vis interval, one log per n updates (default: 100)')\n parser.add_argument('--num-frames', type=int, default=10e6,\n help='number of frames to train (default: 10e6)')\n parser.add_argument('--env-name', default='MicropolisEnv-v0',\n help='environment to train on (default: PongNoFrameskip-v4)')\n parser.add_argument('--log-dir', default='trained_models/a2c/',\n help='directory to save agent logs (default: /tmp/gym)')\n parser.add_argument('--save-dir', default='./trained_models/',\n help='directory to save agent logs (default: ./trained_models/)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--render', action='store_true', default=False, \n help=\"render gui of single agent during training\")\n parser.add_argument('--print-map', action='store_true', default=False)\n parser.add_argument('--add-timestep', action='store_true', default=False,\n help='add timestep to observations')\n parser.add_argument('--recurrent-policy', action='store_true', default=False,\n help='use a recurrent policy')\n parser.add_argument('--vis', action='store_true', default=True,\n help='enable visdom visualization')\n parser.add_argument('--port', type=int, default=8097,\n help='port to run the 
server on (default: 8097)')\n parser.add_argument('--map-width', type=int, default=20, \n help=\"width of micropolis map\")\n parser.add_argument('--empty-start', action='store_true', default=False)\n parser.add_argument('--model', default='fixed')\n parser.add_argument('--curiosity', action='store_true', default=False)\n parser.add_argument('--no-reward', action='store_true', default=False)\n parser.add_argument('--env-type', default='yeet')\n########################################### ICM\n parser.add_argument(\n '--eta', \n type=float, \n default=0.01, \n metavar='LR', \n help='scaling factor for intrinsic reward')\n parser.add_argument(\n '--beta', \n type=float, \n default=0.2,\n metavar='LR',\n help='balance between inverse & forward')\n parser.add_argument(\n '--lmbda', \n type=float, \n default=0.1,\n metavar='LR',\n help='lambda : balance between A2C & icm')\n args = parser.parse_args()\n\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n\n return args\n"
] |
[
[
"torch.cuda.is_available"
]
] |
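get_args() above only defines and parses the command-line options; how the namespace is consumed is not recorded here. The following is an assumed wiring sketch (not the repository's actual training entry point) that seeds torch and selects a device from the parsed flags:

    import torch
    from arguments import get_args

    args = get_args()                       # every option has a default, so no CLI flags are required
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if args.cuda else "cpu")
    print(args.algo, args.lr, args.num_processes, device)

Note that args.cuda is already derived inside get_args() from --no-cuda and torch.cuda.is_available(), so the device choice here just mirrors that flag.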
abecker99/Interpolation
|
[
"0527e6296c98b1c7f6cf512e614090f61754705d"
] |
[
"TestingBisection.py"
] |
[
"import numpy as np\n\n\ndef find_sign_change(f, step, a, b):\n x = a\n pairs = []\n while (x + step < b):\n if (f(x + step)/f(x) < 0):\n pairs.append([x, x+step])\n x += step\n return pairs\n\ndef bisect(f, pairs, tolerance):\n zeros = []\n for pair in pairs:\n midpoint = (pair[1] - pair[0])/2 + pair[0]\n while (abs(f(midpoint)) > tolerance):\n if (f(midpoint)/f(pair[0]) < 0):\n pair[1] = midpoint\n else:\n pair[0] = midpoint\n midpoint = (pair[1] - pair[0])/2 + pair[0]\n max_iter = 1000\n zeros.append(midpoint)\n return zeros\n#zeros are z, need to computer energy with it \ndef sinc(x):\n if (x == 0):\n return 1\n else:\n return np.sin(x)/x\n\npairs = find_sign_change(sinc, 0.1, 0, 10)\nprint(pairs)\nzeros = bisect(sinc, pairs, 1E-10)\nprint(zeros)\nprint(np.pi, 2.0*np.pi, 3.0*np.pi)"
] |
[
[
"numpy.sin"
]
] |
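In the script above, find_sign_change brackets roots by scanning for sign changes at a fixed step, and bisect refines each bracket until |f(x)| falls below the tolerance; the demo applies them to sinc, whose zeros sit at multiples of pi. A reuse sketch on a different function, assuming the two helpers are imported from the file as-is (importing TestingBisection also runs its module-level sinc demo once):

    import numpy as np
    from TestingBisection import find_sign_change, bisect  # import also prints the sinc demo output

    # Bracket and refine the zeros of cos(x) on [0, 10]: pi/2, 3*pi/2, 5*pi/2.
    pairs = find_sign_change(np.cos, 0.1, 0.0, 10.0)
    roots = bisect(np.cos, pairs, 1e-10)
    print(roots)
    print([np.pi / 2, 3 * np.pi / 2, 5 * np.pi / 2])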
wangxinxin08/Paddle
|
[
"1b0c5ef264b52a9d75f971216618ebbbbc7e5931"
] |
[
"python/paddle/tensor/manipulation.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nfrom ..fluid.layers import core\nfrom ..fluid.layer_helper import LayerHelper\nfrom ..fluid.framework import Variable, OpProtoHolder, in_dygraph_mode, convert_np_dtype_to_dtype_, device_guard, dygraph_only\nfrom ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype\nfrom ..fluid.layers.tensor import fill_constant\nfrom ..fluid.layers import utils\nimport numpy as np\n# TODO: define functions to manipulate a tensor \nfrom ..fluid.layers import cast # noqa: F401\nfrom ..fluid.layers import slice # noqa: F401\nfrom ..fluid.layers import transpose # noqa: F401\nfrom ..fluid.layers import unstack # noqa: F401\n\nfrom ..fluid.layers import scatter_nd # noqa: F401\nfrom ..fluid.layers import shard_index # noqa: F401\nfrom ..fluid import layers\nfrom ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only\nimport paddle\n\n__all__ = []\n\n\n@dygraph_only\ndef tolist(x):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n This function translate the paddle.Tensor to python list.\n\n Args:\n x(Tensor): ``x`` is the Tensor we want to translate to list\n\n Returns:\n list: A list that contain the same value of current Tensor.\n\n Returns type:\n list: dtype is same as current Tensor\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n t = paddle.to_tensor([0,1,2,3,4])\n expectlist = t.tolist()\n print(expectlist) #[0, 1, 2, 3, 4]\n\n expectlist = paddle.tolist(t)\n print(expectlist) #[0, 1, 2, 3, 4]\n\n \"\"\"\n return x.numpy().tolist()\n\n\nsetattr(core.VarBase, 'tolist', tolist)\n\n\ndef concat(x, axis=0, name=None):\n \"\"\"\n\n This OP concatenates the input along the axis.\n\n Args:\n x(list|tuple): ``x`` is a Tensor list or Tensor tuple which is with data type bool, float16,\n float32, float64, int32, int64, uint8. All the Tensors in ``x`` must have same data type.\n axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.\n It's a scalar with data type int or a Tensor with shape [1] and data type int32 \n or int64. The effective range is [-R, R), where R is Rank(x). When ``axis < 0``,\n it works the same way as ``axis+R``. Default is 0.\n name (str, optional): The default value is None. Normally there is no\n need for user to set this property. For more information, please\n refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: A Tensor with the same data type as ``x``.\n\n Examples:\n .. 
code-block:: python\n \n import paddle\n \n x1 = paddle.to_tensor([[1, 2, 3],\n [4, 5, 6]])\n x2 = paddle.to_tensor([[11, 12, 13],\n [14, 15, 16]])\n x3 = paddle.to_tensor([[21, 22],\n [23, 24]])\n zero = paddle.full(shape=[1], dtype='int32', fill_value=0)\n # When the axis is negative, the real axis is (axis + Rank(x))\n # As follow, axis is -1, Rank(x) is 2, the real axis is 1\n out1 = paddle.concat(x=[x1, x2, x3], axis=-1)\n out2 = paddle.concat(x=[x1, x2], axis=0)\n out3 = paddle.concat(x=[x1, x2], axis=zero)\n # out1\n # [[ 1 2 3 11 12 13 21 22]\n # [ 4 5 6 14 15 16 23 24]]\n # out2 out3\n # [[ 1 2 3]\n # [ 4 5 6]\n # [11 12 13]\n # [14 15 16]]\n \"\"\"\n return paddle.fluid.layers.concat(input=x, axis=axis, name=name)\n\n\ndef flip(x, axis, name=None):\n \"\"\"\n Reverse the order of a n-D tensor along given axis in axis.\n\n Args:\n x (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor x\n should be float32, float64, int32, int64, bool.\n axis (list|tuple): The axis(axes) to flip on. Negative indices for indexing from the end are accepted.\n name (str, optional): The default value is None. Normally there is no need for user to set this property.\n For more information, please refer to :ref:`api_guide_Name` .\n\n Returns:\n Tensor: Tensor or LoDTensor calculated by flip layer. The data type is same with input x.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n image_shape=(3, 2, 2)\n x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)\n x = x.astype('float32')\n img = paddle.to_tensor(x)\n out = paddle.flip(img, [0,1])\n\n print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]]\n \"\"\"\n helper = LayerHelper(\"flip\", **locals())\n check_type(x, 'X', (Variable), 'flip')\n dtype = helper.input_dtype('x')\n check_dtype(dtype, 'X',\n ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],\n 'flip')\n check_type(axis, 'axis', (list, tuple), 'flip')\n if name is None:\n out = helper.create_variable_for_type_inference(dtype)\n else:\n out = helper.create_variable(name=name, dtype=dtype, persistable=False)\n\n helper.append_op(\n type=\"flip\",\n inputs={\"X\": x},\n outputs={\"Out\": out},\n attrs={\"axis\": axis})\n return out\n\n\ndef flatten(x, start_axis=0, stop_axis=-1, name=None):\n r\"\"\"\n **Flatten op**\n\n Flattens a contiguous range of axes in a tensor according to start_axis and stop_axis.\n\n Note that the output Tensor will share data with origin Tensor and doesn't have a \n Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version, please \n use `Tensor.clone` like ``flatten_clone_x = x.flatten().clone()``.\n\n For Example:\n\n .. code-block:: text\n\n Case 1:\n\n Given\n X.shape = (3, 100, 100, 4)\n\n and\n start_axis = 1\n end_axis = 2\n\n We get:\n Out.shape = (3, 1000 * 100, 2)\n\n Case 2:\n\n Given\n X.shape = (3, 100, 100, 4)\n\n and\n start_axis = 0\n stop_axis = -1\n\n We get:\n Out.shape = (3 * 100 * 100 * 4)\n\n Args:\n x (Tensor): A tensor of number of dimentions >= axis. A tensor with data type float32,\n float64, int8, int32, int64, uint8.\n start_axis (int): the start axis to flatten\n stop_axis (int): the stop axis to flatten\n name(str, Optional): For details, please refer to :ref:`api_guide_Name`.\n Generally, no setting is required. Default: None.\n\n Returns:\n Tensor: A tensor with the contents of the input tensor, with input \\\n axes flattened by indicated start axis and end axis. 
\\\n A Tensor with data type same as input x.\n\n Raises:\n ValueError: If x is not a Tensor.\n ValueError: If start_axis or stop_axis is illegal.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n image_shape=(2, 3, 4, 4)\n\n x = paddle.arange(end=image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3])\n img = paddle.reshape(x, image_shape)\n\n out = paddle.flatten(img, start_axis=1, stop_axis=2)\n # out shape is [2, 12, 4]\n\n # out shares data with img in dygraph mode\n img[0, 0, 0, 0] = -1\n print(out[0, 0, 0]) # [-1]\n \"\"\"\n if not (isinstance(x, Variable)):\n raise ValueError(\"The input x should be a Tensor\")\n\n check_variable_and_dtype(\n x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],\n 'flatten')\n helper = LayerHelper('flatten', **locals())\n\n x_dim = len(x.shape)\n if not (isinstance(start_axis, int)) or (\n start_axis > x_dim - 1) or start_axis < -x_dim:\n raise ValueError(\n \"The start_axis should be a int, and in range [-rank(x), rank(x))\")\n if not (isinstance(stop_axis, int)) or (\n stop_axis > x_dim - 1) or stop_axis < -x_dim:\n raise ValueError(\n \"The stop_axis should be a int, and in range [-rank(x), rank(x))\")\n if start_axis < 0:\n start_axis = start_axis + x_dim\n if stop_axis < 0:\n stop_axis = stop_axis + x_dim\n if start_axis > stop_axis:\n raise ValueError(\"The stop_axis should be larger than stat_axis\")\n\n if in_dygraph_mode():\n dy_out, _ = core.ops.flatten_contiguous_range(\n x, 'start_axis', start_axis, 'stop_axis', stop_axis)\n return dy_out\n\n out = helper.create_variable_for_type_inference(x.dtype)\n x_shape = helper.create_variable_for_type_inference(x.dtype)\n helper.append_op(\n type='flatten_contiguous_range',\n inputs={\"X\": x},\n outputs={'Out': out,\n 'XShape': x_shape},\n attrs={\"start_axis\": start_axis,\n \"stop_axis\": stop_axis})\n return out\n\n\n@inplace_apis_in_dygraph_only\ndef flatten_(x, start_axis=0, stop_axis=-1, name=None):\n \"\"\"\n Inplace version of ``flatten`` API, the output Tensor will be inplaced with input ``x``.\n Please refer to :ref:`api_tensor_flatten`.\n \"\"\"\n if not (isinstance(x, Variable)):\n raise ValueError(\"The input x should be a Tensor\")\n\n x_dim = len(x.shape)\n if not (isinstance(start_axis, int)) or (\n start_axis > x_dim - 1) or start_axis < -x_dim:\n raise ValueError(\n \"The start_axis should be a int, and in range [-rank(x), rank(x))\")\n if not (isinstance(stop_axis, int)) or (\n stop_axis > x_dim - 1) or stop_axis < -x_dim:\n raise ValueError(\n \"The stop_axis should be a int, and in range [-rank(x), rank(x))\")\n if start_axis < 0:\n start_axis = start_axis + x_dim\n if stop_axis < 0:\n stop_axis = stop_axis + x_dim\n if start_axis > stop_axis:\n raise ValueError(\"The stop_axis should be larger than stat_axis\")\n\n dy_out, _ = core.ops.flatten_contiguous_range_(x, 'start_axis', start_axis,\n 'stop_axis', stop_axis)\n return dy_out\n\n\ndef roll(x, shifts, axis=None, name=None):\n \"\"\"\n Roll the `x` tensor along the given axis(axes). With specific 'shifts', Elements that \n roll beyond the last position are re-introduced at the first according to 'shifts'. 
\n If a axis is not specified, \n the tensor will be flattened before rolling and then restored to the original shape.\n\n Args:\n x (Tensor): The x tensor as input.\n shifts (int|list|tuple): The number of places by which the elements\n of the `x` tensor are shifted.\n axis (int|list|tuple|None): axis(axes) along which to roll.\n\n Returns:\n Tensor: A Tensor with same data type as `x`.\n\n Examples:\n .. code-block:: python\n \n import paddle\n\n x = paddle.to_tensor([[1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0],\n [7.0, 8.0, 9.0]])\n out_z1 = paddle.roll(x, shifts=1)\n print(out_z1)\n #[[9. 1. 2.]\n # [3. 4. 5.]\n # [6. 7. 8.]]\n out_z2 = paddle.roll(x, shifts=1, axis=0)\n print(out_z2)\n #[[7. 8. 9.]\n # [1. 2. 3.]\n # [4. 5. 6.]]\n \"\"\"\n helper = LayerHelper(\"roll\", **locals())\n origin_shape = x.shape\n if type(shifts) == int:\n shifts = [shifts]\n if type(axis) == int:\n axis = [axis]\n\n len_origin_shape = len(origin_shape)\n if axis:\n for i in range(len(axis)):\n if axis[i] >= len_origin_shape or axis[i] < -len_origin_shape:\n raise ValueError(\n \"axis is out of range, it should be in range [{}, {}), but received {}\".\n format(-len_origin_shape, len_origin_shape, axis))\n\n if axis:\n check_type(axis, 'axis', (list, tuple), 'roll')\n check_type(shifts, 'shifts', (list, tuple), 'roll')\n\n if in_dygraph_mode():\n if axis is None:\n x = core.ops.reshape(x, 'shape', [-1, 1])\n axis = [0]\n out = core.ops.roll(x, 'axis', axis, 'shifts', shifts)\n return core.ops.reshape(out, 'shape', origin_shape)\n\n out = helper.create_variable_for_type_inference(x.dtype)\n\n if axis is None:\n x = reshape(x, shape=[-1, 1])\n axis = [0]\n\n helper.append_op(\n type='roll',\n inputs={'X': x},\n outputs={'Out': out},\n attrs={'axis': axis,\n 'shifts': shifts})\n out = layers.reshape(out, shape=origin_shape)\n return out\n\n\ndef stack(x, axis=0, name=None):\n \"\"\"\n This OP stacks all the input tensors ``x`` along ``axis`` dimemsion. \n All tensors must be of the same shape and same dtype.\n \n For example, given N tensors of shape [A, B], if ``axis == 0``, the shape of stacked \n tensor is [N, A, B]; if ``axis == 1``, the shape of stacked \n tensor is [A, N, B], etc.\n \n\n .. code-block:: text\n\n Case 1:\n\n Input:\n x[0].shape = [1, 2]\n x[0].data = [ [1.0 , 2.0 ] ]\n x[1].shape = [1, 2]\n x[1].data = [ [3.0 , 4.0 ] ]\n x[2].shape = [1, 2]\n x[2].data = [ [5.0 , 6.0 ] ]\n\n Attrs:\n axis = 0\n\n Output:\n Out.dims = [3, 1, 2]\n Out.data =[ [ [1.0, 2.0] ],\n [ [3.0, 4.0] ],\n [ [5.0, 6.0] ] ]\n\n\n Case 2:\n\n Input:\n x[0].shape = [1, 2]\n x[0].data = [ [1.0 , 2.0 ] ]\n x[1].shape = [1, 2]\n x[1].data = [ [3.0 , 4.0 ] ]\n x[2].shape = [1, 2]\n x[2].data = [ [5.0 , 6.0 ] ]\n\n\n Attrs:\n axis = 1 or axis = -2 # If axis = -2, axis = axis+ndim(x[0])+1 = -2+2+1 = 1.\n\n Output:\n Out.shape = [1, 3, 2]\n Out.data =[ [ [1.0, 2.0]\n [3.0, 4.0]\n [5.0, 6.0] ] ]\n\n Args:\n x (list[Tensor]|tuple[Tensor]): Input ``x`` can be a ``list`` or ``tuple`` of tensors, the Tensors in ``x``\n must be of the same shape and dtype. Supported data types: float32, float64, int32, int64.\n axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``,\n where ``R`` is the number of dimensions of the first input tensor ``x[0]``. \n If ``axis < 0``, ``axis = axis+R+1``. The default value of axis is 0.\n name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.\n \n Returns:\n Tensor: The stacked tensor with same data type as input.\n\n Example: \n .. 
code-block:: python\n\n import paddle\n \n x1 = paddle.to_tensor([[1.0, 2.0]])\n x2 = paddle.to_tensor([[3.0, 4.0]])\n x3 = paddle.to_tensor([[5.0, 6.0]])\n out = paddle.stack([x1, x2, x3], axis=0)\n print(out.shape) # [3, 1, 2]\n print(out)\n # [[[1., 2.]],\n # [[3., 4.]],\n # [[5., 6.]]]\n \"\"\"\n return layers.stack(x, axis, name)\n\n\ndef split(x, num_or_sections, axis=0, name=None):\n \"\"\"\n Split the input tensor into multiple sub-Tensors.\n \n Args:\n x (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.\n num_or_sections (int|list|tuple): If ``num_or_sections`` is an int, then ``num_or_sections`` \n indicates the number of equal sized sub-Tensors that the ``x`` will be divided into.\n If ``num_or_sections`` is a list or tuple, the length of it indicates the number of\n sub-Tensors and the elements in it indicate the sizes of sub-Tensors' dimension orderly.\n The length of the list must not be larger than the ``x`` 's size of specified ``axis``.\n axis (int|Tensor, optional): The axis along which to split, it can be a scalar with type \n ``int`` or a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``.\n If :math::`axis < 0`, the axis to split along is :math:`rank(x) + axis`. Default is 0.\n name (str, optional): The default value is None. Normally there is no need for user to set this property.\n For more information, please refer to :ref:`api_guide_Name` .\n Returns:\n list(Tensor): The list of segmented Tensors.\n \n Example:\n .. code-block:: python\n \n import paddle\n \n # x is a Tensor of shape [3, 9, 5]\n x = paddle.rand([3, 9, 5])\n\n out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)\n print(out0.shape) # [3, 3, 5]\n print(out1.shape) # [3, 3, 5]\n print(out2.shape) # [3, 3, 5]\n\n out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1)\n print(out0.shape) # [3, 2, 5]\n print(out1.shape) # [3, 3, 5]\n print(out2.shape) # [3, 4, 5]\n\n out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)\n print(out0.shape) # [3, 2, 5]\n print(out1.shape) # [3, 3, 5]\n print(out2.shape) # [3, 4, 5]\n \n # axis is negative, the real axis is (rank(x) + axis)=1\n out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2)\n print(out0.shape) # [3, 3, 5]\n print(out1.shape) # [3, 3, 5]\n print(out2.shape) # [3, 3, 5]\n \"\"\"\n return paddle.fluid.layers.split(\n input=x, num_or_sections=num_or_sections, dim=axis, name=name)\n\n\ndef squeeze(x, axis=None, name=None):\n \"\"\"\n This OP will squeeze the dimension(s) of size 1 of input tensor x's shape. \n \n Note that the output Tensor will share data with origin Tensor and doesn't have a \n Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version, \n please use `Tensor.clone` like ``squeeze_clone_x = x.squeeze().clone()``.\n\n If axis is provided, it will remove the dimension(s) by given axis that of size 1. \n If the dimension of given axis is not of size 1, the dimension remain unchanged. \n If axis is not provided, all dims equal of size 1 will be removed.\n\n .. 
code-block:: text\n\n Case1:\n\n Input:\n x.shape = [1, 3, 1, 5] # If axis is not provided, all dims equal of size 1 will be removed.\n axis = None\n Output:\n out.shape = [3, 5]\n\n Case2:\n\n Input:\n x.shape = [1, 3, 1, 5] # If axis is provided, it will remove the dimension(s) by given axis that of size 1.\n axis = 0\n Output:\n out.shape = [3, 1, 5]\n \n Case4:\n\n Input:\n x.shape = [1, 3, 1, 5] # If the dimension of one given axis (3) is not of size 1, the dimension remain unchanged. \n axis = [0, 2, 3]\n Output:\n out.shape = [3, 5]\n\n Case4:\n\n Input:\n x.shape = [1, 3, 1, 5] # If axis is negative, axis = axis + ndim (number of dimensions in x). \n axis = [-2]\n Output:\n out.shape = [1, 3, 5]\n\n Args:\n x (Tensor): The input Tensor. Supported data type: float32, float64, bool, int8, int32, int64.\n axis (int|list|tuple, optional): An integer or list/tuple of integers, indicating the dimensions to be squeezed. Default is None.\n The range of axis is :math:`[-ndim(x), ndim(x))`.\n If axis is negative, :math:`axis = axis + ndim(x)`.\n If axis is None, all the dimensions of x of size 1 will be removed.\n name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.\n\n Returns:\n Tensor: Squeezed Tensor with the same data type as input Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n \n x = paddle.rand([5, 1, 10])\n output = paddle.squeeze(x, axis=1)\n\n print(x.shape) # [5, 1, 10]\n print(output.shape) # [5, 10]\n\n # output shares data with x in dygraph mode\n x[0, 0, 0] = 10.\n print(output[0, 0]) # [10.]\n\n \"\"\"\n if axis is None:\n axis = []\n elif isinstance(axis, int):\n axis = [axis]\n elif isinstance(axis, tuple):\n axis = list(axis)\n\n return layers.squeeze(x, axis, name)\n\n\n@inplace_apis_in_dygraph_only\ndef squeeze_(x, axis=None, name=None):\n \"\"\"\n Inplace version of ``squeeze`` API, the output Tensor will be inplaced with input ``x``.\n Please refer to :ref:`api_paddle_tensor_squeeze`.\n \"\"\"\n if axis is None:\n axis = []\n elif isinstance(axis, int):\n axis = [axis]\n elif isinstance(axis, tuple):\n axis = list(axis)\n\n out, _ = core.ops.squeeze2_(x, 'axes', axis)\n return out\n\n\ndef unique(x,\n return_index=False,\n return_inverse=False,\n return_counts=False,\n axis=None,\n dtype=\"int64\",\n name=None):\n r\"\"\"\n Returns the unique elements of `x` in ascending order.\n\n Args:\n x(Tensor): The input tensor, it's data type should be float32, float64, int32, int64.\n return_index(bool, optional): If True, also return the indices of the input tensor that\n result in the unique Tensor.\n return_inverse(bool, optional): If True, also return the indices for where elements in\n the original input ended up in the returned unique tensor.\n return_counts(bool, optional): If True, also return the counts for each unique element.\n axis(int, optional): The axis to apply unique. If None, the input will be flattened.\n Default: None.\n dtype(np.dtype|str, optional): The date type of `indices` or `inverse` tensor: int32 or int64.\n Default: int64.\n name(str, optional): Name for the operation. For more information, please refer to\n :ref:`api_guide_Name`. Default: None.\n\n Returns: \n tuple: (out, indices, inverse, counts). `out` is the unique tensor for `x`. `indices` is \\\n provided only if `return_index` is True. `inverse` is provided only if `return_inverse` \\\n is True. `counts` is provided only if `return_counts` is True.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([2, 3, 3, 1, 5, 3])\n unique = paddle.unique(x)\n np_unique = unique.numpy() # [1 2 3 5]\n _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)\n np_indices = indices.numpy() # [3 0 1 4]\n np_inverse = inverse.numpy() # [1 2 2 0 3 2]\n np_counts = counts.numpy() # [1 1 3 1]\n\n x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])\n unique = paddle.unique(x)\n np_unique = unique.numpy() # [0 1 2 3]\n\n unique = paddle.unique(x, axis=0)\n np_unique = unique.numpy() \n # [[2 1 3]\n # [3 0 1]]\n \"\"\"\n if axis is None:\n axis = []\n else:\n axis = [axis]\n attr_dtype = convert_np_dtype_to_dtype_(dtype)\n if in_dygraph_mode():\n out, inverse, indices, counts = core.ops.unique(\n x, 'dtype', attr_dtype, 'return_index', return_index,\n 'return_inverse', return_inverse, 'return_counts', return_counts,\n 'axis', axis, \"is_sorted\", True)\n outs = [out]\n if return_index:\n outs.append(indices)\n if return_inverse:\n outs.append(inverse)\n if return_counts:\n outs.append(counts)\n\n if len(outs) == 1:\n return outs[0]\n\n return tuple(outs)\n\n check_variable_and_dtype(x, \"input\",\n ['float32', 'float64', 'int32', 'int64'], 'unique')\n check_type(return_index, 'return_index', bool, 'unique')\n check_type(return_inverse, 'return_inverse', bool, 'unique')\n check_type(return_counts, 'return_counts', bool, 'unique')\n check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique')\n if len(axis) != 0:\n check_type(axis[0], 'axis', int, 'unique')\n\n helper = LayerHelper('unique', **locals())\n attrs = {\n 'dtype': attr_dtype,\n \"return_index\": return_index,\n \"return_inverse\": return_inverse,\n \"return_counts\": return_counts,\n \"axis\": axis,\n \"is_sorted\": True\n }\n out = helper.create_variable_for_type_inference(\n dtype=x.dtype, stop_gradient=True)\n indices = helper.create_variable_for_type_inference(\n dtype=attr_dtype, stop_gradient=True)\n inverse = helper.create_variable_for_type_inference(\n dtype=attr_dtype, stop_gradient=True)\n counts = helper.create_variable_for_type_inference(\n dtype=attr_dtype, stop_gradient=True)\n outputs = {\n \"Out\": out,\n \"Indices\": indices,\n \"Index\": inverse,\n \"Counts\": counts\n }\n outs = [out]\n if return_index:\n outs.append(indices)\n if return_inverse:\n outs.append(inverse)\n if return_counts:\n outs.append(counts)\n\n helper.append_op(\n type=\"unique\", inputs={\"X\": x}, attrs=attrs, outputs=outputs)\n\n if len(outs) == 1:\n return outs[0]\n\n return tuple(outs)\n\n\ndef unsqueeze(x, axis, name=None):\n \"\"\"\n Insert single-dimensional entries to the shape of input Tensor ``x``. Takes one\n required argument axis, a dimension or list of dimensions that will be inserted.\n Dimension indices in axis are as seen in the output tensor.\n\n Note that the output Tensor will share data with origin Tensor and doesn't have a \n Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version, \n please use `Tensor.clone` like ``unsqueeze_clone_x = x.unsqueeze(-1).clone()``.\n\n Args:\n x (Tensor): The input Tensor to be unsqueezed. Supported data type: float32, float64, bool, int8, int32, int64.\n axis (int|list|tuple|Tensor): Indicates the dimensions to be inserted. The data type is ``int32`` . \n If ``axis`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. 
\n If ``axis`` is a Tensor, it should be an 1-D Tensor .\n If ``axis`` is negative, ``axis = axis + ndim(x) + 1``.\n name (str|None): Name for this layer. Please refer to :ref:`api_guide_Name`, Default None.\n\n Returns:\n Tensor: Unsqueezed Tensor with the same data type as input Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.rand([5, 10])\n print(x.shape) # [5, 10]\n \n out1 = paddle.unsqueeze(x, axis=0)\n print(out1.shape) # [1, 5, 10]\n \n out2 = paddle.unsqueeze(x, axis=[0, 2]) \n print(out2.shape) # [1, 5, 1, 10]\n\n axis = paddle.to_tensor([0, 1, 2])\n out3 = paddle.unsqueeze(x, axis=axis) \n print(out3.shape) # [1, 1, 1, 5, 10]\n\n # out1, out2, out3 share data with x in dygraph mode\n x[0, 0] = 10.\n print(out1[0, 0, 0]) # [10.]\n print(out2[0, 0, 0, 0]) # [10.]\n print(out3[0, 0, 0, 0, 0]) # [10.]\n \n \"\"\"\n\n return layers.unsqueeze(x, axis, name)\n\n\n@inplace_apis_in_dygraph_only\ndef unsqueeze_(x, axis, name=None):\n \"\"\"\n Inplace version of ``unsqueeze`` API, the output Tensor will be inplaced with input ``x``.\n Please refer to :ref:`api_paddle_tensor_unsqueeze`.\n \"\"\"\n if isinstance(axis, int):\n axis = [axis]\n elif isinstance(axis, Variable):\n axis = axis.numpy().tolist()\n elif isinstance(axis, (list, tuple)):\n axis = [\n item.numpy().item(0) if isinstance(item, Variable) else item\n for item in axis\n ]\n out, _ = core.ops.unsqueeze2_(x, 'axes', axis)\n return out\n\n\ndef gather(x, index, axis=None, name=None):\n \"\"\"\n Output is obtained by gathering entries of ``axis``\n of ``x`` indexed by ``index`` and concatenate them together.\n\n .. code-block:: text\n\n\n Given:\n\n x = [[1, 2],\n [3, 4],\n [5, 6]]\n\n index = [1, 2]\n axis=[0]\n\n Then:\n\n out = [[3, 4],\n [5, 6]] \n\n Args:\n x (Tensor): The source input tensor with rank>=1. Supported data type is\n int32, int64, float32, float64 and uint8 (only for CPU),\n float16 (only for GPU).\n index (Tensor): The index input tensor with rank=1. Data type is int32 or int64.\n axis (Tensor|int, optional): The axis of input to be gathered, it's can be int or a Tensor with data type is int32 or int64. The default value is None, if None, the ``axis`` is 0.\n name (str, optional): The default value is None. Normally there is no need for user to set this property.\n For more information, please refer to :ref:`api_guide_Name` .\n\n Returns:\n output (Tensor): The output is a tensor with the same rank as ``x``.\n \n Examples:\n\n .. 
code-block:: python\n\n import paddle\n\n input = paddle.to_tensor([[1,2],[3,4],[5,6]])\n index = paddle.to_tensor([0,1])\n output = paddle.gather(input, index, axis=0)\n # expected output: [[1,2],[3,4]]\n \"\"\"\n if axis is None:\n axis = 0\n\n if in_dygraph_mode():\n axis = axis.item() if isinstance(axis, paddle.Tensor) else axis\n return core.ops.gather(x, index, None, \"axis\", axis, \"overwrite\", False)\n\n check_variable_and_dtype(\n x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],\n 'gather')\n check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')\n\n if isinstance(axis, Variable):\n check_variable_and_dtype(axis, 'axis', ['int32', 'int64'], 'gather')\n\n helper = LayerHelper('gather', **locals())\n dtype = helper.input_dtype('x')\n out = helper.create_variable_for_type_inference(dtype)\n if not isinstance(axis, Variable):\n helper.append_op(\n type=\"gather\",\n inputs={\"X\": x,\n \"Index\": index},\n attrs={'axis': axis,\n 'overwrite': False},\n outputs={\"Out\": out})\n else:\n helper.append_op(\n type=\"gather\",\n inputs={\"X\": x,\n \"Index\": index,\n \"Axis\": axis},\n attrs={\"overwrite\": False},\n outputs={\"Out\": out})\n\n return out\n\n\ndef unbind(input, axis=0):\n \"\"\"\n\n Removes a tensor dimension, then split the input tensor into multiple sub-Tensors.\n\n Args:\n input (Tensor): The input variable which is an N-D Tensor, data type being float32, float64, int32 or int64.\n axis (int32|int64, optional): A scalar with type ``int32|int64`` shape [1]. The dimension along which to unbind. \n If :math:`axis < 0`, the dimension to unbind along is :math:`rank(input) + axis`. Default is 0.\n Returns:\n list(Tensor): The list of segmented Tensor variables.\n\n Example:\n .. code-block:: python\n\n import paddle\n import numpy as np\n # input is a variable which shape is [3, 4, 5]\n np_input = np.random.rand(3, 4, 5).astype('float32')\n input = paddle.to_tensor(np_input)\n [x0, x1, x2] = paddle.unbind(input, axis=0)\n # x0.shape [4, 5]\n # x1.shape [4, 5]\n # x2.shape [4, 5]\n [x0, x1, x2, x3] = paddle.unbind(input, axis=1)\n # x0.shape [3, 5]\n # x1.shape [3, 5]\n # x2.shape [3, 5]\n # x3.shape [3, 5]\n\n \"\"\"\n helper = LayerHelper(\"unbind\", **locals())\n check_type(input, 'input', (Variable), 'unbind')\n dtype = helper.input_dtype()\n check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],\n 'unbind')\n if not isinstance(axis, (int)):\n raise TypeError(\"The type of 'axis' must be int, but received %s.\" %\n (type(axis)))\n if isinstance(axis, np.generic):\n axis = np.asscalar(axis)\n input_shape = input.shape\n axis_ = axis if axis >= 0 else len(input_shape) + axis\n num = input_shape[axis_]\n outs = [\n helper.create_variable_for_type_inference(dtype=helper.input_dtype())\n for i in range(num)\n ]\n if in_dygraph_mode():\n return core.ops.unbind(input, num, 'axis', axis)\n\n helper.append_op(\n type=\"unbind\",\n inputs={\"X\": input},\n outputs={\"Out\": outs},\n attrs={\"axis\": axis})\n return outs\n\n\ndef scatter(x, index, updates, overwrite=True, name=None):\n \"\"\"\n **Scatter Layer**\n Output is obtained by updating the input on selected indices based on updates.\n \n .. 
code-block:: python\n \n import numpy as np\n #input:\n x = np.array([[1, 1], [2, 2], [3, 3]])\n index = np.array([2, 1, 0, 1])\n # shape of updates should be the same as x\n # shape of updates with dim > 1 should be the same as input\n updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])\n overwrite = False\n # calculation:\n if not overwrite:\n for i in range(len(index)):\n x[index[i]] = np.zeros((2))\n for i in range(len(index)):\n if (overwrite):\n x[index[i]] = updates[i]\n else:\n x[index[i]] += updates[i]\n # output:\n out = np.array([[3, 3], [6, 6], [1, 1]])\n out.shape # [3, 2]\n\n **NOTICE**: The order in which updates are applied is nondeterministic, \n so the output will be nondeterministic if index contains duplicates.\n\n Args:\n x (Tensor): The input N-D Tensor with ndim>=1. Data type can be float32, float64.\n index (Tensor): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates's length, and the value in index cannot exceed input's length.\n updates (Tensor): update input with updates parameter based on index. shape should be the same as input, and dim value with dim > 1 should be the same as input.\n overwrite (bool): The mode that updating the output when there are same indices. \n If True, use the overwrite mode to update the output of the same index,\n\t if False, use the accumulate mode to update the output of the same index.Default value is True.\n name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .\n \n Returns:\n Tensor: The output is a Tensor with the same shape as x.\n\n Examples:\n .. code-block:: python\n \n import paddle\n\n x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')\n index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')\n updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')\n \n output1 = paddle.scatter(x, index, updates, overwrite=False)\n # [[3., 3.],\n # [6., 6.],\n # [1., 1.]]\n\n output2 = paddle.scatter(x, index, updates, overwrite=True)\n # CPU device:\n # [[3., 3.],\n # [4., 4.],\n # [1., 1.]]\n # GPU device maybe have two results because of the repeated numbers in index\n # result 1:\n # [[3., 3.],\n # [4., 4.],\n # [1., 1.]]\n # result 2:\n # [[3., 3.],\n # [2., 2.],\n # [1., 1.]]\n \"\"\"\n if in_dygraph_mode():\n return core.ops.scatter(x, index, updates, 'overwrite', overwrite)\n\n check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'scatter')\n check_type(overwrite, 'overwrite', bool, 'scatter')\n helper = LayerHelper('scatter', **locals())\n out = helper.create_variable_for_type_inference(x.dtype)\n helper.append_op(\n type=\"scatter\",\n inputs={\"X\": x,\n \"Ids\": index,\n \"Updates\": updates},\n attrs={'overwrite': overwrite},\n outputs={\"Out\": out})\n return out\n\n\n@inplace_apis_in_dygraph_only\ndef scatter_(x, index, updates, overwrite=True, name=None):\n \"\"\"\n Inplace version of ``scatter`` API, the output Tensor will be inplaced with input ``x``.\n Please refer to :ref:`api_paddle_tensor_scatter`.\n \"\"\"\n return core.ops.scatter_(x, index, updates, 'overwrite', overwrite)\n\n\ndef scatter_nd_add(x, index, updates, name=None):\n r\"\"\"\n **Scatter_nd_add Layer**\n\n Output is obtained by applying sparse addition to a single value\n or slice in a Tensor.\n\n :attr:`x` is a Tensor with ndim :math:`R`\n and :attr:`index` is a Tensor with ndim :math:`K` . 
Thus, :attr:`index`\n has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \\leq R` . :attr:`updates`\n is a Tensor with ndim :math:`K - 1 + R - Q` and its\n shape is :math:`index.shape[:-1] + x.shape[index.shape[-1]:]` .\n\n According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` ,\n add the corresponding :attr:`updates` slice to the :attr:`x` slice\n which is obtained by the last one dimension of :attr:`index` .\n\n .. code-block:: text\n\n Given:\n\n * Case 1:\n x = [0, 1, 2, 3, 4, 5]\n index = [[1], [2], [3], [1]]\n updates = [9, 10, 11, 12]\n\n we get:\n\n output = [0, 22, 12, 14, 4, 5]\n\n * Case 2:\n x = [[65, 17], [-14, -25]]\n index = [[], []]\n updates = [[[-1, -2], [1, 2]],\n [[3, 4], [-3, -4]]]\n x.shape = (2, 2)\n index.shape = (2, 0)\n updates.shape = (2, 2, 2)\n\n we get:\n\n output = [[67, 19], [-16, -27]]\n\n Args:\n x (Tensor): The x input. Its dtype should be float32, float64.\n index (Tensor): The index input with ndim > 1 and index.shape[-1] <= x.ndim.\n Its dtype should be int32 or int64 as it is used as indexes.\n updates (Tensor): The updated value of scatter_nd_add op, and it must have the same dtype\n as x. It must have the shape index.shape[:-1] + x.shape[index.shape[-1]:].\n name (str|None): The output tensor name. If set None, the layer will be named automatically.\n\n Returns:\n output (Tensor): The output is a tensor with the same shape and dtype as x.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n x = paddle.rand(shape=[3, 5, 9, 10], dtype='float32')\n updates = paddle.rand(shape=[3, 9, 10], dtype='float32')\n index_data = np.array([[1, 1],\n [0, 1],\n [1, 3]]).astype(np.int64)\n index = paddle.to_tensor(index_data)\n output = paddle.scatter_nd_add(x, index, updates)\n \"\"\"\n return layers.scatter_nd_add(x, index, updates, name=None)\n\n\ndef chunk(x, chunks, axis=0, name=None):\n \"\"\"\n Split the input tensor into multiple sub-Tensors.\n \n Args:\n x (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.\n chunks(int): The number of tensor to be split along the certain axis.\n axis (int|Tensor, optional): The axis along which to split, it can be a scalar with type \n ``int`` or a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``.\n If :math::`axis < 0`, the axis to split along is :math:`rank(x) + axis`. Default is 0.\n name (str, optional): The default value is None. Normally there is no need for user to set this property.\n For more information, please refer to :ref:`api_guide_Name` .\n Returns:\n list(Tensor): The list of segmented Tensors.\n \n Example:\n .. 
code-block:: python\n \n import numpy as np\n import paddle\n \n # x is a Tensor which shape is [3, 9, 5]\n x_np = np.random.random([3, 9, 5]).astype(\"int32\")\n x = paddle.to_tensor(x_np)\n\n out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1)\n # out0.shape [3, 3, 5]\n # out1.shape [3, 3, 5]\n # out2.shape [3, 3, 5]\n\n \n # axis is negative, the real axis is (rank(x) + axis) which real\n # value is 1.\n out0, out1, out2 = paddle.chunk(x, chunks=3, axis=-2)\n # out0.shape [3, 3, 5]\n # out1.shape [3, 3, 5]\n # out2.shape [3, 3, 5]\n \"\"\"\n check_type(chunks, 'chunks', (int), 'chunk')\n return paddle.fluid.layers.split(\n input=x, num_or_sections=chunks, dim=axis, name=name)\n\n\ndef tile(x, repeat_times, name=None):\n \"\"\"\n\n Construct a new Tensor by repeating ``x`` the number of times given by ``repeat_times``.\n After tiling, the value of the i'th dimension of the output is equal to ``x.shape[i]*repeat_times[i]``.\n\n Both the number of dimensions of ``x`` and the number of elements in ``repeat_times`` should be less than or equal to 6.\n\n Args:\n x (Tensor): The input tensor, its data type should be bool, float32, float64, int32 or int64.\n repeat_times (Tensor|tuple|list): The number of repeating times. If repeat_times is a list or tuple, all its elements\n should be integers or 1-D Tensors with the data type int32. If repeat_times is a Tensor, it should be an 1-D Tensor with the data type int32.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n N-D Tensor. The data type is the same as ``x``.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n data = paddle.to_tensor([1, 2, 3], dtype='int32')\n out = paddle.tile(data, repeat_times=[2, 1])\n np_out = out.numpy()\n # [[1, 2, 3], [1, 2, 3]]\n\n out = paddle.tile(data, repeat_times=[2, 2])\n np_out = out.numpy()\n # [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]]\n\n repeat_times = paddle.to_tensor([2, 1], dtype='int32')\n out = paddle.tile(data, repeat_times=repeat_times)\n np_out = out.numpy()\n # [[1, 2, 3], [1, 2, 3]]\n \"\"\"\n if in_dygraph_mode():\n return core.ops.tile(x, 'repeat_times', repeat_times)\n check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')\n if isinstance(repeat_times, Variable):\n assert len(repeat_times.shape) == 1, (\n 'repeat_times must be an 1-D Tensor.')\n else:\n for elem in repeat_times:\n if isinstance(elem, Variable):\n assert len(elem.shape) == 1, (\n 'Elements in repeat_times must be 1-D Tensors or integers.')\n else:\n type_tuple = (int, np.int32, np.int64)\n assert isinstance(elem, type_tuple), (\n 'Elements in repeat_times must be 1-D Tensors or integers.')\n\n check_variable_and_dtype(\n x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile')\n if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:\n raise ValueError(\n \"When the date type is bool for the input 'x' of tile op, you \"\n \"must set its stop_gradient to be True by \"\n \"some_var.stop_gradient == True supporting some_var is the input.\")\n\n helper = LayerHelper('tile', **locals())\n\n inputs = {\"X\": [x]}\n attrs = {}\n\n def get_attr_repeat_times(list_repeat_times):\n attrs_repeat_times = []\n for idx, times in enumerate(list_repeat_times):\n if isinstance(times, Variable):\n attrs_repeat_times.append(-1)\n else:\n attrs_repeat_times.append(times)\n assert times > 0, (\n \"All elements in repeat_times must be positive for tile.\")\n return attrs_repeat_times\n\n if 
isinstance(repeat_times, Variable):\n repeat_times.stop_gradient = True\n inputs['RepeatTimes'] = repeat_times\n attrs['repeat_times'] = [-1]\n elif isinstance(repeat_times, (list, tuple)):\n attrs['repeat_times'] = get_attr_repeat_times(repeat_times)\n if utils._contain_var(repeat_times):\n inputs['repeat_times_tensor'] = utils._convert_to_tensor_list(\n repeat_times)\n\n dtype = helper.input_dtype(input_param_name='x')\n out = helper.create_variable_for_type_inference(dtype)\n helper.append_op(\n type='tile', inputs=inputs, outputs={'Out': out}, attrs=attrs)\n return out\n\n\ndef expand_as(x, y, name=None):\n \"\"\"\n\n Expand the input tensor ``x`` to the same shape as the input tensor ``y``.\n\n Both the number of dimensions of ``x`` and ``y`` must be less than or equal to 6, and the number of dimensions of ``y`` must be greather than or equal to that of ``x``. The dimension to expand must have a value of 1.\n\n Args:\n x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.\n y (Tensor): The input tensor that gives the shape to expand to.\n name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n N-D Tensor: A Tensor with the same shape as ``y``. The data type is the same as ``x``.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n data_x = paddle.to_tensor([1, 2, 3], 'int32')\n data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')\n out = paddle.expand_as(data_x, data_y)\n np_out = out.numpy()\n # [[1, 2, 3], [1, 2, 3]]\n \"\"\"\n if in_dygraph_mode():\n return core.ops.expand_as_v2(x, 'target_shape', y.shape)\n\n check_variable_and_dtype(\n x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as')\n check_type(y, 'y', Variable, 'expand_as')\n\n if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:\n raise ValueError(\n \"When the data type of input 'x' for expand_as is bool, \"\n \"you must set its stop_gradient to be False by \"\n \"some_var.stop_gradient = True, supporting \"\n \"some_var as the input 'x'.\")\n inputs = {\"X\": [x]}\n\n helper = LayerHelper('expand_as', **locals())\n dtype = helper.input_dtype(input_param_name='x')\n out = helper.create_variable_for_type_inference(dtype)\n helper.append_op(\n type='expand_as_v2',\n inputs=inputs,\n attrs={'target_shape': y.shape},\n outputs={'Out': out})\n return out\n\n\ndef broadcast_to(x, shape, name=None):\n \"\"\"\n\n Broadcast the input tensor to a given shape.\n\n Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. The dimension to broadcast to must have a value 1.\n\n\n Args:\n x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.\n shape (list|tuple|Tensor): The result shape after broadcasting. The data type is int32. If shape is a list or tuple, all its elements\n should be integers or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32. \n The value -1 in shape means keeping the corresponding dimension unchanged.\n name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .\n\n Returns:\n N-D Tensor: A Tensor with the given shape. The data type is the same as ``x``.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n data = paddle.to_tensor([1, 2, 3], dtype='int32')\n out = paddle.broadcast_to(data, shape=[2, 3])\n print(out)\n # [[1, 2, 3], [1, 2, 3]]\n \"\"\"\n if in_dygraph_mode():\n return core.ops.expand_v2(x, 'shape', shape)\n\n if isinstance(shape, Variable):\n assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')\n else:\n for elem in shape:\n if isinstance(elem, Variable):\n assert len(elem.shape) == 1, (\n 'Elements in shape must be 1-D Tensors or integers.')\n else:\n type_tuple = (int, np.int32, np.int64)\n assert isinstance(elem, type_tuple), (\n 'Elements in shape must be 1-D Tensors or integers.')\n\n check_variable_and_dtype(x, 'x',\n ['bool', 'float32', 'float64', 'int32', 'int64'],\n 'broadcast_to')\n check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')\n if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:\n raise ValueError(\n \"When the data type of input 'x' for broadcast_to is bool, \"\n \"you must set its stop_gradient to be False by \"\n \"some_var.stop_gradient = True, supporting \"\n \"some_var as the input.\")\n\n inputs = {\"X\": [x]}\n attrs = {}\n\n helper = LayerHelper('expand', **locals())\n\n def get_attr_expand_shape(list_expand_shape):\n attrs_expand_shape = []\n for idx, shape in enumerate(list_expand_shape):\n if isinstance(shape, Variable):\n attrs_expand_shape.append(-1)\n else:\n attrs_expand_shape.append(shape)\n assert shape > 0 or shape == -1, (\n \"All elements in shape of broadcast_to must be positive or -1.\"\n )\n return attrs_expand_shape\n\n if isinstance(shape, Variable):\n shape.stop_gradient = True\n inputs['Shape'] = shape\n elif isinstance(shape, (list, tuple)):\n attrs['shape'] = get_attr_expand_shape(shape)\n if utils._contain_var(shape):\n inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(\n shape)\n\n dtype = helper.input_dtype(input_param_name='x')\n out = helper.create_variable_for_type_inference(dtype)\n helper.append_op(\n type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs)\n return out\n\n\ndef expand(x, shape, name=None):\n \"\"\"\n\n Expand the input tensor to a given shape.\n\n Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. The dimension to expand must have a value 1.\n\n\n Args:\n x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.\n shape (list|tuple|Tensor): The result shape after expanding. The data type is int32. If shape is a list or tuple, all its elements\n should be integers or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32. \n The value -1 in shape means keeping the corresponding dimension unchanged.\n name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .\n\n Returns:\n N-D Tensor: A Tensor with the given shape. The data type is the same as ``x``.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n data = paddle.to_tensor([1, 2, 3], dtype='int32')\n out = paddle.expand(data, shape=[2, 3])\n print(out)\n # [[1, 2, 3], [1, 2, 3]]\n \"\"\"\n if in_dygraph_mode():\n return core.ops.expand_v2(x, 'shape', shape)\n\n if isinstance(shape, Variable):\n assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')\n else:\n for elem in shape:\n if isinstance(elem, Variable):\n assert len(elem.shape) == 1, (\n 'Elements in shape must be 1-D Tensors or integers.')\n else:\n type_tuple = (int, np.int32, np.int64)\n assert isinstance(elem, type_tuple), (\n 'Elements in shape must be 1-D Tensors or integers.')\n\n check_variable_and_dtype(\n x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],\n 'expand')\n check_type(shape, 'shape', (list, tuple, Variable), 'expand')\n if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:\n raise ValueError(\"When the data type of input 'x' for expand is bool, \"\n \"you must set its stop_gradient to be False by \"\n \"some_var.stop_gradient = True, supporting \"\n \"some_var as the input.\")\n\n inputs = {\"X\": [x]}\n attrs = {}\n\n helper = LayerHelper('expand', **locals())\n\n def get_attr_expand_shape(list_expand_shape):\n attrs_expand_shape = []\n for idx, shape in enumerate(list_expand_shape):\n if isinstance(shape, Variable):\n attrs_expand_shape.append(-2)\n else:\n attrs_expand_shape.append(shape)\n assert shape > 0 or shape == -1, (\n \"All elements in shape of expand must be positive or -1.\")\n return attrs_expand_shape\n\n if isinstance(shape, Variable):\n shape.stop_gradient = True\n inputs['Shape'] = shape\n elif isinstance(shape, (list, tuple)):\n attrs['shape'] = get_attr_expand_shape(shape)\n if utils._contain_var(shape):\n inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(\n shape)\n\n dtype = helper.input_dtype(input_param_name='x')\n out = helper.create_variable_for_type_inference(dtype)\n helper.append_op(\n type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs)\n return out\n\n\ndef reshape(x, shape, name=None):\n \"\"\"\n This operator changes the shape of ``x`` without changing its data.\n\n Note that the output Tensor will share data with origin Tensor and doesn't\n have a Tensor copy in ``dygraph`` mode. \n If you want to use the Tensor copy version, please use `Tensor.clone` like \n ``reshape_clone_x = x.reshape([-1]).clone()``.\n\n Some tricks exist when specifying the target shape.\n\n 1. -1 means the value of this dimension is inferred from the total element\n number of x and remaining dimensions. Thus one and only one dimension can\n be set -1.\n\n 2. 0 means the actual dimension value is going to be copied from the\n corresponding dimension of x. The index of 0s in shape can not exceed\n the dimension of x.\n\n Here are some examples to explain it.\n\n 1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape\n is [6, 8], the reshape operator will transform x into a 2-D tensor with\n shape [6, 8] and leaving x's data unchanged.\n\n 2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape\n specified is [2, 3, -1, 2], the reshape operator will transform x into a\n 4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this\n case, one dimension of the target shape is set to -1, the value of this\n dimension is inferred from the total element number of x and remaining\n dimensions.\n\n 3. 
Given a 3-D tensor x with a shape [2, 4, 6], and the target shape\n is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor\n with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case,\n besides -1, 0 means the actual dimension value is going to be copied from\n the corresponding dimension of x.\n\n Args:\n x(Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``\n shape(list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1.\n The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].\n If ``shape`` is an Tensor, it should be an 1-D Tensor .\n name(str, optional): The default value is None. Normally there is no need for user to set this property.\n For more information, please refer to :ref:`api_guide_Name` .\n\n Returns:\n Tensor: A reshaped Tensor with the same data type as ``x``.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle\n\n x = paddle.rand([2, 4, 6], dtype=\"float32\")\n positive_four = paddle.full([1], 4, \"int32\")\n\n out = paddle.reshape(x, [-1, 0, 3, 2])\n print(out)\n # the shape is [2,4,3,2].\n\n out = paddle.reshape(x, shape=[positive_four, 12])\n print(out)\n # the shape of out_2 is [4, 12].\n\n shape_tensor = paddle.to_tensor(np.array([8, 6]).astype(\"int32\"))\n out = paddle.reshape(x, shape=shape_tensor)\n print(out)\n # the shape is [8, 6].\n # out shares data with x in dygraph mode\n x[0, 0, 0] = 10.\n print(out[0, 0])\n # the value is [10.]\n\n \"\"\"\n return paddle.fluid.layers.reshape(x=x, shape=shape, name=name)\n\n\n@inplace_apis_in_dygraph_only\ndef reshape_(x, shape, name=None):\n \"\"\"\n Inplace version of ``reshape`` API, the output Tensor will be inplaced with input ``x``.\n Please refer to :ref:`api_paddle_tensor_reshape`.\n \"\"\"\n if isinstance(shape, (list, tuple)):\n shape = [\n item.numpy().item(0) if isinstance(item, Variable) else item\n for item in shape\n ]\n out, _ = core.ops.reshape2_(x, None, 'shape', shape)\n return out\n elif isinstance(shape, Variable):\n shape.stop_gradient = True\n out, _ = core.ops.reshape2_(x, shape)\n return out\n\n\ndef gather_nd(x, index, name=None):\n \"\"\"\n\n This function is actually a high-dimensional extension of :code:`gather`\n and supports for simultaneous indexing by multiple axes. :attr:`index` is a\n K-dimensional integer tensor, which is regarded as a (K-1)-dimensional\n tensor of :attr:`index` into :attr:`input`, where each element defines\n a slice of params:\n\n .. math::\n\n output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]]\n\n Obviously, :code:`index.shape[-1] <= input.rank` . And, the output tensor has\n shape :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` .\n\n .. 
code-block:: text\n\n Given:\n x = [[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]]\n x.shape = (2, 3, 4)\n\n * Case 1:\n index = [[1]]\n\n gather_nd(x, index)\n = [x[1, :, :]]\n = [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]\n\n * Case 2:\n index = [[0,2]]\n\n gather_nd(x, index)\n = [x[0, 2, :]]\n = [8, 9, 10, 11]\n\n * Case 3:\n index = [[1, 2, 3]]\n\n gather_nd(x, index)\n = [x[1, 2, 3]]\n = [23]\n\n Args:\n x (Tensor): The input Tensor which it's data type should be bool, float32, float64, int32, int64.\n index (Tensor): The index input with rank > 1, index.shape[-1] <= input.rank.\n Its dtype should be int32, int64.\n name(str, optional): The default value is None. Normally there is no need for user to set this property.\n For more information, please refer to :ref:`api_guide_Name` .\n\n Returns:\n output (Tensor): A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:]\n \n Examples:\n\n .. code-block:: python\n \n import paddle\n \n x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]],\n [[7, 8], [9, 10], [11, 12]]])\n index = paddle.to_tensor([[0, 1]])\n \n output = paddle.gather_nd(x, index) #[[3, 4]]\n\n \"\"\"\n\n return paddle.fluid.layers.gather_nd(input=x, index=index, name=name)\n\n\ndef strided_slice(x, axes, starts, ends, strides, name=None):\n \"\"\"\n This operator produces a slice of ``x`` along multiple axes. Similar to numpy:\n https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and\n end dimension for each axis in the list of axes and Slice uses this information\n to slice the input data tensor. If a negative value is passed to\n ``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the\n axis :math:`i-1` th(here 0 is the initial position). The ``strides`` represents steps of\n slicing and if the ``strides`` is negative, slice operation is in the opposite direction.\n If the value passed to ``starts`` or ``ends`` is greater than n\n (the number of elements in this dimension), it represents n.\n For slicing to the end of a dimension with unknown size, it is recommended\n to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` , ``ends`` and ``strides``.\n Following examples will explain how strided_slice works:\n\n .. code-block:: text\n\n Case1:\n Given:\n data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]\n axes = [0, 1]\n starts = [1, 0]\n ends = [2, 3]\n strides = [1, 1]\n Then:\n result = [ [5, 6, 7], ]\n\n Case2:\n Given:\n data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]\n axes = [0, 1]\n starts = [0, 1]\n ends = [2, 0]\n strides = [1, -1]\n Then:\n result = [ [8, 7, 6], ]\n Case3:\n Given:\n data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]\n axes = [0, 1]\n starts = [0, 1]\n ends = [-1, 1000]\n strides = [1, 3]\n Then:\n result = [ [2], ]\n\n Args:\n x (Tensor): An N-D ``Tensor``. The data type is ``float32``, ``float64``, ``int32`` or ``int64``.\n axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.\n It's optional. If it is not provides, it will be treated as :math:`[0,1,...,len(starts)-1]`.\n starts (list|tuple|Tensor): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``starts`` is an Tensor, it should be an 1-D Tensor. It represents starting indices of corresponding axis in ``axes``.\n ends (list|tuple|Tensor): The data type is ``int32`` . 
If ``ends`` is a list or tuple, the elements of\n it should be integers or Tensors with shape [1]. If ``ends`` is an Tensor, it should be an 1-D Tensor . It represents ending indices of corresponding axis in ``axes``.\n strides (list|tuple|Tensor): The data type is ``int32`` . If ``strides`` is a list or tuple, the elements of\n it should be integers or Tensors with shape [1]. If ``strides`` is an Tensor, it should be an 1-D Tensor . It represents slice step of corresponding axis in ``axes``.\n name(str, optional): The default value is None. Normally there is no need for user to set this property.\n For more information, please refer to :ref:`api_guide_Name` .\n\n Returns:\n Tensor: A ``Tensor`` with the same dimension as ``x``. The data type is same as ``x``.\n\n Examples:\n .. code-block:: python\n\n import paddle\n x = paddle.zeros(shape=[3,4,5,6], dtype=\"float32\")\n # example 1:\n # attr starts is a list which doesn't contain Tensor.\n axes = [1, 2, 3]\n starts = [-3, 0, 2]\n ends = [3, 2, 4]\n strides_1 = [1, 1, 1]\n strides_2 = [1, 1, 2]\n sliced_1 = paddle.strided_slice(x, axes=axes, starts=starts, ends=ends, strides=strides_1)\n # sliced_1 is x[:, 1:3:1, 0:2:1, 2:4:1]. \n # example 2:\n # attr starts is a list which contain tensor Tensor.\n minus_3 = paddle.full(shape=[1], fill_value=-3, dtype='int32')\n sliced_2 = paddle.strided_slice(x, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)\n # sliced_2 is x[:, 1:3:1, 0:2:1, 2:4:2].\n \"\"\"\n\n return paddle.fluid.layers.strided_slice(\n input=x, axes=axes, starts=starts, ends=ends, strides=strides)\n"
] |
[
[
"numpy.asscalar"
]
] |
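The single API extracted for this row, `numpy.asscalar`, is called in Paddle's `unbind` to coerce a NumPy scalar axis into a plain Python int; it has been deprecated since NumPy 1.16 and is removed in current NumPy releases. A minimal sketch of the drop-in replacement (illustrative values only, not part of the Paddle source):

    import numpy as np

    axis = np.int64(1)                 # a NumPy scalar, as unbind may receive
    if isinstance(axis, np.generic):
        axis = axis.item()             # modern replacement for np.asscalar(axis)
    print(axis, type(axis))            # 1 <class 'int'>
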
BillyGareth/pyod
|
[
"7aeefcf65ceb0196434b7adb4fd706bfb404e4e2"
] |
[
"pyod/test/test_xgbod.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nfrom os import path\n\nimport unittest\n# noinspection PyProtectedMember\nfrom numpy.testing import assert_allclose\nfrom numpy.testing import assert_array_less\nfrom numpy.testing import assert_equal\nfrom numpy.testing import assert_raises\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.base import clone\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils.validation import check_X_y\nfrom scipy.io import loadmat\nfrom scipy.stats import rankdata\n\n# temporary solution for relative imports in case pyod is not installed\n# if pyod is installed, no need to use the following line\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom pyod.models.xgbod import XGBOD\nfrom pyod.utils.data import generate_data\n\n\nclass TestXGBOD(unittest.TestCase):\n def setUp(self):\n # Define data file and read X and y\n # Generate some data if the source data is missing\n this_directory = path.abspath(path.dirname(__file__))\n mat_file = 'pima.mat'\n try:\n mat = loadmat(path.join(*[this_directory, 'data', mat_file]))\n\n except TypeError:\n print('{data_file} does not exist. Use generated data'.format(\n data_file=mat_file))\n X, y = generate_data(train_only=True) # load data\n except IOError:\n print('{data_file} does not exist. Use generated data'.format(\n data_file=mat_file))\n X, y = generate_data(train_only=True) # load data\n else:\n X = mat['X']\n y = mat['y'].ravel()\n X, y = check_X_y(X, y)\n\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(X, y, test_size=0.4, random_state=42)\n\n self.clf = XGBOD(random_state=42)\n self.clf.fit(self.X_train, self.y_train)\n\n self.roc_floor = 0.75\n\n def test_parameters(self):\n assert (hasattr(self.clf, 'clf_') and\n self.clf.decision_scores_ is not None)\n assert (hasattr(self.clf, '_scalar') and\n self.clf.labels_ is not None)\n assert (hasattr(self.clf, 'n_detector_') and\n self.clf.labels_ is not None)\n assert (hasattr(self.clf, 'X_train_add_') and\n self.clf.labels_ is not None)\n assert (hasattr(self.clf, 'decision_scores_') and\n self.clf.decision_scores_ is not None)\n assert (hasattr(self.clf, 'labels_') and\n self.clf.labels_ is not None)\n\n def test_train_scores(self):\n assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])\n\n def test_prediction_scores(self):\n pred_scores = self.clf.decision_function(self.X_test)\n\n # check score shapes\n assert_equal(pred_scores.shape[0], self.X_test.shape[0])\n\n # check performance\n assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)\n\n def test_prediction_labels(self):\n pred_labels = self.clf.predict(self.X_test)\n assert_equal(pred_labels.shape, self.y_test.shape)\n\n def test_prediction_proba(self):\n pred_proba = self.clf.predict_proba(self.X_test)\n assert (pred_proba.min() >= 0)\n assert (pred_proba.max() <= 1)\n\n # def test_prediction_proba_linear(self):\n # pred_proba = self.clf.predict_proba(self.X_test, method='linear')\n # assert (pred_proba.min() >= 0)\n # assert (pred_proba.max() <= 1)\n #\n # def test_prediction_proba_unify(self):\n # pred_proba = self.clf.predict_proba(self.X_test, method='unify')\n # assert (pred_proba.min() >= 0)\n # assert (pred_proba.max() <= 1)\n #\n # def test_prediction_proba_parameter(self):\n # with assert_raises(ValueError):\n # self.clf.predict_proba(self.X_test, method='something')\n\n # def 
test_prediction_labels_confidence(self):\n # pred_labels, confidence = self.clf.predict(self.X_test,\n # return_confidence=True)\n # assert_equal(pred_labels.shape, self.y_test.shape)\n # assert_equal(confidence.shape, self.y_test.shape)\n # assert (confidence.min() >= 0)\n # assert (confidence.max() <= 1)\n #\n # def test_prediction_proba_linear_confidence(self):\n # pred_proba, confidence = self.clf.predict_proba(self.X_test,\n # method='linear',\n # return_confidence=True)\n # assert (pred_proba.min() >= 0)\n # assert (pred_proba.max() <= 1)\n #\n # assert_equal(confidence.shape, self.y_test.shape)\n # assert (confidence.min() >= 0)\n # assert (confidence.max() <= 1)\n\n def test_fit_predict(self):\n pred_labels = self.clf.fit_predict(self.X_train, self.y_train)\n assert_equal(pred_labels.shape, self.y_train.shape)\n\n def test_fit_predict_score(self):\n self.clf.fit_predict_score(self.X_test, self.y_test)\n self.clf.fit_predict_score(self.X_test, self.y_test,\n scoring='roc_auc_score')\n self.clf.fit_predict_score(self.X_test, self.y_test,\n scoring='prc_n_score')\n with assert_raises(NotImplementedError):\n self.clf.fit_predict_score(self.X_test, self.y_test,\n scoring='something')\n\n def test_predict_rank(self):\n pred_socres = self.clf.decision_function(self.X_test)\n pred_ranks = self.clf._predict_rank(self.X_test)\n print(pred_ranks)\n\n # assert the order is reserved\n assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), rtol=4)\n assert_array_less(pred_ranks, self.X_train.shape[0] + 1)\n assert_array_less(-0.1, pred_ranks)\n\n def test_predict_rank_normalized(self):\n pred_socres = self.clf.decision_function(self.X_test)\n pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)\n\n # assert the order is reserved\n assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), rtol=4)\n assert_array_less(pred_ranks, 1.01)\n assert_array_less(-0.1, pred_ranks)\n\n def test_model_clone(self):\n clone_clf = clone(self.clf)\n\n def tearDown(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.testing.assert_equal",
"sklearn.metrics.roc_auc_score",
"scipy.stats.rankdata",
"sklearn.model_selection.train_test_split",
"numpy.testing.assert_array_less",
"sklearn.base.clone",
"numpy.testing.assert_raises",
"sklearn.utils.validation.check_X_y"
]
] |
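The XGBOD test file in this row checks ranking consistency rather than exact score values: the output of `_predict_rank` is compared to `decision_function` through `scipy.stats.rankdata` and `numpy.testing.assert_allclose`. A minimal, self-contained sketch of that pattern (illustrative arrays, not the pima data the tests load):

    import numpy as np
    from numpy.testing import assert_allclose
    from scipy.stats import rankdata

    scores = np.array([0.1, 0.9, 0.4])      # raw decision scores
    ranks = np.array([10.0, 99.0, 42.0])    # any order-preserving transform
    assert_allclose(rankdata(scores), rankdata(ranks))  # both give [1., 3., 2.]
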
Asjidkalam/analytics-zoo
|
[
"0afa8437abc3e5cf5289d2cfde68b237a45f9d0d"
] |
[
"pyzoo/zoo/tfpark/tf_optimizer.py"
] |
[
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport json\nimport logging\nimport os\nimport sys\nimport tempfile\n\nfrom bigdl.nn.criterion import Criterion\nfrom bigdl.nn.layer import Layer\nfrom bigdl.optim.optimizer import MaxEpoch, EveryEpoch\nfrom bigdl.util.common import to_list, JavaValue\n\nfrom zoo.common.utils import callZooFunc\nfrom zoo.pipeline.api.keras.engine.topology import to_bigdl_metric, Loss, OptimMethod\nfrom zoo.pipeline.api.net.utils import find_placeholders, to_bigdl_optim_method, find_tensors\nfrom zoo.pipeline.estimator import Estimator\nfrom zoo.util import nest\n\n\nif sys.version >= '3':\n long = int\n unicode = str\n\n\nclass IdentityCriterion(Criterion):\n def __init__(self):\n super(IdentityCriterion, self).__init__(None, \"float\")\n\n\nclass TFValidationMethod(JavaValue):\n def __init__(self, val_method, name, output_indices, label_indices):\n self.name = name\n self.val_method = val_method\n JavaValue.__init__(self, None, \"float\",\n val_method, name, output_indices, label_indices)\n\n\nclass StatelessMetric(JavaValue):\n def __init__(self, metric_name, idx, count_idx):\n self.name = metric_name\n self.idx = idx\n self.count_idx = count_idx\n JavaValue.__init__(self, None, \"float\", metric_name, idx, count_idx)\n\n\nclass BigDLMetric(object):\n def __init__(self, val_method, outputs, labels):\n self.val_method = val_method\n self.outputs = outputs\n self.labels = labels\n\n\nclass TFTrainingHelper(Layer):\n def __init__(self, path, config_proto, saver, meta, sess):\n self.saver = saver\n self.meta = meta\n self.export_dir = path\n self.sess = sess\n\n if config_proto is not None:\n import tensorflow as tf\n assert isinstance(config_proto, tf.ConfigProto), \\\n \"session_config should be a tf.ConfigProto\"\n config_proto.use_per_session_threads = True\n byte_arr = bytearray(config_proto.SerializeToString())\n else:\n byte_arr = None\n\n super(TFTrainingHelper, self).__init__(None, \"float\", path, byte_arr)\n\n def save_checkpoint(self):\n callZooFunc(self.bigdl_type, \"saveCheckpoint\",\n self.value)\n\n def get_weights_to_python(self):\n self.save_checkpoint()\n self.saver.restore(self.sess, os.path.join(self.export_dir, \"model\"))\n\n def load_checkpoint(self, path):\n callZooFunc(self.bigdl_type, \"loadZooCheckpoint\", self.value, path)\n self.get_weights_to_python()\n\n\ndef _to_operation_name(name):\n return name.split(\":\")[0]\n\n\ndef _to_floats(vs):\n return [float(v) for v in vs]\n\n\nclass TFModel(object):\n def __init__(self, training_helper_layer, criterion, val_methods):\n\n self.training_helper_layer = training_helper_layer\n self.criterion = criterion\n self.val_methods = val_methods\n\n @staticmethod\n def _expand_inputs(inputs, tensors_with_value, loss):\n additional_inputs = []\n additional_values = []\n inputs = nest.flatten(inputs)\n names = set([i.name for i in inputs])\n\n if tensors_with_value:\n for t, v in tensors_with_value.items():\n if t.name in names:\n msg = f\"tensor 
{t} already in inputs, cannot put it in tensor_with_value\"\n raise ValueError(msg)\n additional_inputs.append(t)\n additional_values.append(v)\n\n return inputs, additional_inputs, additional_values\n\n @staticmethod\n def _process_session_config(session_config):\n import tensorflow as tf\n if session_config is not None:\n\n assert isinstance(session_config, tf.ConfigProto), \\\n \"session_config should be a tf.ConfigProto\"\n session_config.use_per_session_threads = True\n return session_config\n\n @staticmethod\n def _process_grads(graph, grads):\n\n with graph.as_default():\n from zoo.util.tf import process_grad\n grads = [process_grad(grad) for grad in grads]\n return grads\n\n @staticmethod\n def _process_metrics(graph, metrics, real_batch_size):\n import tensorflow as tf\n outputs = [real_batch_size]\n val_methods = None\n if metrics is not None:\n idx = 1\n val_methods = []\n for metric_name in metrics:\n metric = metrics[metric_name]\n if tf.is_numeric_tensor(metric):\n outputs.append(metric)\n val_methods.append(StatelessMetric(metric_name, idx, 0))\n idx += 1\n else:\n outputs += metric.outputs\n with graph.as_default():\n val_labels = [tf.identity(v) for v in metric.labels]\n outputs += val_labels\n method = TFValidationMethod(metric.val_method,\n metric_name,\n list(range(idx, idx + len(metric.outputs))),\n list(range(idx + len(metric.outputs),\n idx + len(metric.outputs)\n + len(val_labels))))\n val_methods.append(method)\n idx += len(metric.outputs) + len(val_labels)\n\n outputs = [tf.to_float(output) for output in outputs]\n return outputs, val_methods\n\n @staticmethod\n def _process_variables(graph, variables, updates):\n import tensorflow as tf\n all_trainable_variables = variables\n\n name2idx = dict([(v.name, idx) for idx, v in enumerate(all_trainable_variables)])\n\n all_variables = graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n\n update_ops = graph.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n if updates is not None:\n update_ops += updates\n\n trainable_variables = [0] * len(all_trainable_variables)\n trainable_assigns = [0] * len(all_trainable_variables)\n trainable_variable_placeholders = [0] * len(all_trainable_variables)\n extra_variables = []\n extra_variable_assigns = []\n extra_variable_assign_placeholders = []\n for v in all_variables:\n p = tf.placeholder(dtype=v.dtype, shape=v.shape)\n a = tf.assign(v, p)\n\n # special treatment for ResourceVariable\n if v.op.type == \"VarHandleOp\":\n v_float_value = tf.to_float(v.read_value())\n else:\n v_float_value = tf.to_float(v)\n\n if v.name in name2idx:\n trainable_variables[name2idx[v.name]] = v_float_value\n trainable_assigns[name2idx[v.name]] = a\n trainable_variable_placeholders[name2idx[v.name]] = p\n else:\n extra_variables.append(v_float_value)\n extra_variable_assigns.append(a)\n extra_variable_assign_placeholders.append(p)\n\n extra_variable_assign = tf.group(*extra_variable_assigns)\n trainable_assign = tf.group(*trainable_assigns)\n update_op = tf.group(update_ops)\n\n return trainable_variables, trainable_variable_placeholders, trainable_assign, \\\n extra_variables, extra_variable_assign_placeholders, \\\n extra_variable_assign, update_op\n\n @staticmethod\n def _save_to_dir(folder, sess, graph,\n metric_tensors,\n batch_size_tensor,\n loss_tensor, inputs, labels, predictions,\n trainable_variables,\n trainable_variable_placeholders,\n trainable_assign,\n extra_variables,\n extra_variable_assign_placeholders,\n extra_variable_assign,\n grads, update_op, train_op,\n additional_inputs,\n 
additional_values):\n import tensorflow as tf\n from tensorflow import gfile\n saver = tf.train.Saver()\n if not os.path.isdir(folder):\n os.makedirs(folder)\n saver.save(sess, os.path.join(folder, \"model\"), write_meta_graph=False)\n\n meta = {\n \"inputs\": [i.name for i in inputs],\n \"input_types\": [i.dtype.as_datatype_enum for i in inputs],\n \"additional_inputs\": [i.name for i in additional_inputs],\n \"additional_input_types\": [i.dtype.as_datatype_enum for i in additional_inputs],\n \"labels\": [l.name for l in labels],\n \"label_types\": [i.dtype.as_datatype_enum for i in labels],\n \"predictions\": [t.name for t in predictions] if predictions else [],\n \"metric_tensors\": [t.name for t in metric_tensors],\n \"batch_size_tensor\": batch_size_tensor.name,\n \"loss_tensor\": loss_tensor.name,\n \"variables\": [v.name for v in trainable_variables],\n \"variable_types\": [v.dtype.as_datatype_enum for v in trainable_variable_placeholders],\n \"variable_assign_placeholders\": [v.name for v in trainable_variable_placeholders],\n \"assign_variable_op\": trainable_assign.name,\n \"extra_variables\": [v.name for v in extra_variables],\n \"extra_variable_types\": [v.dtype.as_datatype_enum for v\n in extra_variable_assign_placeholders],\n \"extra_variable_assign_placeholders\": [p.name for p in\n extra_variable_assign_placeholders],\n \"assign_extra_variable_op\": extra_variable_assign.name,\n \"grad_variables\": [g.name for g in grads],\n \"update_op\": update_op.name,\n \"restore_op\": saver.saver_def.restore_op_name,\n \"restore_path_placeholder\": saver.saver_def.filename_tensor_name,\n \"save_op\": _to_operation_name(saver.saver_def.save_tensor_name),\n \"save_path_placeholder\": saver.saver_def.filename_tensor_name,\n \"default_tensor_value\": [_to_floats(v) for v in additional_values],\n \"init_op\": tf.tables_initializer().name\n }\n\n if train_op is not None:\n meta[\"train_op\"] = train_op.name\n\n with open(os.path.join(folder, \"training_meta.json\"), \"w\") as f:\n f.write(json.dumps(meta))\n\n with gfile.GFile(os.path.join(folder, \"model.meta\"), \"wb\") as f:\n f.write(graph.as_graph_def().SerializeToString())\n\n return meta, saver\n\n @staticmethod\n def export(model_dir, loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,\n tensors_with_value, metrics, updates, train_op=None):\n import tensorflow as tf\n with graph.as_default():\n batch_size_tensor = tf.to_float(tf.shape(inputs[0])[0])\n inputs, additional_inputs, additional_values = \\\n TFModel._expand_inputs(inputs, tensors_with_value, loss_tensor)\n metric_tensors, val_methods = TFModel._process_metrics(graph, metrics, batch_size_tensor)\n grads = TFModel._process_grads(graph, grads)\n\n trainable_variables, trainable_variable_placeholders, trainable_assign, \\\n extra_variables, extra_variable_assign_placeholders, \\\n extra_variable_assign, update_op = \\\n TFModel._process_variables(graph, variables, updates)\n\n meta, saver = \\\n TFModel._save_to_dir(model_dir, sess, graph,\n metric_tensors,\n batch_size_tensor,\n loss_tensor, inputs, labels, predictions,\n trainable_variables,\n trainable_variable_placeholders,\n trainable_assign,\n extra_variables,\n extra_variable_assign_placeholders,\n extra_variable_assign,\n grads, update_op, train_op,\n additional_inputs,\n additional_values)\n return meta, saver, val_methods\n\n @staticmethod\n def create(loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,\n tensors_with_value, session_config, metrics, updates,\n model_dir, 
train_op=None):\n\n if model_dir is None:\n model_dir = tempfile.mkdtemp()\n else:\n if not os.path.isdir(model_dir):\n os.makedirs(model_dir)\n\n meta, saver, val_methods = TFModel.export(model_dir, loss_tensor, sess,\n inputs, labels, predictions, grads, variables,\n graph, tensors_with_value, metrics, updates,\n train_op)\n\n training_helper_layer = TFTrainingHelper(model_dir,\n session_config, saver, meta, sess)\n\n criterion = IdentityCriterion()\n\n return TFModel(training_helper_layer, criterion, val_methods)\n\n\nclass TFOptimizer:\n def __init__(self, tf_model, optim_method,\n sess=None, dataset=None,\n clip_norm=None, clip_value=None,\n model_dir=None):\n \"\"\"\n TFOptimizer is used for distributed training of TensorFlow\n on Spark/BigDL.\n\n Note that if grads and variables are not None, then they need to be sorted by name\n if you want to use multiple optimization methods for a TensorFlow model according to\n variable names.\n\n :param loss: The loss tensor of the TensorFlow model, should be a scalar\n :param optim_method: the optimization method to be used, such as bigdl.optim.optimizer.Adam\n :param sess: the current TensorFlow Session, if you want to used a pre-trained model, you\n should use the Session to load the pre-trained variables and pass it to TFOptimizer.\n \"\"\"\n\n self.optim_method = optim_method\n self.sess = sess\n self.dataset = dataset\n\n self.clip_norm = clip_norm\n if clip_value is not None and not isinstance(clip_value, tuple):\n raise ValueError(\"The clip_value argument should be a tuple (min_value, max_value)\")\n self.clip_constant = clip_value\n\n if self.dataset.batch_size <= 0:\n raise ValueError(\"You should set batch_size instead of batch_per_thread for training\")\n\n self.model_dir = model_dir\n\n self.tf_model = tf_model\n\n batch_size = self.dataset.batch_size\n\n self.train_data = self.dataset.get_training_data()\n self.val_data = self.dataset.get_validation_data()\n\n self.batch_size = batch_size\n\n self.estimator = Estimator(self.tf_model.training_helper_layer,\n self.optim_method,\n self.model_dir)\n\n if self.clip_norm:\n self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)\n if self.clip_constant:\n min_value, max_value = self.clip_constant\n self.estimator.set_constant_gradient_clipping(min_value, max_value)\n\n def load_checkpoint(self, path, version):\n # todo make version optional\n model_path = os.path.join(path, \"model.{}\".format(version))\n optim_method_path = os.path.join(path, \"optimMethod-TFParkTraining.{}\".format(version))\n self.tf_model.training_helper_layer.load_checkpoint(model_path)\n self.optim_method = OptimMethod.load(optim_method_path)\n self.estimator = Estimator(self.tf_model.training_helper_layer,\n self.optim_method,\n self.model_dir)\n if self.clip_norm:\n self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)\n if self.clip_constant:\n min_value, max_value = self.clip_constant\n self.estimator.set_constant_gradient_clipping(min_value, max_value)\n\n @staticmethod\n def _get_or_create_session(session):\n import tensorflow as tf\n if session is None:\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n else:\n sess = session\n return sess\n\n @staticmethod\n def _get_dataset_from_loss(loss):\n import tensorflow as tf\n all_required_inputs = find_placeholders([loss])\n dataset = tf.get_collection(all_required_inputs[0].name)[0]\n return dataset\n\n @staticmethod\n def _get_vars_grads(loss):\n import tensorflow as tf\n grads_vars = 
tf.train.GradientDescentOptimizer(0).compute_gradients(loss)\n grads_vars.sort(key=lambda grad_var: grad_var[1].name)\n variables = []\n grads = []\n for (grad, var) in grads_vars:\n if grad is not None:\n variables.append(var)\n grads.append(grad)\n return grads, variables\n\n @staticmethod\n def _get_vars_grads_from_train_op(train_op):\n def predicate(t):\n return t.name.split(\"/\")[-1].startswith(\"zoo_identity_op_for_grad\")\n grads = find_tensors([train_op], predicate)\n grad_ops = [grad.op for grad in grads]\n variables = []\n for grad in grad_ops:\n var = list(grad.control_inputs)[0]\n if var.name == \"VarHandleOp\":\n variables.append(var)\n else:\n variables.append(list(var.outputs)[0])\n # variables = [grad.op.control_inputs[0].outputs[0] for grad in grads]\n return grads, variables\n\n @classmethod\n def from_train_op(cls, train_op, loss, *, inputs=None, labels=None, metrics=None, updates=None,\n sess=None, dataset=None, tensor_with_value=None, session_config=None,\n model_dir=None):\n\n sess = TFOptimizer._get_or_create_session(sess)\n grads, variables = TFOptimizer._get_vars_grads_from_train_op(train_op)\n if dataset is None:\n dataset = TFOptimizer._get_dataset_from_loss(loss)\n _ = dataset.tensors # trigger create tensors if not available\n dataset_inputs = dataset._original_tensors\n if isinstance(dataset_inputs, tuple) and len(dataset_inputs) == 2:\n if inputs is None:\n inputs = dataset_inputs[0]\n\n if labels is None:\n labels = dataset_inputs[1]\n else:\n if inputs is None:\n inputs = dataset_inputs\n\n if labels is None:\n labels = []\n\n inputs = nest.flatten(inputs)\n labels = nest.flatten(labels)\n from zoo.tfpark.zoo_optimizer import FakeOptimMethod\n return TFOptimizer._from_grads(loss=loss, sess=sess, inputs=inputs, labels=labels,\n grads=grads,\n variables=variables, dataset=dataset, metrics=metrics,\n tensor_with_value=tensor_with_value,\n optim_method=FakeOptimMethod(),\n session_config=session_config, updates=updates,\n model_dir=model_dir, train_op=train_op)\n\n @classmethod\n def _from_grads(cls, loss, sess, inputs, labels, grads, variables, dataset, optim_method=None,\n clip_norm=None, clip_value=None,\n metrics=None, tensor_with_value=None, session_config=None,\n model_dir=None, updates=None, train_op=None):\n graph = loss.graph\n if metrics is None:\n metrics = {}\n\n tf_model = TFModel.create(loss, sess, inputs, labels, [], grads, variables, graph,\n tensor_with_value, session_config, metrics,\n updates, model_dir=None, train_op=train_op)\n return cls(tf_model, optim_method, sess=sess, dataset=dataset,\n clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)\n\n @classmethod\n def from_loss(cls, loss, optim_method, session=None, inputs=None, dataset=None,\n val_outputs=None, val_labels=None, val_method=None,\n clip_norm=None, clip_value=None, metrics=None,\n tensor_with_value=None, session_config=None, model_dir=None, updates=None):\n \"\"\"\n Create a TFOptimizer from a TensorFlow loss tensor.\n The loss tensor must come from a TensorFlow graph that only takes TFDataset.tensors and\n the tensors in `tensor_with_value` as inputs.\n :param loss: The loss tensor of the TensorFlow model, should be a scalar\n :param optim_method: the optimization method to be used, such as bigdl.optim.optimizer.Adam\n :param session: the current TensorFlow Session, if you want to used a pre-trained model,\n you should use the Session to load the pre-trained variables and pass it to TFOptimizer.\n :param val_outputs: the validation output TensorFlow tensor to be 
used by val_methods\n :param val_labels: the validation label TensorFlow tensor to be used by val_methods\n :param val_method: the BigDL val_method(s) to be used.\n :param clip_norm: float >= 0. Gradients will be clipped when their L2 norm exceeds\n this value.\n :param clip_value: float >= 0. Gradients will be clipped when their absolute value\n exceeds this value.\n :param metrics: a dictionary. The key should be a string representing the metric's name\n and the value should be the corresponding TensorFlow tensor, which should be a scalar.\n :param tensor_with_value: a dictionary. The key is TensorFlow tensor, usually a\n placeholder, the value of the dictionary is a tuple of two elements. The first one of\n the tuple is the value to feed to the tensor in training phase and the second one\n is the value to feed to the tensor in validation phase.\n :return: a TFOptimizer\n \"\"\"\n sess = TFOptimizer._get_or_create_session(session)\n grads, variables = TFOptimizer._get_vars_grads(loss)\n\n if dataset is None and inputs is None:\n dataset = TFOptimizer._get_dataset_from_loss(loss)\n inputs = dataset._original_tensors\n else:\n if inputs is None:\n raise ValueError(\"please specify inputs\")\n _ = dataset.tensors # trigger creating placeholders\n\n if isinstance(inputs, tuple) and len(inputs) == 2:\n inputs, labels = inputs\n else:\n labels = []\n\n inputs = nest.flatten(inputs)\n labels = nest.flatten(labels)\n\n if clip_value is not None:\n if isinstance(clip_value, float) or isinstance(clip_value, int):\n if clip_value <= 0:\n ValueError(\"The clip_value argument should be positive number\")\n clip_value = (-float(clip_value), float(clip_value))\n\n if not isinstance(clip_value, tuple):\n raise ValueError(\"The clip_value argument should be\" +\n \" a positive float/int which clips to\" +\n \" (-clip_value, clip_value); \" +\n \"or a tuple which clips to (min_value, max_value)\")\n\n if val_method is not None:\n val_methods = to_list(val_method)\n if metrics is None:\n metrics = {}\n\n for i, method in enumerate(val_methods):\n metrics['bigdl_metric_' + str(i)] = BigDLMetric(method, val_outputs, val_labels)\n\n return TFOptimizer._from_grads(loss, sess, inputs, labels, grads, variables, dataset,\n optim_method, clip_norm, clip_value,\n metrics, tensor_with_value, session_config,\n model_dir, updates)\n\n @staticmethod\n def export_training_model(export_dir, loss, sess, inputs, labels=None, predictions=None,\n metrics=None, tensor_with_value=None, updates=None):\n\n grads, variables = TFOptimizer._get_vars_grads(loss)\n\n TFModel.export(export_dir, loss, sess, inputs, labels, predictions, grads, variables,\n loss.graph, tensor_with_value, metrics, updates)\n logging.info(\"Exported TensorFlow model in {} for training\".format(export_dir))\n\n @staticmethod\n def _shape_match(model_shape, dataset_shape):\n\n for i in range(len(dataset_shape)):\n if dataset_shape[i].value is None:\n return model_shape[i].value is None\n else:\n return dataset_shape[i].value == model_shape[i].value or \\\n model_shape[i].value is None\n\n @classmethod\n def from_keras(cls, keras_model, dataset,\n session_config=None, model_dir=None, metrics=None, optimizer=None):\n \"\"\"\n Create a TFOptimizer from a tensorflow.keras model. 
The model must be compiled.\n :param keras_model: the tensorflow.keras model, which must be compiled.\n :param dataset: a TFDataset\n :return:\n \"\"\"\n import tensorflow.keras.backend as K\n\n model_inputs = keras_model.inputs\n\n if hasattr(keras_model, \"targets\"):\n model_targets = keras_model.targets\n else:\n model_targets = keras_model._targets\n\n # target can be None if loss is None\n model_targets = list(filter(lambda x: x is not None, model_targets))\n\n flatten_inputs = nest.flatten(dataset.feature_tensors)\n assert len(model_inputs) == len(flatten_inputs), \\\n (\"the keras model and TFDataset should have the same number of tensors\" +\n \" keras model has {} inputs \" +\n \"while TFDataset has {} inputs\").format(len(model_inputs),\n len(flatten_inputs))\n for i in range(len(flatten_inputs)):\n if not TFOptimizer._shape_match(model_inputs[i].shape, flatten_inputs[i].shape):\n raise ValueError((\"The {}th input in keras model {}\"\n \" does not match the TFDataset\"\n \"input {}\").format(i,\n model_inputs[i],\n flatten_inputs[i]))\n\n flatten_targets = nest.flatten(dataset.label_tensors)\n assert len(model_targets) == len(flatten_targets), \\\n (\"the keras model and TFDataset should have the same number of tensors\" +\n \" keras model has {} targets \" +\n \"while TFDataset has {} labels\").format(len(model_targets),\n len(flatten_inputs))\n # todo check targets shape, currently checking target shape will\n # cause too much false alarm.\n\n loss = keras_model.total_loss\n variables = keras_model._collected_trainable_weights\n variables.sort(key=lambda variable: variable.name)\n keras_optimizer = keras_model.optimizer\n\n from zoo.tfpark.zoo_optimizer import get_gradients_for_keras\n grads = get_gradients_for_keras(keras_optimizer, loss, variables)\n grads_and_vars = list(zip(grads, variables))\n import tensorflow.python.keras.optimizers as koptimizers\n if isinstance(keras_optimizer, koptimizers.TFOptimizer):\n # work around keras TFOptimzier bug\n train_op = keras_optimizer.optimizer.apply_gradients(grads_and_vars)\n else:\n train_op = keras_optimizer.apply_gradients(grads_and_vars)\n\n sess = K.get_session()\n\n if keras_model.metrics and (dataset.get_validation_data() is not None):\n if isinstance(keras_model.metrics, dict):\n raise ValueError(\n \"different metrics for different outputs are not supported right now\")\n\n if len(keras_model.outputs) > 1:\n if not all([name.endswith(\"loss\") for name in keras_model.metrics_names]):\n raise ValueError(\"metrics (except loss) for multi-head model is not supported\")\n else:\n bigdl_val_methods = [Loss()]\n val_outputs = keras_model.outputs\n val_labels = model_targets\n else:\n bigdl_val_methods = \\\n [to_bigdl_metric(m, keras_model.loss) for m in keras_model.metrics_names]\n val_outputs = keras_model.outputs\n val_labels = model_targets\n else:\n val_outputs = None\n val_labels = None\n bigdl_val_methods = None\n\n tensor_with_value = {\n K.learning_phase(): [True, False]\n }\n\n updates = []\n\n updates += keras_model.get_updates_for(None)\n # Conditional updates relevant to this model\n updates += keras_model.get_updates_for(keras_model.inputs)\n\n if bigdl_val_methods is not None:\n val_methods = to_list(bigdl_val_methods)\n bigdl_metrics = {}\n for i, method in enumerate(val_methods):\n bigdl_metrics['bigdl_metric_' + str(i)] = BigDLMetric(method,\n val_outputs,\n val_labels)\n if metrics is None:\n metrics = bigdl_metrics\n else:\n metrics.update(bigdl_metrics)\n\n if optimizer is not None:\n clip_norm = None\n 
clip_value = None\n if hasattr(keras_optimizer, 'clipnorm'):\n clip_norm = keras_optimizer.clipnorm\n if hasattr(keras_optimizer, 'clipvalue'):\n clip_value = (-keras_optimizer.clipvalue, keras_optimizer.clipvalue)\n tf_model = TFModel.create(loss, sess, model_inputs, model_targets, keras_model.outputs,\n grads, variables, loss.graph,\n tensor_with_value, session_config, metrics,\n updates, model_dir=None)\n\n return cls(tf_model, optimizer, sess=sess, dataset=dataset,\n clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)\n\n return cls.from_train_op(train_op, loss, inputs=model_inputs, labels=model_targets,\n metrics=metrics, updates=updates, sess=sess, dataset=dataset,\n tensor_with_value=tensor_with_value, session_config=session_config,\n model_dir=model_dir)\n\n def set_constant_gradient_clipping(self, min_value, max_value):\n \"\"\"\n Configure constant clipping settings.\n\n :param min_value: the minimum value to clip by\n :param max_value: the maxmimum value to clip by\n \"\"\"\n self.estimator.set_constant_gradient_clipping(min_value, max_value)\n\n def set_gradient_clipping_by_l2_norm(self, clip_norm):\n \"\"\"\n Configure L2 norm clipping settings.\n :param clip_norm: gradient L2-Norm threshold\n \"\"\"\n self.estimator.set_l2_norm_gradient_clipping(clip_norm)\n\n def optimize(self, end_trigger=None, checkpoint_trigger=None):\n \"\"\"\n Run the training loop of the this optimizer\n :param end_trigger: BigDL's Trigger to indicate when to stop the training.\n :param checkpoint_trigger: When to save a checkpoint and evaluate model.\n \"\"\"\n if end_trigger is None:\n end_trigger = MaxEpoch(1)\n\n if checkpoint_trigger is None:\n checkpoint_trigger = EveryEpoch()\n\n if self.tf_model.val_methods and self.val_data is not None:\n self.estimator.train_minibatch(train_set=self.train_data,\n criterion=self.tf_model.criterion,\n end_trigger=end_trigger,\n checkpoint_trigger=checkpoint_trigger,\n validation_set=self.val_data,\n validation_method=self.tf_model.val_methods)\n else:\n self.estimator.train_minibatch(train_set=self.train_data,\n criterion=self.tf_model.criterion,\n end_trigger=end_trigger,\n checkpoint_trigger=checkpoint_trigger)\n\n self.tf_model.training_helper_layer.get_weights_to_python()\n"
] |
[
[
"tensorflow.shape",
"tensorflow.get_collection",
"tensorflow.keras.backend.get_session",
"tensorflow.keras.backend.learning_phase",
"tensorflow.assign",
"tensorflow.is_numeric_tensor",
"tensorflow.placeholder",
"tensorflow.identity",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.to_float",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.group",
"tensorflow.tables_initializer"
]
] |
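Note: the gradient-clipping handoff near the end of the `from_keras` code above reduces to a small attribute check on the Keras optimizer. Below is a minimal, framework-free sketch of that pattern only; `_StandInOptimizer` is a hypothetical stand-in so the snippet runs without TensorFlow or Analytics Zoo installed, and it is not part of either API.

# Sketch of the clipnorm/clipvalue extraction pattern used in from_keras above.
class _StandInOptimizer:
    def __init__(self, clipnorm=None, clipvalue=None):
        # Only set the attributes that were actually configured, mirroring how a
        # Keras optimizer exposes clipnorm/clipvalue when they are passed in.
        if clipnorm is not None:
            self.clipnorm = clipnorm
        if clipvalue is not None:
            self.clipvalue = clipvalue


def extract_clipping(keras_optimizer):
    """Return (clip_norm, clip_value): a float or None, and a symmetric
    (min, max) tuple or None, the form the code above hands to TFModel."""
    clip_norm = getattr(keras_optimizer, "clipnorm", None)
    clip_value = None
    if hasattr(keras_optimizer, "clipvalue"):
        v = keras_optimizer.clipvalue
        clip_value = (-v, v)
    return clip_norm, clip_value


if __name__ == "__main__":
    print(extract_clipping(_StandInOptimizer(clipnorm=5.0)))    # (5.0, None)
    print(extract_clipping(_StandInOptimizer(clipvalue=0.5)))   # (None, (-0.5, 0.5))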
tunbehaun273/crabmeyerpy
|
[
"d36fe3ed9b8591bb92bd9915996dd21d79fc4dad"
] |
[
"crabmeyerpy/ssc.py"
] |
[
"import yaml\nfrom scipy.special import kv # Bessel function\nfrom scipy.integrate import simps\nfrom scipy.interpolate import interp1d\n\n# imports to speed up integrations:\nfrom numpy import meshgrid, linspace, ones, zeros\nfrom numpy import log, exp, pi, sqrt, power, tan\n\n# import functions for photon fields\nfrom .photonfields import *\nfrom astropy import units as u\nfrom astropy import constants as c\nfrom astropy.cosmology import Planck15 as cosmo\n\n# define conversion factors\nkpc2cm = u.kpc.to('cm')\neV2Hz = 1. / (c.h.value * u.J.to('eV'))\neV2erg = u.eV.to('erg')\nm_e_eV = (c.m_e * c.c**2.).to('eV').value\narcmin2rad = u.arcmin.to('rad')\n\n\ndef ic_kernel(nu, gamma, e):\n \"\"\"\n Calculate the full inverse Compton Kernel, unitless\n\n Parameters\n ----------\n nu: array-like\n final photon frequency in Hz\n gamma: array-like\n gamma factor of electrons\n e: array-like\n initial photon energy in eV\n\n Returns\n -------\n Inner IC kernel including KN limit\n\n Notes\n -----\n gamma, e, and e1 need to have same shape.\n See Blumenthal & Gould 1970, Eq. 2.47 - 2.51\n \"\"\"\n q = nu / eV2Hz / 4. / gamma ** 2. / e / (1. - nu / eV2Hz / m_e_eV / gamma)\n\n m = (q <= 1.) & (q >= 1. / 4. / gamma ** 2.)\n\n f = zeros(q.shape)\n\n f[m] = 2. * q[m] * log(q[m]) + (1. + 2. * q[m]) * (1. - q[m]) + \\\n (4. * e[m] / m_e_eV * gamma[m] * q[m]) ** 2. \\\n / 2. / (1. + 4. * e[m] / m_e_eV * gamma[m] * q[m]) \\\n * (1. - q[m])\n\n return f\n\n\nclass CrabSSC(object):\n def __init__(self, config, n_el, B=124.e-6, d=2., nu_sync_min=1e7, nu_sync_max=1e30):\n \"\"\"\n Initialize the class\n\n Parameters\n ----------\n config: str or dict\n path to config file with model parameters.\n Should contain three dictionaries:\n - params_n_el: parameters for the electron density\n - params_n_seed: parameters for the photon density\n\n n_el: function pointer\n electron density spectrum. Should be called with n_el(gamma, **params_n_el)\n\n {options}\n B: float\n magnetic field of the nebula in G\n\n d: float\n distance to the nebula in kpc\n\n nu_sync_min: float\n minimum frequency considered for syncrotron radiation\n\n nu_sync_max: float\n maximum frequency considered for syncrotron radiation\n \"\"\"\n\n # read in config file\n if isinstance(config, dict):\n conf = config\n else:\n with open(config) as f:\n conf = yaml.safe_load(f)\n\n self._params_n_el = conf['params_n_el']\n self._params_n_seed = conf['params_n_seed']\n\n self._nu_sync_min = nu_sync_min\n self._nu_sync_max = nu_sync_max\n self._n_el = n_el\n self._B = B\n self._d = d\n\n # Interpolate x F(x) of synchrotron function,\n # see e.g. Fig. 
13 in Blumenthal & Gould 1970\n steps = 100\n self.__start = -40 # upper limit for x F (x) integration\n self.__end = 20 # upper limit for x F (x) integration\n\n # build a 2d array for interpolation\n logx = np.linspace(self.__start, self.__end+1, steps)\n\n for i, s in enumerate(logx):\n if not i:\n logx_arr = np.linspace(s, self.__end, steps)\n else:\n logx_arr = np.vstack((logx_arr, np.linspace(s, self.__end, steps)))\n\n xF = np.exp(logx) * simps(kv(5./3., np.exp(logx_arr)) * np.exp(logx_arr), logx_arr, axis=1)\n xF[xF < 1e-40] = np.full(np.sum(xF < 1e-40), 1e-40)\n self.log_xF = interp1d(logx, np.log(xF))\n\n self.FSyncInterp = None\n\n return\n\n @property\n def params_n_el(self):\n return self._params_n_el\n\n @property\n def params_n_seed(self):\n return self._params_n_seed\n\n @property\n def n_el(self):\n return self._n_el\n\n @property\n def B(self):\n return self._B\n\n @property\n def d(self):\n return self._d\n\n @n_el.setter\n def n_el(self, n_el):\n self._n_el = n_el\n\n @B.setter\n def B(self, B):\n self._B = B\n\n @d.setter\n def d(self, d):\n self._d = d\n\n def sync(self, nu, g_steps=50, gmin=None, gmax=None):\n \"\"\"\n Spectral synchrotron luminosity F_nu in erg/s/Hz/cm^2 as integral over electron distribution\n\n Parameters:\n -----------\n nu: array-like\n frequencies in Hz\n\n {options}\n\n g_steps: int\n number of integration steps\n\n gmin: float or None\n minimum lorentz factor\n\n gmax: float or None\n maximum lorentz factor\n\n Returns:\n --------\n array with spectral luminosity F_nu density at frequency nu\n \"\"\"\n if gmin is None:\n gmin = self._params_n_el['gradio_min']\n if gmax is None:\n gmax = self._params_n_el['gwind_max']\n\n # 2d grid for Freq and gamma factors\n nn, gg = meshgrid(nu, linspace(log(gmin), log(gmax), g_steps), indexing='ij')\n\n # x = nu / nu_c as 2d grid,\n # nu_c: critical frequency for B in G; Longair vol.2 p. 261\n nu_c = 4.199e10 * self._B * u.G.to('T') * exp(gg)**2.\n x = nn / nu_c\n\n # define a mask for integration\n m = (log(x) > self.__start) & (log(x) < self.__end)\n result = np.full(x.shape, 1e-40)\n\n # synchrotron function\n result[m] = exp(self.log_xF(log(x[m])))\n\n # multiply with electron spectrum\n result *= self._n_el(exp(gg), **self._params_n_el)\n\n # integrate over gamma\n result = simps(result * exp(gg), gg, axis=1)\n\n # pre factors: sqrt(3) * e^3 / mc^2 with B in G, see e.g. B&G 4.44\n # this has then units Fr^3 s^2 B g-1 cm-2\n # When you use Fr G s^2 / (cm g) = 1 you get\n # units Fr^2 / cm and with Fr = cm^3/2 g^1/2 s^-1\n # this becomes g cm^2 s^2 = erg = erg / Hz / s.\n # The pre factor is then consistent with 18.36 in Longair Vol.2\n # since he calculates in W and for B in Tesla\n result *= ((c.e.esu**3.) / (c.m_e.cgs * c.c.cgs**2.) * sqrt(3.)).value\n # this is equal to 2.344355730864404e-22\n\n # average over all pitch angles gives 2/3\n result *= self._B * sqrt(2.0/3.0)\n\n # divide by the distance squared\n # change from intrinsic luminosity to flux\n result /= 4. 
* pi * self._d * self._d * kpc2cm * kpc2cm\n\n # returns value in unites erg/s/Hz/cm^2 \n return result\n\n def interp_sync_init(self, g_steps=100):\n \"\"\"\n Initialize interpolation of Spectral synchrotron luminosity F_nu in erg/s/Hz/cm^2 for given electron spectrum,\n in log - log space.\n Sets self.FSyncInterp function pointer.\n\n Parameters\n ----------\n g_steps: int,\n number of integration steps\n \"\"\"\n nu = np.logspace(np.log10(self._nu_sync_min), np.log10(self._nu_sync_max), 200)\n F_sync = self.sync(nu, g_steps=g_steps)\n self.FSyncInterp = interp1d(log(nu), log(F_sync))\n\n def grey_body_old(self, nu):\n \"\"\"\n Return grey body nu F_nu spectrum in erg/s/cm^2\n\n Parameters\n ----------\n nu: array like\n array with frequencies in Hz\n\n Returns\n -------\n array with grey body flux in erg/s/cm^2\n\n Note\n ----\n TODO: I don't think that this is correct.\n TODO: From the photon density you should simply\n TODO: multiply with (h nu) * c / 4 pi to get the specific intensity\n \"\"\"\n\n # photons dens of black body in photons/eV/cm^3\n result = black_body(nu / eV2Hz, self._params_n_seed['dust_T'])\n result *= self._params_n_seed['dust_norm']\n\n # this is in units of photons/cm^3/eV \n # assume an emitting volume, using the scale length\n # suggested by Hillas: 1.3 arcmin \n # now this is in units of photons / eV\n result *= 4.0 / 3.0 * pi * power(tan(self._params_n_seed['dust_extension'] * arcmin2rad)\n * self._d * kpc2cm, 3.)\n\n # calculate erg per s per cm**2 \n result *= (nu * nu / eV2Hz / eV2Hz) * eV2erg\n result /= 4.0 * pi * (self._params['d'] * kpc2cm * self._d * kpc2cm)\n return result\n\n def grey_body(self, nu):\n \"\"\"\n Return grey body nu F_nu spectrum in erg/s/cm^2\n\n Parameters\n ----------\n nu: array like\n array with frequencies in Hz\n\n Returns\n -------\n array with grey body flux in erg/s/cm^2/Hz\n \"\"\"\n\n # photons dens of black body in photons/eV/cm^3\n result = black_body(nu / eV2Hz, self._params_n_seed['dust_T'])\n result *= self._params_n_seed['dust_norm']\n\n # change to dens in photon / Hz / cm^3, dn / d nu = dn / de * de / d nu = dn / de * h\n result *= c.h.to('eV s').value\n\n # multiply with energy to get energy density per Hz\n result *= nu * c.h.to('erg s').value\n\n # multiply with c / 4 pi to get energy flux in erg / s / cm^2 / Hz\n result *= c.c.cgs.value / 4. / pi\n\n # rescale this from sphere of emitting region\n # suggested by Hillas: 1.3 arcmin to distance of the Crab\n # 4 pi tan(theta) d ** 2 / 4 pi d**2 = tan(theta)\n result *= tan(self._params_n_seed['dust_extension'] * arcmin2rad)\n return result\n\n def sync_phot_dens(self, eps, gamma):\n \"\"\"\n Calculate synchrotron photon number density of Crab nebula according to Hillas et al. (1998)\n\n Parameters\n ----------\n eps: array-like\n n-dim array with energy of photons, in eV\n gamma: array\n m-dim array with gamma factor of electrons\n\n Returns\n -------\n m x n-dim array with photon densities in photons / eV / cm^3\n\n Notes\n -----\n See https://arxiv.org/pdf/1008.4524.pdf Eq. 
(A3)\n \"\"\"\n\n # eps is in units of eV \n # get synchrotron luminosity in units of erg/s/cm^2/Hz, F_nu\n S = np.full(eps.shape[0], 1e-40)\n\n # include synchrotron photon density\n if self._params_n_seed['ic_sync']:\n\n # initialize synchrotron interpolation\n if self.FSyncInterp is None:\n self.interp_sync_init()\n\n # mask for frequencies\n m = (log(eps * eV2Hz) > log(self._nu_sync_min)) & \\\n (log(eps * eV2Hz) < log(self._nu_sync_max))\n\n # calculate synchrotron intergral from interpolation\n S[m] = exp(self.FSyncInterp(log(eps * eV2Hz)[m]))\n\n # conversion:\n # Now in units of erg/s/cm^2\n # nu F_nu\n S *= eps * eV2Hz\n\n # convert in units of photons/cm^2/s\n #S /= (eps * u.eV.to('J') / c.h.value) * u.eV.to('erg')\n S /= (eps * eV2erg)\n\n # total production rate of photons in units of 1/s */\n S *= (4.0 * pi * (self._d * kpc2cm)**2.)\n\n # calculate the scale length of the electrons \"seeing\" the photons according to Hillas et al. (1998)\n rho = zeros(gamma.shape)\n m = gamma * m_e_eV / 1e9 < 34.\n rho[m] = tan(1.35 * arcmin2rad) * self._d * kpc2cm\n\n extension = 0.15 + 1.2*power(gamma[~m] * m_e_eV / 34. / 1e9, -0.17)\n rho[~m] = tan(extension * arcmin2rad) * self._d * kpc2cm\n\n # calculate scale length of photon density in the nebular\n sigma = zeros(eps.shape)\n m = eps < 0.02\n sigma[m] = tan(1.35 * arcmin2rad) * self._d * kpc2cm\n extension = 0.16 + 1.19 * power(eps[~m]/0.02, -0.09)\n sigma[~m] = tan(extension * arcmin2rad) * self._d * kpc2cm\n\n # Add Dust Component and line emission\n if self._params_n_seed['ic_dust']:\n S_dust = self.grey_body(eps * eV2Hz)\n S_dust *= eps * eV2Hz\n S_dust /= (eps * eV2erg)\n S_dust *= (4.0 * pi * (self._d * kpc2cm)**2.)\n # calculate scale length of photon density in the nebular\n sigma_dust = tan(self._params_n_seed['dust_extension'] * arcmin2rad) * self._d * kpc2cm\n\n # TODO: check if this combination is the right way to do it\n # TODO: or if the overlap has to be calculated differently\n # calculate photon density in photons/cm**3/eV\n if len(sigma.shape) == 1 and not sigma.shape[0] == rho.shape[0]:\n ss, rr = meshgrid(sigma, rho)\n S, _ = meshgrid(S, rho)\n ee, _ = meshgrid(eps, gamma)\n S /= (4.0 * pi * c.c.cgs.value * (ss * ss + rr * rr))\n\n if self._params_n_seed['ic_dust']:\n sd, _ = meshgrid(sigma_dust, rho)\n S_dust, _ = meshgrid(S_dust, rho)\n S_dust /= (4.0 * pi * c.c.cgs.value * (sd * sd + rr * rr))\n S += S_dust\n\n S /= ee\n else:\n S /= (4.0 * pi * c.c.cgs.value * (sigma * sigma + rho * rho))\n if self._params_n_seed['ic_dust']:\n S_dust /= (4.0 * pi * c.c.cgs.value * (sigma_dust * sigma_dust + rho * rho))\n S += S_dust\n S /= eps\n\n return S\n\n def ic(self, nu, g_steps=200, e_steps=90):\n \"\"\"\n Spectral luminosity F_nu in erg/s/Hz/cm^2 for inverse Compton scattering.\n\n Parameters:\n -----------\n nu: array-like\n n-dim array with frequencies in Hz\n\n {options}\n\n g_steps: int\n number of integration steps for gamma\n e_steps: int\n number of integration steps for energy\n\n Returns:\n --------\n n-dim numpy array spectral luminosity F_nu density at frequency nu\n \"\"\"\n\n log_g = linspace(log(self._params_n_el['gmin']), log(self._params_n_el['gmax']), g_steps)\n gamma = exp(log_g)\n\n result = zeros(nu.shape[0])\n\n # generate the arrays for observed freq nu, gamma factor, in energy of photon field\n nn, gg = meshgrid(nu, log_g, indexing='ij')\n nnn, ggg, eee = meshgrid(nu, log_g, linspace(0., 1., e_steps), indexing='ij')\n x1 = log(nnn / eV2Hz / 4. 
/ ggg ** 2.)\n x1[x1 < 1e-18] = 1e-18\n x2 = log(nnn / eV2Hz)\n\n log_eee = zeros(nnn.shape)\n m = zeros(nnn.shape, dtype=np.bool)\n for i, n in enumerate(nu):\n for j, lg in enumerate(log_g):\n x1 = max(log(n / eV2Hz / 4. / gamma[j] ** 2.), log(1e-18))\n x2 = log(n / eV2Hz)\n # now log_eps has shape g_steps x e_steps\n log_eee[i, j] = linspace(x1, x2, e_steps)\n if x2 > x1:\n m[i, j] = True\n\n # calculate photon densities:\n # these are in photons / eV / cm^3\n phot_dens = np.zeros(eee.shape)\n\n if self._params_n_seed['ic_sync'] or self._params_n_seed['ic_dust']:\n phot_dens[m] = self.sync_phot_dens(exp(log_eee[m]), exp(ggg[m]))\n\n if self._params_n_seed['ic_cmb']:\n phot_dens[m] += black_body(exp(log_eee[m]), cosmo.Tcmb0.value)\n\n # IC scattering kernel\n f = ic_kernel(nnn, exp(ggg), exp(log_eee))\n\n # multiply the two in integrate over initial photon energy\n kernel_in = phot_dens * f\n\n # kernel needs to be divided by exp(log_eee) but\n # cancels since we're integrating over log(energy).\n # now in photons / cm^3 / eV\n kernel_out = simps(kernel_in, log_eee, axis=2)\n kernel_out *= self._n_el(exp(gg), **self._params_n_el) / exp(gg) ** 2.\n\n # integrate over electron gamma factor\n result = simps(kernel_out * exp(gg), gg, axis=1)\n\n # result of integration is in units of photons/cm**3/eV\n # multiplying with Thomson*c*energy gives and convert to\n # units of erg/sec/eV\n result *= 3. / 4. * (c.sigma_T.cgs * c.c.cgs).value * nu / eV2Hz * eV2erg\n # convert to erg / sec / Hz\n # this is the spectral luminosity L_nu\n result /= eV2Hz\n # divide by the distance squared to get the flux\n result /= 4. * pi * (self._d * kpc2cm)**2.\n return result\n"
] |
[
[
"numpy.log",
"numpy.sqrt",
"numpy.meshgrid",
"numpy.linspace",
"numpy.power",
"numpy.tan",
"scipy.integrate.simps",
"numpy.exp",
"numpy.zeros"
]
] |
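Note: the numpy/scipy calls listed above center on one numerical idiom in `CrabSSC.__init__`: tabulating the synchrotron function x * integral_x^xmax K_{5/3}(t) dt on a logarithmic grid and interpolating it in log space. The sketch below condenses that idiom; it uses `scipy.integrate.simpson` (the current name for the `simps` routine called in the file), and the grid bounds and step counts are illustrative choices, not the package's defaults.

import numpy as np
from scipy.special import kv                 # modified Bessel function K_nu
from scipy.integrate import simpson          # newer spelling of scipy's simps
from scipy.interpolate import interp1d

# Tabulate x * integral_x^xmax K_{5/3}(t) dt on a log grid, as CrabSSC.__init__ does.
log_lo, log_hi, steps = -20.0, 10.0, 200     # illustrative bounds only
logx = np.linspace(log_lo, log_hi, steps)

xF = np.empty_like(logx)
for i, lx in enumerate(logx):
    # Substituting t = e^u turns the integral into K_{5/3}(e^u) * e^u du over [lx, log_hi].
    u = np.linspace(lx, log_hi, steps)
    xF[i] = np.exp(lx) * simpson(kv(5.0 / 3.0, np.exp(u)) * np.exp(u), x=u)

xF = np.clip(xF, 1e-40, None)                # avoid log(0) before interpolating
log_xF = interp1d(logx, np.log(xF))

# The synchrotron function peaks near x ~ 0.29 with a value of roughly 0.92.
print(np.exp(log_xF(np.log(0.29))))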
cvlab-stonybrook/BodyHands
|
[
"dcfe470f6fd31a048d4d17d4ae9a2a524538b380"
] |
[
"bodyhands/utils/extend_utils_boxes.py"
] |
[
"import torch\nfrom detectron2.structures import Boxes\n\ndef pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n\n boxes1, boxes2 = boxes1.tensor, boxes2.tensor\n width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(\n boxes1[:, None, :2], boxes2[:, :2]\n ) # [N,M,2]\n\n width_height.clamp_(min=0) # [N,M,2]\n intersection = width_height.prod(dim=2) # [N,M]\n return intersection\n\ndef pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n ioa = torch.where(\n inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)\n )\n return ioa"
] |
[
[
"torch.min",
"torch.max",
"torch.zeros"
]
] |
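Note: the three torch calls listed above do all the work in `pairwise_ioa`. The sketch below is a self-contained rewrite of the same intersection-over-area computation on plain [N, 4] and [M, 4] xyxy tensors, so it runs without detectron2's `Boxes` wrapper; `pairwise_ioa_xyxy` is this sketch's own name, not part of the repository.

import torch

def pairwise_ioa_xyxy(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
    """Intersection over the area of boxes2, for xyxy boxes of shape [N, 4] and [M, 4]."""
    # Broadcast to [N, M, 2]: overlap extent along x and y for every box pair.
    width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
        boxes1[:, None, :2], boxes2[:, :2]
    )
    width_height.clamp_(min=0)
    intersection = width_height.prod(dim=2)                                  # [N, M]
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])    # [M]
    return torch.where(
        intersection > 0,
        intersection / area2,
        torch.zeros(1, dtype=intersection.dtype, device=intersection.device),
    )

bodies = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
hands = torch.tensor([[8.0, 8.0, 12.0, 12.0], [20.0, 20.0, 22.0, 22.0]])
print(pairwise_ioa_xyxy(bodies, hands))   # tensor([[0.2500, 0.0000]])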
maxscheurer/pyscf
|
[
"162c37942289c0aec70e70ba1ea98ade3ec34da5"
] |
[
"pyscf/lo/orth.py"
] |
[
"#!/usr/bin/env python\n# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nfrom functools import reduce\nimport numpy\nimport scipy.linalg\nfrom pyscf.lib import param\nfrom pyscf.lib import logger\nfrom pyscf import gto\nfrom pyscf import __config__\n\nREF_BASIS = getattr(__config__, 'lo_orth_pre_orth_ao_method', 'ANO')\nORTH_METHOD = getattr(__config__, 'lo_orth_orth_ao_method', 'meta_lowdin')\nPROJECT_ECP_BASIS = getattr(__config__, 'lo_orth_project_ecp_basis', True)\n\n\ndef lowdin(s):\n ''' new basis is |mu> c^{lowdin}_{mu i} '''\n e, v = scipy.linalg.eigh(s)\n idx = e > 1e-15\n return numpy.dot(v[:,idx]/numpy.sqrt(e[idx]), v[:,idx].conj().T)\n\ndef schmidt(s):\n c = numpy.linalg.cholesky(s)\n return scipy.linalg.solve_triangular(c, numpy.eye(c.shape[1]), lower=True,\n overwrite_b=False).conj().T\n\ndef vec_lowdin(c, s=1):\n ''' lowdin orth for the metric c.T*s*c and get x, then c*x'''\n #u, w, vh = numpy.linalg.svd(c)\n #return numpy.dot(u, vh)\n # svd is slower than eigh\n return numpy.dot(c, lowdin(reduce(numpy.dot, (c.conj().T,s,c))))\n\ndef vec_schmidt(c, s=1):\n ''' schmidt orth for the metric c.T*s*c and get x, then c*x'''\n if isinstance(s, numpy.ndarray):\n return numpy.dot(c, schmidt(reduce(numpy.dot, (c.conj().T,s,c))))\n else:\n return numpy.linalg.qr(c)[0]\n\ndef weight_orth(s, weight):\n ''' new basis is |mu> c_{mu i}, c = w[(wsw)^{-1/2}]'''\n s1 = weight[:,None] * s * weight\n c = lowdin(s1)\n return weight[:,None] * c\n\n\ndef pre_orth_ao(mol, method=REF_BASIS):\n '''Restore AO characters. 
Possible methods include the ANO/MINAO\n projection or fraction-averaged atomic RHF calculation'''\n if isinstance(method, str) and method.upper() in ('ANO', 'MINAO'):\n # Use ANO/MINAO basis to define the strongly occupied set\n return project_to_atomic_orbitals(mol, method)\n else:\n return pre_orth_ao_atm_scf(mol)\nrestore_ao_character = pre_orth_ao\n\ndef project_to_atomic_orbitals(mol, basname):\n '''projected AO = |bas><bas|ANO>\n '''\n from pyscf.scf.addons import project_mo_nr2nr\n from pyscf.scf import atom_hf\n from pyscf.gto.ecp import core_configuration\n\n def search_atm_l(atm, l):\n bas_ang = atm._bas[:,gto.ANG_OF]\n ao_loc = atm.ao_loc_nr()\n idx = []\n for ib in numpy.where(bas_ang == l)[0]:\n idx.extend(range(ao_loc[ib], ao_loc[ib+1]))\n return idx\n\n # Overlap of ANO and ECP basis\n def ecp_ano_det_ovlp(atm_ecp, atm_ano, ecpcore):\n ecp_ao_loc = atm_ecp.ao_loc_nr()\n ano_ao_loc = atm_ano.ao_loc_nr()\n ecp_ao_dim = ecp_ao_loc[1:] - ecp_ao_loc[:-1]\n ano_ao_dim = ano_ao_loc[1:] - ano_ao_loc[:-1]\n ecp_bas_l = [[atm_ecp.bas_angular(i)]*d for i,d in enumerate(ecp_ao_dim)]\n ano_bas_l = [[atm_ano.bas_angular(i)]*d for i,d in enumerate(ano_ao_dim)]\n ecp_bas_l = numpy.hstack(ecp_bas_l)\n ano_bas_l = numpy.hstack(ano_bas_l)\n\n nelec_core = 0\n ecp_occ_tmp = []\n ecp_idx = []\n ano_idx = []\n for l in range(4):\n nocc, frac = atom_hf.frac_occ(stdsymb, l)\n l_occ = [2] * ((nocc-ecpcore[l])*(2*l+1))\n if frac > 1e-15:\n l_occ.extend([frac] * (2*l+1))\n nocc += 1\n if nocc == 0:\n break\n nelec_core += 2 * ecpcore[l] * (2*l+1)\n i0 = ecpcore[l] * (2*l+1)\n i1 = nocc * (2*l+1)\n ecp_idx.append(numpy.where(ecp_bas_l==l)[0][:i1-i0])\n ano_idx.append(numpy.where(ano_bas_l==l)[0][i0:i1])\n ecp_occ_tmp.append(l_occ[:i1-i0])\n ecp_idx = numpy.hstack(ecp_idx)\n ano_idx = numpy.hstack(ano_idx)\n ecp_occ = numpy.zeros(atm_ecp.nao_nr())\n ecp_occ[ecp_idx] = numpy.hstack(ecp_occ_tmp)\n nelec_valence_left = int(gto.charge(stdsymb) - nelec_core\n - sum(ecp_occ[ecp_idx]))\n if nelec_valence_left > 0:\n logger.warn(mol, 'Characters of %d valence electrons are not identified.\\n'\n 'It can affect the \"meta-lowdin\" localization method '\n 'and the population analysis of SCF method.\\n'\n 'Adjustment to the core/valence partition may be needed '\n '(see function lo.nao.set_atom_conf)\\nto get reasonable '\n 'local orbitals or Mulliken population.\\n',\n nelec_valence_left)\n # Return 0 to force the projection to ANO basis\n return 0\n else:\n s12 = gto.intor_cross('int1e_ovlp', atm_ecp, atm_ano)[ecp_idx][:,ano_idx]\n return numpy.linalg.det(s12)\n\n nelec_ecp_dic = {}\n for ia in range(mol.natm):\n symb = mol.atom_symbol(ia)\n if symb not in nelec_ecp_dic:\n nelec_ecp_dic[symb] = mol.atom_nelec_core(ia)\n\n aos = {}\n atm = gto.Mole()\n atmp = gto.Mole()\n for symb in mol._basis.keys():\n stdsymb = gto.mole._std_symbol(symb)\n atm._atm, atm._bas, atm._env = \\\n atm.make_env([[stdsymb,(0,0,0)]], {stdsymb:mol._basis[symb]}, [])\n atm.cart = mol.cart\n atm._built = True\n s0 = atm.intor_symmetric('int1e_ovlp')\n\n if gto.is_ghost_atom(symb):\n aos[symb] = numpy.diag(1./numpy.sqrt(s0.diagonal()))\n continue\n\n basis_add = gto.basis.load(basname, stdsymb)\n atmp._atm, atmp._bas, atmp._env = \\\n atmp.make_env([[stdsymb,(0,0,0)]], {stdsymb:basis_add}, [])\n atmp.cart = mol.cart\n atmp._built = True\n\n if symb in nelec_ecp_dic and nelec_ecp_dic[symb] > 0:\n # If ECP basis has good atomic character, ECP basis can be used in the\n # localization/population analysis directly. 
Otherwise project ECP\n # basis to ANO basis.\n if not PROJECT_ECP_BASIS:\n continue\n\n ecpcore = core_configuration(nelec_ecp_dic[symb])\n # Comparing to ANO valence basis, to check whether the ECP basis set has\n # reasonable AO-character contraction. The ANO valence AO should have\n # significant overlap to ECP basis if the ECP basis has AO-character.\n if abs(ecp_ano_det_ovlp(atm, atmp, ecpcore)) > .1:\n aos[symb] = numpy.diag(1./numpy.sqrt(s0.diagonal()))\n continue\n else:\n ecpcore = [0] * 4\n\n # MINAO for heavier elements needs to be used with pseudo potential\n if (basname.upper() == 'MINAO' and\n gto.charge(stdsymb) > 36 and symb not in nelec_ecp_dic):\n raise RuntimeError('Basis MINAO has to be used with ecp for heavy elements')\n\n ano = project_mo_nr2nr(atmp, numpy.eye(atmp.nao_nr()), atm)\n rm_ano = numpy.eye(ano.shape[0]) - reduce(numpy.dot, (ano, ano.T, s0))\n c = rm_ano.copy()\n for l in range(param.L_MAX):\n idx = numpy.asarray(search_atm_l(atm, l))\n nbf_atm_l = len(idx)\n if nbf_atm_l == 0:\n break\n\n idxp = numpy.asarray(search_atm_l(atmp, l))\n if l < 4:\n idxp = idxp[ecpcore[l]:]\n nbf_ano_l = len(idxp)\n\n if mol.cart:\n degen = (l + 1) * (l + 2) // 2\n else:\n degen = l * 2 + 1\n\n if nbf_atm_l > nbf_ano_l > 0:\n # For angular l, first place the projected ANO, then the rest AOs.\n sdiag = reduce(numpy.dot, (rm_ano[:,idx].T, s0, rm_ano[:,idx])).diagonal()\n nleft = (nbf_atm_l - nbf_ano_l) // degen\n shell_average = numpy.einsum('ij->i', sdiag.reshape(-1,degen))\n shell_rest = numpy.argsort(-shell_average)[:nleft]\n idx_rest = []\n for k in shell_rest:\n idx_rest.extend(idx[k*degen:(k+1)*degen])\n c[:,idx[:nbf_ano_l]] = ano[:,idxp]\n c[:,idx[nbf_ano_l:]] = rm_ano[:,idx_rest]\n elif nbf_ano_l >= nbf_atm_l > 0: # More ANOs than the mol basis functions\n c[:,idx] = ano[:,idxp[:nbf_atm_l]]\n sdiag = numpy.einsum('pi,pq,qi->i', c, s0, c)\n c *= 1./numpy.sqrt(sdiag)\n aos[symb] = c\n\n nao = mol.nao_nr()\n c = numpy.zeros((nao,nao))\n p1 = 0\n for ia in range(mol.natm):\n symb = mol.atom_symbol(ia)\n if symb in mol._basis:\n ano = aos[symb]\n else:\n ano = aos[mol.atom_pure_symbol(ia)]\n p0, p1 = p1, p1 + ano.shape[1]\n c[p0:p1,p0:p1] = ano\n return c\npre_orth_project_ano = project_to_atomic_orbitals\n\ndef pre_orth_ao_atm_scf(mol):\n assert(not mol.cart)\n from pyscf.scf import atom_hf\n atm_scf = atom_hf.get_atm_nrhf(mol)\n aoslice = mol.aoslice_by_atom()\n coeff = []\n for ia in range(mol.natm):\n symb = mol.atom_symbol(ia)\n if symb not in atm_scf:\n symb = mol.atom_pure_symbol(ia)\n\n if symb in atm_scf:\n e_hf, e, c, occ = atm_scf[symb]\n else: # symb's basis is not specified in the input\n nao_atm = aoslice[ia,3] - aoslice[ia,2]\n c = numpy.zeros((nao_atm, nao_atm))\n coeff.append(c)\n return scipy.linalg.block_diag(*coeff)\n\n\ndef orth_ao(mf_or_mol, method=ORTH_METHOD, pre_orth_ao=None, scf_method=None,\n s=None):\n '''Orthogonalize AOs\n\n Kwargs:\n method : str\n One of\n | lowdin : Symmetric orthogonalization\n | meta-lowdin : Lowdin orth within core, valence, virtual space separately (JCTC, 10, 3784)\n | NAO\n '''\n from pyscf.lo import nao\n mf = scf_method\n if isinstance(mf_or_mol, gto.Mole):\n mol = mf_or_mol\n else:\n mol = mf_or_mol.mol\n if mf is None:\n mf = mf_or_mol\n\n if s is None:\n if getattr(mol, 'pbc_intor', None): # whether mol object is a cell\n s = mol.pbc_intor('int1e_ovlp', hermi=1)\n else:\n s = mol.intor_symmetric('int1e_ovlp')\n\n if pre_orth_ao is None:\n pre_orth_ao = project_to_atomic_orbitals(mol, REF_BASIS)\n\n if 
method.lower() == 'lowdin':\n s1 = reduce(numpy.dot, (pre_orth_ao.conj().T, s, pre_orth_ao))\n c_orth = numpy.dot(pre_orth_ao, lowdin(s1))\n elif method.lower() == 'nao':\n assert(mf is not None)\n c_orth = nao.nao(mol, mf, s)\n else:\n # meta_lowdin: partition AOs into core, valence and Rydberg sets,\n # orthogonalizing within each set\n weight = numpy.ones(pre_orth_ao.shape[0])\n c_orth = nao._nao_sub(mol, weight, pre_orth_ao, s)\n # adjust phase\n for i in range(c_orth.shape[1]):\n if c_orth[i,i] < 0:\n c_orth[:,i] *= -1\n return c_orth\n\ndel(ORTH_METHOD)\n\n\nif __name__ == '__main__':\n from pyscf import scf\n from pyscf.lo import nao\n mol = gto.Mole()\n mol.verbose = 1\n mol.output = 'out_orth'\n mol.atom.extend([\n ['O' , (0. , 0. , 0.)],\n [1 , (0. , -0.757 , 0.587)],\n [1 , (0. , 0.757 , 0.587)] ])\n mol.basis = {'H': '6-31g',\n 'O': '6-31g',}\n mol.build()\n\n mf = scf.RHF(mol)\n mf.scf()\n\n c0 = nao.prenao(mol, mf.make_rdm1())\n c = orth_ao(mol, 'meta_lowdin', c0)\n\n s = mol.intor_symmetric('int1e_ovlp_sph')\n p = reduce(numpy.dot, (s, mf.make_rdm1(), s))\n print(reduce(numpy.dot, (c.T, p, c)).diagonal())\n"
] |
[
[
"numpy.hstack",
"numpy.sqrt",
"numpy.einsum",
"numpy.eye",
"numpy.ones",
"numpy.linalg.det",
"numpy.linalg.cholesky",
"numpy.linalg.qr",
"numpy.argsort",
"numpy.zeros",
"numpy.where"
]
] |
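Note: most of the numpy calls listed above feed one linear-algebra primitive, symmetric (Loewdin) orthogonalization: X = V diag(e^{-1/2}) V^T built from the eigendecomposition of the overlap matrix S, so that X^T S X = I. The sketch below checks that identity with numpy only, mirroring the `lowdin(s)` function in the file; the random positive-definite matrix stands in for a real AO overlap matrix.

import numpy as np

def lowdin(s, tol=1e-15):
    """Symmetric (Loewdin) orthogonalization: return X with X.T @ S @ X == I,
    dropping eigenvectors whose eigenvalues fall below tol, as the file's lowdin() does."""
    e, v = np.linalg.eigh(s)
    idx = e > tol
    return (v[:, idx] / np.sqrt(e[idx])) @ v[:, idx].conj().T

rng = np.random.default_rng(0)
a = rng.standard_normal((5, 5))
s = a @ a.T + 5 * np.eye(5)                  # a symmetric positive-definite "overlap" matrix
x = lowdin(s)
print(np.allclose(x.T @ s @ x, np.eye(5)))   # True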
diegoolano/biomedical_interpretable_entity_representations
|
[
"3c35f02ee8dd7ee0f2a23b0014e4b112beab6461"
] |
[
"ier_model/run_et.py"
] |
[
"#!/usr/bin/env python3\nimport argparse\nimport gc\nimport json\nimport numpy as np\nimport pickle\nimport random\nimport time\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nfrom transformers import AdamW, get_linear_schedule_with_warmup\n\nimport transformer_constant\nimport transformer_data_utils\nfrom transformer_data_utils import to_torch\nfrom models import TransformerModel\n\n\n\"\"\"\nArgs\n\"\"\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-model_id\", help=\"Identifier for model\")\nparser.add_argument('-device', type=int, default=0, help='CUDA device')\nparser.add_argument(\"-n_gpu\", help=\"Number of GPUs.\", type=int, default=1)\nparser.add_argument(\"-mode\", help=\"Whether to train or test\", default=\"train\", choices=[\"train\", \"val\", \"test\"])\nparser.add_argument(\"-local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n\n# Data\nparser.add_argument(\"-train_data\", help=\"Train data\",\n default=\"train/wiki_et_zeroshot_60k_ex_random/train_*.json\")\nparser.add_argument(\"-dev_data\", help=\"Dev data\",\n default=\"validation/dev_wiki_et_zeroshot_60k_ex_random_999.json\")\nparser.add_argument(\"-eval_data\", help=\"Test data\", default=\"\")\nparser.add_argument(\"-goal\", help=\"category vocab size.\", default=\"60k\", choices=[\"medwiki\",\"60k\", \"ufet\"])\nparser.add_argument(\"-seed\", help=\"Pytorch random Seed\", default=113)\nparser.add_argument(\"-context_window_size\", help=\"Left and right context size.\", default=100)\n\n# learning\nparser.add_argument(\"-num_epoch\", help=\"The number of epoch\", default=5000, type=int)\nparser.add_argument(\"-per_gpu_train_batch_size\", help=\"The batch size per GPU\", default=8, type=int)\nparser.add_argument(\"-per_gpu_eval_batch_size\", help=\"The batch size per GPU\", default=8, type=int)\nparser.add_argument(\"-learning_rate_enc\", help=\"BERT: start learning rate\", default=2e-5, type=float)\nparser.add_argument(\"-learning_rate_cls\", help=\"BERT: start learning rate\", default=1e-3, type=float)\nparser.add_argument(\"-adam_epsilon_enc\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\nparser.add_argument(\"-adam_epsilon_cls\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\nparser.add_argument(\"-hidden_dropout_prob\", help=\"Dropout rate\", default=.1, type=float)\nparser.add_argument(\"-warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\nparser.add_argument(\n \"-gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n)\n\n# Model\nparser.add_argument(\n \"-model_type\",\n default=\"bert-base-uncased\",\n choices=[\n \"bert-base-uncased\",\n \"bert-large-uncased\",\n \"bert-large-uncased-whole-word-masking\",\n \"roberta-base\",\n \"roberta-large\",\n \"allenai/biomed_roberta_base\",\n \"monologg/biobert_v1.1_pubmed\",\n \"allenai/scibert_scivocab_uncased\",\n \"microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext\"\n ]\n)\nparser.add_argument(\"-threshold\", help=\"threshold\", default=0.5, type=float)\nparser.add_argument(\"-avg_pooling\", help=\"Averaging all hidden states instead of using [CLS].\", action='store_true')\n\n# Save / log related\nparser.add_argument(\"-save_period\", help=\"How often to save\", default=1000, type=int)\nparser.add_argument(\"-eval_period\", help=\"How often to run dev\", default=500, type=int)\nparser.add_argument(\"-log_period\", help=\"How often to 
save\", default=1000, type=int)\nparser.add_argument(\"-eval_after\", help=\"How often to run dev\", default=10, type=int)\nparser.add_argument(\"-load\", help=\"Load existing model.\", action='store_true')\nparser.add_argument(\"-reload_model_name\", help=\"\")\nparser.add_argument(\"-reload_model_name_desc\", help=\"\")\n\n# Extra param So we can run different data for same goal\nparser.add_argument(\"-env\", help=\"data sub for medwiki\", default=\"\", choices=[\"yasu\", \"0720_3k_full\",\"0720_3k_full_orig\", \"0720_3k_drugs\",\"0720_600k_full\",\"0720_600k_full_orig\",\"0720_600k_drugs\"])\n\n\nparser.add_argument(\"-examples_limit\", help=\"How many examples to do eval on in def _val\", default=1000, type=int)\n\n\"\"\"\nUtils\n\"\"\"\n\nSIGMOID = nn.Sigmoid()\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef get_data_gen(dataname, mode, args, tokenizer):\n data_path = transformer_constant.get(args.env, 'FILE_ROOT') + dataname\n print(\"load data path\", data_path, \"with args.env\", args.env)\n dataset = transformer_data_utils.DatasetLoader(data_path, args, tokenizer)\n if mode == 'train':\n data_gen = dataset.get_batch(args.train_batch_size, args.max_position_embeddings, args.num_epoch, eval_data=False)\n else: # test mode\n data_gen = dataset.get_batch(args.eval_batch_size, args.max_position_embeddings, 1, eval_data=True)\n return data_gen\n\n\ndef get_all_datasets(args, tokenizer):\n train_gen_list = []\n if args.mode in ['train']:\n if 'wiki_desc' in args.model_id:\n print(\"load wiki_desc\",)\n train_gen_list.append(get_data_gen(transformer_constant.get(args.env,'WIKI_TRAIN_DATA'), 'train', args, tokenizer))\n else:\n train_gen_list.append(get_data_gen(transformer_constant.get(args.env,'TRAIN_DATA'), 'train', args, tokenizer))\n #train_gen_list.append(get_data_gen(args.train_data, 'train', args, tokenizer))\n return train_gen_list\n\n\ndef get_datasets(data_lists, args, tokenizer):\n data_gen_list = []\n for dataname, mode in data_lists:\n data_gen_list.append(get_data_gen(dataname, mode, args, tokenizer))\n return data_gen_list\n\n\ndef evaluate_data(batch_num, dev_fname, model, args, device):\n print(\"in evaluate data, batchnum\", batch_num, dev_fname)\n #print(args)\n model.eval()\n dev_gen = get_data_gen(dev_fname, 'test', args, model.transformer_tokenizer)\n gold_pred = []\n eval_loss = 0.\n total_ex_count = 0\n\n #IMPORTANT since this takes so long sub sample for now 500\n\n for batch in tqdm(dev_gen):\n if total_ex_count > 500:\n break \n total_ex_count += len(batch['targets'])\n try:\n inputs, targets = to_torch(batch, device)\n loss, output_logits = model(inputs, targets)\n except Exception as e:\n print(\"in Eval to torch error so continue: \",e )\n continue\n\n output_index = get_output_index(output_logits, threshold=args.threshold)\n gold_pred += get_gold_pred_str(output_index, batch['targets'].data.cpu().clone(), args.goal, args.env)\n\n eval_loss += loss.clone().item()\n\n print(\"Gold Pred\", len(gold_pred),gold_pred[0:4])\n eval_str = get_eval_string(gold_pred)\n _, _, _, _, _, macro_f1 = macro(gold_pred)\n eval_loss_str = 'Eval loss: {0:.7f} at step {1:d}'.format(eval_loss, batch_num)\n print('==> EVAL: seen ' + repr(total_ex_count) + ' examples.')\n print(eval_loss_str)\n print(gold_pred[:3])\n print('==> ' + eval_str)\n model.train()\n dev_gen = None\n return eval_loss, macro_f1\n\n\ndef f1(p, r):\n if r == 0.:\n return 0.\n return 
2 * p * r / float(p + r)\n\n\ndef macro(true_and_prediction):\n num_examples = len(true_and_prediction)\n p = 0.\n r = 0.\n pred_example_count = 0.\n pred_label_count = 0.\n gold_label_count = 0.\n for true_labels, predicted_labels in true_and_prediction:\n if predicted_labels:\n pred_example_count += 1\n pred_label_count += len(predicted_labels)\n per_p = len(set(predicted_labels).intersection(set(true_labels))) / float(len(predicted_labels))\n p += per_p\n if len(true_labels):\n gold_label_count += 1\n per_r = len(set(predicted_labels).intersection(set(true_labels))) / float(len(true_labels))\n r += per_r\n if pred_example_count > 0:\n precision = p / pred_example_count\n if gold_label_count > 0:\n recall = r / gold_label_count\n\n if pred_example_count == 0:\n print(\"In Macro: Pred Example Count == 0\")\n avg_elem_per_pred = 0\n else:\n avg_elem_per_pred = pred_label_count / pred_example_count\n\n return num_examples, pred_example_count, avg_elem_per_pred, precision, recall, f1(precision, recall)\n\n\ndef micro(true_and_prediction):\n num_examples = len(true_and_prediction)\n num_predicted_labels = 0.\n num_true_labels = 0.\n num_correct_labels = 0.\n pred_example_count = 0.\n for true_labels, predicted_labels in true_and_prediction:\n if predicted_labels:\n pred_example_count += 1\n num_predicted_labels += len(predicted_labels)\n num_true_labels += len(true_labels)\n num_correct_labels += len(set(predicted_labels).intersection(set(true_labels)))\n if pred_example_count == 0:\n return num_examples, 0, 0, 0, 0, 0\n precision = num_correct_labels / num_predicted_labels\n recall = num_correct_labels / num_true_labels\n avg_elem_per_pred = num_predicted_labels / pred_example_count\n return num_examples, pred_example_count, avg_elem_per_pred, precision, recall, f1(precision, recall)\n\n\ndef load_model(reload_model_name, save_dir, model_id, model,\n optimizer_enc=None, optimizer_cls=None, scheduler_enc=None, scheduler_cls=None):\n if reload_model_name:\n model_file_name = '{0:s}/{1:s}.pt'.format(save_dir, reload_model_name)\n else:\n model_file_name = '{0:s}/{1:s}.pt'.format(save_dir, model_id)\n print(\"Loading \", model_file_name)\n checkpoint = torch.load(model_file_name)\n model.load_state_dict(checkpoint['state_dict'])\n if optimizer_enc and optimizer_cls: # Continue training\n #if optimizer_enc and optimizer_cls and scheduler_enc and scheduler_cls: # Continue training\n optimizer_enc.load_state_dict(checkpoint['optimizer_enc'])\n optimizer_cls.load_state_dict(checkpoint['optimizer_cls'])\n else: # Test\n total_params = 0\n # Log params\n for k in checkpoint['state_dict']:\n elem = checkpoint['state_dict'][k]\n param_s = 1\n for size_dim in elem.size():\n param_s = size_dim * param_s\n #print(k, elem.size())\n total_params += param_s\n param_str = ('Number of total parameters..{0:d}'.format(total_params))\n print(param_str)\n print('Loading model from ... 
{0:s}'.format(model_file_name))\n\n\ndef get_output_index(outputs, threshold=0.5):\n \"\"\"\n Given outputs from the decoder, generate prediction index.\n :param outputs:\n :return:\n \"\"\"\n pred_idx = []\n outputs = SIGMOID(outputs).data.cpu().clone()\n for single_dist in outputs:\n single_dist = single_dist.numpy()\n arg_max_ind = np.argmax(single_dist)\n pred_id = [arg_max_ind]\n pred_id.extend(\n [i for i in range(len(single_dist)) if single_dist[i] > threshold and i != arg_max_ind])\n pred_idx.append(pred_id)\n return pred_idx\n\n\ndef get_gold_pred_str(pred_idx, gold, goal, env):\n \"\"\"\n Given predicted ids and gold ids, generate a list of (gold, pred) pairs of length batch_size.\n \"\"\"\n if goal == '60k':\n id2word_dict = transformer_constant.ID2ANS_DICT_60K\n elif goal == 'ufet':\n id2word_dict = transformer_constant.ID2ANS_DICT_UFET\n elif goal == 'medwiki':\n id2word_dict = transformer_constant.ID2ANS_MEDWIKI_DICT[env]\n else:\n print('ERROR: Invalid input...' + goal)\n raise\n gold_strs = []\n for gold_i in gold:\n gold_strs.append([id2word_dict[i] for i in range(len(gold_i)) if gold_i[i] == 1])\n pred_strs = []\n for pred_idx1 in pred_idx:\n pred_strs.append([(id2word_dict[ind]) for ind in pred_idx1])\n else:\n return list(zip(gold_strs, pred_strs))\n\n\ndef get_eval_string(true_prediction):\n \"\"\"\n Given a list of (gold, prediction)s, generate output string.\n \"\"\"\n count, pred_count, avg_pred_count, p, r, f1 = micro(true_prediction)\n _, _, _, ma_p, ma_r, ma_f1 = macro(true_prediction)\n output_str = \"Eval: {0} {1} {2:.3f} P:{3:.3f} R:{4:.3f} F1:{5:.3f} Ma_P:{6:.3f} Ma_R:{7:.3f} Ma_F1:{8:.3f}\".format(\n count, pred_count, avg_pred_count, p, r, f1, ma_p, ma_r, ma_f1)\n accuracy = sum([set(y) == set(yp) for y, yp in true_prediction]) * 1.0 / len(true_prediction)\n output_str += '\\t Dev accuracy: {0:.1f}%'.format(accuracy * 100)\n return output_str\n\n\n\"\"\"\nTraining \n\"\"\"\n\ndef _train(args, model, device):\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n args.eval_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n print('==> Loading data generator... ')\n train_gen_list = get_all_datasets(args, model.transformer_tokenizer)\n\n print('done. 
{} data gen(s)'.format(len(train_gen_list)))\n print('Model Type: {}'.format(args.model_type))\n\n total_loss = 0.\n batch_num = 0\n best_macro_f1 = 0.\n start_time = time.time()\n init_time = time.time()\n print('Total {} named params.'.format(len([n for n, p in model.named_parameters()])))\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n classifier_param_name = [\"classifier.linear.weight\"]\n encoder_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters()\n if not any(nd in n for nd in no_decay) and n not in classifier_param_name],\n \"weight_decay\": 0.0 #args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters()\n if any(nd in n for nd in no_decay) and n not in classifier_param_name],\n \"weight_decay\": 0.0\n },\n ]\n classifier_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if n in classifier_param_name],\n \"weight_decay\": 0.0\n },\n ]\n print(\n 'Encoder {}, Classifier {}'.format(\n sum([len(p['params']) for p in encoder_parameters]),\n sum([len(p['params']) for p in classifier_parameters])\n )\n )\n optimizer_enc = AdamW(encoder_parameters, lr=args.learning_rate_enc, eps=args.adam_epsilon_enc)\n optimizer_cls = AdamW(classifier_parameters, lr=args.learning_rate_cls, eps=args.adam_epsilon_cls)\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n if args.load:\n load_model(args.reload_model_name, transformer_constant.get(args.env,'EXP_ROOT'), args.model_id, model, optimizer_enc, optimizer_cls)\n\n optimizer_enc.zero_grad()\n optimizer_cls.zero_grad()\n set_seed(args)\n\n global_train_sum, global_train_n = 0, 0\n global_overlap_train_sum, global_overlap_train_n = 0, 0\n while True:\n batch_num += 1 # single batch composed of all train signal passed by.\n for data_gen in train_gen_list:\n try:\n batch = next(data_gen)\n inputs, targets = to_torch(batch, device)\n except StopIteration:\n print('Done!')\n torch.save(\n {\n 'state_dict': model.state_dict(),\n 'optimizer_cls': optimizer_cls.state_dict(),\n 'optimizer_enc': optimizer_enc.state_dict(),\n 'args': args\n },\n '{0:s}/{1:s}.pt'.format(transformer_constant.get(args.env,'EXP_ROOT'), args.model_id)\n )\n return\n except Exception as e:\n print(\"To torch error so continue: \",e )\n print(\"Batch num\",batch_num)\n print(batch)\n print(inputs)\n print(targets)\n continue\n\n model.train()\n\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[\"token_type_ids\"] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n\n try:\n loss, output_logits = model(inputs, targets)\n\n except Exception as e:\n print(\"Error computing loss on batch_num: \", batch_num, e)\n #print(\"Inputs: \", inputs) <-- even printing this out gives an error\n #print(\"Targets: \", targets)\n # skip batch and try to figure out what happned\n continue\n\n inputs, targets = None, None\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n loss.backward()\n total_loss += loss.item()\n\n if batch_num % args.gradient_accumulation_steps == 0:\n optimizer_enc.step()\n optimizer_cls.step()\n optimizer_enc.zero_grad()\n optimizer_cls.zero_grad()\n\n if batch_num % args.log_period == 0 and batch_num > 0:\n gc.collect()\n cur_loss = float(1.0 * loss.clone().item())\n elapsed = time.time() - start_time\n train_loss_str = ('|loss {0:3f} | at {1:d}step 
| @ {2:.2f} ms/batch'.format(cur_loss, batch_num,\n elapsed * 1000 / args.log_period))\n start_time = time.time()\n print(train_loss_str)\n\n if batch_num % args.eval_period == 0 and batch_num > 0:\n output_index = get_output_index(output_logits, threshold=args.threshold)\n batch_targets_clone = batch['targets'].data.cpu().clone()\n gold_pred_train = get_gold_pred_str(output_index, batch_targets_clone, args.goal, args.env)\n #print(\"OUTPUT INDEX:\",output_index[:4])\n #print(\"TARGETS:\", batch_targets_clone[:4].shape, type(batch_targets_clone), batch_targets_clone[:4])\n #print(torch.nonzero(batch_targets_clone[:4]))\n print(\"1st ten preds (true cats, pred cats)\", [(i,len(v[0]), len(v[1]), len(v[0])==len(v[1]), v[0], v[1]) for i,v in enumerate(gold_pred_train[:10])])\n accuracy = sum([set(y) == set(yp) for y, yp in gold_pred_train]) * 1.0 / len(gold_pred_train)\n print('==> Train accuracy: {0:.1f}%'.format(accuracy * 100))\n\n overlap_accuracy = sum([len(set(y).intersection(set(yp)))/len(yp) for y, yp in gold_pred_train]) * 1.0 / len(gold_pred_train)\n print('==> Train overlap accuracy: {0:.1f}%'.format(overlap_accuracy * 100))\n\n global_train_sum += sum([set(y) == set(yp) for y, yp in gold_pred_train]) * 1.0\n global_train_n += len(gold_pred_train)\n global_acc = global_train_sum / global_train_n\n print('==> Global Train accuracy: {0:.1f}%'.format(global_acc * 100))\n\n global_overlap_train_sum += sum([len(set(y).intersection(set(yp)))/len(yp) for y, yp in gold_pred_train]) * 1.0\n global_overlap_train_n += len(gold_pred_train)\n global_overlap_acc = global_overlap_train_sum / global_overlap_train_n\n print('==> Global Train overlap accuracy: {0:.1f}%'.format(global_overlap_acc * 100))\n\n if batch_num % args.eval_period == 0 and batch_num > args.eval_after:\n # Evaluate Loss on the Turk Dev dataset.\n print('---- eval at step {0:d} ---'.format(batch_num))\n if 'wiki_desc' in args.model_id:\n _, macro_f1 = evaluate_data(batch_num, transformer_constant.get(args.env,'WIKI_DEV_DATA'), model, args, device)\n else:\n _, macro_f1 = evaluate_data(batch_num, transformer_constant.get(args.env,'DEV_DATA'), model, args, device)\n if best_macro_f1 < macro_f1:\n best_macro_f1 = macro_f1\n save_fname = '{0:s}/{1:s}_best.pt'.format(transformer_constant.get(args.env,'EXP_ROOT'), args.model_id)\n torch.save(\n {\n 'state_dict': model.state_dict(),\n 'optimizer_cls': optimizer_cls.state_dict(),\n 'optimizer_enc': optimizer_enc.state_dict(),\n 'args': args\n },\n save_fname\n )\n print(\n 'Total {0:.2f} minutes have passed, saving at {1:s} '.format((time.time() - init_time) / 60, save_fname))\n\n #if batch_num % args.save_period == 0 and batch_num > 30000:\n if batch_num % args.save_period == 0 and batch_num >= args.eval_after:\n save_fname = '{0:s}/{1:s}_{2:d}.pt'.format(transformer_constant.get(args.env,'EXP_ROOT'), args.model_id, batch_num)\n torch.save(\n {\n 'state_dict': model.state_dict(),\n 'optimizer_cls': optimizer_cls.state_dict(),\n 'optimizer_enc': optimizer_enc.state_dict(),\n 'args': args\n },\n save_fname\n )\n print(\n 'Total {0:.2f} minutes have passed, saving at {1:s} '.format((time.time() - init_time) / 60, save_fname))\n\n\n\"\"\"\nVAL\n\"\"\"\n\ndef _val(args, model, device):\n examples_limit = args.examples_limit\n start_time = time.time()\n assert args.load\n dev_fname = transformer_constant.get(args.env,'DEV_DATA')\n args.eval_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n model.eval()\n load_model(args.reload_model_name, 
transformer_constant.get(args.env,'EXP_ROOT'), args.model_id, model)\n\n print(\"in evaluate data, \", dev_fname, args.reload_model_name)\n dev_gen = get_data_gen(dev_fname, 'test', args, model.transformer_tokenizer)\n gold_pred = []\n eval_loss = 0.\n total_ex_count = 0\n\n #IMPORTANT since this takes so long sub sample for now 500\n\n for batch in tqdm(dev_gen):\n if total_ex_count > examples_limit:\n break \n total_ex_count += len(batch['targets'])\n try:\n inputs, targets = to_torch(batch, device)\n loss, output_logits = model(inputs, targets)\n except Exception as e:\n print(\"in Eval to torch error so continue: \",e )\n continue\n\n output_index = get_output_index(output_logits, threshold=args.threshold)\n gold_pred += get_gold_pred_str(output_index, batch['targets'].data.cpu().clone(), args.goal, args.env)\n\n eval_loss += loss.clone().item()\n\n print(\"Gold Pred\", len(gold_pred),gold_pred[0:4])\n eval_str = get_eval_string(gold_pred)\n _, _, _, _, _, macro_f1 = macro(gold_pred)\n elapsed = start_time - time.time()\n eval_loss_str = 'Eval loss: {0:.7f} at step {1} time elapsed {2}'.format(eval_loss, args.reload_model_name, elapsed)\n print('==> EVAL: seen ' + repr(total_ex_count) + ' examples.')\n print(eval_loss_str)\n print(gold_pred[:3])\n print('==> ' + eval_str)\n\n\n\"\"\"\nTest\n\"\"\"\n\ndef _test(args, model, device):\n start_time = time.time()\n assert args.load\n test_fname = transformer_constant.get(args.env,'EVAL_DATA') #this takes way too long on test, and really we want to see which does best on DEV.. so use _val\n #test_fname = transformer_constant.get(args.env,'DEV_DATA')\n args.eval_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n data_gens = get_datasets([(test_fname, 'test')], args, model.transformer_tokenizer)\n model.eval()\n load_model(args.reload_model_name, transformer_constant.get(args.env,'EXP_ROOT'), args.model_id, model)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n print(\"==> use\", torch.cuda.device_count(), \"GPUs.\")\n cur_time = time.time()\n for name, dataset in [(test_fname, data_gens[0])]:\n print('Processing... ' + name + \" with len \",type(dataset), \" Elapsed Time: \", cur_time - start_time)\n total_gold_pred = []\n total_annot_ids = []\n total_probs = []\n total_ys = []\n for batch_num, batch in tqdm(enumerate(dataset)):\n if batch_num % 1 == 0:\n print(batch_num)\n if not isinstance(batch, dict):\n print('==> batch: ', batch)\n inputs, targets = to_torch(batch, device)\n annot_ids = batch.pop('ex_ids')\n if args.n_gpu > 1:\n output_logits = model(inputs, targets)\n else:\n _, output_logits = model(inputs)\n output_index = get_output_index(output_logits, threshold=args.threshold)\n output_prob = model.sigmoid_fn(output_logits).data.cpu().clone().numpy()\n\n \"\"\"\n print(\"Inputs: \",inputs)\n print(\"Targets: \", targets) \n print(\"Batch: \",batch)\n print(\"Annot_ids: \",annot_ids)\n print(\"output_index: \",output_index)\n print(\"output_prob: \",output_prob)\n \"\"\"\n\n #y = inputs['targets'].data.cpu().clone().numpy() #orig\n y = batch['targets'].data.cpu().clone().numpy() #maybe fix? 
maybe should be just targets\n\n gold_pred = get_gold_pred_str(output_index, y, args.goal, args.env)\n\n print(\"Gold Pred\", len(gold_pred),gold_pred[0:3])\n eval_str = get_eval_string(gold_pred)\n _, _, _, _, _, macro_f1 = macro(gold_pred)\n print('==> ' + eval_str)\n\n total_probs.extend(output_prob)\n total_ys.extend(y)\n total_gold_pred.extend(gold_pred)\n total_annot_ids.extend(annot_ids)\n\n cur_time2 = time.time()\n print(\"DONE SAVING PICKLE. Elapsed Time\", cur_time2 - cur_time)\n pickle.dump({'gold_id_array': total_ys, 'pred_dist': total_probs},\n open(transformer_constant.get(args.env,'FILE_ROOT') + '/outputs/{0:s}.pkl'.format(args.model_id), \"wb\"))\n print(\"LENS\",len(total_annot_ids), len(total_gold_pred))\n\n with open(transformer_constant.get(args.env, 'FILE_ROOT') + '/outputs/{0:s}.json'.format(args.model_id), 'w') as f_out:\n output_dict = {}\n counter = 0\n for a_id, (gold, pred) in zip(total_annot_ids, total_gold_pred):\n output_dict[a_id] = {\"gold\": gold, \"pred\": pred}\n counter += 1\n json.dump(output_dict, f_out)\n\n #eval_str = get_eval_string(total_gold_pred)\n eval_str = 'none'\n print(\"DONE\")\n print(eval_str)\n\n\ndef main():\n args = parser.parse_args()\n # Lower text for BERT uncased models\n args.do_lower = True if 'uncased' in args.model_type else False\n # Setup CUDA, GPU & distributed training\n assert torch.cuda.is_available()\n if args.local_rank == -1:\n device = torch.device(\"cuda\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n set_seed(args)\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n ind = args.goal if args.env == \"\" else args.env\n model = TransformerModel(args, transformer_constant.ANSWER_NUM_DICT[ind])\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n model.to(args.device)\n #print(model)\n args.max_position_embeddings = model.transformer_config.max_position_embeddings\n print(\"MAX POSITION EMBEDDINGS: \",args.max_position_embeddings )\n\n print('-' * 80)\n for k, v in vars(args).items():\n print(k, ':', v)\n print('-' * 80)\n if args.mode == 'train':\n print('==> mode: train')\n _train(args, model, device)\n elif args.mode == 'val':\n # helper function 1005\n print('==> mode: val')\n _val(args, model, device)\n elif args.mode == 'test':\n print('==> mode: test')\n _test(args, model, device)\n else:\n raise ValueError(\"invalid value for 'mode': {}\".format(args.mode))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.distributed.init_process_group",
"numpy.random.seed",
"torch.load",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.cuda.device_count",
"torch.distributed.barrier",
"torch.nn.Sigmoid",
"numpy.argmax",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.device",
"torch.nn.DataParallel"
]
] |
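Note: among the calls listed above, `torch.nn.Sigmoid` and `numpy.argmax` implement the decoding rule in `get_output_index`: the argmax label is always predicted, and every other label whose sigmoid score clears the threshold is added. The sketch below isolates that rule so it can be run on its own; the example logits are made up for illustration.

import numpy as np
import torch

SIGMOID = torch.nn.Sigmoid()

def get_output_index(outputs: torch.Tensor, threshold: float = 0.5):
    """Decode multi-label logits: keep the argmax index unconditionally,
    plus any other index whose sigmoid probability exceeds the threshold."""
    pred_idx = []
    probs = SIGMOID(outputs).detach().cpu().numpy()
    for single_dist in probs:
        arg_max_ind = int(np.argmax(single_dist))
        pred_id = [arg_max_ind]
        pred_id.extend(i for i in range(len(single_dist))
                       if single_dist[i] > threshold and i != arg_max_ind)
        pred_idx.append(pred_id)
    return pred_idx

logits = torch.tensor([[2.0, -1.0, 0.3],     # probs ~ [0.88, 0.27, 0.57]
                       [-3.0, -2.0, -1.0]])  # all probs < 0.5: argmax is still kept
print(get_output_index(logits))              # [[0, 2], [2]]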
drewhayward/DFOL-VQA
|
[
"8c7d403bac560588ab3ac45774a3e4f71fbe9c90"
] |
[
"src/nsvqa/nn/interpreter/batch_base_interpreter.py"
] |
[
"# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. See LICENSE.md file\n# in the project root for full license information.\n\nimport torch\nimport torch.nn as nn\nimport os\n\nfrom operator import itemgetter\nfrom nsvqa.nn.interpreter import util\nfrom nsvqa.nn.interpreter.batch_base_types import BatchWorld, BatchVariableSet, BatchAttentionState\nfrom nsvqa.nn.interpreter.data_parallel import gather_results\n\nclass BatchInterpreterBase(nn.Module):\n\n def __init__(self, name, oracle, featurizer=None, attention_transfer_state_dim=0, apply_modulation_everywhere=True, cached=False, visual_rule_learner=None, calibrator=None): #, attention_transfer_modulator=None):\n super(BatchInterpreterBase, self).__init__()\n self._featurizer = featurizer\n self._oracle = oracle\n self._name = name\n self._global_step = nn.Parameter(torch.tensor([0], dtype=torch.float), requires_grad=False)\n # self._atm = attention_transfer_modulator\n self._has_modulator = False\n self._attention_transfer_state_dim = attention_transfer_state_dim\n self._apply_modulation_everywhere = apply_modulation_everywhere\n self._cached = cached\n self._visual_rule_learner = visual_rule_learner\n self._calibrator = calibrator\n\n def _execute(self, op_id, world, operator_batch, input_tuple, is_terminal, is_training):\n pass\n\n def _transform_attention(self, op_id, is_forward, world, operator_batch, input_tuple, is_terminal, is_training):\n pass\n\n def parameter_count(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)\n \n def save(self, export_path_base):\n torch.save(self.state_dict(), os.path.join(export_path_base, self._name))\n\n def load(self, import_path_base):\n self.load_state_dict(torch.load(os.path.join(import_path_base, self._name)), strict=False)\n\n def build_scene(self, device, object_features, batch_index, meta_data):\n \n if self._featurizer is not None:\n features = self._featurizer.featurize_scene(device, object_features, batch_index, meta_data)\n attribute_features = features['attribute_features']\n relation_features = features['relation_features']\n object_num = features['object_num']\n\n if self._cached:\n attribute_features, relation_features['features'] = self._oracle.compute_all_log_likelihood_2(attribute_features, relation_features['features'])\n\n if self._calibrator is not None:\n attribute_features[:, self._oracle._ontology._attribute_index], relation_features = self._calibrator(attribute_features[:, self._oracle._ontology._attribute_index], relation_features)\n\n if self._visual_rule_learner is not None:\n relation_features['object_num'] = object_num\n attribute_features[:, self._oracle._ontology._attribute_index], relation_features = self._visual_rule_learner(attribute_features[:, self._oracle._ontology._attribute_index], relation_features)\n else:\n object_num = object_features.size()[0]\n attribute_features = object_features.view(object_num, -1)\n arg1 = attribute_features.repeat(1, object_num).view(object_num**2, -1)\n arg2 = attribute_features.repeat(self._object_num, 1)\n relation_features = torch.cat([arg1, arg2], dim=1)\n\n return BatchWorld(device, object_num, attribute_features, relation_features, batch_index, meta_data, \\\n attention_transfer_state_dim=self._attention_transfer_state_dim).to(object_features.dtype)\n\n def forward(self, program_batch_list, is_training, return_trace=False, modulator_switch=True):\n\n # Initialize the trace\n all_traces = []\n all_results = []\n device = program_batch_list[0].device\n\n # Main loop\n 
for program_batch in program_batch_list:\n \n # Set the objects features\n world = self.build_scene(program_batch.device, program_batch._object_features, program_batch._object_batch_index, program_batch._meta_data)\n # print('---------------------------------------------')\n\n # Modulator loops\n if self._has_modulator and modulator_switch:\n if not self._apply_modulation_everywhere:\n for i in range(len(program_batch._op_batch_list) - 1):\n program_batch._op_batch_list._op_id += 'n'\n \n # Forward loop\n trace = []\n for i, op_batch in enumerate(program_batch._op_batch_list):\n if len(program_batch._dependencies[i]) > 1:\n input_tuple = tuple(itemgetter(*program_batch._dependencies[i])(trace)) \n elif len(program_batch._dependencies[i]) == 1:\n input_tuple = (trace[program_batch._dependencies[i][0]],)\n else: \n input_tuple = (None,)\n\n x, terminate = self._transform_attention(op_batch._op_id, True, world, op_batch, input_tuple, i == len(program_batch._op_batch_list) - 1, is_training)\n \n # Gate the unaffected questions\n if i < len(program_batch._op_batch_list) - 1 and input_tuple[0] is not None and op_batch._mask is not None:\n x = x.gate(input_tuple[0], op_batch._mask)\n\n trace.append(x)\n\n if terminate:\n break\n\n # Backward loop\n reversed_dependencies = util.reverse_dependencies(program_batch._dependencies)\n first_attention_state = (BatchAttentionState(trace[-1]._name, device, trace[-1]._state, set_zeros=True).to(world.dtype), ) if not isinstance(trace[-1], (tuple, list)) else \\\n tuple([BatchAttentionState(att._name, device, att._state, set_zeros=True).to(world.dtype) for att in trace[-1]])\n \n trace = [None for _ in range(len(program_batch._op_batch_list))]\n for i, op_batch in reversed(list(enumerate(program_batch._op_batch_list))):\n if len(reversed_dependencies[i]) == 1:\n temp = trace[reversed_dependencies[i][0]]\n\n if isinstance(temp, (tuple, list)):\n input_tuple = (temp[1],) if i == len(program_batch._op_batch_list) - 2 else (temp[0],)\n else:\n input_tuple = (temp,) \n else: \n input_tuple = first_attention_state\n\n x, terminate = self._transform_attention(op_batch._op_id, False, world, op_batch, input_tuple, i == 0, is_training)\n \n # Gate the unaffected questions\n # print(op_batch._op_name)\n if len(program_batch._dependencies[i]) > 0 and op_batch._mask is not None and isinstance(x, BatchAttentionState) and i != len(program_batch._op_batch_list) - 1:\n x = x.gate(input_tuple[0], op_batch._mask)\n\n trace[i] = x\n\n if terminate:\n break\n\n # if self._atm is not None:\n # attention_transfer = self._atm(program_batch)\n\n # Execution loop\n trace = []\n for i, op_batch in enumerate(program_batch._op_batch_list):\n # print(op_batch._op_name)\n if len(program_batch._dependencies[i]) > 1:\n input_tuple = tuple(itemgetter(*program_batch._dependencies[i])(trace)) \n elif len(program_batch._dependencies[i]) == 1:\n input_tuple = (trace[program_batch._dependencies[i][0]],)\n else: \n input_tuple = ()\n\n x, terminate = self._execute(op_batch._op_id, world, op_batch, input_tuple, i == len(program_batch._op_batch_list) - 1, is_training)\n\n # # Apply the transfer function if available\n # if self._atm is not None and isinstance(x, BatchVariableSet):\n # alpha = attention_transfer[i, :, 0].unsqueeze(1)\n # beta = attention_transfer[i, :, 1].unsqueeze(1)\n # temp = alpha * x._log_attention\n # x._log_attention = temp - util.safe_log((beta * util.log_not(x._log_attention)).exp() + temp.exp())\n \n # Gate the unaffected questions\n if isinstance(x, BatchVariableSet) and 
len(input_tuple) > 0 and op_batch._mask is not None:\n x = x.gate(input_tuple[0], op_batch._mask)\n\n trace.append(x)\n\n if terminate:\n break\n\n result = trace[-1] if len(trace) > 0 else None\n all_results.append(result)\n all_traces.append(trace)\n\n result = gather_results(all_results, device, util.is_cuda(device))\n \n if return_trace:\n return result, all_traces \n \n return result\n"
] |
[
[
"torch.cat",
"torch.tensor"
]
] |
weecology/albumentations
|
[
"cc8fbb6e2fcc4f6a4c87a29b6b0784391b0e2db4"
] |
[
"tests/test_transforms.py"
] |
[
"from functools import partial\n\nimport cv2\nimport numpy as np\nimport pytest\nimport random\n\nimport albumentations as A\nimport albumentations.augmentations.functional as F\nimport albumentations.augmentations.geometric.functional as FGeometric\n\nfrom torchvision.transforms import ColorJitter\nfrom PIL import Image\n\n\ndef set_seed(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n\n\ndef test_transpose_both_image_and_mask():\n image = np.ones((8, 6, 3))\n mask = np.ones((8, 6))\n augmentation = A.Transpose(p=1)\n augmented = augmentation(image=image, mask=mask)\n assert augmented[\"image\"].shape == (6, 8, 3)\n assert augmented[\"mask\"].shape == (6, 8)\n\n\[email protected](\"interpolation\", [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC])\ndef test_safe_rotate_interpolation(interpolation):\n image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)\n aug = A.SafeRotate(limit=(45, 45), interpolation=interpolation, p=1)\n data = aug(image=image, mask=mask)\n expected_image = FGeometric.safe_rotate(image, 45, interpolation=interpolation, border_mode=cv2.BORDER_REFLECT_101)\n expected_mask = FGeometric.safe_rotate(\n mask, 45, interpolation=cv2.INTER_NEAREST, border_mode=cv2.BORDER_REFLECT_101\n )\n assert np.array_equal(data[\"image\"], expected_image)\n assert np.array_equal(data[\"mask\"], expected_mask)\n\n\[email protected](\"interpolation\", [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC])\ndef test_rotate_interpolation(interpolation):\n image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)\n aug = A.Rotate(limit=(45, 45), interpolation=interpolation, p=1)\n data = aug(image=image, mask=mask)\n expected_image = FGeometric.rotate(image, 45, interpolation=interpolation, border_mode=cv2.BORDER_REFLECT_101)\n expected_mask = FGeometric.rotate(mask, 45, interpolation=cv2.INTER_NEAREST, border_mode=cv2.BORDER_REFLECT_101)\n assert np.array_equal(data[\"image\"], expected_image)\n assert np.array_equal(data[\"mask\"], expected_mask)\n\n\[email protected](\"interpolation\", [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC])\ndef test_shift_scale_rotate_interpolation(interpolation):\n image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)\n aug = A.ShiftScaleRotate(\n shift_limit=(0.2, 0.2), scale_limit=(1.1, 1.1), rotate_limit=(45, 45), interpolation=interpolation, p=1\n )\n data = aug(image=image, mask=mask)\n expected_image = FGeometric.shift_scale_rotate(\n image, angle=45, scale=2.1, dx=0.2, dy=0.2, interpolation=interpolation, border_mode=cv2.BORDER_REFLECT_101\n )\n expected_mask = FGeometric.shift_scale_rotate(\n mask, angle=45, scale=2.1, dx=0.2, dy=0.2, interpolation=cv2.INTER_NEAREST, border_mode=cv2.BORDER_REFLECT_101\n )\n assert np.array_equal(data[\"image\"], expected_image)\n assert np.array_equal(data[\"mask\"], expected_mask)\n\n\[email protected](\"interpolation\", [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC])\ndef test_optical_distortion_interpolation(interpolation):\n image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)\n aug = A.OpticalDistortion(distort_limit=(0.05, 0.05), shift_limit=(0, 0), interpolation=interpolation, p=1)\n data = 
aug(image=image, mask=mask)\n expected_image = F.optical_distortion(\n image, k=0.05, dx=0, dy=0, interpolation=interpolation, border_mode=cv2.BORDER_REFLECT_101\n )\n expected_mask = F.optical_distortion(\n mask, k=0.05, dx=0, dy=0, interpolation=cv2.INTER_NEAREST, border_mode=cv2.BORDER_REFLECT_101\n )\n assert np.array_equal(data[\"image\"], expected_image)\n assert np.array_equal(data[\"mask\"], expected_mask)\n\n\[email protected](\"interpolation\", [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC])\ndef test_grid_distortion_interpolation(interpolation):\n image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)\n aug = A.GridDistortion(num_steps=1, distort_limit=(0.3, 0.3), interpolation=interpolation, p=1)\n data = aug(image=image, mask=mask)\n expected_image = F.grid_distortion(\n image, num_steps=1, xsteps=[1.3], ysteps=[1.3], interpolation=interpolation, border_mode=cv2.BORDER_REFLECT_101\n )\n expected_mask = F.grid_distortion(\n mask,\n num_steps=1,\n xsteps=[1.3],\n ysteps=[1.3],\n interpolation=cv2.INTER_NEAREST,\n border_mode=cv2.BORDER_REFLECT_101,\n )\n assert np.array_equal(data[\"image\"], expected_image)\n assert np.array_equal(data[\"mask\"], expected_mask)\n\n\[email protected](\"size\", [17, 21, 33])\ndef test_grid_distortion_steps(size):\n image = np.random.rand(size, size, 3)\n aug = A.GridDistortion(num_steps=size - 2, p=1)\n data = aug(image=image)\n assert np.array_equal(data[\"image\"].shape, (size, size, 3))\n\n\[email protected](\"interpolation\", [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC])\ndef test_elastic_transform_interpolation(monkeypatch, interpolation):\n image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)\n monkeypatch.setattr(\n \"albumentations.augmentations.geometric.ElasticTransform.get_params\", lambda *_: {\"random_state\": 1111}\n )\n aug = A.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, interpolation=interpolation, p=1)\n data = aug(image=image, mask=mask)\n expected_image = FGeometric.elastic_transform(\n image,\n alpha=1,\n sigma=50,\n alpha_affine=50,\n interpolation=interpolation,\n border_mode=cv2.BORDER_REFLECT_101,\n random_state=np.random.RandomState(1111),\n )\n expected_mask = FGeometric.elastic_transform(\n mask,\n alpha=1,\n sigma=50,\n alpha_affine=50,\n interpolation=cv2.INTER_NEAREST,\n border_mode=cv2.BORDER_REFLECT_101,\n random_state=np.random.RandomState(1111),\n )\n assert np.array_equal(data[\"image\"], expected_image)\n assert np.array_equal(data[\"mask\"], expected_mask)\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ElasticTransform, {}],\n [A.GridDistortion, {}],\n [A.ShiftScaleRotate, {\"rotate_limit\": 45}],\n [A.RandomScale, {\"scale_limit\": 0.5}],\n [A.RandomSizedCrop, {\"min_max_height\": (80, 90), \"height\": 100, \"width\": 100}],\n [A.LongestMaxSize, {\"max_size\": 50}],\n [A.Rotate, {}],\n [A.SafeRotate, {}],\n [A.OpticalDistortion, {}],\n [A.IAAAffine, {\"scale\": 1.5}],\n [A.IAAPiecewiseAffine, {\"scale\": 1.5}],\n [A.IAAPerspective, {}],\n [A.GlassBlur, {}],\n [A.Perspective, {}],\n [A.Affine, {}],\n [A.PiecewiseAffine, {}],\n ],\n)\ndef test_binary_mask_interpolation(augmentation_cls, params):\n \"\"\"Checks whether transformations based on DualTransform does not introduce a mask interpolation artifacts\"\"\"\n aug = augmentation_cls(p=1, **params)\n image = 
np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)\n data = aug(image=image, mask=mask)\n assert np.array_equal(np.unique(data[\"mask\"]), np.array([0, 1]))\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ElasticTransform, {}],\n [A.GridDistortion, {}],\n [A.ShiftScaleRotate, {\"rotate_limit\": 45}],\n [A.RandomScale, {\"scale_limit\": 0.5}],\n [A.RandomSizedCrop, {\"min_max_height\": (80, 90), \"height\": 100, \"width\": 100}],\n [A.LongestMaxSize, {\"max_size\": 50}],\n [A.Rotate, {}],\n [A.SafeRotate, {}],\n [A.Resize, {\"height\": 80, \"width\": 90}],\n [A.Resize, {\"height\": 120, \"width\": 130}],\n [A.OpticalDistortion, {}],\n [A.GlassBlur, {}],\n [A.Perspective, {}],\n [A.Affine, {}],\n [A.PiecewiseAffine, {}],\n ],\n)\ndef test_semantic_mask_interpolation(augmentation_cls, params):\n \"\"\"Checks whether transformations based on DualTransform does not introduce a mask interpolation artifacts.\n Note: IAAAffine, IAAPiecewiseAffine, IAAPerspective does not properly operate if mask has values other than {0;1}\n \"\"\"\n aug = augmentation_cls(p=1, **params)\n image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n mask = np.random.randint(low=0, high=4, size=(100, 100), dtype=np.uint8) * 64\n\n data = aug(image=image, mask=mask)\n assert np.array_equal(np.unique(data[\"mask\"]), np.array([0, 64, 128, 192]))\n\n\ndef __test_multiprocessing_support_proc(args):\n x, transform = args\n return transform(image=x)\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ElasticTransform, {}],\n [A.GridDistortion, {}],\n [A.ShiftScaleRotate, {\"rotate_limit\": 45}],\n [A.RandomScale, {\"scale_limit\": 0.5}],\n [A.RandomSizedCrop, {\"min_max_height\": (80, 90), \"height\": 100, \"width\": 100}],\n [A.LongestMaxSize, {\"max_size\": 50}],\n [A.Rotate, {}],\n [A.SafeRotate, {}],\n [A.OpticalDistortion, {}],\n [A.IAAAffine, {\"scale\": 1.5}],\n [A.IAAPiecewiseAffine, {\"scale\": 1.5}],\n [A.IAAPerspective, {}],\n [A.Sharpen, {}],\n [A.FancyPCA, {}],\n [A.GlassBlur, {}],\n [A.Perspective, {}],\n [A.Affine, {}],\n [A.PiecewiseAffine, {}],\n ],\n)\ndef test_multiprocessing_support(augmentation_cls, params, multiprocessing_context):\n \"\"\"Checks whether we can use augmentations in multiprocessing environments\"\"\"\n aug = augmentation_cls(p=1, **params)\n image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n\n pool = multiprocessing_context.Pool(8)\n pool.map(__test_multiprocessing_support_proc, map(lambda x: (x, aug), [image] * 100))\n pool.close()\n pool.join()\n\n\ndef test_force_apply():\n \"\"\"\n Unit test for https://github.com/albumentations-team/albumentations/issues/189\n \"\"\"\n aug = A.Compose(\n [\n A.OneOrOther(\n A.Compose(\n [\n A.RandomSizedCrop(min_max_height=(256, 1025), height=512, width=512, p=1),\n A.OneOf(\n [\n A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),\n A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),\n ]\n ),\n ]\n ),\n A.Compose(\n [\n A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),\n A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),\n ]\n ),\n ),\n A.HorizontalFlip(p=1),\n A.RandomBrightnessContrast(p=0.5),\n ]\n )\n\n res = aug(image=np.zeros((1248, 1248, 3), dtype=np.uint8))\n assert res[\"image\"].shape[0] in (256, 384, 512)\n assert res[\"image\"].shape[1] in (256, 384, 512)\n\n\[email 
protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ChannelShuffle, {}],\n [A.GaussNoise, {}],\n [A.Cutout, {}],\n [A.CoarseDropout, {}],\n [A.ImageCompression, {}],\n [A.HueSaturationValue, {}],\n [A.RGBShift, {}],\n [A.RandomBrightnessContrast, {}],\n [A.Blur, {}],\n [A.MotionBlur, {}],\n [A.MedianBlur, {}],\n [A.CLAHE, {}],\n [A.InvertImg, {}],\n [A.RandomGamma, {}],\n [A.ToGray, {}],\n [A.VerticalFlip, {}],\n [A.HorizontalFlip, {}],\n [A.Flip, {}],\n [A.Transpose, {}],\n [A.RandomRotate90, {}],\n [A.Rotate, {}],\n [A.SafeRotate, {}],\n [A.OpticalDistortion, {}],\n [A.GridDistortion, {}],\n [A.ElasticTransform, {}],\n [A.Normalize, {}],\n [A.ToFloat, {}],\n [A.FromFloat, {}],\n [A.ChannelDropout, {}],\n [A.Solarize, {}],\n [A.Posterize, {}],\n [A.Equalize, {}],\n [A.MultiplicativeNoise, {}],\n [A.FancyPCA, {}],\n [A.GlassBlur, {}],\n [A.GridDropout, {}],\n [A.ColorJitter, {}],\n [A.Perspective, {}],\n [A.Sharpen, {\"alpha\": [0.2, 0.2], \"lightness\": [0.5, 0.5]}],\n ],\n)\ndef test_additional_targets_for_image_only(augmentation_cls, params):\n aug = A.Compose([augmentation_cls(always_apply=True, **params)], additional_targets={\"image2\": \"image\"})\n for _i in range(10):\n image1 = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)\n image2 = image1.copy()\n res = aug(image=image1, image2=image2)\n aug1 = res[\"image\"]\n aug2 = res[\"image2\"]\n assert np.array_equal(aug1, aug2)\n\n\ndef test_lambda_transform():\n def negate_image(image, **kwargs):\n return -image\n\n def one_hot_mask(mask, num_channels, **kwargs):\n new_mask = np.eye(num_channels, dtype=np.uint8)[mask]\n return new_mask\n\n def vflip_bbox(bbox, **kwargs):\n return F.bbox_vflip(bbox, **kwargs)\n\n def vflip_keypoint(keypoint, **kwargs):\n return F.keypoint_vflip(keypoint, **kwargs)\n\n aug = A.Lambda(\n image=negate_image, mask=partial(one_hot_mask, num_channels=16), bbox=vflip_bbox, keypoint=vflip_keypoint, p=1\n )\n\n output = aug(\n image=np.ones((10, 10, 3), dtype=np.float32),\n mask=np.tile(np.arange(0, 10), (10, 1)),\n bboxes=[(10, 15, 25, 35)],\n keypoints=[(20, 30, 40, 50)],\n )\n assert (output[\"image\"] < 0).all()\n assert output[\"mask\"].shape[2] == 16 # num_channels\n assert output[\"bboxes\"] == [F.bbox_vflip((10, 15, 25, 35), 10, 10)]\n assert output[\"keypoints\"] == [F.keypoint_vflip((20, 30, 40, 50), 10, 10)]\n\n\ndef test_channel_droput():\n img = np.ones((10, 10, 3), dtype=np.float32)\n\n aug = A.ChannelDropout(channel_drop_range=(1, 1), always_apply=True) # Drop one channel\n\n transformed = aug(image=img)[\"image\"]\n\n assert sum(transformed[:, :, c].max() for c in range(img.shape[2])) == 2\n\n aug = A.ChannelDropout(channel_drop_range=(2, 2), always_apply=True) # Drop two channels\n transformed = aug(image=img)[\"image\"]\n\n assert sum(transformed[:, :, c].max() for c in range(img.shape[2])) == 1\n\n\ndef test_equalize():\n aug = A.Equalize(p=1)\n\n img = np.random.randint(0, 256, 256 * 256 * 3, np.uint8).reshape((256, 256, 3))\n a = aug(image=img)[\"image\"]\n b = F.equalize(img)\n assert np.all(a == b)\n\n mask = np.random.randint(0, 2, 256 * 256, np.uint8).reshape((256, 256))\n aug = A.Equalize(mask=mask, p=1)\n a = aug(image=img)[\"image\"]\n b = F.equalize(img, mask=mask)\n assert np.all(a == b)\n\n def mask_func(image, test): # skipcq: PYL-W0613\n return mask\n\n aug = A.Equalize(mask=mask_func, mask_params=[\"test\"], p=1)\n assert np.all(aug(image=img, test=mask)[\"image\"] == F.equalize(img, mask=mask))\n\n\ndef test_crop_non_empty_mask():\n def 
_test_crop(mask, crop, aug, n=1):\n for _ in range(n):\n augmented = aug(image=mask, mask=mask)\n np.testing.assert_array_equal(augmented[\"image\"], crop)\n np.testing.assert_array_equal(augmented[\"mask\"], crop)\n\n # test general case\n mask_1 = np.zeros([10, 10])\n mask_1[0, 0] = 1\n crop_1 = np.array([[1]])\n aug_1 = A.CropNonEmptyMaskIfExists(1, 1)\n\n # test empty mask\n mask_2 = np.zeros([10, 10])\n crop_2 = np.array([[0]])\n aug_2 = A.CropNonEmptyMaskIfExists(1, 1)\n\n # test ignore values\n mask_3 = np.ones([2, 2])\n mask_3[0, 0] = 2\n crop_3 = np.array([[2]])\n aug_3 = A.CropNonEmptyMaskIfExists(1, 1, ignore_values=[1])\n\n # test ignore channels\n mask_4 = np.zeros([2, 2, 2])\n mask_4[0, 0, 0] = 1\n mask_4[1, 1, 1] = 2\n crop_4 = np.array([[[1, 0]]])\n aug_4 = A.CropNonEmptyMaskIfExists(1, 1, ignore_channels=[1])\n\n # test full size crop\n mask_5 = np.random.random([10, 10, 3])\n crop_5 = mask_5\n aug_5 = A.CropNonEmptyMaskIfExists(10, 10)\n\n mask_6 = np.zeros([10, 10, 3])\n mask_6[0, 0, 0] = 0\n crop_6 = mask_6\n aug_6 = A.CropNonEmptyMaskIfExists(10, 10, ignore_values=[1])\n\n _test_crop(mask_1, crop_1, aug_1, n=1)\n _test_crop(mask_2, crop_2, aug_2, n=1)\n _test_crop(mask_3, crop_3, aug_3, n=5)\n _test_crop(mask_4, crop_4, aug_4, n=5)\n _test_crop(mask_5, crop_5, aug_5, n=1)\n _test_crop(mask_6, crop_6, aug_6, n=10)\n\n\[email protected](\"interpolation\", [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC])\ndef test_downscale(interpolation):\n img_float = np.random.rand(100, 100, 3)\n img_uint = (img_float * 255).astype(\"uint8\")\n\n aug = A.Downscale(scale_min=0.5, scale_max=0.5, interpolation=interpolation, always_apply=True)\n\n for img in (img_float, img_uint):\n transformed = aug(image=img)[\"image\"]\n func_applied = F.downscale(img, scale=0.5, interpolation=interpolation)\n np.testing.assert_almost_equal(transformed, func_applied)\n\n\ndef test_crop_keypoints():\n image = np.random.randint(0, 256, (100, 100), np.uint8)\n keypoints = [(50, 50, 0, 0)]\n\n aug = A.Crop(0, 0, 80, 80, p=1)\n result = aug(image=image, keypoints=keypoints)\n assert result[\"keypoints\"] == keypoints\n\n aug = A.Crop(50, 50, 100, 100, p=1)\n result = aug(image=image, keypoints=keypoints)\n assert result[\"keypoints\"] == [(0, 0, 0, 0)]\n\n\ndef test_longest_max_size_keypoints():\n img = np.random.randint(0, 256, [50, 10], np.uint8)\n keypoints = [(9, 5, 0, 0)]\n\n aug = A.LongestMaxSize(max_size=100, p=1)\n result = aug(image=img, keypoints=keypoints)\n assert result[\"keypoints\"] == [(18, 10, 0, 0)]\n\n aug = A.LongestMaxSize(max_size=5, p=1)\n result = aug(image=img, keypoints=keypoints)\n assert result[\"keypoints\"] == [(0.9, 0.5, 0, 0)]\n\n aug = A.LongestMaxSize(max_size=50, p=1)\n result = aug(image=img, keypoints=keypoints)\n assert result[\"keypoints\"] == [(9, 5, 0, 0)]\n\n\ndef test_smallest_max_size_keypoints():\n img = np.random.randint(0, 256, [50, 10], np.uint8)\n keypoints = [(9, 5, 0, 0)]\n\n aug = A.SmallestMaxSize(max_size=100, p=1)\n result = aug(image=img, keypoints=keypoints)\n assert result[\"keypoints\"] == [(90, 50, 0, 0)]\n\n aug = A.SmallestMaxSize(max_size=5, p=1)\n result = aug(image=img, keypoints=keypoints)\n assert result[\"keypoints\"] == [(4.5, 2.5, 0, 0)]\n\n aug = A.SmallestMaxSize(max_size=10, p=1)\n result = aug(image=img, keypoints=keypoints)\n assert result[\"keypoints\"] == [(9, 5, 0, 0)]\n\n\ndef test_resize_keypoints():\n img = np.random.randint(0, 256, [50, 10], np.uint8)\n keypoints = [(9, 5, 0, 0)]\n\n aug = A.Resize(height=100, width=5, 
p=1)\n result = aug(image=img, keypoints=keypoints)\n assert result[\"keypoints\"] == [(4.5, 10, 0, 0)]\n\n aug = A.Resize(height=50, width=10, p=1)\n result = aug(image=img, keypoints=keypoints)\n assert result[\"keypoints\"] == [(9, 5, 0, 0)]\n\n\[email protected](\n \"image\",\n [\n np.random.randint(0, 256, [256, 320], np.uint8),\n np.random.random([256, 320]).astype(np.float32),\n np.random.randint(0, 256, [256, 320, 1], np.uint8),\n np.random.random([256, 320, 1]).astype(np.float32),\n ],\n)\ndef test_multiplicative_noise_grayscale(image):\n m = 0.5\n aug = A.MultiplicativeNoise(m, p=1)\n result = aug(image=image)[\"image\"]\n image = F.clip(image * m, image.dtype, F.MAX_VALUES_BY_DTYPE[image.dtype])\n assert np.allclose(image, result)\n\n aug = A.MultiplicativeNoise(elementwise=True, p=1)\n params = aug.get_params_dependent_on_targets({\"image\": image})\n mul = params[\"multiplier\"]\n assert mul.shape == image.shape\n result = aug.apply(image, mul)\n dtype = image.dtype\n image = image.astype(np.float32) * mul\n image = F.clip(image, dtype, F.MAX_VALUES_BY_DTYPE[dtype])\n assert np.allclose(image, result)\n\n\[email protected](\n \"image\", [np.random.randint(0, 256, [256, 320, 3], np.uint8), np.random.random([256, 320, 3]).astype(np.float32)]\n)\ndef test_multiplicative_noise_rgb(image):\n dtype = image.dtype\n\n m = 0.5\n aug = A.MultiplicativeNoise(m, p=1)\n result = aug(image=image)[\"image\"]\n image = F.clip(image * m, dtype, F.MAX_VALUES_BY_DTYPE[dtype])\n assert np.allclose(image, result)\n\n aug = A.MultiplicativeNoise(elementwise=True, p=1)\n params = aug.get_params_dependent_on_targets({\"image\": image})\n mul = params[\"multiplier\"]\n assert mul.shape == image.shape[:2] + (1,)\n result = aug.apply(image, mul)\n image = F.clip(image.astype(np.float32) * mul, dtype, F.MAX_VALUES_BY_DTYPE[dtype])\n assert np.allclose(image, result)\n\n aug = A.MultiplicativeNoise(per_channel=True, p=1)\n params = aug.get_params_dependent_on_targets({\"image\": image})\n mul = params[\"multiplier\"]\n assert mul.shape == (3,)\n result = aug.apply(image, mul)\n image = F.clip(image.astype(np.float32) * mul, dtype, F.MAX_VALUES_BY_DTYPE[dtype])\n assert np.allclose(image, result)\n\n aug = A.MultiplicativeNoise(elementwise=True, per_channel=True, p=1)\n params = aug.get_params_dependent_on_targets({\"image\": image})\n mul = params[\"multiplier\"]\n assert mul.shape == image.shape\n result = aug.apply(image, mul)\n image = F.clip(image.astype(np.float32) * mul, image.dtype, F.MAX_VALUES_BY_DTYPE[image.dtype])\n assert np.allclose(image, result)\n\n\ndef test_mask_dropout():\n # In this case we have mask with all ones, so MaskDropout wipe entire mask and image\n img = np.random.randint(0, 256, [50, 10], np.uint8)\n mask = np.ones([50, 10], dtype=np.long)\n\n aug = A.MaskDropout(p=1)\n result = aug(image=img, mask=mask)\n assert np.all(result[\"image\"] == 0)\n assert np.all(result[\"mask\"] == 0)\n\n # In this case we have mask with zeros , so MaskDropout will make no changes\n img = np.random.randint(0, 256, [50, 10], np.uint8)\n mask = np.zeros([50, 10], dtype=np.long)\n\n aug = A.MaskDropout(p=1)\n result = aug(image=img, mask=mask)\n assert np.all(result[\"image\"] == img)\n assert np.all(result[\"mask\"] == 0)\n\n\[email protected](\n \"image\", [np.random.randint(0, 256, [256, 320, 3], np.uint8), np.random.random([256, 320, 3]).astype(np.float32)]\n)\ndef test_grid_dropout_mask(image):\n mask = np.ones([256, 320], dtype=np.uint8)\n aug = A.GridDropout(p=1, mask_fill_value=0)\n result 
= aug(image=image, mask=mask)\n # with mask on ones and fill_value = 0 the sum of pixels is smaller\n assert result[\"image\"].sum() < image.sum()\n assert result[\"image\"].shape == image.shape\n assert result[\"mask\"].sum() < mask.sum()\n assert result[\"mask\"].shape == mask.shape\n\n # with mask of zeros and fill_value = 0 mask should not change\n mask = np.zeros([256, 320], dtype=np.uint8)\n aug = A.GridDropout(p=1, mask_fill_value=0)\n result = aug(image=image, mask=mask)\n assert result[\"image\"].sum() < image.sum()\n assert np.all(result[\"mask\"] == 0)\n\n # with mask mask_fill_value=100, mask sum is larger\n mask = np.random.randint(0, 10, [256, 320], np.uint8)\n aug = A.GridDropout(p=1, mask_fill_value=100)\n result = aug(image=image, mask=mask)\n assert result[\"image\"].sum() < image.sum()\n assert result[\"mask\"].sum() > mask.sum()\n\n # with mask mask_fill_value=None, mask is not changed\n mask = np.ones([256, 320], dtype=np.uint8)\n aug = A.GridDropout(p=1, mask_fill_value=None)\n result = aug(image=image, mask=mask)\n assert result[\"image\"].sum() < image.sum()\n assert result[\"mask\"].sum() == mask.sum()\n\n\[email protected](\n [\"ratio\", \"holes_number_x\", \"holes_number_y\", \"unit_size_min\", \"unit_size_max\", \"shift_x\", \"shift_y\"],\n [\n (0.00001, 10, 10, 100, 100, 50, 50),\n (0.9, 100, None, 200, None, 0, 0),\n (0.4556, 10, 20, None, 200, 0, 0),\n (0.00004, None, None, 2, 100, None, None),\n ],\n)\ndef test_grid_dropout_params(ratio, holes_number_x, holes_number_y, unit_size_min, unit_size_max, shift_x, shift_y):\n img = np.random.randint(0, 256, [256, 320], np.uint8)\n\n aug = A.GridDropout(\n ratio=ratio,\n unit_size_min=unit_size_min,\n unit_size_max=unit_size_max,\n holes_number_x=holes_number_x,\n holes_number_y=holes_number_y,\n shift_x=shift_x,\n shift_y=shift_y,\n random_offset=False,\n fill_value=0,\n p=1,\n )\n result = aug(image=img)[\"image\"]\n # with fill_value = 0 the sum of pixels is smaller\n assert result.sum() < img.sum()\n assert result.shape == img.shape\n params = aug.get_params_dependent_on_targets({\"image\": img})\n holes = params[\"holes\"]\n assert len(holes[0]) == 4\n # check grid offsets\n if shift_x:\n assert holes[0][0] == shift_x\n else:\n assert holes[0][0] == 0\n if shift_y:\n assert holes[0][1] == shift_y\n else:\n assert holes[0][1] == 0\n # for grid set with limits\n if unit_size_min and unit_size_max:\n assert max(1, unit_size_min * ratio) <= (holes[0][2] - holes[0][0]) <= min(max(1, unit_size_max * ratio), 256)\n elif holes_number_x and holes_number_y:\n assert (holes[0][2] - holes[0][0]) == max(1, int(ratio * 320 // holes_number_x))\n assert (holes[0][3] - holes[0][1]) == max(1, int(ratio * 256 // holes_number_y))\n\n\ndef test_gauss_noise_incorrect_var_limit_type():\n with pytest.raises(TypeError) as exc_info:\n A.GaussNoise(var_limit={\"low\": 70, \"high\": 90})\n message = \"Expected var_limit type to be one of (int, float, tuple, list), got <class 'dict'>\"\n assert str(exc_info.value) == message\n\n\[email protected](\n [\"blur_limit\", \"sigma\", \"result_blur\", \"result_sigma\"],\n [\n [[0, 0], [1, 1], 0, 1],\n [[1, 1], [0, 0], 1, 0],\n [[1, 1], [1, 1], 1, 1],\n [[0, 0], [0, 0], 3, 0],\n [[0, 3], [0, 0], 3, 0],\n [[0, 3], [0.1, 0.1], 3, 0.1],\n ],\n)\ndef test_gaus_blur_limits(blur_limit, sigma, result_blur, result_sigma):\n img = np.zeros([100, 100, 3], dtype=np.uint8)\n\n aug = A.Compose([A.GaussianBlur(blur_limit=blur_limit, sigma_limit=sigma, p=1)])\n\n res = aug(image=img)[\"image\"]\n assert 
np.allclose(res, F.gaussian_blur(img, result_blur, result_sigma))\n\n\[email protected](\n [\"brightness\", \"contrast\", \"saturation\", \"hue\"],\n [\n [1, 1, 1, 0],\n [0.123, 1, 1, 0],\n [1.321, 1, 1, 0],\n [1, 0.234, 1, 0],\n [1, 1.432, 1, 0],\n [1, 1, 0.345, 0],\n [1, 1, 1.543, 0],\n ],\n)\ndef test_color_jitter(brightness, contrast, saturation, hue):\n np.random.seed(0)\n img = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)\n pil_image = Image.fromarray(img)\n\n transform = A.Compose(\n [\n A.ColorJitter(\n brightness=[brightness, brightness],\n contrast=[contrast, contrast],\n saturation=[saturation, saturation],\n hue=[hue, hue],\n p=1,\n )\n ]\n )\n\n pil_transform = ColorJitter(\n brightness=[brightness, brightness],\n contrast=[contrast, contrast],\n saturation=[saturation, saturation],\n hue=[hue, hue],\n )\n\n res1 = transform(image=img)[\"image\"]\n res2 = np.array(pil_transform(pil_image))\n\n _max = np.abs(res1.astype(np.int16) - res2.astype(np.int16)).max()\n assert _max <= 2, \"Max: {}\".format(_max)\n\n\[email protected](\n [\"brightness\", \"contrast\", \"saturation\", \"hue\"],\n [\n [1, 1, 1, 0],\n [0.123, 1, 1, 0],\n [1.321, 1, 1, 0],\n [1, 0.234, 1, 0],\n [1, 1.432, 1, 0],\n [1, 1, 0.345, 0],\n [1, 1, 1.543, 0],\n [1, 1, 1, 0.456],\n [1, 1, 1, -0.432],\n ],\n)\ndef test_color_jitter_float_uint8_equal(brightness, contrast, saturation, hue):\n img = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)\n\n transform = A.Compose(\n [\n A.ColorJitter(\n brightness=[brightness, brightness],\n contrast=[contrast, contrast],\n saturation=[saturation, saturation],\n hue=[hue, hue],\n p=1,\n )\n ]\n )\n\n res1 = transform(image=img)[\"image\"]\n res2 = (transform(image=img.astype(np.float32) / 255.0)[\"image\"] * 255).astype(np.uint8)\n\n _max = np.abs(res1.astype(np.int16) - res2.astype(np.int16)).max()\n\n if hue != 0:\n assert _max <= 10, \"Max: {}\".format(_max)\n else:\n assert _max <= 2, \"Max: {}\".format(_max)\n\n\[email protected]([\"hue\", \"sat\", \"val\"], [[13, 17, 23], [14, 18, 24], [131, 143, 151], [132, 144, 152]])\ndef test_hue_saturation_value_float_uint8_equal(hue, sat, val):\n img = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)\n\n for i in range(2):\n sign = 1 if i == 0 else -1\n for i in range(4):\n if i == 0:\n _hue = hue * sign\n _sat = 0\n _val = 0\n elif i == 1:\n _hue = 0\n _sat = sat * sign\n _val = 0\n elif i == 2:\n _hue = 0\n _sat = 0\n _val = val * sign\n else:\n _hue = hue * sign\n _sat = sat * sign\n _val = val * sign\n\n t1 = A.Compose(\n [\n A.HueSaturationValue(\n hue_shift_limit=[_hue, _hue], sat_shift_limit=[_sat, _sat], val_shift_limit=[_val, _val], p=1\n )\n ]\n )\n t2 = A.Compose(\n [\n A.HueSaturationValue(\n hue_shift_limit=[_hue / 180 * 360, _hue / 180 * 360],\n sat_shift_limit=[_sat / 255, _sat / 255],\n val_shift_limit=[_val / 255, _val / 255],\n p=1,\n )\n ]\n )\n\n res1 = t1(image=img)[\"image\"]\n res2 = (t2(image=img.astype(np.float32) / 255.0)[\"image\"] * 255).astype(np.uint8)\n\n _max = np.abs(res1.astype(np.int) - res2).max()\n assert _max <= 10, \"Max value: {}\".format(_max)\n\n\ndef test_shift_scale_separate_shift_x_shift_y(image, mask):\n aug = A.ShiftScaleRotate(shift_limit=(0.3, 0.3), shift_limit_y=(0.4, 0.4), scale_limit=0, rotate_limit=0, p=1)\n data = aug(image=image, mask=mask)\n expected_image = FGeometric.shift_scale_rotate(\n image, angle=0, scale=1, dx=0.3, dy=0.4, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_REFLECT_101\n )\n expected_mask = 
FGeometric.shift_scale_rotate(\n mask, angle=0, scale=1, dx=0.3, dy=0.4, interpolation=cv2.INTER_NEAREST, border_mode=cv2.BORDER_REFLECT_101\n )\n assert np.array_equal(data[\"image\"], expected_image)\n assert np.array_equal(data[\"mask\"], expected_mask)\n\n\[email protected]([\"val_uint8\"], [[0], [1], [128], [255]])\ndef test_glass_blur_float_uint8_diff_less_than_two(val_uint8):\n\n x_uint8 = np.zeros((5, 5)).astype(np.uint8)\n x_uint8[2, 2] = val_uint8\n\n x_float32 = np.zeros((5, 5)).astype(np.float32)\n x_float32[2, 2] = val_uint8 / 255.0\n\n glassblur = A.GlassBlur(always_apply=True, max_delta=1)\n\n np.random.seed(0)\n blur_uint8 = glassblur(image=x_uint8)[\"image\"]\n\n np.random.seed(0)\n blur_float32 = glassblur(image=x_float32)[\"image\"]\n\n # Before comparison, rescale the blur_float32 to [0, 255]\n diff = np.abs(blur_uint8 - blur_float32 * 255)\n\n # The difference between the results of float32 and uint8 will be at most 2.\n assert np.all(diff <= 2.0)\n\n\[email protected](\n [\"img_dtype\", \"px\", \"percent\", \"pad_mode\", \"pad_cval\", \"keep_size\"],\n [\n [np.uint8, 10, None, cv2.BORDER_CONSTANT, 0, True],\n [np.uint8, -10, None, cv2.BORDER_CONSTANT, 0, True],\n [np.uint8, 10, None, cv2.BORDER_CONSTANT, 0, False],\n [np.uint8, -10, None, cv2.BORDER_CONSTANT, 0, False],\n [np.uint8, None, 0.1, cv2.BORDER_CONSTANT, 0, True],\n [np.uint8, None, -0.1, cv2.BORDER_CONSTANT, 0, True],\n [np.uint8, None, 0.1, cv2.BORDER_CONSTANT, 0, False],\n [np.uint8, None, -0.1, cv2.BORDER_CONSTANT, 0, False],\n [np.float32, None, 0.1, cv2.BORDER_CONSTANT, 0, False],\n [np.float32, None, -0.1, cv2.BORDER_CONSTANT, 0, False],\n [np.uint8, None, 0.1, cv2.BORDER_WRAP, 0, False],\n [np.uint8, None, 0.1, cv2.BORDER_REPLICATE, 0, False],\n [np.uint8, None, 0.1, cv2.BORDER_REFLECT101, 0, False],\n ],\n)\ndef test_compare_crop_and_pad(img_dtype, px, percent, pad_mode, pad_cval, keep_size):\n h, w, c = 100, 100, 3\n mode_mapping = {\n cv2.BORDER_CONSTANT: \"constant\",\n cv2.BORDER_REPLICATE: \"edge\",\n cv2.BORDER_REFLECT101: \"reflect\",\n cv2.BORDER_WRAP: \"wrap\",\n }\n pad_mode_iaa = mode_mapping[pad_mode]\n\n bbox_params = A.BboxParams(format=\"pascal_voc\")\n keypoint_params = A.KeypointParams(format=\"xy\", remove_invisible=False)\n\n keypoints = np.random.randint(0, min(h, w), [10, 2])\n\n bboxes = []\n for i in range(10):\n x1, y1 = np.random.randint(0, min(h, w) - 2, 2)\n x2 = np.random.randint(x1 + 1, w - 1)\n y2 = np.random.randint(y1 + 1, h - 1)\n bboxes.append([x1, y1, x2, y2, 0])\n\n transform_albu = A.Compose(\n [\n A.CropAndPad(\n px=px,\n percent=percent,\n pad_mode=pad_mode,\n pad_cval=pad_cval,\n keep_size=keep_size,\n p=1,\n interpolation=cv2.INTER_AREA\n if (px is not None and px < 0) or (percent is not None and percent < 0)\n else cv2.INTER_LINEAR,\n )\n ],\n bbox_params=bbox_params,\n keypoint_params=keypoint_params,\n )\n transform_iaa = A.Compose(\n [A.IAACropAndPad(px=px, percent=percent, pad_mode=pad_mode_iaa, pad_cval=pad_cval, keep_size=keep_size, p=1)],\n bbox_params=bbox_params,\n keypoint_params=keypoint_params,\n )\n\n if img_dtype == np.uint8:\n img = np.random.randint(0, 256, (h, w, c), dtype=np.uint8)\n else:\n img = np.random.random((h, w, c)).astype(img_dtype)\n\n res_albu = transform_albu(image=img, keypoints=keypoints, bboxes=bboxes)\n res_iaa = transform_iaa(image=img, keypoints=keypoints, bboxes=bboxes)\n\n for key, item in res_albu.items():\n if key == \"bboxes\":\n bboxes = np.array(res_iaa[key])\n h = bboxes[:, 3] - bboxes[:, 1]\n w = bboxes[:, 2] - 
bboxes[:, 0]\n res_iaa[key] = bboxes[(h > 0) & (w > 0)]\n assert np.allclose(item, res_iaa[key]), f\"{key} are not equal\"\n\n\ndef test_perspective_keep_size():\n h, w = 100, 100\n img = np.zeros([h, w, 3], dtype=np.uint8)\n h, w = img.shape[:2]\n bboxes = []\n for _ in range(10):\n x1 = np.random.randint(0, w - 1)\n y1 = np.random.randint(0, h - 1)\n x2 = np.random.randint(x1 + 1, w)\n y2 = np.random.randint(y1 + 1, h)\n bboxes.append([x1, y1, x2, y2])\n keypoints = [(np.random.randint(0, w), np.random.randint(0, h), np.random.random()) for _ in range(10)]\n\n transform_1 = A.Compose(\n [A.Perspective(keep_size=True, p=1)],\n keypoint_params=A.KeypointParams(\"xys\"),\n bbox_params=A.BboxParams(\"pascal_voc\", label_fields=[\"labels\"]),\n )\n transform_2 = A.Compose(\n [A.Perspective(keep_size=False, p=1), A.Resize(h, w)],\n keypoint_params=A.KeypointParams(\"xys\"),\n bbox_params=A.BboxParams(\"pascal_voc\", label_fields=[\"labels\"]),\n )\n\n set_seed()\n res_1 = transform_1(image=img, bboxes=bboxes, keypoints=keypoints, labels=[0] * len(bboxes))\n set_seed()\n res_2 = transform_2(image=img, bboxes=bboxes, keypoints=keypoints, labels=[0] * len(bboxes))\n\n assert np.allclose(res_1[\"bboxes\"], res_2[\"bboxes\"])\n assert np.allclose(res_1[\"keypoints\"], res_2[\"keypoints\"])\n"
] |
[
[
"numpy.random.random",
"numpy.allclose",
"numpy.array_equal",
"numpy.random.seed",
"numpy.abs",
"numpy.unique",
"numpy.eye",
"numpy.arange",
"numpy.ones",
"numpy.all",
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_array_equal",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.random.randint"
]
] |