Columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
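A minimal sketch of reading these records follows, assuming each row is serialized as a JSON Lines object with the fields named above; the file name data.jsonl is a placeholder, and the parallel-list layout (one hexsha/file_path/code/apis entry per file) is inferred from the rows below.

import json

# Iterate one record per line; each record describes one repository snapshot.
with open("data.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # The per-file columns are parallel lists of equal length.
        for sha, path, code, apis in zip(
            row["hexsha"], row["file_path"], row["code"], row["apis"]
        ):
            print(row["repo_name"], sha[:8], path, len(code), len(apis))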
GardnerOne/ml-agents
[ "4f3d2117cfc72999abeea99039145d0bee9c56cf" ]
[ "ml-agents/mlagents/trainers/policy/policy.py" ]
[ "from abc import abstractmethod\nfrom typing import Dict, List, Optional\nimport numpy as np\n\nfrom mlagents_envs.base_env import DecisionSteps\nfrom mlagents_envs.exception import UnityException\n\nfrom mlagents.model_serialization import SerializationSettings\nfrom mlagents.trainers.action_info import ActionInfo\nfrom mlagents_envs.base_env import BehaviorSpec\nfrom mlagents.trainers.settings import TrainerSettings, NetworkSettings\n\n\nclass UnityPolicyException(UnityException):\n \"\"\"\n Related to errors with the Trainer.\n \"\"\"\n\n pass\n\n\nclass Policy:\n def __init__(\n self,\n seed: int,\n behavior_spec: BehaviorSpec,\n trainer_settings: TrainerSettings,\n model_path: str,\n load: bool = False,\n tanh_squash: bool = False,\n reparameterize: bool = False,\n condition_sigma_on_obs: bool = True,\n ):\n self.behavior_spec = behavior_spec\n self.trainer_settings = trainer_settings\n self.network_settings: NetworkSettings = trainer_settings.network_settings\n self.seed = seed\n self.act_size = (\n list(behavior_spec.discrete_action_branches)\n if behavior_spec.is_action_discrete()\n else [behavior_spec.action_size]\n )\n self.vec_obs_size = sum(\n shape[0] for shape in behavior_spec.observation_shapes if len(shape) == 1\n )\n self.vis_obs_size = sum(\n 1 for shape in behavior_spec.observation_shapes if len(shape) == 3\n )\n self.model_path = model_path\n self.initialize_path = self.trainer_settings.init_path\n self._keep_checkpoints = self.trainer_settings.keep_checkpoints\n self.use_continuous_act = behavior_spec.is_action_continuous()\n self.num_branches = self.behavior_spec.action_size\n self.previous_action_dict: Dict[str, np.array] = {}\n self.memory_dict: Dict[str, np.ndarray] = {}\n self.normalize = trainer_settings.network_settings.normalize\n self.use_recurrent = self.network_settings.memory is not None\n self.load = load\n self.h_size = self.network_settings.hidden_units\n num_layers = self.network_settings.num_layers\n if num_layers < 1:\n num_layers = 1\n self.num_layers = num_layers\n\n self.vis_encode_type = self.network_settings.vis_encode_type\n self.tanh_squash = tanh_squash\n self.reparameterize = reparameterize\n self.condition_sigma_on_obs = condition_sigma_on_obs\n\n self.m_size = 0\n self.sequence_length = 1\n if self.network_settings.memory is not None:\n self.m_size = self.network_settings.memory.memory_size\n self.sequence_length = self.network_settings.memory.sequence_length\n\n # Non-exposed parameters; these aren't exposed because they don't have a\n # good explanation and usually shouldn't be touched.\n self.log_std_min = -20\n self.log_std_max = 2\n\n def make_empty_memory(self, num_agents):\n \"\"\"\n Creates empty memory for use with RNNs\n :param num_agents: Number of agents.\n :return: Numpy array of zeros.\n \"\"\"\n return np.zeros((num_agents, self.m_size), dtype=np.float32)\n\n def save_memories(\n self, agent_ids: List[str], memory_matrix: Optional[np.ndarray]\n ) -> None:\n if memory_matrix is None:\n return\n for index, agent_id in enumerate(agent_ids):\n self.memory_dict[agent_id] = memory_matrix[index, :]\n\n def retrieve_memories(self, agent_ids: List[str]) -> np.ndarray:\n memory_matrix = np.zeros((len(agent_ids), self.m_size), dtype=np.float32)\n for index, agent_id in enumerate(agent_ids):\n if agent_id in self.memory_dict:\n memory_matrix[index, :] = self.memory_dict[agent_id]\n return memory_matrix\n\n def remove_memories(self, agent_ids):\n for agent_id in agent_ids:\n if agent_id in self.memory_dict:\n 
self.memory_dict.pop(agent_id)\n\n def make_empty_previous_action(self, num_agents):\n \"\"\"\n Creates empty previous action for use with RNNs and discrete control\n :param num_agents: Number of agents.\n :return: Numpy array of zeros.\n \"\"\"\n return np.zeros((num_agents, self.num_branches), dtype=np.int)\n\n def save_previous_action(\n self, agent_ids: List[str], action_matrix: Optional[np.ndarray]\n ) -> None:\n if action_matrix is None:\n return\n for index, agent_id in enumerate(agent_ids):\n self.previous_action_dict[agent_id] = action_matrix[index, :]\n\n def retrieve_previous_action(self, agent_ids: List[str]) -> np.ndarray:\n action_matrix = np.zeros((len(agent_ids), self.num_branches), dtype=np.int)\n for index, agent_id in enumerate(agent_ids):\n if agent_id in self.previous_action_dict:\n action_matrix[index, :] = self.previous_action_dict[agent_id]\n return action_matrix\n\n def remove_previous_action(self, agent_ids):\n for agent_id in agent_ids:\n if agent_id in self.previous_action_dict:\n self.previous_action_dict.pop(agent_id)\n\n def get_action(\n self, decision_requests: DecisionSteps, worker_id: int = 0\n ) -> ActionInfo:\n raise NotImplementedError\n\n @abstractmethod\n def update_normalization(self, vector_obs: np.ndarray) -> None:\n pass\n\n @abstractmethod\n def increment_step(self, n_steps):\n pass\n\n @abstractmethod\n def get_current_step(self):\n pass\n\n @abstractmethod\n def checkpoint(self, checkpoint_path: str, settings: SerializationSettings) -> None:\n pass\n\n @abstractmethod\n def save(self, output_filepath: str, settings: SerializationSettings) -> None:\n pass\n\n @abstractmethod\n def load_weights(self, values: List[np.ndarray]) -> None:\n pass\n\n @abstractmethod\n def get_weights(self) -> List[np.ndarray]:\n return []\n\n @abstractmethod\n def init_load_weights(self) -> None:\n pass\n" ]
[ [ "numpy.zeros" ] ]
ConleyKong/conley_estimator
[ "1f71cdb07ff5a8b2ba85c70de43941b9224e4f13" ]
[ "utils/tf_metrics.py" ]
[ "#-*- coding:utf-8 -*-\n\"\"\"\n-----------------------------------\n Project Name: conley_estimator\n File Name : MnistEstData\n Author : Conley.K\n Create Date : 2020/5/28\n Description : 引用自https://github.com/guillaumegenthial/tf_metrics\n--------------------------------------------\n Change Activity:\n 2020/4/13 10:02 :\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops.metrics_impl import _streaming_confusion_matrix\n\n\ndef precision(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro'):\n \"\"\"Multi-class precision metric for Tensorflow\n\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n pr, _, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n op, _, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (pr, op)\n\n\ndef recall(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro'):\n \"\"\"Multi-class recall metric for Tensorflow\n\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, re, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n _, op, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (re, op)\n\n\ndef f1(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro'):\n return fbeta(labels, predictions, num_classes, pos_indices, weights,\n average)\n\n\ndef fbeta(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1):\n \"\"\"Multi-class fbeta metric for Tensorflow\n\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, _, fbeta = metrics_from_confusion_matrix(\n cm, pos_indices, average=average, beta=beta)\n _, _, op = metrics_from_confusion_matrix(\n op, pos_indices, average=average, beta=beta)\n return (fbeta, op)\n\n\ndef safe_div(numerator, denominator):\n \"\"\"Safe division, return 0 if denominator is 0\"\"\"\n numerator, denominator = tf.to_float(numerator), tf.to_float(denominator)\n zeros = tf.zeros_like(numerator, dtype=numerator.dtype)\n denominator_is_zero = tf.equal(denominator, zeros)\n return tf.where(denominator_is_zero, zeros, numerator / denominator)\n\n\ndef pr_re_fbeta(cm, pos_indices, beta=1):\n \"\"\"Uses a confusion matrix to compute precision, recall and fbeta\"\"\"\n num_classes = cm.shape[0]\n neg_indices = [i for i in range(num_classes) if i not in pos_indices]\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, neg_indices] = 0\n diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask))\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[:, neg_indices] = 0\n tot_pred = tf.reduce_sum(cm * cm_mask)\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, :] = 0\n tot_gold = tf.reduce_sum(cm * cm_mask)\n\n pr = safe_div(diag_sum, tot_pred)\n re = safe_div(diag_sum, tot_gold)\n fbeta = safe_div((1. 
+ beta**2) * pr * re, beta**2 * pr + re)\n\n return pr, re, fbeta\n\n\ndef metrics_from_confusion_matrix(cm, pos_indices=None, average='micro',\n beta=1):\n \"\"\"Precision, Recall and F1 from the confusion matrix\n\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'\n \"\"\"\n num_classes = cm.shape[0]\n if pos_indices is None:\n pos_indices = [i for i in range(num_classes)]\n\n if average == 'micro':\n return pr_re_fbeta(cm, pos_indices, beta)\n elif average in {'macro', 'weighted'}:\n precisions, recalls, fbetas, n_golds = [], [], [], []\n for idx in pos_indices:\n pr, re, fbeta = pr_re_fbeta(cm, [idx], beta)\n precisions.append(pr)\n recalls.append(re)\n fbetas.append(fbeta)\n cm_mask = np.zeros([num_classes, num_classes])\n cm_mask[idx, :] = 1\n n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask)))\n\n if average == 'macro':\n pr = tf.reduce_mean(precisions)\n re = tf.reduce_mean(recalls)\n fbeta = tf.reduce_mean(fbetas)\n return pr, re, fbeta\n if average == 'weighted':\n n_gold = tf.reduce_sum(n_golds)\n pr_sum = sum(p * n for p, n in zip(precisions, n_golds))\n pr = safe_div(pr_sum, n_gold)\n re_sum = sum(r * n for r, n in zip(recalls, n_golds))\n re = safe_div(re_sum, n_gold)\n fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds))\n fbeta = safe_div(fbeta_sum, n_gold)\n return pr, re, fbeta\n\n else:\n raise NotImplementedError()\n" ]
[ [ "tensorflow.python.ops.metrics_impl._streaming_confusion_matrix", "numpy.zeros", "tensorflow.where", "tensorflow.diag_part", "numpy.ones", "tensorflow.equal", "tensorflow.zeros_like", "tensorflow.reduce_sum", "tensorflow.to_float", "tensorflow.reduce_mean" ] ]
lapaniku/cortex
[ "746be852caeff2ad80fcf45dcbaaf1899163ad2e" ]
[ "examples/pytorch/multi-model-text-analyzer/predictor.py" ]
[ "# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)\n\nimport torch\nfrom transformers import pipeline\nfrom starlette.responses import JSONResponse\n\n\nclass PythonPredictor:\n def __init__(self, config):\n device = 0 if torch.cuda.is_available() else -1\n print(f\"using device: {'cuda' if device == 0 else 'cpu'}\")\n\n self.analyzer = pipeline(task=\"sentiment-analysis\", device=device)\n self.summarizer = pipeline(task=\"summarization\", device=device)\n\n def predict(self, query_params, payload):\n model_name = query_params.get(\"model\")\n\n if model_name == \"sentiment\":\n return self.analyzer(payload[\"text\"])[0]\n elif model_name == \"summarizer\":\n summary = self.summarizer(payload[\"text\"])\n return summary[0][\"summary_text\"]\n else:\n return JSONResponse({\"error\": f\"unknown model: {model_name}\"}, status_code=400)\n" ]
[ [ "torch.cuda.is_available" ] ]
amrzv/google-research
[ "4e7e88504eff132be8adeaf426d8e13c432a0902" ]
[ "tf3d/utils/label_map_util.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Label map utility functions.\"\"\"\nimport tensorflow as tf\nfrom google.protobuf import text_format\nfrom object_detection.protos import string_int_label_map_pb2\n\n_LABEL_OFFSET = 1\n\n\ndef _validate_label_map(label_map):\n \"\"\"Checks if a label map is valid.\n\n Args:\n label_map: StringIntLabelMap to validate.\n\n Raises:\n ValueError: if label map is invalid.\n \"\"\"\n for item in label_map.item:\n if item.id < 0:\n raise ValueError('Label map ids should be >= 0.')\n if (item.id == 0 and item.name != 'background' and\n item.display_name != 'background'):\n raise ValueError('Label map id 0 is reserved for the background label')\n\n\ndef load_labelmap(path):\n \"\"\"Loads label map proto.\n\n Args:\n path: path to StringIntLabelMap proto text file.\n Returns:\n a StringIntLabelMapProto\n \"\"\"\n with tf.io.gfile.GFile(path, 'r') as fid:\n label_map_string = fid.read()\n label_map = string_int_label_map_pb2.StringIntLabelMap()\n try:\n text_format.Merge(label_map_string, label_map)\n except text_format.ParseError:\n label_map.ParseFromString(label_map_string)\n _validate_label_map(label_map)\n return label_map\n" ]
[ [ "tensorflow.io.gfile.GFile" ] ]
johnswanson/pytorch-gpu-benchmark
[ "c4ed9cb025bcc798166756ad4823243cf622ccdc" ]
[ "benchmark_models.py" ]
[ "\"\"\"Compare speed of different models with batch size 16\"\"\"\nimport torch\nfrom torchvision.models import resnet, densenet, vgg, squeezenet,inception\nfrom torch.autograd import Variable\nfrom info_utils import print_info\nimport torch.nn as nn\nimport time\nimport pandas\nimport argparse\nimport os\nfrom plot import *\n\nprint_info()\n\nMODEL_LIST = {\n resnet: resnet.__all__[1:],\n densenet: densenet.__all__[1:],\n squeezenet: squeezenet.__all__[1:],\n vgg: vgg.__all__[5:]\n}\n\nprecision=[\"single\",\"half\",'double']\ndevice_name=torch.cuda.get_device_name(0)\ndevice_count=torch.cuda.device_count()\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch Benchmarking')\nparser.add_argument('--WARM_UP','-w', type=int,default=5, required=False, help=\"Num of warm up\")\nparser.add_argument('--NUM_TEST','-n', type=int,default=50,required=False, help=\"Num of Test\")\nparser.add_argument('--BATCH_SIZE','-b', type=int, default=20, required=False, help='Num of batch size')\nparser.add_argument('--NUM_CLASSES','-c', type=int, default=1000, required=False, help='Num of class')\nparser.add_argument('--NUM_GPU','-g', type=int, default=device_count, required=False, help='Num of class')\n\nargs = parser.parse_args()\ndevice_name+='_'+str(args.NUM_GPU)+'_gpus_'\nargs.BATCH_SIZE*=args.NUM_GPU\ntorch.backends.cudnn.benchmark = True\ndef train(type='single'):\n \"\"\"use fake image for training speed test\"\"\"\n img = Variable(torch.randn(args.BATCH_SIZE, 3, 224, 224)).cuda()\n target = Variable(torch.LongTensor(args.BATCH_SIZE).random_(args.NUM_CLASSES)).cuda()\n criterion = nn.CrossEntropyLoss()\n benchmark = {}\n for model_type in MODEL_LIST.keys():\n for model_name in MODEL_LIST[model_type]:\n model = getattr(model_type, model_name)(pretrained=False)\n if args.NUM_GPU > 1:\n model = nn.DataParallel(model)\n if type is 'double':\n model=model.double()\n img=img.double()\n elif type is 'single':\n model=model.float()\n img=img.float()\n elif type is 'half':\n model=model.half()\n img=img.half()\n model.cuda()\n model.train()\n durations = []\n print('Benchmarking Training '+type+' precision type %s' % (model_name))\n for step in range(args.WARM_UP + args.NUM_TEST):\n torch.cuda.synchronize()\n start = time.time()\n model.zero_grad()\n prediction = model.forward(img)\n loss = criterion(prediction, target)\n loss.backward()\n torch.cuda.synchronize()\n end = time.time()\n if step >= args.WARM_UP:\n durations.append((end - start)*1000)\n del model\n benchmark[model_name] = durations\n return benchmark\n\ndef inference(type='single'):\n benchmark = {}\n img = Variable(torch.randn(args.BATCH_SIZE, 3, 224, 224), requires_grad=True).cuda()\n print('Device: %s' % (device_name))\n with torch.no_grad():\n for model_type in MODEL_LIST.keys():\n for model_name in MODEL_LIST[model_type]:\n model = getattr(model_type, model_name)(pretrained=False)\n if args.NUM_GPU > 1:\n model = nn.DataParallel(model)\n if type is 'double':\n model=model.double()\n img=img.double()\n elif type is 'single':\n model=model.float()\n img=img.float()\n elif type is 'half':\n model=model.half()\n img=img.half()\n model.cuda()\n model.eval()\n durations = []\n print('Benchmarking Inference '+type+' precision type %s ' % (model_name))\n for step in range(args.WARM_UP + args.NUM_TEST):\n torch.cuda.synchronize()\n start = time.time()\n model.forward(img)\n torch.cuda.synchronize()\n end = time.time()\n if step >= args.WARM_UP:\n durations.append((end - start)*1000)\n del model\n benchmark[model_name] = 
durations\n return benchmark\n\n\n\nif __name__ == '__main__':\n os.makedirs('results', exist_ok=True)\n for i in precision:\n training_benchmark = pandas.DataFrame(train(i))\n training_benchmark.to_csv('results/'+device_name+\"_\"+i+'_model_training_benchmark.csv', index=False)\n inference_benchmark = pandas.DataFrame(inference(i))\n inference_benchmark.to_csv('results/'+device_name+\"_\"+i+'_model_inference_benchmark.csv', index=False)\n train=arr_train(device_name)\n inference=arr_inference(device_name)\n\n\n total_model(train,device_name)\n total_model(inference,device_name)\n" ]
[ [ "torch.cuda.synchronize", "torch.no_grad", "torch.cuda.get_device_name", "torch.nn.CrossEntropyLoss", "torch.cuda.device_count", "torch.LongTensor", "torch.randn", "torch.nn.DataParallel" ] ]
vstadnytskyi/caproto-sandbox
[ "712d44a15770b0a51503fba1068a68b3286d8ee5" ]
[ "caproto_sandbox/io_device_client_simple.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\nfrom matplotlib import pyplot as plt\nfrom logging import debug,warn,info,error\nimport epics\nfrom time import time,sleep\nfrom _thread import start_new_thread\nfrom caproto.threading.client import Context\nfrom pdb import pm\nimport epics\nfrom numpy import nan, argmax\ndefault_prefix='io_device_single:'\nctx = Context()\nca_img,ca_t1,ca_t2 = ctx.get_pvs(default_prefix+'image',default_prefix+'t1',default_prefix+'t2')\n\n\nimage = epics.PV(pvname = default_prefix+'image', connection_timeout = 20, verbose = True, auto_monitor = False)\nt1 = epics.PV(pvname = default_prefix+'t1', verbose = False)\nt2 = epics.PV(pvname = default_prefix+'t2', verbose = False)\n\nlst = []\ndef pyepics_for_loop():\n print('img,img.shape,img.max(),img.mean(),t1.get(),t2.get()')\n for i in range(10):\n img = image.get(timeout = 20)\n if img.max()> 255:\n plt.plot(img)\n plt.show()\n print(img,img.shape,img.max(),img.mean(),t1.get(),t2.get())\n sleep(4.3)\n\ndef caproto_for_loop():\n print('img,img.shape,img.max(),img.mean(),ca_t2.read().data[0]')\n for i in range(4):\n img = ca_img.read().data\n print(img,img.shape,img.max(),img.mean(),ca_t1.read().data[0],ca_t2.read().data[0])\n sleep(1)\n#pyepics_for_loop()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.plot" ] ]
richard-vock/multi2novel
[ "a81b2e20b6cbf9cc1d3ce139631d9608bbdaf714" ]
[ "dataset.py" ]
[ "from os.path import join\nimport numpy as np\nimport h5py\nimport torch\n\nseq_length = 4\n\nclass Dataset(object):\n def __init__(self, root, ids, n,\n max_examples=None, bound=10, cache=False):\n self.ids = list(ids)\n self.n = n\n self.bound = bound\n\n if max_examples is not None:\n self.ids = self.ids[:max_examples]\n\n self.data = h5py.File(join(root, 'data.hdf5'), 'r')\n\n self.cache = None\n if cache:\n self.cache = []\n for id in ids:\n self.cache.append(self.single_item(id))\n\n def __getitem__(self, idx):\n if self.cache is not None:\n return self.cache[idx]\n\n return self.single_item(self.ids[idx])\n\n def single_item(self, id):\n if isinstance(id, bytes):\n id = id.decode(\"utf-8\")\n\n image = self.data[id]['image'][()]/255.*2 - 1\n image = torch.Tensor(image.transpose(2, 0, 1)).unsqueeze(0)\n pose = torch.Tensor(self.data[id]['pose'][()]).unsqueeze(0)\n\n enough = False\n id_num = int(id[-seq_length:])\n while not enough:\n random_num = np.random.randint(-self.bound, self.bound)\n id_target = id[:-seq_length] + str(id_num + random_num).zfill(seq_length)\n\n if id_target in self.data:\n image_tmp = self.data[id_target]['image'][()]/255.*2 - 1\n image_tmp = torch.Tensor(image_tmp.transpose(2, 0, 1)).unsqueeze(0)\n pose_tmp = torch.Tensor(self.data[id_target]['pose'][()]).unsqueeze(0)\n image = torch.cat((image, image_tmp), dim=0)\n pose = torch.cat((pose, pose_tmp), dim=0)\n\n if pose.shape[0] == self.n + 1:\n enough = True\n\n return (image[1:], pose[1:]), (image[0], pose[0])\n\n def __len__(self):\n return len(self.ids)\n\ndef create_default_splits(n, root, bound=10, cache=False):\n ids_train = []\n ids_test = []\n with open(join(root, 'id_train.txt'), 'r') as fp:\n ids_train = [s.strip() for s in fp.readlines() if s]\n with open(join(root, 'id_test.txt'), 'r') as fp:\n ids_test = [s.strip() for s in fp.readlines() if s]\n\n dataset_train = Dataset(root, ids_train, n, bound=bound, cache=cache)\n dataset_test = Dataset(root, ids_test, n, bound=bound, cache=cache)\n\n return dataset_train, dataset_test\n" ]
[ [ "torch.Tensor", "torch.cat", "numpy.random.randint" ] ]
hsukyle/cactus-maml
[ "b9319fe3480955c17e376cecc667c38e365c510e" ]
[ "maml.py" ]
[ "\"\"\" Code for the MAML algorithm and network definitions. \"\"\"\nfrom __future__ import print_function\nimport numpy as np\nimport sys\nimport tensorflow as tf\ntry:\n import special_grads\nexcept KeyError as e:\n print('WARN: Cannot define MaxPoolGrad, likely already defined for this version of tensorflow: %s' % e,\n file=sys.stderr)\n\nfrom collections import OrderedDict\nfrom tensorflow.python.platform import flags\nfrom utils import xent, conv_block, normalize, bn_relu_conv_block\n\nFLAGS = flags.FLAGS\n\nclass MAML:\n def __init__(self, dim_input=1, dim_output_train=1, dim_output_val=1, test_num_updates=5):\n \"\"\" must call construct_model() after initializing MAML! \"\"\"\n self.dim_input = dim_input\n self.dim_output_train = dim_output_train\n self.dim_output_val = dim_output_val\n self.update_lr = FLAGS.update_lr\n self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())\n self.classification = False\n self.test_num_updates = test_num_updates\n self.loss_func = xent\n self.classification = True\n if FLAGS.on_encodings:\n print('Meta-learning on encodings')\n self.dim_hidden = [FLAGS.num_filters] * FLAGS.num_hidden_layers\n print('hidden layers: {}'.format(self.dim_hidden))\n self.forward = self.forward_fc\n self.construct_weights = self.construct_fc_weights\n else:\n if FLAGS.conv:\n self.dim_hidden = FLAGS.num_filters\n if FLAGS.resnet:\n if FLAGS.input_type == 'images_84x84':\n self.forward = self.forward_resnet84\n self.construct_weights = self.construct_resnet_weights84\n assert FLAGS.num_parts_per_res_block == 2\n assert FLAGS.num_res_blocks == 4\n self.num_parts_per_res_block = FLAGS.num_parts_per_res_block\n self.blocks = ['input', 'maxpool', 'res0', 'maxpool', 'res1', 'maxpool', 'res2', 'maxpool', 'res3', 'output']\n elif FLAGS.input_type == 'images_224x224':\n self.forward = self.forward_resnet224\n self.construct_weights = self.construct_resnet_weights224\n assert FLAGS.num_parts_per_res_block == 2\n assert FLAGS.num_res_blocks == 4\n self.num_parts_per_res_block = FLAGS.num_parts_per_res_block\n self.blocks = ['input', 'maxpool', 'res0', 'maxpool', 'res1', 'maxpool', 'res2', 'maxpool', 'res3', 'output']\n else:\n raise ValueError\n else:\n self.forward = self.forward_conv\n self.construct_weights = self.construct_conv_weights\n else:\n self.dim_hidden = [1024, 512, 256, 128]\n print('hidden layers: {}'.format(self.dim_hidden))\n self.forward = self.forward_fc\n self.construct_weights = self.construct_fc_weights\n if FLAGS.dataset == 'mnist' or FLAGS.dataset == 'omniglot':\n self.channels = 1\n else:\n self.channels = 3\n self.img_size = int(np.sqrt(self.dim_input/self.channels))\n if FLAGS.dataset not in ['mnist', 'omniglot', 'miniimagenet', 'celeba', 'imagenet']:\n raise ValueError('Unrecognized data source.')\n\n # resnet things\n\n\n def construct_model(self, input_tensors=None, prefix='metatrain_'):\n # a: training data for inner gradient, b: test data for meta gradient\n if prefix == 'metatrain_':\n inner_update_batch_size = FLAGS.inner_update_batch_size_train\n else:\n inner_update_batch_size = FLAGS.inner_update_batch_size_val\n outer_update_batch_size = FLAGS.outer_update_batch_size\n if input_tensors is None:\n self.inputa = tf.placeholder(tf.float32)\n self.inputb = tf.placeholder(tf.float32)\n self.labela = tf.placeholder(tf.float32)\n self.labelb = tf.placeholder(tf.float32)\n else:\n self.inputa = input_tensors['inputa']\n self.inputb = input_tensors['inputb']\n self.labela = input_tensors['labela']\n self.labelb = input_tensors['labelb']\n if 
prefix == 'metaval_':\n self.mv_inputa = self.inputa\n self.mv_inputb = self.inputb\n self.mv_labela = self.labela\n self.mv_labelb = self.labelb\n\n with tf.variable_scope('model', reuse=None) as training_scope:\n if 'weights' in dir(self):\n training_scope.reuse_variables()\n weights = self.weights\n else:\n # Define the weights\n self.weights = weights = self.construct_weights()\n print(weights.keys())\n\n # outputbs[i] and lossesb[i] is the output and loss after i+1 gradient updates\n lossesa, outputas, lossesb, outputbs = [], [], [], []\n accuraciesa, accuraciesb = [], []\n num_updates = max(self.test_num_updates, FLAGS.num_updates)\n outputbs = [[]]*num_updates\n lossesb = [[]]*num_updates\n accuraciesb = [[]]*num_updates\n if FLAGS.from_scratch:\n train_accuracies = [[]]*num_updates\n\n def task_metalearn(inp, reuse=True):\n \"\"\" Perform gradient descent for one task in the meta-batch. \"\"\"\n inputa, inputb, labela, labelb = inp\n task_outputbs, task_lossesb = [], []\n\n if FLAGS.from_scratch:\n task_outputas = []\n\n if self.classification:\n task_accuraciesb = []\n if FLAGS.from_scratch:\n task_accuraciesa = []\n\n\n task_outputa = self.forward(inputa, weights, prefix, reuse=reuse) # only reuse on the first iter\n if FLAGS.from_scratch:\n task_outputas.append(task_outputa)\n task_lossa = self.loss_func(task_outputa, labela, inner_update_batch_size)\n\n grads = tf.gradients(task_lossa, list(weights.values()))\n if FLAGS.stop_grad:\n grads = [tf.stop_gradient(grad) for grad in grads]\n gradients = dict(zip(weights.keys(), grads))\n fast_weights = dict(zip(weights.keys(), [weights[key] - self.update_lr*gradients[key] for key in weights.keys()]))\n output = self.forward(inputb, fast_weights, prefix, reuse=True)\n task_outputbs.append(output)\n task_lossesb.append(self.loss_func(output, labelb, outer_update_batch_size))\n\n for j in range(num_updates - 1):\n outputa = self.forward(inputa, fast_weights, prefix, reuse=True)\n loss = self.loss_func(outputa, labela, inner_update_batch_size)\n if FLAGS.from_scratch:\n task_outputas.append(outputa)\n\n grads = tf.gradients(loss, list(fast_weights.values()))\n if FLAGS.stop_grad:\n grads = [tf.stop_gradient(grad) for grad in grads]\n gradients = dict(zip(fast_weights.keys(), grads))\n fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - self.update_lr*gradients[key] for key in fast_weights.keys()]))\n output = self.forward(inputb, fast_weights, prefix, reuse=True)\n task_outputbs.append(output)\n task_lossesb.append(self.loss_func(output, labelb, outer_update_batch_size))\n\n task_output = [task_outputa, task_outputbs, task_lossa, task_lossesb]\n\n if self.classification:\n task_accuracya = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(task_outputa), 1), tf.argmax(labela, 1))\n for j in range(num_updates):\n task_accuraciesb.append(tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(task_outputbs[j]), 1), tf.argmax(labelb, 1)))\n if FLAGS.from_scratch:\n task_accuraciesa.append(tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(task_outputas[j]), 1), tf.argmax(labela, 1)))\n task_output.extend([task_accuracya, task_accuraciesb])\n if FLAGS.from_scratch:\n task_output.extend([task_accuraciesa])\n\n return task_output\n\n if FLAGS.norm is not 'None':\n # to initialize the batch norm vars, might want to combine this, and not run idx 0 twice.\n unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)\n\n out_dtype = [tf.float32, [tf.float32]*num_updates, tf.float32, 
[tf.float32]*num_updates]\n if self.classification:\n out_dtype.extend([tf.float32, [tf.float32]*num_updates])\n if FLAGS.from_scratch:\n out_dtype.extend([[tf.float32] * num_updates])\n result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)\n if self.classification:\n if FLAGS.from_scratch:\n outputas, outputbs, lossesa, lossesb, accuraciesa, accuraciesb, train_accuracies = result\n else:\n outputas, outputbs, lossesa, lossesb, accuraciesa, accuraciesb = result\n else:\n outputas, outputbs, lossesa, lossesb = result\n\n ## Performance & Optimization\n if 'train' in prefix:\n self.total_loss1 = total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(FLAGS.meta_batch_size)\n self.total_losses2 = total_losses2 = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]\n # after the map_fn\n self.outputas, self.outputbs = outputas, outputbs\n if self.classification:\n self.total_accuracy1 = total_accuracy1 = tf.reduce_sum(accuraciesa) / tf.to_float(FLAGS.meta_batch_size)\n self.total_accuracies2 = total_accuracies2 = [tf.reduce_sum(accuraciesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]\n self.pretrain_op = tf.train.AdamOptimizer(self.meta_lr).minimize(total_loss1)\n\n if FLAGS.metatrain_iterations > 0:\n optimizer = tf.train.AdamOptimizer(self.meta_lr)\n self.gvs = gvs = optimizer.compute_gradients(self.total_losses2[FLAGS.num_updates-1])\n if FLAGS.dataset == 'miniimagenet' or FLAGS.dataset == 'celeba' or FLAGS.dataset == 'imagenet':\n gvs = [(tf.clip_by_value(grad, -10, 10), var) for grad, var in gvs]\n self.metatrain_op = optimizer.apply_gradients(gvs)\n else:\n self.metaval_total_loss1 = total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(FLAGS.meta_batch_size)\n self.metaval_total_losses2 = total_losses2 = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]\n if self.classification:\n self.metaval_total_accuracy1 = total_accuracy1 = tf.reduce_sum(accuraciesa) / tf.to_float(FLAGS.meta_batch_size)\n self.metaval_total_accuracies2 = total_accuracies2 =[tf.reduce_sum(accuraciesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]\n self.mv_outputbs = outputbs\n if FLAGS.from_scratch:\n self.metaval_train_accuracies = [tf.reduce_sum(train_accuracies[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]\n ## Summaries\n tf.summary.scalar(prefix+'Pre-update loss', total_loss1)\n if self.classification:\n tf.summary.scalar(prefix+'Pre-update accuracy', total_accuracy1)\n\n for j in range(num_updates):\n tf.summary.scalar(prefix+'Post-update loss, step ' + str(j+1), total_losses2[j])\n if self.classification:\n tf.summary.scalar(prefix+'Post-update accuracy, step ' + str(j+1), total_accuracies2[j])\n\n ### Network construction functions (fc networks and conv networks)\n def construct_fc_weights(self):\n weights = {}\n weights['w1'] = tf.Variable(tf.truncated_normal([self.dim_input, self.dim_hidden[0]], stddev=0.01))\n weights['b1'] = tf.Variable(tf.zeros([self.dim_hidden[0]]))\n for i in range(1,len(self.dim_hidden)):\n weights['w'+str(i+1)] = tf.Variable(tf.truncated_normal([self.dim_hidden[i-1], self.dim_hidden[i]], stddev=0.01))\n weights['b'+str(i+1)] = tf.Variable(tf.zeros([self.dim_hidden[i]]))\n weights['w'+str(len(self.dim_hidden)+1)] = tf.Variable(tf.truncated_normal([self.dim_hidden[-1], self.dim_output_train], stddev=0.01))\n 
weights['b'+str(len(self.dim_hidden)+1)] = tf.Variable(tf.zeros([self.dim_output_train]))\n return weights\n\n def forward_fc(self, inp, weights, prefix, reuse=False):\n hidden = normalize(tf.matmul(inp, weights['w1']) + weights['b1'], activation=tf.nn.relu, reuse=reuse, scope='0')\n for i in range(1,len(self.dim_hidden)):\n hidden = normalize(tf.matmul(hidden, weights['w'+str(i+1)]) + weights['b'+str(i+1)], activation=tf.nn.relu, reuse=reuse, scope=str(i+1))\n logits = tf.matmul(hidden, weights['w'+str(len(self.dim_hidden)+1)]) + weights['b'+str(len(self.dim_hidden)+1)]\n if 'val' in prefix:\n logits = tf.gather(logits, tf.range(self.dim_output_val), axis=1)\n return logits\n\n def construct_conv_weights(self):\n weights = {}\n\n dtype = tf.float32\n conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)\n fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)\n k = 3\n\n channels = self.channels\n weights['conv1'] = tf.get_variable('conv1', [k, k, channels, self.dim_hidden], initializer=conv_initializer, dtype=dtype)\n weights['b1'] = tf.Variable(tf.zeros([self.dim_hidden]))\n weights['conv2'] = tf.get_variable('conv2', [k, k, self.dim_hidden, self.dim_hidden], initializer=conv_initializer, dtype=dtype)\n weights['b2'] = tf.Variable(tf.zeros([self.dim_hidden]))\n weights['conv3'] = tf.get_variable('conv3', [k, k, self.dim_hidden, self.dim_hidden], initializer=conv_initializer, dtype=dtype)\n weights['b3'] = tf.Variable(tf.zeros([self.dim_hidden]))\n weights['conv4'] = tf.get_variable('conv4', [k, k, self.dim_hidden, self.dim_hidden], initializer=conv_initializer, dtype=dtype)\n weights['b4'] = tf.Variable(tf.zeros([self.dim_hidden]))\n if FLAGS.dataset == 'miniimagenet' or FLAGS.dataset == 'celeba' or FLAGS.dataset == 'imagenet':\n # assumes max pooling\n weights['w5'] = tf.get_variable('w5', [self.dim_hidden*5*5, self.dim_output_train], initializer=fc_initializer)\n weights['b5'] = tf.Variable(tf.zeros([self.dim_output_train]), name='b5')\n else:\n weights['w5'] = tf.Variable(tf.random_normal([self.dim_hidden, self.dim_output_train]), name='w5')\n weights['b5'] = tf.Variable(tf.zeros([self.dim_output_train]), name='b5')\n\n return weights\n\n def forward_conv(self, inp, weights, prefix, reuse=False, scope=''):\n # reuse is for the normalization parameters.\n channels = self.channels\n inp = tf.reshape(inp, [-1, self.img_size, self.img_size, channels])\n\n hidden1 = conv_block(inp, weights['conv1'], weights['b1'], reuse, scope+'0')\n hidden2 = conv_block(hidden1, weights['conv2'], weights['b2'], reuse, scope+'1')\n hidden3 = conv_block(hidden2, weights['conv3'], weights['b3'], reuse, scope+'2')\n hidden4 = conv_block(hidden3, weights['conv4'], weights['b4'], reuse, scope+'3')\n if FLAGS.dataset == 'miniimagenet' or FLAGS.dataset == 'celeba' or FLAGS.dataset == 'imagenet':\n # last hidden layer is 6x6x64-ish, reshape to a vector\n hidden4 = tf.reshape(hidden4, [-1, np.prod([int(dim) for dim in hidden4.get_shape()[1:]])])\n else:\n hidden4 = tf.reduce_mean(hidden4, [1, 2])\n\n logits = tf.matmul(hidden4, weights['w5']) + weights['b5']\n\n if 'val' in prefix:\n logits = tf.gather(logits, tf.range(self.dim_output_val), axis=1)\n return logits\n\n def construct_resnet_weights224(self):\n weights = OrderedDict()\n dtype = tf.float32\n\n conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)\n bias_initializer = tf.zeros_initializer(dtype=dtype)\n fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)\n def 
make_conv_layer_weights(weights, scope, k, filters_in, filters_out, bias=True):\n weights['{}/conv'.format(scope)] = tf.get_variable('{}/conv'.format(scope), [k, k, filters_in, filters_out], initializer=conv_initializer, dtype=dtype)\n if bias:\n weights['{}/bias'.format(scope)] = tf.get_variable('{}/bias'.format(scope), [filters_out], initializer=bias_initializer, dtype=dtype)\n def make_fc_layer_weights(weights, scope, dims_in, dims_out):\n weights['{}/fc'.format(scope)] = tf.get_variable('{}/fc'.format(scope), [dims_in, dims_out], initializer=fc_initializer, dtype=dtype)\n weights['{}/bias'.format(scope)] = tf.get_variable('{}/bias'.format(scope), [dims_out], initializer=bias_initializer, dtype=dtype)\n for block_name in self.blocks:\n if block_name == 'input':\n make_conv_layer_weights(weights, block_name, k=7, filters_in=self.channels, filters_out=64)\n elif 'res' in block_name:\n j = int(block_name[-1])\n last_block_filter = 64 if j == 0 else 64 * 2 ** (j-1)\n this_block_filter = 64 if j == 0 else last_block_filter * 2\n print(block_name, last_block_filter, this_block_filter)\n make_conv_layer_weights(weights, '{}/shortcut'.format(block_name), k=1, filters_in=last_block_filter,\n filters_out=this_block_filter, bias=False)\n for i in range(self.num_parts_per_res_block):\n make_conv_layer_weights(weights, '{}/part{}'.format(block_name, i), k=3,\n filters_in=last_block_filter if i == 0 else this_block_filter,\n filters_out=this_block_filter)\n elif block_name == 'output':\n make_fc_layer_weights(weights, block_name, dims_in=512, dims_out=self.dim_output_train)\n return weights\n\n def construct_resnet_weights84(self):\n weights = OrderedDict()\n dtype = tf.float32\n\n conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)\n bias_initializer = tf.zeros_initializer(dtype=dtype)\n fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)\n def make_conv_layer_weights(weights, scope, k, filters_in, filters_out, bias=True):\n weights['{}/conv'.format(scope)] = tf.get_variable('{}/conv'.format(scope), [k, k, filters_in, filters_out], initializer=conv_initializer, dtype=dtype)\n if bias:\n weights['{}/bias'.format(scope)] = tf.get_variable('{}/bias'.format(scope), [filters_out], initializer=bias_initializer, dtype=dtype)\n def make_fc_layer_weights(weights, scope, dims_in, dims_out):\n weights['{}/fc'.format(scope)] = tf.get_variable('{}/fc'.format(scope), [dims_in, dims_out], initializer=fc_initializer, dtype=dtype)\n weights['{}/bias'.format(scope)] = tf.get_variable('{}/bias'.format(scope), [dims_out], initializer=bias_initializer, dtype=dtype)\n for block_name in self.blocks:\n if block_name == 'input':\n make_conv_layer_weights(weights, block_name, k=3, filters_in=self.channels, filters_out=64)\n elif 'res' in block_name:\n j = int(block_name[-1])\n last_block_filter = 64 if j == 0 else 64 * 2 ** (j - 1)\n this_block_filter = 64 if j == 0 else last_block_filter * 2\n print(block_name, last_block_filter, this_block_filter)\n make_conv_layer_weights(weights, '{}/shortcut'.format(block_name), k=1, filters_in=last_block_filter,\n filters_out=this_block_filter, bias=False)\n for i in range(self.num_parts_per_res_block):\n make_conv_layer_weights(weights, '{}/part{}'.format(block_name, i), k=3,\n filters_in=last_block_filter if i == 0 else this_block_filter,\n filters_out=this_block_filter)\n elif block_name == 'output':\n make_fc_layer_weights(weights, block_name, dims_in=512, dims_out=self.dim_output_train)\n return weights\n\n def forward_resnet224(self, inp, 
weights, prefix, reuse=False):\n\n inp = tf.reshape(inp, [-1, self.img_size, self.img_size, self.channels])\n\n for block_name in self.blocks:\n if block_name == 'input':\n conv = weights['{}/conv'.format(block_name)]\n bias = weights['{}/bias'.format(block_name)]\n inp = tf.nn.conv2d(inp, filter=conv, strides=[1, 2, 2, 1], padding=\"SAME\") + bias\n elif 'res' in block_name:\n shortcut = inp\n conv = weights['{}/shortcut/conv'.format(block_name)]\n shortcut = tf.nn.conv2d(input=shortcut, filter=conv, strides=[1, 1, 1, 1], padding=\"SAME\")\n for part in range(self.num_parts_per_res_block):\n part_name = 'part{}'.format(part)\n scope = '{}/{}'.format(block_name, part_name)\n conv = weights['{}/{}/conv'.format(block_name, part_name)]\n bias = weights['{}/{}/bias'.format(block_name, part_name)]\n inp = bn_relu_conv_block(inp=inp, conv=conv, bias=bias, reuse=reuse, scope=scope)\n inp = shortcut + inp\n elif 'maxpool' in block_name:\n inp = tf.nn.max_pool(inp, [1, 2, 2, 1], [1, 2, 2, 1], \"VALID\")\n elif 'output' in block_name:\n inp = tf.reduce_mean(inp, [1, 2])\n fc = weights['{}/fc'.format(block_name)]\n bias = weights['{}/bias'.format(block_name)]\n inp = tf.matmul(inp, fc) + bias\n if 'val' in prefix:\n inp = tf.gather(inp, tf.range(self.dim_output_val), axis=1)\n return inp\n\n def forward_resnet84(self, inp, weights, prefix, reuse=False):\n\n inp = tf.reshape(inp, [-1, self.img_size, self.img_size, self.channels])\n\n for block_name in self.blocks:\n if block_name == 'input':\n conv = weights['{}/conv'.format(block_name)]\n bias = weights['{}/bias'.format(block_name)]\n inp = tf.nn.conv2d(inp, filter=conv, strides=[1, 1, 1, 1], padding=\"SAME\") + bias\n elif 'res' in block_name:\n shortcut = inp\n conv = weights['{}/shortcut/conv'.format(block_name)]\n shortcut = tf.nn.conv2d(input=shortcut, filter=conv, strides=[1, 1, 1, 1], padding=\"SAME\")\n for part in range(self.num_parts_per_res_block):\n part_name = 'part{}'.format(part)\n scope = '{}/{}'.format(block_name, part_name)\n conv = weights['{}/{}/conv'.format(block_name, part_name)]\n bias = weights['{}/{}/bias'.format(block_name, part_name)]\n inp = bn_relu_conv_block(inp=inp, conv=conv, bias=bias, reuse=reuse, scope=scope)\n inp = shortcut + inp\n elif 'maxpool' in block_name:\n inp = tf.nn.max_pool(inp, [1, 2, 2, 1], [1, 2, 2, 1], \"VALID\")\n elif 'output' in block_name:\n inp = tf.reduce_mean(inp, [1, 2])\n fc = weights['{}/fc'.format(block_name)]\n bias = weights['{}/bias'.format(block_name)]\n inp = tf.matmul(inp, fc) + bias\n if 'val' in prefix:\n inp = tf.gather(inp, tf.range(self.dim_output_val), axis=1)\n return inp\n\n def wrap(self, inp, weights, prefix, reuse=False, scope=''):\n unused = self.forward_resnet(inp, weights, prefix, reuse=False)\n return self.forward_resnet(inp, weights, prefix, reuse=True)\n\n\nif __name__ == '__main__':\n import ipdb\n\n FLAGS = flags.FLAGS\n\n ## Dataset/method options\n flags.DEFINE_string('dataset', 'omniglot', 'omniglot or mnist or miniimagenet or celeba')\n flags.DEFINE_integer('num_encoding_dims', -1, 'of unsupervised representation learning method')\n flags.DEFINE_string('encoder', 'acai', 'acai or bigan or deepcluster or infogan')\n\n ## Training options\n flags.DEFINE_integer('metatrain_iterations', 30000, 'number of metatraining iterations.')\n flags.DEFINE_integer('meta_batch_size', 8, 'number of tasks sampled per meta-update')\n flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')\n flags.DEFINE_float('update_lr', 0.05, 'step size alpha for inner 
gradient update.')\n flags.DEFINE_integer('inner_update_batch_size_train', 1,\n 'number of examples used for inner gradient update (K for K-shot learning).')\n flags.DEFINE_integer('inner_update_batch_size_val', 5, 'above but for meta-val')\n flags.DEFINE_integer('outer_update_batch_size', 5, 'number of examples used for outer gradient update')\n flags.DEFINE_integer('num_updates', 5, 'number of inner gradient updates during training.')\n flags.DEFINE_string('mt_mode', 'gtgt', 'meta-training mode (for sampling, labeling): gtgt or encenc')\n flags.DEFINE_string('mv_mode', 'gtgt', 'meta-validation mode (for sampling, labeling): gtgt or encenc')\n flags.DEFINE_integer('num_classes_train', 5, 'number of classes used in classification for meta-training')\n flags.DEFINE_integer('num_classes_val', 5, 'number of classes used in classification for meta-validation.')\n flags.DEFINE_float('margin', 0.0, 'margin for generating partitions using random hyperplanes')\n flags.DEFINE_integer('num_partitions', 1, 'number of partitions, -1 for same as number of meta-training tasks')\n flags.DEFINE_string('partition_algorithm', 'kmeans', 'hyperplanes or kmeans')\n flags.DEFINE_integer('num_clusters', -1, 'number of clusters for kmeans')\n flags.DEFINE_boolean('scaled_encodings', True, 'if True, use randomly scaled encodings for kmeans')\n flags.DEFINE_boolean('on_encodings', False, 'if True, train MAML on top of encodings')\n flags.DEFINE_integer('num_hidden_layers', 2, 'number of mlp hidden layers')\n flags.DEFINE_integer('num_parallel_calls', 8, 'for loading data')\n flags.DEFINE_integer('gpu', 7, 'CUDA_VISIBLE_DEVICES=')\n\n ## Model options\n flags.DEFINE_string('norm', 'batch_norm', 'batch_norm, layer_norm, or None')\n flags.DEFINE_integer('num_filters', 32, 'number of filters for each conv layer')\n flags.DEFINE_bool('conv', True, 'whether or not to use a convolutional network')\n flags.DEFINE_bool('max_pool', False, 'Whether or not to use max pooling rather than strided convolutions')\n flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for speed)')\n\n ## Logging, saving, and testing options\n flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')\n flags.DEFINE_string('logdir', './log', 'directory for summaries and checkpoints.')\n flags.DEFINE_bool('resume', True, 'resume training if there is a model available')\n flags.DEFINE_bool('train', True, 'True to train, False to test.')\n flags.DEFINE_integer('test_iter', -1, 'iteration to load model (-1 for latest model)')\n flags.DEFINE_bool('test_set', False, 'Set to true to test on the the test set, False for the validation set.')\n flags.DEFINE_integer('log_inner_update_batch_size_val', -1,\n 'specify log directory iubsv. (use to test with different iubsv)')\n flags.DEFINE_float('train_update_lr', -1,\n 'value of inner gradient step step during training. 
(use if you want to test with a different value)')\n flags.DEFINE_bool('save_checkpoints', False, 'if True, save model weights as checkpoints')\n flags.DEFINE_bool('debug', False, 'if True, use tf debugger')\n flags.DEFINE_string('suffix', '', 'suffix for an exp_string')\n flags.DEFINE_bool('from_scratch', False, 'fast-adapt from scratch')\n flags.DEFINE_integer('num_eval_tasks', 1000, 'number of tasks to meta-test on')\n\n # Imagenet\n flags.DEFINE_string('input_type', 'images_84x84',\n 'features or features_processed or images_fullsize or images_84x84')\n flags.DEFINE_string('data_dir', '/data3/kylehsu/data', 'location of data')\n flags.DEFINE_bool('resnet', False, 'use resnet architecture')\n flags.DEFINE_integer('num_res_blocks', 5, 'number of resnet blocks')\n flags.DEFINE_integer('num_parts_per_res_block', 2, 'number of bn-relu-conv parts in a res block')\n\n FLAGS.resnet = True\n\n maml = MAML(dim_input=3*84*84, dim_output_train=10, dim_output_val=5, test_num_updates=5)\n maml.channels = 3\n maml.img_size = 84\n weights = maml.construct_resnet_weights()\n input_ph = tf.placeholder(tf.float32)\n unused = maml.forward_resnet(input_ph, weights, 'hi', reuse=False)\n\n\n\n\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n input = np.ones((1, 84 * 84 * 3), dtype=np.float32)\n\n y = sess.run(maml.forward_resnet(input_ph, weights, 'val', reuse=True), {input_ph: input})\n\n\n ipdb.set_trace()\n x=1\n\n\n\n\n\n" ]
[ [ "tensorflow.nn.conv2d", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.python.platform.flags.DEFINE_float", "tensorflow.python.platform.flags.DEFINE_string", "tensorflow.clip_by_value", "tensorflow.to_float", "tensorflow.nn.softmax", "tensorflow.global_variables_initializer", "tensorflow.random_normal", "tensorflow.InteractiveSession", "tensorflow.python.platform.flags.DEFINE_integer", "tensorflow.python.platform.flags.DEFINE_boolean", "tensorflow.argmax", "tensorflow.python.platform.flags.DEFINE_bool", "tensorflow.variable_scope", "tensorflow.contrib.layers.xavier_initializer_conv2d", "numpy.sqrt", "tensorflow.nn.max_pool", "tensorflow.zeros", "tensorflow.train.AdamOptimizer", "tensorflow.range", "tensorflow.summary.scalar", "tensorflow.map_fn", "tensorflow.truncated_normal", "tensorflow.placeholder", "tensorflow.get_variable", "tensorflow.reduce_sum", "tensorflow.placeholder_with_default", "tensorflow.zeros_initializer", "numpy.ones", "tensorflow.reduce_mean", "tensorflow.stop_gradient" ] ]
nodonoughue/emitter-detection-python
[ "ebff19acebcc1edfd941280e05f8ddf2ff20c974" ]
[ "make_figures/chapter5.py" ]
[ "\"\"\"\nDraw Figures - Chapter 5\n\nThis script generates all of the figures that appear in Chapter 5 of the textbook.\n\nPorted from MATLAB Code\n\nNicholas O'Donoughue\n25 March 2021\n\"\"\"\n\nimport utils\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom examples import chapter5\n\n\ndef make_all_figures(close_figs=False):\n \"\"\"\n Call all the figure generators for this chapter\n\n :close_figs: Boolean flag. If true, will close all figures after generating them; for batch scripting.\n Default=False\n :return: List of figure handles\n \"\"\"\n\n # Initializes colorSet - Mx3 RGB vector for successive plot lines\n colors = plt.get_cmap(\"tab10\")\n\n # Reset the random number generator, to ensure reproducability\n rng = np.random.default_rng(0)\n\n # Find the output directory\n prefix = utils.init_output_dir('chapter5')\n\n # Activate seaborn for prettier plots\n sns.set()\n\n # Generate all figures\n fig4 = make_figure_4(prefix, rng, colors)\n fig6 = make_figure_6(prefix, rng, colors)\n fig7 = make_figure_7(prefix, rng, colors)\n\n figs = [fig4, fig6, fig7]\n\n if close_figs:\n for fig in figs:\n plt.close(fig)\n\n return None\n else:\n plt.show()\n\n return figs\n\n\ndef make_figure_4(prefix=None, rng=None, colors=None):\n \"\"\"\n Figure 4 - Example 5.1 - Superhet Performance\n\n Ported from MATLAB Code\n\n Nicholas O'Donoughue\n 25 March 2021\n\n :param prefix: output directory to place generated figure\n :param rng: random number generator\n :param colors: colormap for plots\n :return: figure handle\n \"\"\"\n\n if rng is None:\n rng = np.random.default_rng(0)\n \n if colors is None:\n colors = plt.get_cmap('tab10')\n\n fig4 = chapter5.example1()\n\n # Save figure\n if prefix is not None:\n plt.savefig(prefix + 'fig4.svg')\n plt.savefig(prefix + 'fig4.png')\n\n return fig4\n\n\ndef make_figure_6(prefix=None, rng=None, colors=None):\n \"\"\"\n Figure 6 - Example 5.2 - FMCW Radar\n\n Ported from MATLAB Code\n\n Nicholas O'Donoughue\n 25 March 2021\n\n :param prefix: output directory to place generated figure\n :param rng: random number generator\n :param colors: colormap for plots\n :return: figure handle\n \"\"\"\n\n if rng is None:\n rng = np.random.default_rng(0)\n\n if colors is None:\n colors = plt.get_cmap('tab10')\n\n fig6 = chapter5.example2()\n\n # Save figure\n if prefix is not None:\n plt.savefig(prefix + 'fig6.svg')\n plt.savefig(prefix + 'fig6.png')\n\n return fig6\n\n\ndef make_figure_7(prefix=None, rng=None, colors=None):\n \"\"\"\n Figure 7 - Example 5.3 - Pulsed Radar\n\n Ported from MATLAB Code\n\n Nicholas O'Donoughue\n 25 March 2021\n\n :param prefix: output directory to place generated figure\n :param rng: random number generator\n :param colors: colormap for plots\n :return: figure handle\n \"\"\"\n\n if rng is None:\n rng = np.random.default_rng(0)\n\n if colors is None:\n colors = plt.get_cmap('tab10')\n\n fig7 = chapter5.example3()\n\n # Save figure\n if prefix is not None:\n plt.savefig(prefix + 'fig7.svg')\n plt.savefig(prefix + 'fig7.png')\n\n return fig7\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.get_cmap", "numpy.random.default_rng", "matplotlib.pyplot.close", "matplotlib.pyplot.show" ] ]
tanzhenyu/keras-cv
[ "b7208ee25735c492ccc171874e34076111dcf637" ]
[ "kerascv/data/voc_segmentation.py" ]
[ "import os\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image, ImageOps, ImageFilter\n\n\ndef voc_segmentation_dataset_from_directory(\n directory=None,\n base_size=520,\n crop_size=480,\n batch_size=20,\n split=\"train\",\n shuffle=True,\n seed=None,\n preprocess_input=tf.keras.applications.vgg16.preprocess_input,\n n_classes=21,\n):\n directory = directory or os.path.expanduser('~/VOCdevkit/VOC2012')\n if not os.path.isdir(directory):\n raise ValueError(\"Directory Not Found {}\".format(directory))\n mask_dir = os.path.join(directory, \"SegmentationClass\")\n image_dir = os.path.join(directory, \"JPEGImages\")\n splits_dir = os.path.join(directory, \"ImageSets/Segmentation\")\n if split == \"train\":\n splits_dir = os.path.join(splits_dir, \"trainval.txt\")\n elif split == \"val\":\n splits_dir = os.path.join(splits_dir, \"val.txt\")\n elif split == \"test\":\n splits_dir = os.path.join(splits_dir, \"test.txt\")\n else:\n raise ValueError(\"Unknown split {}\".format(split))\n\n random.seed(seed)\n\n def file_generator():\n with tf.io.gfile.GFile(splits_dir, mode=\"r\") as f:\n lines = f.readlines()\n for line in lines:\n image_file = os.path.join(image_dir, line.rstrip(\"\\n\") + \".jpg\")\n mask_file = os.path.join(mask_dir, line.rstrip(\"\\n\") + \".png\")\n img_pil = Image.open(image_file)\n mask_pil = Image.open(mask_file)\n if random.random() < 0.5:\n img_pil = img_pil.transpose(Image.FLIP_LEFT_RIGHT)\n mask_pil = mask_pil.transpose(Image.FLIP_LEFT_RIGHT)\n long_size = random.randint(int(base_size * 0.5), int(base_size * 2.0))\n w, h = img_pil.size\n if h > w:\n oh = long_size\n ow = int(1.0 * w * long_size / h + 0.5)\n short_size = ow\n else:\n ow = long_size\n oh = int(1.0 * h * long_size / w + 0.5)\n short_size = oh\n img_pil = img_pil.resize((ow, oh), Image.BILINEAR)\n mask_pil = mask_pil.resize((ow, oh), Image.NEAREST)\n # pad crop\n if short_size < crop_size:\n padh = crop_size - oh if oh < crop_size else 0\n padw = crop_size - ow if ow < crop_size else 0\n img_pil = ImageOps.expand(img_pil, border=(0, 0, padw, padh), fill=0)\n mask_pil = ImageOps.expand(mask_pil, border=(0, 0, padw, padh), fill=255)\n # random crop crop_size\n w, h = img_pil.size\n x1 = random.randint(0, w - crop_size)\n y1 = random.randint(0, h - crop_size)\n img_pil = img_pil.crop((x1, y1, x1 + crop_size, y1 + crop_size))\n mask_pil = mask_pil.crop((x1, y1, x1 + crop_size, y1 + crop_size))\n # gaussian blur as in PSP\n if random.random() < 0.5:\n img_pil = img_pil.filter(ImageFilter.GaussianBlur(\n radius=random.random()))\n # preprocess image before returning\n img = np.array(img_pil)\n if preprocess_input is not None:\n img = preprocess_input(img)\n mask = np.array(mask_pil)\n sample_weights = np.ones_like(mask, dtype=np.float)\n ignore_mask_indices = (mask == 255)\n sample_weights[ignore_mask_indices] = 0.\n mask[ignore_mask_indices] = 0\n # Automatically convert palette mode to grayscale with class index.\n yield img, mask, sample_weights\n\n img_ds = tf.data.Dataset.from_generator(file_generator, (tf.float32, tf.uint8, tf.float32))\n\n def set_shape_fn(img, mask, sample_weights):\n img.set_shape([crop_size, crop_size, 3])\n mask.set_shape([crop_size, crop_size])\n # mask_one_hot = tf.one_hot(mask, depth=n_classes)\n # mask_one_hot.set_shape([crop_size, crop_size, n_classes])\n sample_weights.set_shape([crop_size, crop_size])\n # return img, mask_one_hot, sample_weights\n return img, mask, sample_weights\n\n if shuffle:\n img_ds = img_ds.shuffle(buffer_size=8 * 
batch_size, seed=seed)\n img_ds = img_ds.map(set_shape_fn)\n img_ds = img_ds.batch(batch_size)\n return img_ds\n" ]
[ [ "tensorflow.data.Dataset.from_generator", "numpy.array", "tensorflow.io.gfile.GFile", "numpy.ones_like" ] ]
bmelaiths/meta-dataset
[ "10088555c400768d895f97ae004738196e38a237", "10088555c400768d895f97ae004738196e38a237" ]
[ "meta_dataset/learners/experimental/optimization_learners.py", "meta_dataset/learners/experimental/base.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Meta-Dataset Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Optimization-based learners.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport collections\nimport itertools\nimport gin.tf\n\nfrom meta_dataset.learners.experimental import base as learner_base\nfrom meta_dataset.models.experimental import reparameterizable_backbones\nfrom meta_dataset.models.experimental import reparameterizable_base\nfrom meta_dataset.models.experimental import reparameterizable_distributions\nfrom six.moves import zip\nimport tensorflow as tf\n\n\[email protected]\ndef sgd(learning_rate):\n \"\"\"Construct optimizer triple for stochastic gradient descent (SGD).\n\n Inspired by the optimizer definitions in JAX\n (https://github.com/google/jax/blob/master/jax/experimental/optimizers.py),\n this implementation of SGD is fully functional (i.e., it maintains no hidden\n state) and so is compatible for use with an optimization-based meta-learner.\n\n Args:\n learning_rate: A positive scalar.\n\n Returns:\n An (init, update, get_params) function triple.\n \"\"\"\n\n def init(x0):\n return x0\n\n def update(i, grad, state):\n del i\n x = state\n return x - learning_rate * grad\n\n def get_params(state):\n x = state\n return x\n\n return init, update, get_params\n\n\[email protected]\ndef adam(learning_rate, b1=0.9, b2=0.999, eps=1e-8):\n \"\"\"Construct optimizer triple for Adam.\n\n Inspired by the optimizer definitions in JAX\n (https://github.com/google/jax/blob/master/jax/experimental/optimizers.py),\n this implementation of Adam is fully functional (i.e., it maintains no hidden\n state) and so is compatible for use with an optimization-based meta-learner.\n\n Args:\n learning_rate: A positive scalar.\n b1: optional, a positive scalar value for beta_1, the exponential decay rate\n for the first moment estimates (default 0.9).\n b2: optional, a positive scalar value for beta_2, the exponential decay rate\n for the second moment estimates (default 0.999).\n eps: optional, a positive scalar value for epsilon, a small constant for\n numerical stability (default 1e-8).\n\n Returns:\n An (init, update, get_params) function triple.\n \"\"\"\n\n def init(x0):\n m0 = tf.zeros_like(x0)\n v0 = tf.zeros_like(x0)\n return x0, m0, v0\n\n def update(i, grad, state):\n i = tf.cast(i, dtype=tf.float32)\n x, m, v = state\n m = (1. - b1) * grad + b1 * m # First moment estimate.\n v = (1. - b2) * (grad**2.) + b2 * v # Second moment estimate.\n mhat = m / (1. - b1**(i + 1.)) # Bias correction.\n vhat = v / (1. 
- b2**(i + 1.))\n x = x - learning_rate * mhat / (tf.sqrt(vhat) + eps)\n return x, m, v\n\n def get_params(state):\n x, _, _ = state\n return x\n\n return init, update, get_params\n\n\ndef optimizer_update(iterate_collection, iteration_idx, objective_fn, update_fn,\n get_params_fn, first_order, clip_grad_norm):\n \"\"\"Returns the next iterate in the optimization of objective_fn wrt variables.\n\n Args:\n iterate_collection: A (potentially structured) container of tf.Tensors\n corresponding to the state of the current iterate.\n iteration_idx: An int Tensor; the iteration number.\n objective_fn: Callable that takes in variables and produces the value of the\n objective function.\n update_fn: Callable that takes in the gradient of the objective function and\n the current iterate and produces the next iterate.\n get_params_fn: Callable that takes in the gradient of the objective function\n and the current iterate and produces the next iterate.\n first_order: If True, prevent the computation of higher order gradients.\n clip_grad_norm: If not None, gradient dimensions are independently clipped\n to lie in the interval [-clip_grad_norm, clip_grad_norm].\n \"\"\"\n variables = [get_params_fn(iterate) for iterate in iterate_collection]\n\n if tf.executing_eagerly():\n with tf.GradientTape(persistent=True) as g:\n g.watch(variables)\n loss = objective_fn(variables, iteration_idx)\n grads = g.gradient(loss, variables)\n else:\n loss = objective_fn(variables, iteration_idx)\n grads = tf.gradients(ys=loss, xs=variables)\n\n if clip_grad_norm:\n grads = [\n tf.clip_by_value(grad, -1 * clip_grad_norm, clip_grad_norm)\n for grad in grads\n ]\n\n if first_order:\n grads = [tf.stop_gradient(dv) for dv in grads]\n\n return [\n update_fn(i=iteration_idx, grad=dv, state=s)\n for (s, dv) in zip(iterate_collection, grads)\n ]\n\n\ndef em_loop(\n num_updates,\n e_step,\n m_step,\n variables,\n):\n \"\"\"Expectation-maximization of objective_fn wrt variables for num_updates.\"\"\"\n\n def _body(step, preupdate_vars):\n train_predictions_, responsibilities_ = e_step(preupdate_vars)\n updated_vars = m_step(preupdate_vars, train_predictions_, responsibilities_)\n return step + 1, updated_vars\n\n def _cond(step, *args):\n del args\n return step < num_updates\n\n step = tf.Variable(0, trainable=False, name='inner_step_counter')\n loop_vars = (step, variables)\n step, updated_vars = tf.while_loop(\n cond=_cond, body=_body, loop_vars=loop_vars, swap_memory=True)\n\n return updated_vars\n\n\[email protected]\ndef optimizer_loop(\n num_updates,\n objective_fn,\n update_fn,\n variables,\n first_order,\n clip_grad_norm,\n):\n \"\"\"Optimization of `objective_fn` for `num_updates` of `variables`.\"\"\"\n\n # Optimizer specifics.\n init, update, get_params = update_fn()\n\n def _body(step, preupdate_vars):\n \"\"\"Optimization loop body.\"\"\"\n updated_vars = optimizer_update(\n iterate_collection=preupdate_vars,\n iteration_idx=step,\n objective_fn=objective_fn,\n update_fn=update,\n get_params_fn=get_params,\n first_order=first_order,\n clip_grad_norm=clip_grad_norm,\n )\n\n return step + 1, updated_vars\n\n def _cond(step, *args):\n \"\"\"Optimization truncation condition.\"\"\"\n del args\n return step < num_updates\n\n step = tf.Variable(0, trainable=False, name='inner_step_counter')\n loop_vars = (step, [init(var) for var in variables])\n step, updated_vars = tf.while_loop(\n cond=_cond, body=_body, loop_vars=loop_vars, swap_memory=True)\n\n return [get_params(v) for v in updated_vars]\n\n\nForwardPass = 
collections.namedtuple('ForwardPass', (\n 'embeddings',\n 'predictions',\n 'inner_objective_value',\n 'outer_objective_value',\n 'accuracy',\n))\n\nAdaptation = collections.namedtuple('Adaptation', (\n 'pre_adaptation_support_results',\n 'post_adaptation_support_results',\n 'pre_adaptation_query_results',\n 'post_adaptation_query_results',\n 'objective_fn',\n 'support_module_objective_fn',\n 'query_module_objective_fn',\n 'forward_pass_fn',\n 'init_loop_variables_mapping',\n 'final_loop_variables_mapping',\n))\n\n\[email protected]\nclass ExperimentalOptimizationLearner(learner_base.ExperimentalEpisodicLearner):\n \"\"\"An optimization-based learner.\"\"\"\n\n def __init__(self, adapt_embedding_predicate, num_update_steps,\n additional_evaluation_update_steps, first_order,\n adapt_batch_norm, clip_grad_norm, update_fn, **kwargs):\n \"\"\"Initializes a `ExperimentalOptimizationLearner` instance.\n\n Args:\n adapt_embedding_predicate: A callable that returns True for `tf.Variable`\n attributes of the embedding function should be adapted for each task.\n num_update_steps: The number of inner loop optimization steps to take.\n additional_evaluation_update_steps: The number of additional inner loop\n optimization steps to take during evaluation (on the meta-test and\n meta-validation sets).\n first_order: If True, prevent the computation of higher order gradients.\n adapt_batch_norm: If True, adapt the scale and offset parameteres of batch\n normalization layers in the inner loop of optimization.\n clip_grad_norm: If not None, gradient dimensions are independently clipped\n to lie in the interval [-clip_grad_norm, clip_grad_norm] before being\n processed by the `update_fn`.\n update_fn: A Callable that takes in a learning rate and produces a\n function triple defining an iterative optimization process; see `sgd`\n and `adam` for examples.\n **kwargs: Keyword arguments common to all `ExperimentalEpisodicLearner`s.\n \"\"\"\n self.adapt_embedding_predicate = adapt_embedding_predicate\n self.num_update_steps = num_update_steps\n self.additional_evaluation_update_steps = additional_evaluation_update_steps\n self.adapt_batch_norm = adapt_batch_norm\n self.first_order = first_order\n self.clip_grad_norm = clip_grad_norm\n self.update_fn = update_fn\n super(ExperimentalOptimizationLearner, self).__init__(**kwargs)\n assert isinstance(self.embedding_fn,\n reparameterizable_base.ReparameterizableModule)\n\n def compute_loss(self, onehot_labels, predictions):\n \"\"\"Computes the loss on the query set of a given episode.\"\"\"\n return (self.outer_objective(\n onehot_labels=onehot_labels, predictions=predictions))\n\n @property\n def trainable_variables(self):\n \"\"\"Returns a tuple of variables to update in the outer optimization loop.\"\"\"\n raise NotImplementedError\n\n @property\n def task_parameters(self):\n \"\"\"Returns a tuple of variables to update in the inner optimization loop.\"\"\"\n raise NotImplementedError\n\n def episodic_init_ops(self, labels, embeddings, task_parameters):\n raise NotImplementedError\n\n def inner_loop_prediction(self, embeddings):\n raise NotImplementedError\n\n def inner_objective(self, onehot_labels, predictions, iteration_idx):\n raise NotImplementedError\n\n def outer_loop_prediction(self, embeddings):\n raise NotImplementedError\n\n def outer_objective(self, onehot_labels, predictions):\n raise NotImplementedError\n\n def forward_pass(self, data):\n \"\"\"Wrapper around `detailed_forward_pass` to return query set predictions.\n\n Args:\n data: A 
`meta_dataset.providers.Episode` containing the data for the\n episode.\n\n Returns:\n A Tensor of the predictions on the query set.\n \"\"\"\n forward_pass_result = self.detailed_forward_pass(data)\n\n post_adaptation_query_results = (\n forward_pass_result.post_adaptation_query_results)\n\n return post_adaptation_query_results.predictions\n\n def detailed_forward_pass(self, data):\n \"\"\"Returns all information from a forward pass of the `OptimizationLearner`.\n\n Args:\n data: A `meta_dataset.providers.Episode` containing the data for the\n episode.\n\n Returns:\n A `collections.NamedTuple` that contains the results of the forward pass.\n \"\"\"\n # Loop initialization.\n init_loop_variables = self.task_parameters\n init_loop_variable_refs = [\n v.experimental_ref() for v in init_loop_variables\n ]\n\n # Construct ops for data-dependent episodic initialization.\n episodic_init_ops = self.episodic_init_ops(\n labels=data.support_labels,\n embeddings=self.embedding_fn(data.support_images, training=True),\n task_parameters=init_loop_variables,\n )\n\n def _forward_pass(iteration_idx_, variables_mapping_, images_,\n onehot_labels_):\n \"\"\"Helper function to compute the outputs of a forward pass.\"\"\"\n\n with self.embedding_fn.reparameterize(variables_mapping_):\n # TODO(eringrant): Implement non-transductive batch normalization (i.e.,\n # pass the support set statistics through the query set forward pass.\n embeddings_ = self.embedding_fn(images_, training=True)\n\n # TODO(eringrant): `head_fn` is an attribute of the subclass.\n with self.head_fn.reparameterize(variables_mapping_):\n predictions_ = self.head_fn(embeddings_)[:, :data.way]\n\n accuracy_ = tf.reduce_mean(\n input_tensor=self.compute_accuracy(\n onehot_labels=onehot_labels_, predictions=predictions_))\n\n inner_objective_ = self.inner_objective(\n onehot_labels=onehot_labels_,\n predictions=predictions_,\n iteration_idx=iteration_idx_)\n\n outer_objective_ = self.outer_objective(\n onehot_labels=onehot_labels_,\n predictions=predictions_,\n )\n\n return ForwardPass(\n embeddings=embeddings_,\n predictions=predictions_,\n inner_objective_value=inner_objective_,\n outer_objective_value=outer_objective_,\n accuracy=accuracy_,\n )\n\n def _objective_fn(loop_variables_, iteration_idx_):\n \"\"\"Evaluate the support set objective given `loop_variables_`.\"\"\"\n\n # Get attribute paths for the loop_variables.\n loop_variables_mapping_ = dict(\n zip(init_loop_variable_refs, loop_variables_))\n\n adaptation_support_results = _forward_pass(\n iteration_idx_=iteration_idx_,\n variables_mapping_=loop_variables_mapping_,\n images_=data.support_images,\n onehot_labels_=data.onehot_support_labels)\n\n return adaptation_support_results.inner_objective_value\n\n def _e_step(loop_variables_):\n \"\"\"Evaluate expectations given `loop_variables_`.\"\"\"\n\n # Get attribute paths for the loop_variables.\n loop_variables_dict_ = dict(zip(init_loop_variable_refs, loop_variables_))\n\n with self.embedding_fn.reparameterize(loop_variables_dict_):\n # TODO(eringrant): training to True for normalization with batch stats.\n # Figure out the appropriate way to pass this around.\n train_embeddings_ = self.embedding_fn(data.train_images, training=True)\n\n class_embeddings_ = learner_base.class_specific_data(\n data.onehot_train_labels, train_embeddings_, self.logit_dim)\n\n def _compute_responsibilities(examples_, class_idx):\n train_predictions_ = tf.squeeze(\n self.head_fn(\n embeddings=examples_, components=True, class_idx=[class_idx]),\n 
axis=1)\n return tf.nn.softmax(train_predictions_, axis=-1)\n\n with self.head_fn.reparameterize(loop_variables_dict_):\n class_responsibilities_ = [\n _compute_responsibilities(embeddings_, class_idx=i)\n for i, embeddings_ in enumerate(class_embeddings_)\n ]\n\n return class_embeddings_, class_responsibilities_\n\n def _m_step(preupdate_vars, all_embeddings_, all_responsibilities_):\n \"\"\"Compute parameter estimates given `loop_variables_`.\"\"\"\n\n means, log_scales, logits = zip(*map(\n reparameterizable_distributions.fit_gaussian_mixture, all_embeddings_,\n all_responsibilities_, itertools.repeat(self.head_fn.damping)))\n\n def flatten(x):\n return list(itertools.chain.from_iterable(x))\n\n means = flatten(means)\n log_scales = flatten(log_scales)\n logits = flatten(logits)\n\n if not self.head_fn.estimate_loc:\n means = [None for _ in means]\n\n if not self.head_fn.estimate_scale:\n log_scales = [None for _ in log_scales]\n\n if not self.head_fn.estimate_logits:\n logits = [None for _ in logits]\n\n updated_vars = means + log_scales + logits\n\n # Replace constant variables.\n # TODO(eringrant): This interface differs from just excluding these\n # variables from `task_variables`.\n no_none_updated_vars = []\n for preupdate_var, updated_var in zip(preupdate_vars, updated_vars):\n if updated_var is None:\n no_none_updated_vars.append(preupdate_var)\n else:\n no_none_updated_vars.append(updated_var)\n\n # TODO(eringrant): This assumes an ordering of mean, log_scales,\n # mixing_logits.\n return no_none_updated_vars\n\n # Loop body.\n with tf.control_dependencies(episodic_init_ops):\n\n # Inner loop of expectation maximization.\n num_em_steps = getattr(self, 'num_em_steps', 0)\n final_loop_variables = init_loop_variables\n if num_em_steps > 0:\n final_loop_variables = em_loop(\n num_updates=num_em_steps,\n e_step=_e_step,\n m_step=_m_step,\n variables=final_loop_variables)\n\n # Inner loop of gradient-based optimization.\n num_optimizer_steps = (\n self.num_update_steps + (self.additional_evaluation_update_steps\n if not self.is_training else 0))\n if num_optimizer_steps > 0:\n # pylint: disable=no-value-for-parameter\n final_loop_variables = optimizer_loop(\n num_updates=num_optimizer_steps,\n objective_fn=_objective_fn,\n update_fn=self.update_fn,\n variables=final_loop_variables,\n first_order=self.first_order,\n clip_grad_norm=self.clip_grad_norm,\n )\n # pylint: enable=no-value-for-parameter\n\n # If no inner loop adaptation is performed, ensure the episodic\n # initialization is still part of the graph via a control dependency.\n if num_optimizer_steps + num_em_steps == 0:\n final_loop_variables = [tf.identity(v) for v in init_loop_variables]\n\n # Get variable references to use when remapping the loop_variables.\n init_loop_variables_mapping = dict(\n zip(init_loop_variable_refs, init_loop_variables))\n final_loop_variables_mapping = dict(\n zip(init_loop_variable_refs, final_loop_variables))\n\n # Collect statistics about the inner optimization.\n with tf.compat.v1.name_scope('pre-adaptation'):\n with tf.compat.v1.name_scope('support'):\n pre_adaptation_support_results = _forward_pass(\n iteration_idx_=0,\n variables_mapping_=init_loop_variables_mapping,\n images_=data.support_images,\n onehot_labels_=data.onehot_support_labels)\n\n with tf.compat.v1.name_scope('query'):\n pre_adaptation_query_results = _forward_pass(\n iteration_idx_=0,\n variables_mapping_=init_loop_variables_mapping,\n images_=data.query_images,\n onehot_labels_=data.onehot_query_labels)\n\n with tf.compat.v1.name_scope('post-adaptation'):\n with 
tf.compat.v1.name_scope('support'):\n post_adaptation_support_results = _forward_pass(\n iteration_idx_=num_optimizer_steps,\n variables_mapping_=final_loop_variables_mapping,\n images_=data.support_images,\n onehot_labels_=data.onehot_support_labels,\n )\n\n with tf.compat.v1.name_scope('query'):\n post_adaptation_query_results = _forward_pass(\n iteration_idx_=num_optimizer_steps,\n variables_mapping_=final_loop_variables_mapping,\n images_=data.query_images,\n onehot_labels_=data.onehot_query_labels,\n )\n\n def _support_module_objective_fn(module_variables_, module_variable_refs_):\n \"\"\"Evaluate the query set objective given `module_variables_`.\"\"\"\n # Use the values of the parameters at convergence as the default value.\n variables_mapping_ = final_loop_variables_mapping.copy()\n\n # Loop over and replace the module-specific variables.\n for module_variable_ref, module_variable in zip(module_variable_refs_,\n module_variables_):\n variables_mapping_[module_variable_ref] = module_variable\n\n adaptation_query_results = _forward_pass(\n iteration_idx_=num_optimizer_steps,\n variables_mapping_=variables_mapping_,\n images_=data.support_images,\n onehot_labels_=data.onehot_support_labels,\n )\n\n return adaptation_query_results.inner_objective_value\n\n def _query_module_objective_fn(module_variables_, module_variable_refs_):\n \"\"\"Evaluate the query set objective given `module_variables_`.\"\"\"\n # Use the values of the parameters at convergence as the default value.\n variables_mapping_ = final_loop_variables_mapping.copy()\n\n # Loop over and replace the module-specific variables.\n for module_variable_ref, module_variable in zip(module_variable_refs_,\n module_variables_):\n variables_mapping_[module_variable_ref] = module_variable\n\n adaptation_query_results = _forward_pass(\n iteration_idx_=num_optimizer_steps,\n variables_mapping_=variables_mapping_,\n images_=data.query_images,\n onehot_labels_=data.onehot_query_labels)\n\n return adaptation_query_results.inner_objective_value\n\n return Adaptation(\n pre_adaptation_support_results=pre_adaptation_support_results,\n post_adaptation_support_results=post_adaptation_support_results,\n pre_adaptation_query_results=pre_adaptation_query_results,\n post_adaptation_query_results=post_adaptation_query_results,\n objective_fn=_objective_fn,\n support_module_objective_fn=_support_module_objective_fn,\n query_module_objective_fn=_query_module_objective_fn,\n forward_pass_fn=_forward_pass,\n init_loop_variables_mapping=init_loop_variables_mapping,\n final_loop_variables_mapping=final_loop_variables_mapping,\n )\n\n\[email protected]\nclass HeadAndBackboneLearner(ExperimentalOptimizationLearner):\n \"\"\"A head-and-backbone learner.\"\"\"\n\n def __init__(self,\n head_cls,\n adapt_head_predicate,\n episodic_head_init_fn=None,\n **kwargs):\n \"\"\"Initializes a `HeadAndBackboneLearner` instance.\n\n Args:\n head_cls: A subclass of `ReparameterizableModule` used to instantiate the\n head function.\n adapt_head_predicate: A callable that returns True for `tf.Variable`\n attributes of the head function should be adapted for each task.\n episodic_head_init_fn: A callable that takes in a tuple of one-hot labels,\n embeddings and head classifier weights, and produces intialization\n operations to be executed at the start of each episode. 
If None, no\n episodic initialization is performed.\n **kwargs: Keyword arguments common to all\n `ExperimentalOptimizationLearner`s.\n \"\"\"\n super(HeadAndBackboneLearner, self).__init__(**kwargs)\n assert issubclass(head_cls, reparameterizable_base.ReparameterizableModule)\n self.adapt_head_predicate = adapt_head_predicate\n self.head_fn = head_cls(output_dim=self.logit_dim)\n\n def no_op_initialization(onehot_labels, embeddings, *vbls):\n del onehot_labels\n del embeddings\n del vbls\n return [tf.no_op()]\n\n self.episodic_head_init_fn = episodic_head_init_fn or no_op_initialization\n\n def compute_regularizer(self, onehot_labels, predictions):\n \"\"\"Computes a regularizer, maybe using `predictions` and `onehot_labels`.\"\"\"\n del onehot_labels\n del predictions\n return (tf.reduce_sum(input_tensor=self.embedding_fn.losses) +\n tf.reduce_sum(input_tensor=self.head_fn.losses))\n\n def build(self):\n \"\"\"Instantiate the parameters belonging to this `HeadAndBackboneLearner`.\"\"\"\n super(HeadAndBackboneLearner, self).build()\n if not self.head_fn.built:\n self.head_fn.build(self.embedding_shape)\n self.output_shape = self.head_fn.compute_output_shape(self.embedding_shape)\n\n def episodic_init_ops(self, labels, embeddings, task_parameters):\n \"\"\"Return operations for episodic initalization of `task_parameters`.\"\"\"\n # Isolate the head parameters.\n head_parameters = task_parameters[len(list(self.backbone_parameters)):]\n assert len(head_parameters) == len(list(self.head_parameters))\n return self.episodic_head_init_fn(labels, embeddings, *head_parameters)\n\n def inner_objective(self, onehot_labels, predictions, iteration_idx):\n \"\"\"Alias for softmax cross entropy loss.\"\"\"\n cce = tf.keras.losses.CategoricalCrossentropy()\n return cce(onehot_labels, predictions)\n\n def outer_objective(self, onehot_labels, predictions):\n \"\"\"Alias for softmax cross entropy loss.\"\"\"\n cce = tf.keras.losses.CategoricalCrossentropy()\n regularization = self.compute_regularizer(\n onehot_labels=onehot_labels, predictions=predictions)\n return cce(onehot_labels, predictions) + regularization\n\n @property\n def variables(self):\n \"\"\"Returns a tuple of this Learner's variables.\"\"\"\n if not self._built:\n raise learner_base.NotBuiltError\n return self.embedding_fn.variables + self.head_fn.variables\n\n @property\n def trainable_variables(self):\n \"\"\"Returns a tuple of this Learner's trainable variables.\"\"\"\n if not self._built:\n raise learner_base.NotBuiltError\n return (self.embedding_fn.trainable_variables +\n self.head_fn.trainable_variables)\n\n @property\n def task_parameters(self):\n \"\"\"Returns a tuple of the variables to be adapted for each task.\"\"\"\n if not self._built:\n raise learner_base.NotBuiltError\n return list(itertools.chain(self.backbone_parameters, self.head_parameters))\n\n @property\n def backbone_parameters(self):\n return list(\n self.embedding_fn.reparameterizables(self.adapt_embedding_predicate))\n\n @property\n def head_parameters(self):\n return list(self.head_fn.reparameterizables(self.adapt_head_predicate))\n\n\[email protected](whitelist=['prototype_multiplier'])\ndef proto_maml_fc_layer_init_fn(labels, embeddings, weights, biases,\n prototype_multiplier):\n \"\"\"Return a list of operations for reparameterized ProtoNet initialization.\"\"\"\n\n # This is robust to classes missing from the training set, but assumes that\n # the last class is present.\n num_ways = tf.cast(\n tf.math.reduce_max(input_tensor=tf.unique(labels)[0]) + 1, 
tf.int32)\n\n # When there are no examples for a given class, we default its prototype to\n # zeros, per the implementation of `tf.math.unsorted_segment_mean`.\n prototypes = tf.math.unsorted_segment_mean(embeddings, labels, num_ways)\n\n # Scale the prototypes, which acts as a regularizer on the weights and biases.\n prototypes *= prototype_multiplier\n\n # logit = -<squared Euclidian distance to prototype>\n # = -(x - p)^T.(x - p)\n # = 2 x^T.p - p^T.p - x^T.x\n # = x^T.w + b\n # where w = 2p, b = -p^T.p\n output_weights = tf.transpose(a=2 * prototypes)\n output_biases = -tf.reduce_sum(input_tensor=prototypes * prototypes, axis=1)\n\n # We zero-pad to align with the original weights and biases.\n output_weights = tf.pad(\n tensor=output_weights,\n paddings=[[\n 0, 0\n ], [0, tf.shape(input=weights)[1] - tf.shape(input=output_weights)[1]]],\n mode='CONSTANT',\n constant_values=0)\n output_biases = tf.pad(\n tensor=output_biases,\n paddings=[[\n 0, tf.shape(input=biases)[0] - tf.shape(input=output_biases)[0]\n ]],\n mode='CONSTANT',\n constant_values=0)\n\n return [\n weights.assign(output_weights),\n biases.assign(output_biases),\n ]\n\n\ndef zero_init_fn(labels, embeddings, *vbls):\n \"\"\"Return a list of operations for initialization at zero.\"\"\"\n del labels\n del embeddings\n return [vbl.assign(tf.zeros_like(vbl)) for vbl in vbls]\n\n\[email protected]\nclass MAML(HeadAndBackboneLearner):\n \"\"\"A 'model-agnostic' meta-learner.\"\"\"\n\n def __init__(self, proto_maml_fc_layer_init, zero_fc_layer_init, **kwargs):\n \"\"\"Initializes a MAML instance.\n\n Args:\n proto_maml_fc_layer_init: Whether to use `PrototypicalNetwork`-equivalent\n fc layer initialization.\n zero_fc_layer_init: Whether to initialize the parameters of the output\n layer to zero.\n **kwargs: Keyword arguments common to all `HeadAndBackboneLearner`s.\n\n Raises:\n ValueError: If both `proto_maml_fc_layer_init` and `zero_fc_layer_init`\n are `True`.\n \"\"\"\n if proto_maml_fc_layer_init and zero_fc_layer_init:\n raise ValueError('Conflicting initialization options for `MAML`.')\n\n super(MAML, self).__init__(\n episodic_head_init_fn=(proto_maml_fc_layer_init_fn\n if proto_maml_fc_layer_init else\n zero_init_fn if zero_fc_layer_init else None),\n adapt_embedding_predicate=reparameterizable_base.is_trainable_variable,\n adapt_head_predicate=reparameterizable_base.is_trainable_variable,\n head_cls=reparameterizable_backbones.LinearModel,\n **kwargs)\n\n\[email protected]\nclass ANIL(HeadAndBackboneLearner):\n \"\"\"An 'almost-no-inner-loop' learner.\"\"\"\n\n def __init__(self, proto_maml_fc_layer_init, zero_fc_layer_init, **kwargs):\n \"\"\"Initializes an ANIL instance.\n\n Args:\n proto_maml_fc_layer_init: Whether to use `PrototypicalNetwork`-equivalent\n fc layer initialization.\n zero_fc_layer_init: Whether to initialize the parameters of the output\n layer to zero.\n **kwargs: Keyword arguments common to all `HeadAndBackboneLearner`s.\n\n Raises:\n ValueError: If both `proto_maml_fc_layer_init` and `zero_fc_layer_init`\n are `True`.\n \"\"\"\n if proto_maml_fc_layer_init and zero_fc_layer_init:\n raise ValueError('Conflicting initialization options for `ANIL`.')\n\n super(ANIL, self).__init__(\n episodic_head_init_fn=(proto_maml_fc_layer_init_fn\n if proto_maml_fc_layer_init else\n zero_init_fn if zero_fc_layer_init else None),\n adapt_embedding_predicate=lambda x: False,\n adapt_head_predicate=reparameterizable_base.is_trainable_variable,\n head_cls=reparameterizable_backbones.LinearModel,\n 
**kwargs)\n\n\[email protected]\ndef generative_then_discriminative_schedule(proportion_generative, num_updates):\n num_generative_updates = int(proportion_generative * num_updates)\n num_discriminative_updates = num_updates - num_generative_updates\n return [0.0] * num_generative_updates + [1.0] * num_discriminative_updates\n\n\[email protected]\nclass GenerativeClassifier(HeadAndBackboneLearner):\n \"\"\"A generative classifier.\"\"\"\n\n def __init__(self, generative_scaling, interpolation_schedule, **kwargs):\n \"\"\"Initializes a GenerativeClassifier instance.\n\n Args:\n generative_scaling:\n interpolation_schedule: A callable that produces a sequence of\n coefficients used to interpolate between the generative and\n discriminative objectives. additional_evaluation_update_steps] array of\n coefficients used to interpolate between the generative and\n discriminative objectives.\n **kwargs: Keyword arguments common to all `HeadAndBackboneLearner`s.\n \"\"\"\n\n super(GenerativeClassifier, self).__init__(\n adapt_embedding_predicate=lambda x: False,\n adapt_head_predicate=reparameterizable_base.is_trainable_variable,\n **kwargs)\n assert isinstance(\n self.head_fn,\n reparameterizable_distributions.ReparameterizableClassMixture)\n\n self.generative_scaling = generative_scaling\n\n self.gen_disc_interpolation = (\n interpolation_schedule(num_updates=self.num_update_steps) +\n [1.0] * self.additional_evaluation_update_steps\n ) # Assume discriminative.\n assert all(coef >= 0 for coef in self.gen_disc_interpolation), (\n 'Interpolation coefficient should be nonnegative.')\n\n # Validate interpolation coefficient.\n # TODO(eringrant): generalize to other models admitting EM.\n if isinstance(self.head_fn,\n reparameterizable_distributions.GaussianMixture):\n # Override the usual generative training to perform EM.\n try:\n num_em_steps = self.gen_disc_interpolation.index(1.0)\n except ValueError:\n # All steps are EM.\n num_em_steps = self.num_update_steps\n assert (\n all(coef == 0.0\n for coef in self.gen_disc_interpolation[:num_em_steps]) and\n all(coef == 1.0\n for coef in self.gen_disc_interpolation[num_em_steps:])\n ), ('Each step must be fully discriminative or generative when using EM.')\n self.num_em_steps = num_em_steps\n self.num_update_steps -= num_em_steps\n\n @property\n def task_parameters(self):\n return self.head_fn.task_parameters\n\n def joint_log_likelihood(self, onehot_labels, log_probs):\n \"\"\"Compute p(z, y).\"\"\"\n labels = tf.cast(\n tf.reduce_sum(input_tensor=onehot_labels, axis=0), dtype=tf.float32)\n class_log_probs = tf.math.log(labels / tf.reduce_sum(input_tensor=labels))\n return log_probs + tf.expand_dims(class_log_probs, 0)\n\n def inner_objective(self, onehot_labels, predictions, iteration_idx):\n \"\"\"Compute the inner-loop objective.\"\"\"\n # p(z, y), joint log-likelihood.\n joint_log_probs = self.joint_log_likelihood(onehot_labels, predictions)\n labels = tf.expand_dims(tf.argmax(input=onehot_labels, axis=-1), axis=-1)\n numerator = tf.gather(joint_log_probs, labels, axis=-1, batch_dims=1)\n\n # p(z), normalization constant.\n evidence = tf.reduce_logsumexp(\n input_tensor=joint_log_probs, axis=-1, keepdims=True)\n\n # p(y | z) if interpolation coefficient > 0 else p(z, y).\n # TODO(eringrant): This assumes that `interp` is either 1 or 0.\n # Adapt to a hybridized approach.\n interp = tf.gather(self.gen_disc_interpolation, iteration_idx)\n scale = tf.cond(\n pred=interp > 0.0,\n true_fn=lambda: 1.0,\n false_fn=lambda: self.generative_scaling)\n\n 
return -scale * tf.reduce_mean(\n input_tensor=numerator - interp * evidence, axis=0)\n\n def outer_objective(self, onehot_labels, predictions):\n \"\"\"Compute the outer-loop objective.\"\"\"\n joint_log_probs = self.joint_log_likelihood(onehot_labels, predictions)\n cce = tf.keras.losses.CategoricalCrossentropy()\n regularization = self.compute_regularizer(\n onehot_labels=onehot_labels, predictions=predictions)\n return cce(onehot_labels, joint_log_probs) + regularization\n\n def validate_model_independence(self, labels, log_probs, task_parameters):\n \"\"\"Partition gradients into those assumed active and inactive.\"\"\"\n num_task_parameters = len(task_parameters)\n # pylint: disable=g-complex-comprehension\n on_gradients = [[\n tf.norm(tensor=on_gradient) for on_gradient in on_gradients\n ] for on_gradients in [\n tf.gradients(\n ys=tf.gather(log_probs, tf.compat.v1.where(tf.equal(labels, i))),\n xs=task_parameters[i * num_task_parameters:(i + 1) *\n num_task_parameters]) for i in range(1)\n ]]\n off_gradients = [[\n tf.norm(tensor=off_gradient) for off_gradient in off_gradients\n ] for off_gradients in [\n tf.gradients(\n ys=tf.gather(log_probs, tf.compat.v1.where(tf.equal(labels, i))),\n xs=task_parameters[i * num_task_parameters:(i + 1) *\n num_task_parameters]) for i in range(1)\n ]]\n # pylint: enable=g-complex-comprehension\n\n return (list(itertools.chain.from_iterable(on_gradients)),\n list(itertools.chain.from_iterable(off_gradients)))\n", "# coding=utf-8\n# Copyright 2020 The Meta-Dataset Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Abstract experimental learners that use `ReparameterizableModule`s.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport gin.tf\n\nfrom meta_dataset.learners import base as learner_base\nimport tensorflow as tf\n\n\nclass NotBuiltError(RuntimeError):\n\n def __init__(self):\n super(NotBuiltError, self).__init__(\n 'The `build` method of `ExperimentalLearner` must be called before '\n 'accessing its variables.')\n\n\ndef class_specific_data(onehot_labels, data, num_classes, axis=0):\n # TODO(eringrant): Deal with case of no data for a class in [1...num_classes].\n data_shape = [s for i, s in enumerate(data.shape) if i != axis]\n labels = tf.argmax(onehot_labels, axis=-1)\n class_idx = [tf.where(tf.equal(labels, i)) for i in range(num_classes)]\n return [\n tf.reshape(tf.gather(data, idx, axis=axis), [-1] + data_shape)\n for idx in class_idx\n ]\n\n\[email protected]\nclass ExperimentalLearner(learner_base.Learner):\n \"\"\"An experimental learner.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Constructs an `ExperimentalLearner`.\n\n Args:\n **kwargs: Keyword arguments common to all `Learner`s.\n\n Raises:\n ValueError: If the `embedding_fn` provided is not an instance of\n `tf.Module`.\n \"\"\"\n super(ExperimentalLearner, self).__init__(**kwargs)\n\n if not isinstance(self.embedding_fn, tf.Module):\n raise 
ValueError('The `embedding_fn` provided to `ExperimentalLearner`s '\n 'must be an instance of `tf.Module`.')\n\n self._built = False\n\n def compute_regularizer(self, onehot_labels, predictions):\n \"\"\"Computes a regularizer, maybe using `predictions` and `onehot_labels`.\"\"\"\n del onehot_labels\n del predictions\n return tf.reduce_sum(input_tensor=self.embedding_fn.losses)\n\n def build(self):\n \"\"\"Instantiate the parameters belonging to this `ExperimentalLearner`.\"\"\"\n if not self.embedding_fn.built:\n self.embedding_fn.build([None] + self.input_shape)\n self.embedding_shape = self.embedding_fn.compute_output_shape(\n [None] + self.input_shape)\n self._built = True\n\n @property\n def variables(self):\n \"\"\"Returns a list of this `ExperimentalLearner`'s variables.\"\"\"\n if not self._built:\n raise NotBuiltError\n return self.embedding_fn.variables\n\n @property\n def trainable_variables(self):\n \"\"\"Returns a list of this `ExperimentalLearner`'s trainable variables.\"\"\"\n if not self._built:\n raise NotBuiltError\n return self.embedding_fn.trainable_variables\n\n\nclass ExperimentalEpisodicLearner(ExperimentalLearner,\n learner_base.EpisodicLearner):\n \"\"\"An experimental episodic learner.\"\"\"\n\n pass\n\n\nclass ExperimentalBatchLearner(ExperimentalLearner, learner_base.BatchLearner):\n \"\"\"An experimental batch learner.\"\"\"\n\n pass\n" ]
[ [ "tensorflow.unique", "tensorflow.gradients", "tensorflow.executing_eagerly", "tensorflow.zeros_like", "tensorflow.sqrt", "tensorflow.clip_by_value", "tensorflow.keras.losses.CategoricalCrossentropy", "tensorflow.control_dependencies", "tensorflow.nn.softmax", "tensorflow.identity", "tensorflow.cast", "tensorflow.no_op", "tensorflow.shape", "tensorflow.GradientTape", "tensorflow.compat.v1.name_scope", "tensorflow.argmax", "tensorflow.while_loop", "tensorflow.Variable", "tensorflow.transpose", "tensorflow.norm", "tensorflow.reduce_logsumexp", "tensorflow.expand_dims", "tensorflow.math.unsorted_segment_mean", "tensorflow.reduce_sum", "tensorflow.cond", "tensorflow.equal", "tensorflow.gather", "tensorflow.reduce_mean", "tensorflow.stop_gradient" ], [ "tensorflow.argmax", "tensorflow.equal", "tensorflow.reduce_sum", "tensorflow.gather" ] ]
ihumphrey/HGDL
[ "996a9dd01a4e7f34e85bd14bb24ec23537555104" ]
[ "tests/test_schwefel.py" ]
[ "import numpy as np\nfrom hgdl.hgdl import HGDL as hgdl\nfrom .support_functions import *\nimport time\nimport dask.distributed as distributed\nimport tracemalloc\n\n\n\ndef test_schwefel():\n arr = 5\n brr = 6\n bounds = np.array([[-500,500],[-500,500]])\n #dask_client = distributed.Client(\"10.0.0.184:8786\")\n a = hgdl(schwefel, schwefel_gradient, bounds,\n hess = schwefel_hessian,\n #global_optimizer = \"random\",\n global_optimizer = \"genetic\",\n #global_optimizer = \"gauss\",\n local_optimizer = \"dNewton\",\n number_of_optima = 300,\n args = (arr,brr), radius = 0.10, num_epochs = 100)\n #a.optimize(dask_client = distributed.Client())\n x0 = np.random.uniform(low = bounds[:, 0], high = bounds[:,1],size = (20,2))\n print(\"starting positions: \")\n print(x0)\n print(\"--------------------\")\n a.optimize(x0 = x0)\n #a.optimize(dask_client = False)\n #res = a.optima_list\n #print(res)\n\n\n #print(a.optima_list)\n print(\"main thread submitted HGDL and will now sleep for 2 seconds\")\n time.sleep(2)\n print(\"main thread asks for 10 best solutions:\")\n print(a.get_latest())\n #a.cancel_tasks()\n print(\"main sleeps for another 2 seconds\")\n time.sleep(2)\n print(\"main thread kills optimization\")\n res = a.kill_client()\n print(\"hgdl was killed but I am waiting 2s\")\n print(\"\")\n print(\"\")\n print(\"\")\n print(\"\")\n print(res)\n\nif __name__ == '__main__':\n test_schwefel()\n" ]
[ [ "numpy.array", "numpy.random.uniform" ] ]
mikailkhona/1DGCN
[ "615fe1e999c179f034e4f5d5dd2f5ec5789cdc82" ]
[ "LNP_helper_functions.py" ]
[ "\"\"\"\nOriginal MATLAB code by Ila Fiete and John Widloski (2014)\nAdapted into Python by Francisco Acosta in 2020 \n\"\"\"\n\nimport numpy as np\n\n\n\ndef compute_LNP_ionic_currents(g_L):\n \n '''\n This function computes the total ionic current (I_ion) based on:\n - ionic conductance amplitudes of the leak current (g_L)\n '''\n \n I_ion = g_L\n \n return I_ion\n\n\ndef compute_LNP_syn_currents_det(rate,s,dt,ind_EL,ind_ER,ind_I,G_I_EL,G_I_ER,G_EL_I,G_ER_I,G_I_I,tau_g):\n \n '''\n This function computes synaptic currents to all neurons (I_syn) and updates values of\n synaptic activations (s), based on:\n \n - current spike vector (spk) \n - time step (dt)\n - synaptic activations from previous time step (s)\n - indices of neurons in each population (ind_EL, ind_ER, ind_I)\n - synaptic conductance amplitudes (G_I_EL,G_I_ER,G_EL_I,G_ER_I,G_I_I)\n - synaptic conductance time constants (tau_g)\n '''\n s_EL = s[ind_EL]\n s_ER = s[ind_ER]\n s_I = s[ind_I]\n \n ''' Update synaptic inputs into inhibitory pop. '''\n I_I_syn = G_I_EL@s_EL + G_I_ER@s_ER - G_I_I@s_I\n \n ''' Update synaptic inputs into excitatory pops. '''\n I_EL_syn = -G_EL_I@s_I\n I_ER_syn = -G_ER_I@s_I\n\n I_syn = np.concatenate((I_I_syn,I_EL_syn,I_ER_syn),axis = 0)\n \n '''Update synaptic activations'''\n s_EL = s_EL + dt/tau_g*(-s_EL + tau_g*rate[ind_EL])\n s_ER = s_ER + dt/tau_g*(-s_ER + tau_g*rate[ind_ER])\n s_I = s_I + dt/tau_g*(-s_I + tau_g*rate[ind_I])\n\n s = np.concatenate((s_I,s_EL,s_ER),axis = 0)\n \n return I_syn,s\n\n\n\ndef compute_LNP_output_det(dt,I_ion,I_syn,I_app,A_env,velocity,beta_vel):\n \n '''\n This function outputs the spike vector (spk), based on:\n \n - time step (dt)\n - ionic currents (I_ion)\n - synaptic currents (I_syn)\n - externally applied currents (I_app)\n - envelope function for suppressing edge neurons in aperiodic nets (A_env)\n '''\n \n '''total input current into all neurons''' \n\n modulation = (1 + beta_vel*velocity)\n\n I_in = -I_ion + A_env*modulation*(I_syn + I_app)\n \n '''apply threshold nonlinearity to input current'''\n rate = I_in*(I_in>0)\n \n '''draw spikes with rate = I_in*dt'''\n #spk = np.random.poisson(I_in*dt)\n \n return rate\n\n\n\ndef compute_LNP_syn_currents(spk,s,dt,ind_EL,ind_ER,ind_I,G_I_EL,G_I_ER,G_EL_I,G_ER_I,G_I_I,tau_g):\n \n '''\n This function computes synaptic currents to all neurons (I_syn) and updates values of\n synaptic activations (s), based on:\n \n - current spike vector (spk) \n - time step (dt)\n - synaptic activations from previous time step (s)\n - indices of neurons in each population (ind_EL, ind_ER, ind_I)\n - synaptic conductance amplitudes (G_I_EL,G_I_ER,G_EL_I,G_ER_I,G_I_I)\n - synaptic conductance time constants (tau_g)\n '''\n s_EL = s[ind_EL]\n s_ER = s[ind_ER]\n s_I = s[ind_I]\n \n ''' Update synaptic inputs into inhibitory pop. '''\n I_I_syn = G_I_EL@s_EL + G_I_ER@s_ER - G_I_I@s_I\n \n ''' Update synaptic inputs into excitatory pops. 
'''\n I_EL_syn = -G_EL_I@s_I\n I_ER_syn = -G_ER_I@s_I\n\n I_syn = np.concatenate((I_I_syn,I_EL_syn,I_ER_syn),axis = 0)\n \n '''Update synaptic activations'''\n s_EL = s_EL + dt/tau_g*(-s_EL) + spk[ind_EL]\n s_ER = s_ER + dt/tau_g*(-s_ER) + spk[ind_ER]\n s_I = s_I + dt/tau_g*(-s_I) + spk[ind_I]\n\n s = np.concatenate((s_I,s_EL,s_ER),axis = 0)\n \n return I_syn,s\n\n\n\ndef compute_LNP_output(dt,I_ion,I_syn,I_app,A_env,velocity,beta_vel):\n \n '''\n This function outputs the spike vector (spk), based on:\n \n - time step (dt)\n - ionic currents (I_ion)\n - synaptic currents (I_syn)\n - externally applied currents (I_app)\n - envelope function for suppressing edge neurons in aperiodic nets (A_env)\n '''\n \n '''total input current into all neurons'''\n modulation = (1 + beta_vel*velocity)\n I_in = -I_ion + A_env*modulation*(I_syn + I_app)\n \n '''apply threshold nonlinearity to input current'''\n I_in = I_in*(I_in>0)\n \n '''draw spikes with rate = I_in*dt'''\n spk = np.random.poisson(I_in*dt)\n \n return spk\n\n\n\ndef create_envelope(periodic,N):\n \n '''\n This function returns an envelope for network of size N; The envelope can\n either be suppressive (periodic = 0) or flat and equal to one (periodic = 1)\n '''\n \n kappa = 0.3 #controls width of main body of envelope\n a0 = 30 #controls steepness of envelope\n \n if not periodic:\n A = np.zeros((N))\n for m in range(1,N+1):\n r = abs(m-N/2)\n if r < kappa*N:\n A[m-1] = 1\n else:\n A[m-1] = np.exp(-a0*((r-kappa*N)/((1-kappa)*N))**2)\n \n else:\n A = np.ones((N))\n \n return A" ]
[ [ "numpy.concatenate", "numpy.zeros", "numpy.random.poisson", "numpy.ones", "numpy.exp" ] ]
JakeL77/PyFR
[ "19deeb3f550f7a31803b54a6b54d7c80d4200e8b" ]
[ "pyfr/plugins/residual.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom pyfr.mpiutil import get_comm_rank_root, get_mpi\nfrom pyfr.plugins.base import BasePlugin, init_csv\n\n\nclass ResidualPlugin(BasePlugin):\n name = 'residual'\n systems = ['*']\n formulations = ['std']\n\n def __init__(self, intg, cfgsect, suffix):\n super().__init__(intg, cfgsect, suffix)\n\n comm, rank, root = get_comm_rank_root()\n\n # Output frequency\n self.nsteps = self.cfg.getint(cfgsect, 'nsteps')\n\n # The root rank needs to open the output file\n if rank == root:\n header = ['t'] + intg.system.elementscls.convarmap[self.ndims]\n\n # Open\n self.outf = init_csv(self.cfg, cfgsect, ','.join(header))\n\n # Call ourself in case output is needed after the first step\n self(intg)\n\n def __call__(self, intg):\n # If an output is due this step\n if intg.nacptsteps % self.nsteps == 0 and intg.nacptsteps:\n # MPI info\n comm, rank, root = get_comm_rank_root()\n\n # Previous and current solution\n prev = self._prev\n curr = intg.soln\n\n # Square of the residual vector for each variable\n resid = sum(np.linalg.norm(p - c, axis=(0, 2))**2\n for p, c in zip(prev, curr))\n\n # Reduce and, if we are the root rank, output\n if rank != root:\n comm.Reduce(resid, None, op=get_mpi('sum'), root=root)\n else:\n comm.Reduce(get_mpi('in_place'), resid, op=get_mpi('sum'),\n root=root)\n\n # Normalise\n resid = np.sqrt(resid) / (intg.tcurr - self._tprev)\n\n # Write\n print(intg.tcurr, *resid, sep=',', file=self.outf)\n\n # Flush to disk\n self.outf.flush()\n\n del self._prev, self._tprev\n\n # If an output is due next step\n if (intg.nacptsteps + 1) % self.nsteps == 0:\n self._prev = [s.copy() for s in intg.soln]\n self._tprev = intg.tcurr\n" ]
[ [ "numpy.linalg.norm", "numpy.sqrt" ] ]
urunimi/tf-sample
[ "51566b3f9b8bfde35eeed81c36f2c360e226f229" ]
[ "org.tensorflow/01_helloworld/placeholder.py" ]
[ "import tensorflow as tf\n\na = tf.placeholder(tf.float32)\nb = tf.placeholder(tf.float32)\nadder_node = a + b\n\nss = tf.Session()\n\nprint(ss.run(adder_node, {a: 3, b: 4.5}))\nprint(ss.run(adder_node, {a: [1, 3], b: [2, 4]}))\n\nadd_and_triple = adder_node * 3\nprint(ss.run(add_and_triple, {a: 3, b: 4.5}))" ]
[ [ "tensorflow.Session", "tensorflow.placeholder" ] ]
psi1104/HiDT
[ "148e790e7711d4032aa2a0f458ba0985e54fd328" ]
[ "main.py" ]
[ "import argparse\nimport glob\nimport os\nimport sys\nsys.path.append('./HiDT')\n\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\nfrom tqdm import tqdm\n\nfrom hidt.networks.enhancement.RRDBNet_arch import RRDBNet\nfrom hidt.style_transformer import StyleTransformer\nfrom hidt.utils.preprocessing import GridCrop, enhancement_preprocessing\nfrom hidt.utils.io import save_img\n\nconfig_path = './configs/daytime.yaml'\ngen_weights_path = './trained_models/generator/daytime.pt'\ninference_size = 256 # the network has been trained to do inference in 256px, any higher value might lead to artifacts\ndevice= torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nimage_path = './images/daytime/content/0.jpg'\nstyles_path = './styles.txt'\nenhancer_weights = './trained_models/enhancer/enhancer.pth'\nresult_path = './results'\nstyle = 'hard_day'\n\nstyle_transformer = StyleTransformer(config_path,\n gen_weights_path,\n inference_size=inference_size,\n device=device)\nwith open(styles_path) as f:\n styles = f.read()\nstyles = {style.split(',')[0]: torch.tensor([float(el) for el in style.split(',')[1][1:-1].split(' ')]) for style in styles.split('\\n')[:-1]}\nimage = Image.open(image_path)\ncrop_transform = GridCrop(4, 1, hires_size=inference_size * 4)\n\nstyle_to_transfer = styles[style]\n\nstyle_to_transfer = style_to_transfer.view(1, 1, 3, 1).to(device)\nwith torch.no_grad():\n content_decomposition = style_transformer.get_content(image)[0]\n decoder_input = {'content': content_decomposition['content'],\n 'intermediate_outputs': content_decomposition['intermediate_outputs'],\n 'style': style_to_transfer}\n transferred = style_transformer.trainer.gen.decode(decoder_input)['images']\n\nresult_images = transforms.ToPILImage()((transferred[0].cpu().clamp(-1, 1) + 1.) / 2.)\n\nos.makedirs(result_path, exist_ok=True)\n\nsource_name = image_path.split('/')[-1].split('.')[0]\nstyle_name = style\n\nsave_img(result_images,\n os.path.join(result_path,\n source_name + '_to_' + style_name + '.jpg')\n )" ]
[ [ "torch.no_grad", "torch.cuda.is_available" ] ]
dinhtungtp/pSp
[ "0b95f32569ad89eae2eab156e2ee5c0f5655ed09" ]
[ "models/psp.py" ]
[ "\"\"\"\r\nThis file defines the core research contribution\r\n\"\"\"\r\nimport matplotlib\r\n\r\nmatplotlib.use('Agg')\r\nimport torch\r\nfrom torch import nn\r\nfrom models.encoders import psp_encoders\r\nfrom models.stylegan2.model import Generator, ConstantRectangleInput\r\nfrom configs.paths_config import model_paths\r\n\r\n\r\ndef get_keys(d, name):\r\n\tif 'state_dict' in d:\r\n\t\td = d['state_dict']\r\n\td_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}\r\n\treturn d_filt\r\n\r\n\r\nclass pSp(nn.Module):\r\n\r\n\tdef __init__(self, opts):\r\n\t\tsuper(pSp, self).__init__()\r\n\t\tself.set_opts(opts)\r\n\t\t# Define architecture\r\n\t\tself.encoder = self.set_encoder()\r\n\t\tself.decoder = Generator(1024, 512, 8)\r\n\t\tself.face_pool = torch.nn.AdaptiveAvgPool2d((256, int(256*1.5)))\r\n\t\t# Load weights if needed\r\n\t\tself.load_weights()\r\n\r\n\tdef set_encoder(self):\r\n\t\tif self.opts.encoder_type == 'GradualStyleEncoder':\r\n\t\t\tencoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts)\r\n\t\telif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoW':\r\n\t\t\tencoder = psp_encoders.BackboneEncoderUsingLastLayerIntoW(50, 'ir_se', self.opts)\r\n\t\telif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoWPlus':\r\n\t\t\tencoder = psp_encoders.BackboneEncoderUsingLastLayerIntoWPlus(50, 'ir_se', self.opts)\r\n\t\telse:\r\n\t\t\traise Exception('{} is not a valid encoders'.format(self.opts.encoder_type))\r\n\t\treturn encoder\r\n\r\n\tdef load_weights(self):\r\n\t\tif self.opts.checkpoint_path is not None:\r\n\t\t\tprint('Loading pSp from checkpoint: {}'.format(self.opts.checkpoint_path))\r\n\t\t\tckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')\r\n\t\t\tconstant_input = ConstantRectangleInput(512)\r\n\t\t\tself.decoder.input = constant_input.to(self.opts.device)\r\n\t\t\tself.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True)\r\n\t\t\tself.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True)\r\n\t\t\tself.__load_latent_avg(ckpt)\r\n\t\telse:\r\n\t\t\tprint('Loading encoders weights from irse50!')\r\n\t\t\tencoder_ckpt = torch.load(model_paths['ir_se50'])\r\n\t\t\t# if input to encoder is not an RGB image, do not load the input layer weights\r\n\t\t\tif self.opts.label_nc != 0:\r\n\t\t\t\tencoder_ckpt = {k: v for k, v in encoder_ckpt.items() if \"input_layer\" not in k}\r\n\t\t\tself.encoder.load_state_dict(encoder_ckpt, strict=False)\r\n\t\t\tprint('Loading decoder weights from pretrained!')\r\n\t\t\tckpt = torch.load(self.opts.stylegan_weights)\r\n\t\t\tconstant_input = ConstantRectangleInput(512)\r\n\t\t\tself.decoder.input = constant_input.to(self.opts.device)\r\n\t\t\tself.decoder.load_state_dict(ckpt['g_ema'], strict=False)\r\n\t\t\tif self.opts.learn_in_w:\r\n\t\t\t\tself.__load_latent_avg(ckpt, repeat=1)\r\n\t\t\telse:\r\n\t\t\t\tself.__load_latent_avg(ckpt, repeat=18)\r\n\r\n\tdef forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True,\r\n\t inject_latent=None, return_latents=False, alpha=None):\r\n\t\tif input_code:\r\n\t\t\tcodes = x\r\n\t\telse:\r\n\t\t\tcodes = self.encoder(x)\r\n\t\t\t# normalize with respect to the center of an average face\r\n\t\t\tif self.opts.start_from_latent_avg:\r\n\t\t\t\tif self.opts.learn_in_w:\r\n\t\t\t\t\tcodes = codes + self.latent_avg.repeat(codes.shape[0], 1)\r\n\t\t\t\telse:\r\n\t\t\t\t\tcodes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)\r\n\r\n\r\n\t\tif latent_mask is not 
None:\r\n\t\t\tfor i in latent_mask:\r\n\t\t\t\tif inject_latent is not None:\r\n\t\t\t\t\tif alpha is not None:\r\n\t\t\t\t\t\tcodes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcodes[:, i] = inject_latent[:, i]\r\n\t\t\t\telse:\r\n\t\t\t\t\tcodes[:, i] = 0\r\n\r\n\t\tinput_is_latent = not input_code\r\n\t\timages, result_latent = self.decoder([codes],\r\n\t\t input_is_latent=input_is_latent,\r\n\t\t randomize_noise=randomize_noise,\r\n\t\t return_latents=return_latents)\r\n\r\n\t\tif resize:\r\n\t\t\timages = self.face_pool(images)\r\n\r\n\t\tif return_latents:\r\n\t\t\treturn images, result_latent\r\n\t\telse:\r\n\t\t\treturn images\r\n\r\n\tdef set_opts(self, opts):\r\n\t\tself.opts = opts\r\n\r\n\tdef __load_latent_avg(self, ckpt, repeat=None):\r\n\t\tif 'latent_avg' in ckpt:\r\n\t\t\tself.latent_avg = ckpt['latent_avg'].to(self.opts.device)\r\n\t\t\tif repeat is not None:\r\n\t\t\t\tself.latent_avg = self.latent_avg.repeat(repeat, 1)\r\n\t\telse:\r\n\t\t\tself.decoder.to(self.opts.device)\r\n\t\t\tself.latent_avg = self.decoder.mean_latent(4096, self.opts.device)\r\n" ]
[ [ "matplotlib.use", "torch.load" ] ]
dnwissel/msc_thesis
[ "857dd7624ba9e0730be79c8968215699a442c2fa" ]
[ "src/chores/get_permutation_importance.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport json\nimport os\nimport sys\n\nmodule_path = os.path.abspath(os.path.join(\"./src/\"))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom model.autoencoders import SHAENet, SHAE\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom utils.utils import FixRandomSeed, shae_criterion\n\n\ndef main():\n PERMUTATION_REPS = 10\n with open(\"config/config.json\") as f:\n config = json.load(f)\n for cancer in config[\"cancers\"]:\n print(f\"Starting: {cancer}\")\n data = pd.read_csv(\n f\"./data/processed/{cancer}/merged/{config['data_name_tcga']}\"\n )\n X = data[data.columns[2:]]\n X = X.loc[:, (X != X.iloc[0]).any()]\n y_str = data[\"OS\"].astype(str) + \"|\" + data[\"OS.time\"].astype(str)\n\n train_splits = pd.read_csv(\n f\"./data/splits/{cancer}/{config['train_split_name_tcga']}\"\n )\n test_splits = pd.read_csv(\n f\"./data/splits/{cancer}/{config['test_split_name_tcga']}\"\n )\n clinical_indices = [\n i for i in range(len(X.columns)) if \"clinical\" in X.columns[i]\n ]\n gex_indices = [\n i for i in range(len(X.columns)) if \"gex\" in X.columns[i]\n ]\n cnv_indices = [\n i for i in range(len(X.columns)) if \"cnv\" in X.columns[i]\n ]\n meth_indices = [\n i for i in range(len(X.columns)) if \"meth\" in X.columns[i]\n ]\n mirna_indices = [\n i for i in range(len(X.columns)) if \"mirna\" in X.columns[i]\n ]\n mut_indices = [\n i for i in range(len(X.columns)) if \"mut\" in X.columns[i]\n ]\n rppa_indices = [\n i for i in range(len(X.columns)) if \"rppa\" in X.columns[i]\n ]\n\n blocks = [\n clinical_indices,\n gex_indices,\n cnv_indices,\n meth_indices,\n mirna_indices,\n mut_indices,\n rppa_indices,\n ]\n # Make sure that all variables are considered in the blocks\n assert sum([len(i) for i in blocks]) == X.shape[1]\n model = \"shaenet\"\n params = pd.read_csv(\n f\"./data/benchmarks/{cancer}/{model}_tuned_parameters_timed_euler.csv\"\n )\n scores = pd.read_csv(\n f\"./data/benchmarks/{cancer}/{model}_tuned_scores_timed_euler.csv\"\n )\n mapping = {\n np.argmax(scores[\"concordance\"]): \"best_shae\",\n }\n\n for i in list(mapping.keys()):\n test_scores = []\n train_scores = []\n print(f\"Split: {i+1}/10\")\n train_ix = train_splits.iloc[i, :].dropna().values\n test_ix = test_splits.iloc[i, :].dropna().values\n net = SHAENet(\n module=SHAE,\n criterion=shae_criterion,\n max_epochs=config[\"epochs\"],\n lr=config[\"lr\"],\n train_split=None,\n optimizer=torch.optim.Adam,\n callbacks=[\n (\"seed\", FixRandomSeed(config[\"seed\"])),\n ],\n verbose=0,\n batch_size=-1,\n module__blocks=blocks,\n )\n pipe = make_pipeline(StandardScaler(), net)\n pipe.set_params(**{key: val[i] for key, val in params.items()})\n pipe.fit(\n X.iloc[train_ix, :].to_numpy().astype(np.float32),\n y_str.iloc[train_ix].to_numpy().astype(str),\n )\n test_scores.append(\n pipe.score(\n X.iloc[test_ix, :].to_numpy().astype(np.float32),\n y_str.iloc[test_ix].to_numpy().astype(str),\n )\n )\n train_scores.append(\n pipe.score(\n X.iloc[train_ix, :].to_numpy().astype(np.float32),\n y_str.iloc[train_ix].to_numpy().astype(str),\n )\n )\n train_feature_importances = {i: [] for i in range(len(blocks))}\n test_feature_importances = {i: [] for i in range(len(blocks))}\n rstate = np.random.RandomState(config[\"seed\"])\n for q in range(PERMUTATION_REPS):\n print(f\"Permutation: {q+1} / 10\")\n permuted_ix_train = np.arange(train_ix.shape[0])\n 
permuted_ix_test = np.arange(test_ix.shape[0])\n for block in range(len(blocks)):\n rstate.shuffle(permuted_ix_train)\n rstate.shuffle(permuted_ix_test)\n X_train_permuted = (\n X.iloc[train_ix, :]\n .copy(deep=True)\n .reset_index(drop=True)\n )\n X_train_permuted.iloc[\n :, blocks[block]\n ] = X_train_permuted.iloc[\n permuted_ix_train, blocks[block]\n ].reset_index(\n drop=True\n )\n X_test_permuted = (\n X.iloc[test_ix, :]\n .copy(deep=True)\n .reset_index(drop=True)\n )\n X_test_permuted.iloc[\n :, blocks[block]\n ] = X_test_permuted.iloc[\n permuted_ix_test, blocks[block]\n ].reset_index(\n drop=True\n )\n\n train_feature_importance = (\n 1\n - pipe.score(\n X_train_permuted.to_numpy().astype(np.float32),\n y_str[train_ix].to_numpy().astype(str),\n )\n ) / (1 - train_scores[-1])\n test_feature_importance = (\n 1\n - pipe.score(\n X_test_permuted.to_numpy().astype(np.float32),\n y_str[test_ix].to_numpy().astype(str),\n )\n ) / (1 - test_scores[-1])\n train_feature_importances[block].append(\n train_feature_importance\n )\n test_feature_importances[block].append(\n test_feature_importance\n )\n\n importance_frame_train = pd.DataFrame(train_feature_importances)\n importance_frame_test = pd.DataFrame(test_feature_importances)\n\n importance_frame_train.columns = [\n \"clinical\",\n \"gex\",\n \"cnv\",\n \"meth\",\n \"mirna\",\n \"mut\",\n \"rppa\",\n ]\n importance_frame_train.to_csv(\n f\"./data/permutation_importances/{cancer}/{model}_train_permutation_importances_{mapping[i]}.csv\",\n index=False,\n )\n importance_frame_test.columns = [\n \"clinical\",\n \"gex\",\n \"cnv\",\n \"meth\",\n \"mirna\",\n \"mut\",\n \"rppa\",\n ]\n importance_frame_test.to_csv(\n f\"./data/permutation_importances/{cancer}/{model}_test_permutation_importances_{mapping[i]}.csv\",\n index=False,\n )\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n" ]
[ [ "sklearn.preprocessing.StandardScaler", "numpy.random.RandomState", "pandas.DataFrame", "numpy.argmax", "numpy.arange", "pandas.read_csv" ] ]
ArthMx/torchtrainer
[ "cc0fdbf7151b45425d4ab56cc61a669adf4d04c4" ]
[ "torchtrainer/trainer.py" ]
[ "import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom functools import reduce\nimport os\nimport time\nfrom .utils import Progbar\n\nclass Trainer(object):\n \"\"\"Class abstracting model training in PyTorch.\"\"\"\n \n def __init__(self, model, loss_fn, optimizer, metrics=None, device=\"auto\"):\n \"\"\"Wrapper class to train a PyTorch model.\n Args:\n model: A Pytorch model.\n loss_fn: A function returning the loss.\n optimizer: A Pytorch optimizer (torch.optim).\n device: Can be either a torch.device instance or one of the choices:\n \"auto\", \"cpu\", \"cuda\".\n \"\"\"\n assert device in (\"auto\", \"cpu\", \"cuda\") or isinstance(device, torch.device)\n if device == \"auto\":\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n elif device == \"cpu\":\n self.device = torch.device(\"cpu\")\n elif device == \"cuda\":\n self.device = torch.device(\"cuda\")\n elif isinstance(device, torch.device):\n self.device = device\n self.model = model.to(self.device)\n self.loss_fn = loss_fn\n self.optimizer = optimizer\n self.metrics = metrics\n \n self.best_val_loss = np.inf\n \n def fit(self, train_loader, val_loader=None, epochs=1, checkpoint_path=None, \n early_stopping=False, verbose=1, plot_loss=True):\n \"\"\"Train the model.\n Args:\n train_loader: A Pytorch Dataloader returning the training batches.\n val_loader: A Pytorch Dataloader returning the validation batches.\n epochs (int): The number of iteration for training.\n checkpoint_path: The path to the file where a checkpoint will be \n saved when the model improve (a val_loader is necessary).\n early_stopping (int or None): If a number n is specified, \n early stopping will be done if there is no improvement on the \n validation set after n epochs.\n verbose: Verbosity of the progress bar: \n - 0: almost silent, no progrss bar, only value after each \n epochs for train loss, val loss and val metrics.\n - 1: + Progress bar and show metrics updated in real time.\n - 2: + train metrics.\n plot_loss (bool): If True, plot the training and validation loss \n at the end.\n \"\"\"\n self.verbose = verbose\n \n if early_stopping or checkpoint_path is not None:\n assert val_loader is not None\n if early_stopping:\n assert isinstance(early_stopping, int)\n n = 0\n \n if plot_loss:\n logs = {}\n \n for epoch in range(1, epochs + 1):\n \n print(\"Epoch {}/{}\".format(epoch, epochs))\n self.progbar = self._make_progbar(train_loader, val_loader, verbose)\n \n # Training loop\n self._train_loop(train_loader)\n \n # Validation loop\n if val_loader:\n self._validate_loop(val_loader)\n \n # Get average value of all metrics for last epoch\n metrics_val = self.progbar.logger.average()\n \n if val_loader:\n # Save best model if improvement on validation loss\n if checkpoint_path and metrics_val[\"val_loss\"] < self.best_val_loss:\n # Save model_dict model_state_dict, optimizer_state_dict\n # and all metrics in progbar.\n self.save_checkpoint(checkpoint_path, metrics_val)\n print(\"Model improved, saved at \" + checkpoint_path)\n \n # Check for early stopping\n if early_stopping:\n if metrics_val[\"val_loss\"] >= self.best_val_loss:\n n += 1\n if n < early_stopping:\n print(\"No improvement in %d Epochs.\" % n)\n if n >= early_stopping:\n print(\"No improvement in %d Epochs: Early Stopping.\" % n)\n break\n else:\n n = 0\n \n # Update best_val_loss\n if metrics_val[\"val_loss\"] < self.best_val_loss:\n self.best_val_loss = metrics_val[\"val_loss\"]\n \n if plot_loss:\n for key in metrics_val:\n if key not 
in logs:\n logs[key] = [metrics_val[key]]\n else:\n logs[key].append(metrics_val[key])\n \n # Plot loss\n if plot_loss:\n plt.figure()\n plt.plot(logs[\"train_loss\"], label=\"Training\")\n if val_loader:\n plt.plot(logs[\"val_loss\"], label=\"Validation\")\n plt.ylabel(\"Loss\")\n plt.xlabel(\"Epoch\")\n plt.legend()\n plt.show()\n \n def train(self, batch):\n \"\"\"A single step of training through one batch. \n Can be overwritten, useful if batch is a dictionnary, for example.\n Args:\n batch: A single batch returned by a Dataloader.\n Return:\n A list of tuple, each tuple must contain a key (string) and a value \n (float or int), to track on the progress bar.\n \"\"\"\n x, y = batch\n x, y = x.to(self.device), y.to(self.device)\n \n y_pred = self.model(x)\n loss = self.loss_fn(y_pred, y)\n \n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n # Track metrics\n values = {\"train_loss\": loss.item()}\n if self.verbose == 2:\n for k in self.metrics:\n values[\"train_\" + k] = self.metrics[k](y_pred, y)\n return values\n \n def validate(self, batch):\n \"\"\"A single step of validation through one batch. \n Can be overwritten, useful if batch is a dictionnary, for example.\n Args:\n batch: A single batch returned by a Dataloader.\n Return:\n A list of tuple, each tuple must contain a key (string) and a value \n (float or int), to track on the progress bar.\n \"\"\"\n x, y = batch\n x, y = x.to(self.device), y.to(self.device)\n \n y_pred = self.model(x)\n loss = self.loss_fn(y_pred, y)\n\n # Track metrics\n values = {\"val_loss\": loss.item()}\n for k in self.metrics:\n values[\"val_\" + k] = self.metrics[k](y_pred, y)\n return values\n \n def _train_loop(self, train_loader):\n \"\"\"Do a single epoch of training.\"\"\"\n # Set model to train mode\n self.model.train()\n \n for batch in train_loader:\n \n values = self.train(batch)\n \n # Update progress bar\n self.progbar.update(values)\n \n def _validate_loop(self, val_loader):\n \"\"\"Do a single epoch of validating.\"\"\"\n # Set model to eval mode\n self.model.eval()\n \n with torch.no_grad():\n for batch in val_loader:\n values = self.validate(batch)\n \n # Update progress bar\n self.progbar.update(values, validating=True)\n \n def check_data_time(self, train_loader):\n \"\"\"Perform one training loop through the dataloader to check batch data \n preparation time vs complete batch time.\"\"\"\n self.progbar = self._make_progbar(train_loader, val_loader=None, verbose=1)\n self.model.train()\n \n t0 = time.time()\n for batch in train_loader:\n t_data = time.time() - t0\n \n _ = self.train(batch)\n \n t_batch = time.time() - t0\n values = {}\n values[\"t_data\"] = t_data\n values[\"t_batch\"] = t_batch\n values[\"t_data/t_batch\"] = t_data / t_batch\n \n # Update progress bar\n self.progbar.update(values)\n \n t0 = time.time()\n \n def save_checkpoint(self, path, metrics_dict=None):\n \"\"\"Save a checkpoint of the model (the model state_dict and the \n optimizer state_dict).\n Args:\n path: Path to the file where the checkpoint will be saved.\n metrics_dict: A dictionnary of additional metrics that will be \n saved.\n \"\"\"\n# # Create directory if necessary\n# checkpoint_dir = os.path.join(*path.split(\"/\")[:-1])\n# if not os.path.exists(checkpoint_dir):\n# os.mkdir(checkpoint_dir)\n \n # make checkpoint\n checkpoint = {\n \"model_state_dict\": self.model.state_dict(),\n \"optimizer_state_dict\": self.optimizer.state_dict()\n }\n if metrics_dict:\n for k in metrics_dict:\n checkpoint[k] = metrics_dict[k]\n \n # 
save checkpoint\n torch.save(checkpoint, path)\n \n def get_num_parameters(self):\n \"\"\"Return the total number of parameters of the model.\"\"\"\n return sum(reduce(lambda a, b: a*b, x.size()) for x in self.model.parameters())\n \n def _make_progbar(self, train_loader, val_loader, verbose):\n \"\"\"Make a progress bar to show training progress.\"\"\"\n len_train_loader = len(iter(train_loader))\n if val_loader:\n len_val_loader = len(iter(val_loader))\n else:\n len_val_loader = None\n \n return Progbar(target=len_train_loader, \n val_target=len_val_loader, verbose=verbose)" ]
[ [ "torch.device", "matplotlib.pyplot.xlabel", "torch.save", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "torch.no_grad", "matplotlib.pyplot.figure", "torch.cuda.is_available", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
behrouzz/numeph
[ "8fa395f1f429ffa67c5dd18a15bb646ef0d0e290" ]
[ "numeph/core.py" ]
[ "import pickle, re\nimport numpy as np\nfrom datetime import datetime\nfrom jplephem import spk\nfrom .julian import datetime_to_jd, jd_to_sec\nfrom .utils import objects, num2txt, txt2num\n\n\nclass Segment:\n def __init__(self, cet_tar, domain, coef):\n self.cet_tar = cet_tar\n self.center = cet_tar[0]\n self.target = cet_tar[1]\n self.domain = domain\n self.coef = coef\n\n def to_str(self):\n cfx = self.coef[0,:,:]\n cfy = self.coef[1,:,:]\n cfz = self.coef[2,:,:]\n cf_xyz = np.concatenate((cfx,cfy,cfz))\n cf_str = num2txt(cf_xyz)\n n_cols = cf_xyz.shape[1]\n ini_dom = self.domain[0,0]\n dt_rec = self.domain[0,1] - self.domain[0,0]\n dt_dom = self.domain[-1,-1] - self.domain[0,0]\n n_recs = cfx.shape[0]\n\n first_row = [self.center, self.target, n_cols, n_recs, dt_rec, ini_dom, dt_dom]\n first_row = ['segment'] + [str(int(i)) for i in first_row]\n first_row = ','.join(first_row)+'\\n'\n return first_row + cf_str\n\n def get_pos(self, t):\n \"\"\"\n Get position of the object at time t\n \n Parameters\n ----------\n t (datetime) : time for which the position is requested\n \n Returns\n ----------\n pos (np.array): position of the object at t\n \"\"\"\n jd = datetime_to_jd(t)\n t = jd_to_sec(jd)\n mask = np.logical_and(t>=self.domain[:,0], t<self.domain[:,1])\n rec = np.where(mask)[0][0] # record index\n cfx = self.coef[0,rec,:]\n cfy = self.coef[1,rec,:]\n cfz = self.coef[2,rec,:]\n fx = np.polynomial.chebyshev.Chebyshev(coef=cfx, domain=self.domain[rec])\n fy = np.polynomial.chebyshev.Chebyshev(coef=cfy, domain=self.domain[rec])\n fz = np.polynomial.chebyshev.Chebyshev(coef=cfz, domain=self.domain[rec])\n pos = np.vstack((fx(t),fy(t),fz(t))).T[0]\n return pos\n\n\nclass SPK:\n \"\"\"\n Load desired segments from bsp file to memory in desired interval\n \n Parameters\n ----------\n fname (str) : path and name of bsp file\n t1 (datetime) : ephemeris start time\n t2 (datetime) : ephemeris end time\n segs_tup (list) : segments as (center, target) tuples\n \"\"\"\n def __init__(self, fname, t1=None, t2=None, segs_tup=None):\n kernel = spk.SPK.open(fname)\n self.segments = {}\n self.array = {}\n #self.data = []\n\n # select segments\n all_segs = kernel.segments\n all_segs_tup = [(i.center, i.target) for i in all_segs]\n if segs_tup is None:\n segs_tup = all_segs_tup\n segments = all_segs\n else:\n segments = [i for i in all_segs if (i.center, i.target) in segs_tup]\n\n # select time interval\n slicing = False\n if (t1 is not None) and (t2 is not None):\n t1 = jd_to_sec(datetime_to_jd(t1))\n t2 = jd_to_sec(datetime_to_jd(t2))\n slicing = True\n\n for seg in segments:\n INIT, INTLEN, RSIZE, N = seg.daf.read_array(seg.end_i - 3, seg.end_i)\n t_ini, interval, coefficients = seg.load_array()\n\n cf_count = int(RSIZE - 2) // 3\n cf = seg.daf.map_array(seg.start_i, seg.end_i - 4)\n cf.shape = (int(N), int(RSIZE))\n\n MID_and_RADIUS = cf[:,:2]\n MID = MID_and_RADIUS[:,0]\n RADIUS = MID_and_RADIUS[:,1]\n\n domains = np.zeros(MID_and_RADIUS.shape)\n domains[:,0] = MID - RADIUS\n domains[:,1] = MID + RADIUS\n\n if slicing:\n mask1 = np.logical_and(t1>=domains[:,0], t1<domains[:,1])\n mask2 = np.logical_and(t2>=domains[:,0], t2<domains[:,1])\n rec1 = np.where(mask1)[0][0]\n rec2 = np.where(mask2)[0][0]\n if rec1==rec2:\n coefficients = coefficients[:, [rec1], :]\n domains = domains[[rec1], :]\n else:\n coefficients = coefficients[:, rec1:rec2+1, :]\n domains = domains[rec1:rec2+1, :]\n\n if coefficients.shape[1]==0:\n continue\n\n #self.data.append([(seg.center, seg.target), domains, coefficients])\n 
self.array[(seg.center, seg.target)] = [domains, coefficients]\n self.segments[(seg.center, seg.target)] = \\\n Segment((seg.center, seg.target), domains, coefficients)\n kernel.close()\n\n def to_txt(self, fname):\n all_str = ''\n for seg in self.segments.values():\n all_str = all_str + seg.to_str()\n f = open(fname, 'w')\n f.write(all_str)\n f.close()\n\n def to_pickle(self, fname):\n f = open(fname, 'wb')\n pickle.dump(self.array, f)\n f.close()\n\n \ndef load_pickle(fname):\n \"\"\"\n Load an ephemeris pickle file\n \n Parameters\n ----------\n fname (str) : path and name of the pickle file\n \n Returns\n ----------\n Dictionary of Segments\n \"\"\"\n f = open(fname, 'rb')\n data = pickle.load(f)\n f.close()\n dc = {}\n for k,v in data.items():\n dc[k] = Segment(k, v[0], v[1])\n return dc\n \n\ndef load_txt(fname):\n \"\"\"\n Load an ephemeris text file\n \n Parameters\n ----------\n fname (str) : path and name of the text file\n \n Returns\n ----------\n Dictionary of Segments\n \"\"\"\n dc = {}\n f = open(fname, 'r')\n all_str = f.read()\n f.close()\n fmt = 'segment,\\d+,\\d+,\\d+,\\d+,\\d+,\\d+,\\d+\\n'\n first_rows = re.findall(fmt, all_str)\n coef_rows = re.split(fmt, all_str)[1:]\n for i in range(len(first_rows)):\n first_row, cf_str = first_rows[i], coef_rows[i]\n first_row = first_row.split(',')[1:]\n first_row = [int(i) for i in first_row]\n center, target, n_cols, n_recs, dt_rec, ini_dom, dt_dom = first_row\n cf = txt2num(cf_str)\n cf = cf.reshape((3,n_recs,n_cols))\n domain = np.zeros((n_recs,2))\n domain[:,0] = [ini_dom + i*dt_rec for i in range(n_recs)]\n domain[:,1] = domain[:,0] + dt_rec\n dc[(center, target)] = Segment((center,target), domain, cf)\n return dc\n \n\n\ndef get_pos(file, seg_tup, t):\n \"\"\"\n Get position of an object from a segment\n \n Parameters\n ----------\n file (str) : path of pickle file\n seg_tup (tuple) : (center, target) of segment to be used\n t (datetime) : time for which the position is requested\n \n Returns\n ----------\n pos (np.array): position of the object\n \"\"\"\n \n f = open(file, 'rb')\n data = pickle.load(f)\n f.close()\n\n data = [i for i in data if i[0]==seg_tup][0]\n _, domains, coefficients = data\n \n jd = datetime_to_jd(t)\n t = jd_to_sec(jd)\n \n mask = np.logical_and(t>=domains[:,0], t<domains[:,1])\n rec = np.where(mask)[0][0] # record index\n\n cfx = coefficients[0,rec,:]\n cfy = coefficients[1,rec,:]\n cfz = coefficients[2,rec,:]\n\n fx = np.polynomial.chebyshev.Chebyshev(coef=cfx, domain=domains[rec])\n fy = np.polynomial.chebyshev.Chebyshev(coef=cfy, domain=domains[rec])\n fz = np.polynomial.chebyshev.Chebyshev(coef=cfz, domain=domains[rec])\n\n pos = np.vstack((fx(t),fy(t),fz(t))).T[0]\n return pos\n\n\ndef geocentric(target, t, file):\n \"\"\"\n Get geocentric position of an object\n \n Parameters\n ----------\n target (str) : name of the target, i.e. 
planet, sun or moon\n    t (datetime)  : time for which the position is requested\n    file (str)    : path of pickle file\n    \n    Returns\n    ----------\n    pos (np.array): geocentric position of the object\n    \"\"\"\n    \n    target = target.lower()\n    if target not in objects.keys():\n        raise Exception('target not recognized!')\n    target = objects[target]\n    \n    earB_ear = get_pos(file=file, seg_tup=(3,399), t=t)\n    if target==301:\n        earB_moo = get_pos(file=file, seg_tup=(3,301), t=t)\n        pos = earB_moo - earB_ear\n    elif target in [1,2,4,5,6,7,8,9,10]:\n        SSB_plaB = get_pos(file=file, seg_tup=(0, target), t=t)\n        SSB_earB = get_pos(file=file, seg_tup=(0,3), t=t)\n        pos = SSB_plaB - earB_ear - SSB_earB\n    return pos\n" ]
[ [ "numpy.concatenate", "numpy.zeros", "numpy.logical_and", "numpy.where", "numpy.polynomial.chebyshev.Chebyshev" ] ]
michaelberks/madym_python
[ "e1d0f55552dc44cb6fc76e8c8fcdd29b601bd00d" ]
[ "src/QbiPy/dce_models/tofts_model.py" ]
[ "'''\nModule for woring with the extended-Tofts model.\n\nWe provide the forward model (concentration_from_model), to be used elsewhere in\nfitting if required. In addition we provide methods for fitting the model using\na linear-least squares approach.\n\nThe model includes the standard 3 parameters Ktrans, ve, vp. It also includes a delay\nparameter tau_a, which is used to interpolate/delay the AIF to given time.\n\nAll times are assumed to be in minutes.\n\nThe AIF must be a QbiPy AIF object (see dce_aif). However if you have a set of AIF values (Ca_t)\nand associated dynamic times (t), it is trivial to create an AIF object:\n\naif = dce_aif.Aif(times = t, base_aif=Ca_t, aif_type=ARRAY)\n\nThe remaining model parameters can either be input as scalars, or 1D np.arrays. The two forms\ncan be mixed, but any paramaters set as arrays must be the same length. The output is always\na 2D array C(t) = (n_samples x n_times).\n\nThe main concentration_from_model function is written this way because it is primarily used\nfor setting up ground truth inputs from Monte-Carlo simulations. However, for convenience\nif using as a forward model during model fits, a wrapper function is provided in which\na single set of model parameters are input as a list/tuple/array and C(t) is returned\nas a 1D-array\n\n'''\n\nimport numpy as np\nfrom QbiPy.dce_models import dce_aif \nfrom QbiPy import helpers\n\n#\n#---------------------------------------------------------------------------------\ndef concentration_from_model(aif:dce_aif.Aif, \n Ktrans: np.array, v_e: np.array, v_p: np.array, tau_a: np.array, \n use_exp_conv:bool=False, all_scalar=False)->np.array:\n '''\n Compute concentration time-series of extended-Tofts model from input\n paramaters\n \n Inputs:\n aif (Aif object, num_times): \n object to store and resample arterial input function values\n \n Ktrans (1D np.array, num_voxels): \n Ktrans values, 1 for each voxel or scalar\n \n v_p (1D np.array, num_voxels): \n vascular volume fraction values, 1 for each voxel or scalar\n \n v_e (1D np.array, num_voxels): \n extra-cellular, extra-vascular volume fraction values, 1 for each voxel or scalar\n \n tau_a (1D np.array, num_voxels): \n arterial delay values, 1 for each voxel or scalar\n \n use_exp_conv, bool: \n if true, uses non-interpolating exponential convolution, otherwise does standard stepwise\n\n all_scalar, bool: \n if true, skips checks on parameter dimensions, and runs for a single voxel\n \n Outputs:\n C_model (2D np.array, num_voxels x num_times):\n Model concentrations at each time point for each voxel computed from model paramaters\n '''\n\n if all_scalar:\n num_voxels = 1\n else:\n #We allow the model paramaters to be scalar, whilst also accepting higher dimension arrays\n num_voxels,Ktrans, v_e, v_p, tau_a = helpers.check_param_size(\n Ktrans=Ktrans,v_e=v_e,v_p=v_p, tau_a=tau_a\n )\n\n #precompute exponential\n k_ep = Ktrans / v_e\n\n #Make time relative to first scan, and compute time intervals\n num_times = aif.times_.size\n t = aif.times_\n\n #create container for running integral sum\n #integral_sum = np.zeros(num_voxels) #1d nv\n\n #Resample the AIF\n aif_offset = aif.resample_AIF(tau_a) #nv x nt\n \n #Create container for model concentrations\n C_model = np.zeros([num_voxels, num_times])\n\n e_i = 0\n for i_t in range(1, num_times):\n \n #Get current time, and time change\n t1 = t[i_t] #scalar\n delta_t = t1 - t[i_t-1] #scalar\n \n #Compute (tau_a) combined arterial and vascular input for this time\n Ca_t0 = aif_offset[:,i_t-1]#1d 
n_v\n        Ca_t1 = aif_offset[:,i_t]#1d n_v\n\n        if use_exp_conv:\n            e_i = helpers.exp_conv(k_ep, delta_t, Ca_t1, Ca_t0, e_i)\n        \n        else:\n            #Update the exponentials for the transfer terms in the two compartments\n            e_delta = np.exp(-delta_t * k_ep) #1d n_v \n            \n            A = delta_t * 0.5 * (Ca_t1 + Ca_t0*e_delta)\n            e_i = e_i * e_delta + A\n\n        #Combine the two compartments with the rate constant to get the final\n        #concentration at this time point\n        C_model[:,i_t] = v_p * Ca_t1 + Ktrans * e_i\n\n    return C_model\n\n#\n#---------------------------------------------------------------------------------\ndef concentration_from_model_single(params: np.array, aif:dce_aif.Aif)->np.array:\n    '''\n    Compute concentration time-series of extended-Tofts model from input\n    parameters\n    \n    Inputs:\n        params (tuple/list/1D np.array): \n            4 element array containing [Ktrans, v_e, v_p, tau_a] for a single sample\n        \n        aif (Aif object, num_times): \n            object to store and resample arterial input function values\n    \n    Outputs:\n        C_model (1D np.array num_times) - Model concentrations at each time point for each \n        voxel computed from model parameters\n    '''\n    return concentration_from_model(aif, \n        params[0], params[1], params[2], params[3], \n        use_exp_conv=False, all_scalar=True)[0,]\n\n#\n#---------------------------------------------------------------------------\ndef construct_LLS_matrix(Ctis_t:np.array, aif:dce_aif.Aif, tau_a:float):\n    '''\n    Make a matrix for linear least-squares (LLS) solving for a single tissue time-series \n\t\n\tInputs:\n        Ctis_t: np.array (num_times)\n            time-series of signal derived CA concentration\n\n        aif (Aif object, num_times): \n            object to store and resample arterial input function values (1 for each time point)\n        \n        tau_a: float \n            arterial delay values, 1 for each voxel\n\n    Outputs:\n        A_:np.array (num_times x 3)\n            Matrix for LLS solver collapsed column major to a single data vector\n\n    '''\n    t = aif.times_\n    Cp_t = aif.resample_AIF(tau_a)[0,]\n    n_t = aif.num_times()\n    \n    A_ = np.zeros((n_t,3))\n\n    Cp_t_int = helpers.trapz_integral(Cp_t, t)\n    Ctis_t_int = helpers.trapz_integral(Ctis_t, t)\n\n    A_[:,0] = Cp_t_int\n    A_[:,1] = -Ctis_t_int\n    A_[:,2] = Cp_t\n\n    return A_\n\n#\n#---------------------------------------------------------------------------\ndef solve_LLS(Ctis_t:np.array, aif:dce_aif.Aif, tau_a:float):\n    '''\n    Solve model parameters for a single tissue time-series using LLS\n\t\n\tInputs:\n        Ctis_t: np.array (num_times)\n            time-series of signal derived CA concentration\n\n        aif (Aif object, num_times): \n            object to store and resample arterial input function values (1 for each time point)\n        \n        tau_a: float \n            arterial delay values, 1 for each voxel\n\n    Outputs:\n        Ktrans, v_e, v_p : float\n            TK model parameters\n    '''\n    A_ = construct_LLS_matrix(Ctis_t, aif, tau_a)\n    C_ = Ctis_t\n    B_ = np.linalg.lstsq(A_, C_, rcond=None)[0]\n    k_2 = B_[1]\n    v_p = B_[2]\n    Ktrans = B_[0] - k_2*v_p\n    v_e = Ktrans / k_2\n    return Ktrans, v_e, v_p" ]
[ [ "numpy.linalg.lstsq", "numpy.exp", "numpy.zeros" ] ]
timozerrer/DRL4IOT
[ "1cb8df0c4c0cff9922717b5ff8d73315eb10d242" ]
[ "wrappers/glove_obs_wrapper.py" ]
[ "import gym\nimport numpy as np\nimport os\n\nclass GloveObsWrapper(gym.Wrapper):\n def __init__(self, gym_env, glove_model_path):\n env = gym_env\n self.glove = self.loadGloveModel(glove_model_path)\n super(GloveObsWrapper, self).__init__(env)\n print(\"shape\", env.observation_space.shape) \n high = np.ones(len(self.unwrapped.observation_space.sample()) * len(self.glove[\"0\"])) * 100\n self.observation_space = gym.spaces.Box(-high, high)\n # Adapt the observation_space to the observations.\n # the observation space has still the shape for the unrapped observations: 4 ints\n # reset the environment to get a dummy observation for the first dimension\n # use entry \"0\" in the glove-model to get the second dimension\n obs_dummy = env.reset()\n self.observation_space.shape = (len(obs_dummy), len(self.glove[\"0\"]))\n self.unwrapped.observation_space = gym.spaces.Box(-high, high)\n\n def step(self, action):\n \"\"\"Wrap the step functions to embed obs using GloVe\"\"\"\n observation, reward, done, info = super().step(action)\n return self.embed_obs(observation), reward, done, info\n\n def reset(self, **kwargs):\n observations = super().reset(**kwargs)\n return self.embed_obs(observations)\n\n def embed_obs(self, observation):\n def isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n new_observation = []\n for obs in range(len(observation)):\n if not isfloat(observation[obs]):\n try:\n new_observation.append(self.glove[str(observation[obs])])\n except KeyError:\n raise KeyError(\"String '\" + str(observation[obs]) + \"' not found in GloVe set\")\n continue\n new_observation.append(self.glove[str((np.clip(int(float(observation[obs])), -75, 75)))])\n return np.array(new_observation)\n\n def loadGloveModel(self, gloveFile):\n f = open(os.path.join(os.getcwd(),gloveFile), 'r' ,encoding=\"utf8\")\n model = {}\n for line in f:\n splitLine = line.split()\n word = splitLine[0]\n embedding = np.array([float(val) for val in splitLine[1:]])\n model[word] = embedding\n return model\n" ]
[ [ "numpy.array" ] ]
equitensor/EquiTensor_2021
[ "aa8f234380f6e827478e8f5650230ebef9b0b960" ]
[ "EquiTensors/train_equitensor_aw.py" ]
[ "# EquiTensor + AW: Core + Fairness (adversary + disentanglement module)\n# + AW (Adaptive weighting)\n\n# 1) up date AE, supply sensitive info map (binarized) as y, into decoder.\n# L = L(rec) + lamda * (1 - L(adversary))\n# where L(rec) = sum (L(ds_i) * weight(ds_i))\n# weight(ds_i) is determinded by adaptive weighting\n# 2) update proxy adversary.\n\n\nimport pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport os.path\ndir_path = os.path.dirname(os.path.realpath(__file__))\nparent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))\nsys.path.insert(0, parent_dir_path)\nfrom os.path import join\nimport argparse\nimport time\nimport datetime\nfrom datetime import timedelta\nimport equitensor_aw\nimport random\nimport pickle\nfrom utils.training_AE_setup import train\nfrom utils import datetime_utils\n\n\n\nHEIGHT = 32\nWIDTH = 20\nTIMESTEPS = 24\nCHANNEL = 27 # number of all features\nBATCH_SIZE = 32\nTRAINING_STEPS = 80\nLEARNING_RATE = 0.01\nHOURLY_TIMESTEPS = 24\nDAILY_TIMESTEPS = 7\nTHREE_HOUR_TIMESTEP = 56\n\n\ndef generate_fixlen_timeseries(rawdata_arr, timestep = 24):\n raw_seq_list = list()\n arr_shape = rawdata_arr.shape\n for i in range(0, arr_shape[0] - (timestep)+1):\n start = i\n end = i+ (timestep )\n temp_seq = rawdata_arr[start: end]\n raw_seq_list.append(temp_seq)\n raw_seq_arr = np.array(raw_seq_list)\n raw_seq_arr = np.swapaxes(raw_seq_arr,0,1)\n return raw_seq_arr\n\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('lamda', nargs='?', type = float, help = 'lambda for fairness strength', default = 0.1)\n parser.add_argument('-s', '--suffix',\n action=\"store\", help = 'save path suffix', default = '')\n parser.add_argument('-k', '--key',\n action=\"store\", help = 'train only one dataset', default = '')\n parser.add_argument('-a', '--attribute',\n action=\"store\", help = 'sensitive attribute', default = 'race')\n parser.add_argument('-d', '--dim', type=int,\n action=\"store\", help = 'dims of latent rep', default = 5)\n parser.add_argument(\"-r\",\"--resume_training\", type=bool, default=False,\n \t\t\t\thelp=\"A boolean value whether or not to resume training from checkpoint\")\n parser.add_argument('-t', '--train_dir',\n action=\"store\", help = 'training dir containing checkpoints', default = '')\n parser.add_argument('-c', '--checkpoint',\n action=\"store\", help = 'checkpoint path (resume training)', default = None)\n parser.add_argument('-e', '--epoch', type=int,\n action=\"store\", help = 'epochs to train', default = 80)\n parser.add_argument('-l', '--learning_rate', type=float,\n action=\"store\", help = 'epochs to train', default = 0.01)\n parser.add_argument(\"-i\",\"--inference\", type=bool, default=False,\n \t\t\t\thelp=\"inference\")\n parser.add_argument(\"-up\",\"--use_pretrained\", type=bool, default=False,\n \t\t\t\thelp=\"A boolean value whether or not to start from pretrained model\")\n parser.add_argument('-pc', '--pretrained_checkpoint',\n action=\"store\", help = 'checkpoint path to pretrained models',\n default = '')\n return parser.parse_args()\n\n\n\ndef main():\n args = parse_args()\n suffix = args.suffix\n lamda = args.lamda\n attribute= args.attribute\n resume_training = args.resume_training\n train_dir = args.train_dir\n checkpoint = args.checkpoint\n epoch = args.epoch\n learning_rate= args.learning_rate\n dim = args.dim\n inference = args.inference\n key = args.key\n use_pretrained = args.use_pretrained\n pretrained_checkpoint = args.pretrained_checkpoint\n\n if checkpoint is not 
None:\n checkpoint = train_dir + checkpoint\n print('pick up checkpoint: ', checkpoint)\n\n\n print('load data for Seattle...')\n globals()['TRAINING_STEPS'] = epoch\n globals()['LEARNING_RATE'] = learning_rate\n\n intersect_pos = pd.read_csv('../auxillary_data/intersect_pos_32_20.csv')\n intersect_pos_set = set(intersect_pos['0'].tolist())\n # demographic data\n demo_raw = pd.read_csv('../auxillary_data/whole_grid_32_20_demo_1000_intersect_geodf_2018_corrected.csv', index_col = 0)\n train_obj = train(demo_raw)\n train_obj.generate_binary_demo_attr(intersect_pos_set)\n\n ######## load sensitive demo data #######################\n demo_arr_norm = np.load('../auxillary_data/sensitive_arr_age_race_edu_income.npy')\n if attribute == 'race':\n sensitive_idx = 1\n if attribute == 'income':\n sensitive_idx = 3\n sensitive_list = ['age65', 'white_pop', 'edu_uni', 'income_high']\n # income: 3, race: 1\n sensitive_demo_arr = demo_arr_norm[:,:,sensitive_idx] # shape: [32, 20]\n sensitive_demo_arr = np.expand_dims(sensitive_demo_arr, axis = -1) #shape: [32, 20, 1]\n\n # ---- reading data ---------------------#\n print('Reading 1d, 2d, and 3d data')\n path_1d = '../data_processing/1d_source_data/'\n path_2d = '../data_processing/2d_source_data/'\n path_3d = '../data_processing/3d_source_data/'\n # 1d\n weather_arr = np.load(path_1d + 'weather_arr_20140201_20190501.npy')\n airquality_arr = np.load(path_1d + 'air_quality_arr_20140201_20190501.npy')\n weather_arr = weather_arr[0,0,:,:]\n airquality_arr = airquality_arr[0,0,:,:]\n\n\n # 2d\n house_price_arr = np.load(path_2d + 'house_price.npy')\n POI_business_arr = np.load(path_2d + 'POI_business.npy')\n POI_food_arr = np.load(path_2d + 'POI_food.npy')\n POI_government_arr = np.load(path_2d + 'POI_government.npy')\n POI_hospitals_arr = np.load(path_2d + 'POI_hospitals.npy')\n POI_publicservices_arr = np.load(path_2d + 'POI_publicservices.npy')\n POI_recreation_arr = np.load(path_2d + 'POI_recreation.npy')\n POI_school_arr = np.load(path_2d + 'POI_school.npy')\n POI_transportation_arr = np.load(path_2d + 'POI_transportation.npy')\n seattle_street_arr = np.load(path_2d + 'seattle_street.npy')\n total_flow_count_arr = np.load(path_2d + 'total_flow_count.npy')\n transit_routes_arr = np.load(path_2d + 'transit_routes.npy')\n transit_signals_arr = np.load(path_2d + 'transit_signals.npy')\n transit_stop_arr = np.load(path_2d + 'transit_stop.npy')\n\n slope_arr = np.load(path_2d + 'slope_arr.npy')\n bikelane_arr = np.load(path_2d + 'bikelane_arr.npy')\n\n # 3d\n building_permit_arr = np.load(path_3d + 'building_permit_arr_20140201_20190501_python3.npy')\n collisions_arr = np.load(path_3d + 'collisions_arr_20140201_20190501_python3.npy')\n crime_arr = np.load(path_3d + 'crime_arr_20140201_20190501_python3.npy')\n seattle911calls_arr = np.load(path_3d + 'seattle911calls_arr_20140201_20190501.npy')\n building_permit_arr_seq_extend = np.repeat(building_permit_arr, 24, axis =0)\n collisions_arr_seq_extend = np.repeat(collisions_arr, 24, axis =0)\n\n # construct dictionary\n print('use dictionary to organize data')\n rawdata_1d_dict = {\n 'precipitation': np.expand_dims(weather_arr[:,0], axis=1) , # core\n 'temperature': np.expand_dims(weather_arr[:,1], axis=1) , # core\n 'pressure': np.expand_dims(weather_arr[:,2], axis=1), # core\n 'airquality': airquality_arr,\n }\n\n rawdata_2d_dict = {\n 'house_price': house_price_arr, # core\n 'POI_business': POI_business_arr, # core\n 'POI_food': POI_food_arr, # core\n 'POI_government': POI_government_arr,\n 'POI_hospitals': 
POI_hospitals_arr,\n 'POI_publicservices': POI_publicservices_arr,\n 'POI_recreation': POI_recreation_arr, # core\n 'POI_school': POI_school_arr, # core\n 'POI_transportation': POI_transportation_arr,\n 'seattle_street': seattle_street_arr, # core\n 'total_flow_count': total_flow_count_arr,\n 'transit_routes': transit_routes_arr, # core\n 'transit_signals': transit_signals_arr,\n 'transit_stop':transit_stop_arr, # core\n 'slope': slope_arr, # core\n 'bikelane': bikelane_arr, # core\n }\n\n rawdata_3d_dict = {\n 'building_permit': building_permit_arr_seq_extend,\n 'collisions': collisions_arr_seq_extend, # expect (1, 45984, 32, 20)\n 'seattle911calls': seattle911calls_arr # (45984, 32, 20) # core\n }\n\n\n keys_1d = list(rawdata_1d_dict.keys())\n keys_2d = list(rawdata_2d_dict.keys())\n keys_3d = list(rawdata_3d_dict.keys())\n\n if key != '' and key in keys_1d:\n temp_var = rawdata_1d_dict[key]\n rawdata_1d_dict.clear()\n rawdata_1d_dict[key] = temp_var\n rawdata_2d_dict.clear()\n rawdata_3d_dict.clear()\n\n if key != '' and key in keys_2d:\n temp_var = rawdata_2d_dict[key]\n rawdata_2d_dict.clear()\n rawdata_2d_dict[key] = temp_var\n rawdata_1d_dict.clear()\n rawdata_3d_dict.clear()\n\n if key != '' and key in keys_3d:\n temp_var = rawdata_3d_dict[key]\n rawdata_3d_dict.clear()\n rawdata_3d_dict[key] = temp_var\n rawdata_2d_dict.clear()\n rawdata_1d_dict.clear()\n\n keys_1d = list(rawdata_1d_dict.keys())\n keys_2d = list(rawdata_2d_dict.keys())\n keys_3d = list(rawdata_3d_dict.keys())\n keys_all = keys_1d+ keys_2d+keys_3d\n\n ################ read corrputed data ########################\n\n with open(path_1d + 'rawdata_1d_corrupted_dict', 'rb') as handle:\n rawdata_1d_corrupted_dict_all = pickle.load(handle)\n rawdata_1d_corrupted_dict = {k: rawdata_1d_corrupted_dict_all[k] for k in keys_1d}\n\n with open(path_2d + 'rawdata_2d_corrupted_dict', 'rb') as handle:\n rawdata_2d_corrupted_dict_all = pickle.load(handle)\n rawdata_2d_corrupted_dict = {k: rawdata_2d_corrupted_dict_all[k] for k in keys_2d}\n\n with open(path_3d + 'rawdata_3d_corrupted_dict', 'rb') as handle:\n rawdata_2d_corrupted_dict_all = pickle.load(handle)\n rawdata_3d_corrupted_dict = {k: rawdata_2d_corrupted_dict_all[k] for k in keys_3d}\n\n # optimal loss for adaptive weighting\n base_dict_all = {\n 'precipitation':0.001215384,\n 'temperature': 0.023403276,\n 'pressure': 0.001060321,\n 'airquality': 0.006243059,\n 'house_price': 0.00008934,\n 'POI_business': 0.0000209,\n 'POI_food': 0.000030507,\n 'POI_government': 9.73292557827174E-06,\n 'POI_hospitals': 8.28E-06,\n 'POI_publicservices': 0.000017891,\n 'POI_recreation':0.00006412,\n 'POI_school': 0.00003219,\n 'POI_transportation': 0.00001365,\n 'seattle_street': 0.00007704,\n 'total_flow_count': 0.000070992,\n 'transit_routes': 0.00005772,\n 'transit_signals': 0.000050377,\n 'transit_stop':0.00007195,\n 'slope': 0.00007734,\n 'bikelane': 0.00006382,\n 'building_permit':0.001142442,\n 'collisions': 0.000470792,\n 'seattle911calls':0.000298507,\n }\n\n base_dict = {k: base_dict_all[k] for k in keys_all}\n\n\n if suffix == '':\n save_path = './equitensor_aw_'+ 'dim'+ str(dim) + '_lamda_'+ str(lamda) +'_' + str(attribute)+'/'\n else:\n if key == '':\n save_path = './equitensor_aw_'+ 'dim' + str(dim) + '_lamda_'+ str(lamda)+'_' + str(attribute)+ '_'+ suffix +'/'\n else:\n save_path = './equitensor_aw_'+ 'dim' + str(dim) + '_lamda_'+ str(lamda) +'_' + str(attribute)+ '_'+ suffix+ '_' + key +'/'\n\n if train_dir:\n save_path = train_dir\n\n print(\"training dir: \", train_dir)\n 
print(\"save_path: \", save_path)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n # generate mask arr for city boundary (32, 20, 1)\n demo_mask_arr = train_obj.demo_mask()\n # generate demographic in array format\n print('generating demo_arr array')\n demo_arr = train_obj.selected_demo_to_tensor()\n if not os.path.isfile(save_path + '_demo_arr_' + str(HEIGHT) + '.npy'):\n np.save(save_path + '_demo_arr_'+ str(HEIGHT) + '.npy', demo_arr)\n\n\n timer = str(time.time())\n if resume_training == False:\n if inference == False:\n print('Train Model')\n latent_representation = equitensor_aw.Autoencoder_entry(train_obj,\n rawdata_1d_dict, rawdata_2d_dict, rawdata_3d_dict,\n rawdata_1d_corrupted_dict, rawdata_2d_corrupted_dict, rawdata_3d_corrupted_dict,\n base_dict_all,\n intersect_pos_set,\n demo_mask_arr, save_path, dim, lamda,\n sensitive_demo_arr,\n HEIGHT, WIDTH, TIMESTEPS, CHANNEL, BATCH_SIZE, TRAINING_STEPS, LEARNING_RATE,\n use_pretrained = use_pretrained, pretrained_ckpt_path = pretrained_checkpoint,\n ).train_lat_rep\n else:\n latent_representation = equitensor_aw.Autoencoder_entry(train_obj,\n rawdata_1d_dict, rawdata_2d_dict, rawdata_3d_dict,\n rawdata_1d_corrupted_dict, rawdata_2d_corrupted_dict, rawdata_3d_corrupted_dict,\n base_dict_all,\n intersect_pos_set,\n demo_mask_arr, save_path, dim,lamda,\n sensitive_demo_arr,\n HEIGHT, WIDTH, TIMESTEPS, CHANNEL, BATCH_SIZE, TRAINING_STEPS, LEARNING_RATE,\n True, checkpoint, False, train_dir,\n use_pretrained = use_pretrained, pretrained_ckpt_path = pretrained_checkpoint,\n\n ).final_lat_rep\n else:\n # resume training\n print('resume trainging from : ', train_dir)\n latent_representation = equitensor_aw.Autoencoder_entry(train_obj,\n rawdata_1d_dict, rawdata_2d_dict, rawdata_3d_dict,\n rawdata_1d_corrupted_dict, rawdata_2d_corrupted_dict, rawdata_3d_corrupted_dict,\n base_dict_all,\n intersect_pos_set,\n demo_mask_arr,\n train_dir, dim, lamda,\n sensitive_demo_arr,\n HEIGHT, WIDTH, TIMESTEPS, CHANNEL,\n BATCH_SIZE, TRAINING_STEPS, LEARNING_RATE,\n False, checkpoint, True, train_dir).train_lat_rep\n\n txt_name = save_path + 'equitensor_aw_' + 'dim_' + str(dim) +'_' + timer + '.txt'\n with open(txt_name, 'w') as the_file:\n the_file.write('dim\\n')\n the_file.write(str(dim) + '\\n')\n the_file.write('learning rate\\n')\n the_file.write(str(LEARNING_RATE) + '\\n')\n the_file.write('lamda\\n')\n the_file.write(str(lamda) + '\\n')\n the_file.write('sensitive attribute\\n')\n the_file.write(str(sensitive_list[sensitive_idx]) + '\\n')\n the_file.close()\n\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.load", "numpy.swapaxes", "numpy.repeat", "pandas.read_csv", "numpy.expand_dims" ] ]
Optimus-Q/infertrade
[ "6f177d63d90bb63b00eb4380a634bbc229425008" ]
[ "infertrade/utilities/api_automation.py" ]
[ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Author: Nikola Rokvic\n# Created: 18.10.2021\n# Copyright 2021 InferStat Ltd\n\nimport http.client\nimport json\nimport pandas as pd\nimport requests\nimport markdown\nimport pathlib\n\n\"\"\"Scripts made to allow package users to use the InferTrade API easier\"\"\"\n\n\ndef remove_at(i, s):\n \"\"\"Removes character from string at provided position\"\"\"\n return s[:i] + s[i + 1 :]\n\n\ndef parse_csv_file(file_name: str = None, file_location: str = None):\n \"\"\"Function reads provided CSV file (found in package folder) and returns data parsed to dict\"\"\"\n if file_name is None and file_location is None:\n raise ValueError(\"Please provide file name or file location\")\n if file_name is not None:\n if \".csv\" not in str(file_name):\n raise ValueError(\"Please provide CSV file or add .csv to file name\")\n file_path = str(pathlib.Path(__file__).parent.parent.parent.resolve()) + \"/\" + file_name\n elif file_location is not None:\n if \".csv\" not in str(file_location):\n raise ValueError(\"Please provide CSV file or add .csv to file location\")\n file_path = file_location\n dataframe = pd.read_csv(file_path)\n dictionary = dataframe.to_dict(\"list\")\n return dictionary\n\n\ndef find_request_method(html, request_name: str):\n \"\"\"Function finds request method in provided HTML file\"\"\"\n a = \"\"\n for _ in html:\n if _ is not \"\\n\":\n a = a + str(_)\n elif (\"POST \" + request_name) in a:\n return \"POST\", html.index((\"POST \" + request_name))\n elif (\"GET \" + request_name) in a:\n return \"GET\", html.index((\"GET \" + request_name))\n else:\n a = \"\"\n\n raise ValueError(\"Could not find request method in documentation\")\n\n\ndef scrape_request_body(request_name: str):\n \"\"\"Converts API_GUIDANCE.md file into HTML\"\"\"\n md_location = str(pathlib.Path(__file__).parent.parent.parent.resolve()) + \"/API_GUIDANCE.md\"\n with open(md_location, \"r\") as f:\n text = f.read()\n html = markdown.markdown(text)\n\n request_method, request_index = find_request_method(html=html, request_name=request_name)\n\n request_body = retrieve_request_body(request_index=request_index, html=html)\n return request_body, request_method\n\n\ndef retrieve_request_body(request_index: int, html):\n \"\"\"Returns default request body from API_GUIDANCE.md file\"\"\"\n request_body = \"\"\n a = \"\"\n body = False\n for _ in range(request_index, len(html)):\n if html[_] != \" \":\n if body is False:\n a = a + html[_]\n elif body is False:\n a = \"\"\n if a == \"json.dumps({\":\n if (str(html[_] + html[_ + 1])) == \"})\":\n return request_body\n body = True\n request_body = request_body + html[_]\n return \"\"\n\n\ndef check_api_status():\n \"\"\"Function checks current status of InferTrade API\"\"\"\n conn = http.client.HTTPSConnection(\"prod.api.infertrade.com\")\n payload = \"\"\n headers = {\"Content-Type\": \"application/json\"}\n conn.request(\"GET\", \"/status\", payload, headers)\n res = conn.getresponse()\n data = res.read()\n status = 
data.decode(\"utf-8\")\n return status\n\n\ndef check_float(string_rep):\n \"\"\"Function checks if provided string represents float number\"\"\"\n try:\n float(string_rep)\n return True\n except ValueError:\n return False\n\n\ndef find_and_replace_bracket(test_str):\n test_str = test_str\n open = 0\n open_positions = dict()\n ordered_keys = list()\n for _, i in enumerate(test_str):\n if i == \"[\":\n open += 1\n elif i == \"]\":\n open -= 1\n if open == 0:\n b = test_str.find(\"[\")\n open_positions[b] = _\n ordered_keys.append(b)\n test_str = test_str[:b] + \"]\" + test_str[b + 1 :]\n\n for i in range(0, (len(ordered_keys))):\n test_str = (\n test_str[: ordered_keys[(len(ordered_keys) - 1) - i]]\n + \"placeholder\"\n + test_str[(open_positions[ordered_keys[(len(ordered_keys) - 1) - i]] + 1) :]\n )\n return test_str\n\n\ndef string_to_dict(string, in_recursion: int = 0, fillers: dict() = None):\n \"\"\"Function turns scraped request body into a dictionary\"\"\"\n dictionary = dict()\n value = None\n key = None\n list_key = list()\n list_value = list()\n position = None\n str_len = len(string)\n i = 0\n for i, _ in enumerate(string):\n if position is not None:\n if i != str_len - len(string):\n continue\n else:\n position = None\n if _ == \":\":\n if string.find(_) != 0:\n key = string[: (string.find(_))]\n string = string[(string.find(_) + 1) :]\n elif _ == \",\":\n if string.find(_) != 0:\n value = string[: string.find(_)]\n string = string[(string.find(_) + 1) :]\n elif _ == \"}\":\n if string.find(_) != 0:\n value = string[: string.find(_)]\n string = string[(string.find(_) + 1) :]\n if value is not None and key is not None:\n list_value.append(value)\n list_key.append(key)\n key = str(list_key.pop())\n a = list_value.pop()\n if isinstance(a, dict):\n dictionary[key] = a\n elif check_float(a):\n a = \"placeholder\"\n dictionary[key] = a\n else:\n dictionary[key] = a\n if fillers is not None and key in fillers.keys():\n dictionary[key] = fillers[key]\n return dictionary, string, i\n elif _ == \"{\":\n string = string[(string.find(_) + 1) :]\n in_recursion += 1\n value, string, position = string_to_dict(string, in_recursion=in_recursion, fillers=fillers)\n in_recursion -= 1\n if len(string) > 1:\n if string[0] == \"}\":\n string = string[1:]\n if value is not None and key is not None:\n list_value.append(value)\n list_key.append(key)\n key = str(list_key.pop())\n a = list_value.pop()\n if isinstance(a, dict):\n dictionary[key] = a\n elif check_float(a):\n a = \"placeholder\"\n dictionary[key] = a\n else:\n dictionary[key] = a\n if fillers is not None and key in fillers.keys():\n dictionary[key] = fillers[key]\n return dictionary, string, i\n\n if value is not None and key is not None:\n list_value.append(value)\n list_key.append(key)\n key = str(list_key.pop())\n a = list_value.pop()\n if isinstance(a, dict):\n dictionary[key] = a\n elif check_float(a):\n a = \"placeholder\"\n dictionary[key] = a\n else:\n dictionary[key] = a\n if fillers is not None and key in fillers.keys():\n dictionary[key] = fillers[key]\n key = None\n value = None\n\n return dictionary, string, i\n\n\ndef convert_string(string: str, fillers: dict() = None):\n \"\"\"Makes retrieved string compatible with \"string_to_dict\" method and passes converted string to \"string_to_dict\" \"\"\"\n body = remove_at(0, string)\n new_body = \"\".join(body.splitlines())\n new_body = new_body.replace(\" \", \"\")\n new_body = new_body.replace('\"', \"\")\n new_body = find_and_replace_bracket(new_body)\n new_body = 
\"\".join(new_body.splitlines())\n new_body, string, pos = string_to_dict(new_body, fillers=fillers)\n return dict(new_body)\n\n\ndef execute_it_api_request(\n request_name: str,\n api_key: str,\n request_body: dict() = None,\n header: dict() = None,\n additional_data: list() = None,\n Content_Type: str = \"application/json\",\n selected_module: str = \"requests\",\n execute_request: bool = True,\n):\n \"\"\"Combines data and execute InferTrade API request, returns response\"\"\"\n\n status = check_api_status()\n if \"running\" not in status:\n raise Exception(\"Failed to establish connection to InferTrade API\")\n\n scraped_body, method = scrape_request_body(request_name)\n if request_body is not None:\n new_body = request_body\n elif scraped_body != \"\":\n new_body = convert_string(scraped_body, fillers=additional_data)\n else:\n new_body = scraped_body\n\n payload = json.dumps(new_body)\n\n if execute_request is False:\n return payload\n\n if header is None:\n headers = {\"Content-Type\": Content_Type, \"x-api-key\": api_key}\n else:\n headers = header\n\n if selected_module == \"http.client\":\n conn = http.client.HTTPSConnection(\"prod.api.infertrade.com\")\n conn.request(method, \"/\", payload, headers)\n res = conn.getresponse()\n data = res.read()\n response = data.decode(\"utf-8\")\n elif selected_module == \"requests\":\n url = \"https://prod.api.infertrade.com/\"\n response = requests.request(method, url, headers=headers, data=payload)\n\n return response\n" ]
[ [ "pandas.read_csv" ] ]
finalljx/Elastic-Federated-Learning-Solution
[ "fb588fdc03a2c1598b40b36712b27bdffdd24258" ]
[ "efls-train/python/efl/privacy/fixedpoint_tensor.py" ]
[ "# Copyright (C) 2016-2021 Alibaba Group Holding Limited\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport sys\n\nimport tensorflow as tf\nfrom efl.lib import ops as fed_ops\nimport numpy as np\n\nclass FixedPointTensor():\n\n BASE = 16.\n LOG2_BASE = math.log(BASE, 2)\n FLOAT_MANTISSA_BITS = sys.float_info.mant_dig\n Q = 293973345475167247070445277780365744413\n TF_CONVERT_NUM_LENGTH = 31\n\n def __init__(self, n=None, max_int=None):\n if n is None:\n n = Q\n max_int = Q // 3 - 1\n self._n = n\n self._max_int = max_int\n self._encoding = None\n self._exponent = None\n\n @property\n def encoding(self):\n return self._encoding\n\n @property\n def exponent(self):\n return self._exponent\n\n def set_encoding(self, encoding, exponent):\n self._encoding = encoding\n self._exponent = exponent\n return self\n\n def encode(self, scalar, max_exponent=None):\n scalar = tf.where(tf.less_equal(tf.math.abs(scalar), 1e-200),\n tf.zeros_like(scalar),\n scalar)\n if scalar.dtype in (tf.int8, tf.int16, tf.int32, tf.int64):\n exponent = tf.zeros_like(scalar, dtype=tf.float32)\n elif scalar.dtype in (tf.float16, tf.float32, tf.float64):\n scalar = tf.cast(scalar, tf.float32)\n _, flt_exponent = fed_ops.frexp(scalar)\n lsb_exponent = FixedPointTensor.FLOAT_MANTISSA_BITS - flt_exponent\n exponent = tf.math.floor(lsb_exponent / FixedPointTensor.LOG2_BASE)\n else:\n raise ValueError(f\"FixedPointTensor not support encode for type: {scalar.dtype}\")\n if max_exponent is not None:\n max_exponent = tf.ones_like(scalar, dtype=tf.float32) * max_exponent\n max_exponent = tf.where(tf.greater(max_exponent, exponent), max_exponent, exponent)\n diff_exponent = tf.cast(max_exponent - exponent, dtype=tf.int64)\n else:\n diff_exponent = tf.zeros_like(scalar, dtype=tf.int64)\n max_exponent = exponent\n\n n = tf.constant(str(self._n), dtype=tf.string, shape=[1] * len(scalar.get_shape()))\n n = tf.tile(n, tf.shape(scalar))\n int_fixpoint = tf.round(scalar * tf.pow(tf.cast(FixedPointTensor.BASE, tf.float32), exponent))\n fixpoint = tf.strings.as_string(tf.cast(int_fixpoint, dtype=tf.int64))\n base = tf.constant(str(int(FixedPointTensor.BASE)), dtype=tf.string, shape=[1] * len(scalar.get_shape()))\n base = tf.tile(base, tf.shape(scalar))\n pow_base = fed_ops.gmp_pow(base, diff_exponent)\n fixpoint = fed_ops.gmp_mul(fixpoint, pow_base)\n encoding = fed_ops.gmp_mod(fixpoint, n)\n self._encoding = encoding\n self._exponent = max_exponent\n return self\n\n def _format_encode(self, encoded, exponent):\n expand_exponent = tf.zeros_like(exponent, dtype=tf.float32)\n expand_length = tf.cast(tf.strings.length(encoded) - FixedPointTensor.TF_CONVERT_NUM_LENGTH, dtype=tf.float32)\n expand_exponent = tf.where(tf.greater(expand_length, 0), expand_length, expand_exponent)\n base = tf.constant(str(int(FixedPointTensor.BASE)), 
dtype=tf.string, shape=[1] * len(encoded.get_shape()))\n base = tf.tile(base, tf.shape(encoded))\n pow_base = fed_ops.gmp_pow(base, tf.cast(expand_exponent, dtype=tf.int64))\n self._encoding = fed_ops.gmp_div(encoded, pow_base)\n self._exponent = exponent - expand_exponent\n\n def decode(self):\n max_int = tf.constant(str(self._max_int), dtype=tf.string, shape=[1] * len(self._encoding.get_shape()))\n max_int = tf.tile(max_int, tf.shape(self._encoding))\n n = tf.constant(str(self._n), dtype=tf.string, shape=[1] * len(self._encoding.get_shape()))\n n = tf.tile(n, tf.shape(self._encoding))\n cmp_result = fed_ops.gmp_cmp(self._encoding, max_int)\n pos_matrix = tf.less_equal(cmp_result, 0)\n encoded = tf.where(pos_matrix, self.encoding, fed_ops.gmp_sub(n, self.encoding))\n self._format_encode(encoded, self.exponent)\n encoded = tf.strings.to_number(self.encoding, tf.float32)\n pos_matrix = tf.cast(pos_matrix, tf.float32) * 2. - 1.\n decoded = encoded * tf.pow(tf.cast(FixedPointTensor.BASE, tf.float32), -self.exponent)\n return decoded * pos_matrix\n" ]
[ [ "tensorflow.strings.to_number", "tensorflow.shape", "tensorflow.strings.length", "tensorflow.ones_like", "tensorflow.math.abs", "tensorflow.zeros_like", "tensorflow.greater", "tensorflow.less_equal", "tensorflow.math.floor", "tensorflow.cast" ] ]
Seraphyx/fm-intro
[ "58b25b198712fe7703d1015b82fbdca7e4f1c1ba" ]
[ "src/tf/load_data.py" ]
[ "'''\nData pre process for AFM and FM\n@author: \nLizi Liao ([email protected])\nXiangnan He ([email protected])\n'''\nimport numpy as np\nimport os\nimport pandas as pd\nfrom scipy.sparse import csr_matrix\nfrom sklearn.feature_extraction import DictVectorizer\n\n\nclass LoadData(object):\n '''given the path of data, return the data format for AFM and FM\n :param path\n return:\n Train_data: a dictionary, 'Y' refers to a list of y values; 'X' refers to a list of features_M dimension vectors with 0 or 1 entries\n Test_data: same as Train_data\n Validation_data: same as Train_data\n '''\n\n # Three files are needed in the path\n def __init__(self, path, name_train, name_valid, name_test, loss_type=\"square_loss\"):\n self.path = path + \"/\"\n self.trainfile = self.path + name_train\n self.testfile = self.path + name_test\n self.validationfile = self.path + name_valid\n self.features_M = self.map_features( )\n self.Train_data, self.Validation_data, self.Test_data = self.construct_data( loss_type )\n\n def map_features(self): # map the feature entries in all files, kept in self.features dictionary\n self.features = {}\n self.read_features(self.trainfile)\n self.read_features(self.testfile)\n self.read_features(self.validationfile)\n # print(\"features_M:\", len(self.features))\n return len(self.features)\n\n def read_features(self, file): # read a feature file\n f = open( file )\n line = f.readline()\n i = len(self.features)\n while line:\n items = line.strip().split(' ')\n for item in items[1:]:\n if item not in self.features:\n self.features[ item ] = i\n i = i + 1\n line = f.readline()\n f.close()\n\n def construct_data(self, loss_type):\n X_, Y_ , Y_for_logloss= self.read_data(self.trainfile)\n if loss_type == 'log_loss':\n Train_data = self.construct_dataset(X_, Y_for_logloss)\n else:\n Train_data = self.construct_dataset(X_, Y_)\n #print(\"Number of samples in Train:\" , len(Y_))\n\n X_, Y_ , Y_for_logloss= self.read_data(self.validationfile)\n if loss_type == 'log_loss':\n Validation_data = self.construct_dataset(X_, Y_for_logloss)\n else:\n Validation_data = self.construct_dataset(X_, Y_)\n #print(\"Number of samples in Validation:\", len(Y_))\n\n X_, Y_ , Y_for_logloss = self.read_data(self.testfile)\n if loss_type == 'log_loss':\n Test_data = self.construct_dataset(X_, Y_for_logloss)\n else:\n Test_data = self.construct_dataset(X_, Y_)\n #print(\"Number of samples in Test:\", len(Y_))\n\n return Train_data, Validation_data, Test_data\n\n def read_data(self, file):\n # read a data file. 
For a row, the first column goes into Y_;\n        # the other columns become a row in X_ and entries are mapped to indices in self.features\n        f = open( file )\n        X_ = []\n        Y_ = []\n        Y_for_logloss = []\n        line = f.readline()\n        while line:\n            items = line.strip().split(' ')\n            Y_.append( 1.0*float(items[0]) )\n\n            if float(items[0]) > 0:# > 0 as 1; others as 0\n                v = 1.0\n            else:\n                v = 0.0\n            Y_for_logloss.append( v )\n\n            X_.append( [ self.features[item] for item in items[1:]] )\n            line = f.readline()\n        f.close()\n        return X_, Y_, Y_for_logloss\n\n    def construct_dataset(self, X_, Y_):\n        Data_Dic = {}\n        X_lens = [ len(line) for line in X_]\n        indexs = np.argsort(X_lens)\n        Data_Dic['Y'] = [ Y_[i] for i in indexs]\n        Data_Dic['X'] = [ X_[i] for i in indexs]\n        return Data_Dic\n    \n    def truncate_features(self):\n        \"\"\"\n        Make sure each feature vector is of the same length\n        \"\"\"\n        num_variable = len(self.Train_data['X'][0])\n        for i in range(len(self.Train_data['X'])):\n            num_variable = min([num_variable, len(self.Train_data['X'][i])])\n        # truncate train, validation and test\n        for i in range(len(self.Train_data['X'])):\n            self.Train_data['X'][i] = self.Train_data['X'][i][0:num_variable]\n        for i in range(len(self.Validation_data['X'])):\n            self.Validation_data['X'][i] = self.Validation_data['X'][i][0:num_variable]\n        for i in range(len(self.Test_data['X'])):\n            self.Test_data['X'][i] = self.Test_data['X'][i][0:num_variable]\n        return num_variable\n\n\n\n\nclass sparse_to_dense(object):\n\n    def __init__(self, path, filename, header=None, cols=[]):\n        self.path = path + \"/\"\n        self.header = header\n        self.cols = cols\n        self.filepath = self.path + filename\n        self.data = self.read_data(self.filepath, self.header)\n        self.data_dense = self.long_to_wide(self.data, self.cols)\n        self.data_dense_mat = self.dense_matrix(self.data)\n\n    # Read in delimited file using Pandas\n    def read_data(self, filepath, header=None):\n        print(\"\\tReading datafile [\" + filepath + \"]...\")\n\n        data = pd.read_table(filepath, sep=\"\\t\", header=None, names=header)\n        return(data)\n\n    # Convert from Long to Wide format\n    def long_to_wide(self, data, cols=[], row_id=None, col_id=None):\n\n        col_names = list(data)\n        group_row = col_names[0] if row_id is None else row_id\n        group_col = col_names[1] if col_id is None else col_id\n\n        # Subset\n        if(len(cols)):\n            data = data[cols]\n\n        # Dense Matrix\n        print(\"\\tGrouping by [\" + group_row + \"] and [\" + group_col + \"]\")\n        data = data.groupby([group_row, group_col]).size().unstack(fill_value=0)\n\n        return(data)\n\n    # Convert to Dense Matrix Format\n    def dense_matrix(self, data, label_id=None, row_id=None, col_id=None):\n\n        col_names = list(data)\n        group_row = col_names[0] if row_id is None else row_id\n        group_col = col_names[1] if col_id is None else col_id\n        group_label = col_names[2] if label_id is None else label_id\n\n        # Convert to List of Dictionaries\n        X_raw = data.drop(group_label, axis=1)\n\n        # Convert int to string so that sklearn indexes them\n        X_raw.item_id = X_raw.item_id.astype(str)\n        X_raw.user_id = X_raw.user_id.astype(str)\n\n        # y = Labels\n        y = data[[group_label]].to_numpy()\n\n        # X - Features\n        data_to_dict = X_raw.T.to_dict().values()\n        data_to_dict = list(data_to_dict)\n\n        v = DictVectorizer(sparse=True)\n        X = v.fit_transform(data_to_dict)\n        X_data = X.toarray()\n\n        return X_data, y\n\n\n\n# # Load data and get dense matrix form\n# # https://github.com/coreylynch/pyFM\n# data = sparse_to_dense(\n# \tpath=\"../../data/ml-100k\", \n# \tfilename=\"u.data\", \n# 
header=['user_id','item_id','rating','timestamp'],\n# cols=['user_id','item_id','rating'])\n\n\n# print(data.data_dense_mat)\n\n# X, y = data.data_dense_mat\n\n\n# print(X)\n# print(y)\n# print(X.shape)\n\n\n\n" ]
[ [ "pandas.read_table", "sklearn.feature_extraction.DictVectorizer", "numpy.argsort" ] ]
ChenMnZ/CF-ViT
[ "afc7ba54510cfbd410921a8b5eb5d6f0243718e7" ]
[ "lvvit/loss/cross_entropy.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport math\r\nimport pdb\r\n\r\nclass SoftTargetCrossEntropy(nn.Module):\r\n\r\n def __init__(self):\r\n super(SoftTargetCrossEntropy, self).__init__()\r\n\r\n def forward(self, x, target):\r\n N_rep = x.shape[0]\r\n N = target.shape[0]\r\n if not N==N_rep:\r\n target = target.repeat(N_rep//N,1)\r\n loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)\r\n return loss.mean()\r\n\r\nclass TokenLabelSoftTargetCrossEntropy(nn.Module):\r\n\r\n def __init__(self):\r\n super(TokenLabelSoftTargetCrossEntropy, self).__init__()\r\n\r\n def forward(self, x, target):\r\n N_rep = x.shape[0]\r\n N = target.shape[0]\r\n if not N==N_rep:\r\n target = target.repeat(N_rep//N,1)\r\n if len(target.shape)==3 and target.shape[-1]==2:\r\n ground_truth=target[:,:,0]\r\n target = target[:,:,1]\r\n loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)\r\n return loss.mean()\r\n\r\nclass TokenLabelCrossEntropy(nn.Module):\r\n \"\"\"\r\n Token labeling loss.\r\n \"\"\"\r\n def __init__(self, dense_weight=1.0, cls_weight = 1.0, mixup_active=True, classes = 1000, ground_truth = False):\r\n \"\"\"\r\n Constructor Token labeling loss.\r\n \"\"\"\r\n super(TokenLabelCrossEntropy, self).__init__()\r\n\r\n\r\n self.CE = SoftTargetCrossEntropy()\r\n\r\n self.dense_weight = dense_weight\r\n self.mixup_active = mixup_active\r\n self.classes = classes\r\n self.cls_weight = cls_weight\r\n self.ground_truth = ground_truth\r\n assert dense_weight+cls_weight>0\r\n\r\n\r\n def forward(self, x, target):\r\n output, aux_output, bb = x\r\n bbx1, bby1, bbx2, bby2 = bb\r\n\r\n B,N,C = aux_output.shape\r\n if len(target.shape)==2:\r\n target_cls=target\r\n target_aux = target.repeat(1,N).reshape(B*N,C)\r\n else: \r\n target_cls = target[:,:,1]\r\n if self.ground_truth:\r\n # use ground truth to help correct label.\r\n # rely more on ground truth if target_cls is incorrect.\r\n ground_truth = target[:,:,0]\r\n ratio = (0.9 - 0.4 * (ground_truth.max(-1)[1] == target_cls.max(-1)[1])).unsqueeze(-1)\r\n target_cls = target_cls * ratio + ground_truth * (1 - ratio)\r\n target_aux = target[:,:,2:]\r\n target_aux = target_aux.transpose(1,2).reshape(-1,C)\r\n lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / N)\r\n if lam<1:\r\n target_cls = lam*target_cls + (1-lam)*target_cls.flip(0)\r\n\r\n aux_output = aux_output.reshape(-1,C)\r\n loss_cls = self.CE(output, target_cls)\r\n loss_aux = self.CE(aux_output, target_aux)\r\n return self.cls_weight*loss_cls+self.dense_weight* loss_aux\r\n\r\n" ]
[ [ "torch.nn.functional.log_softmax" ] ]
mmvih/polus-plugins
[ "c424938e3f35900758f7d74f3dfec2adfb3228fc" ]
[ "transforms/images/polus-autocropping-plugin/src/autocrop.py" ]
[ "import logging\nimport random\nfrom concurrent.futures import as_completed\nfrom concurrent.futures import ProcessPoolExecutor\nfrom functools import reduce\nfrom pathlib import Path\n\nimport numpy\nimport scipy.ndimage\nimport scipy.stats\nfrom bfio import BioReader\nfrom bfio import BioWriter\n\nfrom utils import constants\nfrom utils import helpers\nfrom utils import local_distogram as distogram\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',\n datefmt='%d-%b-%y %H:%M:%S',\n)\nlogger = logging.getLogger(\"autocrop\")\nlogger.setLevel(constants.POLUS_LOG)\n\n\ndef calculate_strip_entropy(\n *,\n file_path: Path,\n z_index: int,\n strip_index: int,\n along_x: bool,\n direction: bool,\n smoothing: bool,\n) -> list[float]:\n \"\"\" Get the entropy for each row/column in the indexed strip along the given\n axis. A strip spans the entire length/width of the image.\n\n Args:\n file_path: Path to the image.\n z_index: The index of the z-slice.\n strip_index: index of the current strip.\n along_x: Whether the strip runs along the x-axis.\n direction: Whether we are looking in the forward or backward direction.\n smoothing: Whether to use Gaussian smoothing for each tile.\n\n Returns:\n A list of scores for each row in the strip.\n \"\"\"\n histograms: list[list[distogram.Distogram]] = list()\n\n with BioReader(file_path) as reader:\n for x_min, x_max, y_min, y_max in helpers.iter_strip(file_path, strip_index, along_x):\n tile = numpy.asarray(\n reader[y_min:y_max, x_min:x_max, z_index:z_index + 1, 0, 0],\n dtype=numpy.float32,\n )\n if smoothing:\n tile = scipy.ndimage.gaussian_filter(tile, sigma=1, mode='constant', cval=numpy.mean(tile))\n\n # It is simpler to work with tiles of shape (strip_width, :) so we can\n # always iterate over the 0th axis to get the rows/columns of the image.\n tile = tile if along_x else numpy.transpose(tile)\n\n row_range = range(tile.shape[0]) if direction else reversed(range(tile.shape[0]))\n\n # Create a distogram for each row in the tile. 
We use more bins for now\n # and later merge into a distogram with fewer bins\n row_histograms: list[distogram.Distogram] = [\n helpers.distogram_from_batch(\n tile[i, :].flat,\n constants.MAX_BINS * 2,\n constants.WEIGHTED_BINS,\n )\n for i in row_range\n ]\n histograms.append(row_histograms)\n\n # In case the last tile had fewer rows than other tiles,\n # simply pad the list with empty Distograms.\n histograms[-1].extend([\n distogram.Distogram(bin_count=constants.MAX_BINS, weighted_diff=constants.WEIGHTED_BINS)\n for _ in range(len(histograms[0]) - len(histograms[-1]))\n ])\n\n # Merge the Distograms for the same row from across the strip.\n histograms: list[distogram.Distogram] = [\n reduce(\n lambda residual, value: distogram.merge(residual, value),\n row_histograms,\n distogram.Distogram(bin_count=constants.MAX_BINS, weighted_diff=constants.WEIGHTED_BINS),\n )\n for row_histograms in zip(*histograms)\n ]\n\n # Now that each row has its own Distogram, we can compute the entropy of\n # each row.\n strip_entropy: list[float] = [\n scipy.stats.entropy([c for _, c in histogram.bins])\n for histogram in histograms\n ]\n return strip_entropy\n\n\ndef find_gradient_spike_xy(\n file_path: Path,\n z_index: int,\n along_x: bool,\n direction: bool,\n smoothing: bool,\n) -> int:\n \"\"\" Find the index of the row/column, after padding, of the first large\n spike in the gradient of entropy of rows/columns.\n\n Args:\n file_path: Path to the image.\n z_index: index of the z-slice.\n along_x: Whether to crop along the x-axis.\n direction: Whether we are working forward/down from the left/top edge or\n backward/up from the right/bottom edge.\n smoothing: Whether to use Gaussian smoothing on tiles.\n\n Returns:\n The index of the row/column where we found the high gradient value.\n \"\"\"\n with BioReader(file_path) as reader:\n end = reader.Y if along_x else reader.X\n\n num_strips = end // constants.TILE_STRIDE\n if end % constants.TILE_STRIDE != 0:\n num_strips += 1\n\n # In case we are going backward, reverse the strip indices.\n strip_indices = list(range(num_strips) if direction else reversed(range(num_strips)))\n\n # We don't want to look too deep into the image. If we go through\n # too many strips, we will just use a high percentile gradient value.\n deepest_strip = max(1, len(strip_indices) // 4)\n raw_entropies = list()\n smoothed_gradients = list()\n for i, index in enumerate(strip_indices[:deepest_strip]):\n logger.info(\n f'Checking strip {index + 1} of {len(strip_indices)} '\n f'along {\"x\" if along_x else \"y\"}-axis in the {z_index}-slice...'\n )\n\n raw_entropies.extend(calculate_strip_entropy(\n file_path=file_path,\n z_index=z_index,\n strip_index=index,\n along_x=along_x,\n direction=direction,\n smoothing=smoothing,\n ))\n\n smoothed_gradients = helpers.smoothed_gradients(raw_entropies)\n index_val = helpers.find_spike(smoothed_gradients, constants.GRADIENT_THRESHOLD)\n if index_val is None:\n raw_entropies = raw_entropies[-(1 + 2 * constants.WINDOW_SIZE):]\n else:\n break\n else: # There was no break in the loop, i.e. no high gradient was found.\n logger.debug(f'Gradient threshold {constants.GRADIENT_THRESHOLD:.2e} was too high. 
'\n f'Using {constants.GRADIENT_PERCENTILE}th percentile instead...')\n threshold = numpy.percentile(smoothed_gradients, q=constants.GRADIENT_PERCENTILE)\n index_val = helpers.find_spike(smoothed_gradients, float(threshold))\n\n stop = index_val[0] if direction else end - index_val[0]\n logger.debug(f'Found gradient spike at index {stop} along axis {\"x\" if along_x else \"y\"}')\n return stop\n\n\ndef estimate_slice_entropies_thread(\n file_path: Path,\n smoothing: bool,\n z_index: int,\n) -> distogram.Distogram:\n tile_indices = list(helpers.iter_tiles_2d(file_path))\n if len(tile_indices) > 25:\n tile_indices = list(random.sample(tile_indices, 25))\n\n tile_histograms = list()\n\n with BioReader(file_path) as reader:\n for x_min, x_max, y_min, y_max in tile_indices:\n tile = numpy.asarray(\n reader[y_min:y_max, x_min:x_max, z_index, 0, 0],\n dtype=numpy.float32,\n )\n if smoothing:\n tile = scipy.ndimage.gaussian_filter(tile, sigma=1, mode='constant', cval=numpy.mean(tile))\n\n tile_histograms.append(helpers.distogram_from_batch(\n tile.flat,\n constants.MAX_BINS * 2,\n constants.WEIGHTED_BINS,\n ))\n\n return reduce(\n lambda residual, value: distogram.merge(residual, value),\n tile_histograms,\n distogram.Distogram(bin_count=constants.MAX_BINS, weighted_diff=constants.WEIGHTED_BINS),\n )\n\n\ndef estimate_slice_entropies(file_path: Path, smoothing: bool) -> list[float]:\n with BioReader(file_path) as reader:\n z_end = reader.Z\n\n # Build a value histogram for each z-slice in the image.\n slice_histograms = list()\n with ProcessPoolExecutor(max_workers=constants.NUM_THREADS) as executor:\n processes = [\n executor.submit(\n estimate_slice_entropies_thread,\n file_path,\n smoothing,\n z,\n )\n for z in range(z_end)\n ]\n for process in processes:\n slice_histograms.append(process.result())\n\n return [\n scipy.stats.entropy([c for _, c in histogram.bins])\n for histogram in slice_histograms\n ]\n\n\ndef determine_bounding_box_thread(\n file_path: Path,\n smoothing: bool,\n crop_y: bool,\n crop_x: bool,\n z_index: int,\n) -> tuple[int, int, int, int]:\n with BioReader(file_path) as reader:\n x_end, y_end, z_end = reader.X, reader.Y, reader.Z\n\n if crop_y:\n y1 = find_gradient_spike_xy(file_path, z_index, True, True, smoothing)\n y2 = find_gradient_spike_xy(file_path, z_index, True, False, smoothing)\n else:\n y1, y2 = 0, y_end\n\n if crop_x:\n x1 = find_gradient_spike_xy(file_path, z_index, False, True, smoothing)\n x2 = find_gradient_spike_xy(file_path, z_index, False, False, smoothing)\n else:\n x1, x2 = 0, x_end\n\n return y1, y2, x1, x2\n\n\ndef determine_bounding_box(\n file_path: Path,\n crop_axes: tuple[bool, bool, bool],\n smoothing: bool,\n) -> helpers.BoundingBox:\n \"\"\" Using the gradient of entropy values of rows/columns in an image,\n determine the bounding-box around the region of the image which contains\n useful information.\n\n This bounding-box can be used to crop the image.\n\n Args:\n file_path: Path to the image.\n crop_axes: A 3-tuple indicating whether to crop along the x-axis, y-axis, z-axis.\n smoothing: Whether to use Gaussian smoothing\n\n Returns:\n A 6-tuple of integers representing a bounding-box.\n \"\"\"\n logger.info(f'Finding bounding_box for {file_path.name}...')\n\n crop_x, crop_y, crop_z = crop_axes\n bounding_boxes: list[helpers.BoundingBox] = list()\n with BioReader(file_path) as reader:\n x_end, y_end, z_end = reader.X, reader.Y, reader.Z\n\n if z_end > 1 and crop_z:\n\n def _find_spike(values: list[float]) -> int:\n gradients = 
helpers.smoothed_gradients(values, prepend_zeros=True)\n index_val = helpers.find_spike(gradients, constants.GRADIENT_THRESHOLD)\n if index_val is None:\n threshold = numpy.percentile(gradients, q=constants.GRADIENT_PERCENTILE)\n index_val = helpers.find_spike(gradients, float(threshold))\n return index_val[0]\n\n slice_entropies = estimate_slice_entropies(file_path, smoothing)\n try:\n z1 = _find_spike(slice_entropies)\n z2 = z_end - _find_spike(list(reversed(slice_entropies)))\n except IndexError as e:\n logger.error(f'entropies {slice_entropies} produced index error {e}')\n raise e\n else:\n z1, z2 = 0, z_end\n\n # Find a bounding box for each z-slice in the image.\n bounding_boxes_2d = list()\n with ProcessPoolExecutor(max_workers=constants.NUM_THREADS) as executor:\n processes = [\n executor.submit(\n determine_bounding_box_thread,\n file_path,\n smoothing,\n crop_y,\n crop_x,\n z,\n )\n for z in range(z_end)\n ]\n for process in as_completed(processes):\n bounding_boxes_2d.append(process.result())\n\n bounding_boxes.extend([\n (z1, z2, y1, y2, x1, x2)\n for y1, y2, x1, x2 in bounding_boxes_2d\n ])\n\n bounding_box = helpers.bounding_box_superset(bounding_boxes)\n logger.info(f'Determined {bounding_box = } for {file_path.name}')\n return bounding_box\n\n\ndef verify_group_shape(file_paths: list[Path]):\n \"\"\" Verifies that all given images have the same x, y, and z dimensions.\n\n Args:\n file_paths: A list of file-paths that belong to the same group.\n \"\"\"\n # Verify that all images in the group have the same dimensions.\n # Note: reader.Y is the image height and reader.X is the image width.\n depths, heights, widths = set(), set(), set()\n for file_path in file_paths:\n with BioReader(file_path) as reader:\n depths.add(reader.Z), heights.add(reader.Y), widths.add(reader.X)\n\n if len(depths) > 1 or len(heights) > 1 or len(widths) > 1:\n message = 'Group contains images which have different dimensions.'\n logger.error(message)\n raise ValueError(message)\n\n logger.info(f'Starting from shape {(depths.pop(), heights.pop(), widths.pop())}...')\n return\n\n\ndef crop_image_group(\n *,\n file_paths: list[Path],\n crop_axes: tuple[bool, bool, bool],\n smoothing: bool,\n output_dir: Path,\n):\n \"\"\" Given a list of file-paths to images in the same group, crop those\n images and write the results in the given output directory.\n\n Args:\n file_paths: A list of file-paths that belong to the same group.\n crop_axes: A 3-tuple indicating whether to crop along the x-axis, y-axis, z-axis.\n smoothing: Whether to use Gaussian smoothing.\n output_dir: A path to a directory where to write the results.\n \"\"\"\n verify_group_shape(file_paths)\n\n # Find a bounding box for each image in the group.\n bounding_boxes = list()\n with ProcessPoolExecutor(max_workers=constants.NUM_THREADS) as executor:\n processes = {\n executor.submit(determine_bounding_box, file_path, crop_axes, smoothing)\n for file_path in file_paths\n }\n for process in as_completed(processes):\n bounding_boxes.append(process.result())\n\n bounding_box = helpers.bounding_box_superset(bounding_boxes)\n write_cropped_images(file_paths, output_dir, bounding_box)\n return\n\n\ndef write_cropped_images(\n file_paths: list[Path],\n output_dir: Path,\n bounding_box: helpers.BoundingBox,\n):\n \"\"\" Crops and writes the given group of images using the given bounding box.\n\n Args:\n file_paths: A list of Paths for the input images.\n output_dir: A Path to the output directory.\n bounding_box: The bounding-box to use for cropping the images\n\n \"\"\"\n z1, z2, y1, y2, x1, x2 = bounding_box\n out_depth, 
out_height, out_width = z2 - z1, y2 - y1, x2 - x1\n logger.info(f'Superset bounding {bounding_box = }...')\n logger.info(f'Cropping to shape (z, y, x) = {out_depth, out_height, out_width}...')\n\n for file_path in file_paths:\n out_path = output_dir.joinpath(helpers.replace_extension(file_path.name))\n logger.info(f'Writing {out_path.name}...')\n\n with BioReader(file_path) as reader:\n with BioWriter(out_path, metadata=reader.metadata, max_workers=constants.NUM_THREADS) as writer:\n writer.Z = out_depth\n writer.Y = out_height\n writer.X = out_width\n\n for z_out in range(writer.Z):\n z_in = z_out + z1\n\n for out_y in range(0, writer.Y, constants.TILE_STRIDE):\n out_y_max = min(writer.Y, out_y + constants.TILE_STRIDE)\n in_y = out_y + y1\n in_y_max = min(y2, in_y + constants.TILE_STRIDE)\n\n for out_x in range(0, writer.X, constants.TILE_STRIDE):\n out_x_max = min(writer.X, out_x + constants.TILE_STRIDE)\n in_x = out_x + x1\n in_x_max = min(x2, in_x + constants.TILE_STRIDE)\n\n try:\n tile = reader[in_y:in_y_max, in_x:in_x_max, z_in:z_in + 1, 0, 0]\n writer[out_y:out_y_max, out_x:out_x_max, z_out:z_out + 1, 0, 0] = tile[:]\n except AssertionError as e:\n logger.error(\n f'failed to read tile {(in_y, in_y_max, in_x, in_x_max, z_in, z_in + 1) = }\\n'\n f'and write to {(out_y, out_y_max, out_x, out_x_max, z_out, z_out + 1) = }\\n'\n f'because {e}'\n )\n raise e\n return\n" ]
[ [ "numpy.percentile", "numpy.transpose", "numpy.asarray", "numpy.mean" ] ]
nils91/tensorflow-ml
[ "3aee5a3ef738e49c9e746bebb9c607a8fe203add" ]
[ "nb.py" ]
[ "from sklearn.naive_bayes import GaussianNB\nfrom sklearn import datasets, metrics\n\niris = datasets.load_iris()\nclassifier = GaussianNB()\nclassifier.fit(iris.data, iris.target)\nscore = metrics.accuracy_score(iris.target, classifier.predict(iris.data))\nprint(\"Accuracy: %f\" % score)\n\nnewfeatures=[[4.9,3.1,1.5,3.2],[5,3,1.5,0.2],[6,2.5,1.5,0.2],[4.2,1.3,1.7,0],[5.15,2.9,3.5,1.2]]\npredictions=classifier.predict(newfeatures)\nfor p in predictions:\n\tprint(\"Prediction: %i\" % p);" ]
[ [ "sklearn.naive_bayes.GaussianNB", "sklearn.datasets.load_iris" ] ]
selective-inference/Python-software
[ "e906fbb98946b129eb6713e8956bde7a080181f4" ]
[ "selectinf/randomized/tests/test_multiple_queries.py" ]
[ "from __future__ import division, print_function\n\nimport numpy as np\nimport nose.tools as nt\n\nimport regreg.api as rr\n\nfrom ..lasso import lasso, selected_targets, full_targets, debiased_targets\nfrom ..screening import marginal_screening\nfrom ..query import multiple_queries\nfrom ...tests.instance import gaussian_instance\nfrom ...algorithms.sqrt_lasso import choose_lambda, solve_sqrt_lasso\n\n# the test here is marginal_screening + lasso\ndef test_multiple_queries(n=500, p=100, signal_fac=1.5, s=5, sigma=3, rho=0.4, randomizer_scale=1, ndraw=5000, burnin=1000):\n\n inst, const1, const2 = gaussian_instance, marginal_screening, lasso.gaussian\n signal = np.sqrt(signal_fac * np.log(p))\n X, Y, beta = inst(n=n,\n p=p, \n signal=signal, \n s=s, \n equicorrelated=False, \n rho=rho, \n sigma=sigma, \n random_signs=True)[:3]\n\n n, p = X.shape\n\n q = 0.1\n conv1 = const1.type1(-X.T.dot(Y),\n sigma**2 * X.T.dot(X),\n q,\n randomizer_scale * sigma)\n\n boundary1 = conv1.fit()\n nonzero1 = boundary1 != 0\n\n sigma_ = np.std(Y)\n W = np.ones(X.shape[1]) * np.sqrt(1.5 * np.log(p)) * sigma_\n\n conv2 = const2(X, \n Y, \n W, \n randomizer_scale=randomizer_scale * sigma_)\n \n signs2 = conv2.fit()\n nonzero2 = signs2 != 0\n\n nonzero = nonzero1 * nonzero2\n\n if nonzero.sum() == 0:\n return [], []\n\n observed_target1, cov_target1, cov_target_score1, alternatives1 = conv1.multivariate_targets(nonzero, sigma**2)\n\n (observed_target2, \n cov_target2, \n cov_target_score2, \n alternatives2) = selected_targets(conv2.loglike, \n conv2._W, \n nonzero)\n\n mq = multiple_queries([conv1, conv2])\n\n _, pval, intervals = mq.summary(observed_target1, \n [(cov_target1, cov_target_score1), (cov_target2, cov_target_score2)],\n compute_intervals=True)\n \n return pval[beta[nonzero] == 0], pval[beta[nonzero] != 0]\n\n\ndef main(nsim=500, n=500, p=100, sigma=3):\n\n P0, PA = [], []\n from statsmodels.distributions import ECDF\n import matplotlib.pyplot as plt\n\n for i in range(nsim):\n if True:\n p0, pA = test_multiple_queries(n=n, p=p, sigma=sigma)\n else: \n p0, pA = [], []\n P0.extend(p0)\n PA.extend(pA)\n\n P0_clean = np.array(P0)\n \n P0_clean = P0_clean[P0_clean > 1.e-5] # \n print(np.mean(P0_clean), np.std(P0_clean), np.mean(np.array(PA) < 0.05), np.mean(np.array(P0) < 0.05), np.mean(P0_clean < 0.05), np.mean(np.array(P0) < 1e-5))\n \n if i % 3 == 0 and i > 0:\n U = np.linspace(0, 1, 101)\n plt.clf()\n if len(P0_clean) > 0:\n plt.plot(U, ECDF(P0_clean)(U))\n if len(PA) > 0:\n plt.plot(U, ECDF(PA)(U), 'r')\n plt.plot([0, 1], [0, 1], 'k--')\n plt.savefig(\"plot.pdf\")\n plt.show()\n\n" ]
[ [ "numpy.array", "numpy.log", "matplotlib.pyplot.clf", "matplotlib.pyplot.savefig", "numpy.ones", "matplotlib.pyplot.plot", "numpy.mean", "numpy.std", "matplotlib.pyplot.show", "numpy.linspace" ] ]
Aimledge/tvm
[ "f41c050fc681a9d9805e6c73e729df233e1acbac" ]
[ "nnvm/tests/python/compiler/test_rpc_exec.py" ]
[ "import tvm\nfrom tvm import rpc\nfrom tvm.contrib import util, graph_runtime\nimport nnvm.symbol as sym\nimport nnvm.compiler\nimport numpy as np\n\ndef test_rpc_executor():\n host = \"localhost\"\n port = 9100\n server = rpc.Server(host, port, use_popen=True)\n\n x = sym.Variable(\"x\")\n y = sym.Variable(\"y\")\n z = sym.exp(y + x)\n shape = (10, 128)\n dtype = tvm.float32\n shape_dict = {\"x\": shape, \"y\": shape}\n tmp = util.tempdir()\n lib_name = tmp.relpath(\"net.o\")\n\n graph, lib, _ = nnvm.compiler.build(z, \"llvm\", shape_dict)\n # save module\n lib.save(lib_name)\n remote = rpc.connect(host, port)\n remote.upload(lib_name)\n ctx = remote.cpu(0)\n # load remote\n rlib = remote.load_module(\"net.o\")\n\n # Create remotemodule\n m = graph_runtime.create(graph, rlib, remote.cpu(0))\n # get member functions\n set_input, run, get_output = m[\"set_input\"], m[\"run\"], m[\"get_output\"]\n na = tvm.nd.array(np.ones(shape).astype(dtype), ctx)\n nb = tvm.nd.array(np.ones(shape).astype(dtype), ctx)\n # set inputs\n set_input(\"x\", na)\n set_input(\"y\", nb)\n # execute\n run()\n # get outputs\n out = tvm.nd.empty(shape, dtype, ctx)\n get_output(0, out)\n np.testing.assert_allclose(\n out.asnumpy(), np.exp(na.asnumpy() + nb.asnumpy()))\n server.terminate()\n\nif __name__ == \"__main__\":\n test_rpc_executor()\n" ]
[ [ "numpy.ones" ] ]
SiriusKY/onnxruntime
[ "3c5853dcbc9d5dda2476afa8c6105802d2b8e53d" ]
[ "onnxruntime/test/python/onnxruntime_test_python_iobinding.py" ]
[ "import numpy as np\nfrom numpy.testing import assert_almost_equal\nfrom onnx.mapping import NP_TYPE_TO_TENSOR_TYPE\nfrom onnx.defs import onnx_opset_version\nfrom onnx import helper\nimport onnxruntime as onnxrt\nfrom onnxruntime.capi._pybind_state import ( # pylint: disable=E0611\n OrtDevice as C_OrtDevice, OrtValue as C_OrtValue, SessionIOBinding)\nimport unittest\n\nfrom helper import get_name\n\nclass TestIOBinding(unittest.TestCase):\n\n def create_ortvalue_input_on_gpu(self):\n return onnxrt.OrtValue.ortvalue_from_numpy(np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32), 'cuda', 0)\n\n def create_ortvalue_alternate_input_on_gpu(self):\n return onnxrt.OrtValue.ortvalue_from_numpy(np.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], dtype=np.float32), 'cuda', 0)\n\n def create_uninitialized_ortvalue_input_on_gpu(self):\n return onnxrt.OrtValue.ortvalue_from_shape_and_type([3, 2], np.float32, 'cuda', 0)\n\n def create_numpy_input(self):\n return np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)\n\n def create_expected_output(self):\n return np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)\n\n def create_expected_output_alternate(self):\n return np.array([[2.0, 8.0], [18.0, 32.0], [50.0, 72.0]], dtype=np.float32)\n\n def test_bind_input_to_cpu_arr(self):\n input = self.create_numpy_input()\n\n session = onnxrt.InferenceSession(get_name(\"mul_1.onnx\"), providers=onnxrt.get_available_providers())\n io_binding = session.io_binding()\n \n # Bind Numpy object (input) that's on CPU to wherever the model needs it\n io_binding.bind_cpu_input('X', self.create_numpy_input())\n \n # Bind output to CPU\n io_binding.bind_output('Y')\n \n # Invoke Run\n session.run_with_iobinding(io_binding)\n \n # Sync if different CUDA streams\n io_binding.synchronize_outputs()\n\n # Get outputs over to CPU (the outputs which were bound to CUDA will get copied over to the host here)\n ort_output = io_binding.copy_outputs_to_cpu()[0]\n\n # Validate results\n self.assertTrue(np.array_equal(self.create_expected_output(), ort_output))\n\n def test_bind_input_types(self):\n\n opset = onnx_opset_version()\n devices = [(C_OrtDevice(C_OrtDevice.cpu(), C_OrtDevice.default_memory(), 0), ['CPUExecutionProvider'])]\n if \"CUDAExecutionProvider\" in onnxrt.get_all_providers():\n devices.append((C_OrtDevice(C_OrtDevice.cuda(), C_OrtDevice.default_memory(), 0), ['CUDAExecutionProvider']))\n \n for device, provider in devices:\n for dtype in [np.float32, np.float64, np.int32, np.uint32,\n np.int64, np.uint64, np.int16, np.uint16,\n np.int8, np.uint8, np.float16, np.bool_]:\n with self.subTest(dtype=dtype, device=str(device)):\n\n x = np.arange(8).reshape((-1, 2)).astype(dtype)\n proto_dtype = NP_TYPE_TO_TENSOR_TYPE[x.dtype]\n\n X = helper.make_tensor_value_info('X', proto_dtype, [None, x.shape[1]])\n Y = helper.make_tensor_value_info('Y', proto_dtype, [None, x.shape[1]])\n\n # inference\n node_add = helper.make_node('Identity', ['X'], ['Y'])\n\n # graph\n graph_def = helper.make_graph([node_add], 'lr', [X], [Y], [])\n model_def = helper.make_model(\n graph_def, producer_name='dummy', ir_version=7,\n producer_version=\"0\",\n opset_imports=[helper.make_operatorsetid('', opset)])\n\n sess = onnxrt.InferenceSession(model_def.SerializeToString(), providers=provider)\n\n bind = SessionIOBinding(sess._sess)\n ort_value = C_OrtValue.ortvalue_from_numpy(x, device)\n bind.bind_ortvalue_input('X', ort_value)\n bind.bind_output('Y', device)\n sess._sess.run_with_iobinding(bind, None)\n ortvalue 
= bind.get_outputs()[0]\n y = ortvalue.numpy()\n assert_almost_equal(x, y)\n\n bind = SessionIOBinding(sess._sess)\n bind.bind_input('X', device, dtype, x.shape, ort_value.data_ptr())\n bind.bind_output('Y', device)\n sess._sess.run_with_iobinding(bind, None)\n ortvalue = bind.get_outputs()[0]\n y = ortvalue.numpy()\n assert_almost_equal(x, y)\n\n def test_bind_input_only(self):\n input = self.create_ortvalue_input_on_gpu()\n\n session = onnxrt.InferenceSession(get_name(\"mul_1.onnx\"), providers=onnxrt.get_available_providers())\n io_binding = session.io_binding()\n \n # Bind input to CUDA\n io_binding.bind_input('X', 'cuda', 0, np.float32, [3, 2], input.data_ptr())\n\n # Sync if different CUDA streams\n io_binding.synchronize_inputs()\n\n # Bind output to CPU\n io_binding.bind_output('Y')\n \n # Invoke Run\n session.run_with_iobinding(io_binding)\n\n # Sync if different CUDA streams\n io_binding.synchronize_outputs()\n \n # Get outputs over to CPU (the outputs which were bound to CUDA will get copied over to the host here)\n ort_output = io_binding.copy_outputs_to_cpu()[0]\n\n # Validate results\n self.assertTrue(np.array_equal(self.create_expected_output(), ort_output))\n\n def test_bind_input_and_preallocated_output(self):\n input = self.create_ortvalue_input_on_gpu()\n\n session = onnxrt.InferenceSession(get_name(\"mul_1.onnx\"), providers=onnxrt.get_available_providers())\n io_binding = session.io_binding()\n \n # Bind input to CUDA\n io_binding.bind_input('X', 'cuda', 0, np.float32, [3, 2], input.data_ptr())\n\n # Bind output to CUDA\n output = self.create_uninitialized_ortvalue_input_on_gpu()\n io_binding.bind_output('Y', 'cuda', 0, np.float32, [3, 2], output.data_ptr())\n\n # Sync if different CUDA streams\n io_binding.synchronize_inputs()\n\n # Invoke Run\n session.run_with_iobinding(io_binding)\n\n # Sync if different CUDA streams\n io_binding.synchronize_outputs()\n \n # Get outputs over to CPU (the outputs which were bound to CUDA will get copied over to the host here)\n ort_output_vals = io_binding.copy_outputs_to_cpu()[0]\n # Validate results\n self.assertTrue(np.array_equal(self.create_expected_output(), ort_output_vals))\n \n # Validate if ORT actually wrote to pre-allocated buffer by copying the Torch allocated buffer\n # to the host and validating its contents\n ort_output_vals_in_cpu = output.numpy()\n # Validate results\n self.assertTrue(np.array_equal(self.create_expected_output(), ort_output_vals_in_cpu))\n\n\n def test_bind_input_and_non_preallocated_output(self):\n session = onnxrt.InferenceSession(get_name(\"mul_1.onnx\"), providers=onnxrt.get_available_providers())\n io_binding = session.io_binding()\n \n # Bind input to CUDA\n io_binding.bind_input('X', 'cuda', 0, np.float32, [3, 2], self.create_ortvalue_input_on_gpu().data_ptr())\n\n # Bind output to CUDA\n io_binding.bind_output('Y', 'cuda')\n\n # Sync if different CUDA streams\n io_binding.synchronize_inputs()\n\n # Invoke Run\n session.run_with_iobinding(io_binding)\n\n # Sync if different CUDA streams\n io_binding.synchronize_outputs()\n\n # This call returns an OrtValue which has data allocated by ORT on CUDA\n ort_outputs = io_binding.get_outputs()\n self.assertEqual(len(ort_outputs), 1)\n self.assertEqual(ort_outputs[0].device_name(), \"cuda\")\n # Validate results (by copying results to CPU by creating a Numpy object)\n self.assertTrue(np.array_equal(self.create_expected_output(), ort_outputs[0].numpy()))\n \n # We should be able to repeat the above process as many times as we want - try once more\n 
ort_outputs = io_binding.get_outputs()\n self.assertEqual(len(ort_outputs), 1)\n self.assertEqual(ort_outputs[0].device_name(), \"cuda\")\n # Validate results (by copying results to CPU by creating a Numpy object)\n self.assertTrue(np.array_equal(self.create_expected_output(), ort_outputs[0].numpy()))\n\n # Change the bound input and validate the results in the same bound OrtValue\n # Bind alternate input to CUDA\n io_binding.bind_input('X', 'cuda', 0, np.float32, [3, 2], self.create_ortvalue_alternate_input_on_gpu().data_ptr())\n\n # Sync if different CUDA streams\n io_binding.synchronize_inputs()\n\n # Invoke Run\n session.run_with_iobinding(io_binding)\n\n # Sync if different CUDA streams\n io_binding.synchronize_outputs()\n\n # This call returns an OrtValue which has data allocated by ORT on CUDA\n ort_outputs = io_binding.get_outputs()\n self.assertEqual(len(ort_outputs), 1)\n self.assertEqual(ort_outputs[0].device_name(), \"cuda\")\n # Validate results (by copying results to CPU by creating a Numpy object)\n self.assertTrue(np.array_equal(self.create_expected_output_alternate(), ort_outputs[0].numpy()))\n\n def test_bind_input_and_bind_output_with_ortvalues(self):\n session = onnxrt.InferenceSession(get_name(\"mul_1.onnx\"), providers=onnxrt.get_available_providers())\n io_binding = session.io_binding()\n \n # Bind ortvalue as input\n input_ortvalue = self.create_ortvalue_input_on_gpu()\n io_binding.bind_ortvalue_input('X', input_ortvalue)\n\n # Bind ortvalue as output\n output_ortvalue = self.create_uninitialized_ortvalue_input_on_gpu()\n io_binding.bind_ortvalue_output('Y', output_ortvalue)\n\n # Sync if different CUDA streams\n io_binding.synchronize_inputs()\n\n # Invoke Run\n session.run_with_iobinding(io_binding)\n\n # Sync if different CUDA streams\n io_binding.synchronize_outputs()\n\n # Inspect contents of output_ortvalue and make sure that it has the right contents\n self.assertTrue(np.array_equal(self.create_expected_output(), output_ortvalue.numpy()))\n\n # Bind another ortvalue as input\n input_ortvalue_2 = self.create_ortvalue_alternate_input_on_gpu()\n io_binding.bind_ortvalue_input('X', input_ortvalue_2)\n\n # Sync if different CUDA streams\n io_binding.synchronize_inputs()\n\n # Invoke Run\n session.run_with_iobinding(io_binding)\n\n # Sync if different CUDA streams\n io_binding.synchronize_outputs()\n\n # Inspect contents of output_ortvalue and make sure that it has the right contents\n self.assertTrue(np.array_equal(self.create_expected_output_alternate(), output_ortvalue.numpy()))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.array", "numpy.arange" ] ]
ZedThree/tokamesh
[ "6671cd5a7e9b11ae7f81dee3dfcb85662145b35f" ]
[ "tokamesh/geometry.py" ]
[ "\nfrom numpy import sqrt, log, pi, tan, dot, cross, identity\nfrom numpy import absolute, nan, isfinite, minimum, maximum\nfrom numpy import array, ndarray, linspace, full, zeros, stack, savez, int64\nfrom collections import defaultdict\nfrom time import time\nimport sys\n\n\nclass BarycentricGeometryMatrix(object):\n \"\"\"\n Class for calculating geometry matrices over triangular meshes using\n barycentric linear interpolation.\n\n :param R: \\\n The major radius of each mesh vertex as a 1D numpy array.\n\n :param z: \\\n The z-height of each mesh vertex as a 1D numpy array.\n\n :param triangles: \\\n A 2D numpy array of integers specifying the indices of the vertices which form\n each of the triangles in the mesh. The array must have shape ``(N,3)`` where\n ``N`` is the total number of triangles.\n\n :param ray_origins: \\\n The ``(x,y,z)`` position vectors of the origin of each ray (i.e. line-of-sight)\n as a 2D numpy array. The array must have shape ``(M,3)`` where ``M`` is the\n total number of rays.\n\n :param ray_ends: \\\n The ``(x,y,z)`` position vectors of the end-points of each ray (i.e. line-of-sight)\n as a 2D numpy array. The array must have shape ``(M,3)`` where ``M`` is the\n total number of rays.\n \"\"\"\n def __init__(self, R, z, triangles, ray_origins, ray_ends):\n # first check the validity of the data\n self.check_geometry_data(R, z, triangles, ray_origins, ray_ends)\n\n self.R = R\n self.z = z\n self.triangle_vertices = triangles\n self.n_vertices = self.R.size\n self.n_triangles = self.triangle_vertices.shape[0]\n self.GeomFacs = GeometryFactors()\n\n # calculate the ray data\n diffs = ray_ends - ray_origins\n self.lengths = sqrt((diffs**2).sum(axis=1))\n self.rays = diffs / self.lengths[:,None]\n self.pixels = ray_origins\n self.n_rays = self.lengths.size\n\n # coefficients for the quadratic representation of the ray radius\n self.q0 = self.pixels[:,0]**2 + self.pixels[:,1]**2\n self.q1 = 2*(self.pixels[:,0]*self.rays[:,0] + self.pixels[:,1]*self.rays[:,1])\n self.q2 = self.rays[:,0]**2 + self.rays[:,1]**2\n self.sqrt_q2 = sqrt(self.q2)\n\n # calculate terms used in the linear inequalities\n self.L_tan = -0.5*self.q1 / self.q2 # distance of the tangency point\n self.R_tan_sqr = self.q0 + 0.5*self.q1*self.L_tan\n self.R_tan = sqrt(self.R_tan_sqr) # major radius of the tangency point\n self.z_tan = self.pixels[:,2] + self.rays[:,2]*self.L_tan # z-height of the tangency point\n self.m = self.rays[:,2] / sqrt(self.q2) # gradient of the hyperbola asymptote line\n\n # Construct a mapping from triangles to edges, and edges to vertices\n self.triangle_edges, self.edge_vertices, _ = build_edge_map(self.triangle_vertices)\n self.R_edges = self.R[self.edge_vertices]\n self.z_edges = self.z[self.edge_vertices]\n self.n_edges = self.edge_vertices.shape[0]\n\n # pre-calculate the properties of each edge\n self.R_edge_mid = self.R_edges.mean(axis=1)\n self.z_edge_mid = self.z_edges.mean(axis=1)\n self.edge_lengths = sqrt((self.R_edges[:,0]-self.R_edges[:,1])**2 + (self.z_edges[:,0]-self.z_edges[:,1])**2)\n self.edge_drn = zeros([self.n_edges, 2])\n self.edge_drn[:,0] = self.R_edges[:,1] - self.R_edges[:,0]\n self.edge_drn[:,1] = self.z_edges[:,1] - self.z_edges[:,0]\n self.edge_drn /= self.edge_lengths[:,None]\n\n # pre-calculate barycentric coordinate coefficients for each triangle\n R1, R2, R3 = [self.R[self.triangle_vertices[:,k]] for k in range(3)]\n z1, z2, z3 = [self.z[self.triangle_vertices[:,k]] for k in range(3)]\n self.area = 0.5*((z2 - z3)*(R1 - R3) + (R3 - R2)*(z1 
- z3))\n self.lam1_coeffs = 0.5*stack([z2-z3, R3-R2, R2*z3 - R3*z2], axis=1) / self.area[:,None]\n self.lam2_coeffs = 0.5*stack([z3-z1, R1-R3, R3*z1 - R1*z3], axis=1) / self.area[:,None]\n\n def calculate(self, save_file=None):\n \"\"\"\n Calculate the geometry matrix.\n\n :keyword str save_file: \\\n A string specifying a file path to which the geometry matrix data will be\n saved using the numpy ``.npz`` format. If not specified, the geometry matrix\n data is still returned as a dictionary, but is not saved.\n\n :return: \\\n The geometry matrix data as a dictionary of numpy arrays. The structure of\n the dictionary is as follows: ``entry_values`` is a 1D numpy array containing\n the values of all non-zero matrix entries. ``row_indices`` is a 1D numpy\n array containing the row-index of each of the non-zero entries. ``col_indices``\n is a 1D numpy array containing the column-index of each of the non-zero entries.\n ``shape`` is a 1D numpy array containing the dimensions of the matrix. The\n arrays defining the mesh are also stored as ``R``, ``z`` and ``triangles``.\n \"\"\"\n # clear the geometry factors in case it contains data from a previous calculation\n self.GeomFacs.vertex_map.clear()\n # process the first triangle so we can estimate the run-time\n t_start = time()\n self.process_triangle(0)\n dt = time() - t_start\n\n # use the estimate to break the evaluation into groups\n group_size = max(int(1. / dt), 1)\n rem = (self.n_triangles-1) % group_size\n ranges = zip(range(1, self.n_triangles, group_size), range(1 + group_size, self.n_triangles, group_size))\n\n # calculate the contribution to the matrix for each triangle\n for start, end in ranges:\n [self.process_triangle(i) for i in range(start, end)]\n\n # print the progress\n f_complete = (end+1)/self.n_triangles\n eta = int((time() - t_start) * (1 - f_complete) / f_complete)\n sys.stdout.write(\n f\"\\r >> Calculating geometry matrix: [ {f_complete:.1%} complete ETA: {eta} sec ] \"\n )\n sys.stdout.flush()\n\n # clean up any remaining triangles\n if rem != 0:\n [self.process_triangle(i) for i in range(self.n_triangles - rem, self.n_triangles)]\n\n t_elapsed = time() - t_start\n mins, secs = divmod(t_elapsed, 60)\n hrs, mins = divmod(mins, 60)\n time_taken = \"%d:%02d:%02d\" % (hrs, mins, secs)\n sys.stdout.write(\n f\"\\r >> Calculating geometry matrix: [ completed in {time_taken} ] \"\n )\n sys.stdout.flush()\n sys.stdout.write('\\n')\n\n # convert the calculated matrix elements to a form appropriate for sparse matrices\n data_vals, vertex_inds, ray_inds = self.GeomFacs.get_sparse_matrix_data()\n\n data_dict = {\n 'entry_values': data_vals,\n 'row_indices': ray_inds,\n 'col_indices': vertex_inds,\n 'shape': array([self.n_rays, self.n_vertices]),\n 'R': self.R,\n 'z': self.z,\n 'triangles': self.triangle_vertices\n }\n\n # save the matrix data\n if save_file is not None:\n savez(save_file, **data_dict)\n\n return data_dict\n\n def inequality_checks(self, R, z):\n dz = z[:,None] - self.z_tan[None,:]\n mR = R[:,None]*self.m[None,:]\n R_check = R[:,None] > self.R_tan[None,:]\n t = self.m[None,:]*(R[:,None] - self.R_tan[None,:])\n rgn_A = (dz > -mR).all(axis=0)\n rgn_B = (dz < mR).all(axis=0)\n rgn_C = ((t < dz) & (dz < -t) & R_check).all(axis=0)\n rgn_D = (~R_check).all(axis=0)\n return ~(rgn_A | rgn_B | rgn_C | rgn_D)\n\n def edge_hyperbola_intersections(self, R0, z0, uR, uz, w):\n u_ratio = uR / uz\n alpha = R0 + (self.pixels[:,2] - z0)*u_ratio\n beta = self.rays[:,2]*u_ratio\n\n # calculate the quadratic coefficients\n a = 
self.q2 - beta**2\n b = self.q1 - 2*alpha*beta\n c = self.q0 - alpha**2\n\n # use the discriminant to check for the existence of the roots\n D = b**2 - 4*a*c\n i = (D >= 0).nonzero()\n\n # where roots exist, calculate them\n intersections = full([self.n_rays, 2], nan)\n sqrt_D = sqrt(D[i])\n twice_a = 2*a[i]\n intersections[i,0] = -(b[i] + sqrt_D) / twice_a\n intersections[i,1] = -(b[i] - sqrt_D) / twice_a\n\n # convert the ray-distances of the intersections to side-displacements\n side_displacements = (intersections*self.rays[:,2,None] + self.pixels[:,2,None] - z0) / uz\n # reject any intersections which don't occur on the edge itself\n invalid_intersections = absolute(side_displacements) > 0.5*w\n intersections[invalid_intersections] = nan\n return intersections\n\n def horizontal_hyperbola_intersections(self, R0, z0, uR, uz, w):\n # find the intersections\n intersections = (z0 - self.pixels[:,2])/self.rays[:,2]\n # convert the ray-distances of the intersections to side-displacements\n R_intersect = sqrt(self.q2*(intersections - self.L_tan)**2 + self.R_tan_sqr)\n side_displacements = (R_intersect - R0) / uR\n # reject any intersections which don't occur on the edge itself\n invalid_intersections = absolute(side_displacements) > 0.5*w\n intersections[invalid_intersections] = nan\n return intersections\n\n def process_triangle(self, tri):\n # a hyperbola can at most intersect a triangle six times, so we create space for this.\n intersections = zeros([self.n_rays, 6])\n # loop over each triangle edge and check for intersections\n edges = self.triangle_edges[tri,:]\n for j, edge in enumerate(edges):\n R0 = self.R_edge_mid[edge]\n z0 = self.z_edge_mid[edge]\n uR, uz = self.edge_drn[edge,:]\n w = self.edge_lengths[edge]\n if uz == 0.: # if the edge is horizontal, a simplified method can be used\n intersections[:,2*j] = self.horizontal_hyperbola_intersections(R0, z0, uR, uz, w)\n intersections[:,2*j+1] = nan\n else: # else we need the general intersection calculation\n intersections[:,2*j:2*j+2] = self.edge_hyperbola_intersections(R0, z0, uR, uz, w)\n\n # clip all the intersections so that they lie in the allowed range\n maximum(intersections, 0., out=intersections)\n minimum(intersections, self.lengths[:,None], out=intersections)\n # now sort the intersections for each ray in order of distance\n intersections.sort(axis=1)\n\n # After sorting the intersections by distance along the ray, we now have (up to)\n # three pairs of distances where the ray enters and then leaves the triangle.\n # After the clipping operation, if any of these pairs contain the same value,\n # then that intersection occurs outside the allowed range and must be discarded.\n\n # loop over each of the three pairs:\n for j in range(3):\n equal = intersections[:,2*j] == intersections[:,2*j+1]\n if equal.any(): # discard any pairs with equal distance values\n intersections[equal,2*j:2*j+2] = nan\n\n # re-sort the intersections\n intersections.sort(axis=1)\n\n # check where valid intersections exist, and count how many there are per ray\n valid_intersections = isfinite(intersections)\n intersection_count = valid_intersections.sum(axis=1)\n\n # At this point, each ray should have an even number of intersections, if any\n # have an odd number then something has gone wrong, so raise an error.\n if (intersection_count % 2 == 1).any():\n raise ValueError('One or more rays has an odd number of intersections')\n\n\n max_intersections = intersection_count.max()\n for j in range(max_intersections//2):\n indices = (intersection_count >= 
2*(j+1)).nonzero()[0]\n # calculate the integrals of the barycentric coords over the intersection path\n L1_int, L2_int, L3_int = self.barycentric_coord_integral(\n l1=intersections[:,2*j],\n l2=intersections[:,2*j+1],\n inds=indices,\n tri=tri\n )\n\n # update the vertices with the integrals\n v1, v2, v3 = self.triangle_vertices[tri,:]\n self.GeomFacs.update_vertex(vertex_ind=v1, ray_indices=indices, integral_vals=L1_int)\n self.GeomFacs.update_vertex(vertex_ind=v2, ray_indices=indices, integral_vals=L2_int)\n self.GeomFacs.update_vertex(vertex_ind=v3, ray_indices=indices, integral_vals=L3_int)\n\n def barycentric_coord_integral(self, l1, l2, inds, tri):\n l1_slice = l1[inds]\n l2_slice = l2[inds]\n dl = l2_slice - l1_slice\n\n R_coeff = radius_hyperbolic_integral(\n l1=l1_slice,\n l2=l2_slice,\n l_tan=self.L_tan[inds],\n R_tan_sqr=self.R_tan_sqr[inds],\n sqrt_q2=self.sqrt_q2[inds]\n )\n\n z_coeff = self.pixels[inds,2]*dl + 0.5*self.rays[inds,2]*(l2_slice**2 - l1_slice**2)\n lam1_int = self.lam1_coeffs[tri,0]*R_coeff + self.lam1_coeffs[tri,1]*z_coeff + self.lam1_coeffs[tri,2]*dl\n lam2_int = self.lam2_coeffs[tri,0]*R_coeff + self.lam2_coeffs[tri,1]*z_coeff + self.lam2_coeffs[tri,2]*dl\n lam3_int = dl - lam1_int - lam2_int\n return lam1_int, lam2_int, lam3_int\n\n @staticmethod\n def check_geometry_data(R, z, triangle_inds, ray_starts, ray_ends):\n \"\"\"\n Check that all the data have the correct shapes / types\n \"\"\"\n if not all(type(arg) is ndarray for arg in [R, z, triangle_inds, ray_starts, ray_ends]):\n raise TypeError(\n \"\"\"\n [ BarycentricGeometryMatrix error ]\n >> All arguments must be of type numpy.ndarray.\n \"\"\"\n )\n\n if R.ndim != 1 or z.ndim != 1 or R.size != z.size:\n raise ValueError(\n \"\"\"\n [ BarycentricGeometryMatrix error ]\n >> 'R' and 'z' arguments must be 1-dimensional arrays of equal length.\n \"\"\"\n )\n\n if triangle_inds.ndim != 2 or triangle_inds.shape[1] != 3:\n raise ValueError(\n \"\"\"\n [ BarycentricGeometryMatrix error ]\n >> 'triangle_inds' argument must be a 2-dimensional array of shape (N,3)\n >> where 'N' is the total number of triangles.\n \"\"\"\n )\n\n dim_check = ray_starts.ndim != 2 or ray_ends.ndim != 2\n if dim_check or ray_starts.shape[1] != 3 or ray_ends.shape[1] != 3 or ray_ends.shape[0] != ray_starts.shape[0]:\n raise ValueError(\n \"\"\"\n [ BarycentricGeometryMatrix error ]\n >> 'ray_starts' and 'ray_ends' arguments must be 2-dimensional arrays\n >> of shape (M,3), where 'M' is the total number of rays.\n \"\"\"\n )\n\n\n\n\ndef radius_hyperbolic_integral(l1, l2, l_tan, R_tan_sqr, sqrt_q2):\n u1 = sqrt_q2*(l1 - l_tan)\n u2 = sqrt_q2*(l2 - l_tan)\n R1 = sqrt(u1**2 + R_tan_sqr)\n R2 = sqrt(u2**2 + R_tan_sqr)\n\n ratio = (u2 + R2) / (u1 + R1)\n return 0.5*(u2*R2 - u1*R1 + log(ratio)*R_tan_sqr)/sqrt_q2\n\n\n\n\nclass GeometryFactors(object):\n def __init__(self):\n self.vertex_map = defaultdict(lambda: 0.)\n\n def update_vertex(self, vertex_ind, ray_indices, integral_vals):\n for ray_idx, value in zip(ray_indices, integral_vals):\n self.vertex_map[(vertex_ind,ray_idx)] += value\n\n def get_sparse_matrix_data(self):\n vertex_inds = array([key[0] for key in self.vertex_map.keys()])\n ray_inds = array([key[1] for key in self.vertex_map.keys()])\n data_vals = array([v for v in self.vertex_map.values()])\n return data_vals, vertex_inds, ray_inds\n\n\n\n\ndef build_edge_map(triangles):\n \"\"\"\n Generates various mappings to and from edges in the mesh.\n\n :param triangles: \\\n A 2D numpy array of integers specifying the indices of 
the vertices which form\n each of the triangles in the mesh. The array must have shape ``(N,3)`` where\n ``N`` is the total number of triangles.\n\n :return: \\\n A tuple containing ``triangle_edges``, ``edge_vertices`` and ``edge_map``.\n ``triangle_edges`` specifies the indices of the edges which make up each\n triangle as a 2D numpy array of shape ``(N,3)`` where ``N`` is the total\n number of triangles. ``edge_vertices`` specifies the indices of the vertices\n which make up each edge as a 2D numpy array of shape ``(M,2)`` where ``M``\n is the total number of edges. ``edge_map`` is a dictionary mapping the index\n of an edge to the indices of the triangles to which it belongs.\n \"\"\"\n n_triangles = triangles.shape[0]\n triangle_edges = zeros([n_triangles,3], dtype=int64)\n edge_indices = {}\n edge_map = defaultdict(list)\n for i in range(n_triangles):\n s1 = (min(triangles[i,0], triangles[i,1]), max(triangles[i,0], triangles[i,1]))\n s2 = (min(triangles[i,1], triangles[i,2]), max(triangles[i,1], triangles[i,2]))\n s3 = (min(triangles[i,0], triangles[i,2]), max(triangles[i,0], triangles[i,2]))\n for j, edge in enumerate([s1, s2, s3]):\n if edge not in edge_indices:\n edge_indices[edge] = len(edge_indices)\n triangle_edges[i,j] = edge_indices[edge]\n edge_map[edge_indices[edge]].append(i)\n\n edge_vertices = zeros([len(edge_indices), 2], dtype=int64)\n for edge, i in edge_indices.items():\n edge_vertices[i,:] = [edge[0], edge[1]]\n\n return triangle_edges, edge_vertices, edge_map\n\n\n\n\nclass Camera(object):\n def __init__(self, position, direction, num_x=10, num_y=10, fov=40., max_distance=10.):\n self.u0 = position\n self.du = direction\n self.x_angles = linspace(-fov*pi/360., fov*pi/360., num_x)\n self.y_angles = linspace(-fov*pi/360., fov*pi/360., num_y)\n self.max_distance = max_distance\n\n # make sure the direction is normalised\n self.du /= sqrt(dot(self.du, self.du))\n\n # find the first perpendicular\n K = (self.du[1]/self.du[0])\n b = 1. / sqrt(K**2 + 1.)\n a = -K*b\n self.p1 = array([a, b, 0.])\n\n # use cross-product to find second perpendicular\n self.p2 = cross(self.du, self.p1)\n\n # identity matrix\n self.I = identity(3)\n\n # calculate the ray directions\n tan_x = tan(self.x_angles)\n tan_y = tan(self.y_angles)\n norm = sqrt(1 + (tan_x**2)[:,None] + (tan_y**2)[None,:])\n v = self.du[None,None,:] + tan_x[:,None,None]*self.p1[None,None,:] + tan_y[None,:,None]*self.p2[None,None,:]\n self.ray_directions = v/norm[:,:,None]\n self.ray_directions.resize([num_x*num_y, 3])\n\n self.ray_ends = self.u0[None,:] + max_distance*self.ray_directions\n self.ray_starts = zeros(self.ray_ends.shape) + self.u0[None,:]\n\n def plot_rays(self, axis, points=200):\n dist = linspace(0, self.max_distance, points)\n positions = self.u0[None,:,None] + self.ray_directions[:,:,None]*dist[None,None,:]\n R = sqrt(positions[:,0,:]**2 + positions[:,1,:]**2).T\n z = positions[:,2,:].T\n axis.plot(R,z)\n\n def project_rays(self, distance):\n positions = self.u0[None,:,None] + self.ray_directions[:,:,None]*distance[None,None,:]\n R = sqrt(positions[:,0,:]**2 + positions[:,1,:]**2).T\n z = positions[:,2,:].T\n return R, z\n" ]
[ [ "numpy.full", "numpy.array", "numpy.dot", "numpy.zeros", "numpy.minimum", "numpy.absolute", "numpy.log", "numpy.tan", "numpy.identity", "numpy.savez", "numpy.stack", "numpy.sqrt", "numpy.isfinite", "numpy.linspace", "numpy.cross", "numpy.maximum" ] ]
mjp59/SimpleITK_Segmentation
[ "58350a12ed79547520ac4ed56f5c6b0eb185a645" ]
[ "ImageProcessor.py" ]
[ "import skimage as ski\nimport numpy as np\n\n\nclass ImageProcessor:\n \"\"\"ImageProcessor is a class that implements the image processing\n functions from scikit-image. It has no input parameters for the\n construction and does not have any attributes.\n \"\"\"\n\n def __init__(self):\n pass\n\n def adaptiveEqualization(self, img):\n \"\"\"Applies histogram equalization to input image\n :param img: Image be processed\n :return: hist_eql_img: img after histogram equalization\n \"\"\"\n hist_eql_img = np.array(np.zeros(img.shape))\n if img.ndim >= 3:\n for channel in range(img.shape[2]):\n ch_hist_eql = ski.exposure.equalize_hist(img[:, :, channel])\n\n hist_eql_img[:, :, channel] = ski.exposure.rescale_intensity(\n ch_hist_eql, out_range=(0, 255))\n else:\n hist_eql_img = ski.exposure.equalize_hist(img)\n hist_eql_img = ski.exposure.rescale_intensity(hist_eql_img,\n out_range=(0, 255))\n\n hist_eql_img = hist_eql_img.astype(np.uint8)\n\n return hist_eql_img\n\n def contrastStretch(self, img):\n \"\"\"Applies contrast stretching to input image\n :param img: Image to be processed\n :return: cont_stretch_img: img after contrast stretching\n \"\"\"\n cont_stretch_img = ski.exposure.rescale_intensity(img)\n\n return cont_stretch_img\n\n def logCompression(self, img):\n \"\"\"Applies logarithmic compression to input image\n :param img: Image to be processed\n :return: log_comp_img: img after logarithmic compression\n \"\"\"\n log_comp_img = ski.exposure.adjust_log(img)\n return log_comp_img\n\n def reverseVideo(self, img):\n \"\"\"Inverts the colors in an image\n\n :param img: Image to be processed\n :return: inverted_img: Image with inverted colors\n \"\"\"\n inverted_img = np.invert(img)\n return inverted_img\n\n def isGrayscale(self, img):\n \"\"\"Checks to see if an image is grayscale\n isGrayscale determines if an images is grayscale by assuming a\n grayscale image will have one of the following properties\n 1. Only have two dimensions\n 2. If it has 3D (indicating RGB pixel color values), R=B=G for all\n pixels.\n :param img: Input image\n :return: is_grayscale: Indicates whether the input image is grayscale\n \"\"\"\n\n if img.ndim == 2:\n is_grayscale = True\n return is_grayscale\n img_dimm = img.shape\n\n for x in range(0, img_dimm[0]):\n for y in range(0, img_dimm[1]):\n if img[x, y, 0] == img[x, y, 1] == img[x, y, 2]:\n continue\n else:\n is_grayscale = False\n return is_grayscale\n\n # It makes it through the loop without finding a place where pixels\n # are not equal (causing it to return False), then assume that it is\n # a grayscale image.\n is_grayscale = True\n return is_grayscale\n\n def histogram(self, img):\n \"\"\"Generates a list of histograms with intensity values for each\n channel in the image.\n\n Each item in the list consists of a 2D numpy array, in which the\n first dimension is the histogram itself, and the second dimension\n is the bin values. 
A histogram item from this list could be plotted\n as plt.plot(histogram_item[1], histogram_item[0])\n\n :param img: input image\n :return: hist (list): List of histograms for each color channel\n \"\"\"\n hist = []\n if self.isGrayscale(img):\n # skimage's exposure.histogram returns (counts, bin_centers)\n [hist_vals, bin_centers] = ski.exposure.histogram(img)\n hist_vals = hist_vals.tolist()\n bin_centers = bin_centers.tolist()\n hist_temp = (hist_vals, bin_centers)\n\n hist.append(hist_temp)\n\n return hist\n else:\n dimm = img.shape\n hist = []\n for d in range(0, dimm[2]):\n [hist_vals, bin_centers] = ski.exposure.histogram(img[:, :, d])\n hist_vals = hist_vals.tolist()\n bin_centers = bin_centers.tolist()\n hist_temp = (hist_vals, bin_centers)\n\n hist.append(hist_temp)\n return hist\n" ]
[ [ "numpy.invert", "numpy.zeros" ] ]
farrjere/fortune_teller
[ "8489c700cf99d0e291b7a5322dd21cf3aa054c19" ]
[ "example_models/label_encoder.py" ]
[ "from sklearn.preprocessing import LabelEncoder\nfrom sklearn.pipeline import Pipeline\nclass MultiColumnLabelEncoder:\n def __init__(self,columns = None):\n self.columns = columns # array of column names to encode\n\n def fit(self,X,y=None):\n return self # not relevant here\n\n def transform(self,X):\n '''\n Transforms columns of X specified in self.columns using\n LabelEncoder(). If no columns specified, transforms all\n columns in X.\n '''\n output = X.copy()\n if self.columns is not None:\n for col in self.columns:\n output[col] = LabelEncoder().fit_transform(output[col])\n else:\n for colname,col in output.iteritems():\n output[colname] = LabelEncoder().fit_transform(col)\n return output\n\n def fit_transform(self,X,y=None):\n return self.fit(X,y).transform(X)" ]
[ [ "sklearn.preprocessing.LabelEncoder" ] ]
tunahansalih/VPE
[ "97a47820ebe167120af096c8eb746195790e9e4d" ]
[ "code/models/vaeIdsiaStn.py" ]
[ "# This code is modified from the repository\n# https://github.com/bhpfelix/Variational-Autoencoder-PyTorch\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass View(nn.Module):\n def __init__(self):\n super(View, self).__init__()\n\n def forward(self, x):\n numel = x.numel() / x.shape[0]\n return x.view(-1, int(numel))\n\n\ndef convNoutput(convs, input_size): # predict output size after conv layers\n input_size = int(input_size)\n input_channels = convs[0][0].weight.shape[1] # input channel\n output = torch.Tensor(1, input_channels, input_size, input_size)\n with torch.no_grad():\n for conv in convs:\n output = conv(output)\n return output.numel(), output.shape\n\n\nclass stn(nn.Module):\n def __init__(self, input_channels, input_size, params):\n super(stn, self).__init__()\n\n self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv1 = nn.Sequential(\n nn.Conv2d(input_channels, params[0], kernel_size=5, stride=1, padding=2),\n nn.ReLU(True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(params[0], params[1], kernel_size=5, stride=1, padding=2),\n nn.ReLU(True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n\n out_numel, out_size = convNoutput([self.conv1, self.conv2], input_size / 2)\n # set fc layer based on predicted size\n self.fc = nn.Sequential(\n View(),\n nn.Linear(out_numel, params[2]),\n nn.ReLU()\n )\n self.classifier = classifier = nn.Sequential(\n View(),\n nn.Linear(params[2], 6) # affine transform has 6 parameters\n )\n # initialize stn parameters (affine transform)\n self.classifier[1].weight.data.fill_(0)\n self.classifier[1].bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0])\n\n def localization_network(self, x):\n x = self.maxpool(x)\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.fc(x)\n x = self.classifier(x)\n return x\n\n def forward(self, x):\n theta = self.localization_network(x)\n theta = theta.view(-1, 2, 3)\n grid = F.affine_grid(theta, x.size())\n x = F.grid_sample(x, grid)\n return x\n\n\nclass VAEIdsia(nn.Module):\n def __init__(self, nc, input_size, latent_variable_size=300, cnn_chn=[100, 150, 250], param1=None, param2=None,\n param3=None):\n super(VAEIdsia, self).__init__()\n\n self.cnn_chn = cnn_chn\n self.param1 = param1\n self.param2 = param2\n self.param3 = param3\n\n self.input_size = input_size\n self.nc = nc\n self.latent_variable_size = latent_variable_size\n\n # encoder\n self.e1 = nn.Conv2d(in_channels=nc, out_channels=self.cnn_chn[0], kernel_size=7, stride=2, padding=3)\n self.bn1 = nn.BatchNorm2d(num_features=self.cnn_chn[0])\n\n self.e2 = nn.Conv2d(in_channels=self.cnn_chn[0], out_channels=self.cnn_chn[1], kernel_size=4, stride=2,\n padding=1) # 1/4\n self.bn2 = nn.BatchNorm2d(num_features=self.cnn_chn[1])\n\n self.e3 = nn.Conv2d(in_channels=self.cnn_chn[1], out_channels=self.cnn_chn[2], kernel_size=4, stride=2,\n padding=1) # 1/8\n self.bn3 = nn.BatchNorm2d(num_features=self.cnn_chn[2])\n\n self.fc1 = nn.Linear(in_features=int(input_size / 8 * input_size / 8 * self.cnn_chn[2]),\n out_features=latent_variable_size)\n self.fc2 = nn.Linear(in_features=int(input_size / 8 * input_size / 8 * self.cnn_chn[2]),\n out_features=latent_variable_size)\n\n # decoder\n self.d1 = nn.Linear(in_features=latent_variable_size,\n out_features=int(input_size / 8 * input_size / 8 * self.cnn_chn[2]))\n\n self.up1 = nn.UpsamplingNearest2d(scale_factor=2) # 8 -> 16\n self.pd1 = nn.ReplicationPad2d(padding=1)\n self.d2 = 
nn.Conv2d(in_channels=self.cnn_chn[2], out_channels=self.cnn_chn[1], kernel_size=3, stride=1)\n self.bn6 = nn.BatchNorm2d(num_features=self.cnn_chn[1], eps=1.e-3)\n\n self.up2 = nn.UpsamplingNearest2d(scale_factor=2) # 16 -> 32\n self.pd2 = nn.ReplicationPad2d(padding=1)\n self.d3 = nn.Conv2d(in_channels=self.cnn_chn[1], out_channels=self.cnn_chn[0], kernel_size=3, stride=1)\n self.bn7 = nn.BatchNorm2d(num_features=self.cnn_chn[0], eps=1.e-3)\n\n self.up3 = nn.UpsamplingNearest2d(scale_factor=2) # 32 -> 64\n self.pd3 = nn.ReplicationPad2d(padding=1)\n self.d4 = nn.Conv2d(in_channels=self.cnn_chn[0], out_channels=3, kernel_size=3, stride=1)\n\n self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n if param1 is not None:\n self.stn1 = stn(3, self.input_size, param1)\n if param2 is not None:\n self.stn2 = stn(self.cnn_chn[0], self.input_size / 2, param2)\n if param3 is not None:\n self.stn3 = stn(self.cnn_chn[1], self.input_size / 4, param3)\n\n def encode(self, x):\n if self.param1 is not None:\n x = self.stn1(x)\n\n h1 = self.leakyrelu(self.bn1(self.e1(x)))\n if self.param2 is not None:\n h1 = self.stn2(h1)\n\n h2 = self.leakyrelu(self.bn2(self.e2(h1)))\n if self.param3 is not None:\n h2 = self.stn3(h2)\n\n h3 = self.leakyrelu(self.bn3(self.e3(h2)))\n h4 = h3.view(-1, int(self.input_size / 8 * self.input_size / 8 * self.cnn_chn[2]))\n\n return self.fc1(h4), self.fc2(h4), x\n\n def reparametrize(self, mu, logvar):\n std = logvar.mul(0.5).exp_()\n eps = torch.cuda.FloatTensor(std.size()).normal_()\n eps = Variable(eps)\n return eps.mul(std).add_(mu)\n\n def decode(self, z):\n h1 = self.relu(self.d1(z))\n # h1 = h1.view(-1, self.ngf*8*2, 4, 4)\n h1 = h1.view(-1, self.cnn_chn[2], int(self.input_size / 8), int(self.input_size / 8))\n h2 = self.leakyrelu(self.bn6(self.d2(self.pd1(self.up1(h1)))))\n h3 = self.leakyrelu(self.bn7(self.d3(self.pd2(self.up2(h2)))))\n return self.sigmoid(self.d4(self.pd3(self.up3(h3))))\n\n def get_latent_var(self, x):\n # encode() returns three values (mu, logvar, STN-warped input); the warped input is unused here\n mu, logvar, _ = self.encode(x)\n z = self.reparametrize(mu, logvar)\n return z\n\n def forward(self, x):\n mu, logvar, xstn = self.encode(x)\n z = self.reparametrize(mu, logvar)\n res = self.decode(z)\n return res, mu, logvar, xstn\n\n def init_params(self, net):\n print('Loading the model from the file...')\n net_dict = self.state_dict()\n if isinstance(net, dict):\n pre_dict = net\n else:\n pre_dict = net.state_dict()\n # 1. filter out unnecessary keys\n pre_dict = {k: v for k, v in pre_dict.items() if (k in net_dict)} # for fs net\n # 2. overwrite entries in the existing state dict\n net_dict.update(pre_dict)\n # 3. load the new state dict\n self.load_state_dict(net_dict)\n" ]
[ [ "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.ReplicationPad2d", "torch.nn.Sigmoid", "torch.FloatTensor", "torch.no_grad", "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "torch.autograd.Variable", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.functional.grid_sample", "torch.Tensor", "torch.nn.UpsamplingNearest2d" ] ]
JohnZed/cudf
[ "403d2571e5fcde66b7768b2213f6c142cc8b63db" ]
[ "python/cudf/cudf/core/frame.py" ]
[ "import functools\nimport warnings\nfrom collections import OrderedDict\n\nimport cupy\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_dtype_equal\n\nimport cudf\nimport cudf._lib as libcudf\nfrom cudf._lib.nvtx import annotate\nfrom cudf._lib.scalar import as_scalar\nfrom cudf.core.column import as_column, build_categorical_column\nfrom cudf.utils.dtypes import (\n is_categorical_dtype,\n is_numerical_dtype,\n is_scalar,\n min_scalar_type,\n)\n\n\nclass Frame(libcudf.table.Table):\n \"\"\"\n Frame: A collection of Column objects with an optional index.\n\n Parameters\n ----------\n data : OrderedColumnDict\n An OrderedColumnDict mapping column names to Columns\n index : Table\n A Frame representing the (optional) index columns.\n \"\"\"\n\n @classmethod\n def _from_table(cls, table):\n return cls(table._data, index=table._index)\n\n @classmethod\n @annotate(\"CONCAT\", color=\"orange\", domain=\"cudf_python\")\n def _concat(cls, objs, axis=0, ignore_index=False):\n\n # shallow-copy the input DFs in case the same DF instance\n # is concatenated with itself\n objs = [f.copy(deep=False) for f in objs]\n\n from cudf.core.index import as_index\n from cudf.core.column.column import column_empty\n from cudf.core.column.column import build_categorical_column\n\n # Create a dictionary of the common, non-null columns\n def get_non_null_cols_and_dtypes(col_idxs, list_of_columns):\n # A mapping of {idx: np.dtype}\n dtypes = dict()\n # A mapping of {idx: [...columns]}, where `[...columns]`\n # is a list of columns with at least one valid value for each\n # column name across all input dataframes\n non_null_columns = dict()\n for idx in col_idxs:\n for cols in list_of_columns:\n # Skip columns not in this frame\n if idx >= len(cols) or cols[idx] is None:\n continue\n # Store the first dtype we find for a column, even if it's\n # all-null. This ensures we always have at least one dtype\n # for each name. This dtype will be overwritten later if a\n # non-null Column with the same name is found.\n if idx not in dtypes:\n dtypes[idx] = cols[idx].dtype\n if cols[idx].valid_count > 0:\n if idx not in non_null_columns:\n non_null_columns[idx] = [cols[idx]]\n else:\n non_null_columns[idx].append(cols[idx])\n return non_null_columns, dtypes\n\n def find_common_dtypes_and_categories(non_null_columns, dtypes):\n # A mapping of {idx: categories}, where `categories` is a\n # column of all the unique categorical values from each\n # categorical column across all input dataframes\n categories = dict()\n for idx, cols in non_null_columns.items():\n # default to the first non-null dtype\n dtypes[idx] = cols[0].dtype\n # If all the non-null dtypes are int/float, find a common dtype\n if all(is_numerical_dtype(col.dtype) for col in cols):\n dtypes[idx] = np.find_common_type(\n [col.dtype for col in cols], []\n )\n # If all categorical dtypes, combine the categories\n elif all(is_categorical_dtype(col.dtype) for col in cols):\n # Combine and de-dupe the categories\n categories[idx] = (\n cudf.concat([col.cat().categories for col in cols])\n .to_series()\n .drop_duplicates(ignore_index=True)\n ._column\n )\n # Set the column dtype to the codes' dtype. 
The categories\n # will be re-assigned at the end\n dtypes[idx] = min_scalar_type(len(categories[idx]))\n # Otherwise raise an error if columns have different dtypes\n elif not all(\n is_dtype_equal(c.dtype, dtypes[idx]) for c in cols\n ):\n raise ValueError(\"All columns must be the same type\")\n return categories\n\n def cast_cols_to_common_dtypes(\n col_idxs, list_of_columns, dtypes, categories\n ):\n # Cast all columns to a common dtype, assign combined categories,\n # and back-fill missing columns with all-null columns\n for idx in col_idxs:\n dtype = dtypes[idx]\n for cols in list_of_columns:\n # If column not in this df, fill with an all-null column\n if idx >= len(cols) or cols[idx] is None:\n n = len(next(filter(lambda x: x is not None, cols)))\n cols[idx] = column_empty(n, dtype, masked=True)\n else:\n # If column is categorical, rebase the codes with the\n # combined categories, and cast the new codes to the\n # min-scalar-sized dtype\n if idx in categories:\n cols[idx] = (\n cols[idx]\n .cat()\n ._set_categories(\n cols[idx].cat().categories,\n categories[idx],\n is_unique=True,\n )\n .codes\n )\n cols[idx] = cols[idx].astype(dtype)\n\n def reassign_categories(categories, cols, col_idxs):\n for name, idx in zip(cols, col_idxs):\n if idx in categories:\n cols[name] = build_categorical_column(\n categories=categories[idx],\n codes=as_column(\n cols[name].base_data, dtype=cols[name].dtype\n ),\n mask=cols[name].base_mask,\n offset=cols[name].offset,\n size=cols[name].size,\n )\n\n # Get a list of the unique table column names\n names = [name for f in objs for name in f._column_names]\n names = list(OrderedDict.fromkeys(names).keys())\n\n # Combine the index and table columns for each Frame into a\n # list of [...index_cols, ...table_cols]. If a table is\n # missing a column, that list will have None in the slot instead\n columns = [\n ([] if ignore_index else list(f._index._data.columns))\n + [f._data[name] if name in f._data else None for name in names]\n for i, f in enumerate(objs)\n ]\n\n # Get a list of the combined index and table column indices\n indices = list(range(functools.reduce(max, map(len, columns))))\n # The position of the first table colum in each\n # combined index + table columns list\n first_data_column_position = len(indices) - len(names)\n\n # Get the non-null columns and their dtypes\n non_null_cols, dtypes = get_non_null_cols_and_dtypes(indices, columns)\n\n # Infer common dtypes between numeric columns\n # and combine CategoricalColumn categories\n categories = find_common_dtypes_and_categories(non_null_cols, dtypes)\n\n # Cast all columns to a common dtype, assign combined categories,\n # and back-fill missing columns with all-null columns\n cast_cols_to_common_dtypes(indices, columns, dtypes, categories)\n\n # Construct input tables with the index and data columns in the same\n # order. 
This strips the given index/column names and replaces the\n # names with their integer positions in the `cols` list\n tables = []\n for cols in columns:\n table_cols = cols[first_data_column_position:]\n table_names = indices[first_data_column_position:]\n table = cls(data=dict(zip(table_names, table_cols)))\n if 1 == first_data_column_position:\n table._index = as_index(cols[0])\n elif first_data_column_position > 1:\n index_cols = cols[:first_data_column_position]\n index_names = indices[:first_data_column_position]\n table._index = cls(data=dict(zip(index_names, index_cols)))\n tables.append(table)\n\n # Concatenate the Tables\n out = cls._from_table(\n libcudf.concat.concat_tables(tables, ignore_index=ignore_index)\n )\n\n # Reassign the categories for any categorical table cols\n reassign_categories(\n categories, out._data, indices[first_data_column_position:]\n )\n\n # Reassign the categories for any categorical index cols\n reassign_categories(\n categories, out._index._data, indices[:first_data_column_position]\n )\n\n # Reassign index and column names\n if isinstance(objs[0].columns, pd.MultiIndex):\n out.columns = objs[0].columns\n else:\n out.columns = names\n\n out._index.name = objs[0]._index.name\n out._index.names = objs[0]._index.names\n\n return out\n\n def _get_columns_by_label(self, labels, downcast=False):\n \"\"\"\n Returns columns of the Frame specified by `labels`\n\n If downcast is True, try and downcast from a DataFrame to a Series\n \"\"\"\n new_data = self._data.get_by_label(labels)\n if downcast:\n if is_scalar(labels):\n nlevels = 1\n elif isinstance(labels, tuple):\n nlevels = len(labels)\n if self._data.multiindex is False or nlevels == self._data.nlevels:\n return self._constructor_sliced(\n new_data, name=labels, index=self.index\n )\n return self._constructor(\n new_data, columns=new_data.to_pandas_index(), index=self.index\n )\n\n def _get_columns_by_index(self, indices):\n \"\"\"\n Returns columns of the Frame specified by `labels`\n\n \"\"\"\n data = self._data.get_by_index(indices)\n return self._constructor(\n data, columns=data.to_pandas_index(), index=self.index\n )\n\n def _gather(self, gather_map, keep_index=True):\n if not pd.api.types.is_integer_dtype(gather_map.dtype):\n gather_map = gather_map.astype(\"int32\")\n result = self.__class__._from_table(\n libcudf.copying.gather(\n self, as_column(gather_map), keep_index=keep_index\n )\n )\n result._copy_categories(self)\n return result\n\n def _hash(self, initial_hash_values=None):\n return libcudf.hash.hash(self, initial_hash_values)\n\n def _hash_partition(\n self, columns_to_hash, num_partitions, keep_index=True\n ):\n output, offsets = libcudf.hash.hash_partition(\n self, columns_to_hash, num_partitions, keep_index\n )\n output = self.__class__._from_table(output)\n output._copy_categories(self, include_index=keep_index)\n return output, offsets\n\n def _as_column(self):\n \"\"\"\n _as_column : Converts a single columned Frame to Column\n \"\"\"\n assert (\n self._num_columns == 1\n and self._index is None\n and self._column_names[0] is None\n ), \"\"\"There should be only one data column,\n no index and None as the name to use this method\"\"\"\n\n return self._data[None].copy(deep=False)\n\n def _scatter(self, key, value):\n result = self._from_table(libcudf.copying.scatter(value, key, self))\n\n result._copy_categories(self)\n return result\n\n def _empty_like(self, keep_index=True):\n result = self._from_table(\n libcudf.copying.table_empty_like(self, keep_index)\n )\n\n 
result._copy_categories(self, include_index=keep_index)\n return result\n\n def _slice(self, arg):\n \"\"\"\n _slice : slice the frame as per the arg\n\n Parameters\n ----------\n arg : should always be of type slice; non-unit steps are handled via a gather\n\n \"\"\"\n from cudf.core.index import RangeIndex\n\n num_rows = len(self)\n if num_rows == 0:\n return self\n start, stop, stride = arg.indices(num_rows)\n\n # This is just to handle RangeIndex type, stop\n # it from materializing unnecessarily\n keep_index = True\n if self.index is not None and isinstance(self.index, RangeIndex):\n keep_index = False\n\n if start < 0:\n start = start + num_rows\n if stop < 0:\n stop = stop + num_rows\n\n if (start > stop and (stride is None or stride == 1)) or (\n len(self._data) == 0 and keep_index is False\n ):\n return self._empty_like(keep_index)\n else:\n start = len(self) if start > num_rows else start\n stop = len(self) if stop > num_rows else stop\n\n if stride is not None and stride != 1:\n return self._gather(\n cupy.arange(start, stop=stop, step=stride, dtype=np.int32)\n )\n else:\n result = self._from_table(\n libcudf.copying.table_slice(\n self, [start, stop], keep_index\n )[0]\n )\n\n result._copy_categories(self, include_index=keep_index)\n # Adding index of type RangeIndex back to\n # result\n if keep_index is False and self.index is not None:\n result.index = self.index[start:stop]\n result.columns = self.columns\n return result\n\n def clip(self, lower=None, upper=None, inplace=False, axis=1):\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n Thresholds can be singular values or array like,\n and in the latter case the clipping is performed\n element-wise in the specified axis. Currently only\n `axis=1` is supported.\n\n Parameters\n ----------\n lower : scalar or array_like, default None\n Minimum threshold value. All values below this\n threshold will be set to it. If it is None,\n there will be no clipping based on lower.\n In case of Series/Index, lower is expected to be\n a scalar or an array of size 1.\n upper : scalar or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it. 
If it is None,\n there will be no clipping based on upper.\n In case of Series, upper is expected to be\n a scalar or an array of size 1.\n inplace : bool, default False\n\n Returns\n -------\n Clipped DataFrame/Series/Index/MultiIndex\n\n Examples\n >>> import cudf\n >>> df = cudf.DataFrame({\"a\":[1, 2, 3, 4], \"b\":['a', 'b', 'c', 'd']})\n >>> df.clip(lower=[2, 'b'], upper=[3, 'c'])\n a b\n 0 2 b\n 1 2 b\n 2 3 c\n 3 3 c\n\n >>> df.clip(lower=None, upper=[3, 'c'])\n a b\n 0 1 a\n 1 2 b\n 2 3 c\n 3 3 c\n\n >>> df.clip(lower=[2, 'b'], upper=None)\n a b\n 0 2 b\n 1 2 b\n 2 3 c\n 3 4 d\n\n >>> df.clip(lower=2, upper=3, inplace=True)\n >>> df\n a b\n 0 2 2\n 1 2 3\n 2 3 3\n 3 3 3\n\n >>> import cudf\n >>> sr = cudf.Series([1, 2, 3, 4])\n >>> sr.clip(lower=2, upper=3)\n 0 2\n 1 2\n 2 3\n 3 3\n dtype: int64\n\n >>> sr.clip(lower=None, upper=3)\n 0 1\n 1 2\n 2 3\n 3 3\n dtype: int64\n\n >>> sr.clip(lower=2, upper=None, inplace=True)\n >>> sr\n 0 2\n 1 2\n 2 3\n 3 4\n dtype: int64\n \"\"\"\n\n if axis != 1:\n raise NotImplementedError(\"`axis is not yet supported in clip`\")\n\n if lower is None and upper is None:\n return None if inplace is True else self.copy(deep=True)\n\n if is_scalar(lower):\n lower = np.full(self._num_columns, lower)\n if is_scalar(upper):\n upper = np.full(self._num_columns, upper)\n\n if len(lower) != len(upper):\n raise ValueError(\"Length of lower and upper should be equal\")\n\n if len(lower) != self._num_columns:\n raise ValueError(\n \"\"\"Length of lower/upper should be\n equal to number of columns in\n DataFrame/Series/Index/MultiIndex\"\"\"\n )\n\n output = self.copy(deep=False)\n if output.ndim == 1:\n # In case of series and Index,\n # swap lower and upper if lower > upper\n if (\n lower[0] is not None\n and upper[0] is not None\n and (lower[0] > upper[0])\n ):\n lower[0], upper[0] = upper[0], lower[0]\n\n for i, name in enumerate(self._data):\n output._data[name] = self._data[name].clip(lower[i], upper[i])\n\n output._copy_categories(self, include_index=False)\n\n return self._mimic_inplace(output, inplace=inplace)\n\n def _normalize_scalars(self, other):\n \"\"\"\n Try to normalizes scalar values as per self dtype\n \"\"\"\n if (\n other is not None\n and (isinstance(other, float) and not np.isnan(other))\n ) and (self.dtype.type(other) != other):\n raise TypeError(\n \"Cannot safely cast non-equivalent {} to {}\".format(\n type(other).__name__, self.dtype.name\n )\n )\n\n return (\n self.dtype.type(other)\n if (\n other is not None\n and (isinstance(other, float) and not np.isnan(other))\n )\n else other\n )\n\n def _normalize_columns_and_scalars_type(self, other):\n \"\"\"\n Try to normalize the other's dtypes as per self.\n\n Parameters\n ----------\n\n self : Can be a DataFrame or Series or Index\n other : Can be a DataFrame, Series, Index, Array\n like object or a scalar value\n\n if self is DataFrame, other can be only a\n scalar or array like with size of number of columns\n in DataFrame or a DataFrame with same dimension\n\n if self is Series, other can be only a scalar or\n a series like with same length as self\n\n Returns:\n --------\n A dataframe/series/list/scalar form of normalized other\n \"\"\"\n if isinstance(self, cudf.DataFrame) and isinstance(\n other, cudf.DataFrame\n ):\n return [\n other[self_col].astype(self._data[self_col].dtype)._column\n for self_col in self._data.names\n ]\n\n elif isinstance(self, (cudf.Series, cudf.Index)) and not is_scalar(\n other\n ):\n other = as_column(other)\n return other.astype(self.dtype)\n\n else:\n # 
Handles scalar or list/array like scalars\n if isinstance(self, (cudf.Series, cudf.Index)) and is_scalar(\n other\n ):\n return self._normalize_scalars(other)\n\n elif isinstance(self, cudf.DataFrame):\n out = []\n if is_scalar(other):\n other = [other for i in range(len(self._data.names))]\n out = [\n self[in_col_name]._normalize_scalars(sclr)\n for in_col_name, sclr in zip(self._data.names, other)\n ]\n\n return out\n else:\n raise ValueError(\n \"Inappropriate input {} and other {} combination\".format(\n type(self), type(other)\n )\n )\n\n def where(self, cond, other=None, inplace=False):\n \"\"\"\n Replace values where the condition is False.\n\n Parameters\n ----------\n cond : bool Series/DataFrame, array-like\n Where cond is True, keep the original value.\n Where False, replace with corresponding value from other.\n Callables are not supported.\n other: scalar, list of scalars, Series/DataFrame\n Entries where cond is False are replaced with\n corresponding value from other. Callables are not\n supported. Default is None.\n\n DataFrame expects only Scalar or array like with scalars or\n dataframe with same dimension as self.\n\n Series expects only scalar or series like with same length\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n Returns\n -------\n Same type as caller\n\n Examples:\n ---------\n >>> import cudf\n >>> df = cudf.DataFrame({\"A\":[1, 4, 5], \"B\":[3, 5, 8]})\n >>> df.where(df % 2 == 0, [-1, -1])\n A B\n 0 -1 -1\n 1 4 -1\n 2 -1 8\n\n >>> ser = cudf.Series([4, 3, 2, 1, 0])\n >>> ser.where(ser > 2, 10)\n 0 4\n 1 3\n 2 10\n 3 10\n 4 10\n dtype: int64\n >>> ser.where(ser > 2)\n 0 4\n 1 3\n 2 null\n 3 null\n 4 null\n dtype: int64\n \"\"\"\n\n if isinstance(self, cudf.DataFrame):\n if hasattr(cond, \"__cuda_array_interface__\"):\n cond = self.from_gpu_matrix(\n cond, columns=self._data.names, index=self.index\n )\n elif not isinstance(cond, cudf.DataFrame):\n cond = self.from_pandas(pd.DataFrame(cond))\n\n common_cols = set(self._data.names).intersection(\n set(cond._data.names)\n )\n if len(common_cols) > 0:\n # If `self` and `cond` are having unequal index,\n # then re-index `cond`.\n if len(self.index) != len(cond.index) or any(\n self.index != cond.index\n ):\n cond = cond.reindex(self.index)\n else:\n if cond.shape != self.shape:\n raise ValueError(\n \"\"\"Array conditional must be same shape as self\"\"\"\n )\n # Setting `self` column names to `cond`\n # as `cond` has no column names.\n cond.columns = self.columns\n\n other = self._normalize_columns_and_scalars_type(other)\n out_df = cudf.DataFrame(index=self.index)\n if len(self._columns) != len(other):\n raise ValueError(\n \"\"\"Replacement list length or number of dataframe columns\n should be equal to Number of columns of dataframe\"\"\"\n )\n\n for column_name, other_column in zip(self._data.names, other):\n input_col = self._data[column_name]\n if column_name in cond._data:\n if is_categorical_dtype(input_col.dtype):\n if np.isscalar(other_column):\n try:\n other_column = input_col._encode(other_column)\n except ValueError:\n # When other is not present in categories,\n # fill with Null.\n other_column = None\n elif hasattr(other_column, \"codes\"):\n other_column = other_column.codes\n input_col = input_col.codes\n\n result = libcudf.copying.copy_if_else(\n input_col, other_column, cond._data[column_name]\n )\n\n if is_categorical_dtype(self._data[column_name].dtype):\n result = build_categorical_column(\n categories=self._data[column_name].categories,\n 
codes=as_column(\n result.base_data, dtype=result.dtype\n ),\n mask=result.base_mask,\n size=result.size,\n offset=result.offset,\n ordered=self._data[column_name].ordered,\n )\n else:\n from cudf._lib.null_mask import MaskState, create_null_mask\n\n out_mask = create_null_mask(\n len(input_col), state=MaskState.ALL_NULL\n )\n result = input_col.set_mask(out_mask)\n out_df[column_name] = self[column_name].__class__(result)\n\n return self._mimic_inplace(out_df, inplace=inplace)\n\n else:\n\n if isinstance(other, cudf.DataFrame):\n raise NotImplementedError(\n \"cannot align with a higher dimensional Frame\"\n )\n\n other = self._normalize_columns_and_scalars_type(other)\n\n cond = as_column(cond)\n if len(cond) != len(self):\n raise ValueError(\n \"\"\"Array conditional must be same shape as self\"\"\"\n )\n input_col = self._data[self.name]\n if is_categorical_dtype(input_col.dtype):\n if np.isscalar(other):\n try:\n other = input_col._encode(other)\n except ValueError:\n # When other is not present in categories,\n # fill with Null.\n other = None\n elif hasattr(other, \"codes\"):\n other = other.codes\n\n input_col = input_col.codes\n\n result = libcudf.copying.copy_if_else(input_col, other, cond)\n\n if is_categorical_dtype(self.dtype):\n result = build_categorical_column(\n categories=self._data[self.name].categories,\n codes=as_column(result.base_data, dtype=result.dtype),\n mask=result.base_mask,\n size=result.size,\n offset=result.offset,\n ordered=self._data[self.name].ordered,\n )\n\n if isinstance(self, cudf.Index):\n from cudf.core.index import as_index\n\n result = as_index(result, name=self.name)\n else:\n result = self._copy_construct(data=result)\n\n return self._mimic_inplace(result, inplace=inplace)\n\n def mask(self, cond, other=None, inplace=False):\n \"\"\"\n Replace values where the condition is True.\n\n Parameters\n ----------\n cond : bool Series/DataFrame, array-like\n Where cond is False, keep the original value.\n Where True, replace with corresponding value from other.\n Callables are not supported.\n other: scalar, list of scalars, Series/DataFrame\n Entries where cond is True are replaced with\n corresponding value from other. Callables are not\n supported. 
Default is None.\n\n DataFrame expects only Scalar or array like with scalars or\n dataframe with same dimension as self.\n\n Series expects only scalar or series like with same length\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n Returns\n -------\n Same type as caller\n\n Examples:\n ---------\n >>> import cudf\n >>> df = cudf.DataFrame({\"A\":[1, 4, 5], \"B\":[3, 5, 8]})\n >>> df.mask(df % 2 == 0, [-1, -1])\n A B\n 0 1 3\n 1 -1 5\n 2 5 -1\n\n >>> ser = cudf.Series([4, 3, 2, 1, 0])\n >>> ser.mask(ser > 2, 10)\n 0 10\n 1 10\n 2 2\n 3 1\n 4 0\n dtype: int64\n >>> ser.mask(ser > 2)\n 0 null\n 1 null\n 2 2\n 3 1\n 4 0\n dtype: int64\n \"\"\"\n\n if not hasattr(cond, \"__invert__\"):\n # We Invert `cond` below and call `where`, so\n # making sure the object supports\n # `~`(inversion) operator or `__invert__` method\n cond = cupy.asarray(cond)\n\n return self.where(cond=~cond, other=other, inplace=inplace)\n\n def _partition(self, scatter_map, npartitions, keep_index=True):\n\n output_table, output_offsets = libcudf.partitioning.partition(\n self, scatter_map, npartitions, keep_index\n )\n\n # due to the split limitation mentioned\n # here: https://github.com/rapidsai/cudf/issues/4607\n # we need to remove first & last elements in offsets.\n # TODO: Remove this after the above issue is fixed.\n output_offsets = output_offsets[1:-1]\n\n result = libcudf.copying.table_split(\n output_table, output_offsets, keep_index=keep_index\n )\n\n result = [self.__class__._from_table(tbl) for tbl in result]\n\n for frame in result:\n frame._copy_categories(self, include_index=keep_index)\n\n if npartitions:\n for i in range(npartitions - len(result)):\n result.append(self._empty_like(keep_index))\n\n return result\n\n @annotate(\"SCATTER_BY_MAP\", color=\"green\", domain=\"cudf_python\")\n def scatter_by_map(\n self, map_index, map_size=None, keep_index=True, **kwargs\n ):\n \"\"\"Scatter to a list of dataframes.\n\n Uses map_index to determine the destination\n of each row of the original DataFrame.\n\n Parameters\n ----------\n map_index : Series, str or list-like\n Scatter assignment for each row\n map_size : int\n Length of output list. Must be >= uniques in map_index\n keep_index : bool\n Conserve original index values for each row\n\n Returns\n -------\n A list of cudf.DataFrame objects.\n \"\"\"\n\n # map_index might be a column name or array,\n # make it a Column\n if isinstance(map_index, str):\n map_index = self._data[map_index]\n elif isinstance(map_index, cudf.Series):\n map_index = map_index._column\n else:\n map_index = as_column(map_index)\n\n # Convert float to integer\n if map_index.dtype == np.float:\n map_index = map_index.astype(np.int32)\n\n # Convert string or categorical to integer\n if isinstance(map_index, cudf.core.column.StringColumn):\n map_index = map_index.as_categorical_column(\n \"category\"\n ).as_numerical\n warnings.warn(\n \"Using StringColumn for map_index in scatter_by_map. \"\n \"Use an integer array/column for better performance.\"\n )\n elif isinstance(map_index, cudf.core.column.CategoricalColumn):\n map_index = map_index.as_numerical\n warnings.warn(\n \"Using CategoricalColumn for map_index in scatter_by_map. 
\"\n \"Use an integer array/column for better performance.\"\n )\n\n if kwargs.get(\"debug\", False) == 1 and map_size is not None:\n unique_count = map_index.unique_count()\n if map_size < unique_count:\n raise ValueError(\n \"ERROR: map_size must be >= %d (got %d).\"\n % (unique_count, map_size)\n )\n\n tables = self._partition(map_index, map_size, keep_index)\n\n return tables\n\n def dropna(self, axis=0, how=\"any\", subset=None, thresh=None):\n \"\"\"\n Drops rows (or columns) containing nulls from a Column.\n\n Parameters\n ----------\n axis : {0, 1}, optional\n Whether to drop rows (axis=0, default) or columns (axis=1)\n containing nulls.\n how : {\"any\", \"all\"}, optional\n Specifies how to decide whether to drop a row (or column).\n any (default) drops rows (or columns) containing at least\n one null value. all drops only rows (or columns) containing\n *all* null values.\n subset : list, optional\n List of columns to consider when dropping rows (all columns\n are considered by default). Alternatively, when dropping\n columns, subset is a list of rows to consider.\n thresh: int, optional\n If specified, then drops every row (or column) containing\n less than `thresh` non-null values\n\n\n Returns\n -------\n Copy of the DataFrame with rows/columns containing nulls dropped.\n \"\"\"\n if axis == 0:\n return self._drop_na_rows(how=how, subset=subset, thresh=thresh)\n else:\n return self._drop_na_columns(how=how, subset=subset, thresh=thresh)\n\n def _drop_na_rows(self, how=\"any\", subset=None, thresh=None):\n \"\"\"\n Drops null rows from `self`.\n\n how : {\"any\", \"all\"}, optional\n Specifies how to decide whether to drop a row.\n any (default) drops rows containing at least\n one null value. all drops only rows containing\n *all* null values.\n subset : list, optional\n List of columns to consider when dropping rows.\n thresh: int, optional\n If specified, then drops every row containing\n less than `thresh` non-null values.\n \"\"\"\n if subset is None:\n subset = self._column_names\n elif (\n not np.iterable(subset)\n or isinstance(subset, str)\n or isinstance(subset, tuple)\n and subset in self._data.names\n ):\n subset = (subset,)\n diff = set(subset) - set(self._data)\n if len(diff) != 0:\n raise KeyError(\"columns {!r} do not exist\".format(diff))\n subset_cols = [\n name for name, col in self._data.items() if name in subset\n ]\n if len(subset_cols) == 0:\n return self.copy(deep=True)\n result = self.__class__._from_table(\n libcudf.stream_compaction.drop_nulls(\n self, how=how, keys=subset, thresh=thresh\n )\n )\n result._copy_categories(self)\n return result\n\n def _drop_na_columns(self, how=\"any\", subset=None, thresh=None):\n \"\"\"\n Drop columns containing nulls\n \"\"\"\n out_cols = []\n\n if subset is None:\n df = self\n else:\n df = self.take(subset)\n\n if thresh is None:\n if how == \"all\":\n thresh = 1\n else:\n thresh = len(df)\n\n for col in self._data.names:\n if (len(df[col]) - df[col].null_count) < thresh:\n continue\n out_cols.append(col)\n\n return self[out_cols]\n\n def _apply_boolean_mask(self, boolean_mask):\n \"\"\"\n Applies boolean mask to each row of `self`,\n rows corresponding to `False` is dropped\n \"\"\"\n boolean_mask = as_column(boolean_mask)\n if boolean_mask.has_nulls:\n raise ValueError(\n \"cannot mask with boolean_mask containing null values\"\n )\n result = self.__class__._from_table(\n libcudf.stream_compaction.apply_boolean_mask(\n self, as_column(boolean_mask)\n )\n )\n result._copy_categories(self)\n return result\n\n def 
_quantiles(\n self,\n q,\n interpolation=\"LINEAR\",\n is_sorted=False,\n column_order=(),\n null_precedence=(),\n ):\n interpolation = libcudf.types.Interpolation[interpolation]\n\n is_sorted = libcudf.types.Sorted[\"YES\" if is_sorted else \"NO\"]\n\n column_order = [libcudf.types.Order[key] for key in column_order]\n\n null_precedence = [\n libcudf.types.NullOrder[key] for key in null_precedence\n ]\n\n result = self.__class__._from_table(\n libcudf.quantiles.quantiles(\n self,\n q,\n interpolation,\n is_sorted,\n column_order,\n null_precedence,\n )\n )\n\n result._copy_categories(self)\n return result\n\n def rank(\n self,\n axis=0,\n method=\"average\",\n numeric_only=None,\n na_option=\"keep\",\n ascending=True,\n pct=False,\n ):\n \"\"\"\n Compute numerical data ranks (1 through n) along axis.\n By default, equal values are assigned a rank that is the average of the\n ranks of those values.\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Index to direct ranking.\n method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n How to rank the group of records that have the same value\n (i.e. ties):\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups.\n numeric_only : bool, optional\n For DataFrame objects, rank only numeric columns if set to True.\n na_option : {'keep', 'top', 'bottom'}, default 'keep'\n How to rank NaN values:\n * keep: assign NaN rank to NaN values\n * top: assign smallest rank to NaN values if ascending\n * bottom: assign highest rank to NaN values if ascending.\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n Returns\n -------\n same type as caller\n Return a Series or DataFrame with data ranks as values.\n \"\"\"\n if method not in {\"average\", \"min\", \"max\", \"first\", \"dense\"}:\n raise KeyError(method)\n method_enum = libcudf.sort.RankMethod[method.upper()]\n if na_option not in {\"keep\", \"top\", \"bottom\"}:\n raise KeyError(na_option)\n\n # TODO code for selecting numeric columns\n source = self\n if numeric_only:\n warnings.warn(\"numeric_only=True is not implemented yet\")\n\n out_rank_table = libcudf.sort.rank_columns(\n source, method_enum, na_option, ascending, pct\n )\n\n return self._from_table(out_rank_table).astype(np.float64)\n\n def repeat(self, repeats, axis=None):\n \"\"\"Repeats elements consecutively\n\n Parameters\n ----------\n repeats : int, array, numpy array, or Column\n the number of times to repeat each element\n\n Example\n -------\n >>> import cudf as cudf\n >>> s = cudf.Series([0, 2]) # or DataFrame\n >>> s\n 0 0\n 1 2\n dtype: int64\n >>> s.repeat([3, 4])\n 0 0\n 0 0\n 0 0\n 1 2\n 1 2\n 1 2\n 1 2\n dtype: int64\n >>> s.repeat(2)\n 0 0\n 0 0\n 1 2\n 1 2\n dtype: int64\n >>>\n \"\"\"\n if axis is not None:\n raise NotImplementedError(\n \"Only axis=`None` supported at this time.\"\n )\n\n return self._repeat(repeats)\n\n def _repeat(self, count):\n if is_scalar(count):\n count = as_scalar(count)\n else:\n count = as_column(count)\n\n result = self.__class__._from_table(\n libcudf.filling.repeat(self, count)\n )\n\n result._copy_categories(self)\n return result\n\n def _fill(self, fill_values, begin, end, inplace):\n col_and_fill = zip(self._columns, 
fill_values)\n\n if not inplace:\n data_columns = (c._fill(v, begin, end) for (c, v) in col_and_fill)\n data = zip(self._column_names, data_columns)\n return self.__class__._from_table(Frame(data, self._index))\n\n for (c, v) in col_and_fill:\n c.fill(v, begin, end, inplace=True)\n\n return self\n\n def shift(self, periods=1, freq=None, axis=0, fill_value=None):\n \"\"\"Shift values by `periods` positions.\n \"\"\"\n assert axis in (None, 0) and freq is None\n return self._shift(periods)\n\n def _shift(self, offset, fill_value=None):\n data_columns = (col.shift(offset, fill_value) for col in self._columns)\n data = zip(self._column_names, data_columns)\n return self.__class__._from_table(Frame(data, self._index))\n\n def __array__(self, dtype=None):\n raise TypeError(\n \"Implicit conversion to a host NumPy array via __array__ is not allowed, \\\n To explicitly construct a GPU array, consider using \\\n cupy.asarray(...)\\nTo explicitly construct a \\\n host array, consider using .to_array()\"\n )\n\n def drop_duplicates(\n self,\n subset=None,\n keep=\"first\",\n nulls_are_equal=True,\n ignore_index=False,\n ):\n \"\"\"\n Drops rows in frame as per duplicate rows in `subset` columns from\n self.\n\n subset : list, optional\n List of columns to consider when dropping rows.\n keep : [\"first\", \"last\", False] first will keep first of duplicate,\n last will keep last of the duplicate and False drop all\n duplicate\n nulls_are_equal: null elements are considered equal to other null\n elements\n ignore_index: bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n \"\"\"\n if subset is None:\n subset = self._column_names\n elif (\n not np.iterable(subset)\n or isinstance(subset, str)\n or isinstance(subset, tuple)\n and subset in self._data.names\n ):\n subset = (subset,)\n diff = set(subset) - set(self._data)\n if len(diff) != 0:\n raise KeyError(\"columns {!r} do not exist\".format(diff))\n subset_cols = [name for name in self._column_names if name in subset]\n if len(subset_cols) == 0:\n return self.copy(deep=True)\n\n result = self._from_table(\n libcudf.stream_compaction.drop_duplicates(\n self,\n keys=subset,\n keep=keep,\n nulls_are_equal=nulls_are_equal,\n ignore_index=ignore_index,\n )\n )\n\n result._copy_categories(self)\n return result\n\n def replace(self, to_replace, replacement):\n copy_data = self._data.copy()\n\n for name, col in copy_data.items():\n if not (to_replace is None and replacement is None):\n try:\n (\n col_all_nan,\n col_replacement,\n col_to_replace,\n ) = _get_replacement_values(\n to_replace=to_replace,\n replacement=replacement,\n col_name=name,\n column=col,\n )\n\n copy_data[name] = col.find_and_replace(\n col_to_replace, col_replacement, col_all_nan\n )\n except KeyError:\n # Do not change the copy_data[name]\n pass\n\n result = self._from_table(Frame(copy_data, self.index))\n return result\n\n def _copy_categories(self, other, include_index=True):\n \"\"\"\n Utility that copies category information from `other`\n to `self`.\n \"\"\"\n for name, col, other_col in zip(\n self._data.keys(), self._data.values(), other._data.values()\n ):\n if isinstance(\n other_col, cudf.core.column.CategoricalColumn\n ) and not isinstance(col, cudf.core.column.CategoricalColumn):\n self._data[name] = build_categorical_column(\n categories=other_col.categories,\n codes=as_column(col.base_data, dtype=col.dtype),\n mask=col.base_mask,\n ordered=other_col.ordered,\n size=col.size,\n offset=col.offset,\n )\n if include_index:\n # include_index will 
still behave as False\n # in case of self._index being a RangeIndex\n if (\n self._index is not None\n and not isinstance(self._index, cudf.core.index.RangeIndex)\n and isinstance(\n other._index,\n (cudf.core.index.CategoricalIndex, cudf.MultiIndex),\n )\n ):\n self._index._copy_categories(other._index, include_index=False)\n # When other._index is a CategoricalIndex, there is\n # a possibility that the corresponding self._index is a GenericIndex\n # with codes. So to update even the class signature, we\n # have to call as_index.\n if isinstance(\n other._index, cudf.core.index.CategoricalIndex\n ) and not isinstance(\n self._index, cudf.core.index.CategoricalIndex\n ):\n self._index = cudf.core.index.as_index(self._index)\n return self\n\n def _unaryop(self, op):\n data_columns = (col.unary_operator(op) for col in self._columns)\n data = zip(self._column_names, data_columns)\n return self.__class__._from_table(Frame(data, self._index))\n\n def isnull(self):\n \"\"\"Identify missing values.\n \"\"\"\n data_columns = (col.isnull() for col in self._columns)\n data = zip(self._column_names, data_columns)\n return self.__class__._from_table(Frame(data, self._index))\n\n def isna(self):\n \"\"\"Identify missing values. Alias for `isnull`\n \"\"\"\n return self.isnull()\n\n def notnull(self):\n \"\"\"Identify non-missing values.\n \"\"\"\n data_columns = (col.notnull() for col in self._columns)\n data = zip(self._column_names, data_columns)\n return self.__class__._from_table(Frame(data, self._index))\n\n def notna(self):\n \"\"\"Identify non-missing values. Alias for `notnull`.\n \"\"\"\n return self.notnull()\n\n def interleave_columns(self):\n \"\"\"\n Interleave Series columns of a table into a single column.\n\n Converts the column major table `cols` into a row major column.\n Parameters\n ----------\n cols : input Table containing columns to interleave.\n\n Example\n -------\n >>> df = DataFrame([['A1', 'A2', 'A3'], ['B1', 'B2', 'B3']])\n >>> df\n 0 [A1, A2, A3]\n 1 [B1, B2, B3]\n >>> df.interleave_columns()\n 0 A1\n 1 B1\n 2 A2\n 3 B2\n 4 A3\n 5 B3\n\n Returns\n -------\n The interleaved columns as a single column\n \"\"\"\n if (\"category\" == self.dtypes).any():\n raise ValueError(\n \"interleave_columns does not support 'category' dtype.\"\n )\n\n result = self._constructor_sliced(\n libcudf.reshape.interleave_columns(self)\n )\n\n return result\n\n def tile(self, count):\n \"\"\"\n Repeats the rows from `self` DataFrame `count` times to form a\n new DataFrame.\n\n Parameters\n ----------\n self : input Table containing the rows to tile.\n count : Number of times to tile \"rows\". 
Must be non-negative.\n\n Example\n -------\n >>> df = DataFrame([[8, 4, 7], [5, 2, 3]])\n >>> count = 2\n >>> df.tile(count)\n 0 1 2\n 0 8 4 7\n 1 5 2 3\n 0 8 4 7\n 1 5 2 3\n\n Returns\n -------\n The table containing the tiled \"rows\".\n \"\"\"\n result = self.__class__._from_table(libcudf.reshape.tile(self, count))\n result._copy_categories(self)\n return result\n\n def searchsorted(\n self, values, side=\"left\", ascending=True, na_position=\"last\"\n ):\n \"\"\"Find indices where elements should be inserted to maintain order\n\n Parameters\n ----------\n values : Frame (shape must be consistent with self)\n Values to be hypothetically inserted into self\n side : str {‘left’, ‘right’} optional, default ‘left’\n If ‘left’, the index of the first suitable location found is given\n If ‘right’, return the last such index\n ascending : bool optional, default True\n Sorted Frame is in ascending order (otherwise descending)\n na_position : str {‘last’, ‘first’} optional, default ‘last’\n Position of null values in sorted order\n\n Returns\n -------\n 1-D cupy array of insertion points\n \"\"\"\n # Call libcudf++ search_sorted primitive\n from cudf.utils.dtypes import is_scalar\n\n scalar_flag = None\n if is_scalar(values):\n scalar_flag = True\n\n if not isinstance(values, Frame):\n values = as_column(values)\n if values.dtype != self.dtype:\n self = self.astype(values.dtype)\n values = values.as_frame()\n outcol = libcudf.search.search_sorted(\n self, values, side, ascending=ascending, na_position=na_position\n )\n\n # Return result as a cupy array if values is non-scalar\n # If values is scalar, result is expected to be scalar.\n result = cupy.asarray(outcol.data_array_view)\n if scalar_flag:\n return result[0].item()\n else:\n return result\n\n def _get_sorted_inds(self, ascending=True, na_position=\"last\"):\n \"\"\"\n Sort by the values.\n\n Parameters\n ----------\n ascending : bool or list of bool, default True\n If True, sort values in ascending order, otherwise descending.\n na_position : {‘first’ or ‘last’}, default ‘last’\n Argument ‘first’ puts NaNs at the beginning, ‘last’ puts NaNs\n at the end.\n Returns\n -------\n out_column_inds : cuDF Column of indices sorted based on input\n\n Difference from pandas:\n * Support axis='index' only.\n * Not supporting: inplace, kind\n * Ascending can be a list of bools to control per column\n \"\"\"\n\n # This needs to be updated to handle list of bools for ascending\n if ascending is True:\n if na_position == \"last\":\n na_position = 0\n elif na_position == \"first\":\n na_position = 1\n elif ascending is False:\n if na_position == \"last\":\n na_position = 1\n elif na_position == \"first\":\n na_position = 0\n else:\n warnings.warn(\n \"When using a sequence of booleans for `ascending`, \"\n \"`na_position` flag is not yet supported and defaults to \"\n \"treating nulls as greater than all numbers\"\n )\n na_position = 0\n\n # If given a scalar need to construct a sequence of length # of columns\n if np.isscalar(ascending):\n ascending = [ascending] * self._num_columns\n\n return libcudf.sort.order_by(self, ascending, na_position)\n\n def sin(self):\n return self._unaryop(\"sin\")\n\n def cos(self):\n return self._unaryop(\"cos\")\n\n def tan(self):\n return self._unaryop(\"tan\")\n\n def asin(self):\n return self._unaryop(\"asin\")\n\n def acos(self):\n return self._unaryop(\"acos\")\n\n def atan(self):\n return self._unaryop(\"atan\")\n\n def exp(self):\n return self._unaryop(\"exp\")\n\n def log(self):\n return 
self._unaryop(\"log\")\n\n def sqrt(self):\n return self._unaryop(\"sqrt\")\n\n @staticmethod\n def _validate_merge_cfg(\n lhs,\n rhs,\n left_on,\n right_on,\n on,\n how,\n left_index=False,\n right_index=False,\n lsuffix=None,\n rsuffix=None,\n ):\n \"\"\"\n Error for various combinations of merge input parameters\n \"\"\"\n len_left_on = len(left_on) if left_on is not None else 0\n len_right_on = len(right_on) if right_on is not None else 0\n\n # must actually support the requested merge type\n if how not in [\"left\", \"inner\", \"outer\", \"leftanti\", \"leftsemi\"]:\n raise NotImplementedError(\n \"{!r} merge not supported yet\".format(how)\n )\n\n # Passing 'on' with 'left_on' or 'right_on' is potentially ambiguous\n if on:\n if left_on or right_on:\n raise ValueError(\n 'Can only pass argument \"on\" OR \"left_on\" '\n 'and \"right_on\", not a combination of both.'\n )\n\n # Require same total number of columns to join on in both operands\n if not (len_left_on + left_index * len(lhs.index.names)) == (\n len_right_on + right_index * len(rhs.index.names)\n ):\n raise ValueError(\n \"Merge operands must have same number of join key columns\"\n )\n\n # If nothing specified, must have common cols to use implicitly\n same_named_columns = set(lhs._data.keys()) & set(rhs._data.keys())\n if not (left_index or right_index):\n if not (left_on or right_on):\n if len(same_named_columns) == 0:\n raise ValueError(\"No common columns to perform merge on\")\n\n for name in same_named_columns:\n if not (\n name in left_on\n and name in right_on\n and (left_on.index(name) == right_on.index(name))\n ):\n if not (lsuffix or rsuffix):\n raise ValueError(\n \"there are overlapping columns but \"\n \"lsuffix and rsuffix are not defined\"\n )\n\n if on:\n on_keys = [on] if not isinstance(on, list) else on\n for key in on_keys:\n if not (key in lhs._data.keys() and key in rhs._data.keys()):\n raise KeyError(\"Key {} not in both operands\".format(on))\n else:\n for key in left_on:\n if key not in lhs._data.keys():\n raise KeyError('Key \"{}\" not in left operand'.format(key))\n for key in right_on:\n if key not in rhs._data.keys():\n raise KeyError('Key \"{}\" not in right operand'.format(key))\n\n def _merge(\n self,\n right,\n on,\n left_on,\n right_on,\n left_index,\n right_index,\n how,\n sort,\n lsuffix,\n rsuffix,\n method,\n indicator,\n suffixes,\n ):\n\n lhs = self\n rhs = right\n\n from cudf.core.join import Merge\n\n mergeop = Merge(\n lhs,\n rhs,\n on,\n left_on,\n right_on,\n left_index,\n right_index,\n how,\n sort,\n lsuffix,\n rsuffix,\n method,\n indicator,\n suffixes,\n )\n to_return = mergeop.perform_merge()\n\n # If sort=True, Pandas would sort on the key columns in the\n # same order as given in 'on'. If the indices are used as\n # keys, the index will be sorted. 
If one index is specified,\n # the key column on the other side will be used to sort.\n # If no index is specified, return a new RangeIndex\n if sort:\n to_sort = self.__class__()\n if left_index and right_index:\n by = list(to_return._index._data.columns)\n if left_on and right_on:\n by.extend(to_return[mergeop.left_on]._data.columns)\n elif left_index:\n by = list(to_return[mergeop.right_on]._data.columns)\n elif right_index:\n by = list(to_return[mergeop.left_on]._data.columns)\n else:\n # left_on == right_on, or different names but same columns\n # in both cases we can sort by either\n by = list(to_return[mergeop.left_on]._data.columns)\n for i, col in enumerate(by):\n to_sort[i] = col\n inds = to_sort.argsort()\n to_return = to_return.take(\n inds, keep_index=(left_index or right_index)\n )\n return to_return\n else:\n return to_return\n\n def _is_sorted(self, ascending=None, null_position=None):\n \"\"\"\n Returns a boolean indicating whether the data of the Frame are sorted\n based on the parameters given. Does not account for the index.\n\n Parameters\n ----------\n self : Frame\n Frame whose columns are to be checked for sort order\n ascending : None or list-like of booleans\n None or list-like of boolean values indicating expected sort order\n of each column. If list-like, size of list-like must be\n len(columns). If None, all columns expected sort order is set to\n ascending. False (0) - ascending, True (1) - descending.\n null_position : None or list-like of booleans\n None or list-like of boolean values indicating desired order of\n nulls compared to other elements. If list-like, size of list-like\n must be len(columns). If None, null order is set to before. False\n (0) - before, True (1) - after.\n\n Returns\n -------\n returns : boolean\n Returns True, if sorted as expected by ``ascending`` and\n ``null_position``, False otherwise.\n \"\"\"\n return libcudf.sort.is_sorted(\n self, ascending=ascending, null_position=null_position\n )\n\n\ndef _get_replacement_values(to_replace, replacement, col_name, column):\n from cudf.utils import utils\n from pandas.api.types import is_dict_like\n\n all_nan = False\n\n if is_dict_like(to_replace) and replacement is None:\n replacement = list(to_replace.values())\n to_replace = list(to_replace.keys())\n elif not is_scalar(to_replace):\n if is_scalar(replacement):\n all_nan = replacement is None\n if all_nan:\n replacement = [replacement] * len(to_replace)\n # Do not broadcast numeric dtypes\n elif pd.api.types.is_numeric_dtype(column.dtype):\n if len(to_replace) > 0:\n replacement = [replacement]\n else:\n # If to_replace is empty, replacement has to be empty.\n replacement = []\n else:\n replacement = utils.scalar_broadcast_to(\n replacement,\n (len(to_replace),),\n np.dtype(type(replacement)),\n )\n else:\n # If both are non-scalar\n if len(to_replace) != len(replacement):\n raise ValueError(\n \"Replacement lists must be \"\n \"of same length.\"\n \"Expected {}, got {}.\".format(\n len(to_replace), len(replacement)\n )\n )\n else:\n if not is_scalar(replacement):\n raise TypeError(\n \"Incompatible types '{}' and '{}' \"\n \"for *to_replace* and *replacement*.\".format(\n type(to_replace).__name__, type(replacement).__name__\n )\n )\n to_replace = [to_replace]\n replacement = [replacement]\n\n if is_dict_like(to_replace) and is_dict_like(replacement):\n replacement = replacement[col_name]\n to_replace = to_replace[col_name]\n\n if is_scalar(replacement):\n replacement = [replacement]\n if is_scalar(to_replace):\n to_replace = 
[to_replace]\n\n if isinstance(replacement, list):\n all_nan = replacement.count(None) == len(replacement)\n return all_nan, replacement, to_replace\n" ]
[ [ "numpy.full", "numpy.isnan", "numpy.find_common_type", "numpy.iterable", "pandas.DataFrame", "pandas.api.types.is_numeric_dtype", "numpy.isscalar", "pandas.api.types.is_dtype_equal", "pandas.api.types.is_dict_like", "pandas.api.types.is_integer_dtype" ] ]
PaccMann/tape
[ "772a461732fc4044a1dee84d2688bf16960e272c" ]
[ "tape/utils/distributed_utils.py" ]
[ "import typing\nimport argparse\nimport os\nimport multiprocessing as mp\nimport sys\nimport signal\n\nimport torch\nimport torch.distributed as dist\nfrom torch.multiprocessing import _prctl_pr_set_pdeathsig # type: ignore\n\nfrom ..errors import EarlyStopping\n\n\ndef reduce_scalar(scalar: float) -> float:\n if dist.is_available() and dist.is_initialized():\n float_tensor = torch.cuda.FloatTensor([scalar]) # type: ignore\n dist.all_reduce(float_tensor)\n float_tensor /= dist.get_world_size()\n scalar = float_tensor.item()\n return scalar\n\n\ndef barrier_if_distributed() -> None:\n \"\"\"Raises a barrier if in a distributed context, otherwise does nothing.\"\"\"\n if dist.is_available() and dist.is_initialized():\n dist.barrier()\n\n\ndef _wrap(fn, kwargs, error_queue):\n # prctl(2) is a Linux specific system call.\n # On other systems the following function call has no effect.\n # This is set to ensure that non-daemonic child processes can\n # terminate if their parent terminates before they do.\n _prctl_pr_set_pdeathsig(signal.SIGINT)\n\n try:\n fn(**kwargs)\n except KeyboardInterrupt:\n pass # SIGINT; Killed by parent, do nothing\n except EarlyStopping:\n sys.exit(signal.SIGUSR1) # tape early stop exception\n except Exception:\n # Propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put(traceback.format_exc())\n sys.exit(1)\n\n\nclass ProcessContext:\n def __init__(self, processes, error_queues):\n self.error_queues = error_queues\n self.processes = processes\n self.sentinels = {\n process.sentinel: index\n for index, process in enumerate(processes)\n }\n\n def pids(self):\n return [int(process.pid) for process in self.processes]\n\n def join(self, timeout=None):\n r\"\"\"\n Tries to join one or more processes in this process context.\n If one of them exited with a non-zero exit status, this function\n kills the remaining processes and raises an exception with the cause\n of the first process exiting.\n\n Returns ``True`` if all processes have been joined successfully,\n ``False`` if there are more processes that need to be joined.\n\n Arguments:\n timeout (float): Wait this long before giving up on waiting.\n \"\"\"\n # Ensure this function can be called even when we're done.\n if len(self.sentinels) == 0:\n return True\n\n # Wait for any process to fail or all of them to succeed.\n ready = mp.connection.wait(\n self.sentinels.keys(),\n timeout=timeout,\n )\n error_index = None\n for sentinel in ready:\n index = self.sentinels.pop(sentinel)\n process = self.processes[index]\n process.join()\n if process.exitcode != 0:\n error_index = index\n break\n # Return if there was no error.\n if error_index is None:\n # Return whether or not all processes have been joined.\n return len(self.sentinels) == 0\n # Assume failure. 
Terminate processes that are still alive.\n for process in self.processes:\n if process.is_alive():\n process.terminate()\n process.join()\n\n # There won't be an error on the queue if the process crashed.\n if self.error_queues[error_index].empty():\n exitcode = self.processes[error_index].exitcode\n if exitcode == signal.SIGUSR1:\n return True\n elif exitcode < 0:\n name = signal.Signals(-exitcode).name\n raise Exception(\n \"process %d terminated with signal %s\" %\n (error_index, name)\n )\n else:\n raise Exception(\n \"process %d terminated with exit code %d\" %\n (error_index, exitcode)\n )\n\n original_trace = self.error_queues[error_index].get()\n msg = \"\\n\\n-- Process %d terminated with the following error:\\n\" % error_index\n msg += original_trace\n raise Exception(msg)\n\n\ndef launch_process_group(func: typing.Callable,\n args: argparse.Namespace,\n num_processes: int,\n num_nodes: int = 1,\n node_rank: int = 0,\n master_addr: str = \"127.0.0.1\",\n master_port: int = 29500,\n join: bool = True,\n daemon: bool = False):\n # world size in terms of number of processes\n dist_world_size = num_processes * num_nodes\n\n # set PyTorch distributed related environmental variables\n current_env = os.environ.copy()\n current_env[\"MASTER_ADDR\"] = master_addr\n current_env[\"MASTER_PORT\"] = str(master_port)\n current_env[\"WORLD_SIZE\"] = str(dist_world_size)\n if 'OMP_NUM_THREADS' not in os.environ and num_processes > 1:\n current_env[\"OMP_NUM_THREADS\"] = str(4)\n\n error_queues = []\n processes = []\n\n for local_rank in range(num_processes):\n # each process's rank\n dist_rank = num_processes * node_rank + local_rank\n current_env[\"RANK\"] = str(dist_rank)\n current_env[\"LOCAL_RANK\"] = str(local_rank)\n args.local_rank = local_rank\n\n error_queue: mp.SimpleQueue[Exception] = mp.SimpleQueue()\n kwargs = {'args': args, 'env': current_env}\n process = mp.Process(\n target=_wrap,\n args=(func, kwargs, error_queue),\n daemon=daemon)\n process.start()\n error_queues.append(error_queue)\n processes.append(process)\n\n process_context = ProcessContext(processes, error_queues)\n if not join:\n return process_context\n\n while not process_context.join():\n pass\n" ]
[ [ "torch.distributed.is_available", "torch.distributed.get_world_size", "torch.multiprocessing._prctl_pr_set_pdeathsig", "torch.distributed.is_initialized", "torch.distributed.all_reduce", "torch.cuda.FloatTensor", "torch.distributed.barrier" ] ]
daivuong7696/open_nsfw
[ "41c0acb286c9d858adf5cd8ddb6c2eaa4ad18944" ]
[ "classify_nsfw.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nCopyright 2016 Yahoo Inc.\nLicensed under the terms of the 2 clause BSD license. \nPlease see LICENSE file in the project root for terms.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os\nfrom tqdm import tqdm\nimport sys\nimport argparse\nimport glob\nimport time\nfrom PIL import Image\n\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import BytesIO as StringIO\n \nimport caffe\nimport cv2\nfrom class_activation_map import save_CAM_caffe\n\ntry:\n caffe_root = os.environ['CAFFE_ROOT'] + '/'\nexcept KeyError:\n raise KeyError(\"Define CAFFE_ROOT in ~/.bashrc\")\n\nimport visualize_result\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_curve, precision_recall_curve\n\n\nclass_dict = {\n 'notsexy': 0,\n 'sexy': 1\n}\n\n\ndef resize_image(data, sz=(256, 256)):\n \"\"\"\n Resize image. Please use this resize logic for best results instead of the \n caffe, since it was used to generate training dataset \n :param str data:\n The image data\n :param sz tuple:\n The resized image dimensions\n :returns bytearray:\n A byte array with the resized image\n \"\"\"\n img_data = data\n im = Image.open(StringIO(img_data))\n\n if im.mode != \"RGB\":\n im = im.convert('RGB')\n imr = im.resize(sz, resample=Image.BILINEAR)\n fh_im = StringIO()\n imr.save(fh_im, format='JPEG')\n fh_im.seek(0)\n return bytearray(fh_im.read())\n\ndef caffe_preprocess(caffe_net, image_data,\n caffe_transformer=None):\n\n img_data_rs = resize_image(image_data, sz=(256, 256))\n image = caffe.io.load_image(StringIO(img_data_rs))\n\n H, W, _ = image.shape\n _, _, h, w = caffe_net.blobs['data'].data.shape\n h_off = int(max((H - h) / 2, 0))\n w_off = int(max((W - w) / 2, 0))\n crop = image[h_off:h_off + h, w_off:w_off + w, :]\n transformed_image = caffe_transformer.preprocess('data', crop)\n transformed_image.shape = (1,) + transformed_image.shape\n\n return image, transformed_image\n\n\ndef caffe_compute(transformed_image,\n caffe_net=None, output_layers=None):\n \"\"\"\n Run a Caffe network on an input image after preprocessing it to prepare\n it for Caffe.\n\n :param PIL.Image pimg:\n PIL image to be input into Caffe.\n :param caffe.Net caffe_net:\n A Caffe network with which to process pimg afrer preprocessing.\n :param list output_layers:\n A list of the names of the layers from caffe_net whose outputs are to\n to be returned. 
If this is None, the default outputs for the network\n are returned.\n :return:\n Returns the requested outputs from the Caffe net.\n \"\"\"\n\n if caffe_net is not None:\n\n # Grab the default output names if none were requested specifically.\n if output_layers is None:\n output_layers = caffe_net.outputs\n\n input_name = caffe_net.inputs[0]\n all_outputs = caffe_net.forward_all(blobs=output_layers,\n **{input_name: transformed_image})\n\n\n outputs = all_outputs[output_layers[0]][0].astype(float)\n\n return outputs\n else:\n return []\n\n\ndef main(argv):\n pycaffe_dir = os.path.dirname(__file__)\n\n parser = argparse.ArgumentParser()\n # Required arguments: input file.\n parser.add_argument(\n \"--input_file\",\n help=\"Path to the input image file\"\n )\n parser.add_argument(\n \"--input_label_file\",\n help=\"Path to the input label file\"\n )\n # Optional arguments.\n parser.add_argument(\n \"--model_def\",\n help=\"Model definition file.\"\n )\n parser.add_argument(\n \"--pretrained_model\",\n help=\"Trained model weights file.\"\n )\n parser.add_argument(\n \"--threshold\",\n default=0.5,\n type=float,\n help=\"Path to the input image file\"\n )\n parser.add_argument(\n \"--save_cam_path\",\n help=\"Save class activation map flag\"\n )\n parser.add_argument(\n \"--save_to_folder_path\",\n help=\"Classify images and store them to scores folder\"\n )\n parser.add_argument(\n \"--save_result_path\",\n default='result',\n help=\"Directory where to save ROC curve, confusion matrix\"\n )\n\n args = parser.parse_args()\n\n # Pre-load caffe model.\n nsfw_net = caffe.Net(args.model_def, # pylint: disable=invalid-name\n args.pretrained_model, caffe.TEST)\n\n # Load transformer\n # Note that the parameters are hard-coded for best results\n caffe_transformer = caffe.io.Transformer({'data': nsfw_net.blobs['data'].data.shape})\n # move image channels to outermost\n caffe_transformer.set_transpose('data', (2, 0, 1))\n # subtract the dataset-mean value in each channel\n caffe_transformer.set_mean('data', np.array([104, 117, 123]))\n # rescale from [0, 1] to [0, 255]\n caffe_transformer.set_raw_scale('data', 255)\n # swap channels from RGB to BGR\n caffe_transformer.set_channel_swap('data', (2, 1, 0))\n\n # Preprocess and compute image\n # One image only\n if args.input_file is not None:\n with open(args.input_file, 'rb') as f:\n image_data = f.read()\n # Preprocessing\n original_image, transformed_image = caffe_preprocess(\n caffe_net=nsfw_net, image_data=image_data,\n caffe_transformer=caffe_transformer\n )\n # Calculating scores\n scores = caffe_compute(\n transformed_image=transformed_image, caffe_net=nsfw_net,\n output_layers=['prob']\n )\n # Calculating class activation map\n if args.save_cam_path is not None:\n if not os.path.isdir(args.save_cam_path):\n os.mkdir(args.save_cam_path)\n out_layer = 'fc_nsfw'\n last_conv = 'conv_stage3_block2_branch2c'\n weights_LR = nsfw_net.params[out_layer][0].data\n activation_lastconv = nsfw_net.blobs[last_conv].data\n save_CAM_caffe(image_name=args.input_file,\n image=original_image, fc_weights=weights_LR,\n activation_lastconv=activation_lastconv,\n class_dict=class_dict, class_name='sexy',\n dest_folder='/home/daivuong/Desktop',\n image_size=224\n )\n print(\"NSFW score: {}\".format(scores[1]))\n # Input is a file of many images\n elif args.input_label_file is not None:\n scores = []\n df = pd.read_csv(\n args.input_label_file,\n header=None, delimiter=' ',\n names=['file_name', 'label']\n )\n for i in tqdm(range(len(df))):\n with open(df.iloc[i, 0], 
'rb') as f:\n image_data = f.read()\n # Preprocessing\n try:\n original_image, transformed_image = caffe_preprocess(\n caffe_net=nsfw_net, image_data=image_data,\n caffe_transformer=caffe_transformer\n )\n except:\n print(\"Cannot load images\")\n continue\n # Calculating scores\n sexy_score = caffe_compute(\n transformed_image=transformed_image, caffe_net=nsfw_net,\n output_layers=['prob']\n )[1]\n scores.append(sexy_score)\n\n\n # Caclulating class activation map\n # It will store predicted images into seperated\n # folders based on rounded scores (from 0.0 to 1.0)\n # and these two folders will be stored into ground\n # truth folder\n if args.save_cam_path is not None:\n if not os.path.isdir(args.save_cam_path):\n os.mkdir(args.save_cam_path)\n\n # Ground truth folder\n label_path = os.path.join(\n args.save_cam_path,\n str(df.iloc[i, 1])\n )\n if not os.path.isdir(label_path):\n os.mkdir(label_path)\n\n # Rounded scores folders\n dest = os.path.join(\n label_path, str(round(sexy_score, 1))\n )\n if not os.path.isdir(dest):\n os.mkdir(dest)\n\n # Calculate CAM\n out_layer = 'fc_nsfw'\n last_conv = 'conv_stage3_block2_branch2c'\n weights_LR = nsfw_net.params[out_layer][0].data\n activation_lastconv = nsfw_net.blobs[last_conv].data\n\n\n save_CAM_caffe(image_name=df.iloc[i, 0],\n image=original_image, fc_weights=weights_LR,\n activation_lastconv=activation_lastconv,\n class_dict=class_dict, class_name='sexy',\n dest_folder=dest,\n image_size=256\n )\n if args.save_to_folder_path is not None:\n if not os.path.isdir(args.save_to_folder_path):\n os.mkdir(args.save_to_folder_path)\n\n # Ground truth folder\n label_path = os.path.join(\n args.save_to_folder_path,\n str(df.iloc[i, 1])\n )\n if not os.path.isdir(label_path):\n os.mkdir(label_path)\n\n # Rounded scores folders\n dest = os.path.join(\n label_path, str(round(sexy_score, 1))\n )\n if not os.path.isdir(dest):\n os.mkdir(dest)\n src = df.iloc[i, 0]\n dst = os.path.join(dest, src.split('/')[-1])\n os.rename(src, dst)\n \n\n\n df['scores'] = scores\n df['NSFW'] = (df['scores'] >= args.threshold)\n # From boolean to int\n df['NSFW'] = df['NSFW'] + 0\n y = df['label']\n y_pred = df['NSFW']\n\n # confusion matrix and classification report visualization\n target_names = ['nosexy', 'sexy']\n cnf_matrix = confusion_matrix(df['label'], df['NSFW'])\n report = classification_report(y, y_pred, target_names=target_names)\n file_name = args.pretrained_model.split('/')[-1].split('.')[0] + '_cnf_matrix.png'\n visualize_result.save_confusion_matrix_classification_report(cnf_matrix=cnf_matrix, \n classification_report=report,\n class_names=target_names,\n file_name=file_name)\n \n # Accuracy\n accuracy = accuracy_score(y, y_pred)\n print(\"Accuracy: {}\".format(accuracy))\n\n # Plot ROC curve\n file_name=args.pretrained_model.split('/')[-1].split('.')[0] + '_roc_curve.png'\n fpr, tpr, thresholds = roc_curve(y, df['scores'], pos_label=1)\n visualize_result.plot_roc_curve(fpr, tpr, file_name=file_name)\n \n # Precision/recall curve\n file_name = args.pretrained_model.split('/')[-1].split('.')[0] + '_precision_recall.png'\n precisions, recalls, thresholds = precision_recall_curve(y, df['scores'])\n visualize_result.plot_precision_recall_vs_threshold(precisions, recalls, thresholds, file_name=file_name)\n\n # Score result\n file_name = args.pretrained_model.split('/')[-1].split('.')[0] + '_result.txt'\n df[['file_name', 'label', 'scores', 'NSFW']].to_csv(\n file_name, sep=' ', header=None, index=None)\n\nif __name__ == '__main__':\n main(sys.argv)\n" ]
[ [ "matplotlib.use", "numpy.array", "sklearn.metrics.confusion_matrix", "sklearn.metrics.precision_recall_curve", "sklearn.metrics.accuracy_score", "sklearn.metrics.classification_report", "pandas.read_csv", "sklearn.metrics.roc_curve" ] ]
PyXRD/pyxrd
[ "26bacdf64f3153fa74b8caa62e219b76d91a55c1" ]
[ "pyxrd/mixture/views/edit_mixture_view.py" ]
[ "# coding=UTF-8\n# ex:ts=4:sw=4:et=on\n\n# Copyright (c) 2013, Mathijs Dumon\n# All rights reserved.\n# Complete license can be found in the LICENSE file.\n\nfrom pkg_resources import resource_filename # @UnresolvedImport\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nimport numpy as np\n\nfrom pyxrd.generic.views import BaseView\nfrom pyxrd.generic.views.validators import FloatEntryValidator\n\nclass EditMixtureView(BaseView):\n builder = resource_filename(__name__, \"glade/edit_mixture.glade\")\n top = \"edit_mixture\"\n\n base_width = 4\n base_height = 5\n\n matrix_widget = \"tbl_matrix\"\n wrapper_widget = \"tbl_wrapper\"\n widget_format = \"mixture_%s\"\n\n def __init__(self, *args, **kwargs):\n BaseView.__init__(self, *args, **kwargs)\n\n self.parent.set_title(\"Edit Mixtures\")\n self.matrix = self[self.matrix_widget]\n self.wrapper = self[self.wrapper_widget]\n\n self.labels = [\n self[\"lbl_scales\"], self[\"lbl_fractions\"], self[\"lbl_phases\"], self[\"lbl_bgshifts\"], self[\"lbl_specimens\"], \n self[\"mixture_auto_scales\"], self[\"mixture_auto_bg\"] \n ]\n\n self[\"scolled_window\"].set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)\n # TODO FIXME self.matrix.connect(\"size-request\", self.on_size_requested)\n\n self.reset_view()\n\n def reset_view(self):\n def remove(item):\n if not item in self.labels: self.matrix.remove(item)\n self.matrix.foreach(remove)\n self.matrix.resize(self.base_height, self.base_width)\n\n self.phase_inputs = []\n self.fraction_inputs = []\n self.fraction_checks = []\n self.specimen_combos = []\n self.scale_inputs = []\n self.bgs_inputs = []\n self.phase_combos = np.empty(shape=(0, 0), dtype=np.object_) # 2D list\n\n self.on_size_requested()\n\n def on_size_requested(self, *args):\n sr = self.matrix.size_request()\n self[self.top].set_size_request(sr.width + 100, -1)\n\n def set_edit_view(self, view):\n if self._on_sr_id is not None and self.child_view is not None:\n self.child_view.disconnect(self._on_sr_id)\n self.edit_view = view\n self.child_view = view.get_top_widget()\n self._add_child_view(self.child_view, self[self.edit_view_container])\n if isinstance(self[self.edit_view_container], Gtk.ScrolledWindow):\n sr = self.child_view.get_size_request()\n self[self.edit_view_container].set_size_request(sr[0], -1)\n\n\n\n def update_all(self, fractions, scales, bgs):\n for i, fraction in enumerate(fractions):\n if not i >= len(self.fraction_inputs):\n self.fraction_inputs[i].set_text(str(fraction))\n for i, scale in enumerate(scales):\n if not i >= len(self.scale_inputs):\n self.scale_inputs[i].set_text(str(scale))\n for i, bgs in enumerate(bgs):\n if not i >= len(self.bgs_inputs):\n self.bgs_inputs[i].set_text(str(bgs))\n\n def add_phase_slot(self, phase_store, del_phase_callback, label_callback, check_callback, fraction_callback, combo_callback, label, fraction, phases):\n r, c = self.matrix.get_property('n_rows'), self.matrix.get_property('n_columns')\n self.matrix.resize(r + 1, c)\n\n del_icon = Gtk.Image.new()\n del_icon.set_from_stock (\"192-circle-remove\", Gtk.IconSize.SMALL_TOOLBAR)\n new_phase_del_btn = Gtk.Button.new()\n new_phase_del_btn.set_image(del_icon)\n rid = new_phase_del_btn.connect(\"clicked\", del_phase_callback)\n setattr(new_phase_del_btn, \"deleventid\", rid)\n self.matrix.attach(new_phase_del_btn, 0, 1, r, r + 1, Gtk.AttachOptions.FILL, 0)\n\n new_phase_input = self._get_new_input(label, callback=label_callback)\n self.phase_inputs.append(new_phase_input)\n 
self.matrix.attach(new_phase_input, 1, 2, r, r + 1, Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL, 0)\n\n new_fraction_check = self._get_new_check(callback=check_callback)\n self.fraction_checks.append(new_fraction_check)\n self.matrix.attach(new_fraction_check, 2, 3, r, r + 1, Gtk.AttachOptions.FILL, 0)\n\n new_fraction_input = self._get_new_input(str(fraction), callback=fraction_callback)\n FloatEntryValidator(new_fraction_input)\n self.fraction_inputs.append(new_fraction_input)\n self.matrix.attach(new_fraction_input, 3, 4, r, r + 1, Gtk.AttachOptions.FILL, 0)\n\n self.phase_combos.resize((c - self.base_width, r + 1 - self.base_height))\n for col in range(c - self.base_width):\n mcol, mrow = r - self.base_height, col\n self._add_new_phase_combo(phase_store, phase_store.c_name, phases[mrow, mcol], mrow, mcol, combo_callback)\n\n self.wrapper.show_all()\n\n def add_specimen_slot(self, phase_store, specimen_store, del_specimen_callback, scale_callback, bgs_callback, specimen_callback, combo_callback, scale, bgs, specimen, phases):\n r, c = self.matrix.get_property('n_rows'), self.matrix.get_property('n_columns')\n self.matrix.resize(r, c + 1)\n\n del_icon = Gtk.Image.new()\n del_icon.set_from_stock(\"192-circle-remove\", Gtk.IconSize.SMALL_TOOLBAR)\n new_specimen_del_btn = Gtk.Button.new()\n new_specimen_del_btn.set_image(del_icon)\n rid = new_specimen_del_btn.connect(\"clicked\", del_specimen_callback)\n setattr(new_specimen_del_btn, \"deleventid\", rid)\n self.matrix.attach(new_specimen_del_btn, c, c + 1, 0, 1, Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL, 0)\n\n new_specimen_combo = self._get_new_combo(specimen_store, specimen_store.c_name, default=specimen, callback=specimen_callback)\n self.specimen_combos.append(new_specimen_combo)\n self.matrix.attach(new_specimen_combo, c, c + 1, 1, 2, Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL, 0)\n\n new_bgs_input = self._get_new_input(str(bgs), callback=bgs_callback)\n FloatEntryValidator(new_bgs_input)\n self.bgs_inputs.append(new_bgs_input)\n self.matrix.attach(new_bgs_input, c, c + 1, 2, 3, Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL, 0)\n\n new_scale_input = self._get_new_input(str(scale), callback=scale_callback)\n FloatEntryValidator(new_scale_input)\n self.scale_inputs.append(new_scale_input)\n self.matrix.attach(new_scale_input, c, c + 1, 3, 4, Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL, 0)\n\n self.phase_combos.resize((c + 1 - self.base_width, r - self.base_height))\n for row in range(r - self.base_height):\n mcol, mrow = row, c - self.base_width\n self._add_new_phase_combo(phase_store, phase_store.c_name, phases[mrow, mcol], mrow, mcol, combo_callback)\n self.wrapper.show_all()\n\n def _get_new_check(self, callback=None):\n \"\"\"\n Creates a new toggle button.\n \"\"\"\n new_check = Gtk.CheckButton.new()\n new_check.set_active(True)\n new_check.set_tooltip_text(\"Tick this box if you want to include this fraction in optimizations and refinements\")\n if callback is not None: new_check.connect(\"toggled\", callback)\n return new_check\n\n def _get_new_input(self, text=\"\", width=7, callback=None):\n \"\"\"\n Creates a new text input box.\n \"\"\"\n new_input = Gtk.Entry.new()\n new_input.set_text(text)\n new_input.set_alignment(0.0)\n new_input.set_width_chars(width)\n if callback is not None: new_input.connect(\"changed\", callback)\n return new_input\n\n def _add_new_phase_combo(self, model, text_column, default, r, c, callback):\n \"\"\"\n Creates a new 'phase slot' combo box, and adds it to the table 
at\n the given row and column indices.\n \"\"\"\n new_phase_combo = self._get_new_combo(model, text_column, default, callback, r, c)\n self.phase_combos[r, c] = new_phase_combo\n self.matrix.attach(new_phase_combo, self.base_width + r, self.base_width + r + 1, self.base_height + c, self.base_height + c + 1, Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL, 0)\n\n def _get_new_combo(self, model, text_column, default, callback, *args):\n \"\"\"\n Creates a new combo box with the given model as ListStore, setting\n the given column as text column, the given default value set as \n active row, and connecting the given callback with 'changed' signal.\n \"\"\"\n combobox = Gtk.ComboBox.new_with_model(model)\n combobox.set_size_request(75, -1)\n cell = Gtk.CellRendererText.new()\n combobox.pack_start(cell, True)\n combobox.add_attribute(cell, 'text', text_column)\n if default is not None:\n index = model.on_get_path(default)[0]\n combobox.set_active(index)\n combobox.connect(\"changed\", callback, *args)\n return combobox\n\n pass # end of class\n" ]
[ [ "numpy.empty" ] ]
JiahaoYao/torchdrug
[ "39ad8c729542c1c8aab490635106b4ee890558a6" ]
[ "torchdrug/data/dataloader.py" ]
[ "from collections import deque\n\nimport torch\nfrom torch._six import container_abcs, string_classes, int_classes\n\nfrom torchdrug import data\n\n\ndef graph_collate(batch):\n \"\"\"\n Convert any list of same nested container into a container of tensors.\n\n For instances of :class:`data.Graph <torchdrug.data.Graph>`, they are collated\n by :meth:`data.Graph.pack <torchdrug.data.Graph.pack>`.\n\n Parameters:\n batch (list): list of samples with the same nested container\n \"\"\"\n elem = batch[0]\n if isinstance(elem, torch.Tensor):\n out = None\n if torch.utils.data.get_worker_info() is not None:\n numel = sum([x.numel() for x in batch])\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return torch.stack(batch, 0, out=out)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, data.Graph):\n return elem.pack(batch)\n elif isinstance(elem, container_abcs.Mapping):\n return {key: graph_collate([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, container_abcs.Sequence):\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('Each element in list of batch should be of equal size')\n return [graph_collate(samples) for samples in zip(*batch)]\n\n raise TypeError(\"Can't collate data with type `%s`\" % type(elem))\n\n\nclass DataLoader(torch.utils.data.DataLoader):\n \"\"\"\n Extended data loader for batching graph structured data.\n\n See `torch.utils.data.DataLoader`_ for more details.\n\n .. _torch.utils.data.DataLoader:\n https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n\n Parameters:\n dataset (Dataset): dataset from which to load the data\n batch_size (int, optional): how many samples per batch to load\n shuffle (bool, optional): set to ``True`` to have the data reshuffled at every epoch\n sampler (Sampler, optional): sampler that draws single sample from the dataset\n batch_sampler (Sampler, optional): sampler that draws a mini-batch of data from the dataset\n num_workers (int, optional): how many subprocesses to use for data loading\n collate_fn (callable, optional): merge a list of samples into a mini-batch\n kwargs: keyword arguments for `torch.utils.data.DataLoader`_\n \"\"\"\n def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, num_workers=0,\n collate_fn=graph_collate, **kwargs):\n super(DataLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn,\n **kwargs)\n\n\nclass DataQueue(torch.utils.data.Dataset):\n\n def __init__(self):\n self.queue = deque()\n\n def append(self, item):\n self.queue.append(item)\n\n def pop(self):\n self.queue.popleft()\n\n def __getitem__(self, index):\n return self.queue[index]\n\n def __len__(self):\n return len(self.deque)\n\n\nclass ExperienceReplay(torch.utils.data.DataLoader):\n\n def __init__(self, cache_size, batch_size=1, shuffle=True, **kwargs):\n super(ExperienceReplay, self).__init__(DataQueue(), batch_size, shuffle, **kwargs)\n self.cache_size = cache_size\n\n def update(self, items):\n for item in items:\n self.dataset.append(item)\n while len(self.dataset) > self.cache_size:\n self.dataset.pop()\n\n @property\n def cold(self):\n return len(self.dataset) < self.cache_size" ]
[ [ "torch.stack", "torch.tensor", "torch.utils.data.get_worker_info" ] ]
BeckResearchLab/SBMLLint
[ "a5f2b1ad691c192e456e2c0b5d208d921a933a4f" ]
[ "SBMLLint/common/reaction.py" ]
[ "\"\"\"Chemical Reaction.\"\"\"\n\nfrom SBMLLint.common import constants as cn\nfrom SBMLLint.common.molecule import Molecule, MoleculeStoichiometry\n\nimport numpy as np\n\n\nREACTION_SEPARATOR = \"->\"\n\n\n################# FUNCTIONS ###################\ndef getMolecules(libsbml_reaction, func):\n \"\"\"\n Constructs molecules for the species returned by function.\n :param Function func: gets libsbml.SpeciesReference\n :return list-MoleculeStoichiometry:\n \"\"\"\n species = func(libsbml_reaction)\n molecule_stoichiometrys = []\n for spc in species:\n molecule = Molecule.getMolecule(spc.species)\n if molecule is None:\n molecule = Molecule(spc.species)\n molecule_stoichiometrys.append(MoleculeStoichiometry(\n molecule,\n spc.getStoichiometry())\n )\n return molecule_stoichiometrys\n\n\n################# Classes ###################\nclass Reaction(object):\n\n def __init__(self, libsbml_reaction):\n self.reactants = self.makeMoleculeStoichiometrys(\n libsbml_reaction.getReactant,\n libsbml_reaction.getNumReactants)\n self.products = self.makeMoleculeStoichiometrys(\n libsbml_reaction.getProduct,\n libsbml_reaction.getNumProducts)\n if libsbml_reaction.getKineticLaw() is not None:\n self.kinetics_law = libsbml_reaction.getKineticLaw().formula\n else:\n self.kinetics_law = None\n self.label = libsbml_reaction.getId()\n self.identifier = self.makeIdentifier(is_include_kinetics=True)\n self.category = self.getCategory()\n self.kinetics_terms = self.getKineticsTerms(libsbml_reaction)\n\n def makeMoleculeStoichiometrys(self, func_get_one, func_get_num):\n \"\"\"\n Creates a list of MoleculeStoichiometry\n :param Function func_get_one: get one element by index\n :param Function func_get_num: get number of elements\n :return list-MoleculeStoichiometry:\n \"\"\"\n result = []\n collection = [func_get_one(n) for n in range(func_get_num())]\n for s_r in collection:\n molecule = Molecule(s_r.species)\n stoich = s_r.getStoichiometry()\n result.append(MoleculeStoichiometry(molecule, stoich))\n return result\n\n def getId(self, is_include_kinetics=True, is_include_label=True):\n \"\"\"\n Constructs an ID that may be a substring\n of the the full reaction identifier.\n :param bool is_include_kinetics: Include the kinetics law\n :return str:\n \"\"\"\n result = self.identifier\n if not is_include_kinetics:\n pos = result.find(cn.KINETICS_SEPARATOR)\n if pos > 0:\n result = result[:pos]\n if not is_include_label:\n pos = result.find(cn.LABEL_SEPARATOR)\n if pos > 0:\n result = result[pos+2:] # Eliminate the separator and space\n return result\n\n def makeIdentifier(self, is_include_kinetics=True):\n \"\"\"\n Provides a string representation of the reaction\n :param bool is_include_kinetics: include the kinetics formula\n :return str:\n \"\"\"\n def makeStoichiometryString(molecule_stoichiometry):\n num = molecule_stoichiometry.stoichiometry\n if np.isclose(num, 1.0):\n return ''\n else:\n return \"%2.2f \" % num\n #\n def makeTermCollection(molecule_stoichiometries):\n \"\"\"\n Formats a set of terms with stoichiometries.\n :param list-MoleculeStoichiometry:\n :return str:\n \"\"\"\n term_collection = ''\n for m_s in molecule_stoichiometries:\n term = \"%s%s\" % (makeStoichiometryString(m_s), str(m_s.molecule))\n if len(term_collection) == 0:\n term_collection += term\n else:\n term_collection += \" + \" + term\n return term_collection\n #\n reactant_collection = makeTermCollection(self.reactants)\n product_collection = makeTermCollection(self.products)\n if is_include_kinetics:\n if self.kinetics_law 
is None:\n formula_str = ''\n else:\n formula_str = \"; \" + self.kinetics_law\n else:\n formula_str = ''\n reaction_str = \"%s: %s -> %s\" % (self.label,\n reactant_collection, product_collection)\n reaction_str = reaction_str + formula_str\n return reaction_str\n\n def __repr__(self):\n return self.identifier\n\n def getCategory(self):\n \"\"\"\n :return str: reaction category\n \"\"\"\n num_reactants = len([r.molecule for r in self.reactants \\\n if r.molecule.name!=cn.EMPTYSET])\n num_products = len([p.molecule for p in self.products \\\n if p.molecule.name!=cn.EMPTYSET])\n stoichiometry_reactants = sum([r.stoichiometry for r \\\n in self.reactants \\\n if r.molecule.name!=cn.EMPTYSET])\n stoichiometry_products = sum([p.stoichiometry for p \\\n in self.products \\\n if p.molecule.name!=cn.EMPTYSET])\n for reaction_category in cn.REACTION_CATEGORIES:\n if reaction_category.predicate(num_reactants, num_products, \n stoichiometry_reactants, \n stoichiometry_products):\n return reaction_category.category\n raise ValueError(\"Reaction category not found.\")\n\n def isEqual(self, other_reaction):\n \"\"\"\n Checks if two reactions are the same.\n :param Reaction other_reaction:\n :return bool:\n \"\"\"\n return self.identifier == other_reaction.identifier\n\n @classmethod\n def initialize(cls, simple):\n \"\"\"\n :param SimpleSBML simple:\n \"\"\"\n cls.reactions = []\n [Reaction(r) for r in simple.reactions]\n\n def getKineticsTerms(self, libsbml_reaction):\n \"\"\"\n Gets the terms used in the kinetics law for the reaction\n :param tesbml.libsbml.Reaction libsbml_reaction:\n :return list-of-str: names of the terms\n \"\"\"\n terms = []\n law = libsbml_reaction.getKineticLaw()\n if law is not None:\n math = law.getMath()\n asts = [math]\n while len(asts) > 0:\n this_ast = asts.pop()\n if this_ast.isName():\n terms.append(this_ast.getName())\n else:\n pass\n num = this_ast.getNumChildren()\n for idx in range(num):\n asts.append(this_ast.getChild(idx))\n return terms\n\n @classmethod\n def find(cls, reactions, category=cn.REACTION_1_1):\n return [r for r in reactions if r.category == category]\n \n" ]
[ [ "numpy.isclose" ] ]
sshnan7/deep-high-resolution-net.pytorch
[ "3261cfcf85f3a2d2a5852eb1714c04c7f52c47e0" ]
[ "demo/nlos_infer.py" ]
[ "####### Image data를 읽어와 GT 를 만드는 코드 #######\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nimport csv\r\nimport os\r\nimport shutil\r\n\r\nfrom PIL import Image\r\nimport torch\r\nimport torch.nn.parallel\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.optim\r\nimport torch.utils.data\r\nimport torch.utils.data.distributed\r\nimport torchvision.transforms as transforms\r\nimport torchvision\r\nimport cv2\r\nimport numpy as np\r\n\r\nimport sys\r\nsys.path.append(\"../lib\")\r\nimport time\r\n\r\n# import _init_paths\r\nimport models\r\nfrom config import cfg\r\nfrom config import update_config\r\nfrom core.inference import get_final_preds\r\nfrom utils.transforms import get_affine_transform\r\nfrom utils.vis import save_batch_heatmaps\r\n\r\nimport glob\r\nfrom PIL import Image\r\nimport imageio\r\n\r\n#CTX = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\r\nGPU_NUM = 0 # 원하는 GPU 번호 입력\r\nCTX = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\r\ntorch.cuda.set_device(CTX)\r\nprint(CTX)\r\nCOCO_KEYPOINT_INDEXES = {\r\n 0: 'nose',\r\n 1: 'left_eye',\r\n 2: 'right_eye',\r\n 3: 'left_ear',\r\n 4: 'right_ear',\r\n 5: 'left_shoulder',\r\n 6: 'right_shoulder',\r\n 7: 'left_elbow',\r\n 8: 'right_elbow',\r\n 9: 'left_wrist',\r\n 10: 'right_wrist',\r\n 11: 'left_hip',\r\n 12: 'right_hip',\r\n 13: 'left_knee',\r\n 14: 'right_knee',\r\n 15: 'left_ankle',\r\n 16: 'right_ankle'\r\n}\r\n\r\nCOCO_INSTANCE_CATEGORY_NAMES = [\r\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\r\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',\r\n 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\r\n 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',\r\n 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\r\n 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\r\n 'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\r\n 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\r\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',\r\n 'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',\r\n 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',\r\n 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\r\n]\r\n\r\n\r\n# 사진에서 먼저 사람의 위치를 추정.\r\ndef get_person_detection_boxes(model, img, threshold=0.5):\r\n pil_image = Image.fromarray(img) # Load the image\r\n transform = transforms.Compose([transforms.ToTensor()]) # Defing PyTorch Transform\r\n transformed_img = transform(pil_image) # Apply the transform to the image\r\n pred = model([transformed_img.to(CTX)]) # Pass the image to the model\r\n # Use the first detected person\r\n pred_classes = [COCO_INSTANCE_CATEGORY_NAMES[i]\r\n for i in list(pred[0]['labels'].cpu().numpy())] # Get the Prediction Score\r\n pred_boxes = [[(i[0], i[1]), (i[2], i[3])]\r\n for i in list(pred[0]['boxes'].cpu().detach().numpy())] # Bounding boxes\r\n pred_scores = list(pred[0]['scores'].cpu().detach().numpy())\r\n\r\n person_boxes = []\r\n # Select box has score larger than threshold and is person\r\n for pred_class, pred_box, pred_score in zip(pred_classes, pred_boxes, pred_scores):\r\n if (pred_score > threshold) and 
(pred_class == 'person'):\r\n person_boxes.append(pred_box)\r\n\r\n return person_boxes\r\n\r\n# 예측한 keypoint의 좌표가 detection box 내에 있는지를 체크\r\ndef in_box(x, y, box):\r\n if not ((box[0][0] <= x) and (x <= box[1][0])):\r\n return False\r\n if not ((box[0][1] <= y) and (y <= box[1][1])):\r\n return False\r\n return True\r\n\r\n# image로 부터 각 Keypoint의 좌표를 계산.\r\ndef get_pose_estimation_prediction(pose_model, image, centers, scales, box, transform):\r\n rotation = 0\r\n\r\n #print(\"img shape \", image.shape)\r\n #print(\"centers \", centers)\r\n #print(\"scales \", scales)\r\n #print(box)\r\n # pose estimation transformation\r\n model_inputs = []\r\n for center, scale in zip(centers, scales):\r\n trans = get_affine_transform(center, scale, rotation, cfg.MODEL.IMAGE_SIZE)\r\n #print(\"trans\", trans)\r\n # Crop smaller image of people\r\n model_input = cv2.warpAffine(\r\n image,\r\n trans,\r\n (int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),\r\n flags=cv2.INTER_LINEAR)\r\n\r\n img = model_input\r\n cv2.imwrite('trans_input.jpg', img)\r\n # hwc -> 1chw\r\n model_input = transform(model_input)#.unsqueeze(0)\r\n model_inputs.append(model_input)\r\n\r\n # n * 1chw -> nchw\r\n model_inputs = torch.stack(model_inputs)\r\n #zero_heatmap = np.zeros((120, 120), dtype=np.float32)\r\n zero_heatmap = torch.cuda.FloatTensor(120, 120).fill_(0)\r\n # compute output heatmap\r\n output = pose_model(model_inputs.to(CTX))\r\n # heatmap output :\r\n coords, _ = get_final_preds(\r\n cfg,\r\n output.cpu().detach().numpy(),\r\n np.asarray(centers),\r\n np.asarray(scales))\r\n\r\n\r\n for idx1, mat in enumerate(coords[0]):\r\n x_coord, y_coord = int(mat[0]), int(mat[1])\r\n if not(in_box(x_coord, y_coord, box)):\r\n coords[0][idx1] = [-1, -1]\r\n output[0][idx1] = zero_heatmap\r\n\r\n\r\n return output, coords\r\n\r\n\r\n# deteciton 결과를 기준으로 center, scale 계산\r\ndef box_to_center_scale(box, model_image_width, model_image_height):\r\n \"\"\"convert a box to center,scale information required for pose transformation\r\n Parameters\r\n ----------\r\n box : list of tuple\r\n list of length 2 with two tuples of floats representing\r\n bottom left and top right corner of a box\r\n model_image_width : int\r\n model_image_height : int\r\n\r\n Returns\r\n -------\r\n (numpy array, numpy array)\r\n Two numpy arrays, coordinates for the center of the box and the scale of the box\r\n \"\"\"\r\n center = np.zeros((2), dtype=np.float32)\r\n\r\n bottom_left_corner = box[0]\r\n top_right_corner = box[1]\r\n box_width = top_right_corner[0]-bottom_left_corner[0]\r\n box_height = top_right_corner[1]-bottom_left_corner[1]\r\n bottom_left_x = bottom_left_corner[0]\r\n bottom_left_y = bottom_left_corner[1]\r\n center[0] = bottom_left_x + box_width * 0.5\r\n center[1] = bottom_left_y + box_height * 0.5\r\n\r\n aspect_ratio = model_image_width * 1.0 / model_image_height\r\n pixel_std = 200\r\n\r\n if box_width > aspect_ratio * box_height:\r\n box_height = box_width * 1.0 / aspect_ratio\r\n elif box_width < aspect_ratio * box_height:\r\n box_width = box_height * aspect_ratio\r\n scale = np.array(\r\n [box_width * 1.0 / pixel_std, box_height * 1.0 / pixel_std],\r\n dtype=np.float32)\r\n if center[0] != -1:\r\n scale = scale * 1.25\r\n\r\n return center, scale\r\n\r\n\r\ndef prepare_output_dirs(prefix='/output/'):\r\n pose_dir = os.path.join(prefix, \"pose\")\r\n if os.path.exists(pose_dir) and os.path.isdir(pose_dir):\r\n shutil.rmtree(pose_dir)\r\n os.makedirs(pose_dir, exist_ok=True)\r\n return pose_dir\r\n\r\n\r\ndef 
parse_args():\r\n parser = argparse.ArgumentParser(description='Train keypoints network')\r\n # general\r\n parser.add_argument('--cfg', type=str, required=True)\r\n parser.add_argument('--outputDir', type=str, default='../data/nlos/nlos_result/')\r\n #parser.add_argument('--inferenceFps', type=int, default=10)\r\n parser.add_argument('--writeBoxFrames', action='store_true')\r\n\r\n parser.add_argument('opts',\r\n help='Modify config options using the command-line',\r\n default=None,\r\n nargs=argparse.REMAINDER)\r\n\r\n args = parser.parse_args()\r\n\r\n # args expected by supporting codebase\r\n args.modelDir = ''\r\n args.logDir = ''\r\n args.dataDir = ''\r\n args.prevModelDir = ''\r\n return args\r\n\r\n\r\n# 좌표를 받아 heatmap 생성.\r\ndef generate_target(joints):\r\n '''\r\n :param joints: [num_joints, 2]\r\n :param joints_vis: [num_joints, 3]\r\n :return: target, target_weight(1: visible, 0: invisible)\r\n '''\r\n num_joints = joints.shape[0]\r\n sigma = 2\r\n #print(\"num_joints \", num_joints)\r\n target_weight = np.ones((num_joints, 1), dtype=np.float32)\r\n for idx, coord in enumerate(joints):\r\n #print(\"{} = {}\".format(idx, coord))\r\n if(coord[0]== -1):\r\n target_weight[idx] = 0\r\n #print(target_weight[idx])\r\n\r\n heatmap_size = (120, 120)\r\n image_size = (480, 480)\r\n if True:\r\n target = np.zeros((num_joints,\r\n heatmap_size[1],\r\n heatmap_size[0]),\r\n dtype=np.float32)\r\n\r\n tmp_size = sigma * 3\r\n\r\n for joint_id in range(num_joints):\r\n if target_weight[joint_id] ==0:\r\n continue\r\n feat_stride = (image_size[0] / heatmap_size[0], image_size[1] / heatmap_size[1] )\r\n mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)\r\n mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)\r\n # Check that any part of the gaussian is in-bounds\r\n ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]\r\n br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]\r\n '''\r\n if ul[0] >= heatmap_size[0] or ul[1] >= heatmap_size[1] \\\r\n or br[0] < 0 or br[1] < 0:\r\n # If not, just return the image as is\r\n target_weight[joint_id] = 0\r\n continue\r\n '''\r\n # # Generate gaussian\r\n size = 2 * tmp_size + 1\r\n x = np.arange(0, size, 1, np.float32)\r\n y = x[:, np.newaxis]\r\n x0 = y0 = size // 2\r\n # The gaussian is not normalized, we want the center value to equal 1\r\n g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))\r\n\r\n # Usable gaussian range\r\n g_x = max(0, -ul[0]), min(br[0], heatmap_size[0]) - ul[0]\r\n g_y = max(0, -ul[1]), min(br[1], heatmap_size[1]) - ul[1]\r\n # Image range\r\n img_x = max(0, ul[0]), min(br[0], heatmap_size[0])\r\n img_y = max(0, ul[1]), min(br[1], heatmap_size[1])\r\n\r\n v = target_weight[joint_id]\r\n if v > 0.5:\r\n target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \\\r\n g[g_y[0]:g_y[1], g_x[0]:g_x[1]]\r\n\r\n #if self.use_different_joints_weight:\r\n # target_weight = np.multiply(target_weight, self.joints_weight)\r\n\r\n return target, target_weight\r\n\r\n\r\n\r\ndef main():\r\n # transformation\r\n pose_transform = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]),\r\n ])\r\n\r\n # cudnn related setting\r\n cudnn.benchmark = cfg.CUDNN.BENCHMARK\r\n torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC\r\n torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED\r\n\r\n args = parse_args()\r\n update_config(cfg, args)\r\n #pose_dir = prepare_output_dirs(args.outputDir)\r\n\r\n # Deection model\r\n box_model = 
torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\r\n box_model.to(CTX)\r\n box_model.eval()\r\n pose_model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(\r\n cfg, is_train=False\r\n )\r\n\r\n # 학습된 모델 불러오기\r\n if cfg.TEST.MODEL_FILE:\r\n print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))\r\n pose_model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)\r\n else:\r\n print('expected model defined in config at TEST.MODEL_FILE')\r\n\r\n pose_model.to(CTX)\r\n pose_model.eval()\r\n\r\n # img 파일 및 gt 저장할 폴더 불러오기\r\n\r\n #path_dir = '../..data/nlos/save_data_original/'\r\n path_dir = '/data/nlos/save_data_ver2/'\r\n path2 = os.listdir(path_dir)\r\n dirs = []\r\n img_dirs = []\r\n gt_dirs = []\r\n for dir in path2:\r\n dir_name = os.path.join(path_dir, dir)\r\n if os.path.isdir(dir_name):\r\n dir_name = dir_name\r\n dirs.append(dir_name)\r\n img_dirs.append(os.path.join(dir_name, 'img'))\r\n gt_dirs.append(os.path.join(dir_name, 'gt'))\r\n\r\n #print(dirs)\r\n #print(img_dirs)\r\n #print(gt_dirs)\r\n print(\"============= all {} dirs =============\".format(len(dirs)))\r\n\r\n get_box = True\r\n\r\n for i in range(len(dirs)):\r\n #for i in [0]:\r\n input_dir = img_dirs[i] \r\n output_dir = gt_dirs[i]\r\n print(\"input_dir \", input_dir)\r\n print(\"output_dir \", output_dir)\r\n\r\n input_file_list = os.listdir(input_dir)\r\n num_done = 0\r\n det_cnt = 0\r\n images= []\r\n for f in input_file_list:\r\n file_num = f[:5]\r\n if num_done%1000 == 0:\r\n print(\"{} images done\".format(num_done))\r\n num_done += 1\r\n input_file = os.path.join(input_dir,f)\r\n\r\n img = cv2.imread(input_file) # img 파일 불러와서 크기 조절하기\r\n img = cv2.resize(\r\n img,\r\n (480, 480), # (width, height)\r\n interpolation=cv2.INTER_CUBIC\r\n )\r\n\r\n # print(img.shape)\r\n # detection model을 통해 먼저 사람이 있는 위치를 추정 ( BOX ).\r\n if get_box:\r\n detection_boxes = get_person_detection_boxes(box_model, img, threshold=0.9)\r\n full_boxes = []\r\n full_boxes.append([(0, 0), (480, 480)])\r\n #print(detection_boxes)\r\n centers = []\r\n scales = []\r\n\r\n # 사람의 위치가 detection 되지 않을 시 사진 전체를 BOX로 함.\r\n if not detection_boxes:\r\n box = full_boxes[0]\r\n else:\r\n det_cnt = det_cnt + 1\r\n box = detection_boxes[0]\r\n\r\n\r\n # box를 image위에 나타내고, box 를 기준으로 center, scale 계산 -> center, scale을 기준으로 image를 조정하여 학습 성능을 높임.\r\n cv2.rectangle(img, box[0], box[1], color=(0, 255, 0), thickness=3)\r\n center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])\r\n centers.append(center)\r\n scales.append(scale)\r\n\r\n # image로부터 신체 각 Keypoint의 좌표를 계산함.\r\n output, pose_preds = get_pose_estimation_prediction(pose_model, img, centers, scales, box, transform=pose_transform)\r\n #output_np = output.cpu().detach().numpy()\r\n #output_np = np.concatenate((output_np[0][0, :, :].reshape(1, 120, 120), output_np[0][5:, :, :]), axis=0)\r\n \r\n # 얼굴에 대한 정보 (눈, 귀) 에 대한 정보 제외.\r\n tmp_preds = np.concatenate((pose_preds[0][0].reshape(1, 2), pose_preds[0][5:]))\r\n pose_preds = tmp_preds.reshape(1, 13, 2)\r\n\r\n # 좌표를 기반으로 heatmap 생성\r\n hm, _ = generate_target(pose_preds[0])\r\n \r\n \r\n #print(\"hm {}\".format(hm.shape))\r\n save_dir = output_dir + \"/\" + file_num\r\n np.save(save_dir, hm) # heatmap 저장.\r\n \r\n for idx1, mat in enumerate(pose_preds[0]):\r\n x_coord, y_coord = int(mat[0]), int(mat[1])\r\n if idx1 == 0:\r\n cv2.circle(img, (x_coord, y_coord), 3, (0, 0, 255), -1)\r\n elif idx1 in [1, 3, 5]: # green\r\n cv2.circle(img, (x_coord, y_coord), 3, (0, 255, 0), -1)\r\n elif 
idx1 in [2, 4, 6]: # blue\r\n cv2.circle(img, (x_coord, y_coord), 3, (255, 0, 0), -1)\r\n elif idx1 in [7, 9, 11]: # 청록\r\n cv2.circle(img, (x_coord, y_coord), 3, (255, 255, 255), -1)\r\n elif idx1 in [8, 10, 12]: # yello\r\n cv2.circle(img, (x_coord, y_coord), 3, (0, 255, 255), -1)\r\n\r\n #images.append(img) \r\n\r\n '''\r\n trans_img = pose_transform(img)\r\n trans_img = trans_img.reshape(1, 3, 480, 480)\r\n torch_hm = torch.from_numpy(hm)\r\n torch_hm = torch_hm.reshape(1, 13, 120, 120)\r\n save_batch_heatmaps(\r\n trans_img, torch_hm, 'my_pred.jpg'\r\n # model_inputs, tmp, 'test_hm_pred.jpg'\r\n )\r\n if num_done ==100:\r\n break #for test only once\r\n '''\r\n print(\"detection : fail {}, success {}\".format(num_done - det_cnt, det_cnt))\r\n #imageio.mimsave(\"test{}.gif\".format(i), images, fps=18)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "numpy.array", "torch.stack", "numpy.asarray", "numpy.zeros", "numpy.ones", "numpy.exp", "numpy.save", "torch.cuda.set_device", "torch.cuda.is_available", "numpy.arange", "torch.cuda.FloatTensor", "torch.load" ] ]
JackGeraghty/AQP-Research
[ "6b78f4fa0bdc7a26dadaf52b3d317e20bb40d1f0" ]
[ "nodes/loadcsvasdfnode.py" ]
[ "\"\"\"Module containing the LoadCSVAsDFNode. Used to load a csv file into a dataframe.\"\"\"\n\nimport sys\nimport logging\nimport pandas as pd\n\nfrom .node import AQPNode\nfrom pathlib import Path\nfrom constants import LOGGER_NAME\n\nLOGGER = logging.getLogger(LOGGER_NAME)\n\nclass LoadCSVAsDFNode(AQPNode):\n \"\"\"Node which takes a path to a csv file and store the csv as a pandas dataframe in the result dictionary upon execution.\"\"\"\n\n def __init__(self, id_: str, output_key: str, path_to_csv: str,\n draw_options=None, **kwargs):\n \"\"\"Initialize a LoadCSVAsDF Node.\n\n Parameters\n ----------\n path_to_csv : str\n Path to the csv file to load.\n \"\"\"\n super().__init__(id_, output_key=output_key, draw_options=draw_options)\n self.path_to_csv = Path(path_to_csv)\n self.type_ = 'LoadCSVAsDFNode'\n\n def execute(self, result: dict, **kwargs):\n \"\"\"Execute the LoadCSVAsDFNode.\n \n Assigns the loaded dataframe to the output key of the node.\n \"\"\"\n super().execute(result, **kwargs)\n try:\n result[self.output_key] = pd.read_csv(self.path_to_csv)\n except (FileNotFoundError) as err:\n LOGGER.error(err)\n sys.exit(-1)\n return result\n" ]
[ [ "pandas.read_csv" ] ]
gregadc/oct
[ "7e9bddeb3b8495a26442b1c86744e9fb187fe88f" ]
[ "oct/results/report.py" ]
[ "import six\nimport time\nfrom collections import defaultdict\n\nimport ujson as json\nimport pandas as pd\n\nfrom oct.results.models import db, Result, Turret\n\n\nclass ReportResults(object):\n \"\"\"Represent a report containing all tests results\n\n :param int run_time: the run_time of the script\n :param int interval: the time interval between each group of results\n \"\"\"\n def __init__(self, run_time, interval):\n self.total_transactions = 0\n self.total_errors = Result.select(Result.id).where(Result.error != \"\", Result.error != None).count()\n self.total_timers = 0\n self.timers_results = {}\n self._timers_values = defaultdict(list)\n self.turrets = []\n self.main_results = {}\n self.interval = interval\n\n self._init_turrets()\n\n def _init_dates(self):\n \"\"\"Initialize all dates properties\n \"\"\"\n if self.total_transactions == 0:\n return None\n self.epoch_start = Result.select(Result.epoch).order_by(Result.epoch.asc()).limit(1).get().epoch\n self.epoch_finish = Result.select(Result.epoch).order_by(Result.epoch.desc()).limit(1).get().epoch\n self.start_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_start))\n self.finish_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_finish))\n\n def _init_dataframes(self):\n \"\"\"Initialise the main dataframe for the results and the custom timers dataframes\n \"\"\"\n df = pd.read_sql_query(\"SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC\",\n db.get_conn())\n\n self._get_all_timers(df)\n self.main_results = self._get_processed_dataframe(df)\n\n # create all custom timers dataframes\n for key, value in six.iteritems(self._timers_values):\n df = pd.DataFrame(value, columns=['epoch', 'scriptrun_time'])\n df.index = pd.to_datetime(df['epoch'], unit='s')\n timer_results = self._get_processed_dataframe(df)\n self.timers_results[key] = timer_results\n\n # clear memory\n del self._timers_values\n\n def _get_all_timers(self, dataframe):\n \"\"\"Get all timers and set them in the _timers_values property\n\n :param pandas.DataFrame dataframe: the main dataframe with row results\n \"\"\"\n s = dataframe['custom_timers'].apply(json.loads)\n s.index = dataframe['epoch']\n for index, value in s.iteritems():\n if not value:\n continue\n for key, value in six.iteritems(value):\n self._timers_values[key].append((index, value))\n self.total_timers += 1\n del dataframe['custom_timers']\n del s\n\n def _get_processed_dataframe(self, dataframe):\n \"\"\"Generate required dataframe for results from raw dataframe\n\n :param pandas.DataFrame dataframe: the raw dataframe\n :return: a dict containing raw, compiled, and summary dataframes from original dataframe\n :rtype: dict\n \"\"\"\n dataframe.index = pd.to_datetime(dataframe['epoch'], unit='s', utc=True)\n del dataframe['epoch']\n summary = dataframe.describe(percentiles=[.80, .90, .95]).transpose().loc['scriptrun_time']\n df_grp = dataframe.groupby(pd.TimeGrouper('{}S'.format(self.interval)))\n df_final = df_grp.apply(lambda x: x.describe(percentiles=[.80, .90, .95])['scriptrun_time'])\n\n return {\n \"raw\": dataframe.round(2),\n \"compiled\": df_final.round(2),\n \"summary\": summary.round(2)\n }\n\n def _init_turrets(self):\n \"\"\"Setup data from database\n \"\"\"\n for turret in Turret.select():\n self.turrets.append(turret.to_dict())\n\n def compile_results(self):\n \"\"\"Compile all results for the current test\n \"\"\"\n self._init_dataframes()\n\n self.total_transactions = len(self.main_results['raw'])\n 
self._init_dates()\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
YashNita/DCASE2019-Task5-Urban-Sound-Tagging-Multi_Accuracy-Curve-Display-
[ "377a42a937b75aa41c780651ad0ff94062a83515" ]
[ "pytorch/evaluate.py" ]
[ "import os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], '../utils'))\nsys.path.insert(1, os.path.join(sys.path[0], '../evaluation_tools'))\n\nimport numpy as np\nimport time\nimport logging\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nimport _pickle as cPickle\nimport datetime\nimport sed_eval\n\nfrom utilities import get_filename, inverse_scale, get_labels, write_submission_csv\nfrom pytorch_utils import forward\nimport metrics as offical_metrics\nimport config\n\n\nclass Evaluator(object):\n def __init__(self, model, data_generator, taxonomy_level, cuda=True, \n verbose=False):\n '''Evaluator to evaluate prediction performance. \n \n Args: \n model: object\n data_generator: object\n taxonomy_level: 'fine' | 'coarse'\n cuda: bool\n verbose: bool\n '''\n\n self.model = model\n self.data_generator = data_generator\n self.taxonomy_level = taxonomy_level\n self.cuda = cuda\n self.verbose = verbose\n \n self.frames_per_second = config.frames_per_second\n self.labels = get_labels(taxonomy_level)\n\n def get_binary_target(self, target):\n '''Get binarized target. The original target is between 0 and 1\n representing the average annotations of labelers. Set a threshold to\n binarize the target to either 0 or 1. We set a small threshold \n simulates XOR operation of labels. \n ''' \n \n threshold = 0.001 # XOR of annotations\n return (np.sign(target - threshold) + 1) / 2\n\n def evaluate(self, data_type, submission_path=None, \n annotation_path=None, yaml_path=None, max_iteration=None):\n '''Evaluate prediction performance. \n \n Args:\n data_type: 'train' | 'validate'\n submission_path: None | string, path submission csv\n annotation_path: None | string, path of reference csv\n yaml_path: None | string, path of yaml taxonomy file\n max_iteration: None | int, use maximum iteration of partial data for\n fast evaluation\n '''\n \n generate_func = self.data_generator.generate_validate(\n data_type=data_type, \n max_iteration=max_iteration)\n \n # Forward\n output_dict = forward(\n model=self.model, \n generate_func=generate_func, \n cuda=self.cuda, \n return_target=True)\n \n output = output_dict['output']\n target = output_dict['{}_target'.format(self.taxonomy_level)]\n target = self.get_binary_target(target)\n \n average_precision = metrics.average_precision_score(target, output, average=None)\n \n if self.verbose:\n logging.info('{} average precision:'.format(data_type)) \n for k, label in enumerate(self.labels):\n logging.info(' {:<40}{:.3f}'.format(label, average_precision[k]))\n logging.info(' {:<40}{:.3f}'.format('Average', np.mean(average_precision)))\n else:\n logging.info('{}:'.format(data_type))\n logging.info(' mAP: {:.3f}'.format(np.mean(average_precision)))\n\n statistics = {}\n statistics['average_precision'] = average_precision\n\n # Write submission and evaluate with official evaluation tool\n # https://github.com/sonyc-project/urban-sound-tagging-baseline\n if submission_path:\n write_submission_csv(\n audio_names=output_dict['audio_name'], \n outputs=output, \n taxonomy_level=self.taxonomy_level, \n submission_path=submission_path)\n \n # The following code are from official evaluation code\n df_dict = offical_metrics.evaluate(\n prediction_path=submission_path,\n annotation_path=annotation_path,\n yaml_path=yaml_path,\n mode=self.taxonomy_level)\n \n micro_auprc, eval_df = offical_metrics.micro_averaged_auprc(\n df_dict, return_df=True)\n \n macro_auprc, class_auprc = offical_metrics.macro_averaged_auprc(\n df_dict, return_classwise=True)\n \n # 
Get index of first threshold that is at least 0.5\n thresh_0pt5_idx = (eval_df['threshold'] >= 0.5).nonzero()[0][0]\n \n logging.info(' Official evaluation: ')\n logging.info(' Micro AUPRC: {:.3f}'.format(micro_auprc))\n logging.info(' Micro F1-score (@0.5): {:.3f}'.format(eval_df['F'][thresh_0pt5_idx]))\n logging.info(' Macro AUPRC: {:.3f}'.format(macro_auprc))\n \n statistics['micro_auprc'] = micro_auprc\n statistics['micro_f1'] = eval_df['F'][thresh_0pt5_idx]\n statistics['macro_auprc'] = macro_auprc\n \n return statistics\n \n def visualize(self, data_type, max_iteration=None):\n '''Visualize the log mel spectrogram. \n \n Args:\n data_type: 'train' | 'validate'\n max_iteration: None | int, use maximum iteration of partial data for\n fast evaluation\n '''\n\n mel_bins = config.mel_bins\n audio_duration = config.audio_duration\n frames_num = config.frames_num\n coarse_classes_num = config.coarse_classes_num\n coarse_idx_to_lb = config.coarse_idx_to_lb\n \n generate_func = self.data_generator.generate_validate(\n data_type=data_type, \n max_iteration=max_iteration)\n \n # Forward\n output_dict = forward(\n model=self.model, \n generate_func=generate_func, \n cuda=self.cuda, \n return_input=True, \n return_target=True)\n\n rows_num = 3\n cols_num = 3\n \n fig, axs = plt.subplots(rows_num, cols_num, figsize=(10, 5))\n\n for k in range(coarse_classes_num):\n for n, audio_name in enumerate(output_dict['audio_name']):\n if output_dict['coarse_target'][n, k] > 0.5:\n row = k // cols_num\n col = k % cols_num\n title = coarse_idx_to_lb[k]\n title = '{}\\n{}'.format(coarse_idx_to_lb[k], audio_name)\n axs[row, col].set_title(title, color='r')\n logmel = inverse_scale(output_dict['feature'][n], \n self.data_generator.scalar['mean'], \n self.data_generator.scalar['std'])\n axs[row, col].matshow(logmel.T, origin='lower', aspect='auto', cmap='jet') \n axs[row, col].set_xticks([0, frames_num])\n axs[row, col].set_xticklabels(['0', '{:.1f} s'.format(audio_duration)])\n axs[row, col].xaxis.set_ticks_position('bottom')\n axs[row, col].set_ylabel('Mel bins')\n axs[row, col].set_yticks([])\n break\n \n for k in range(coarse_classes_num, rows_num * cols_num):\n row = k // cols_num\n col = k % cols_num\n axs[row, col].set_visible(False)\n \n fig.tight_layout(pad=0, w_pad=0, h_pad=0)\n plt.show()\n \n \nclass StatisticsContainer(object):\n def __init__(self, statistics_path):\n '''Container of statistics during training. \n \n Args:\n statistics_path: string, path to write out\n '''\n self.statistics_path = statistics_path\n\n self.backup_statistics_path = '{}_{}.pickle'.format(\n os.path.splitext(self.statistics_path)[0], datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n\n self.statistics_list = []\n\n def append_and_dump(self, iteration, statistics):\n '''Append statistics to container and dump the container. \n \n Args:\n iteration: int\n statistics: dict of statistics\n '''\n statistics['iteration'] = iteration\n self.statistics_list.append(statistics)\n\n cPickle.dump(self.statistics_list, open(self.statistics_path, 'wb'))\n cPickle.dump(self.statistics_list, open(self.backup_statistics_path, 'wb'))\n logging.info(' Dump statistics to {}'.format(self.statistics_path))" ]
[ [ "numpy.mean", "matplotlib.pyplot.subplots", "numpy.sign", "sklearn.metrics.average_precision_score", "matplotlib.pyplot.show" ] ]
AshwinBalaji52/Personality-Detection-from-text-using-Deep-Learning
[ "ce3aba2aa2ed118c990eab71c28c82ac78674730" ]
[ "Personality Detection/trainBuild.py" ]
[ "import re\nimport string\nimport pandas as pd\nfrom nltk.corpus import stopwords\nclass trainBuild:\n\tdef __init__(self):\n\t\tself.stop = list(set(stopwords.words(\"english\")))\n\t\tself.word =[] ## NRC word list\n\t\tself.better = {} ## NRC word attributes\n\t\tself.data = pd.DataFrame() ## all data\n\t## get all required values\n\tdef getValues(self):\n\t\tnrc = pd.read_csv(\"process/better2.csv\", header = None, index_col = False)\n\t\tfor index, row in nrc.iterrows():\n\t\t\tself.better[row[0]] = row[1:].values.tolist()\n\t\t\tself.word.append(row[0])\n\t\tself.data = pd.read_csv(\"essays2.csv\")\n\t## get attribute vectors by status\n\tdef getStatusProcessed(self):\n\t\tstatus = [] ## processed status\n\t\t## iterate dataframe by rows\n\t\tfor index, row in self.data.iterrows():\n\t\t\ts = row[\"TEXT\"]\n\t\t\tattr = []## attribute vectors for each status\n\t\t\t## status process\n\t\t\ts = re.sub(r\"(?:@\\S*|#\\S*|http(?=.*://)\\S*)\", \"\", s.rsplit(\"\\n\")[0].lower())\n\t\t\t# s = s.replace(\"rt\", \"\").rsplit(\"\\n\")[0]\n\t\t\tfor word in s.translate(None, string.punctuation).split():\n\t\t\t\tif(word in self.word and word not in self.stop):\n\t\t\t\t\tattr.append(self.better[word])\n\t\t\tstatus.append([sum(x) for x in zip(*attr)])\n\t\t#status = filter(None, status)\n\t\t## keep only english status, and clean the .csv file\n\t\tlabel_delete = [i for i, v in enumerate(status) if not v]\n\t\tself.data.drop(label_delete, inplace = True)\n\t\t## update train dataset for word to vector\n\n\t\tself.data.to_csv(\"clean/train_essayv1.csv\", index = False, header = False)\n\t\tmat = [] ## store processed numerical vectors\n\t\tfor index, row in self.data.iterrows():\n\t\t\tmat.append(status[index] + row[2:].values.tolist())\n\t\t## write to file\n\t\tpd.DataFrame(mat).to_csv(\"clean/train_essay_v2.csv\", index = False, header = False)\nx = trainBuild()\nx.getValues()\nx.getStatusProcessed()" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
kramer65/matplotlib
[ "a78d7e8e8958307e0e874b7f71de709f5c2fc279", "a78d7e8e8958307e0e874b7f71de709f5c2fc279" ]
[ "lib/matplotlib/figure.py", "lib/matplotlib/__init__.py" ]
[ "\"\"\"\nThe figure module provides the top-level\n:class:`~matplotlib.artist.Artist`, the :class:`Figure`, which\ncontains all the plot elements. The following classes are defined\n\n:class:`SubplotParams`\n control the default spacing of the subplots\n\n:class:`Figure`\n top level container for all plot elements\n\n\"\"\"\n\nfrom __future__ import print_function\nimport warnings\nfrom operator import itemgetter\n\nimport numpy as np\n\nfrom matplotlib import rcParams\nfrom matplotlib import docstring\nfrom matplotlib import __version__ as _mpl_version\n\nimport matplotlib.artist as martist\nfrom matplotlib.artist import Artist, allow_rasterization\n\nimport matplotlib.cbook as cbook\n\nfrom matplotlib.cbook import Stack, iterable\n\nfrom matplotlib import _image\nfrom matplotlib.image import FigureImage\n\nimport matplotlib.colorbar as cbar\n\nfrom matplotlib.axes import Axes, SubplotBase, subplot_class_factory\nfrom matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput\nfrom matplotlib.legend import Legend\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.projections import (get_projection_names,\n process_projection_requirements)\nfrom matplotlib.text import Text, _process_text_args\nfrom matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,\n TransformedBbox)\nfrom matplotlib.backend_bases import NonGuiException\n\ndocstring.interpd.update(projection_names=get_projection_names())\n\n\nclass AxesStack(Stack):\n \"\"\"\n Specialization of the Stack to handle all tracking of Axes in a Figure.\n This stack stores ``key, (ind, axes)`` pairs, where:\n\n * **key** should be a hash of the args and kwargs\n used in generating the Axes.\n * **ind** is a serial number for tracking the order\n in which axes were added.\n\n The AxesStack is a callable, where ``ax_stack()`` returns\n the current axes. 
Alternatively the :meth:`current_key_axes` will\n return the current key and associated axes.\n\n \"\"\"\n def __init__(self):\n Stack.__init__(self)\n self._ind = 0\n\n def as_list(self):\n \"\"\"\n Return a list of the Axes instances that have been added to the figure\n \"\"\"\n ia_list = [a for k, a in self._elements]\n ia_list.sort()\n return [a for i, a in ia_list]\n\n def get(self, key):\n \"\"\"\n Return the Axes instance that was added with *key*.\n If it is not present, return None.\n \"\"\"\n item = dict(self._elements).get(key)\n if item is None:\n return None\n return item[1]\n\n def _entry_from_axes(self, e):\n ind, k = dict([(a, (ind, k)) for (k, (ind, a)) in self._elements])[e]\n return (k, (ind, e))\n\n def remove(self, a):\n \"\"\"Remove the axes from the stack.\"\"\"\n Stack.remove(self, self._entry_from_axes(a))\n\n def bubble(self, a):\n \"\"\"\n Move the given axes, which must already exist in the\n stack, to the top.\n \"\"\"\n return Stack.bubble(self, self._entry_from_axes(a))\n\n def add(self, key, a):\n \"\"\"\n Add Axes *a*, with key *key*, to the stack, and return the stack.\n\n If *a* is already on the stack, don't add it again, but\n return *None*.\n \"\"\"\n # All the error checking may be unnecessary; but this method\n # is called so seldom that the overhead is negligible.\n if not isinstance(a, Axes):\n raise ValueError(\"second argument, %s, is not an Axes\" % a)\n try:\n hash(key)\n except TypeError:\n raise ValueError(\"first argument, %s, is not a valid key\" % key)\n\n a_existing = self.get(key)\n if a_existing is not None:\n Stack.remove(self, (key, a_existing))\n warnings.warn(\n \"key %s already existed; Axes is being replaced\" % key)\n # I don't think the above should ever happen.\n\n if a in self:\n return None\n self._ind += 1\n return Stack.push(self, (key, (self._ind, a)))\n\n def current_key_axes(self):\n \"\"\"\n Return a tuple of ``(key, axes)`` for the active axes.\n\n If no axes exists on the stack, then returns ``(None, None)``.\n\n \"\"\"\n if not len(self._elements):\n return self._default, self._default\n else:\n key, (index, axes) = self._elements[self._pos]\n return key, axes\n\n def __call__(self):\n return self.current_key_axes()[1]\n\n def __contains__(self, a):\n return a in self.as_list()\n\n\nclass SubplotParams:\n \"\"\"\n A class to hold the parameters for a subplot\n \"\"\"\n def __init__(self, left=None, bottom=None, right=None, top=None,\n wspace=None, hspace=None):\n \"\"\"\n All dimensions are fraction of the figure width or height.\n All values default to their rc params\n\n The following attributes are available\n\n *left* : 0.125\n The left side of the subplots of the figure\n\n *right* : 0.9\n The right side of the subplots of the figure\n\n *bottom* : 0.1\n The bottom of the subplots of the figure\n\n *top* : 0.9\n The top of the subplots of the figure\n\n *wspace* : 0.2\n The amount of width reserved for blank space between subplots\n\n *hspace* : 0.2\n The amount of height reserved for white space between subplots\n \"\"\"\n\n self.validate = True\n self.update(left, bottom, right, top, wspace, hspace)\n\n def update(self, left=None, bottom=None, right=None, top=None,\n wspace=None, hspace=None):\n \"\"\"\n Update the current values. 
If any kwarg is None, default to\n the current value, if set, otherwise to rc\n\n \"\"\"\n\n thisleft = getattr(self, 'left', None)\n thisright = getattr(self, 'right', None)\n thistop = getattr(self, 'top', None)\n thisbottom = getattr(self, 'bottom', None)\n thiswspace = getattr(self, 'wspace', None)\n thishspace = getattr(self, 'hspace', None)\n\n self._update_this('left', left)\n self._update_this('right', right)\n self._update_this('bottom', bottom)\n self._update_this('top', top)\n self._update_this('wspace', wspace)\n self._update_this('hspace', hspace)\n\n def reset():\n self.left = thisleft\n self.right = thisright\n self.top = thistop\n self.bottom = thisbottom\n self.wspace = thiswspace\n self.hspace = thishspace\n\n if self.validate:\n if self.left >= self.right:\n reset()\n raise ValueError('left cannot be >= right')\n\n if self.bottom >= self.top:\n reset()\n raise ValueError('bottom cannot be >= top')\n\n def _update_this(self, s, val):\n if val is None:\n val = getattr(self, s, None)\n if val is None:\n key = 'figure.subplot.' + s\n val = rcParams[key]\n\n setattr(self, s, val)\n\n\nclass Figure(Artist):\n\n \"\"\"\n The Figure instance supports callbacks through a *callbacks*\n attribute which is a :class:`matplotlib.cbook.CallbackRegistry`\n instance. The events you can connect to are 'dpi_changed', and\n the callback will be called with ``func(fig)`` where fig is the\n :class:`Figure` instance.\n\n *patch*\n The figure patch is drawn by a\n :class:`matplotlib.patches.Rectangle` instance\n\n *suppressComposite*\n For multiple figure images, the figure will make composite\n images depending on the renderer option_image_nocomposite\n function. If suppressComposite is True|False, this will\n override the renderer.\n \"\"\"\n\n def __str__(self):\n return \"Figure(%gx%g)\" % tuple(self.bbox.size)\n\n def __init__(self,\n figsize=None, # defaults to rc figure.figsize\n dpi=None, # defaults to rc figure.dpi\n facecolor=None, # defaults to rc figure.facecolor\n edgecolor=None, # defaults to rc figure.edgecolor\n linewidth=0.0, # the default linewidth of the frame\n frameon=True, # whether or not to draw the figure frame\n subplotpars=None, # default to rc\n tight_layout=None, # default to rc figure.autolayout\n ):\n \"\"\"\n *figsize*\n w,h tuple in inches\n\n *dpi*\n Dots per inch\n\n *facecolor*\n The figure patch facecolor; defaults to rc ``figure.facecolor``\n\n *edgecolor*\n The figure patch edge color; defaults to rc ``figure.edgecolor``\n\n *linewidth*\n The figure patch edge linewidth; the default linewidth of the frame\n\n *frameon*\n If *False*, suppress drawing the figure frame\n\n *subplotpars*\n A :class:`SubplotParams` instance, defaults to rc\n\n *tight_layout*\n If *False* use *subplotpars*; if *True* adjust subplot\n parameters using :meth:`tight_layout`. 
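# --- Illustrative aside: the fallback chain used by ``_update_this`` below
# is explicit argument -> previously set attribute -> ``figure.subplot.*``
# rc default. A standalone sketch of that pattern (the ``_rc`` dict and
# ``_Params`` class are hypothetical stand-ins for rcParams and
# SubplotParams):
_rc = {'figure.subplot.left': 0.125}

class _Params:
    def _update_this(self, s, val):
        if val is None:
            val = getattr(self, s, None)          # keep the current value
        if val is None:
            val = _rc['figure.subplot.' + s]      # else fall back to rc
        setattr(self, s, val)

_p = _Params()
_p._update_this('left', None)   # nothing explicit, nothing prior -> rc
assert _p.left == 0.125
_p._update_this('left', 0.2)    # an explicit value always wins
assert _p.left == 0.2
# --- end aside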
Defaults to\n rc ``figure.autolayout``.\n \"\"\"\n Artist.__init__(self)\n\n self.callbacks = cbook.CallbackRegistry()\n\n if figsize is None:\n figsize = rcParams['figure.figsize']\n if dpi is None:\n dpi = rcParams['figure.dpi']\n if facecolor is None:\n facecolor = rcParams['figure.facecolor']\n if edgecolor is None:\n edgecolor = rcParams['figure.edgecolor']\n\n self.dpi_scale_trans = Affine2D()\n self.dpi = dpi\n self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)\n self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)\n\n self.frameon = frameon\n\n self.transFigure = BboxTransformTo(self.bbox)\n\n # the figurePatch name is deprecated\n self.patch = self.figurePatch = Rectangle(\n xy=(0, 0), width=1, height=1,\n facecolor=facecolor, edgecolor=edgecolor,\n linewidth=linewidth)\n self._set_artist_props(self.patch)\n self.patch.set_aa(False)\n\n self._hold = rcParams['axes.hold']\n self.canvas = None\n self._suptitle = None\n\n if subplotpars is None:\n subplotpars = SubplotParams()\n\n self.subplotpars = subplotpars\n self.set_tight_layout(tight_layout)\n\n self._axstack = AxesStack() # track all figure axes and current axes\n self.clf()\n self._cachedRenderer = None\n\n def show(self, warn=True):\n \"\"\"\n If using a GUI backend with pyplot, display the figure window.\n\n If the figure was not created using\n :func:`~matplotlib.pyplot.figure`, it will lack a\n :class:`~matplotlib.backend_bases.FigureManagerBase`, and\n will raise an AttributeError.\n\n For non-GUI backends, this does nothing, in which case\n a warning will be issued if *warn* is True (default).\n \"\"\"\n try:\n manager = getattr(self.canvas, 'manager')\n except AttributeError as err:\n raise AttributeError(\"%s\\n\"\n \"Figure.show works only \"\n \"for figures managed by pyplot, normally \"\n \"created by pyplot.figure().\" % err)\n\n if manager is not None:\n try:\n manager.show()\n return\n except NonGuiException:\n pass\n if warn:\n import warnings\n warnings.warn(\n \"matplotlib is currently using a non-GUI backend, \"\n \"so cannot show the figure\")\n\n def _get_axes(self):\n return self._axstack.as_list()\n\n axes = property(fget=_get_axes, doc=\"Read-only: list of axes in Figure\")\n\n def _get_dpi(self):\n return self._dpi\n\n def _set_dpi(self, dpi):\n self._dpi = dpi\n self.dpi_scale_trans.clear().scale(dpi, dpi)\n self.callbacks.process('dpi_changed', self)\n dpi = property(_get_dpi, _set_dpi)\n\n def get_tight_layout(self):\n \"\"\"\n Return the Boolean flag, True to use :meth`tight_layout` when drawing.\n \"\"\"\n return self._tight\n\n def set_tight_layout(self, tight):\n \"\"\"\n Set whether :meth:`tight_layout` is used upon drawing.\n If None, the rcParams['figure.autolayout'] value will be set.\n\n ACCEPTS: [True | False | None]\n \"\"\"\n if tight is None:\n tight = rcParams['figure.autolayout']\n tight = bool(tight)\n self._tight = tight\n\n def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right'):\n \"\"\"\n Date ticklabels often overlap, so it is useful to rotate them\n and right align them. Also, a common use case is a number of\n subplots with shared xaxes where the x-axis is date data. 
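# --- Illustrative aside: ``show`` above works only for pyplot-managed
# figures. The purely object-oriented route pairs a Figure with a backend
# canvas explicitly; a sketch using the Agg backend (the output file name
# is arbitrary):
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

_fig = Figure(figsize=(4, 3), dpi=100)
FigureCanvasAgg(_fig)                  # attaches itself as _fig.canvas
_ax = _fig.add_subplot(111)
_ax.plot([0, 1, 2], [0, 1, 4])
_fig.savefig('demo.png')               # routed through canvas.print_figure
# --- end aside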
The\n ticklabels are often long, and it helps to rotate them on the\n bottom subplot and turn them off on other subplots, as well as\n turn off xlabels.\n\n *bottom*\n The bottom of the subplots for :meth:`subplots_adjust`\n\n *rotation*\n The rotation of the xtick labels\n\n *ha*\n The horizontal alignment of the xticklabels\n \"\"\"\n allsubplots = np.alltrue([hasattr(ax, 'is_last_row') for ax\n in self.axes])\n if len(self.axes) == 1:\n for label in self.axes[0].get_xticklabels():\n label.set_ha(ha)\n label.set_rotation(rotation)\n else:\n if allsubplots:\n for ax in self.get_axes():\n if ax.is_last_row():\n for label in ax.get_xticklabels():\n label.set_ha(ha)\n label.set_rotation(rotation)\n else:\n for label in ax.get_xticklabels():\n label.set_visible(False)\n ax.set_xlabel('')\n\n if allsubplots:\n self.subplots_adjust(bottom=bottom)\n\n def get_children(self):\n 'get a list of artists contained in the figure'\n children = [self.patch]\n children.extend(self.artists)\n children.extend(self.axes)\n children.extend(self.lines)\n children.extend(self.patches)\n children.extend(self.texts)\n children.extend(self.images)\n children.extend(self.legends)\n return children\n\n def contains(self, mouseevent):\n \"\"\"\n Test whether the mouse event occurred on the figure.\n\n Returns True,{}\n \"\"\"\n if callable(self._contains):\n return self._contains(self, mouseevent)\n # inside = mouseevent.x >= 0 and mouseevent.y >= 0\n inside = self.bbox.contains(mouseevent.x, mouseevent.y)\n\n return inside, {}\n\n def get_window_extent(self, *args, **kwargs):\n 'get the figure bounding box in display space; kwargs are void'\n return self.bbox\n\n def suptitle(self, t, **kwargs):\n \"\"\"\n Add a centered title to the figure.\n\n kwargs are :class:`matplotlib.text.Text` properties. Using figure\n coordinates, the defaults are:\n\n *x* : 0.5\n The x location of the text in figure coords\n\n *y* : 0.98\n The y location of the text in figure coords\n\n *horizontalalignment* : 'center'\n The horizontal alignment of the text\n\n *verticalalignment* : 'top'\n The vertical alignment of the text\n\n A :class:`matplotlib.text.Text` instance is returned.\n\n Example::\n\n fig.suptitle('this is the figure title', fontsize=12)\n \"\"\"\n x = kwargs.pop('x', 0.5)\n y = kwargs.pop('y', 0.98)\n if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):\n kwargs['horizontalalignment'] = 'center'\n\n if ('verticalalignment' not in kwargs) and ('va' not in kwargs):\n kwargs['verticalalignment'] = 'top'\n\n sup = self.text(x, y, t, **kwargs)\n if self._suptitle is not None:\n self._suptitle.set_text(t)\n self._suptitle.set_position((x, y))\n self._suptitle.update_from(sup)\n else:\n self._suptitle = sup\n return self._suptitle\n\n def set_canvas(self, canvas):\n \"\"\"\n Set the canvas the contains the figure\n\n ACCEPTS: a FigureCanvas instance\n \"\"\"\n self.canvas = canvas\n\n def hold(self, b=None):\n \"\"\"\n Set the hold state. If hold is None (default), toggle the\n hold state. 
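# --- Illustrative aside: a sketch of the shared-x date case that
# ``autofmt_xdate`` above is designed for. Only the bottom subplot keeps
# its (rotated, right-aligned) tick labels.
import datetime
import matplotlib.dates    # registers the date unit converters
from matplotlib.figure import Figure

_fig = Figure()
_ax1 = _fig.add_subplot(211)
_ax2 = _fig.add_subplot(212, sharex=_ax1)
_days = [datetime.date(2013, 1, d) for d in (1, 2, 3)]
_ax1.plot(_days, [1, 3, 2])
_ax2.plot(_days, [2, 1, 3])
_fig.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
# _ax1's x tick labels are now hidden; _ax2's are rotated 30 degrees
# --- end aside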
Else set the hold state to boolean value b.\n\n Eg::\n\n hold() # toggle hold\n hold(True) # hold is on\n hold(False) # hold is off\n \"\"\"\n if b is None:\n self._hold = not self._hold\n else:\n self._hold = b\n\n def figimage(self, X,\n xo=0,\n yo=0,\n alpha=None,\n norm=None,\n cmap=None,\n vmin=None,\n vmax=None,\n origin=None,\n **kwargs):\n \"\"\"\n Adds a non-resampled image to the figure.\n\n call signatures::\n\n figimage(X, **kwargs)\n\n adds a non-resampled array *X* to the figure.\n\n ::\n\n figimage(X, xo, yo)\n\n with pixel offsets *xo*, *yo*,\n\n *X* must be a float array:\n\n * If *X* is MxN, assume luminance (grayscale)\n * If *X* is MxNx3, assume RGB\n * If *X* is MxNx4, assume RGBA\n\n Optional keyword arguments:\n\n ========= =========================================================\n Keyword Description\n ========= =========================================================\n xo or yo An integer, the *x* and *y* image offset in pixels\n cmap a :class:`matplotlib.colors.Colormap` instance, eg\n cm.jet. If *None*, default to the rc ``image.cmap``\n value\n norm a :class:`matplotlib.colors.Normalize` instance. The\n default is normalization(). This scales luminance -> 0-1\n vmin|vmax are used to scale a luminance image to 0-1. If either\n is *None*, the min and max of the luminance values will\n be used. Note if you pass a norm instance, the settings\n for *vmin* and *vmax* will be ignored.\n alpha the alpha blending value, default is *None*\n origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of\n the array is in the upper left or lower left corner of\n the axes. Defaults to the rc image.origin value\n ========= =========================================================\n\n figimage complements the axes image\n (:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled\n to fit the current axes. If you want a resampled image to\n fill the entire figure, you can define an\n :class:`~matplotlib.axes.Axes` with size [0,1,0,1].\n\n An :class:`matplotlib.image.FigureImage` instance is returned.\n\n .. 
plot:: mpl_examples/pylab_examples/figimage_demo.py\n\n\n Additional kwargs are Artist kwargs passed on to\n :class:`~matplotlib.image.FigureImage`\n \"\"\"\n\n if not self._hold:\n self.clf()\n\n im = FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)\n im.set_array(X)\n im.set_alpha(alpha)\n if norm is None:\n im.set_clim(vmin, vmax)\n self.images.append(im)\n return im\n\n def set_size_inches(self, *args, **kwargs):\n \"\"\"\n set_size_inches(w,h, forward=False)\n\n Set the figure size in inches\n\n Usage::\n\n fig.set_size_inches(w,h) # OR\n fig.set_size_inches((w,h) )\n\n optional kwarg *forward=True* will cause the canvas size to be\n automatically updated; eg you can resize the figure window\n from the shell\n\n ACCEPTS: a w,h tuple with w,h in inches\n \"\"\"\n\n forward = kwargs.get('forward', False)\n if len(args) == 1:\n w, h = args[0]\n else:\n w, h = args\n\n dpival = self.dpi\n self.bbox_inches.p1 = w, h\n\n if forward:\n dpival = self.dpi\n canvasw = w * dpival\n canvash = h * dpival\n manager = getattr(self.canvas, 'manager', None)\n if manager is not None:\n manager.resize(int(canvasw), int(canvash))\n\n def get_size_inches(self):\n return self.bbox_inches.p1\n\n def get_edgecolor(self):\n 'Get the edge color of the Figure rectangle'\n return self.patch.get_edgecolor()\n\n def get_facecolor(self):\n 'Get the face color of the Figure rectangle'\n return self.patch.get_facecolor()\n\n def get_figwidth(self):\n 'Return the figwidth as a float'\n return self.bbox_inches.width\n\n def get_figheight(self):\n 'Return the figheight as a float'\n return self.bbox_inches.height\n\n def get_dpi(self):\n 'Return the dpi as a float'\n return self.dpi\n\n def get_frameon(self):\n 'get the boolean indicating frameon'\n return self.frameon\n\n def set_edgecolor(self, color):\n \"\"\"\n Set the edge color of the Figure rectangle\n\n ACCEPTS: any matplotlib color - see help(colors)\n \"\"\"\n self.patch.set_edgecolor(color)\n\n def set_facecolor(self, color):\n \"\"\"\n Set the face color of the Figure rectangle\n\n ACCEPTS: any matplotlib color - see help(colors)\n \"\"\"\n self.patch.set_facecolor(color)\n\n def set_dpi(self, val):\n \"\"\"\n Set the dots-per-inch of the figure\n\n ACCEPTS: float\n \"\"\"\n self.dpi = val\n\n def set_figwidth(self, val):\n \"\"\"\n Set the width of the figure in inches\n\n ACCEPTS: float\n \"\"\"\n self.bbox_inches.x1 = val\n\n def set_figheight(self, val):\n \"\"\"\n Set the height of the figure in inches\n\n ACCEPTS: float\n \"\"\"\n self.bbox_inches.y1 = val\n\n def set_frameon(self, b):\n \"\"\"\n Set whether the figure frame (background) is displayed or invisible\n\n ACCEPTS: boolean\n \"\"\"\n self.frameon = b\n\n def delaxes(self, a):\n 'remove a from the figure and update the current axes'\n self._axstack.remove(a)\n for func in self._axobservers:\n func(self)\n\n def _make_key(self, *args, **kwargs):\n 'make a hashable key out of args and kwargs'\n\n def fixitems(items):\n #items may have arrays and lists in them, so convert them\n # to tuples for the key\n ret = []\n for k, v in items:\n if iterable(v):\n v = tuple(v)\n ret.append((k, v))\n return tuple(ret)\n\n def fixlist(args):\n ret = []\n for a in args:\n if iterable(a):\n a = tuple(a)\n ret.append(a)\n return tuple(ret)\n\n key = fixlist(args), fixitems(kwargs.iteritems())\n return key\n\n @docstring.dedent_interpd\n def add_axes(self, *args, **kwargs):\n \"\"\"\n Add an axes at position *rect* [*left*, *bottom*, *width*,\n *height*] where all quantities are in fractions of figure\n 
width and height. kwargs are legal\n :class:`~matplotlib.axes.Axes` kwargs plus *projection* which\n sets the projection type of the axes. (For backward\n compatibility, ``polar=True`` may also be provided, which is\n equivalent to ``projection='polar'``). Valid values for\n *projection* are: %(projection_names)s. Some of these\n projections support additional kwargs, which may be provided\n to :meth:`add_axes`. Typical usage::\n\n rect = l,b,w,h\n fig.add_axes(rect)\n fig.add_axes(rect, frameon=False, axisbg='g')\n fig.add_axes(rect, polar=True)\n fig.add_axes(rect, projection='polar')\n fig.add_axes(ax)\n\n If the figure already has an axes with the same parameters,\n then it will simply make that axes current and return it. If\n you do not want this behavior, e.g. you want to force the\n creation of a new Axes, you must use a unique set of args and\n kwargs. The axes :attr:`~matplotlib.axes.Axes.label`\n attribute has been exposed for this purpose. Eg., if you want\n two axes that are otherwise identical to be added to the\n figure, make sure you give them unique labels::\n\n fig.add_axes(rect, label='axes1')\n fig.add_axes(rect, label='axes2')\n\n In rare circumstances, add_axes may be called with a single\n argument, an Axes instance already created in the present\n figure but not in the figure's list of axes. For example,\n if an axes has been removed with :meth:`delaxes`, it can\n be restored with::\n\n fig.add_axes(ax)\n\n In all cases, the :class:`~matplotlib.axes.Axes` instance\n will be returned.\n\n In addition to *projection*, the following kwargs are supported:\n\n %(Axes)s\n \"\"\"\n if not len(args):\n return\n\n # shortcut the projection \"key\" modifications later on, if an axes\n # with the exact args/kwargs exists, return it immediately.\n key = self._make_key(*args, **kwargs)\n ax = self._axstack.get(key)\n if ax is not None:\n self.sca(ax)\n return ax\n\n if isinstance(args[0], Axes):\n a = args[0]\n assert(a.get_figure() is self)\n else:\n rect = args[0]\n projection_class, kwargs, key = process_projection_requirements(\n self, *args, **kwargs)\n\n # check that an axes of this type doesn't already exist, if it\n # does, set it as active and return it\n ax = self._axstack.get(key)\n if ax is not None and isinstance(ax, projection_class):\n self.sca(ax)\n return ax\n\n # create the new axes using the axes class given\n a = projection_class(self, rect, **kwargs)\n\n self._axstack.add(key, a)\n self.sca(a)\n return a\n\n @docstring.dedent_interpd\n def add_subplot(self, *args, **kwargs):\n \"\"\"\n Add a subplot. Examples::\n\n fig.add_subplot(111)\n\n # equivalent but more general\n fig.add_subplot(1,1,1)\n\n # add subplot with red background\n fig.add_subplot(212, axisbg='r')\n\n # add a polar subplot\n fig.add_subplot(111, projection='polar')\n\n # add Subplot instance sub\n fig.add_subplot(sub)\n\n *kwargs* are legal :class:`~matplotlib.axes.Axes` kwargs plus\n *projection*, which chooses a projection type for the axes.\n (For backward compatibility, *polar=True* may also be\n provided, which is equivalent to *projection='polar'*). Valid\n values for *projection* are: %(projection_names)s. 
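# --- Illustrative aside: the "same args -> same axes" behavior that
# ``add_axes`` documents above, and the ``label`` kwarg as the escape
# hatch for a genuinely new axes with an identical rect. (Sketch.)
from matplotlib.figure import Figure

_fig = Figure()
_rect = [0.1, 0.1, 0.8, 0.8]
_ax1 = _fig.add_axes(_rect)
_ax2 = _fig.add_axes(_rect)                  # same key: same Axes back
assert _ax1 is _ax2
_ax3 = _fig.add_axes(_rect, label='second')  # unique label: new Axes
assert _ax3 is not _ax1
# --- end aside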
Some of\n these projections\n support additional *kwargs*, which may be provided to\n :meth:`add_axes`.\n\n The :class:`~matplotlib.axes.Axes` instance will be returned.\n\n If the figure already has a subplot with key (*args*,\n *kwargs*) then it will simply make that subplot current and\n return it.\n\n The following kwargs are supported:\n\n %(Axes)s\n \"\"\"\n if not len(args):\n return\n\n if len(args) == 1 and isinstance(args[0], int):\n args = tuple([int(c) for c in str(args[0])])\n\n if isinstance(args[0], SubplotBase):\n\n a = args[0]\n assert(a.get_figure() is self)\n # make a key for the subplot (which includes the axes object id\n # in the hash)\n key = self._make_key(*args, **kwargs)\n else:\n projection_class, kwargs, key = process_projection_requirements(\n self, *args, **kwargs)\n\n # try to find the axes with this key in the stack\n ax = self._axstack.get(key)\n\n if ax is not None:\n if isinstance(ax, projection_class):\n # the axes already existed, so set it as active & return\n self.sca(ax)\n return ax\n else:\n # Undocumented convenience behavior:\n # subplot(111); subplot(111, projection='polar')\n # will replace the first with the second.\n # Without this, add_subplot would be simpler and\n # more similar to add_axes.\n self._axstack.remove(ax)\n\n a = subplot_class_factory(projection_class)(self, *args, **kwargs)\n\n self._axstack.add(key, a)\n self.sca(a)\n return a\n\n def clf(self, keep_observers=False):\n \"\"\"\n Clear the figure.\n\n Set *keep_observers* to True if, for example,\n a gui widget is tracking the axes in the figure.\n \"\"\"\n self.suppressComposite = None\n self.callbacks = cbook.CallbackRegistry()\n\n for ax in tuple(self.axes): # Iterate over the copy.\n ax.cla()\n self.delaxes(ax) # removes ax from self._axstack\n\n toolbar = getattr(self.canvas, 'toolbar', None)\n if toolbar is not None:\n toolbar.update()\n self._axstack.clear()\n self.artists = []\n self.lines = []\n self.patches = []\n self.texts = []\n self.images = []\n self.legends = []\n if not keep_observers:\n self._axobservers = []\n\n def clear(self):\n \"\"\"\n Clear the figure -- synonym for :meth:`clf`.\n \"\"\"\n self.clf()\n\n @allow_rasterization\n def draw(self, renderer):\n \"\"\"\n Render the figure using :class:`matplotlib.backend_bases.RendererBase`\n instance *renderer*.\n \"\"\"\n # draw the figure bounding box, perhaps none for white figure\n if not self.get_visible():\n return\n renderer.open_group('figure')\n\n if self.get_tight_layout() and self.axes:\n try:\n self.tight_layout(renderer)\n except ValueError:\n pass\n # ValueError can occur when resizing a window.\n\n if self.frameon:\n self.patch.draw(renderer)\n\n # a list of (zorder, func_to_call, list_of_args)\n dsu = []\n\n for a in self.patches:\n dsu.append((a.get_zorder(), a, a.draw, [renderer]))\n\n for a in self.lines:\n dsu.append((a.get_zorder(), a, a.draw, [renderer]))\n\n for a in self.artists:\n dsu.append((a.get_zorder(), a, a.draw, [renderer]))\n\n # override the renderer default if self.suppressComposite\n # is not None\n not_composite = renderer.option_image_nocomposite()\n if self.suppressComposite is not None:\n not_composite = self.suppressComposite\n\n if (len(self.images) <= 1 or not_composite or\n not cbook.allequal([im.origin for im in self.images])):\n for a in self.images:\n dsu.append((a.get_zorder(), a, a.draw, [renderer]))\n else:\n # make a composite image blending alpha\n # list of (_image.Image, ox, oy)\n mag = renderer.get_image_magnification()\n ims = [(im.make_image(mag), im.ox, 
im.oy)\n for im in self.images]\n\n im = _image.from_images(self.bbox.height * mag,\n self.bbox.width * mag,\n ims)\n\n im.is_grayscale = False\n l, b, w, h = self.bbox.bounds\n\n def draw_composite():\n gc = renderer.new_gc()\n gc.set_clip_rectangle(self.bbox)\n gc.set_clip_path(self.get_clip_path())\n renderer.draw_image(gc, l, b, im)\n gc.restore()\n\n dsu.append((self.images[0].get_zorder(), self.images[0],\n draw_composite, []))\n\n # render the axes\n for a in self.axes:\n dsu.append((a.get_zorder(), a, a.draw, [renderer]))\n\n # render the figure text\n for a in self.texts:\n dsu.append((a.get_zorder(), a, a.draw, [renderer]))\n\n for a in self.legends:\n dsu.append((a.get_zorder(), a, a.draw, [renderer]))\n\n dsu = [row for row in dsu if not row[1].get_animated()]\n dsu.sort(key=itemgetter(0))\n for zorder, a, func, args in dsu:\n func(*args)\n\n renderer.close_group('figure')\n\n self._cachedRenderer = renderer\n\n self.canvas.draw_event(renderer)\n\n def draw_artist(self, a):\n \"\"\"\n draw :class:`matplotlib.artist.Artist` instance *a* only --\n this is available only after the figure is drawn\n \"\"\"\n assert self._cachedRenderer is not None\n a.draw(self._cachedRenderer)\n\n def get_axes(self):\n return self.axes\n\n def legend(self, handles, labels, *args, **kwargs):\n \"\"\"\n Place a legend in the figure. Labels are a sequence of\n strings, handles is a sequence of\n :class:`~matplotlib.lines.Line2D` or\n :class:`~matplotlib.patches.Patch` instances, and loc can be a\n string or an integer specifying the legend location\n\n USAGE::\n\n legend( (line1, line2, line3),\n ('label1', 'label2', 'label3'),\n 'upper right')\n\n The *loc* location codes are::\n\n 'best' : 0, (currently not supported for figure legends)\n 'upper right' : 1,\n 'upper left' : 2,\n 'lower left' : 3,\n 'lower right' : 4,\n 'right' : 5,\n 'center left' : 6,\n 'center right' : 7,\n 'lower center' : 8,\n 'upper center' : 9,\n 'center' : 10,\n\n *loc* can also be an (x,y) tuple in figure coords, which\n specifies the lower left of the legend box. figure coords are\n (0,0) is the left, bottom of the figure and 1,1 is the right,\n top.\n\n Keyword arguments:\n\n *prop*: [ *None* | FontProperties | dict ]\n A :class:`matplotlib.font_manager.FontProperties`\n instance. If *prop* is a dictionary, a new instance will be\n created with *prop*. If *None*, use rc settings.\n\n *numpoints*: integer\n The number of points in the legend line, default is 4\n\n *scatterpoints*: integer\n The number of points in the legend line, default is 4\n\n *scatteroffsets*: list of floats\n a list of yoffsets for scatter symbols in legend\n\n *markerscale*: [ *None* | scalar ]\n The relative size of legend markers vs. original. If *None*, use rc\n settings.\n\n *fancybox*: [ *None* | *False* | *True* ]\n if *True*, draw a frame with a round fancybox. If *None*, use rc\n\n *shadow*: [ *None* | *False* | *True* ]\n If *True*, draw a shadow behind legend. If *None*, use rc settings.\n\n *ncol* : integer\n number of columns. default is 1\n\n *mode* : [ \"expand\" | *None* ]\n if mode is \"expand\", the legend will be horizontally expanded\n to fill the axes area (or *bbox_to_anchor*)\n\n *title* : string\n the legend title\n\n Padding and spacing between various elements use following keywords\n parameters. The dimensions of these values are given as a fraction\n of the fontsize. 
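# --- Illustrative aside: a figure-level legend as described above, with
# explicit handles, labels, and a location string. (Sketch.)
from matplotlib.figure import Figure

_fig = Figure()
_ax = _fig.add_subplot(111)
_line1, = _ax.plot([0, 1], [0, 1])
_line2, = _ax.plot([0, 1], [1, 0])
_leg = _fig.legend((_line1, _line2), ('rising', 'falling'), 'upper right')
assert _leg in _fig.legends
# --- end aside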
Values from rcParams will be used if None.\n\n ================ ====================================================\n Keyword Description\n ================ ====================================================\n borderpad the fractional whitespace inside the legend border\n labelspacing the vertical space between the legend entries\n handlelength the length of the legend handles\n handletextpad the pad between the legend handle and text\n borderaxespad the pad between the axes and legend border\n columnspacing the spacing between columns\n ================ ====================================================\n\n .. Note:: Not all kinds of artist are supported by the legend.\n See LINK (FIXME) for details.\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/figlegend_demo.py\n \"\"\"\n l = Legend(self, handles, labels, *args, **kwargs)\n self.legends.append(l)\n return l\n\n @docstring.dedent_interpd\n def text(self, x, y, s, *args, **kwargs):\n \"\"\"\n Add text to figure.\n\n Call signature::\n\n text(x, y, s, fontdict=None, **kwargs)\n\n Add text to figure at location *x*, *y* (relative 0-1\n coords). See :func:`~matplotlib.pyplot.text` for the meaning\n of the other arguments.\n\n kwargs control the :class:`~matplotlib.text.Text` properties:\n\n %(Text)s\n \"\"\"\n\n override = _process_text_args({}, *args, **kwargs)\n t = Text(x=x, y=y, text=s)\n\n t.update(override)\n self._set_artist_props(t)\n self.texts.append(t)\n return t\n\n def _set_artist_props(self, a):\n if a != self:\n a.set_figure(self)\n a.set_transform(self.transFigure)\n\n @docstring.dedent_interpd\n def gca(self, **kwargs):\n \"\"\"\n Return the current axes, creating one if necessary\n\n The following kwargs are supported for ensuring the returned axes\n adheres to the given projection etc., and for axes creation if\n the active axes does not exist:\n\n %(Axes)s\n\n \"\"\"\n ckey, cax = self._axstack.current_key_axes()\n # if there exists an axes on the stack see if it maches\n # the desired axes configuration\n if cax is not None:\n\n # if no kwargs are given just return the current axes\n # this is a convenience for gca() on axes such as polar etc.\n if not kwargs:\n return cax\n\n # if the user has specified particular projection detail\n # then build up a key which can represent this\n else:\n # we don't want to modify the original kwargs\n # so take a copy so that we can do what we like to it\n kwargs_copy = kwargs.copy()\n projection_class, _, key = process_projection_requirements(\n self, **kwargs_copy)\n\n # let the returned axes have any gridspec by removing it from\n # the key\n ckey = ckey[1:]\n key = key[1:]\n\n # if the cax matches this key then return the axes, otherwise\n # continue and a new axes will be created\n if key == ckey and isinstance(cax, projection_class):\n return cax\n\n # no axes found, so create one which spans the figure\n return self.add_subplot(1, 1, 1, **kwargs)\n\n def sca(self, a):\n 'Set the current axes to be a and return a'\n self._axstack.bubble(a)\n for func in self._axobservers:\n func(self)\n return a\n\n def _gci(self):\n \"\"\"\n helper for :func:`~matplotlib.pyplot.gci`;\n do not use elsewhere.\n \"\"\"\n for ax in reversed(self.axes):\n im = ax._gci()\n if im is not None:\n return im\n return None\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # the axobservers cannot currently be pickled.\n # Additionally, the canvas cannot currently be pickled, but this has\n # the benefit of meaning that a figure can be detached from one canvas,\n # and 
re-attached to another.\n for attr_to_pop in ('_axobservers', 'show',\n 'canvas', '_cachedRenderer'):\n state.pop(attr_to_pop, None)\n\n # add version information to the state\n state['__mpl_version__'] = _mpl_version\n\n # check to see if the figure has a manager and whether it is registered\n # with pyplot\n if getattr(self.canvas, 'manager', None) is not None:\n manager = self.canvas.manager\n import matplotlib._pylab_helpers\n if manager in matplotlib._pylab_helpers.Gcf.figs.values():\n state['_restore_to_pylab'] = True\n\n return state\n\n def __setstate__(self, state):\n version = state.pop('__mpl_version__')\n restore_to_pylab = state.pop('_restore_to_pylab', False)\n\n if version != _mpl_version:\n import warnings\n warnings.warn(\"This figure was saved with matplotlib version %s \"\n \"and is unlikely to function correctly.\" %\n (version, ))\n\n self.__dict__ = state\n\n # re-initialise some of the unstored state information\n self._axobservers = []\n self.canvas = None\n\n if restore_to_pylab:\n # lazy import to avoid circularity\n import matplotlib.pyplot as plt\n import matplotlib._pylab_helpers as pylab_helpers\n allnums = plt.get_fignums()\n num = max(allnums) + 1 if allnums else 1\n mgr = plt._backend_mod.new_figure_manager_given_figure(num, self)\n\n # XXX The following is a copy and paste from pyplot. Consider\n # factoring to pylab_helpers\n\n if self.get_label():\n mgr.set_window_title(self.get_label())\n\n # make this figure current on button press event\n def make_active(event):\n pylab_helpers.Gcf.set_active(mgr)\n\n mgr._cidgcf = mgr.canvas.mpl_connect('button_press_event',\n make_active)\n\n pylab_helpers.Gcf.set_active(mgr)\n self.number = num\n\n plt.draw_if_interactive()\n\n def add_axobserver(self, func):\n 'whenever the axes state change, ``func(self)`` will be called'\n self._axobservers.append(func)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save the current figure.\n\n Call signature::\n\n savefig(fname, dpi=None, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format=None,\n transparent=False, bbox_inches=None, pad_inches=0.1)\n\n The output formats available depend on the backend being used.\n\n Arguments:\n\n *fname*:\n A string containing a path to a filename, or a Python\n file-like object, or possibly some backend-dependent object\n such as :class:`~matplotlib.backends.backend_pdf.PdfPages`.\n\n If *format* is *None* and *fname* is a string, the output\n format is deduced from the extension of the filename. If\n the filename has no extension, the value of the rc parameter\n ``savefig.format`` is used.\n\n If *fname* is not a string, remember to specify *format* to\n ensure that the correct backend is used.\n\n Keyword arguments:\n\n *dpi*: [ *None* | ``scalar > 0`` ]\n The resolution in dots per inch. If *None* it will default to\n the value ``savefig.dpi`` in the matplotlibrc file.\n\n *facecolor*, *edgecolor*:\n the colors of the figure rectangle\n\n *orientation*: [ 'landscape' | 'portrait' ]\n not supported on all backends; currently only on postscript output\n\n *papertype*:\n One of 'letter', 'legal', 'executive', 'ledger', 'a0' through\n 'a10', 'b0' through 'b10'. Only supported for postscript\n output.\n\n *format*:\n One of the file extensions supported by the active\n backend. 
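# --- Illustrative aside: because ``__getstate__`` above drops the canvas
# and cached renderer, a Figure can round-trip through pickle; the loaded
# copy just needs a fresh canvas before it can be drawn. (Sketch.)
import pickle
from matplotlib.figure import Figure

_fig = Figure()
_fig.add_subplot(111).plot([1, 2, 3])
_fig2 = pickle.loads(pickle.dumps(_fig))
assert _fig2.canvas is None        # attach a new backend canvas to draw
assert len(_fig2.axes) == 1
# --- end aside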
Most backends support png, pdf, ps, eps and svg.\n\n *transparent*:\n If *True*, the axes patches will all be transparent; the\n figure patch will also be transparent unless facecolor\n and/or edgecolor are specified via kwargs.\n This is useful, for example, for displaying\n a plot on top of a colored background on a web page. The\n transparency of these patches will be restored to their\n original values upon exit of this function.\n\n *bbox_inches*:\n Bbox in inches. Only the given portion of the figure is\n saved. If 'tight', try to figure out the tight bbox of\n the figure.\n\n *pad_inches*:\n Amount of padding around the figure when bbox_inches is\n 'tight'.\n\n *bbox_extra_artists*:\n A list of extra artists that will be considered when the\n tight bbox is calculated.\n\n \"\"\"\n\n kwargs.setdefault('dpi', rcParams['savefig.dpi'])\n\n transparent = kwargs.pop('transparent', False)\n if transparent:\n kwargs.setdefault('facecolor', 'none')\n kwargs.setdefault('edgecolor', 'none')\n original_axes_colors = []\n for ax in self.axes:\n patch = ax.patch\n original_axes_colors.append((patch.get_facecolor(),\n patch.get_edgecolor()))\n patch.set_facecolor('none')\n patch.set_edgecolor('none')\n else:\n kwargs.setdefault('facecolor', rcParams['savefig.facecolor'])\n kwargs.setdefault('edgecolor', rcParams['savefig.edgecolor'])\n\n self.canvas.print_figure(*args, **kwargs)\n\n if transparent:\n for ax, cc in zip(self.axes, original_axes_colors):\n ax.patch.set_facecolor(cc[0])\n ax.patch.set_edgecolor(cc[1])\n\n @docstring.dedent_interpd\n def colorbar(self, mappable, cax=None, ax=None, **kw):\n \"\"\"\n Create a colorbar for a ScalarMappable instance, *mappable*.\n\n Documentation for the pylab thin wrapper:\n %(colorbar_doc)s\n \"\"\"\n if ax is None:\n ax = self.gca()\n use_gridspec = kw.pop(\"use_gridspec\", True)\n if cax is None:\n if use_gridspec and isinstance(ax, SubplotBase):\n cax, kw = cbar.make_axes_gridspec(ax, **kw)\n else:\n cax, kw = cbar.make_axes(ax, **kw)\n cax.hold(True)\n cb = cbar.colorbar_factory(cax, mappable, **kw)\n\n self.sca(ax)\n return cb\n\n def subplots_adjust(self, *args, **kwargs):\n \"\"\"\n Call signature::\n\n subplots_adjust(left=None, bottom=None, right=None, top=None,\n wspace=None, hspace=None)\n\n Update the :class:`SubplotParams` with *kwargs* (defaulting to rc when\n *None*) and update the subplot locations\n\n \"\"\"\n self.subplotpars.update(*args, **kwargs)\n import matplotlib.axes\n for ax in self.axes:\n if not isinstance(ax, matplotlib.axes.SubplotBase):\n # Check if sharing a subplots axis\n if (ax._sharex is not None and\n isinstance(ax._sharex,\n matplotlib.axes.SubplotBase)):\n ax._sharex.update_params()\n ax.set_position(ax._sharex.figbox)\n elif (ax._sharey is not None and\n isinstance(ax._sharey, matplotlib.axes.SubplotBase)):\n ax._sharey.update_params()\n ax.set_position(ax._sharey.figbox)\n else:\n ax.update_params()\n ax.set_position(ax.figbox)\n\n def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1,\n mouse_pop=3, mouse_stop=2):\n \"\"\"\n Call signature::\n\n ginput(self, n=1, timeout=30, show_clicks=True,\n mouse_add=1, mouse_pop=3, mouse_stop=2)\n\n Blocking call to interact with the figure.\n\n This will wait for *n* clicks from the user and return a list of the\n coordinates of each click.\n\n If *timeout* is zero or negative, does not timeout.\n\n If *n* is zero or negative, accumulate clicks until a middle click\n (or potentially both mouse buttons at once) terminates the input.\n\n Right clicking cancels last 
input.\n\n The buttons used for the various actions (adding points, removing\n points, terminating the inputs) can be overriden via the\n arguments *mouse_add*, *mouse_pop* and *mouse_stop*, that give\n the associated mouse button: 1 for left, 2 for middle, 3 for\n right.\n\n The keyboard can also be used to select points in case your mouse\n does not have one or more of the buttons. The delete and backspace\n keys act like right clicking (i.e., remove last point), the enter key\n terminates input and any other key (not already used by the window\n manager) selects a point.\n \"\"\"\n\n blocking_mouse_input = BlockingMouseInput(self,\n mouse_add=mouse_add,\n mouse_pop=mouse_pop,\n mouse_stop=mouse_stop)\n return blocking_mouse_input(n=n, timeout=timeout,\n show_clicks=show_clicks)\n\n def waitforbuttonpress(self, timeout=-1):\n \"\"\"\n Call signature::\n\n waitforbuttonpress(self, timeout=-1)\n\n Blocking call to interact with the figure.\n\n This will return True is a key was pressed, False if a mouse\n button was pressed and None if *timeout* was reached without\n either being pressed.\n\n If *timeout* is negative, does not timeout.\n \"\"\"\n\n blocking_input = BlockingKeyMouseInput(self)\n return blocking_input(timeout=timeout)\n\n def get_default_bbox_extra_artists(self):\n bbox_extra_artists = [t for t in self.texts if t.get_visible()]\n for ax in self.axes:\n if ax.get_visible():\n bbox_extra_artists.extend(ax.get_default_bbox_extra_artists())\n return bbox_extra_artists\n\n def get_tightbbox(self, renderer):\n \"\"\"\n Return a (tight) bounding box of the figure in inches.\n\n It only accounts axes title, axis labels, and axis\n ticklabels. Needs improvement.\n \"\"\"\n\n bb = []\n for ax in self.axes:\n if ax.get_visible():\n bb.append(ax.get_tightbbox(renderer))\n\n _bbox = Bbox.union([b for b in bb if b.width != 0 or b.height != 0])\n\n bbox_inches = TransformedBbox(_bbox,\n Affine2D().scale(1. / self.dpi))\n\n return bbox_inches\n\n def tight_layout(self, renderer=None, pad=1.08, h_pad=None,\n w_pad=None, rect=None):\n \"\"\"\n Adjust subplot parameters to give specified padding.\n\n Parameters:\n\n *pad* : float\n padding between the figure edge and the edges of subplots,\n as a fraction of the font-size.\n *h_pad*, *w_pad* : float\n padding (height/width) between edges of adjacent subplots.\n Defaults to `pad_inches`.\n *rect* : if rect is given, it is interpreted as a rectangle\n (left, bottom, right, top) in the normalized figure\n coordinate that the whole subplots area (including\n labels) will fit into. Default is (0, 0, 1, 1).\n \"\"\"\n\n from tight_layout import (get_renderer, get_tight_layout_figure,\n get_subplotspec_list)\n\n subplotspec_list = get_subplotspec_list(self.axes)\n if None in subplotspec_list:\n warnings.warn(\"This figure includes Axes that are not \"\n \"compatible with tight_layout, so its \"\n \"results might be incorrect.\")\n\n if renderer is None:\n renderer = get_renderer(self)\n\n kwargs = get_tight_layout_figure(self, self.axes, subplotspec_list,\n renderer,\n pad=pad, h_pad=h_pad, w_pad=w_pad,\n rect=rect)\n\n self.subplots_adjust(**kwargs)\n\n\ndef figaspect(arg):\n \"\"\"\n Create a figure with specified aspect ratio. If *arg* is a number,\n use that aspect ratio. If *arg* is an array, figaspect will\n determine the width and height for a figure that would fit array\n preserving aspect ratio. The figure width, height in inches are\n returned. 
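# --- Illustrative aside: ``tight_layout`` above, used through the OO
# interface. A canvas is attached so a renderer is available, and ``rect``
# reserves headroom (e.g. for a suptitle). (Sketch; the output file name
# is arbitrary.)
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

_fig = Figure()
FigureCanvasAgg(_fig)
for _i in range(1, 5):
    _ax = _fig.add_subplot(2, 2, _i)
    _ax.set_xlabel('x%d' % _i)
    _ax.set_ylabel('y%d' % _i)
_fig.tight_layout(pad=1.08, rect=(0, 0, 1, 0.95))
_fig.savefig('grid.png')
# --- end aside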
Be sure to create an axes with equal width and height,\n eg\n\n Example usage::\n\n # make a figure twice as tall as it is wide\n w, h = figaspect(2.)\n fig = Figure(figsize=(w,h))\n ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n ax.imshow(A, **kwargs)\n\n\n # make a figure with the proper aspect for an array\n A = rand(5,3)\n w, h = figaspect(A)\n fig = Figure(figsize=(w,h))\n ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n ax.imshow(A, **kwargs)\n\n Thanks to Fernando Perez for this function\n \"\"\"\n\n isarray = hasattr(arg, 'shape')\n\n # min/max sizes to respect when autoscaling. If John likes the idea, they\n # could become rc parameters, for now they're hardwired.\n figsize_min = np.array((4.0, 2.0)) # min length for width/height\n figsize_max = np.array((16.0, 16.0)) # max length for width/height\n #figsize_min = rcParams['figure.figsize_min']\n #figsize_max = rcParams['figure.figsize_max']\n\n # Extract the aspect ratio of the array\n if isarray:\n nr, nc = arg.shape[:2]\n arr_ratio = float(nr) / nc\n else:\n arr_ratio = float(arg)\n\n # Height of user figure defaults\n fig_height = rcParams['figure.figsize'][1]\n\n # New size for the figure, keeping the aspect ratio of the caller\n newsize = np.array((fig_height / arr_ratio, fig_height))\n\n # Sanity checks, don't drop either dimension below figsize_min\n newsize /= min(1.0, *(newsize / figsize_min))\n\n # Avoid humongous windows as well\n newsize /= max(1.0, *(newsize / figsize_max))\n\n # Finally, if we have a really funky aspect ratio, break it but respect\n # the min/max dimensions (we don't want figures 10 feet tall!)\n newsize = np.clip(newsize, figsize_min, figsize_max)\n return newsize\n\ndocstring.interpd.update(Figure=martist.kwdoc(Figure))\n", "\"\"\"\nThis is an object-oriented plotting library.\n\nA procedural interface is provided by the companion pyplot module,\nwhich may be imported directly, e.g::\n\n from matplotlib.pyplot import *\n\nTo include numpy functions too, use::\n\n from pylab import *\n\nor using ipython::\n\n ipython -pylab\n\nFor the most part, direct use of the object-oriented library is\nencouraged when programming; pyplot is primarily for working\ninteractively. The\nexceptions are the pyplot commands :func:`~matplotlib.pyplot.figure`,\n:func:`~matplotlib.pyplot.subplot`,\n:func:`~matplotlib.pyplot.subplots`,\n:func:`~matplotlib.backends.backend_qt4agg.show`, and\n:func:`~pyplot.savefig`, which can greatly simplify scripting.\n\nModules include:\n\n :mod:`matplotlib.axes`\n defines the :class:`~matplotlib.axes.Axes` class. Most pylab\n commands are wrappers for :class:`~matplotlib.axes.Axes`\n methods. 
The axes module is the highest level of OO access to\n the library.\n\n :mod:`matplotlib.figure`\n defines the :class:`~matplotlib.figure.Figure` class.\n\n :mod:`matplotlib.artist`\n defines the :class:`~matplotlib.artist.Artist` base class for\n all classes that draw things.\n\n :mod:`matplotlib.lines`\n defines the :class:`~matplotlib.lines.Line2D` class for\n drawing lines and markers\n\n :mod:`matplotlib.patches`\n defines classes for drawing polygons\n\n :mod:`matplotlib.text`\n defines the :class:`~matplotlib.text.Text`,\n :class:`~matplotlib.text.TextWithDash`, and\n :class:`~matplotlib.text.Annotation` classes\n\n :mod:`matplotlib.image`\n defines the :class:`~matplotlib.image.AxesImage` and\n :class:`~matplotlib.image.FigureImage` classes\n\n :mod:`matplotlib.collections`\n classes for efficient drawing of groups of lines or polygons\n\n :mod:`matplotlib.colors`\n classes for interpreting color specifications and for making\n colormaps\n\n :mod:`matplotlib.cm`\n colormaps and the :class:`~matplotlib.cm.ScalarMappable`\n mixin class for providing color mapping functionality to other\n classes\n\n :mod:`matplotlib.ticker`\n classes for calculating tick mark locations and for formatting\n tick labels\n\n :mod:`matplotlib.backends`\n a subpackage with modules for various gui libraries and output\n formats\n\nThe base matplotlib namespace includes:\n\n :data:`~matplotlib.rcParams`\n a global dictionary of default configuration settings. It is\n initialized by code which may be overridden by a matplotlibrc\n file.\n\n :func:`~matplotlib.rc`\n a function for setting groups of rcParams values\n\n :func:`~matplotlib.use`\n a function for setting the matplotlib backend. If used, this\n function must be called immediately after importing matplotlib\n for the first time. In particular, it must be called\n **before** importing pylab (if pylab is imported).\n\nmatplotlib was initially written by John D. 
Hunter (1968-2012) and is now\ndeveloped and maintained by a host of others.\n\nOccasionally the internal documentation (python docstrings) will refer\nto MATLAB&reg;, a registered trademark of The MathWorks, Inc.\n\n\"\"\"\nfrom __future__ import print_function\n\n__version__ = '1.3.x'\n__version__numpy__ = '1.4' # minimum required numpy version\n\nimport os, re, shutil, subprocess, sys, warnings\nimport distutils.sysconfig\nimport distutils.version\n\n# cbook must import matplotlib only within function\n# definitions, so it is safe to import from it here.\nfrom matplotlib.cbook import MatplotlibDeprecationWarning\nfrom matplotlib.cbook import is_string_like\n\ntry:\n reload\nexcept NameError:\n # Python 3\n from imp import reload\n\n# Needed for toolkit setuptools support\nif 0:\n try:\n __import__('pkg_resources').declare_namespace(__name__)\n except ImportError:\n pass # must not have setuptools\n\nif not hasattr(sys, 'argv'): # for modpython\n sys.argv = ['modpython']\n\n\n\"\"\"\nManage user customizations through a rc file.\n\nThe default file location is given in the following order\n\n - environment variable MATPLOTLIBRC\n\n - HOME/.matplotlib/matplotlibrc if HOME is defined\n\n - PATH/matplotlibrc where PATH is the return value of\n get_data_path()\n\"\"\"\n\nimport sys, os, tempfile\n\nif sys.version_info[0] >= 3:\n def ascii(s): return bytes(s, 'ascii')\n\n def byte2str(b): return b.decode('ascii')\n\nelse:\n ascii = str\n\n def byte2str(b): return b\n\n\nfrom matplotlib.rcsetup import (defaultParams,\n validate_backend,\n validate_toolbar)\n\nmajor, minor1, minor2, s, tmp = sys.version_info\n_python24 = (major == 2 and minor1 >= 4) or major >= 3\n\n# the havedate check was a legacy from old matplotlib which preceeded\n# datetime support\n_havedate = True\n\n#try:\n# import pkg_resources # pkg_resources is part of setuptools\n#except ImportError: _have_pkg_resources = False\n#else: _have_pkg_resources = True\n\nif not _python24:\n raise ImportError('matplotlib requires Python 2.4 or later')\n\n\nimport numpy\nfrom distutils import version\nexpected_version = version.LooseVersion(__version__numpy__)\nfound_version = version.LooseVersion(numpy.__version__)\nif not found_version >= expected_version:\n raise ImportError(\n 'numpy %s or later is required; you have %s' % (\n __version__numpy__, numpy.__version__))\ndel version\n\n\ndef _is_writable_dir(p):\n \"\"\"\n p is a string pointing to a putative writable dir -- return True p\n is such a string, else False\n \"\"\"\n try: p + '' # test is string like\n except TypeError: return False\n try:\n t = tempfile.TemporaryFile(dir=p)\n try:\n t.write(ascii('1'))\n finally:\n t.close()\n except OSError: return False\n else: return True\n\nclass Verbose:\n \"\"\"\n A class to handle reporting. Set the fileo attribute to any file\n instance to handle the output. 
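# --- Illustrative aside: typical use of the module-level ``verbose``
# reporter instantiated below. A message is emitted only when the
# configured level is at least as chatty as the message's level. (Sketch.)
import matplotlib

matplotlib.verbose.set_fileo('sys.stdout')
matplotlib.verbose.set_level('helpful')
matplotlib.verbose.report('loaded a resource', level='helpful')  # printed
matplotlib.verbose.report('gory detail', level='debug')          # suppressed
# --- end aside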
Default is sys.stdout\n \"\"\"\n levels = ('silent', 'helpful', 'debug', 'debug-annoying')\n vald = dict( [(level, i) for i,level in enumerate(levels)])\n\n # parse the verbosity from the command line; flags look like\n # --verbose-silent or --verbose-helpful\n _commandLineVerbose = None\n\n for arg in sys.argv[1:]:\n if not arg.startswith('--verbose-'):\n continue\n level_str = arg[10:]\n # If it doesn't match one of ours, then don't even\n # bother noting it, we are just a 3rd-party library\n # to somebody else's script.\n if level_str in levels:\n _commandLineVerbose = level_str\n\n def __init__(self):\n self.set_level('silent')\n self.fileo = sys.stdout\n\n def set_level(self, level):\n 'set the verbosity to one of the Verbose.levels strings'\n\n if self._commandLineVerbose is not None:\n level = self._commandLineVerbose\n if level not in self.levels:\n warnings.warn('matplotlib: unrecognized --verbose-* string \"%s\".'\n ' Legal values are %s' % (level, self.levels))\n else:\n self.level = level\n\n def set_fileo(self, fname):\n std = {\n 'sys.stdout': sys.stdout,\n 'sys.stderr': sys.stderr,\n }\n if fname in std:\n self.fileo = std[fname]\n else:\n try:\n fileo = open(fname, 'w')\n except IOError:\n raise ValueError('Verbose object could not open log file \"%s\" for writing.\\nCheck your matplotlibrc verbose.fileo setting'%fname)\n else:\n self.fileo = fileo\n\n def report(self, s, level='helpful'):\n \"\"\"\n print message s to self.fileo if self.level>=level. Return\n value indicates whether a message was issued\n\n \"\"\"\n if self.ge(level):\n print(s, file=self.fileo)\n return True\n return False\n\n def wrap(self, fmt, func, level='helpful', always=True):\n \"\"\"\n return a callable function that wraps func and reports it\n output through the verbose handler if current verbosity level\n is higher than level\n\n if always is True, the report will occur on every function\n call; otherwise only on the first time the function is called\n \"\"\"\n assert callable(func)\n def wrapper(*args, **kwargs):\n ret = func(*args, **kwargs)\n\n if (always or not wrapper._spoke):\n spoke = self.report(fmt%ret, level)\n if not wrapper._spoke: wrapper._spoke = spoke\n return ret\n wrapper._spoke = False\n wrapper.__doc__ = func.__doc__\n return wrapper\n\n def ge(self, level):\n 'return true if self.level is >= level'\n return self.vald[self.level]>=self.vald[level]\n\n\nverbose=Verbose()\n\n\n\ndef checkdep_dvipng():\n try:\n s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n line = s.stdout.readlines()[1]\n v = byte2str(line.split()[-1])\n return v\n except (IndexError, ValueError, OSError):\n return None\n\ndef checkdep_ghostscript():\n try:\n if sys.platform == 'win32':\n command_args = ['gswin32c', '--version']\n else:\n command_args = ['gs', '--version']\n s = subprocess.Popen(command_args, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n v = byte2str(s.stdout.read()[:-1])\n return v\n except (IndexError, ValueError, OSError):\n return None\n\ndef checkdep_tex():\n try:\n s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n line = byte2str(s.stdout.readlines()[0])\n pattern = '3\\.1\\d+'\n match = re.search(pattern, line)\n v = match.group(0)\n return v\n except (IndexError, ValueError, AttributeError, OSError):\n return None\n\ndef checkdep_pdftops():\n try:\n s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for line in s.stderr:\n if b'version' in line:\n v = 
byte2str(line.split()[-1])\n return v\n except (IndexError, ValueError, UnboundLocalError, OSError):\n return None\n\ndef checkdep_inkscape():\n try:\n s = subprocess.Popen(['inkscape','-V'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for line in s.stdout:\n if b'Inkscape' in line:\n v = byte2str(line.split()[1])\n break\n return v\n except (IndexError, ValueError, UnboundLocalError, OSError):\n return None\n\ndef checkdep_xmllint():\n try:\n s = subprocess.Popen(['xmllint','--version'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for line in s.stderr:\n if b'version' in line:\n v = byte2str(line.split()[-1])\n break\n return v\n except (IndexError, ValueError, UnboundLocalError, OSError):\n return None\n\ndef compare_versions(a, b):\n \"return True if a is greater than or equal to b\"\n if a:\n a = distutils.version.LooseVersion(a)\n b = distutils.version.LooseVersion(b)\n if a>=b: return True\n else: return False\n else: return False\n\ndef checkdep_ps_distiller(s):\n if not s:\n return False\n\n flag = True\n gs_req = '7.07'\n gs_sugg = '7.07'\n gs_v = checkdep_ghostscript()\n if compare_versions(gs_v, gs_sugg): pass\n elif compare_versions(gs_v, gs_req):\n verbose.report(('ghostscript-%s found. ghostscript-%s or later '\n 'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))\n else:\n flag = False\n warnings.warn(('matplotlibrc ps.usedistiller option can not be used '\n 'unless ghostscript-%s or later is installed on your system') % gs_req)\n\n if s == 'xpdf':\n pdftops_req = '3.0'\n pdftops_req_alt = '0.9' # poppler version numbers, ugh\n pdftops_v = checkdep_pdftops()\n if compare_versions(pdftops_v, pdftops_req):\n pass\n elif compare_versions(pdftops_v, pdftops_req_alt) and not \\\n compare_versions(pdftops_v, '1.0'):\n pass\n else:\n flag = False\n warnings.warn(('matplotlibrc ps.usedistiller can not be set to '\n 'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)\n\n if flag:\n return s\n else:\n return False\n\ndef checkdep_usetex(s):\n if not s:\n return False\n\n tex_req = '3.1415'\n gs_req = '7.07'\n gs_sugg = '7.07'\n dvipng_req = '1.5'\n flag = True\n\n tex_v = checkdep_tex()\n if compare_versions(tex_v, tex_req): pass\n else:\n flag = False\n warnings.warn(('matplotlibrc text.usetex option can not be used '\n 'unless TeX-%s or later is '\n 'installed on your system') % tex_req)\n\n dvipng_v = checkdep_dvipng()\n if compare_versions(dvipng_v, dvipng_req): pass\n else:\n flag = False\n warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '\n 'backend unless dvipng-1.5 or later is '\n 'installed on your system')\n\n gs_v = checkdep_ghostscript()\n if compare_versions(gs_v, gs_sugg): pass\n elif compare_versions(gs_v, gs_req):\n verbose.report(('ghostscript-%s found. 
ghostscript-%s or later is '\n 'recommended for use with the text.usetex '\n 'option.') % (gs_v, gs_sugg))\n else:\n flag = False\n warnings.warn(('matplotlibrc text.usetex can not be used '\n 'unless ghostscript-%s or later is '\n 'installed on your system') % gs_req)\n\n return flag\n\n\ndef _get_home():\n \"\"\"Find user's home directory if possible.\n Otherwise raise error.\n\n :see: http://mail.python.org/pipermail/python-list/2005-February/263921.html\n \"\"\"\n path=''\n try:\n path=os.path.expanduser(\"~\")\n except:\n pass\n if not os.path.isdir(path):\n for evar in ('HOME', 'USERPROFILE', 'TMP'):\n try:\n path = os.environ[evar]\n if os.path.isdir(path):\n break\n except: pass\n if path:\n return path\n else:\n raise RuntimeError('please define environment variable $HOME')\n\n\ndef _create_tmp_config_dir():\n \"\"\"\n If the config directory can not be created, create a temporary\n directory.\n \"\"\"\n import getpass\n import tempfile\n\n tempdir = os.path.join(\n tempfile.gettempdir(), 'matplotlib-%s' % getpass.getuser())\n os.environ['MPLCONFIGDIR'] = tempdir\n\n return tempdir\n\n\nget_home = verbose.wrap('$HOME=%s', _get_home, always=False)\n\ndef _get_configdir():\n \"\"\"\n Return the string representing the configuration directory.\n\n Default is HOME/.matplotlib. You can override this with the\n MPLCONFIGDIR environment variable. If the default is not\n writable, and MPLCONFIGDIR is not set, then\n tempfile.gettempdir() is used to provide a directory in\n which a matplotlib subdirectory is created as the configuration\n directory.\n \"\"\"\n\n configdir = os.environ.get('MPLCONFIGDIR')\n if configdir is not None:\n if not os.path.exists(configdir):\n os.makedirs(configdir)\n if not _is_writable_dir(configdir):\n return _create_tmp_config_dir()\n return configdir\n\n h = get_home()\n p = os.path.join(get_home(), '.matplotlib')\n\n if os.path.exists(p):\n if not _is_writable_dir(p):\n return _create_tmp_config_dir()\n else:\n if not _is_writable_dir(h):\n return _create_tmp_config_dir()\n from matplotlib.cbook import mkdirs\n mkdirs(p)\n\n return p\nget_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)\n\n\ndef _get_data_path():\n 'get the path to matplotlib data'\n\n if 'MATPLOTLIBDATA' in os.environ:\n path = os.environ['MATPLOTLIBDATA']\n if not os.path.isdir(path):\n raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')\n return path\n\n path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])\n if os.path.isdir(path):\n return path\n\n # setuptools' namespace_packages may highjack this init file\n # so need to try something known to be in matplotlib, not basemap\n import matplotlib.afm\n path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])\n if os.path.isdir(path):\n return path\n\n # py2exe zips pure python, so still need special check\n if getattr(sys,'frozen',None):\n exe_path = os.path.dirname(sys.executable)\n path = os.path.join(exe_path, 'mpl-data')\n if os.path.isdir(path):\n return path\n\n # Try again assuming we need to step up one more directory\n path = os.path.join(os.path.split(exe_path)[0], 'mpl-data')\n if os.path.isdir(path):\n return path\n\n # Try again assuming sys.path[0] is a dir not a exe\n path = os.path.join(sys.path[0], 'mpl-data')\n if os.path.isdir(path):\n return path\n\n raise RuntimeError('Could not find the matplotlib data files')\n\ndef _get_data_path_cached():\n if defaultParams['datapath'][0] is None:\n defaultParams['datapath'][0] = _get_data_path()\n return 
defaultParams['datapath'][0]\n\nget_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,\n always=False)\n\n\n\ndef get_example_data(fname):\n \"\"\"\n get_example_data is deprecated -- use matplotlib.cbook.get_sample_data instead\n \"\"\"\n raise NotImplementedError('get_example_data is deprecated -- use matplotlib.cbook.get_sample_data instead')\n\n\ndef get_py2exe_datafiles():\n datapath = get_data_path()\n head, tail = os.path.split(datapath)\n d = {}\n for root, dirs, files in os.walk(datapath):\n # Need to explicitly remove cocoa_agg files or py2exe complains\n # NOTE I dont know why, but do as previous version\n if 'Matplotlib.nib' in files:\n files.remove('Matplotlib.nib')\n files = [os.path.join(root, filename) for filename in files]\n root = root.replace(tail, 'mpl-data')\n root = root[root.index('mpl-data'):]\n d[root] = files\n return list(d.items())\n\n\ndef matplotlib_fname():\n \"\"\"\n Return the path to the rc file\n\n Search order:\n\n * current working dir\n * environ var MATPLOTLIBRC\n * HOME/.matplotlib/matplotlibrc\n * MATPLOTLIBDATA/matplotlibrc\n\n\n \"\"\"\n\n oldname = os.path.join( os.getcwd(), '.matplotlibrc')\n if os.path.exists(oldname):\n print(\"\"\"\\\nWARNING: Old rc filename \".matplotlibrc\" found in working dir\n and and renamed to new default rc file name \"matplotlibrc\"\n (no leading\"dot\"). \"\"\", file=sys.stderr)\n shutil.move('.matplotlibrc', 'matplotlibrc')\n\n home = get_home()\n oldname = os.path.join( home, '.matplotlibrc')\n if os.path.exists(oldname):\n configdir = get_configdir()\n newname = os.path.join(configdir, 'matplotlibrc')\n print(\"\"\"\\\nWARNING: Old rc filename \"%s\" found and renamed to\n new default rc file name \"%s\".\"\"\"%(oldname, newname), file=sys.stderr)\n\n shutil.move(oldname, newname)\n\n\n fname = os.path.join( os.getcwd(), 'matplotlibrc')\n if os.path.exists(fname): return fname\n\n if 'MATPLOTLIBRC' in os.environ:\n path = os.environ['MATPLOTLIBRC']\n if os.path.exists(path):\n fname = os.path.join(path, 'matplotlibrc')\n if os.path.exists(fname):\n return fname\n\n fname = os.path.join(get_configdir(), 'matplotlibrc')\n if os.path.exists(fname): return fname\n\n\n path = get_data_path() # guaranteed to exist or raise\n fname = os.path.join(path, 'matplotlibrc')\n if not os.path.exists(fname):\n warnings.warn('Could not find matplotlibrc; using defaults')\n return fname\n\n\n_deprecated_map = {\n 'text.fontstyle': 'font.style',\n 'text.fontangle': 'font.style',\n 'text.fontvariant': 'font.variant',\n 'text.fontweight': 'font.weight',\n 'text.fontsize': 'font.size',\n 'tick.size' : 'tick.major.size',\n 'svg.embed_char_paths' : 'svg.fonttype',\n 'savefig.extension' : 'savefig.format'\n }\n\n_deprecated_ignore_map = {\n 'legend.pad' : 'legend.borderpad',\n 'legend.labelsep' : 'legend.labelspacing',\n 'legend.handlelen' : 'legend.handlelength',\n 'legend.handletextsep' : 'legend.handletextpad',\n 'legend.axespad' : 'legend.borderaxespad',\n }\n\n\nclass RcParams(dict):\n\n \"\"\"\n A dictionary object including validation\n\n validating functions are defined and associated with rc parameters in\n :mod:`matplotlib.rcsetup`\n \"\"\"\n\n validate = dict([ (key, converter) for key, (default, converter) in \\\n defaultParams.iteritems() ])\n msg_depr = \"%s is deprecated and replaced with %s; please use the latter.\"\n msg_depr_ignore = \"%s is deprecated and ignored. 
Use %s\"\n\n def __setitem__(self, key, val):\n try:\n if key in _deprecated_map:\n alt = _deprecated_map[key]\n warnings.warn(self.msg_depr % (key, alt))\n key = alt\n elif key in _deprecated_ignore_map:\n alt = _deprecated_ignore_map[key]\n warnings.warn(self.msg_depr_ignore % (key, alt))\n return\n cval = self.validate[key](val)\n dict.__setitem__(self, key, cval)\n except KeyError:\n raise KeyError('%s is not a valid rc parameter.\\\nSee rcParams.keys() for a list of valid parameters.' % (key,))\n\n def __getitem__(self, key):\n if key in _deprecated_map:\n alt = _deprecated_map[key]\n warnings.warn(self.msg_depr % (key, alt))\n key = alt\n elif key in _deprecated_ignore_map:\n alt = _deprecated_ignore_map[key]\n warnings.warn(self.msg_depr_ignore % (key, alt))\n key = alt\n return dict.__getitem__(self, key)\n\n def keys(self):\n \"\"\"\n Return sorted list of keys.\n \"\"\"\n k = list(dict.keys(self))\n k.sort()\n return k\n\n def values(self):\n \"\"\"\n Return values in order of sorted keys.\n \"\"\"\n return [self[k] for k in self.iterkeys()]\n\ndef rc_params(fail_on_error=False):\n 'Return the default params updated from the values in the rc file'\n\n fname = matplotlib_fname()\n if not os.path.exists(fname):\n # this should never happen, default in mpl-data should always be found\n message = 'could not find rc file; returning defaults'\n ret = RcParams([ (key, default) for key, (default, converter) in \\\n defaultParams.iteritems() ])\n warnings.warn(message)\n return ret\n\n return rc_params_from_file(fname, fail_on_error)\n\n\ndef rc_params_from_file(fname, fail_on_error=False):\n \"\"\"Load and return params from fname.\"\"\"\n\n cnt = 0\n rc_temp = {}\n with open(fname) as fd:\n for line in fd:\n cnt += 1\n strippedline = line.split('#',1)[0].strip()\n if not strippedline: continue\n tup = strippedline.split(':',1)\n if len(tup) !=2:\n warnings.warn('Illegal line #%d\\n\\t%s\\n\\tin file \"%s\"'%\\\n (cnt, line, fname))\n continue\n key, val = tup\n key = key.strip()\n val = val.strip()\n if key in rc_temp:\n warnings.warn('Duplicate key in file \"%s\", line #%d'%(fname,cnt))\n rc_temp[key] = (val, line, cnt)\n\n ret = RcParams([ (key, default) for key, (default, converter) in \\\n defaultParams.iteritems() ])\n\n for key in ('verbose.level', 'verbose.fileo'):\n if key in rc_temp:\n val, line, cnt = rc_temp.pop(key)\n if fail_on_error:\n ret[key] = val # try to convert to proper type or raise\n else:\n try: ret[key] = val # try to convert to proper type or skip\n except Exception as msg:\n warnings.warn('Bad val \"%s\" on line #%d\\n\\t\"%s\"\\n\\tin file \\\n\"%s\"\\n\\t%s' % (val, cnt, line, fname, msg))\n\n verbose.set_level(ret['verbose.level'])\n verbose.set_fileo(ret['verbose.fileo'])\n\n for key, (val, line, cnt) in rc_temp.iteritems():\n if key in defaultParams:\n if fail_on_error:\n ret[key] = val # try to convert to proper type or raise\n else:\n try: ret[key] = val # try to convert to proper type or skip\n except Exception as msg:\n warnings.warn('Bad val \"%s\" on line #%d\\n\\t\"%s\"\\n\\tin file \\\n\"%s\"\\n\\t%s' % (val, cnt, line, fname, msg))\n elif key in _deprecated_ignore_map:\n warnings.warn('%s is deprecated. 
Update your matplotlibrc to use %s instead.'% (key, _deprecated_ignore_map[key]))\n\n else:\n print(\"\"\"\nBad key \"%s\" on line %d in\n%s.\nYou probably need to get an updated matplotlibrc file from\nhttp://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source\ndistribution\"\"\" % (key, cnt, fname), file=sys.stderr)\n\n if ret['datapath'] is None:\n ret['datapath'] = get_data_path()\n\n if not ret['text.latex.preamble'] == ['']:\n verbose.report(\"\"\"\n*****************************************************************\nYou have the following UNSUPPORTED LaTeX preamble customizations:\n%s\nPlease do not ask for support with these customizations active.\n*****************************************************************\n\"\"\"% '\\n'.join(ret['text.latex.preamble']), 'helpful')\n\n verbose.report('loaded rc file %s'%fname)\n\n return ret\n\n\n# this is the instance used by the matplotlib classes\nrcParams = rc_params()\n\nif rcParams['examples.directory']:\n # paths that are intended to be relative to matplotlib_fname()\n # are allowed for the examples.directory parameter.\n # However, we will need to fully qualify the path because\n # Sphinx requires absolute paths.\n if not os.path.isabs(rcParams['examples.directory']):\n _basedir, _fname = os.path.split(matplotlib_fname())\n # Sometimes matplotlib_fname() can return relative paths,\n # Also, using realpath() guarentees that Sphinx will use\n # the same path that matplotlib sees (in case of weird symlinks).\n _basedir = os.path.realpath(_basedir)\n _fullpath = os.path.join(_basedir, rcParams['examples.directory'])\n rcParams['examples.directory'] = _fullpath\n\nrcParamsOrig = rcParams.copy()\n\nrcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \\\n defaultParams.iteritems() ])\n\nrcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])\nrcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])\n\nif rcParams['axes.formatter.use_locale']:\n import locale\n locale.setlocale(locale.LC_ALL, '')\n\ndef rc(group, **kwargs):\n \"\"\"\n Set the current rc params. Group is the grouping for the rc, eg.\n for ``lines.linewidth`` the group is ``lines``, for\n ``axes.facecolor``, the group is ``axes``, and so on. Group may\n also be a list or tuple of group names, eg. (*xtick*, *ytick*).\n *kwargs* is a dictionary attribute name/value pairs, eg::\n\n rc('lines', linewidth=2, color='r')\n\n sets the current rc params and is equivalent to::\n\n rcParams['lines.linewidth'] = 2\n rcParams['lines.color'] = 'r'\n\n The following aliases are available to save typing for interactive\n users:\n\n ===== =================\n Alias Property\n ===== =================\n 'lw' 'linewidth'\n 'ls' 'linestyle'\n 'c' 'color'\n 'fc' 'facecolor'\n 'ec' 'edgecolor'\n 'mew' 'markeredgewidth'\n 'aa' 'antialiased'\n ===== =================\n\n Thus you could abbreviate the above rc command as::\n\n rc('lines', lw=2, c='r')\n\n\n Note you can use python's kwargs dictionary facility to store\n dictionaries of default parameters. 
Eg, you can customize the\n font rc as follows::\n\n font = {'family' : 'monospace',\n 'weight' : 'bold',\n 'size' : 'larger'}\n\n rc('font', **font) # pass in the font dict as kwargs\n\n This enables you to easily switch between several configurations.\n Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default\n rc params after changes.\n \"\"\"\n\n aliases = {\n 'lw' : 'linewidth',\n 'ls' : 'linestyle',\n 'c' : 'color',\n 'fc' : 'facecolor',\n 'ec' : 'edgecolor',\n 'mew' : 'markeredgewidth',\n 'aa' : 'antialiased',\n }\n\n if is_string_like(group):\n group = (group,)\n for g in group:\n for k,v in kwargs.iteritems():\n name = aliases.get(k) or k\n key = '%s.%s' % (g, name)\n try:\n rcParams[key] = v\n except KeyError:\n raise KeyError('Unrecognized key \"%s\" for group \"%s\" and name \"%s\"' %\n (key, g, name))\n\ndef rcdefaults():\n \"\"\"\n Restore the default rc params. These are not the params loaded by\n the rc file, but mpl's internal params. See rc_file_defaults for\n reloading the default params from the rc file\n \"\"\"\n rcParams.update(rcParamsDefault)\n\n\ndef rc_file(fname):\n \"\"\"\n Update rc params from file.\n \"\"\"\n rcParams.update(rc_params_from_file(fname))\n\n\nclass rc_context(object):\n \"\"\"\n Return a context manager for managing rc settings.\n\n This allows one to do::\n\n >>> with mpl.rc_context(fname='screen.rc'):\n >>> plt.plot(x, a)\n >>> with mpl.rc_context(fname='print.rc'):\n >>> plt.plot(x, b)\n >>> plt.plot(x, c)\n\n The 'a' vs 'x' and 'c' vs 'x' plots would have settings from\n 'screen.rc', while the 'b' vs 'x' plot would have settings from\n 'print.rc'.\n\n A dictionary can also be passed to the context manager::\n\n >>> with mpl.rc_context(rc={'text.usetex': True}, fname='screen.rc'):\n >>> plt.plot(x, a)\n\n The 'rc' dictionary takes precedence over the settings loaded from\n 'fname'. Passing a dictionary only is also valid.\n \"\"\"\n\n def __init__(self, rc=None, fname=None):\n self.rcdict = rc\n self.fname = fname\n def __enter__(self):\n self._rcparams = rcParams.copy()\n if self.fname:\n rc_file(self.fname)\n if self.rcdict:\n rcParams.update(self.rcdict)\n def __exit__(self, type, value, tb):\n rcParams.update(self._rcparams)\n\n\ndef rc_file_defaults():\n \"\"\"\n Restore the default rc params from the original matplotlib rc that\n was loaded\n \"\"\"\n rcParams.update(rcParamsOrig)\n\n_use_error_msg = \"\"\" This call to matplotlib.use() has no effect\nbecause the the backend has already been chosen;\nmatplotlib.use() must be called *before* pylab, matplotlib.pyplot,\nor matplotlib.backends is imported for the first time.\n\"\"\"\n\ndef use(arg, warn=True, force=False):\n \"\"\"\n Set the matplotlib backend to one of the known backends.\n\n The argument is case-insensitive. *warn* specifies whether a\n warning should be issued if a backend has already been set up.\n *force* is an **experimental** flag that tells matplotlib to\n attempt to initialize a new backend by reloading the backend\n module.\n\n .. note::\n\n This function must be called *before* importing pyplot for\n the first time; or, if you are not using pyplot, it must be called\n before importing matplotlib.backends. If warn is True, a warning\n is issued if you try and call this after pylab or pyplot have been\n loaded. In certain black magic use cases, e.g.\n :func:`pyplot.switch_backend`, we are doing the reloading necessary to\n make the backend switch work (in some cases, e.g. 
pure image\n backends) so one can set warn=False to suppress the warnings.\n\n To find out which backend is currently set, see\n :func:`matplotlib.get_backend`.\n\n \"\"\"\n # Lets determine the proper backend name first\n if arg.startswith('module://'):\n name = arg\n else:\n # Lowercase only non-module backend names (modules are case-sensitive)\n arg = arg.lower()\n name = validate_backend(arg)\n\n # Check if we've already set up a backend\n if 'matplotlib.backends' in sys.modules:\n # Warn only if called with a different name\n if (rcParams['backend'] != name) and warn:\n warnings.warn(_use_error_msg)\n\n # Unless we've been told to force it, just return\n if not force:\n return\n need_reload = True\n else:\n need_reload = False\n\n # Store the backend name\n rcParams['backend'] = name\n\n # If needed we reload here because a lot of setup code is triggered on\n # module import. See backends/__init__.py for more detail.\n if need_reload:\n reload(sys.modules['matplotlib.backends'])\n\ndef get_backend():\n \"Returns the current backend.\"\n return rcParams['backend']\n\ndef interactive(b):\n \"\"\"\n Set interactive mode to boolean b.\n\n If b is True, then draw after every plotting command, eg, after xlabel\n \"\"\"\n rcParams['interactive'] = b\n\ndef is_interactive():\n 'Return true if plot mode is interactive'\n b = rcParams['interactive']\n return b\n\ndef tk_window_focus():\n \"\"\"Return true if focus maintenance under TkAgg on win32 is on.\n This currently works only for python.exe and IPython.exe.\n Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on.\"\"\"\n if rcParams['backend'] != 'TkAgg':\n return False\n return rcParams['tk.window_focus']\n\n# Now allow command line to override\n\n# Allow command line access to the backend with -d (MATLAB compatible\n# flag)\n\nfor s in sys.argv[1:]:\n if s.startswith('-d') and len(s) > 2: # look for a -d flag\n try:\n use(s[2:])\n except (KeyError, ValueError):\n pass\n # we don't want to assume all -d flags are backends, eg -debug\n\ndefault_test_modules = [\n 'matplotlib.tests.test_agg',\n 'matplotlib.tests.test_artist',\n 'matplotlib.tests.test_axes',\n 'matplotlib.tests.test_backend_svg',\n 'matplotlib.tests.test_backend_pgf',\n 'matplotlib.tests.test_basic',\n 'matplotlib.tests.test_bbox_tight',\n 'matplotlib.tests.test_cbook',\n 'matplotlib.tests.test_collections',\n 'matplotlib.tests.test_colorbar',\n 'matplotlib.tests.test_colors',\n 'matplotlib.tests.test_contour',\n 'matplotlib.tests.test_dates',\n 'matplotlib.tests.test_delaunay',\n 'matplotlib.tests.test_figure',\n 'matplotlib.tests.test_image',\n 'matplotlib.tests.test_legend',\n 'matplotlib.tests.test_lines',\n 'matplotlib.tests.test_mathtext',\n 'matplotlib.tests.test_mlab',\n 'matplotlib.tests.test_patches',\n 'matplotlib.tests.test_pickle',\n 'matplotlib.tests.test_rcparams',\n 'matplotlib.tests.test_scale',\n 'matplotlib.tests.test_simplification',\n 'matplotlib.tests.test_spines',\n 'matplotlib.tests.test_streamplot',\n 'matplotlib.tests.test_subplots',\n 'matplotlib.tests.test_text',\n 'matplotlib.tests.test_ticker',\n 'matplotlib.tests.test_tightlayout',\n 'matplotlib.tests.test_triangulation',\n 'matplotlib.tests.test_transforms',\n 'matplotlib.tests.test_arrow_patches',\n 'matplotlib.tests.test_backend_qt4',\n ]\n\n\ndef test(verbosity=1):\n \"\"\"run the matplotlib test suite\"\"\"\n old_backend = rcParams['backend']\n try:\n use('agg')\n import nose\n import nose.plugins.builtin\n from .testing.noseclasses import KnownFailure\n from 
nose.plugins.manager import PluginManager\n\n # store the old values before overriding\n plugins = []\n plugins.append( KnownFailure() )\n plugins.extend( [plugin() for plugin in nose.plugins.builtin.plugins] )\n\n manager = PluginManager(plugins=plugins)\n config = nose.config.Config(verbosity=verbosity, plugins=manager)\n\n success = nose.run( defaultTest=default_test_modules,\n config=config,\n )\n finally:\n if old_backend.lower() != 'agg':\n use(old_backend)\n\n return success\n\ntest.__test__ = False # nose: this function is not a test\n\nverbose.report('matplotlib version %s'%__version__)\nverbose.report('verbose.level %s'%verbose.level)\nverbose.report('interactive is %s'%rcParams['interactive'])\nverbose.report('platform is %s'%sys.platform)\nverbose.report('loaded modules: %s'%sys.modules.iterkeys(), 'debug')\n" ]
[ [ "matplotlib.colorbar.make_axes_gridspec", "matplotlib.colorbar.make_axes", "matplotlib.blocking_input.BlockingKeyMouseInput", "matplotlib._image.from_images", "matplotlib.blocking_input.BlockingMouseInput", "matplotlib.patches.Rectangle", "matplotlib.colorbar.colorbar_factory", "matplotlib.image.FigureImage", "matplotlib.axes.subplot_class_factory", "matplotlib.artist.kwdoc", "matplotlib.projections.get_projection_names", "matplotlib.cbook.Stack.push", "matplotlib.transforms.BboxTransformTo", "matplotlib.projections.process_projection_requirements", "matplotlib.transforms.Affine2D", "numpy.array", "matplotlib.cbook.Stack.remove", "matplotlib.text.Text", "matplotlib.cbook.iterable", "matplotlib.transforms.Bbox.from_bounds", "matplotlib.pyplot.draw_if_interactive", "matplotlib.artist.Artist.__init__", "numpy.clip", "matplotlib._pylab_helpers.Gcf.set_active", "matplotlib.text._process_text_args", "matplotlib.legend.Legend", "matplotlib.transforms.TransformedBbox", "matplotlib.transforms.Bbox.union", "matplotlib.pyplot.get_fignums", "matplotlib.cbook.allequal", "matplotlib.cbook.Stack.__init__", "matplotlib.pyplot._backend_mod.new_figure_manager_given_figure", "matplotlib.cbook.CallbackRegistry" ], [ "matplotlib.cbook.mkdirs", "matplotlib.cbook.is_string_like", "matplotlib.rcsetup.defaultParams.iteritems", "matplotlib.rcsetup.validate_backend" ] ]
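The matplotlib sample above centers on RcParams, a dict subclass that validates values on assignment and transparently remaps deprecated keys; rc_context then builds on it by snapshotting rcParams on __enter__ and restoring the copy on __exit__. Below is a minimal, runnable sketch of that validating-dict pattern; the _defaults table and _deprecated alias map are toy stand-ins, not matplotlib's real rcsetup tables.

import warnings

# Toy stand-ins for matplotlib.rcsetup.defaultParams and the
# _deprecated_map shown in the sample above.
_defaults = {
    'lines.linewidth': (1.0, float),   # (default, converter/validator)
    'font.size': (10.0, float),
}
_deprecated = {'text.fontsize': 'font.size'}


class MiniRcParams(dict):
    """Dict that validates on write and remaps deprecated keys."""

    def __setitem__(self, key, val):
        if key in _deprecated:
            alt = _deprecated[key]
            warnings.warn('%s is deprecated; use %s instead.' % (key, alt))
            key = alt
        if key not in _defaults:
            raise KeyError('%s is not a valid rc parameter.' % key)
        converter = _defaults[key][1]
        dict.__setitem__(self, key, converter(val))   # coerce or raise


params = MiniRcParams((k, d) for k, (d, c) in _defaults.items())
params['text.fontsize'] = '12'    # warns, lands on 'font.size', coerced to float
print(params['font.size'])        # -> 12.0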
nwillemse/nctrader
[ "4754ccdeae465ef4674a829f35fc3f78cf1d3ea4" ]
[ "nctrader/statistics/performance.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom scipy.stats import linregress\n\n\ndef aggregate_returns(returns, convert_to):\n    \"\"\"\n    Aggregates returns by day, week, month, or year.\n    \"\"\"\n    def cumulate_returns(x):\n        return np.exp(np.log(1 + x).cumsum())[-1] - 1\n\n    if convert_to == 'weekly':\n        return returns.groupby(\n            [lambda x: x.year,\n             lambda x: x.month,\n             lambda x: x.isocalendar()[1]]).apply(cumulate_returns)\n    elif convert_to == 'monthly':\n        return returns.groupby(\n            [lambda x: x.year, lambda x: x.month]).apply(cumulate_returns)\n    elif convert_to == 'yearly':\n        return returns.groupby(\n            [lambda x: x.year]).apply(cumulate_returns)\n    else:\n        raise ValueError('convert_to must be weekly, monthly or yearly')\n\n\ndef create_cagr(equity, periods=252):\n    \"\"\"\n    Calculates the Compound Annual Growth Rate (CAGR)\n    for the portfolio, by determining the number of years\n    and then creating a compound annualised rate based\n    on the total return.\n\n    Parameters:\n    equity - A pandas Series representing the equity curve.\n    periods - Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc.\n    \"\"\"\n    years = len(equity) / float(periods)\n    return (equity[-1] ** (1.0 / years)) - 1.0\n\n\ndef create_sharpe_ratio(returns, periods=252):\n    \"\"\"\n    Create the Sharpe ratio for the strategy, based on a\n    benchmark of zero (i.e. no risk-free rate information).\n\n    Parameters:\n    returns - A pandas Series representing period percentage returns.\n    periods - Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc.\n    \"\"\"\n    if np.std(returns) != 0:\n        return np.sqrt(periods) * (np.mean(returns)) / np.std(returns)\n    else:\n        return 0.0\n\n\ndef create_sortino_ratio(returns, periods=252):\n    \"\"\"\n    Create the Sortino ratio for the strategy, based on a\n    benchmark of zero (i.e. no risk-free rate information).\n\n    Parameters:\n    returns - A pandas Series representing period percentage returns.\n    periods - Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc.\n    \"\"\"\n    if np.std(returns[returns.values < 0]) != 0:\n        return np.sqrt(periods) * (np.mean(returns)) / np.std(returns[returns.values < 0])\n    else:\n        return 0.0\n\n\ndef create_drawdowns(returns):\n    \"\"\"\n    Calculate the largest peak-to-trough drawdown of the equity curve\n    as well as the duration of the drawdown. 
Requires that the\n pnl_returns is a pandas Series.\n\n Parameters:\n equity - A pandas Series representing period percentage returns.\n\n Returns:\n drawdown, drawdown_max, duration\n \"\"\"\n\n # Calculate the cumulative returns curve\n # and set up the High Water Mark\n hwm = [0]\n\n # Create the drawdown and duration series\n idx = returns.index\n drawdown = pd.Series(index=idx)\n duration = pd.Series(index=idx)\n\n # Loop over the index range\n for t in range(1, len(idx)):\n hwm.append(max(hwm[t - 1], returns.ix[t]))\n drawdown.ix[t] = (hwm[t] - returns.ix[t]) / hwm[t]\n duration.ix[t] = (0 if drawdown.ix[t] == 0 else duration.ix[t - 1] + 1)\n\n #TODO: Vectorize drawdown calculations to make them faster\n \"\"\"\n dd = 1 - returns.div(returns.cummax())\n dd_dur = dd.where(dd == 0, 1)\n #print drawdown, dd\n #print drawdown.max(), dd.max()\n #print dd_dur.groupby(dd_dur).cumcount()\n #print duration.max(), dd_dur.max()\n dd.to_csv('~/dd.csv')\n dd_dur.to_csv('~/dd_dir.csv')\n a = dd_dur.groupby(dd_dur).cumcount()\n \"\"\"\n\n return drawdown, drawdown.max(), duration.max()\n\n\ndef rsquared(x, y):\n \"\"\" Return R^2 where x and y are array-like.\"\"\"\n\n slope, intercept, r_value, p_value, std_err = linregress(x, y)\n return r_value**2\n" ]
[ [ "numpy.log", "scipy.stats.linregress", "numpy.mean", "numpy.std", "numpy.sqrt", "pandas.Series" ] ]
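create_drawdowns in the sample walks the Series with the long-deprecated .ix indexer, and its inline TODO asks for a vectorized version. Below is a sketch of one, not the library's method: it keeps the same assumptions the loop makes (the input is a positive cumulative-return curve and the high-water mark is seeded at 0, as in hwm = [0]), and its handling of the very first observation differs slightly from the loop, which leaves index 0 as NaN.

import numpy as np
import pandas as pd


def create_drawdowns_vectorized(returns):
    """Sketch of a vectorized take on the create_drawdowns loop above.

    Assumes a positive equity curve, matching the sample's usage.
    """
    # Running high-water mark, clipped at 0 to mirror the loop's hwm = [0] seed.
    hwm = returns.cummax().clip(lower=0.0)
    drawdown = (hwm - returns) / hwm
    # Duration: length of each consecutive run of nonzero drawdown.
    in_dd = drawdown > 0
    run_id = (~in_dd).cumsum()        # new run id whenever drawdown returns to 0
    duration = in_dd.astype(int).groupby(run_id).cumsum()
    return drawdown, drawdown.max(), duration.max()


if __name__ == '__main__':
    curve = pd.Series([1.0, 1.2, 1.1, 0.9, 1.3, 1.25])
    dd, dd_max, dur_max = create_drawdowns_vectorized(curve)
    print(dd_max, dur_max)   # 0.25 (the 1.2 -> 0.9 dip) and a 2-step duration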
ESCM-summarization/ESCM-summary-evaluation
[ "3780b51f0ed44cbbea3f163a871d875f1e5e9393" ]
[ "Models code/Transformer/train.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n Main training workflow\n\"\"\"\nfrom __future__ import division\nimport argparse\nimport os\nimport signal\nimport torch\n\nimport onmt.opts as opts\nimport onmt.utils.distributed\n\nfrom onmt.utils.logging import logger\nfrom onmt.train_single import main as single_main\n\n\ndef main(opt):\n if opt.rnn_type == \"SRU\" and not opt.gpu_ranks:\n raise AssertionError(\"Using SRU requires -gpu_ranks set.\")\n\n if opt.epochs:\n raise AssertionError(\"-epochs is deprecated please use -train_steps.\")\n\n if opt.truncated_decoder > 0 and opt.accum_count > 1:\n raise AssertionError(\"BPTT is not compatible with -accum > 1\")\n\n if len(opt.gpuid) > 1:\n raise AssertionError(\"gpuid is deprecated \\\n see world_size and gpu_ranks\")\n\n nb_gpu = len(opt.gpu_ranks)\n\n if opt.world_size > 1:\n mp = torch.multiprocessing.get_context('spawn')\n # Create a thread to listen for errors in the child processes.\n error_queue = mp.SimpleQueue()\n error_handler = ErrorHandler(error_queue)\n # Train with multiprocessing.\n procs = []\n for device_id in range(nb_gpu):\n procs.append(mp.Process(target=run, args=(\n opt, device_id, error_queue, ), daemon=True))\n procs[device_id].start()\n logger.info(\" Starting process pid: %d \" % procs[device_id].pid)\n error_handler.add_child(procs[device_id].pid)\n for p in procs:\n p.join()\n\n elif nb_gpu == 1: # case 1 GPU only\n single_main(opt, 0)\n else: # case only CPU\n single_main(opt, -1)\n\n\ndef run(opt, device_id, error_queue):\n \"\"\" run process \"\"\"\n try:\n gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)\n if gpu_rank != opt.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n single_main(opt, device_id)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='train.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.model_opts(parser)\n opts.train_opts(parser)\n\n opt = parser.parse_args()\n main(opt)\n" ]
[ [ "torch.multiprocessing.get_context" ] ]
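The train.py sample forks one process per GPU and relies on ErrorHandler so that a traceback in any child surfaces in the parent instead of a silent hang: each child puts (rank, traceback) on a SimpleQueue, and a listener thread re-raises in the parent via SIGUSR1, a Unix-only signal. Here is a stripped-down, torch-free sketch of the same queue idea that simply polls after join() instead of signalling; the simulated failure is made up for the demo.

import multiprocessing as mp
import traceback


def worker(rank, error_queue):
    """Child process: catch everything and ship the traceback to the parent."""
    try:
        if rank == 1:
            raise RuntimeError('simulated failure in rank %d' % rank)  # made-up error
        print('rank %d finished cleanly' % rank)
    except Exception:
        error_queue.put((rank, traceback.format_exc()))


if __name__ == '__main__':
    ctx = mp.get_context('spawn')        # same start method as the sample
    error_queue = ctx.SimpleQueue()
    procs = [ctx.Process(target=worker, args=(r, error_queue)) for r in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    if not error_queue.empty():
        rank, tb = error_queue.get()
        raise RuntimeError('worker %d died with:\n%s' % (rank, tb))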
bairw660606/PPDM
[ "d89c1e583a87b1fe5f1c6bb94ed4b09838d5e547" ]
[ "src/lib/eval/hoia_eval.py" ]
[ "import json\nimport numpy as np\nimport os\n\nclass hoia():\n def __init__(self, annotation_file):\n self.annotations = json.load(open(annotation_file, 'r'))\n self.overlap_iou = 0.5\n self.verb_name_dict = {1: 'smoke', 2: 'call', 3: 'play(cellphone)', 4: 'eat', 5: 'drink',\n 6: 'ride', 7: 'hold', 8: 'kick', 9: 'read', 10: 'play (computer)'}\n self.fp = {}\n self.tp = {}\n self.score = {}\n self.sum_gt = {}\n for i in list(self.verb_name_dict.keys()):\n self.fp[i] = []\n self.tp[i] = []\n self.score[i] = []\n self.sum_gt[i] = 0\n self.file_name = []\n for gt_i in self.annotations:\n self.file_name.append(gt_i['file_name'])\n gt_hoi = gt_i['hoi_annotation']\n for gt_hoi_i in gt_hoi:\n if isinstance(gt_hoi_i['category_id'], str):\n gt_hoi_i['category_id'] = int(gt_hoi_i['category_id'].replace('\\n', ''))\n if gt_hoi_i['category_id'] in list(self.verb_name_dict.keys()):\n self.sum_gt[gt_hoi_i['category_id']] += 1\n self.num_class = len(list(self.verb_name_dict.keys()))\n\n def evalution(self, predict_annot):\n for pred_i in predict_annot:\n if pred_i['file_name'] not in self.file_name:\n continue\n gt_i = self.annotations[self.file_name.index(pred_i['file_name'])]\n gt_bbox = gt_i['annotations']\n pred_bbox = pred_i['predictions']\n pred_hoi = pred_i['hoi_prediction']\n gt_hoi = gt_i['hoi_annotation']\n bbox_pairs = self.compute_iou_mat(gt_bbox, pred_bbox)\n self.compute_fptp(pred_hoi, gt_hoi, bbox_pairs)\n map = self.compute_map()\n return map\n\n def compute_map(self):\n ap = np.zeros(self.num_class)\n max_recall = np.zeros(self.num_class)\n for i in list(self.verb_name_dict.keys()):\n sum_gt = self.sum_gt[i]\n\n if sum_gt == 0:\n continue\n tp = np.asarray((self.tp[i]).copy())\n fp = np.asarray((self.fp[i]).copy())\n res_num = len(tp)\n if res_num == 0:\n continue\n score = np.asarray(self.score[i].copy())\n sort_inds = np.argsort(-score)\n fp = fp[sort_inds]\n tp = tp[sort_inds]\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / sum_gt\n prec = tp / (fp + tp)\n ap[i - 1] = self.voc_ap(rec,prec)\n max_recall[i-1] = np.max(rec)\n # print('class {} --- ap: {} max recall: {}'.format(\n # i, ap[i-1], max_recall[i-1]))\n mAP = np.mean(ap[:])\n m_rec = np.mean(max_recall[:])\n print('--------------------')\n print('mAP: {} max recall: {}'.format(mAP, m_rec))\n print('--------------------')\n return mAP\n\n def voc_ap(self, rec, prec):\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n def compute_fptp(self, pred_hoi, gt_hoi, match_pairs):\n pos_pred_ids = match_pairs.keys()\n vis_tag = np.zeros(len(gt_hoi))\n pred_hoi.sort(key=lambda k: (k.get('score', 0)), reverse=True)\n if len(pred_hoi) != 0:\n for i, pred_hoi_i in enumerate(pred_hoi):\n is_match = 0\n if isinstance(pred_hoi_i['category_id'], str):\n pred_hoi_i['category_id'] = int(pred_hoi_i['category_id'].replace('\\n', ''))\n if len(match_pairs) != 0 and pred_hoi_i['subject_id'] in pos_pred_ids and pred_hoi_i['object_id'] in pos_pred_ids:\n pred_sub_ids = match_pairs[pred_hoi_i['subject_id']]\n pred_obj_ids = match_pairs[pred_hoi_i['object_id']]\n pred_category_id = pred_hoi_i['category_id']\n for gt_id in np.nonzero(1 - vis_tag)[0]:\n gt_hoi_i = gt_hoi[gt_id]\n if (gt_hoi_i['subject_id'] in pred_sub_ids) and (gt_hoi_i['object_id'] in pred_obj_ids) and (pred_category_id == gt_hoi_i['category_id']):\n 
is_match = 1\n vis_tag[gt_id] = 1\n continue\n if pred_hoi_i['category_id'] not in list(self.fp.keys()):\n continue\n if is_match == 1:\n self.fp[pred_hoi_i['category_id']].append(0)\n self.tp[pred_hoi_i['category_id']].append(1)\n\n else:\n self.fp[pred_hoi_i['category_id']].append(1)\n self.tp[pred_hoi_i['category_id']].append(0)\n self.score[pred_hoi_i['category_id']].append(pred_hoi_i['score'])\n\n def compute_iou_mat(self, bbox_list1, bbox_list2):\n iou_mat = np.zeros((len(bbox_list1), len(bbox_list2)))\n if len(bbox_list1) == 0 or len(bbox_list2) == 0:\n return {}\n for i, bbox1 in enumerate(bbox_list1):\n for j, bbox2 in enumerate(bbox_list2):\n iou_i = self.compute_IOU(bbox1, bbox2)\n iou_mat[i, j] = iou_i\n iou_mat[iou_mat>= self.overlap_iou] = 1\n iou_mat[iou_mat< self.overlap_iou] = 0\n\n match_pairs = np.nonzero(iou_mat)\n match_pairs_dict = {}\n if iou_mat.max() > 0:\n for i, pred_id in enumerate(match_pairs[1]):\n if pred_id not in match_pairs_dict.keys():\n match_pairs_dict[pred_id] = []\n match_pairs_dict[pred_id].append(match_pairs[0][i])\n return match_pairs_dict\n\n def compute_IOU(self, bbox1, bbox2):\n if isinstance(bbox1['category_id'], str):\n bbox1['category_id'] = int(bbox1['category_id'].replace('\\n', ''))\n if isinstance(bbox2['category_id'], str):\n bbox2['category_id'] = int(bbox2['category_id'].replace('\\n', ''))\n if bbox1['category_id'] == bbox2['category_id']:\n rec1 = bbox1['bbox']\n rec2 = bbox2['bbox']\n # computing area of each rectangles\n S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])\n S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])\n\n # computing the sum_area\n sum_area = S_rec1 + S_rec2\n\n # find the each edge of intersect rectangle\n left_line = max(rec1[1], rec2[1])\n right_line = min(rec1[3], rec2[3])\n top_line = max(rec1[0], rec2[0])\n bottom_line = min(rec1[2], rec2[2])\n # judge if there is an intersect\n if left_line >= right_line or top_line >= bottom_line:\n return 0\n else:\n intersect = (right_line - left_line) * (bottom_line - top_line)\n return intersect / (sum_area - intersect)\n else:\n return 0" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.zeros", "numpy.sum", "numpy.mean", "numpy.nonzero", "numpy.where", "numpy.argsort", "numpy.cumsum", "numpy.maximum" ] ]
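The mAP computed by the sample hinges on voc_ap, the all-points VOC average precision: pad the recall and precision arrays, force precision to be non-increasing by sweeping a running maximum from the right (the precision envelope), then sum precision over the recall increments. The same function lifted out with a toy worked case follows; the tp/fp flags and the count of 3 ground truths are made-up numbers.

import numpy as np


def voc_ap(rec, prec):
    """All-points VOC average precision, as in hoia.voc_ap above."""
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Precision envelope: make mpre non-increasing, sweeping right to left.
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Sum precision over the recall steps.
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])


# Toy case: 4 detections already sorted by descending score, 3 ground truths.
tp = np.cumsum([1, 0, 1, 1])
fp = np.cumsum([0, 1, 0, 0])
rec = tp / 3.0
prec = tp / (tp + fp)
print(voc_ap(rec, prec))   # -> 0.8333..., i.e. (1 + 0.75 + 0.75) / 3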
maps16/FComputacional1
[ "eb4a5b5ea9542023a5f928cc1f15d3f25f7ea0d0" ]
[ "Actividad3/Codigo/x3_Sin3x.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\n\n# Generate data\nx01 = np.random.random(12)\nx1 = 4.0*x01-2.0\ny1 = (x1*x1*x1)*(np.sin(3.0*x1))\n\n\n# Plot the random points x and f(x) = x^3*sin(3x)\nplt.plot(x1, y1, 'o', label='Data')\n\n# Points at which to interpolate\nx = np.linspace(min(x1),max(x1),200)\n\n# Options for interp1d\nopc = ('linear', 'quadratic', 'cubic')\n\nfor o in opc:\n    f = interp1d(x1, y1, kind=o)\n    plt.plot(x, f(x), label=o)\n\n# Show the plot\nplt.legend(loc='best')\nplt.show()" ]
[ [ "numpy.sin", "scipy.interpolate.interp1d", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "numpy.random.random" ] ]
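The plotting script above compares the interp1d kinds only by eye. Below is a short sketch that quantifies each kind's worst-case error against the true f(x) = x^3*sin(3x) on the same dense grid; the RandomState seed is an assumption added for reproducibility and is not in the original.

import numpy as np
from scipy.interpolate import interp1d

rng = np.random.RandomState(0)       # seeded stand-in for np.random.random(12)
x1 = np.sort(4.0 * rng.random_sample(12) - 2.0)
y1 = x1**3 * np.sin(3.0 * x1)

x = np.linspace(x1.min(), x1.max(), 200)
true = x**3 * np.sin(3.0 * x)

for kind in ('linear', 'quadratic', 'cubic'):
    f = interp1d(x1, y1, kind=kind)
    err = np.max(np.abs(f(x) - true))
    print('%-9s max |error| = %.4f' % (kind, err))

With only 12 irregular samples of an oscillatory function, the higher-order kinds are not guaranteed to win; spline overshoot between sparse nodes can inflate the error.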
tridelat/proteus
[ "13380120826ff0ffa0f244ddd4ee7f389dd8b917" ]
[ "proteus/mprans/VOF.py" ]
[ "# A type of -*- python -*- file\n\"\"\"\nAn optimized volume-of-fluid transport module\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom builtins import range\nfrom past.utils import old_div\nimport numpy as np\nfrom math import fabs\nimport proteus\nfrom proteus import cfemIntegrals, Quadrature, Norms, Comm\nfrom proteus.NonlinearSolvers import NonlinearEquation\nfrom proteus.FemTools import (DOFBoundaryConditions,\n FluxBoundaryConditions,\n C0_AffineLinearOnSimplexWithNodalBasis)\nfrom proteus.Comm import globalMax\nfrom proteus.Profiling import memory\nfrom proteus.Profiling import logEvent\nfrom proteus.Transport import OneLevelTransport\nfrom proteus.TransportCoefficients import TC_base\nfrom proteus.SubgridError import SGE_base\nfrom proteus.ShockCapturing import ShockCapturing_base\nfrom proteus.LinearAlgebraTools import SparseMat\nfrom proteus.NonlinearSolvers import ExplicitLumpedMassMatrix,ExplicitConsistentMassMatrixForVOF,TwoStageNewton\nfrom proteus import TimeIntegration\nfrom proteus.mprans.cVOF import *\nfrom proteus import *\nfrom proteus.Transport import *\nfrom proteus.Transport import OneLevelTransport\n#from . import cVOF3P\n\nclass SubgridError(SGE_base):\n def __init__(self, coefficients, nd):\n proteus.SubgridError.SGE_base.__init__(self, coefficients, nd, lag=False)\n\n def initializeElementQuadrature(self, mesh, t, cq):\n pass\n\n def updateSubgridErrorHistory(self, initializationPhase=False):\n pass\n\n def calculateSubgridError(self, q):\n pass\n\nclass ShockCapturing(ShockCapturing_base):\n def __init__(self,\n coefficients,\n nd,\n shockCapturingFactor=0.25,\n lag=True,\n nStepsToDelay=None):\n proteus.ShockCapturing.ShockCapturing_base.__init__(self,\n coefficients,\n nd,\n shockCapturingFactor,\n lag)\n self.nStepsToDelay = nStepsToDelay\n self.nSteps = 0\n if self.lag:\n logEvent(\"VOF.ShockCapturing: lagging requested but must lag the first step; switching lagging off and delaying\")\n self.nStepsToDelay = 1\n self.lag = False\n\n def initializeElementQuadrature(self, mesh, t, cq):\n self.mesh = mesh\n self.numDiff = []\n self.numDiff_last = []\n for ci in range(self.nc):\n self.numDiff.append(cq[('numDiff', ci, ci)])\n self.numDiff_last.append(cq[('numDiff', ci, ci)])\n\n def updateShockCapturingHistory(self):\n self.nSteps += 1\n if self.lag:\n for ci in range(self.nc):\n self.numDiff_last[ci][:] = self.numDiff[ci]\n if self.lag == False and self.nStepsToDelay is not None and self.nSteps > self.nStepsToDelay:\n logEvent(\"VOF.ShockCapturing: switched to lagged shock capturing\")\n self.lag = True\n self.numDiff_last = []\n for ci in range(self.nc):\n self.numDiff_last.append(self.numDiff[ci].copy())\n logEvent(\"VOF: max numDiff %e\" % (globalMax(self.numDiff_last[0].max()),))\n\nclass NumericalFlux(proteus.NumericalFlux.Advection_DiagonalUpwind_Diffusion_IIPG_exterior):\n def __init__(self,\n vt,\n getPointwiseBoundaryConditions,\n getAdvectiveFluxBoundaryConditions,\n getDiffusiveFluxBoundaryConditions,\n getPeriodicBoundaryConditions=None):\n proteus.NumericalFlux.Advection_DiagonalUpwind_Diffusion_IIPG_exterior.__init__(\n self,\n vt,\n getPointwiseBoundaryConditions,\n getAdvectiveFluxBoundaryConditions,\n getDiffusiveFluxBoundaryConditions)\n\nclass RKEV(proteus.TimeIntegration.SSP):\n from proteus import TimeIntegration\n \"\"\"\n Wrapper for SSPRK time integration using EV\n\n ... 
more to come ...\n \"\"\"\n\n def __init__(self, transport, timeOrder=1, runCFL=0.1, integrateInterpolationPoints=False):\n TimeIntegration.SSP.__init__(self,\n transport,\n integrateInterpolationPoints=integrateInterpolationPoints)\n self.runCFL = runCFL\n self.dtLast = None\n self.isAdaptive = True\n assert transport.coefficients.STABILIZATION_TYPE>1, \"SSP method just works for edge based EV methods; i.e., STABILIZATION_TYPE>1\"\n assert hasattr(transport, 'edge_based_cfl'), \"No edge based cfl defined\"\n # About the cfl\n self.cfl = transport.edge_based_cfl\n # Stuff particular for SSP\n self.timeOrder = timeOrder # order of approximation\n self.nStages = timeOrder # number of stages total\n self.lstage = 0 # last stage completed\n # storage vectors\n self.u_dof_last = {}\n self.m_old = {}\n # per component stage values, list with array at each stage\n for ci in range(self.nc):\n self.m_last[ci] = transport.q[('u',ci)].copy()\n self.m_old[ci] = transport.q[('u',ci)].copy()\n self.u_dof_last[ci] = transport.u[ci].dof.copy()\n\n def choose_dt(self):\n maxCFL = 1.0e-6\n maxCFL = max(maxCFL, globalMax(self.cfl.max()))\n self.dt = old_div(self.runCFL, maxCFL)\n if self.dtLast is None:\n self.dtLast = self.dt\n self.t = self.tLast + self.dt\n self.substeps = [self.t for i in range(self.nStages)] # Manuel is ignoring different time step levels for now\n\n def initialize_dt(self, t0, tOut, q):\n \"\"\"\n Modify self.dt\n \"\"\"\n self.tLast = t0\n self.choose_dt()\n self.t = t0 + self.dt\n\n def setCoefficients(self):\n \"\"\"\n beta are all 1's here\n mwf not used right now\n \"\"\"\n self.alpha = np.zeros((self.nStages, self.nStages), 'd')\n self.dcoefs = np.zeros((self.nStages), 'd')\n\n def updateStage(self):\n \"\"\"\n Need to switch to use coefficients\n \"\"\"\n self.lstage += 1\n assert self.timeOrder in [1, 2, 3]\n assert self.lstage > 0 and self.lstage <= self.timeOrder\n if self.timeOrder == 3:\n if self.lstage == 1:\n logEvent(\"First stage of SSP33 method\", level=4)\n for ci in range(self.nc):\n # save stage at quad points\n self.m_last[ci][:] = self.transport.q[('u',ci)]\n # DOFs\n self.transport.u_dof_old[:] = self.transport.u[ci].dof\n elif self.lstage == 2:\n logEvent(\"Second stage of SSP33 method\", level=4)\n for ci in range(self.nc):\n # Quad points\n self.m_last[ci][:] = 1./4*self.transport.q[('u',ci)]\n self.m_last[ci][:] += 3./4*self.m_old[ci]\n # DOFs\n self.transport.u_dof_old[:] = 1./4*self.transport.u[ci].dof\n self.transport.u_dof_old[:] += 3./4* self.u_dof_last[ci] \n elif self.lstage == 3:\n logEvent(\"Third stage of SSP33 method\", level=4)\n for ci in range(self.nc):\n # Quad points\n self.m_last[ci][:] = 2./3*self.transport.q[('u',ci)]\n self.m_last[ci][:] += 1./3*self.m_old[ci]\n # DOFs\n self.transport.u[0].dof[:] = 2./3*self.transport.u[ci].dof\n self.transport.u[0].dof[:] += 1./3* self.u_dof_last[ci]\n # update u_dof_old\n self.transport.u_dof_old[:] = self.u_dof_last[ci] \n elif self.timeOrder == 2:\n if self.lstage == 1:\n logEvent(\"First stage of SSP22 method\", level=4)\n for ci in range(self.nc):\n # save stage at quad points\n self.m_last[ci][:] = self.transport.q[('u',ci)]\n # DOFs\n self.transport.u_dof_old[:] = self.transport.u[ci].dof\n elif self.lstage == 2:\n logEvent(\"Second stage of SSP22 method\", level=4)\n for ci in range(self.nc):\n # Quad points\n self.m_last[ci][:] = 1./2*self.transport.q[('u',ci)]\n self.m_last[ci][:] += 1./2*self.m_old[ci]\n # DOFs\n self.transport.u[0].dof[:] = 1./2*self.transport.u[ci].dof\n 
self.transport.u[0].dof[:] += 1./2*self.u_dof_last[ci]\n # update u_dof_old\n self.transport.u_dof_old[:] = self.u_dof_last[ci] \n else:\n assert self.timeOrder == 1\n for ci in range(self.nc):\n self.m_last[ci][:] = self.transport.q[('u',ci)]\n\n def initializeTimeHistory(self, resetFromDOF=True):\n \"\"\"\n Push necessary information into time history arrays\n \"\"\"\n for ci in range(self.nc):\n self.m_old[ci][:] = self.transport.q[('u',ci)]\n self.m_last[ci][:] = self.transport.q[('u',ci)]\n self.u_dof_last[ci][:] = self.transport.u[ci].dof[:]\n\n def updateTimeHistory(self, resetFromDOF=False):\n \"\"\"\n assumes successful step has been taken\n \"\"\"\n self.t = self.tLast + self.dt\n for ci in range(self.nc):\n self.m_old[ci][:] = self.m_last[ci][:]\n self.u_dof_last[ci][:] = self.transport.u[ci].dof[:]\n self.lstage = 0\n self.dtLast = self.dt\n self.tLast = self.t\n\n def generateSubsteps(self, tList):\n \"\"\"\n create list of substeps over time values given in tList. These correspond to stages\n \"\"\"\n self.substeps = []\n tLast = self.tLast\n for t in tList:\n dttmp = t - tLast\n self.substeps.extend([tLast + dttmp for i in range(self.nStages)])\n tLast = t\n\n def resetOrder(self, order):\n \"\"\"\n initialize data structures for stage updges\n \"\"\"\n self.timeOrder = order # order of approximation\n self.nStages = order # number of stages total\n self.lstage = 0 # last stage completed\n self.substeps = [self.t for i in range(self.nStages)]\n\n def setFromOptions(self, nOptions):\n \"\"\"\n allow classes to set various numerical parameters\n \"\"\"\n if 'runCFL' in dir(nOptions):\n self.runCFL = nOptions.runCFL\n flags = ['timeOrder']\n for flag in flags:\n if flag in dir(nOptions):\n val = getattr(nOptions, flag)\n setattr(self, flag, val)\n if flag == 'timeOrder':\n self.resetOrder(self.timeOrder)\n \nclass Coefficients(proteus.TransportCoefficients.TC_base):\n from proteus.ctransportCoefficients import VOFCoefficientsEvaluate\n from proteus.ctransportCoefficients import VolumeAveragedVOFCoefficientsEvaluate\n from proteus.cfemIntegrals import copyExteriorElementBoundaryValuesFromElementBoundaryValues\n\n def __init__(self,\n LS_model=None,\n V_model=0,\n RD_model=None,\n ME_model=1,\n VOS_model=None,\n checkMass=True,\n epsFact=0.0,\n useMetrics=0.0,\n sc_uref=1.0,\n sc_beta=1.0,\n setParamsFunc=None,\n movingDomain=False,\n set_vos=None,\n forceStrongConditions=False,\n STABILIZATION_TYPE=0,\n # 0: supg\n # 1: Taylor Galerkin with EV\n # 2: EV with FCT (with or without art comp)\n # 3: Smoothness indicator (with or without art comp)\n # 4: DK's with FCT\n #FOR EDGE BASED EV \n ENTROPY_TYPE=0,\n # 0: quadratic\n # 1: logarithmic\n # FOR ENTROPY VISCOSITY\n cE=1.0,\n cMax=1.0,\n uL=0.0,\n uR=1.0,\n # FOR ARTIFICIAL COMPRESSION\n cK=0.0,\n LUMPED_MASS_MATRIX=False,\n FCT=True,\n outputQuantDOFs=False,\n #NULLSPACE INFO\n nullSpace='NoNullSpace',\n initialize=True):\n\n self.variableNames = ['vof']\n self.LS_modelIndex = LS_model\n self.V_model = V_model\n self.RD_modelIndex = RD_model\n self.modelIndex = ME_model\n self.VOS_model=VOS_model\n self.checkMass = checkMass\n self.epsFact = epsFact\n self.flowModelIndex = V_model\n self.modelIndex = ME_model\n self.RD_modelIndex = RD_model\n self.LS_modelIndex = LS_model\n self.V_model = V_model\n self.RD_modelIndex = RD_model\n self.modelIndex = ME_model\n self.VOS_model=VOS_model\n self.checkMass = checkMass\n self.epsFact = epsFact\n self.useMetrics = useMetrics\n self.sc_uref = sc_uref\n self.sc_beta = sc_beta\n 
self.setParamsFunc = setParamsFunc\n self.movingDomain = movingDomain\n self.forceStrongConditions = forceStrongConditions\n self.STABILIZATION_TYPE = STABILIZATION_TYPE\n self.ENTROPY_TYPE = ENTROPY_TYPE\n self.cE = cE\n self.cMax = cMax\n self.uL = uL\n self.uR = uR\n self.cK = cK\n self.LUMPED_MASS_MATRIX = LUMPED_MASS_MATRIX\n self.FCT = FCT\n self.outputQuantDOFs = outputQuantDOFs\n self.nullSpace = nullSpace\n # VRANS\n self.q_porosity = None\n self.ebq_porosity = None\n self.ebqe_porosity = None\n self.porosity_dof = None\n self.flowCoefficients = None\n self.set_vos = set_vos\n if initialize:\n self.initialize()\n\n def initialize(self):\n nc = 1\n mass = {0: {0: 'linear'}}\n advection = {0: {0: 'linear'}}\n hamiltonian = {}\n diffusion = {}\n potential = {}\n reaction = {}\n TC_base.__init__(self,\n nc,\n mass,\n advection,\n diffusion,\n potential,\n reaction,\n hamiltonian,\n self.variableNames,\n movingDomain=self.movingDomain) \n\n def initializeMesh(self, mesh):\n self.eps = self.epsFact * mesh.h\n\n def attachModels(self, modelList):\n # self\n self.model = modelList[self.modelIndex]\n # redistanced level set\n if self.RD_modelIndex is not None:\n self.rdModel = modelList[self.RD_modelIndex]\n # level set\n if self.LS_modelIndex is not None:\n self.lsModel = modelList[self.LS_modelIndex]\n self.q_phi = modelList[self.LS_modelIndex].q[('u', 0)]\n self.ebqe_phi = modelList[self.LS_modelIndex].ebqe[('u', 0)]\n if ('u', 0) in modelList[self.LS_modelIndex].ebq:\n self.ebq_phi = modelList[self.LS_modelIndex].ebq[('u', 0)]\n else:\n self.ebqe_phi = np.zeros(self.model.ebqe[('u', 0)].shape, 'd') # cek hack, we don't need this\n # flow model\n if self.V_model is not None:\n if ('velocity', 0) in modelList[self.V_model].q:\n self.q_v = modelList[self.V_model].q[('velocity', 0)]\n self.ebqe_v = modelList[self.V_model].ebqe[('velocity', 0)]\n else:\n self.q_v = modelList[self.V_model].q[('f', 0)]\n self.ebqe_v = modelList[self.V_model].ebqe[('f', 0)]\n if ('velocity', 0) in modelList[self.V_model].ebq:\n self.ebq_v = modelList[self.V_model].ebq[('velocity', 0)]\n else:\n if ('f', 0) in modelList[self.V_model].ebq:\n self.ebq_v = modelList[self.V_model].ebq[('f', 0)]\n else:\n self.q_v = np.ones(self.model.q[('u',0)].shape+(self.model.nSpace_global,),'d')\n self.ebqe_v = np.ones(self.model.ebqe[('u',0)].shape+(self.model.nSpace_global,),'d')\n # VRANS\n if self.V_model is not None:\n self.flowCoefficients = modelList[self.V_model].coefficients\n else:\n self.flowCoefficients = None\n if hasattr(self.flowCoefficients, 'q_porosity'):\n self.q_porosity = self.flowCoefficients.q_porosity\n if self.STABILIZATION_TYPE > 1: # edge based stabilization: EV or smoothness based\n assert hasattr(self.flowCoefficients, 'porosity_dof'), 'If STABILIZATION_TYPE>1, the flow model must have porosity_dof'\n self.porosity_dof = self.flowCoefficients.porosity_dof\n else:\n self.porosity_dof = np.ones(modelList[self.modelIndex].u[0].dof.shape, 'd')\n else:\n # If the flow model doesn't have porosity then set q_porosity=1 and porosity_dof=1\n self.q_porosity = np.ones(modelList[self.modelIndex].q[('u', 0)].shape, 'd')\n self.porosity_dof = np.ones(modelList[self.modelIndex].u[0].dof.shape, 'd')\n if self.setParamsFunc is not None:\n self.setParamsFunc(modelList[self.modelIndex].q['x'], self.q_porosity)\n #\n #\n if hasattr(self.flowCoefficients, 'ebq_porosity'):\n self.ebq_porosity = self.flowCoefficients.ebq_porosity\n elif ('u', 0) in modelList[self.modelIndex].ebq:\n self.ebq_porosity = 
np.ones(modelList[self.modelIndex].ebq[('u', 0)].shape,'d')\n if self.setParamsFunc is not None:\n self.setParamsFunc(modelList[self.modelIndex].ebq['x'], self.ebq_porosity)\n #\n #\n if hasattr(self.flowCoefficients, 'ebqe_porosity'):\n self.ebqe_porosity = self.flowCoefficients.ebqe_porosity\n else:\n self.ebqe_porosity = np.ones(self.model.ebqe[('u', 0)].shape,'d')\n if self.setParamsFunc is not None:\n self.setParamsFunc(modelList[self.LS_modelIndex].ebqe['x'], self.ebqe_porosity)\n #\n #\n\n def initializeElementQuadrature(self, t, cq):\n # VRANS\n self.q_porosity = np.ones(cq[('u', 0)].shape, 'd')\n\n def initializeElementBoundaryQuadrature(self, t, cebq, cebq_global):\n # VRANS\n self.ebq_porosity = np.ones(cebq[('u', 0)].shape, 'd')\n\n def initializeGlobalExteriorElementBoundaryQuadrature(self, t, cebqe):\n # VRANS\n self.ebqe_porosity = np.ones(cebqe[('u', 0)].shape, 'd')\n\n def preStep(self, t, firstStep=False):\n # SAVE OLD SOLUTION #\n self.model.u_dof_old[:] = self.model.u[0].dof\n\n # Restart flags for stages of taylor galerkin\n self.model.stage = 1\n self.model.auxTaylorGalerkinFlag = 1\n \n # COMPUTE NEW VELOCITY (if given by user) #\n if self.model.hasVelocityFieldAsFunction:\n self.model.updateVelocityFieldAsFunction()\n\n if self.checkMass:\n self.m_pre = Norms.scalarDomainIntegral(self.model.q['dV_last'],\n self.model.q[('m', 0)],\n self.model.mesh.nElements_owned)\n logEvent(\"Phase 0 mass before VOF step = %12.5e\" % (self.m_pre,), level=2)\n # self.m_last = Norms.scalarDomainIntegral(self.model.q['dV'],\n # self.model.timeIntegration.m_last[0],\n # self.model.mesh.nElements_owned)\n # logEvent(\"Phase 0 mass before VOF (m_last) step = %12.5e\" % (self.m_last,),level=2)\n copyInstructions = {}\n return copyInstructions\n\n def postStep(self, t, firstStep=False):\n self.model.q['dV_last'][:] = self.model.q['dV']\n if self.checkMass:\n self.m_post = Norms.scalarDomainIntegral(self.model.q['dV'],\n self.model.q[('m', 0)],\n self.model.mesh.nElements_owned)\n logEvent(\"Phase 0 mass after VOF step = %12.5e\" % (self.m_post,), level=2)\n # self.fluxIntegral = Norms.fluxDomainBoundaryIntegral(self.model.ebqe['dS'],\n # self.model.ebqe[('advectiveFlux',0)],\n # self.model.mesh)\n #logEvent(\"Phase 0 mass flux boundary integral after VOF step = %12.5e\" % (self.fluxIntegral,),level=2)\n #logEvent(\"Phase 0 mass conservation after VOF step = %12.5e\" % (self.m_post - self.m_last + self.model.timeIntegration.dt*self.fluxIntegral,),level=2)\n # divergence = Norms.fluxDomainBoundaryIntegralFromVector(self.model.ebqe['dS'],\n # self.ebqe_v,\n # self.model.ebqe['n'],\n # self.model.mesh)\n #logEvent(\"Divergence = %12.5e\" % (divergence,),level=2)\n copyInstructions = {}\n return copyInstructions\n\n def updateToMovingDomain(self, t, c):\n # in a moving domain simulation the velocity coming in is already for the moving domain\n pass\n\n def evaluate(self, t, c):\n # mwf debug\n # print \"VOFcoeficients eval t=%s \" % t\n if c[('f', 0)].shape == self.q_v.shape:\n v = self.q_v\n phi = self.q_phi\n porosity = self.q_porosity\n elif c[('f', 0)].shape == self.ebqe_v.shape:\n v = self.ebqe_v\n phi = self.ebqe_phi\n porosity = self.ebq_porosity\n elif ((self.ebq_v is not None and self.ebq_phi is not None) and c[('f', 0)].shape == self.ebq_v.shape):\n v = self.ebq_v\n phi = self.ebq_phi\n porosity = self.ebq_porosity\n else:\n v = None\n phi = None\n porosity = None\n if v is not None:\n # self.VOFCoefficientsEvaluate(self.eps,\n # v,\n # phi,\n # c[('u',0)],\n # c[('m',0)],\n # 
c[('dm',0,0)],\n # c[('f',0)],\n # c[('df',0,0)])\n self.VolumeAveragedVOFCoefficientsEvaluate(self.eps,\n v,\n phi,\n porosity,\n c[('u', 0)],\n c[('m', 0)],\n c[('dm', 0, 0)],\n c[('f', 0)],\n c[('df', 0, 0)])\n # if self.checkMass:\n # logEvent(\"Phase 0 mass in eavl = %12.5e\" % (Norms.scalarDomainIntegral(self.model.q['dV'],\n # self.model.q[('m',0)],\n # self.model.mesh.nElements_owned),),level=2)\n\n\nclass LevelModel(proteus.Transport.OneLevelTransport):\n nCalls = 0\n\n def __init__(self,\n uDict,\n phiDict,\n testSpaceDict,\n matType,\n dofBoundaryConditionsDict,\n dofBoundaryConditionsSetterDict,\n coefficients,\n elementQuadrature,\n elementBoundaryQuadrature,\n fluxBoundaryConditionsDict=None,\n advectiveFluxBoundaryConditionsSetterDict=None,\n diffusiveFluxBoundaryConditionsSetterDictDict=None,\n stressTraceBoundaryConditionsSetterDict=None,\n stabilization=None,\n shockCapturing=None,\n conservativeFluxDict=None,\n numericalFluxType=None,\n TimeIntegrationClass=None,\n massLumping=False,\n reactionLumping=False,\n options=None,\n name='defaultName',\n reuse_trial_and_test_quadrature=True,\n sd=True,\n movingDomain=False,\n bdyNullSpace=False):\n\n self.auxiliaryCallCalculateResidual = False\n #\n # set the objects describing the method and boundary conditions\n #\n self.bdyNullSpace = bdyNullSpace\n self.movingDomain = movingDomain\n self.tLast_mesh = None\n #\n self.name = name\n self.sd = sd\n self.Hess = False\n self.lowmem = True\n self.timeTerm = True # allow turning off the time derivative\n # self.lowmem=False\n self.testIsTrial = True\n self.phiTrialIsTrial = True\n self.u = uDict\n self.ua = {} # analytical solutions\n self.phi = phiDict\n self.dphi = {}\n self.matType = matType\n # mwf try to reuse test and trial information across components if spaces are the same\n self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature # True#False\n if self.reuse_test_trial_quadrature:\n for ci in range(1, coefficients.nc):\n assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, \"to reuse_test_trial_quad all femSpaces must be the same!\"\n self.u_dof_old = None\n # Simplicial Mesh\n self.mesh = self.u[0].femSpace.mesh # assume the same mesh for all components for now\n self.testSpace = testSpaceDict\n self.dirichletConditions = dofBoundaryConditionsDict\n self.dirichletNodeSetList = None # explicit Dirichlet conditions for now, no Dirichlet BC constraints\n self.coefficients = coefficients\n self.coefficients.initializeMesh(self.mesh)\n self.nc = self.coefficients.nc\n self.stabilization = stabilization\n self.shockCapturing = shockCapturing\n self.conservativeFlux = conservativeFluxDict # no velocity post-processing for now\n self.fluxBoundaryConditions = fluxBoundaryConditionsDict\n self.advectiveFluxBoundaryConditionsSetterDict = advectiveFluxBoundaryConditionsSetterDict\n self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict\n # determine whether the stabilization term is nonlinear\n self.stabilizationIsNonlinear = False\n # cek come back\n if self.stabilization is not None:\n for ci in range(self.nc):\n if ci in coefficients.mass:\n for flag in list(coefficients.mass[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.advection:\n for flag in list(coefficients.advection[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.diffusion:\n for diffusionDict in 
list(coefficients.diffusion[ci].values()):\n for flag in list(diffusionDict.values()):\n if flag != 'constant':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.potential:\n for flag in list(coefficients.potential[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.reaction:\n for flag in list(coefficients.reaction[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.hamiltonian:\n for flag in list(coefficients.hamiltonian[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n # determine if we need element boundary storage\n self.elementBoundaryIntegrals = {}\n for ci in range(self.nc):\n self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux is not None) or\n (numericalFluxType is not None) or\n (self.fluxBoundaryConditions[ci] == 'outFlow') or\n (self.fluxBoundaryConditions[ci] == 'mixedFlow') or\n (self.fluxBoundaryConditions[ci] == 'setFlow'))\n #\n # calculate some dimensions\n #\n self.nSpace_global = self.u[0].femSpace.nSpace_global # assume same space dim for all variables\n self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in list(self.u.values())]\n self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in list(self.phi.values())]\n self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in list(self.phi.values())]\n self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in list(self.testSpace.values())]\n self.nFreeDOF_global = [dc.nFreeDOF_global for dc in list(self.dirichletConditions.values())]\n self.nVDOF_element = sum(self.nDOF_trial_element)\n self.nFreeVDOF_global = sum(self.nFreeDOF_global)\n #\n NonlinearEquation.__init__(self, self.nFreeVDOF_global)\n #\n # build the quadrature point dictionaries from the input (this\n # is just for convenience so that the input doesn't have to be\n # complete)\n #\n elementQuadratureDict = {}\n elemQuadIsDict = isinstance(elementQuadrature, dict)\n if elemQuadIsDict: # set terms manually\n for I in self.coefficients.elementIntegralKeys:\n if I in elementQuadrature:\n elementQuadratureDict[I] = elementQuadrature[I]\n else:\n elementQuadratureDict[I] = elementQuadrature['default']\n else:\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[I] = elementQuadrature\n if self.stabilization is not None:\n for I in self.coefficients.elementIntegralKeys:\n if elemQuadIsDict:\n if I in elementQuadrature:\n elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature[I]\n else:\n elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature['default']\n else:\n elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature\n if self.shockCapturing is not None:\n for ci in self.shockCapturing.components:\n if elemQuadIsDict:\n if ('numDiff', ci, ci) in elementQuadrature:\n elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature[('numDiff', ci, ci)]\n else:\n elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature['default']\n else:\n elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature\n if massLumping:\n for ci in list(self.coefficients.mass.keys()):\n elementQuadratureDict[('m', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n if reactionLumping:\n for ci in 
list(self.coefficients.mass.keys()):\n elementQuadratureDict[('r', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n elementBoundaryQuadratureDict = {}\n if isinstance(elementBoundaryQuadrature, dict): # set terms manually\n for I in self.coefficients.elementBoundaryIntegralKeys:\n if I in elementBoundaryQuadrature:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]\n else:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']\n else:\n for I in self.coefficients.elementBoundaryIntegralKeys:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature\n #\n # find the union of all element quadrature points and\n # build a quadrature rule for each integral that has a\n # weight at each point in the union\n # mwf include tag telling me which indices are which quadrature rule?\n (self.elementQuadraturePoints, self.elementQuadratureWeights,\n self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)\n self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]\n self.nQuadraturePoints_global = self.nQuadraturePoints_element * self.mesh.nElements_global\n #\n # Repeat the same thing for the element boundary quadrature\n #\n (self.elementBoundaryQuadraturePoints,\n self.elementBoundaryQuadratureWeights,\n self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)\n self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]\n self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global *\n self.mesh.nElementBoundaries_element *\n self.nElementBoundaryQuadraturePoints_elementBoundary)\n #\n # storage dictionaries\n self.scalars_element = set()\n #\n # simplified allocations for test==trial and also check if space is mixed or not\n #\n self.q = {}\n self.ebq = {}\n self.ebq_global = {}\n self.ebqe = {}\n self.phi_ip = {}\n self.edge_based_cfl = np.zeros(self.u[0].dof.shape)\n # mesh\n self.q['x'] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, 3), 'd')\n self.ebqe['x'] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary, 3), 'd')\n self.q[('u', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('dV_u', 0)] = (old_div(1.0, self.mesh.nElements_global)) * np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('grad(u)', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')\n self.q[('m', 0)] = self.q[('u', 0)]\n self.q[('m_last', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('mt', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q['dV'] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q['dV_last'] = -1000 * np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('m_tmp', 0)] = self.q[('u', 0)].copy()\n self.q[('cfl', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('numDiff', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.ebqe[('u', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, 
self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')\n self.ebqe[('grad(u)', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary, self.nSpace_global), 'd')\n self.ebqe[('advectiveFlux_bc_flag', 0)] = np.zeros(\n (self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'i')\n self.ebqe[('advectiveFlux_bc', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')\n self.ebqe[('advectiveFlux', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')\n\n self.points_elementBoundaryQuadrature = set()\n self.scalars_elementBoundaryQuadrature = set([('u', ci) for ci in range(self.nc)])\n self.vectors_elementBoundaryQuadrature = set()\n self.tensors_elementBoundaryQuadrature = set()\n self.inflowBoundaryBC = {}\n self.inflowBoundaryBC_values = {}\n self.inflowFlux = {}\n for cj in range(self.nc):\n self.inflowBoundaryBC[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global,), 'i')\n self.inflowBoundaryBC_values[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nDOF_trial_element[cj]), 'd')\n self.inflowFlux[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')\n self.internalNodes = set(range(self.mesh.nNodes_global))\n # identify the internal nodes this is ought to be in mesh\n # \\todo move this to mesh\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n eN_global = self.mesh.elementBoundaryElementsArray[ebN, 0]\n ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN, 0]\n for i in range(self.mesh.nNodes_element):\n if i != ebN_element:\n I = self.mesh.elementNodesArray[eN_global, i]\n self.internalNodes -= set([I])\n self.nNodes_internal = len(self.internalNodes)\n self.internalNodesArray = np.zeros((self.nNodes_internal,), 'i')\n for nI, n in enumerate(self.internalNodes):\n self.internalNodesArray[nI] = n\n #\n del self.internalNodes\n self.internalNodes = None\n logEvent(\"Updating local to global mappings\", 2)\n self.updateLocal2Global()\n logEvent(\"Building time integration object\", 2)\n logEvent(memory(\"inflowBC, internalNodes,updateLocal2Global\", \"OneLevelTransport\"), level=4)\n # mwf for interpolating subgrid error for gradients etc\n if self.stabilization and self.stabilization.usesGradientStabilization:\n self.timeIntegration = TimeIntegrationClass(self, integrateInterpolationPoints=True)\n else:\n self.timeIntegration = TimeIntegrationClass(self)\n\n if options is not None:\n self.timeIntegration.setFromOptions(options)\n logEvent(memory(\"TimeIntegration\", \"OneLevelTransport\"), level=4)\n logEvent(\"Calculating numerical quadrature formulas\", 2)\n self.calculateQuadrature()\n self.setupFieldStrides()\n\n comm = Comm.get()\n self.comm = comm\n if comm.size() > 1:\n assert numericalFluxType is not None and numericalFluxType.useWeakDirichletConditions, \"You must use a numerical flux to apply weak boundary conditions for parallel runs\"\n\n logEvent(memory(\"stride+offset\", \"OneLevelTransport\"), level=4)\n if numericalFluxType is not None:\n if options is None or options.periodicDirichletConditions is None:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n 
diffusiveFluxBoundaryConditionsSetterDictDict)\n else:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict,\n options.periodicDirichletConditions)\n else:\n self.numericalFlux = None\n # set penalty terms\n # cek todo move into numerical flux initialization\n if 'penalty' in self.ebq_global:\n for ebN in range(self.mesh.nElementBoundaries_global):\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebq_global['penalty'][ebN, k] = old_div(self.numericalFlux.penalty_constant, \\\n (self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power))\n # penalty term\n # cek move to Numerical flux initialization\n if 'penalty' in self.ebqe:\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebqe['penalty'][ebNE, k] = old_div(self.numericalFlux.penalty_constant, \\\n self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)\n logEvent(memory(\"numericalFlux\", \"OneLevelTransport\"), level=4)\n self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray\n # use post processing tools to get conservative fluxes, None by default\n from proteus import PostProcessingTools\n self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)\n logEvent(memory(\"velocity postprocessor\", \"OneLevelTransport\"), level=4)\n # helper for writing out data storage\n from proteus import Archiver\n self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n # TODO get rid of this\n for ci, fbcObject in list(self.fluxBoundaryConditionsObjectsDict.items()):\n self.ebqe[('advectiveFlux_bc_flag', ci)] = np.zeros(self.ebqe[('advectiveFlux_bc', ci)].shape, 'i')\n for t, g in list(fbcObject.advectiveFluxBoundaryConditionsDict.items()):\n if ci in self.coefficients.advection:\n self.ebqe[('advectiveFlux_bc', ci)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)\n self.ebqe[('advectiveFlux_bc_flag', ci)][t[0], t[1]] = 1\n\n if hasattr(self.numericalFlux, 'setDirichletValues'):\n self.numericalFlux.setDirichletValues(self.ebqe)\n if not hasattr(self.numericalFlux, 'isDOFBoundary'):\n self.numericalFlux.isDOFBoundary = {0: np.zeros(self.ebqe[('u', 0)].shape, 'i')}\n if not hasattr(self.numericalFlux, 'ebqe'):\n self.numericalFlux.ebqe = {('u', 0): np.zeros(self.ebqe[('u', 0)].shape, 'd')}\n # TODO how to handle redistancing calls for calculateCoefficients,calculateElementResidual etc\n self.globalResidualDummy = None\n compKernelFlag = 0\n self.vof = cVOF_base(self.nSpace_global,\n self.nQuadraturePoints_element,\n self.u[0].femSpace.elementMaps.localFunctionSpace.dim,\n self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,\n self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n compKernelFlag)\n\n self.forceStrongConditions = False\n if self.forceStrongConditions:\n self.dirichletConditionsForceDOF = DOFBoundaryConditions(self.u[0].femSpace, dofBoundaryConditionsSetterDict[0], weakDirichletConditions=False)\n\n if self.movingDomain:\n self.MOVING_DOMAIN = 1.0\n else:\n self.MOVING_DOMAIN = 0.0\n if 
self.mesh.nodeVelocityArray is None:\n self.mesh.nodeVelocityArray = np.zeros(self.mesh.nodeArray.shape, 'd')\n\n # Stuff added by mql.\n # Some ASSERTS to restrict the combination of the methods\n if self.coefficients.STABILIZATION_TYPE > 1:\n assert self.timeIntegration.isSSP == True, \"If STABILIZATION_TYPE>1, use RKEV timeIntegration within VOF model\"\n cond = 'levelNonlinearSolver' in dir(options) and (options.levelNonlinearSolver ==\n ExplicitLumpedMassMatrix or options.levelNonlinearSolver == ExplicitConsistentMassMatrixForVOF)\n assert cond, \"If STABILIZATION_TYPE>1, use levelNonlinearSolver=ExplicitLumpedMassMatrix or ExplicitConsistentMassMatrixForVOF\"\n if 'levelNonlinearSolver' in dir(options) and options.levelNonlinearSolver == ExplicitLumpedMassMatrix:\n assert self.coefficients.LUMPED_MASS_MATRIX, \"If levelNonlinearSolver=ExplicitLumpedMassMatrix, use LUMPED_MASS_MATRIX=True\"\n if self.coefficients.LUMPED_MASS_MATRIX == True:\n cond = 'levelNonlinearSolver' in dir(options) and options.levelNonlinearSolver == ExplicitLumpedMassMatrix\n assert cond, \"Use levelNonlinearSolver=ExplicitLumpedMassMatrix when the mass matrix is lumped\"\n if self.coefficients.FCT == True:\n assert self.coefficients.STABILIZATION_TYPE > 1, \"Use FCT just with STABILIZATION_TYPE>1; i.e., edge based stabilization\"\n if self.coefficients.STABILIZATION_TYPE==1:\n cond = 'levelNonlinearSolver' in dir(options) and options.levelNonlinearSolver == TwoStageNewton\n assert cond, \"If STABILIZATION_TYPE==1, use levelNonlinearSolver=TwoStageNewton\"\n if self.coefficients.STABILIZATION_TYPE==1:\n self.useTwoStageNewton = True\n assert isinstance(self.timeIntegration, proteus.TimeIntegration.BackwardEuler_cfl), \"If STABILIZATION_TYPE=1, use BackwardEuler_cfl\"\n assert options.levelNonlinearSolver == TwoStageNewton, \"If STABILIZATION_TYPE=1, use levelNonlinearSolver=TwoStageNewton\"\n assert self.coefficients.ENTROPY_TYPE in [0,1], \"Set ENTROPY_TYPE={0,1}\"\n assert self.coefficients.STABILIZATION_TYPE in [0,1,2,3,4]\n if self.coefficients.STABILIZATION_TYPE==4:\n assert self.coefficients.FCT==True, \"If STABILIZATION_TYPE=4, use FCT=True\"\n \n # mql.
Allow the user to provide functions to define the velocity field\n self.hasVelocityFieldAsFunction = False\n if ('velocityFieldAsFunction') in dir(options):\n self.velocityFieldAsFunction = options.velocityFieldAsFunction\n self.hasVelocityFieldAsFunction = True\n\n # For edge based methods\n self.ML = None # lumped mass matrix\n self.MC_global = None # consistent mass matrix\n self.uLow = None\n self.dt_times_dC_minus_dL = None\n self.dLow = None\n self.min_u_bc = None\n self.max_u_bc = None\n self.quantDOFs = np.zeros(self.u[0].dof.shape, 'd')\n\n # For Taylor Galerkin methods\n self.stage = 1\n self.auxTaylorGalerkinFlag = 1 \n self.uTilde_dof = np.zeros(self.u[0].dof.shape)\n self.degree_polynomial = 1\n try:\n self.degree_polynomial = self.u[0].femSpace.order\n except:\n pass\n self.calculateJacobian = self.vof.calculateJacobian\n if (self.coefficients.STABILIZATION_TYPE <= 1): # SUPG or Taylor Galerkin\n self.calculateResidual = self.vof.calculateResidualElementBased \n else:\n self.calculateResidual = self.vof.calculateResidualEdgeBased\n \n def FCTStep(self):\n rowptr, colind, MassMatrix = self.MC_global.getCSRrepresentation()\n limited_solution = np.zeros(self.u[0].dof.shape)\n\n self.vof.FCTStep(\n self.timeIntegration.dt,\n self.nnz, \n len(rowptr) - 1, # number of DOFs\n self.ML, \n self.u_dof_old,\n self.timeIntegration.u, # high order solution\n self.uLow,\n self.dLow,\n limited_solution,\n rowptr, # Row indices for Sparsity Pattern (convenient for DOF loops)\n colind, # Column indices for Sparsity Pattern (convenient for DOF loops)\n MassMatrix,\n self.dt_times_dC_minus_dL,\n self.min_u_bc,\n self.max_u_bc,\n self.coefficients.LUMPED_MASS_MATRIX,\n self.coefficients.STABILIZATION_TYPE)\n #self.timeIntegration.u[:] = limited_solution\n fromFreeToGlobal=0 #direction copying\n cfemIntegrals.copyBetweenFreeUnknownsAndGlobalUnknowns(fromFreeToGlobal,\n self.offset[0],\n self.stride[0],\n self.dirichletConditions[0].global2freeGlobal_global_dofs,\n self.dirichletConditions[0].global2freeGlobal_free_dofs,\n self.timeIntegration.u,\n limited_solution)\n\n def updateVelocityFieldAsFunction(self):\n X = {0: self.q[('x')][:, :, 0],\n 1: self.q[('x')][:, :, 1],\n 2: self.q[('x')][:, :, 2]}\n t = self.timeIntegration.t\n self.coefficients.q_v[..., 0] = self.velocityFieldAsFunction[0](X, t)\n self.coefficients.q_v[..., 1] = self.velocityFieldAsFunction[1](X, t)\n if (self.nSpace_global == 3):\n self.coefficients.q_v[..., 2] = self.velocityFieldAsFunction[2](X, t)\n\n # BOUNDARY\n ebqe_X = {0: self.ebqe['x'][:, :, 0],\n 1: self.ebqe['x'][:, :, 1],\n 2: self.ebqe['x'][:, :, 2]}\n self.coefficients.ebqe_v[..., 0] = self.velocityFieldAsFunction[0](ebqe_X, t)\n self.coefficients.ebqe_v[..., 1] = self.velocityFieldAsFunction[1](ebqe_X, t)\n if (self.nSpace_global == 3):\n self.coefficients.ebqe_v[..., 2] = self.velocityFieldAsFunction[2](ebqe_X, t)\n \n def calculateCoefficients(self):\n pass\n\n def calculateElementResidual(self):\n if self.globalResidualDummy is not None:\n self.getResidual(self.u[0].dof, self.globalResidualDummy)\n\n def getMassMatrix(self):\n # JACOBIANS (FOR ELEMENT TRANSFORMATION)\n self.q[('J')] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.q[('inverse(J)')] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.q[('det(J)')] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element),\n 'd')\n 
self.u[0].femSpace.elementMaps.getJacobianValues(self.elementQuadraturePoints,\n self.q['J'],\n self.q['inverse(J)'],\n self.q['det(J)'])\n self.q['abs(det(J))'] = np.abs(self.q['det(J)'])\n # SHAPE FUNCTIONS\n self.q[('w',0)] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nDOF_test_element[0]),\n 'd')\n self.q[('w*dV_m',0)] = self.q[('w',0)].copy()\n self.u[0].femSpace.getBasisValues(self.elementQuadraturePoints, self.q[('w',0)])\n cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('u',0)],\n self.q['abs(det(J))'],\n self.q[('w',0)],\n self.q[('w*dV_m',0)])\n # assume a linear mass term\n dm = np.ones(self.q[('u', 0)].shape, 'd')\n elementMassMatrix = np.zeros((self.mesh.nElements_global,\n self.nDOF_test_element[0],\n self.nDOF_trial_element[0]), 'd')\n cfemIntegrals.updateMassJacobian_weak_lowmem(dm,\n self.q[('w', 0)],\n self.q[('w*dV_m', 0)],\n elementMassMatrix)\n self.MC_a = self.nzval.copy()\n self.MC_global = SparseMat(self.nFreeDOF_global[0],\n self.nFreeDOF_global[0],\n self.nnz,\n self.MC_a,\n self.colind,\n self.rowptr)\n cfemIntegrals.zeroJacobian_CSR(self.nnz, self.MC_global)\n cfemIntegrals.updateGlobalJacobianFromElementJacobian_CSR(self.l2g[0]['nFreeDOF'],\n self.l2g[0]['freeLocal'],\n self.l2g[0]['nFreeDOF'],\n self.l2g[0]['freeLocal'],\n self.csrRowIndeces[(0, 0)],\n self.csrColumnOffsets[(0, 0)],\n elementMassMatrix,\n self.MC_global)\n\n \n self.ML = np.zeros((self.nFreeDOF_global[0],), 'd')\n for i in range(self.nFreeDOF_global[0]):\n self.ML[i] = self.MC_a[self.rowptr[i]:self.rowptr[i + 1]].sum()\n np.testing.assert_almost_equal(self.ML.sum(),\n self.mesh.volume,\n err_msg=\"Trace of lumped mass matrix should be the domain volume\", verbose=True)\n \n def initVectors(self):\n if self.coefficients.porosity_dof is None:\n self.coefficients.porosity_dof = np.ones(self.u[0].dof.shape, 'd')\n if self.u_dof_old is None:\n # Pass initial condition to u_dof_old\n self.u_dof_old = np.copy(self.u[0].dof)\n\n rowptr, colind, MC = self.MC_global.getCSRrepresentation()\n # This is dummy. 
I just care about the csr structure of the sparse matrix\n self.dt_times_dC_minus_dL = np.zeros(MC.shape, 'd')\n self.uLow = np.zeros(self.u[0].dof.shape, 'd')\n self.dLow = np.zeros(MC.shape, 'd')\n \n def getResidual(self, u, r):\n import pdb\n import copy\n \"\"\"\n Calculate the element residuals and add in to the global residual\n \"\"\"\n\n if self.MC_global is None:\n self.getMassMatrix()\n self.initVectors()\n\n if self.coefficients.set_vos:\n self.coefficients.set_vos(self.q['x'], self.coefficients.q_vos)\n\n # Reset some vectors for FCT\n self.min_u_bc = np.zeros(self.u[0].dof.shape, 'd') + 1E10\n self.max_u_bc = np.zeros(self.u[0].dof.shape, 'd') - 1E10\n self.dt_times_dC_minus_dL.fill(0.0)\n self.uLow.fill(0.0)\n self.dLow.fill(0.0)\n \n r.fill(0.0)\n # Load the unknowns into the finite element dof\n self.timeIntegration.calculateCoefs()\n self.timeIntegration.calculateU(u)\n self.setUnknowns(self.timeIntegration.u)\n # cek can put in logic to skip of BC's don't depend on t or u\n # Dirichlet boundary conditions\n # if hasattr(self.numericalFlux,'setDirichletValues'):\n if (self.stage!=2):\n self.numericalFlux.setDirichletValues(self.ebqe)\n # flux boundary conditions\n for t, g in list(self.fluxBoundaryConditionsObjectsDict[0].advectiveFluxBoundaryConditionsDict.items()):\n self.ebqe[('advectiveFlux_bc', 0)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)\n self.ebqe[('advectiveFlux_bc_flag', 0)][t[0], t[1]] = 1\n\n if self.forceStrongConditions:\n for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):\n self.u[0].dof[dofN] = g(self.dirichletConditionsForceDOF.DOFBoundaryPointDict[dofN], self.timeIntegration.t)\n\n if (self.stage==2 and self.auxTaylorGalerkinFlag==1):\n self.uTilde_dof[:] = self.u[0].dof\n self.auxTaylorGalerkinFlag=0\n\n self.calculateResidual( # element\n self.timeIntegration.dt,\n self.u[0].femSpace.elementMaps.psi,\n self.u[0].femSpace.elementMaps.grad_psi,\n self.mesh.nodeArray,\n self.mesh.nodeVelocityArray,\n self.MOVING_DOMAIN,\n self.mesh.elementNodesArray,\n self.elementQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n # element boundary\n self.u[0].femSpace.elementMaps.psi_trace,\n self.u[0].femSpace.elementMaps.grad_psi_trace,\n self.elementBoundaryQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.elementMaps.boundaryNormals,\n self.u[0].femSpace.elementMaps.boundaryJacobians,\n # physics\n self.mesh.nElements_global,\n self.coefficients.useMetrics,\n self.timeIntegration.alpha_bdf,\n self.shockCapturing.lag,\n self.shockCapturing.shockCapturingFactor,\n self.coefficients.sc_uref,\n self.coefficients.sc_beta,\n # VRANS start\n self.coefficients.q_porosity,\n self.coefficients.porosity_dof, # I need this for edge based methods\n # VRANS end\n self.u[0].femSpace.dofMap.l2g,\n self.l2g[0]['freeGlobal'],\n self.mesh.elementDiametersArray,\n self.degree_polynomial,\n self.u[0].dof,\n self.u_dof_old, # For Backward Euler this is un, for SSP this is the lstage\n self.coefficients.q_v,\n self.timeIntegration.m_tmp[0],\n self.q[('u', 0)],\n self.timeIntegration.beta_bdf[0],\n self.q['dV'],\n self.q['dV_last'],\n self.q[('cfl', 0)],\n self.edge_based_cfl,\n self.shockCapturing.numDiff[0],\n self.shockCapturing.numDiff_last[0],\n self.offset[0], self.stride[0],\n r,\n 
self.mesh.nExteriorElementBoundaries_global,\n self.mesh.exteriorElementBoundariesArray,\n self.mesh.elementBoundaryElementsArray,\n self.mesh.elementBoundaryLocalElementBoundariesArray,\n self.coefficients.ebqe_v,\n # VRANS start\n self.coefficients.ebqe_porosity,\n # VRANS end\n self.numericalFlux.isDOFBoundary[0],\n self.numericalFlux.ebqe[('u', 0)],\n self.ebqe[('advectiveFlux_bc_flag', 0)],\n self.ebqe[('advectiveFlux_bc', 0)],\n self.coefficients.ebqe_phi, self.coefficients.epsFact,\n self.ebqe[('u', 0)],\n self.ebqe[('advectiveFlux', 0)],\n # TAYLOR GALERKIN\n self.stage,\n self.uTilde_dof, \n # ENTROPY VISCOSITY and ARTIFICIAL COMPRESSION\n self.coefficients.cE,\n self.coefficients.cMax,\n self.coefficients.cK,\n # PARAMETERS FOR LOG BASED ENTROPY FUNCTION\n self.coefficients.uL,\n self.coefficients.uR,\n # PARAMETERS FOR EDGE VISCOSITY\n len(self.rowptr) - 1, # num of DOFs\n self.nnz, # num of non-zero entries in the sparsity pattern\n self.rowptr, # Row indices for Sparsity Pattern (convenient for DOF loops)\n self.colind, # Column indices for Sparsity Pattern (convenient for DOF loops)\n self.csrRowIndeces[(0, 0)], # row indices (convenient for element loops)\n self.csrColumnOffsets[(0, 0)], # column indices (convenient for element loops)\n self.csrColumnOffsets_eb[(0, 0)], # indices for boundary terms\n self.ML,\n # PARAMETERS FOR 1st or 2nd ORDER MPP METHOD\n self.coefficients.LUMPED_MASS_MATRIX,\n self.coefficients.STABILIZATION_TYPE,\n self.coefficients.ENTROPY_TYPE,\n # FLUX CORRECTED TRANSPORT\n self.uLow,\n self.dLow,\n self.dt_times_dC_minus_dL,\n self.min_u_bc,\n self.max_u_bc,\n # AUX QUANTITIES OF INTEREST\n self.quantDOFs)\n\n if self.forceStrongConditions:\n for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):\n r[dofN] = 0\n\n if (self.auxiliaryCallCalculateResidual == False):\n edge_based_cflMax = globalMax(self.edge_based_cfl.max()) * self.timeIntegration.dt\n cell_based_cflMax = globalMax(self.q[('cfl', 0)].max()) * self.timeIntegration.dt\n logEvent(\"... Current dt = \" + str(self.timeIntegration.dt), level=4)\n logEvent(\"... Maximum Cell Based CFL = \" + str(cell_based_cflMax), level=2)\n logEvent(\"...
Maximum Edge Based CFL = \" + str(edge_based_cflMax), level=2)\n\n if self.stabilization:\n self.stabilization.accumulateSubgridMassHistory(self.q)\n logEvent(\"Global residual\", level=9, data=r)\n self.nonlinear_function_evaluations += 1\n if self.globalResidualDummy is None:\n self.globalResidualDummy = np.zeros(r.shape, 'd')\n\n def getJacobian(self, jacobian):\n cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,\n jacobian)\n\n self.calculateJacobian( # element\n self.u[0].femSpace.elementMaps.psi,\n self.u[0].femSpace.elementMaps.grad_psi,\n self.mesh.nodeArray,\n self.mesh.nodeVelocityArray,\n self.MOVING_DOMAIN,\n self.mesh.elementNodesArray,\n self.elementQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n self.u[0].femSpace.psi,\n self.u[0].femSpace.grad_psi,\n # element boundary\n self.u[0].femSpace.elementMaps.psi_trace,\n self.u[0].femSpace.elementMaps.grad_psi_trace,\n self.elementBoundaryQuadratureWeights[('u', 0)],\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.psi_trace,\n self.u[0].femSpace.grad_psi_trace,\n self.u[0].femSpace.elementMaps.boundaryNormals,\n self.u[0].femSpace.elementMaps.boundaryJacobians,\n # physics\n self.mesh.nElements_global,\n self.coefficients.useMetrics,\n self.timeIntegration.alpha_bdf,\n self.shockCapturing.lag,\n self.shockCapturing.shockCapturingFactor,\n # VRANS start\n self.coefficients.q_porosity,\n # VRANS end\n self.u[0].femSpace.dofMap.l2g,\n self.l2g[0]['freeGlobal'],\n self.mesh.elementDiametersArray,\n self.u[0].dof,\n self.coefficients.q_v,\n self.timeIntegration.beta_bdf[0],\n self.q[('cfl', 0)],\n self.shockCapturing.numDiff_last[0],\n self.csrRowIndeces[(0, 0)], self.csrColumnOffsets[(0, 0)],\n jacobian.getCSRrepresentation()[2],\n self.mesh.nExteriorElementBoundaries_global,\n self.mesh.exteriorElementBoundariesArray,\n self.mesh.elementBoundaryElementsArray,\n self.mesh.elementBoundaryLocalElementBoundariesArray,\n self.coefficients.ebqe_v,\n # VRANS start\n self.coefficients.ebqe_porosity,\n # VRANS end\n self.numericalFlux.isDOFBoundary[0],\n self.numericalFlux.ebqe[('u', 0)],\n self.ebqe[('advectiveFlux_bc_flag', 0)],\n self.ebqe[('advectiveFlux_bc', 0)],\n self.csrColumnOffsets_eb[(0, 0)],\n self.coefficients.STABILIZATION_TYPE)\n\n # Load the Dirichlet conditions directly into residual\n if self.forceStrongConditions:\n scaling = 1.0 # probably want to add some scaling to match non-dirichlet diagonals in linear system\n for dofN in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.keys()):\n global_dofN = dofN\n for i in range(self.rowptr[global_dofN],\n self.rowptr[global_dofN + 1]):\n if (self.colind[i] == global_dofN):\n # print \"RBLES forcing residual cj = %s dofN= %s\n # global_dofN= %s was self.nzval[i]= %s now =%s \" %\n # (cj,dofN,global_dofN,self.nzval[i],scaling)\n self.nzval[i] = scaling\n else:\n self.nzval[i] = 0.0\n # print \"RBLES zeroing residual cj = %s dofN= %s\n # global_dofN= %s \" % (cj,dofN,global_dofN)\n logEvent(\"Jacobian \", level=10, data=jacobian)\n # mwf decide if this is reasonable for solver statistics\n self.nonlinear_function_jacobian_evaluations += 1\n return jacobian\n\n def calculateElementQuadrature(self):\n \"\"\"\n Calculate the physical location and weights of the quadrature rules\n and the shape information at the quadrature points.\n\n This function should be called only when the mesh changes.\n \"\"\"\n self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,\n 
self.q['x'])\n self.u[0].femSpace.elementMaps.getBasisValuesRef(\n self.elementQuadraturePoints)\n self.u[0].femSpace.elementMaps.getBasisGradientValuesRef(\n self.elementQuadraturePoints)\n self.u[0].femSpace.getBasisValuesRef(self.elementQuadraturePoints)\n self.u[0].femSpace.getBasisGradientValuesRef(self.elementQuadraturePoints)\n self.coefficients.initializeElementQuadrature(self.timeIntegration.t,\n self.q)\n if self.stabilization is not None:\n self.stabilization.initializeElementQuadrature(\n self.mesh, self.timeIntegration.t, self.q)\n self.stabilization.initializeTimeIntegration(self.timeIntegration)\n if self.shockCapturing is not None:\n self.shockCapturing.initializeElementQuadrature(\n self.mesh, self.timeIntegration.t, self.q)\n\n def calculateElementBoundaryQuadrature(self):\n pass\n\n def calculateExteriorElementBoundaryQuadrature(self):\n \"\"\"\n Calculate the physical location and weights of the quadrature rules\n and the shape information at the quadrature points on global element boundaries.\n\n This function should be called only when the mesh changes.\n \"\"\"\n #\n # get physical locations of element boundary quadrature points\n #\n # assume all components live on the same mesh\n self.u[0].femSpace.elementMaps.getBasisValuesTraceRef(\n self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.elementMaps.getBasisGradientValuesTraceRef(\n self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.getBasisValuesTraceRef(\n self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.getBasisGradientValuesTraceRef(\n self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.elementMaps.getValuesGlobalExteriorTrace(\n self.elementBoundaryQuadraturePoints, self.ebqe['x'])\n self.fluxBoundaryConditionsObjectsDict = dict([(cj, FluxBoundaryConditions(self.mesh,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.ebqe[('x')],\n self.advectiveFluxBoundaryConditionsSetterDict[cj],\n self.diffusiveFluxBoundaryConditionsSetterDictDict[cj]))\n for cj in list(self.advectiveFluxBoundaryConditionsSetterDict.keys())])\n self.coefficients.initializeGlobalExteriorElementBoundaryQuadrature(\n self.timeIntegration.t, self.ebqe)\n\n def estimate_mt(self):\n pass\n\n def calculateSolutionAtQuadrature(self):\n pass\n\n def calculateAuxiliaryQuantitiesAfterStep(self):\n pass\n\n def updateAfterMeshMotion(self):\n pass\n" ]
[ [ "numpy.abs", "numpy.ones", "numpy.zeros", "numpy.copy" ] ]
GuillemGSubies/TFG
[ "ca808761f397b39626614641544f74ebc6594987" ]
[ "src/model.py" ]
[ "# @author Guillem G. Subies\n\n\nimport datetime\nimport json\nimport zipfile\nfrom math import ceil\nfrom subprocess import check_call\n\nimport jsonpickle\nimport keras.utils as ku\nimport numpy as np\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\nfrom keras.layers import (\n LSTM,\n Bidirectional,\n CuDNNGRU,\n CuDNNLSTM,\n Dense,\n Flatten,\n GlobalAveragePooling1D,\n GlobalMaxPooling1D,\n GRU,\n Dropout,\n)\nfrom keras.layers.embeddings import Embedding\nfrom keras.models import Sequential, load_model\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.base import BaseEstimator\n\nfrom .callbacks import ModelFullCheckpoint\nfrom .generators import batchedpatternsgenerator, infinitegenerator, maskedgenerator\nfrom .metrics import perplexity\nfrom .plotting import plot_history as _plot_history\nfrom .utils import sample\n\n\nclass BaseNetwork(BaseEstimator):\n \"\"\"Class to generate new text with neural networks\"\"\"\n\n ###############################################################################\n ##################################Main methods#################################\n ###############################################################################\n\n def __init__(\n self,\n tokenizer=None,\n max_sequence_len=301,\n min_word_appearences=None,\n vocab_size=None,\n batchsize=32,\n **kwargs,\n ):\n \"\"\"\n Parameters\n ----------\n tokenizer : object, optional\n If None, a default tokanizer will be used.\n max_sequence_len : int, optional\n Maximum lenght, in words, of each sample. It will be the minimum between\n the introduced number and the maximum lenght of the saples befor being processed\n min_word_appearences : int, optional\n Minimum number of appearences of a word in the text in order to take it into account\n This must not be used at the same time that vocab_size\n vocab_size : int, optional\n If None, it will be the same as the full vocabulary. Else, the maximum\n size of the vocabulary will be vocab_size.\n batchsize: int, optional\n Size of batches to pass to the fit_generator\n\n \"\"\"\n\n self.vocab_size = vocab_size\n self.min_word_appearences = min_word_appearences\n if self.vocab_size and self.min_word_appearences:\n raise AttributeException(\n \"You must specify only vocab_size or min_word_appearences, not both.\"\n )\n self.tokenizer = (\n Tokenizer(num_words=vocab_size, oov_token=None)\n if tokenizer is None\n else tokenizer\n )\n self.max_sequence_len = max_sequence_len\n self.batchsize = batchsize\n # Other kwargs, this is used in the load_model method\n self.__dict__.update(kwargs)\n\n def etl(self, data, mask=None):\n \"\"\"Method that preprocesses input and creates some necesary variables to be used\n in the fit_generator.\n\n Parameters\n ----------\n data : string\n Dataset we want to use\n mask : list of bool, optional\n Mask to be used in the train test split. For instance the default value means\n that every fourth sample generated will be used in the validation step. 
If mask\n is [True], every sample will be used for training and none for validation.\n\n Returns\n -------\n corpus : iterable of strings\n Preprocessed dataset to be used in the fit phase.\n \"\"\"\n\n # Prepare masks\n self.mask = mask or [True, True, True, False]\n self.testmask = [not x for x in self.mask]\n\n # Basic cleanup\n corpus = data.lower().split(\"\\n\")\n\n # Tokenization\n self.tokenizer.fit_on_texts(corpus)\n if self.min_word_appearences:\n low_count_words = [\n word\n for word, count in self.tokenizer.word_counts.items()\n if count < self.min_word_appearences\n ]\n for word in low_count_words:\n del self.tokenizer.word_index[word]\n del self.tokenizer.word_docs[word]\n del self.tokenizer.word_counts[word]\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n # Total samples\n self.num_train_samples = len(\n list(\n self.patterngenerator(\n corpus, batchsize=self.batchsize, count=True, mask=self.mask\n )\n )\n )\n self.num_test_samples = len(\n list(\n self.patterngenerator(\n corpus, batchsize=self.batchsize, count=True, mask=self.testmask\n )\n )\n )\n\n print(f\"There are a total of {self.num_train_samples} training samples\")\n print(f\"There are a total of {self.num_test_samples} validation samples\")\n\n return corpus\n\n def compile(\n self,\n activation=\"softmax\",\n kind=\"AveragePooling\",\n embedding=None,\n embedding_output_dim=64,\n gpu=False,\n loss=\"categorical_crossentropy\",\n metrics=None,\n optimizer=\"adam\",\n arch=None,\n dropout=None,\n **kwargs,\n ):\n \"\"\"Builds the architecture of a neural network\n\n Parameters\n ----------\n activation : str, optional\n Activation function used in the last layer of the network.\n kind : str, optional\n If \"Flatten\": The architecture will be embedding layer + flatten + dense. This is VERY memory hungry\n If \"MaxPooling\": The architecture will be embedding layer + GlobalMaxPooling1D + dense. This is VERY memory hungry\n If \"AveragePooling\": The architecture will be embedding layer + GlobalAveragePooling1D + dense. This is VERY memory hungry\n If \"LSTM\" or \"GRU\": The architecture will be embedding layer + bidirectional\n LSTM (or GRU) + hidden LSTMs (or GRUs) + dense. The hidden LSTMs (GRUs)\n will be defined by the param \"arch\".\n embedding : str, optional\n If None, a simple embedding layer will be used. If \"fastText\", fastText\n embeddings will be used. fastText embeddings must be downloaded and uncompressed\n in the project root.\n embedding_output_dim : int, optional\n Output dimension of the embedding layer. It is ignored if the used embedding\n is \"fastText\" (it has a fixed size of 300)\n dropout : float, optional\n If specified, a dropout layer will be added\n gpu : bool, optional\n If True, CuDNNLSTM networks will be used instead of LSTM.\n \"\"\"\n\n arch = arch or [64]\n metrics = metrics or [perplexity]\n self.net = Sequential()\n\n # Embedding layer\n output_dim = embedding_output_dim\n trainable = True\n weights = None\n if embedding and \"fastText\" in embedding:\n if embedding == \"fastText\":\n fastText_file = \"crawl-300d-2M.vec\"\n zip_fastText_file = f\"{fastText_file}.zip\"\n url = f\"https://dl.fbaipublicfiles.com/fasttext/vectors-english/{zip_fastText_file}\"\n else:\n fastText_file = \"cc.es.300.vec\"\n zip_fastText_file = f\"{fastText_file}.gz\"\n url = f\"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/{zip_fastText_file}\"\n try:\n embeddings = self.load_vectors_words(fastText_file)\n print(\"Embedding file loaded successfully!\")\n except:\n print(\n \"No embedding file found, downloading it...
(this will take a while)\"\n )\n check_call(f\"curl -L# '{url}'\", shell=True)\n with zipfile.ZipFile(f\"{zip_fastText_file}\", \"r\") as file:\n file.extractall(\"./\")\n embeddings = self.load_vectors_words(fastText_file)\n print(\"Embedding file loaded sucessfully!\")\n finally:\n embedding_matrix = self.create_embedding_matrix(embeddings)\n output_dim = embedding_matrix.shape[1]\n trainable = False\n weights = [embedding_matrix]\n\n self.net.add(\n Embedding(\n input_dim=self.vocab_size,\n output_dim=output_dim,\n input_length=self.max_sequence_len - 1,\n trainable=trainable,\n weights=weights,\n )\n )\n\n # Core layers\n if kind == \"Flatten\":\n self.net.add(Flatten())\n elif kind == \"MaxPooling\":\n self.net.add(GlobalMaxPooling1D())\n elif kind == \"AveragePooling\":\n self.net.add(GlobalAveragePooling1D())\n elif kind == \"LSTM\" or kind == \"GRU\":\n self.Complex_Network(kind=kind, gpu=gpu, arch=arch)\n else:\n raise Exception(\"Unknown network architecture\")\n\n # Dropout layer\n if dropout:\n self.net.add(Dropout(dropout))\n # Final layer\n self.net.add(Dense(self.vocab_size, activation=activation))\n self.net.compile(loss=loss, optimizer=optimizer, metrics=metrics, **kwargs)\n\n print(self.summary())\n\n def fit(\n self,\n corpus,\n callbacks=None,\n checkpoints=True,\n dynamic_lr=True,\n earlystop=True,\n epochs=200,\n verbose=1,\n plot=True,\n restore_best_weights=True,\n **kwargs,\n ):\n\n \"\"\"Fits the model with given data. It uses generators to create train an test samples\n\n Parameters\n ----------\n corpus : list of str\n Dataset to train the model.\n callbacks : object, optional\n checkpoints : bool, optional\n If True, ModelFullCheckpoint will be added to callbacks\n dynamic_lr : bool, optional\n If True, ModelFullCheckpoint will be added to callbacks\n earlystop : bool, optional\n If False no default earlystop will be used. If True, a simple EarlyStopping will be used.\n epochs : int, optional\n Number of train epochs.\n save : str or bool\n Whether to save or not the model in a file. 
If False it will not be saved.\n If strm it will be saved in path=str.\n verbose : int, optional\n Like in keras.\n plot : bool, optional\n Wether to plot the training history at the end of the training or not.\n Returns\n -----\n data : dict\n Embedding dict.\n\n \"\"\"\n\n callbacks = callbacks or []\n if dynamic_lr:\n callbacks.append(\n ReduceLROnPlateau(\n monitor=\"val_perplexity\",\n factor=0.8,\n patience=8,\n verbose=verbose,\n mode=\"min\",\n )\n )\n if earlystop:\n callbacks.append(\n EarlyStopping(\n monitor=\"val_perplexity\",\n min_delta=0,\n patience=20,\n verbose=verbose,\n mode=\"min\",\n restore_best_weights=restore_best_weights,\n )\n )\n if checkpoints:\n model_name = f\"Best_model_{datetime.datetime.now().time()}\"\n print(f\"The model will be saved with the name: {model_name}\")\n callbacks.append(\n ModelFullCheckpoint(\n modelo=self,\n filepath=model_name,\n save_best_only=True,\n monitor=\"val_perplexity\",\n mode=\"min\",\n verbose=verbose,\n )\n )\n print(\"The fit process is starting!\")\n self.net.fit_generator(\n self.patterngenerator(\n corpus, batchsize=self.batchsize, infinite=True, mask=self.mask\n ),\n steps_per_epoch=ceil(self.num_train_samples / self.batchsize),\n callbacks=callbacks,\n epochs=epochs,\n validation_data=self.patterngenerator(\n corpus, batchsize=self.batchsize, infinite=True, mask=self.testmask\n ),\n validation_steps=ceil(self.num_test_samples / self.batchsize),\n verbose=verbose,\n **kwargs,\n )\n if plot:\n self.plot_history()\n\n def generate_text(self, seed_text, next_words):\n \"\"\"Generates text following the given seed\n\n Parameters\n ----------\n seed_text : str\n String to start generating text from (what you pass to the predict method).\n next_words : int\n Number of words to generate\n\n Returns\n -------\n generated_text : str\n String containing the generated text\n \"\"\"\n\n generated_text = seed_text\n for i in range(next_words):\n token_list = self.tokenizer.texts_to_sequences([generated_text])\n token_list = pad_sequences(\n token_list, maxlen=self.max_sequence_len - 1, padding=\"pre\"\n )\n predicted = self.net.predict(token_list, verbose=0)[0]\n sampled_predicted = sample(np.log(predicted), 0.5)\n try:\n generated_text += (\n f\" {self.tokenizer.sequences_to_texts([[sampled_predicted]])[0]}\"\n )\n except:\n # Predicted 0, pass this time\n pass\n\n return generated_text\n\n ###############################################################################\n ##################################Aux methods##################################\n ###############################################################################\n\n def Complex_Network(self, kind, gpu, arch):\n \"\"\"This Network consists in a embedding layer followed by a bidirectional\n LSTM or GRU and some number of hidden LSTM or GRU. 
There is a dense layer\n at the end.\n\n Parameters\n ----------\n gpu : bool\n If True, CuDNN version of the networks will be used.\n arch : list of int\n len(arch) will be the number of layers in the model (being the first one,\n bidirectional) and every elem in arch is the number of neurons for the\n ith layer.\n \"\"\"\n\n if kind == \"LSTM\":\n layer = CuDNNLSTM if gpu else LSTM\n elif kind == \"GRU\":\n layer = CuDNNGRU if gpu else GRU\n\n # Bidirectional layer\n bidirect = layer(\n arch.pop(0),\n input_shape=(self.max_sequence_len,),\n return_sequences=False if len(arch) == 0 else True,\n )\n self.net.add(Bidirectional(bidirect, merge_mode=\"concat\"))\n\n # Hidden layers\n for i, elem in enumerate(arch):\n self.net.add(\n layer(\n elem,\n input_shape=(self.max_sequence_len,),\n return_sequences=True if i < len(arch) - 1 else False,\n )\n )\n\n return self\n\n def summary(self):\n \"\"\"Wrapper method for keras' sequential model summary\"\"\"\n return self.net.summary()\n\n @property\n def history(self):\n \"\"\"keras' sequential model history\"\"\"\n return self.net.history\n\n def plot_history(self):\n \"\"\"Wrapper method for plotting the model history\"\"\"\n _plot_history(self.history)\n\n def save(self, path=None):\n \"\"\"Saves the model in json format. The keras network will be\n saved into a file called path_network.h5 and the rest of\n the params into path_attrs.json\"\"\"\n if path is None:\n path = f\"{self}\"\n kwargs = dict()\n for key in self.__dict__:\n if key != \"net\":\n kwargs[key] = self.__dict__[key]\n try:\n self.net.save(f\"{path}_network.h5\")\n with open(f\"{path}_attrs.json\", \"w\") as outfile:\n json.dump(jsonpickle.encode(kwargs), outfile)\n return \"Model saved successfully!\"\n except:\n return \"Something went wrong when saving the model...\"\n\n @classmethod\n def load_model(cls, path):\n \"\"\"Loads a model from the files. The keras network should be in a file called\n path_network.h5 and the rest of the params in path_attrs.json\"\"\"\n with open(f\"{path}_attrs.json\") as infile:\n kwargs = jsonpickle.decode(json.load(infile))\n kwargs[\"net\"] = load_model(\n f\"{path}_network.h5\", custom_objects={\"perplexity\": perplexity}\n )\n return cls(**kwargs)\n\n ###############################################################################\n ###########################Embedding related methods###########################\n ###############################################################################\n\n def load_vectors_words(self, fname):\n \"\"\"Loads embeddings from a FastText file. 
Only loads embeddings for the given\n dictionary of words.\n\n Parameters\n ----------\n fname : str\n Location of the embedding file\n\n Returns\n -----\n data : dict\n Embedding dict\n\n \"\"\"\n\n data = {}\n vocab = tuple(self.tokenizer.word_index.keys())[: self.vocab_size - 1]\n with open(fname) as fin:\n next(fin) # Skip first line, just contains embeddings size data\n for line in fin:\n tokens = line.rstrip().split(\" \")\n word = tokens[0]\n if word in vocab:\n data[word] = np.array(list(map(float, tokens[1:])))\n return data\n\n def create_embedding_matrix(self, embeddings):\n \"\"\"Creates a weight matrix for an Embedding layer using an embeddings dictionary\n If a word is not in the embedding dict, its value will be the mean of all the values\n An improvement to this could be using subword information.\n\n Parameters\n ----------\n embeddings : dict\n preloaded embedding dict to use\n\n Returns\n -------\n embedding_matrix : numpy.ndarray\n Matrix with the embedding weights for each word in the vocabulary.\"\"\"\n\n # Compute mean and standard deviation for embeddings\n all_embs = np.stack(embeddings.values())\n emb_mean, emb_std = all_embs.mean(), all_embs.std()\n # If we are using fastText, this is 300\n embedding_size = len(list(embeddings.values())[0])\n embedding_matrix = np.random.normal(\n emb_mean, emb_std, (self.vocab_size, embedding_size)\n )\n vocab = tuple(self.tokenizer.word_index.items())[: self.vocab_size - 1]\n for word, i in vocab:\n embedding_vector = embeddings.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix\n\n ###############################################################################\n #########################fit_generator related methods#########################\n ####################Inspired by Alvaro Barbero's neurowriter###################\n ###https://github.com/albarji/neurowriter/blob/master/neurowriter/encoding.py##\n ###############################################################################\n\n def patterngenerator(self, corpus, **kwargs):\n \"\"\"Infinite generator of encoded patterns.\n\n Parameters\n -----------\n corpus : iterable of strings\n The corpus\n **kwargs : any other arguments are passed on to decodetext\n \"\"\"\n\n # Pre-tokenize all corpus documents, for efficiency\n tokenizedcorpus = self.tokenizer.texts_to_sequences(corpus)\n self.max_sequence_len = min(\n len(max(tokenizedcorpus, key=len)), self.max_sequence_len\n )\n for pattern in self._tokenizedpatterngenerator(tokenizedcorpus, **kwargs):\n yield pattern\n\n @infinitegenerator\n @batchedpatternsgenerator\n @maskedgenerator\n def _tokenizedpatterngenerator(self, tokenizedcorpus, **kwargs):\n for token_list in tokenizedcorpus:\n for i in range(1, len(token_list)):\n sampl = np.array(\n pad_sequences(\n [token_list[: i + 1]],\n maxlen=self.max_sequence_len,\n padding=\"pre\",\n value=0,\n )\n )\n X, y = sampl[:, :-1], sampl[:, -1]\n y = ku.to_categorical(y, num_classes=self.vocab_size)\n if \"count\" in kwargs and kwargs[\"count\"] is True:\n yield 0, 0\n else:\n yield X[0], y[0]\n" ]
[ [ "numpy.random.normal", "numpy.log" ] ]
microsoft/mutransformers
[ "480287ce7b18a07a3432e8f2fbc0f0e5b71e2599" ]
[ "mutransformers/models/gpt2/_original_configuration_gpt2.py" ]
[ "# coding=utf-8\n# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" OpenAI GPT-2 configuration\"\"\"\nfrom collections import OrderedDict\nfrom typing import Any, List, Mapping, Optional\n\nfrom transformers import PreTrainedTokenizer, TensorType, is_torch_available\n\nfrom ...configuration_utils import PretrainedConfig\nfrom ...onnx import OnnxConfigWithPast, PatchingSpec\nfrom ...utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\nGPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n \"gpt2\": \"https://huggingface.co/gpt2/resolve/main/config.json\",\n \"gpt2-medium\": \"https://huggingface.co/gpt2-medium/resolve/main/config.json\",\n \"gpt2-large\": \"https://huggingface.co/gpt2-large/resolve/main/config.json\",\n \"gpt2-xl\": \"https://huggingface.co/gpt2-xl/resolve/main/config.json\",\n \"distilgpt2\": \"https://huggingface.co/distilgpt2/resolve/main/config.json\",\n}\n\n\nclass GPT2Config(PretrainedConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to\n instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a\n configuration with the defaults will yield a similar configuration to that of the GPT-2\n [small](https://huggingface.co/gpt2) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 50257):\n Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].\n n_positions (`int`, *optional*, defaults to 1024):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n n_embd (`int`, *optional*, defaults to 768):\n Dimensionality of the embeddings and hidden states.\n n_layer (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n n_head (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n n_inner (`int`, *optional*, defaults to None):\n Dimensionality of the inner feed-forward layers. 
`None` will set it to 4 times n_embd\n activation_function (`str`, *optional*, defaults to `\"gelu\"`):\n Activation function, to be selected in the list `[\"relu\", \"silu\", \"gelu\", \"tanh\", \"gelu_new\"]`.\n resid_pdrop (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n embd_pdrop (`int`, *optional*, defaults to 0.1):\n The dropout ratio for the embeddings.\n attn_pdrop (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention.\n layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):\n The epsilon to use in the layer normalization layers.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n summary_type (`string`, *optional*, defaults to `\"cls_index\"`):\n Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and\n [`TFGPT2DoubleHeadsModel`].\n\n Has to be one of the following options:\n\n - `\"last\"`: Take the last token hidden state (like XLNet).\n - `\"first\"`: Take the first token hidden state (like BERT).\n - `\"mean\"`: Take the mean of all tokens hidden states.\n - `\"cls_index\"`: Supply a Tensor of classification token position (like GPT/GPT-2).\n - `\"attn\"`: Not implemented now, use multi-head attention.\n summary_use_proj (`bool`, *optional*, defaults to `True`):\n Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and\n [`TFGPT2DoubleHeadsModel`].\n\n Whether or not to add a projection after the vector extraction.\n summary_activation (`str`, *optional*):\n Argument used when doing sequence summary. Used in for the multiple choice head in\n [`GPT2DoubleHeadsModel`].\n\n Pass `\"tanh\"` for a tanh activation to the output, any other value will result in no activation.\n summary_proj_to_labels (`bool`, *optional*, defaults to `True`):\n Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and\n [`TFGPT2DoubleHeadsModel`].\n\n Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.\n summary_first_dropout (`float`, *optional*, defaults to 0.1):\n Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and\n [`TFGPT2DoubleHeadsModel`].\n\n The dropout ratio to be used after the projection and activation.\n scale_attn_weights (`bool`, *optional*, defaults to `True`):\n Scale attention weights by dividing by sqrt(hidden_size)..\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):\n Whether to additionally scale attention weights by `1 / layer_idx + 1`.\n reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):\n Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention\n dot-product/softmax to float() when training with mixed precision.\n\n Example:\n\n ```python\n >>> from transformers import GPT2Model, GPT2Config\n\n >>> # Initializing a GPT2 configuration\n >>> configuration = GPT2Config()\n\n >>> # Initializing a model from the configuration\n >>> model = GPT2Model(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n\n model_type = \"gpt2\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n attribute_map = 
{\n \"hidden_size\": \"n_embd\",\n \"max_position_embeddings\": \"n_positions\",\n \"num_attention_heads\": \"n_head\",\n \"num_hidden_layers\": \"n_layer\",\n }\n\n def __init__(\n self,\n vocab_size=50257,\n n_positions=1024,\n n_embd=768,\n n_layer=12,\n n_head=12,\n n_inner=None,\n activation_function=\"gelu_new\",\n resid_pdrop=0.1,\n embd_pdrop=0.1,\n attn_pdrop=0.1,\n layer_norm_epsilon=1e-5,\n initializer_range=0.02,\n summary_type=\"cls_index\",\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n scale_attn_weights=True,\n use_cache=True,\n bos_token_id=50256,\n eos_token_id=50256,\n scale_attn_by_inverse_layer_idx=False,\n reorder_and_upcast_attn=False,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.n_positions = n_positions\n self.n_embd = n_embd\n self.n_layer = n_layer\n self.n_head = n_head\n self.n_inner = n_inner\n self.activation_function = activation_function\n self.resid_pdrop = resid_pdrop\n self.embd_pdrop = embd_pdrop\n self.attn_pdrop = attn_pdrop\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_first_dropout = summary_first_dropout\n self.summary_proj_to_labels = summary_proj_to_labels\n self.scale_attn_weights = scale_attn_weights\n self.use_cache = use_cache\n self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx\n self.reorder_and_upcast_attn = reorder_and_upcast_attn\n\n self.bos_token_id = bos_token_id\n self.eos_token_id = eos_token_id\n\n super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n\n\nclass GPT2OnnxConfig(OnnxConfigWithPast):\n def __init__(\n self,\n config: PretrainedConfig,\n task: str = \"default\",\n patching_specs: List[PatchingSpec] = None,\n use_past: bool = False,\n ):\n super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)\n if not getattr(self._config, \"pad_token_id\", None):\n # TODO: how to do that better?\n self._config.pad_token_id = 0\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n common_inputs = OrderedDict({\"input_ids\": {0: \"batch\", 1: \"sequence\"}})\n if self.use_past:\n self.fill_with_past_key_values_(common_inputs, direction=\"inputs\")\n common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"past_sequence + sequence\"}\n else:\n common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"sequence\"}\n\n return common_inputs\n\n @property\n def num_layers(self) -> int:\n return self._config.n_layer\n\n @property\n def num_attention_heads(self) -> int:\n return self._config.n_head\n\n def generate_dummy_inputs(\n self,\n tokenizer: PreTrainedTokenizer,\n batch_size: int = -1,\n seq_length: int = -1,\n is_pair: bool = False,\n framework: Optional[TensorType] = None,\n ) -> Mapping[str, Any]:\n common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(\n tokenizer, batch_size, seq_length, is_pair, framework\n )\n\n # We need to order the input in the way they appears in the forward()\n ordered_inputs = OrderedDict({\"input_ids\": common_inputs[\"input_ids\"]})\n\n # Need to add the past_keys\n if self.use_past:\n if not is_torch_available():\n raise ValueError(\"Cannot generate dummy past_keys inputs without PyTorch installed.\")\n else:\n import torch\n\n batch, seqlen = common_inputs[\"input_ids\"].shape\n # Not using the same length for past_key_values\n 
past_key_values_length = seqlen + 2\n past_shape = (\n batch,\n self.num_attention_heads,\n past_key_values_length,\n self._config.hidden_size // self.num_attention_heads,\n )\n ordered_inputs[\"past_key_values\"] = [\n (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)\n ]\n\n ordered_inputs[\"attention_mask\"] = common_inputs[\"attention_mask\"]\n if self.use_past:\n ordered_inputs[\"attention_mask\"] = torch.cat(\n [ordered_inputs[\"attention_mask\"], torch.ones(batch, past_key_values_length)], dim=1\n )\n\n return ordered_inputs\n\n @property\n def default_onnx_opset(self) -> int:\n return 13" ]
[ [ "torch.zeros", "torch.ones" ] ]
qimingj/tensor2tensor
[ "a6df48799dc93176df94c36d3a1aea75caa7c594" ]
[ "tensor2tensor/models/video/basic_stochastic.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic models for testing simple tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_attention\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import common_video\n\nfrom tensor2tensor.models.video import base_vae\nfrom tensor2tensor.models.video import basic_deterministic\nfrom tensor2tensor.models.video import basic_deterministic_params\n\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\[email protected]_model\nclass NextFrameBasicStochastic(\n basic_deterministic.NextFrameBasicDeterministic,\n base_vae.NextFrameBaseVae):\n \"\"\"Stochastic version of basic next-frame model.\"\"\"\n\n def inject_latent(self, layer, features, filters):\n \"\"\"Inject a VAE-style latent.\"\"\"\n # Latent for stochastic model\n input_frames = tf.to_float(features[\"inputs_raw\"])\n target_frames = tf.to_float(features[\"targets_raw\"])\n full_video = tf.concat([input_frames, target_frames], axis=1)\n latent_mean, latent_std = self.construct_latent_tower(\n full_video, time_axis=1)\n latent = common_video.get_gaussian_tensor(latent_mean, latent_std)\n latent = tf.layers.flatten(latent)\n latent = tf.expand_dims(latent, axis=1)\n latent = tf.expand_dims(latent, axis=1)\n latent_mask = tf.layers.dense(latent, filters, name=\"latent_mask\")\n zeros_mask = tf.zeros(\n common_layers.shape_list(layer)[:-1] + [filters], dtype=tf.float32)\n layer = tf.concat([layer, latent_mask + zeros_mask], axis=-1)\n extra_loss = self.get_extra_loss(latent_mean, latent_std)\n return layer, extra_loss\n\n\[email protected]_model\nclass NextFrameBasicStochasticDiscrete(\n basic_deterministic.NextFrameBasicDeterministic):\n \"\"\"Basic next-frame model with a tiny discrete latent.\"\"\"\n\n def inject_latent(self, layer, features, filters):\n \"\"\"Inject a deterministic latent based on the target frame.\"\"\"\n del filters\n hparams = self.hparams\n final_filters = common_layers.shape_list(layer)[-1]\n filters = hparams.hidden_size\n kernel = (4, 4)\n\n if hparams.mode == tf.estimator.ModeKeys.PREDICT:\n layer_shape = common_layers.shape_list(layer)\n if hparams.full_latent_tower:\n rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits])\n else:\n rand = tf.random_uniform(layer_shape[:-3] + [\n 1, 1, hparams.bottleneck_bits])\n d = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0\n z = tf.layers.dense(d, final_filters, name=\"unbottleneck\")\n return layer + z, 0.0\n\n # Embed.\n x = tf.layers.dense(\n features[\"cur_target_frame\"], filters, name=\"latent_embed\",\n bias_initializer=tf.random_normal_initializer(stddev=0.01))\n x = common_attention.add_timing_signal_nd(x)\n\n if hparams.full_latent_tower:\n for i in range(hparams.num_compress_steps):\n with tf.variable_scope(\"latent_downstride%d\" % i):\n x = 
common_layers.make_even_size(x)\n if i < hparams.filter_double_steps:\n filters *= 2\n x = common_attention.add_timing_signal_nd(x)\n x = tf.layers.conv2d(x, filters, kernel,\n activation=common_layers.belu,\n strides=(2, 2), padding=\"SAME\")\n x = common_layers.layer_norm(x)\n else:\n x = common_layers.double_discriminator(x)\n x = tf.expand_dims(tf.expand_dims(x, axis=1), axis=1)\n x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_bits, name=\"bottleneck\"))\n d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)\n if hparams.mode == tf.estimator.ModeKeys.TRAIN:\n noise = tf.random_uniform(common_layers.shape_list(x))\n noise = 2.0 * tf.to_float(tf.less(hparams.bottleneck_noise, noise)) - 1.0\n d *= noise\n\n z = tf.layers.dense(d, final_filters, name=\"unbottleneck\")\n return layer + z, 0.0\n\n\[email protected]_hparams\ndef next_frame_basic_stochastic():\n \"\"\"Basic 2-frame conv model with stochastic tower.\"\"\"\n hparams = basic_deterministic_params.next_frame_basic_deterministic()\n hparams.stochastic_model = True\n hparams.add_hparam(\"latent_channels\", 1)\n hparams.add_hparam(\"latent_std_min\", -5.0)\n hparams.add_hparam(\"num_iterations_1st_stage\", 25000)\n hparams.add_hparam(\"num_iterations_2nd_stage\", 25000)\n hparams.add_hparam(\"latent_loss_multiplier\", 1e-3)\n hparams.add_hparam(\"latent_loss_multiplier_dynamic\", False)\n hparams.add_hparam(\"latent_loss_multiplier_alpha\", 1e-5)\n hparams.add_hparam(\"latent_loss_multiplier_epsilon\", 1.0)\n hparams.add_hparam(\"latent_loss_multiplier_schedule\", \"constant\")\n hparams.add_hparam(\"latent_num_frames\", 0) # 0 means use all frames.\n hparams.add_hparam(\"anneal_end\", 100000)\n hparams.add_hparam(\"information_capacity\", 0.0)\n return hparams\n\n\[email protected]_hparams\ndef next_frame_basic_stochastic_discrete():\n \"\"\"Basic 2-frame conv model with stochastic discrete latent.\"\"\"\n hparams = basic_deterministic_params.next_frame_sampling()\n hparams.add_hparam(\"bottleneck_bits\", 16)\n hparams.add_hparam(\"bottleneck_noise\", 0.02)\n hparams.add_hparam(\"full_latent_tower\", False)\n return hparams\n" ]
[ [ "tensorflow.concat", "tensorflow.layers.flatten", "tensorflow.expand_dims", "tensorflow.less", "tensorflow.random_uniform", "tensorflow.variable_scope", "tensorflow.layers.conv2d", "tensorflow.to_float", "tensorflow.layers.dense", "tensorflow.random_normal_initializer" ] ]
AlexSG18/FrotiersInEcology
[ "3023d2479082ee6d4c144e4eb5153d40614d2280" ]
[ "tests/test_models/test_losses.py" ]
[ "import pytest\nimport torch\n\nfrom mmdet.models import Accuracy, build_loss\n\n\ndef test_ce_loss():\n # use_mask and use_sigmoid cannot be true at the same time\n with pytest.raises(AssertionError):\n loss_cfg = dict(\n type='CrossEntropyLoss',\n use_mask=True,\n use_sigmoid=True,\n loss_weight=1.0)\n build_loss(loss_cfg)\n\n # test loss with class weights\n loss_cls_cfg = dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n class_weight=[0.8, 0.2],\n loss_weight=1.0)\n loss_cls = build_loss(loss_cls_cfg)\n fake_pred = torch.Tensor([[100, -100]])\n fake_label = torch.Tensor([1]).long()\n assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))\n\n loss_cls_cfg = dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)\n loss_cls = build_loss(loss_cls_cfg)\n assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))\n\n\ndef test_accuracy():\n # test for empty pred\n print(\"accuracy_here.....................................\")\n pred = torch.empty(0, 4)\n label = torch.empty(0)\n accuracy = Accuracy(topk=1)\n acc = accuracy(pred, label)\n assert acc.item() == 0\n\n pred = torch.Tensor([[0.2, 0.3, 0.6, 0.5], [0.1, 0.1, 0.2, 0.6],\n [0.9, 0.0, 0.0, 0.1], [0.4, 0.7, 0.1, 0.1],\n [0.0, 0.0, 0.99, 0]])\n # test for top1\n true_label = torch.Tensor([2, 3, 0, 1, 2]).long()\n accuracy = Accuracy(topk=1)\n acc = accuracy(pred, true_label)\n assert acc.item() == 100\n\n # test for top1 with score thresh=0.8\n true_label = torch.Tensor([2, 3, 0, 1, 2]).long()\n accuracy = Accuracy(topk=1, thresh=0.8)\n acc = accuracy(pred, true_label)\n assert acc.item() == 40\n\n # test for top2\n accuracy = Accuracy(topk=2)\n label = torch.Tensor([3, 2, 0, 0, 2]).long()\n acc = accuracy(pred, label)\n assert acc.item() == 100\n\n # test for both top1 and top2\n accuracy = Accuracy(topk=(1, 2))\n true_label = torch.Tensor([2, 3, 0, 1, 2]).long()\n acc = accuracy(pred, true_label)\n for a in acc:\n assert a.item() == 100\n\n # topk is larger than pred class number\n with pytest.raises(AssertionError):\n accuracy = Accuracy(topk=5)\n accuracy(pred, true_label)\n\n # wrong topk type\n with pytest.raises(AssertionError):\n accuracy = Accuracy(topk='wrong type')\n accuracy(pred, true_label)\n\n # label size is larger than required\n with pytest.raises(AssertionError):\n label = torch.Tensor([2, 3, 0, 1, 2, 0]).long() # size mismatch\n accuracy = Accuracy()\n accuracy(pred, label)\n\n # wrong pred dimension\n with pytest.raises(AssertionError):\n accuracy = Accuracy()\n accuracy(pred[:, :, None], true_label)\n" ]
[ [ "torch.empty", "torch.Tensor", "torch.tensor" ] ]
UltraSuite/tal-tools
[ "cf6a497143d19e47149f057626a9bf3ad9cbea95" ]
[ "visualiser/tools/plotters.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPlot individual video frames.\n\nDate: 2020\nAuthor: M. Sam Ribeiro\n\"\"\"\n\nimport os\nfrom textwrap import wrap\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\n\n\ndef set_dark_mode():\n ''' set global variables for matplotlib's dark mode'''\n\n # a simple way is to use:\n # plt.style.use('dark_background')\n\n # but we want more control so we use parameters adapted from here:\n # https://stackoverflow.com/questions/48391568/matplotlib-creating-plot-for-black-background-presentation-slides\n plt.rcParams.update({\n \"lines.color\": \"white\",\n \"patch.edgecolor\": \"black\",\n \"text.color\": \"black\",\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": \"black\",\n \"axes.labelcolor\": \"white\",\n \"xtick.color\": \"white\",\n \"ytick.color\": \"white\",\n \"grid.color\": \"black\",\n \"figure.facecolor\": \"black\",\n \"figure.edgecolor\": \"black\",\n \"savefig.facecolor\": \"black\",\n \"savefig.edgecolor\": \"black\"}\n )\n\n\n\n\ndef plot_video_frames(ult, vid, wav, txt, \\\n sample_rate, frame_rate, config, frame_directory):\n \"\"\" Writes ultrasound, speech, and spectrogram as images to a directory. \"\"\"\n\n width_ratios = [1, 0.8]\n\n # standard parameters from configuration file\n plot_spectrogram = config.make_spectrogram\n plot_waveform = config.make_waveform\n plot_text = config.make_prompt_text\n\n dark_mode = config.dark_mode\n\n spectrogram_frame_size = config.spectrogram_frame_size\n spectrogram_frame_shift = config.spectrogram_frame_shift\n spectrogram_color_map = config.spectrogram_color_map\n\n text_font_size = config.text_font_size\n text_wrap_width = config.text_wrap_width\n\n dpi = config.dpi\n size = config.figure_size\n fig_size = (size[0]/float(dpi), size[1]/float(dpi))\n\n\n if dark_mode:\n set_dark_mode()\n\n\n if not os.path.exists(frame_directory):\n os.makedirs(frame_directory)\n\n\n plt.figure(dpi=dpi, frameon=False, figsize=fig_size)\n fig = plt.gcf()\n\n # set grid with relevant axes\n if plot_waveform and plot_spectrogram:\n gs = gridspec.GridSpec(4, 2, width_ratios=width_ratios, figure=fig)\n wav_ax = fig.add_subplot(gs[0,:])\n spec_ax = fig.add_subplot(gs[1,:])\n ult_ax = fig.add_subplot(gs[2:,0])\n vid_ax = fig.add_subplot(gs[2:,1])\n\n elif plot_waveform:\n gs = gridspec.GridSpec(3, 2, width_ratios=width_ratios, figure=fig)\n wav_ax = fig.add_subplot(gs[0,:])\n ult_ax = fig.add_subplot(gs[1:,0])\n vid_ax = fig.add_subplot(gs[1:,1])\n\n elif plot_spectrogram:\n gs = gridspec.GridSpec(3, 2, width_ratios=width_ratios, figure=fig)\n spec_ax = fig.add_subplot(gs[0,:])\n ult_ax = fig.add_subplot(gs[1:,0])\n vid_ax = fig.add_subplot(gs[1:,1])\n\n else:\n gs = gridspec.GridSpec(1, 2, width_ratios=width_ratios, figure=fig)\n ult_ax = plt.subplot(gs[0])\n vid_ax = plt.subplot(gs[1])\n\n gs.update(wspace=0.0, hspace=0.0)\n\n # plot waveform\n if plot_waveform:\n total_samples = wav.shape[0]\n\n # # if we want a full dark theme on the waveform axis (dark background)\n # wav_ax.plot(wav, color='white', linewidth=0.2)\n # wav_ax.axis(\"off\")\n\n # here, we use a light background, even if using a dark theme.\n wav_ax.plot(wav, color='black', linewidth=0.2)\n\n wav_ax.set_xlim([0, total_samples])\n wav_ax.set_xticks([])\n wav_ax.set_yticks([])\n max_wav_x = wav.shape[0]\n\n\n # plot spectrogram\n if plot_spectrogram:\n\n nfft = int(sample_rate * (spectrogram_frame_size/1000.))\n overlap = int(sample_rate * 
(spectrogram_frame_shift/1000.))\n\n # spectrogram with matplotlib: \n # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.specgram.html\n spectrum, freqs, bins, spec_im = spec_ax.specgram(wav.reshape(-1,),\\\n NFFT=nfft, Fs=sample_rate, noverlap=overlap, cmap=spectrogram_color_map)\n\n spec_im.set_interpolation('bilinear')\n\n spec_ax.set_xticks([])\n spec_ax.set_yticks([])\n xlim = spec_ax.get_xlim()\n spec_ax.set_xlim([0, xlim[1]])\n max_spec_x = xlim[-1]\n\n # prompt text\n if plot_text:\n color = 'white' if dark_mode else 'black'\n txt = \"\\n\".join(wrap(txt, text_wrap_width))\n fig.suptitle(txt, fontsize=text_font_size, color=color)\n\n # plot ultrasound and video frame-by-frame\n num_frames = ult.shape[0]\n\n ult_im = ult_ax.imshow(ult[0].T, aspect='equal', origin='lower', cmap='gray')\n vid_im = vid_ax.imshow(vid[0], aspect='equal', origin='upper', cmap='gray')\n\n for i in range(0, num_frames):\n u = ult[i]\n v = vid[i]\n\n if plot_waveform:\n wav_x = int( (1/frame_rate) * i * sample_rate)\n wav_x = min(wav_x, max_wav_x)\n ln1 = wav_ax.axvline(x=wav_x, color='red')\n\n if plot_spectrogram:\n spec_x = (1/frame_rate) * i\n spec_x = min(spec_x, max_spec_x)\n ln2 = spec_ax.axvline(x=spec_x, color='red')\n\n ult_im.set_data(u.T)\n vid_im.set_data(v)\n\n ult_ax.axis(\"off\")\n vid_ax.axis(\"off\")\n\n plt.savefig(frame_directory + \"/%07d.jpg\" % i, bbox_inches='tight', pad_inches=0.1, dpi=dpi)\n\n if plot_waveform: ln1.remove()\n if plot_spectrogram: ln2.remove()\n\n plt.close()" ]
[ [ "matplotlib.use", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.gcf", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.subplot" ] ]
anibalsolon/covid-xprize
[ "cafc2c65c7e4f4184c16a1793da85371b6bc3218" ]
[ "examples/prescriptors/random/prescribe.py" ]
[ "# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.\n\nimport os\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nNUM_PRESCRIPTIONS = 10\n\nNPI_MAX_VALUES = {\n 'C1_School closing': 3,\n 'C2_Workplace closing': 3,\n 'C3_Cancel public events': 2,\n 'C4_Restrictions on gatherings': 4,\n 'C5_Close public transport': 2,\n 'C6_Stay at home requirements': 3,\n 'C7_Restrictions on internal movement': 2,\n 'C8_International travel controls': 4,\n 'H1_Public information campaigns': 2,\n 'H2_Testing policy': 3,\n 'H3_Contact tracing': 2\n}\n\n\ndef prescribe(start_date_str: str,\n end_date_str: str,\n path_to_hist_file: str,\n output_file_path) -> None:\n\n # Create skeleton df with one row for each prescription\n # for each geo for each day\n hdf = pd.read_csv(path_to_hist_file,\n parse_dates=['Date'],\n encoding=\"ISO-8859-1\",\n error_bad_lines=True)\n start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')\n end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')\n prescription_idxs = []\n country_names = []\n region_names = []\n dates = []\n\n for prescription_idx in range(NUM_PRESCRIPTIONS):\n for country_name in hdf['CountryName'].unique():\n cdf = hdf[hdf['CountryName'] == country_name]\n for region_name in cdf['RegionName'].unique():\n for date in pd.date_range(start_date, end_date):\n prescription_idxs.append(prescription_idx)\n country_names.append(country_name)\n region_names.append(region_name)\n dates.append(date.strftime(\"%Y-%m-%d\"))\n\n prescription_df = pd.DataFrame({\n 'PrescriptionIndex': prescription_idxs,\n 'CountryName': country_names,\n 'RegionName': region_names,\n 'Date': dates})\n\n # Fill df with random values\n num_rows = len(prescription_df)\n for npi_col, max_value in sorted(NPI_MAX_VALUES.items()):\n prescription_df[npi_col] = np.random.randint(0, max_value,\n size=num_rows)\n\n # Create the output path\n os.makedirs(os.path.dirname(output_file_path), exist_ok=True)\n\n # Save to a csv file\n prescription_df.to_csv(output_file_path, index=False)\n\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--start_date\",\n dest=\"start_date\",\n type=str,\n required=True,\n help=\"Start date from which to prescribe, included, as YYYY-MM-DD. For example 2020-08-01\")\n parser.add_argument(\"-e\", \"--end_date\",\n dest=\"end_date\",\n type=str,\n required=True,\n help=\"End date for the last prescription, included, as YYYY-MM-DD. For example 2020-08-31\")\n parser.add_argument(\"-ip\", \"--interventions_past\",\n dest=\"prev_file\",\n type=str,\n required=True,\n help=\"The path to a .csv file of previous intervention plans\")\n parser.add_argument(\"-o\", \"--output_file\",\n dest=\"output_file\",\n type=str,\n required=True,\n help=\"The path to an intervention plan .csv file\")\n args = parser.parse_args()\n print(f\"Generating prescriptions from {args.start_date} to {args.end_date}...\")\n prescribe(args.start_date, args.end_date, args.prev_file, args.output_file)\n print(\"Done!\")\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.date_range", "numpy.random.randint", "pandas.read_csv" ] ]
UBC-MDS/DSCI_532_Group_12
[ "de6fe6ebcb0ceccc2eb2c0aefb057fd485c4324e" ]
[ "src/python/data_model.py" ]
[ "import pandas as pd\nimport datetime\nimport os, sys, inspect\nimport numpy as np\n\nfile_daily_report = \"daily_report.csv\"\nfile_timeseries_confirmed = \"time_series_covid19_confirmed_global.csv\"\nfile_timeseries_recovered = \"time_series_covid19_recovered_global.csv\"\nfile_timeseries_death = \"time_series_covid19_deaths_global.csv\"\n\n\nclass case_type:\n all = 0\n confirmed = 1\n death = 2\n recovered = 3\n\n\nclass data_model:\n \"\"\"handles data reading and processing\"\"\"\n\n def __init__(self, path):\n self.data_path = path\n self.reload()\n self.country_list = self.daily_report.Country_Region.unique()\n\n def reload(self):\n \"\"\"load the csv files into data frame, download the files from Github if needed\"\"\"\n today = datetime.date.today()\n today = datetime.datetime(today.year, today.month, today.day, 0, 0, 0)\n m_day = self.__get_modified_date()\n self.last_updated = today\n # check if the data is old\n if m_day < today:\n # we need to re-download data\n self.__download_data(today)\n\n # process the data\n self.__process_data()\n\n def __download_data(self, date):\n \"\"\"private method, used for downloading files from github of JH Uni\n\n Args:\n date (datetime): date to retrieve daily report\n \"\"\"\n # download daily report\n # we need to handle time zone difference, first check if there is a file created for our today, then 1, 2 day before\n dr_path = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports\"\n yesterday = date - datetime.timedelta(days=1)\n bf_yesterday = yesterday - datetime.timedelta(days=1)\n self.last_updated = date\n\n dr_files = [\n self.__create_filename(date),\n self.__create_filename(yesterday),\n self.__create_filename(bf_yesterday),\n ]\n for i in range(len(dr_files)):\n try:\n f = dr_files[i]\n url = dr_path + \"/\" + f\n input = pd.read_csv(url)\n input.to_csv(self.data_path + \"/\" + file_daily_report)\n if i == 1:\n self.last_updated = yesterday\n elif i == 2:\n self.last_updated = bf_yesterday\n\n break # as we sorted the date desc, we just need to get the latest file\n except:\n next\n\n # download timeseries\n ts_path = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series\"\n ts_files = [\n file_timeseries_confirmed,\n file_timeseries_death,\n file_timeseries_recovered,\n ]\n for f in ts_files:\n url = ts_path + \"/\" + f\n input = pd.read_csv(url)\n input.to_csv(self.data_path + \"/\" + f)\n\n def __create_filename(self, date):\n \"\"\"private method, generates file name for daily report from date\n\n Args:\n date (datetime): any date to get the daily report file name\n\n Returns:\n string: mm-dd-year.csv\n \"\"\"\n return f\"{format(date.month, '02d')}-{format(date.day, '02d')}-{date.year}.csv\"\n\n def __get_modified_date(self):\n \"\"\"get the daily report's modified date, returns 1990-01-01 if the file does not exist\n\n Returns:\n datetime: modified date\n \"\"\"\n daily_r = self.data_path + \"/\" + file_daily_report\n dirpath = os.path.dirname(daily_r)\n if not os.path.exists(daily_r):\n return datetime.datetime(1990, 1, 1, 0, 0)\n\n modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(daily_r))\n modified_date = datetime.datetime(\n modified_date.year, modified_date.month, modified_date.day, 0, 0, 0\n )\n return modified_date\n\n def __process_data(self):\n \"\"\"private method, used for reading and cleaning up data files\"\"\"\n # read the files in as data frame\n self.daily_report = 
pd.read_csv(self.data_path + \"/\" + file_daily_report)\n self.times_series_confirmed = pd.read_csv(\n self.data_path + \"/\" + file_timeseries_confirmed\n )\n self.times_series_death = pd.read_csv(\n self.data_path + \"/\" + file_timeseries_death\n )\n self.times_series_recovered = pd.read_csv(\n self.data_path + \"/\" + file_timeseries_recovered\n )\n\n # clean up data for timeseries_confirmed\n ## copy data\n confirmed_df = self.times_series_confirmed.copy()\n confirmed_df_2 = self.times_series_confirmed.copy()\n ## summation\n confirmed_concat = confirmed_df[confirmed_df[\"Province/State\"].notna()][\n \"Country/Region\"\n ].unique()\n for country in confirmed_concat:\n new_row = confirmed_df[confirmed_df[\"Country/Region\"] == country].sum()\n new_row[\"Country/Region\"] = country\n new_row[\"Province/State\"] = np.NaN\n new_row[\"Lat\"] = confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == country)\n & (confirmed_df[\"Province/State\"].isna()),\n \"Lat\",\n ].mean()\n new_row[\"Long\"] = confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == country)\n & (confirmed_df[\"Province/State\"].isna()),\n \"Long\",\n ].mean()\n confirmed_df = confirmed_df.drop(\n confirmed_df[confirmed_df[\"Country/Region\"] == country].index\n )\n confirmed_df = confirmed_df.append(new_row, ignore_index=True)\n ## add Long and Lat manually\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"China\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"China\"][\"Lat\"].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"China\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Long\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"China\"][\"Long\"].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"Canada\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"Canada\"][\"Lat\"].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"Canada\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Long\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"Canada\"][\"Long\"].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"Australia\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"Australia\"][\n \"Lat\"\n ].mean()\n confirmed_df.loc[\n (confirmed_df[\"Country/Region\"] == \"Australia\")\n & (confirmed_df[\"Province/State\"].isna()),\n \"Long\",\n ] = confirmed_df_2[confirmed_df_2[\"Country/Region\"] == \"Australia\"][\n \"Long\"\n ].mean()\n ## make tidy table\n confirmed_df = confirmed_df[confirmed_df[\"Province/State\"].isna()].drop(\n columns=[\"Province/State\", \"Unnamed: 0\"], axis=1\n )\n confirmed_tidy = confirmed_df.melt(id_vars=[\"Country/Region\", \"Lat\", \"Long\"])\n confirmed_tidy[\"variable\"] = pd.to_datetime(confirmed_tidy[\"variable\"])\n self.times_series_confirmed_tidy = confirmed_tidy\n\n # clean up data for timeseries_death\n ## copy data\n death_df = self.times_series_death.copy()\n death_df_2 = self.times_series_death.copy()\n ## summation\n death_concat = death_df[death_df[\"Province/State\"].notna()][\n \"Country/Region\"\n ].unique()\n for country in death_concat:\n new_row = death_df[death_df[\"Country/Region\"] == country].sum()\n new_row[\"Country/Region\"] = country\n new_row[\"Province/State\"] = np.NaN\n new_row[\"Lat\"] = death_df.loc[\n (death_df[\"Country/Region\"] == 
country)\n & (death_df[\"Province/State\"].isna()),\n \"Lat\",\n ].mean()\n new_row[\"Long\"] = death_df.loc[\n (death_df[\"Country/Region\"] == country)\n & (death_df[\"Province/State\"].isna()),\n \"Long\",\n ].mean()\n death_df = death_df.drop(\n death_df[death_df[\"Country/Region\"] == country].index\n )\n death_df = death_df.append(new_row, ignore_index=True)\n ## add Long and Lat manually\n death_df.loc[\n (death_df[\"Country/Region\"] == \"China\")\n & (death_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"China\"][\"Lat\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"China\")\n & (death_df[\"Province/State\"].isna()),\n \"Long\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"China\"][\"Long\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"Canada\")\n & (death_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"Canada\"][\"Lat\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"Canada\")\n & (death_df[\"Province/State\"].isna()),\n \"Long\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"Canada\"][\"Long\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"Australia\")\n & (death_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"Australia\"][\"Lat\"].mean()\n death_df.loc[\n (death_df[\"Country/Region\"] == \"Australia\")\n & (death_df[\"Province/State\"].isna()),\n \"Long\",\n ] = death_df_2[death_df_2[\"Country/Region\"] == \"Australia\"][\"Long\"].mean()\n ## make tidy table\n death_df = death_df[death_df[\"Province/State\"].isna()].drop(\n columns=[\"Province/State\", \"Unnamed: 0\"], axis=1\n )\n death_tidy = death_df.melt(id_vars=[\"Country/Region\", \"Lat\", \"Long\"])\n death_tidy[\"variable\"] = pd.to_datetime(death_tidy[\"variable\"])\n self.times_series_death_tidy = death_tidy\n\n # clean up data for timeseries_recovered\n ## copy data\n recovered_df = self.times_series_recovered.copy()\n recovered_df_2 = self.times_series_recovered.copy()\n ## summation\n recovered_concat = recovered_df[recovered_df[\"Province/State\"].notna()][\n \"Country/Region\"\n ].unique()\n for country in recovered_concat:\n new_row = recovered_df[recovered_df[\"Country/Region\"] == country].sum()\n new_row[\"Country/Region\"] = country\n new_row[\"Province/State\"] = np.NaN\n new_row[\"Lat\"] = recovered_df.loc[\n (recovered_df[\"Country/Region\"] == country)\n & (recovered_df[\"Province/State\"].isna()),\n \"Lat\",\n ].mean()\n new_row[\"Long\"] = recovered_df.loc[\n (recovered_df[\"Country/Region\"] == country)\n & (recovered_df[\"Province/State\"].isna()),\n \"Long\",\n ].mean()\n recovered_df = recovered_df.drop(\n recovered_df[recovered_df[\"Country/Region\"] == country].index\n )\n recovered_df = recovered_df.append(new_row, ignore_index=True)\n ## add Long and Lat manually\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"China\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"China\"][\"Lat\"].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"China\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Long\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"China\"][\"Long\"].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"Canada\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == 
\"Canada\"][\"Lat\"].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"Canada\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Long\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"Canada\"][\"Long\"].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"Australia\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Lat\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"Australia\"][\n \"Lat\"\n ].mean()\n recovered_df.loc[\n (recovered_df[\"Country/Region\"] == \"Australia\")\n & (recovered_df[\"Province/State\"].isna()),\n \"Long\",\n ] = recovered_df_2[recovered_df_2[\"Country/Region\"] == \"Australia\"][\n \"Long\"\n ].mean()\n ## make tidy table\n recovered_df = recovered_df[recovered_df[\"Province/State\"].isna()].drop(\n columns=[\"Province/State\", \"Unnamed: 0\"], axis=1\n )\n recovered_tidy = recovered_df.melt(id_vars=[\"Country/Region\", \"Lat\", \"Long\"])\n recovered_tidy[\"variable\"] = pd.to_datetime(recovered_tidy[\"variable\"])\n self.times_series_recovered_tidy = recovered_tidy\n\n return self.times_series_death_tidy\n\n def get_aggregated_daily_report(self):\n \"\"\" Aggregate the regional level cases count to country level\"\"\"\n return (\n self.daily_report.groupby(\"Country_Region\")\n .agg(\n {\n \"Confirmed\": \"sum\",\n \"Deaths\": \"sum\",\n \"Recovered\": \"sum\",\n \"Active\": \"sum\",\n }\n )\n .reset_index()\n )\n\n def cumulative_filter(self, country=\"all\"):\n \"\"\"return cumulative cases by country\n\n Args:\n country (str, optional): [description]. Defaults to \"all\".\n\n Returns:\n [Series]: with index as Confirmed, Deaths, Recovered\n \"\"\"\n if country != \"all\":\n return self.daily_report.query(f\"Country_Region == '{country}'\").sum(\n numeric_only=True\n )\n\n return self.daily_report.sum(numeric_only=True)\n\n def get_country_options(self):\n \"\"\"create an array of country options to be used in dropdowns\n\n Returns:\n array: [{\"label\":country1, \"value\":country1}, ...]\n \"\"\"\n result = []\n for i in range(len(self.country_list)):\n result.append(\n {\"label\": self.country_list[i], \"value\": self.country_list[i]}\n )\n\n return result\n\n def get_timeserie_data_by_country(\n self,\n country=\"all\",\n c_type=case_type.confirmed,\n start_date=datetime.date(1990, 1, 1),\n end_date=datetime.date.today(),\n ):\n \"\"\"return timeseries data by country\n\n Args:\n country (str, optional): country name. Defaults to \"all\".\n case_type (int, optional): 1: confirmed, 2: death, 3: recovered. 
Defaults to case_type.confirmed.\n\n Raises:\n Exception: if case_type entered is invalid\n Return:\n country_data: DataFrame, date: date, total: total number, yesterday: the day before's number, new: total - yesterday\n \"\"\"\n if c_type == case_type.confirmed:\n df = self.times_series_confirmed\n elif c_type == case_type.death:\n df = self.times_series_death\n elif c_type == case_type.recovered:\n df = self.times_series_recovered\n else:\n raise Exception(\"Case type is not supported\")\n if country != \"all\":\n country_data = pd.DataFrame(\n df[df[\"Country/Region\"] == country].iloc[:, 5:].sum()\n )\n else:\n country_data = pd.DataFrame(df.iloc[:, 5:].sum())\n country_data = country_data.reset_index()\n country_data.columns = [\"date\", \"Total\"]\n yesterday_data = np.zeros(country_data.Total.shape[0])\n yesterday_data[1:] = country_data.Total.to_numpy()[0:-1]\n country_data[\"yesterday\"] = yesterday_data\n country_data[\"New\"] = country_data.Total - country_data[\"yesterday\"]\n country_data.loc[country_data.New < 0, \"New\"] = 0\n\n country_data = country_data.loc[:, [\"date\", \"Total\", \"New\"]]\n country_data = pd.melt(\n country_data,\n id_vars=[\"date\"],\n value_vars=[\"Total\", \"New\"],\n value_name=\"count\",\n var_name=\"type\",\n )\n country_data[\"date_col\"] = pd.to_datetime(country_data.date).dt.date\n\n country_data = country_data[\n (country_data[\"date_col\"] >= start_date)\n & (country_data[\"date_col\"] <= end_date)\n ]\n country_data = country_data.loc[:, [\"date\", \"count\", \"type\"]]\n\n return country_data\n\n def save_to_file(self):\n \"\"\"save the whole data model into file\"\"\"\n pass\n" ]
[ [ "pandas.to_datetime", "pandas.melt", "pandas.read_csv", "numpy.zeros" ] ]
matt-long/aerobic-safety-margins
[ "2f58775d8e67ea105a217ce89d09e239d208e001" ]
[ "notebooks/util.py" ]
[ "import os\nimport time\nfrom collections.abc import Iterable\n\nimport cftime\nimport dask\nimport intake\nimport numpy as np\nimport xarray as xr\nimport yaml\nfrom dask.distributed import Client\nfrom dask_jobqueue import PBSCluster\n\npath_to_here = os.path.dirname(os.path.realpath(__file__))\n\nUSER = os.environ['USER']\nPBS_PROJECT = 'NCGD0011'\n\n\ndef attrs_label(attrs):\n \"\"\"generate a label from long_name and units\"\"\"\n da_name = ''\n if isinstance(attrs, xr.DataArray):\n da_name = attrs.name\n attrs = attrs.attrs\n name = da_name if 'long_name' not in attrs else attrs['long_name']\n\n if len(name) > 30:\n name = '\\n'.join([name[:30], name[30:]])\n units = '' if 'units' not in attrs else f' [{attrs[\"units\"]}]'\n return name + units\n\n\ndef label_plots(fig, axs, xoff=-0.04, yoff=0.02):\n alp = [chr(i).upper() for i in range(97, 97 + 26)]\n for i, ax in enumerate(axs):\n p = ax.get_position()\n x = p.x0 + xoff\n y = p.y1 + yoff\n fig.text(x, y, f'{alp[i]}', fontsize=14, fontweight='semibold')\n\n\ndef get_ClusterClient(memory='25GB'):\n \"\"\"get cluster and client\"\"\"\n cluster = PBSCluster(\n cores=1,\n memory=memory,\n processes=1,\n queue='casper',\n local_directory=f'/glade/scratch/{USER}/dask-workers',\n log_directory=f'/glade/scratch/{USER}/dask-workers',\n resource_spec=f'select=1:ncpus=1:mem={memory}',\n project=PBS_PROJECT,\n walltime='06:00:00',\n interface='ib0',\n )\n\n jupyterhub_server_name = os.environ.get('JUPYTERHUB_SERVER_NAME', None)\n dashboard_link = 'https://jupyterhub.hpc.ucar.edu/stable/user/{USER}/proxy/{port}/status'\n if jupyterhub_server_name:\n dashboard_link = (\n 'https://jupyterhub.hpc.ucar.edu/stable/user/'\n + '{USER}'\n + f'/{jupyterhub_server_name}/proxy/'\n + '{port}/status'\n )\n dask.config.set({'distributed.dashboard.link': dashboard_link})\n client = Client(cluster)\n return cluster, client\n\n\nclass timer(object):\n \"\"\"support reporting timing info with named tasks\"\"\"\n\n def __init__(self, name=None):\n self.name = name\n\n def __enter__(self):\n self.tic = time.time()\n\n def __exit__(self, type, value, traceback):\n if self.name:\n print(f'[{self.name}]: ', end='')\n toc = time.time() - self.tic\n print(f'{toc:0.5f}s')\n\n\ndef to_datenum(y, m, d, time_units='days since 0001-01-01 00:00:00'):\n \"\"\"convert year, month, day to number\"\"\"\n return cftime.date2num(cftime.datetime(y, m, d), units=time_units)\n\n\ndef nday_per_year(year):\n return 365\n\n\ndef year_frac(time):\n \"\"\"compute year fraction\"\"\"\n\n year = [d.year for d in time.values]\n month = [d.month for d in time.values]\n day = [d.day for d in time.values]\n\n t0_year = np.array([to_datenum(y, 1, 1) - 1 for y in year])\n t_year = np.array([to_datenum(y, m, d) for y, m, d in zip(year, month, day)])\n nday_year = np.array([nday_per_year(y) for y in year])\n\n return year + (t_year - t0_year) / nday_year\n\n\ndef pop_add_cyclic(ds):\n \"\"\"Make POP grid easily plottable\"\"\"\n ni = ds.TLONG.shape[1]\n\n xL = int(ni / 2 - 1)\n xR = int(xL + ni)\n\n tlon = ds.TLONG.data\n tlat = ds.TLAT.data\n\n tlon = np.where(np.greater_equal(tlon, min(tlon[:, 0])), tlon - 360.0, tlon)\n lon = np.concatenate((tlon, tlon + 360.0), 1)\n lon = lon[:, xL:xR]\n\n if ni == 320:\n lon[367:-3, 0] = lon[367:-3, 0] + 360.0\n lon = lon - 360.0\n\n lon = np.hstack((lon, lon[:, 0:1] + 360.0))\n if ni == 320:\n lon[367:, -1] = lon[367:, -1] - 360.0\n\n # -- trick cartopy into doing the right thing:\n # it gets confused when the cyclic coords are identical\n lon[:, 0] = lon[:, 
0] - 1e-8\n\n # -- periodicity\n lat = np.concatenate((tlat, tlat), 1)\n lat = lat[:, xL:xR]\n lat = np.hstack((lat, lat[:, 0:1]))\n\n TLAT = xr.DataArray(lat, dims=('nlat', 'nlon'))\n TLONG = xr.DataArray(lon, dims=('nlat', 'nlon'))\n\n dso = xr.Dataset({'TLAT': TLAT, 'TLONG': TLONG})\n\n # copy vars\n varlist = [v for v in ds.data_vars if v not in ['TLAT', 'TLONG']]\n for v in varlist:\n v_dims = ds[v].dims\n if not ('nlat' in v_dims and 'nlon' in v_dims):\n dso[v] = ds[v]\n else:\n # determine and sort other dimensions\n other_dims = set(v_dims) - {'nlat', 'nlon'}\n other_dims = tuple([d for d in v_dims if d in other_dims])\n lon_dim = ds[v].dims.index('nlon')\n field = ds[v].data\n field = np.concatenate((field, field), lon_dim)\n field = field[..., :, xL:xR]\n field = np.concatenate((field, field[..., :, 0:1]), lon_dim)\n dso[v] = xr.DataArray(field, dims=other_dims + ('nlat', 'nlon'), attrs=ds[v].attrs)\n\n # copy coords\n for v, da in ds.coords.items():\n if not ('nlat' in da.dims and 'nlon' in da.dims):\n dso = dso.assign_coords(**{v: da})\n\n return dso\n\n\nclass curator_local_assets(object):\n \"\"\"Curate an intake catalog with locally-cached assets\"\"\"\n\n def __init__(self):\n\n cache_dir = 'data/cache'\n os.makedirs(cache_dir, exist_ok=True)\n\n self.catalog_file = f'{path_to_here}/data/catalogs/catalog-local.yml'\n if os.path.exists(self.catalog_file):\n with open(self.catalog_file, 'r') as fid:\n self.catalog = yaml.safe_load(fid)\n else:\n self.catalog = yaml.safe_load(\n \"\"\"\n description: Local assets\n\n plugins:\n source:\n - module: intake_xarray\n\n sources: {}\n \"\"\"\n )\n\n def add_source(self, key, urlpath, description, driver='netcdf', overwrite=False, **kwargs):\n \"\"\"add a new source to the catalog\"\"\"\n\n if key in self.catalog['sources']:\n if not overwrite:\n raise ValueError(f'source {key} exists; set `overwrite` to true to overwrite')\n else:\n print(f'overwriting \"{key}\" key in \"sources\"')\n\n args = dict(urlpath=urlpath)\n args.update(kwargs)\n\n self.catalog['sources'][key] = dict(\n driver=driver,\n description=description,\n args=args,\n )\n self.persist()\n\n def persist(self):\n \"\"\"write the catalog to disk\"\"\"\n with open(self.catalog_file, 'w') as fid:\n yaml.dump(self.catalog, fid)\n\n def open_catalog(self):\n \"\"\"return as intake catalog\"\"\"\n return intake.open_catalog(self.catalog_file)\n\n def __repr__(self):\n return self.catalog.__repr__()\n\n\ndef infer_lat_name(ds):\n lat_names = ['latitude', 'lat']\n for n in lat_names:\n if n in ds:\n return n\n raise ValueError('could not determine lat name')\n\n\ndef infer_lon_name(ds):\n lon_names = ['longitude', 'lon']\n for n in lon_names:\n if n in ds:\n return n\n raise ValueError('could not determine lon name')\n\n\ndef lat_weights_regular_grid(lat):\n \"\"\"\n Generate latitude weights for equally spaced (regular) global grids.\n Weights are computed as sin(lat+dlat/2)-sin(lat-dlat/2) and sum to 2.0.\n \"\"\"\n dlat = np.abs(np.diff(lat))\n np.testing.assert_almost_equal(dlat, dlat[0])\n w = np.abs(np.sin(np.radians(lat + dlat[0] / 2.0)) - np.sin(np.radians(lat - dlat[0] / 2.0)))\n\n if np.abs(lat[0]) > 89.9999:\n w[0] = np.abs(1.0 - np.sin(np.radians(np.pi / 2 - dlat[0])))\n\n if np.abs(lat[-1]) > 89.9999:\n w[-1] = np.abs(1.0 - np.sin(np.radians(np.pi / 2 - dlat[0])))\n\n return w\n\n\ndef compute_grid_area(ds, check_total=True):\n \"\"\"Compute the area of grid cells.\n\n Parameters\n ----------\n\n ds : xarray.Dataset\n Input dataset with latitude and longitude 
fields\n\n check_total : Boolean, optional\n Test that total area is equal to area of the sphere.\n\n Returns\n -------\n\n area : xarray.DataArray\n DataArray with area field.\n\n \"\"\"\n\n radius_earth = 6.37122e6 # m, radius of Earth\n area_earth = 4.0 * np.pi * radius_earth ** 2 # area of earth [m^2]e\n\n lon_name = infer_lon_name(ds)\n lat_name = infer_lat_name(ds)\n\n weights = lat_weights_regular_grid(ds[lat_name])\n area = weights + 0.0 * ds[lon_name] # add 'lon' dimension\n area = (area_earth / area.sum(dim=(lat_name, lon_name))) * area\n\n if check_total:\n np.testing.assert_approx_equal(np.sum(area), area_earth)\n\n return xr.DataArray(\n area, dims=(lat_name, lon_name), attrs={'units': 'm^2', 'long_name': 'area'}\n )\n" ]
[ [ "numpy.concatenate", "numpy.testing.assert_almost_equal", "numpy.sum", "numpy.diff", "numpy.radians", "numpy.abs", "numpy.hstack" ] ]
jggjevestad/NavLib
[ "d81fd6e3d4b733aaefd4c69cea6b5d44a06f820b" ]
[ "lib/gnss.py" ]
[ "# Import libraries\nfrom numpy import array, sqrt, sin, cos, arctan2\nfrom lib.constants import GM, OMEGADOTe\n\n\n# Correction for beginning or end of week crossovers in GNSS systems\ndef dt(t, t0):\n t = t - t0\n\n if t > 302400:\n t = t - 604800\n elif t < -302400:\n t = t + 604800\n\n return t\n\n\n# Satellite ECEF position\ndef satpos(ttr, toe, ROOTa, DELTAn, M0, e, omega, Cus, Cuc, Crs, Crc, Cis, Cic, i0, iDOT, OMEGA0, OMEGADOT):\n # Anomalies of the Keplerian orbit\n a = ROOTa**2 # Semi-major axis [m]\n n0 = sqrt(GM/a**3) # Mean angular velocity [rad/sec]\n t = dt(ttr, toe) # Time from reference epoch [s]\n n = n0 + DELTAn # Corrected mean motion [rad/s]\n M = M0 + n*t # Mean anomaly [rad]\n\n # Kepler's equation\n epsilon = 1e-10\n E_new = M\n E = 0\n\n while abs(E_new - E) > epsilon:\n E = E_new\n E_new = M + e*sin(E)\n\n # Eccentric anomaly\n E = E_new\n\n # True anomaly\n v = arctan2(sqrt(1 - e**2)*sin(E), cos(E) - e)\n\n # Argument of latitude\n PHI = v + omega\n\n # Second harmonic pertubations\n du = Cus*sin(2*PHI) + Cuc*cos(2*PHI) # Argument of latitude correction [rad]\n dr = Crs*sin(2*PHI) + Crc*cos(2*PHI) # Radius correction [m]\n di = Cis*sin(2*PHI) + Cic*cos(2*PHI) # Inclination correction[rad]\n\n # Orbit corrections\n u = PHI + du # Corrected argument of latitude [rad]\n r = a*(1 - e*cos(E)) + dr # Corrected radius [m]\n i = i0 + di + iDOT*t # Corrected inclination [rad]\n\n # Corrected longitude of ascending node\n OMEGA = OMEGA0 + (OMEGADOT - OMEGADOTe)*t - OMEGADOTe*toe\n\n # Satellite position in ECEF system\n Xs0 = array([[r*cos(u)*cos(OMEGA) - r*sin(u)*sin(OMEGA)*cos(i)],\n [r*cos(u)*sin(OMEGA) + r*sin(u)*cos(OMEGA)*cos(i)],\n [r*sin(u)*sin(i)]])\n\n return Xs0\n" ]
[ [ "numpy.sin", "numpy.sqrt", "numpy.cos" ] ]
alantess/vigilantV2
[ "3bc44e5b87d69f87bccd4df534478ba665f2391f" ]
[ "common/helpers/support.py" ]
[ "from PIL import Image\nfrom scipy import misc, ndimage\nimport matplotlib.pyplot as plt\nimport torch\nimport numpy as np\nfrom torchvision import models\nfrom torchvision import transforms\n\n\ndef intepret_semantic_model(model, device, alpha=50):\n invTrans = transforms.Compose([\n transforms.Normalize(mean=[0., 0., 0.],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.225]),\n transforms.Normalize(mean=[-0.485, -0.456, -0.406], std=[1., 1., 1.]),\n ])\n preprocess = transforms.Compose([\n transforms.ToTensor(),\n transforms.Resize((512, 512)),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n ])\n model = model.to(device).eval()\n model.load()\n image = Image.open(\"../etc/dash.jpg\")\n preproc_img = preprocess(image)\n\n preproc_img = preproc_img.unsqueeze(0).to(device)\n\n with torch.no_grad():\n out = model(preproc_img)\n mask = out[0].permute(1, 2, 0)\n mask = mask.mul(255).clamp(0, 255)\n mask = mask.detach().cpu().numpy().astype(np.float32)\n\n preproc_img = invTrans(preproc_img)\n plt.figure()\n plt.axis('off')\n plt.imshow(preproc_img[0].permute(1, 2, 0).cpu())\n plt.imshow(apply_sharpen_filter(mask, alpha),\n alpha=0.4,\n cmap='winter',\n interpolation='gaussian')\n plt.show()\n\n\ndef apply_sharpen_filter(img, alpha):\n blurred_filter = ndimage.gaussian_filter(img, 3)\n filter_blurred = ndimage.gaussian_filter(blurred_filter, 1)\n img = blurred_filter + alpha * (blurred_filter - filter_blurred)\n return img\n" ]
[ [ "torch.no_grad", "scipy.ndimage.gaussian_filter", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ] ]
kottmanj/z-quantum-core
[ "21752e92e79aafedbfeb6e7ae196bdc2fd5803e4" ]
[ "src/python/zquantum/core/estimator_test.py" ]
[ "from pyquil import Program\nfrom pyquil.gates import X\nfrom openfermion import QubitOperator, qubit_operator_sparse, IsingOperator\nimport numpy as np\nimport pytest\n\nfrom .interfaces.estimator_test import EstimatorTests\nfrom .interfaces.mock_objects import MockQuantumBackend, MockQuantumSimulator\nfrom .estimator import (\n BasicEstimator,\n ExactEstimator,\n get_context_selection_circuit,\n get_context_selection_circuit_for_group,\n)\nfrom .measurement import ExpectationValues\nfrom .circuit import Circuit\n\n\nclass TestEstimatorUtils:\n def test_get_context_selection_circuit_offdiagonal(self):\n term = ((0, \"X\"), (1, \"Y\"))\n circuit, ising_operator = get_context_selection_circuit(term)\n\n # Need to convert to QubitOperator in order to get matrix representation\n qubit_operator = QubitOperator()\n for ising_term in ising_operator.terms:\n qubit_operator += QubitOperator(\n ising_term, ising_operator.terms[ising_term]\n )\n\n target_unitary = qubit_operator_sparse(QubitOperator(term))\n transformed_unitary = (\n circuit.to_unitary().conj().T\n @ qubit_operator_sparse(qubit_operator)\n @ circuit.to_unitary()\n )\n\n assert np.allclose(target_unitary.todense(), transformed_unitary)\n\n def test_get_context_selection_circuit_diagonal(self):\n term = ((4, \"Z\"), (2, \"Z\"))\n circuit, ising_operator = get_context_selection_circuit(term)\n assert len(circuit.gates) == 0\n assert ising_operator == IsingOperator(term)\n\n def test_get_context_selection_circuit_for_group(self):\n group = QubitOperator(((0, \"X\"), (1, \"Y\"))) - 0.5 * QubitOperator(((1, \"Y\"),))\n circuit, ising_operator = get_context_selection_circuit_for_group(group)\n\n # Need to convert to QubitOperator in order to get matrix representation\n qubit_operator = QubitOperator()\n for ising_term in ising_operator.terms:\n qubit_operator += QubitOperator(\n ising_term, ising_operator.terms[ising_term]\n )\n\n target_unitary = qubit_operator_sparse(group)\n transformed_unitary = (\n circuit.to_unitary().conj().T\n @ qubit_operator_sparse(qubit_operator)\n @ circuit.to_unitary()\n )\n\n assert np.allclose(target_unitary.todense(), transformed_unitary)\n\n\nclass TestBasicEstimator(EstimatorTests):\n @pytest.fixture()\n def estimator(self, request):\n return BasicEstimator()\n\n @pytest.fixture()\n def target_operator(self, request):\n return QubitOperator(\"Z0\")\n\n @pytest.fixture()\n def circuit(self, request):\n return Circuit(Program(X(0)))\n\n @pytest.fixture()\n def backend(self, request):\n return MockQuantumBackend(n_samples=20)\n\n @pytest.fixture()\n def n_samples(self, request):\n return 10\n\n def test_get_estimated_expectation_values(\n self, estimator, backend, circuit, target_operator, n_samples\n ):\n # When\n values = estimator.get_estimated_expectation_values(\n backend=backend,\n circuit=circuit,\n target_operator=target_operator,\n n_samples=n_samples,\n ).values\n value = values[0]\n # Then\n assert len(values) == 1\n assert value >= -1\n assert value <= 1\n\n def test_get_estimated_expectation_values_samples_from_backend(\n self,\n estimator,\n backend,\n circuit,\n target_operator,\n ):\n # Given\n # When\n values = estimator.get_estimated_expectation_values(\n backend=backend,\n circuit=circuit,\n target_operator=target_operator,\n ).values\n value = values[0]\n # Then\n assert len(values) == 1\n assert value >= -1\n assert value <= 1\n\n def test_n_samples_is_restored(self, estimator, backend, circuit, target_operator):\n # Given\n backend.n_samples = 5\n # When\n values = 
estimator.get_estimated_expectation_values(\n backend, circuit, target_operator, n_samples=10\n )\n # Then\n assert backend.n_samples == 5\n\n def test_get_estimated_expectation_values_with_constant(\n self, estimator, backend, circuit, n_samples\n ):\n # Given\n coefficient = -2\n constant_qubit_operator = QubitOperator((), coefficient) + QubitOperator(\n (0, \"X\")\n )\n\n # When\n values = estimator.get_estimated_expectation_values(\n backend=backend,\n circuit=circuit,\n target_operator=constant_qubit_operator,\n n_samples=n_samples,\n ).values\n value = values[1]\n # Then\n assert len(values) == 2\n assert coefficient == value\n\n def test_get_estimated_expectation_values_optimal_shot_allocation(\n self, estimator, backend, circuit, target_operator\n ):\n # TODO: After a deterministic testing backend is imlemented, this test\n # should be updated to actually check that shots are being correctly\n # allocated and the expectation values correctly estimated.\n\n # Given\n # When\n values = estimator.get_estimated_expectation_values(\n backend=backend,\n circuit=circuit,\n target_operator=target_operator,\n shot_allocation_strategy=\"optimal\",\n n_total_samples=100,\n ).values\n value = values[0]\n # Then\n assert len(values) == 1\n assert value >= -1\n assert value <= 1\n\n def test_get_estimated_expectation_values_optimal_shot_allocation_with_prior(\n self, estimator, backend, circuit, target_operator\n ):\n # TODO: After a deterministic testing backend is imlemented, this test\n # should be updated to actually check that shots are being correctly\n # allocated and the expectation values correctly estimated.\n\n # Given\n # When\n estimator.prior_expectation_values = ExpectationValues(\n np.array([0 for _ in target_operator.terms])\n )\n values = estimator.get_estimated_expectation_values(\n backend=backend,\n circuit=circuit,\n target_operator=target_operator,\n shot_allocation_strategy=\"optimal\",\n n_total_samples=100,\n ).values\n value = values[0]\n # Then\n assert len(values) == 1\n assert value >= -1\n assert value <= 1\n\n @pytest.mark.parametrize(\n \"n_samples,n_total_samples,shot_allocation_strategy\",\n [\n (None, 100, \"uniform\"),\n (100, None, \"optimal\"),\n (100, 100, \"optimal\"),\n (100, 100, \"uniform\"),\n (100, None, \"foo\"),\n ],\n )\n def test_get_estimated_expectation_values_invalid_options(\n self,\n estimator,\n backend,\n circuit,\n target_operator,\n n_samples,\n n_total_samples,\n shot_allocation_strategy,\n ):\n with pytest.raises(ValueError):\n estimator.get_estimated_expectation_values(\n backend=backend,\n circuit=circuit,\n target_operator=target_operator,\n shot_allocation_strategy=shot_allocation_strategy,\n n_total_samples=n_total_samples,\n n_samples=n_samples,\n )\n\n\nclass TestExactEstimator(EstimatorTests):\n @pytest.fixture()\n def estimator(self, request):\n return ExactEstimator()\n\n @pytest.fixture()\n def target_operator(self, request):\n return QubitOperator(\"Z0\")\n\n @pytest.fixture()\n def circuit(self, request):\n return Circuit(Program(X(0)))\n\n @pytest.fixture()\n def backend(self, request):\n return MockQuantumSimulator()\n\n @pytest.fixture()\n def n_samples(self, request):\n return None\n\n def test_require_quantum_simulator(\n self, estimator, backend, circuit, target_operator\n ):\n backend = MockQuantumBackend()\n with pytest.raises(AttributeError):\n value = estimator.get_estimated_expectation_values(\n backend=backend,\n circuit=circuit,\n target_operator=target_operator,\n ).values\n\n def 
test_get_estimated_expectation_values(\n self, estimator, backend, circuit, target_operator\n ):\n # Given\n # When\n values = estimator.get_estimated_expectation_values(\n backend=backend,\n circuit=circuit,\n target_operator=target_operator,\n n_samples=None,\n ).values\n value = values[0]\n # Then\n assert len(values) == 1\n assert value >= -1\n assert value <= 1\n" ]
[ [ "numpy.array" ] ]
jsheedy/biofeedback-cube
[ "178a518d70fdf0dfa3b51226a2a97dbfa68a0543" ]
[ "biofeedback_cube/fx/midi.py" ]
[ "import numpy as np\n\nfrom ..state import clock\nfrom ..config import HEIGHT, WIDTH\nfrom ..hydra import hydra\nfrom ..palettes import palettes, cmap\nfrom ..state import midi_notes\nfrom ..utils import index_dict, xx, yy, sin, cos\n\n\ndef note_default(grid, t):\n\n # _min, _max = min(notes), max(notes)\n _min, _max = 40, 90\n dynamic_range = _max - _min\n\n if midi_notes.notes[0]:\n for i, (note, velocity, timestamp) in enumerate(midi_notes.notes[0]):\n y = (note - _min) / dynamic_range\n yi = int(y * (HEIGHT - 1))\n grid[yi, i % WIDTH] = (0.0, 1.0, 0)\n\n\ndef note_tunnel(grid, t):\n palette = index_dict(palettes, hydra.a)\n\n if midi_notes.notes[0]:\n for i, (note, velocity, timestamp) in enumerate(midi_notes.notes[0]):\n age = np.clip(1 - (t - timestamp), 0, 1)\n x = 0.5 + (1 - age) * np.cos(2 * np.pi * (note / 12))\n y = 0.5 + (1 - age) * np.sin(2 * np.pi * (note / 12))\n\n r = 4.01 * 1 / age\n cone = np.clip(1-np.sqrt((r*(xx-x))**2 + (r*(yy-y))**2), 0, 1)\n mask = cone > 0\n grid[mask] = cmap(palette, cone)[mask]\n\n\ndef metronome_default(grid, t):\n if midi_notes.notes[1]:\n note, _velocity, timestamp = midi_notes.notes[1][-1]\n bright = np.clip(1 - (t - timestamp), 0, 1)\n grid[:2, :] = (0.0, bright, 0)\n grid[-2:, :] = (0.0, bright, 0.0)\n grid[-2:, :] = (1.0, bright, 1.0)\n\n\nNOTE_MODES = {\n 'default': note_default,\n 'tunnel': note_tunnel\n}\n\nMETRONOME_MODES = {\n 'default': metronome_default\n}\n\n\ndef midi(grid, t):\n\n midi_notes.bleed()\n note_handler = index_dict(NOTE_MODES, hydra.f)\n note_handler(grid, t)\n\n metronome_handler = index_dict(METRONOME_MODES, hydra.g)\n metronome_handler(grid, t)\n" ]
[ [ "numpy.sin", "numpy.sqrt", "numpy.cos", "numpy.clip" ] ]
CADWRDeltaModeling/schimpy
[ "55b4cda524205dce64d5cfa0c86e9edd8cfacaa5" ]
[ "tests/test_schism_mesh.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\" unit tests of schism_mesh\n\"\"\"\nfrom schimpy import SchismMesh, read_mesh, write_mesh, BoundaryType\nimport numpy as np\nimport unittest\nimport os\n\n\nclass TestSchismMesh(unittest.TestCase):\n \"\"\" Unit test class for TriSchismMesh\n \"\"\"\n\n def setUp(self):\n self.testdata_dir = os.path.join(os.path.dirname(__file__), \"testdata\")\n self.fpath_mesh = os.path.join(self.testdata_dir, \"testmesh.gr3\")\n self.fpath_vmesh_sz = os.path.join(self.testdata_dir, \"vgrid_sz.in\")\n\n def test_schism_mesh_sms_reader(self):\n fpath_mesh = os.path.join(self.testdata_dir, 'testmesh.2dm')\n mesh = read_mesh(fpath_mesh)\n self.assertEqual(mesh.n_nodes(), 112)\n self.assertEqual(mesh.n_elems(), 135)\n self.assertTrue(np.allclose(mesh.node(0), np.array([0., 100., 0.])))\n self.assertTrue(np.array_equal(mesh.elem(0), np.array([2, 0, 4])))\n\n def test_find_two_neigboring_node_paths(self):\n path = self.fpath_mesh\n mesh = read_mesh(path)\n # Tri area\n line_segment = (31.0, 69.0, 39.0, 101.0)\n up_path, down_path = mesh.find_two_neighboring_node_paths(line_segment)\n self.assertListEqual(up_path, [32, 25, 19, 14])\n self.assertListEqual(down_path, [24, 18, 13, 9])\n # Quad area\n line_segment = (69.0, 69.0, 101.0, 61.0)\n up_path, down_path = mesh.find_two_neighboring_node_paths(line_segment)\n self.assertListEqual(up_path, [64, 73, 82, 90])\n self.assertListEqual(down_path, [56, 65, 74, 83])\n # Mixed area\n line_segment = (-1.0, 1.0, 101.0, 9.0)\n up_path, down_path = mesh.find_two_neighboring_node_paths(line_segment)\n self.assertListEqual(up_path,\n [52, 60, 68, 76, 84, 91, 97, 102, 106, 109, 111])\n self.assertListEqual(down_path,\n [44, 53, 61, 69, 77, 85, 92, 98, 103, 107, 110])\n # Ill-defined, tri\n line_segment = (31.0, 71.0, 39.0, 101.0)\n up_path, down_path = mesh.find_two_neighboring_node_paths(line_segment)\n self.assertListEqual(up_path, [25, 19, 14])\n self.assertListEqual(down_path, [24, 18, 13, 9])\n # Ill-defined, quad\n line_segment = (71.0, 69.0, 101.0, 61.0)\n up_path, down_path = mesh.find_two_neighboring_node_paths(line_segment)\n self.assertListEqual(up_path, [73, 82, 90])\n self.assertListEqual(down_path, [65, 74, 83])\n # Diagonal corner cut\n line_segment = (82., -3, 103., 18.)\n up_path, down_path = mesh.find_two_neighboring_node_paths(line_segment)\n self.assertListEqual(up_path, [109, 111, 110])\n self.assertListEqual(down_path, [106, 103, 107, 104, 108])\n\n def test_schism_mesh_gr3_reader_wo_vgrid(self):\n path = self.fpath_mesh\n mesh = read_mesh(path)\n self.assertEqual(mesh.n_nodes(), 112)\n self.assertEqual(mesh.n_elems(), 135)\n # Boundaries\n self.assertEqual(mesh.n_boundaries(), 3)\n self.assertEqual(mesh.n_boundaries(btype=BoundaryType.OPEN), 1)\n self.assertEqual(mesh.n_boundaries(btype=BoundaryType.LAND), 2)\n self.assertEqual(mesh.n_total_boundary_nodes(BoundaryType.OPEN), 11)\n\n def test_schism_mesh_gr3_reader_w_vgrid_sz(self):\n fpath_mesh = self.fpath_mesh\n fpath_vmesh = self.fpath_vmesh_sz\n mesh = read_mesh(fpath_mesh, fpath_vmesh)\n self.assertEqual(mesh.vmesh.param['nvrt'], 12)\n self.assertEqual(mesh.vmesh.param['kz'], 2)\n self.assertEqual(mesh.vmesh.param['h_s'], 80.)\n self.assertEqual(mesh.vmesh.param['h_c'], 5.0)\n self.assertTrue(np.allclose(mesh.vmesh.sigma, np.array(\n [-1.00, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.])))\n\n def test_schism_mesh_gr3_writer(self):\n fpath_mesh = self.fpath_mesh\n mesh = read_mesh(fpath_mesh)\n fpath_mesh_out = os.path.join(\n 
os.path.dirname(__file__), \"testdata/meshout.gr3\")\n write_mesh(mesh, fpath_mesh_out, write_boundary=True)\n meshout = read_mesh(fpath_mesh_out)\n self.assertEqual(meshout.n_nodes(), 112)\n self.assertEqual(meshout.n_elems(), 135)\n self.assertEqual(meshout.n_boundaries(), 3)\n if os.path.exists(fpath_mesh_out):\n os.remove(fpath_mesh_out)\n\n def test_schism_mesh_shp_writer(self):\n fpath_mesh = self.fpath_mesh\n mesh = read_mesh(fpath_mesh)\n fpath_mesh_out = os.path.join(\n os.path.dirname(__file__), \"testdata/meshout.shp\")\n write_mesh(mesh, fpath_mesh_out, write_boundary=True)\n # meshout = read_mesh(fpath_mesh_out)\n # self.assertEqual(meshout.n_nodes(), 112)\n # self.assertEqual(meshout.n_elems(), 135)\n # self.assertEqual(meshout.n_boundaries(), 3)\n if os.path.exists(fpath_mesh_out):\n os.remove(fpath_mesh_out)\n\n def test_schism_mesh_areas(self):\n fpath_mesh = self.fpath_mesh\n mesh = read_mesh(fpath_mesh)\n areas = mesh.areas()\n self.assertEqual(areas[0], 50.)\n self.assertEqual(areas[60], 100.)\n\n def test_schism_mesh_edge_len(self):\n fpath_mesh = self.fpath_mesh\n mesh = read_mesh(fpath_mesh)\n edge_lens = mesh.edge_len()\n self.assertAlmostEqual(edge_lens[0], 14.14213562)\n self.assertAlmostEqual(edge_lens[1], 10.)\n\n def test_schism_mesh_centroids(self):\n fpath_mesh = self.fpath_mesh\n mesh = read_mesh(fpath_mesh)\n centroids = mesh.centroids()\n np.testing.assert_almost_equal(\n centroids[0, :], np.array([6.66666667, 96.66666667]))\n np.testing.assert_almost_equal(\n centroids[60, :], np.array([75., 45.]))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array" ] ]
tmatha/datasets
[ "936e5f2ebd0e14b5ee3116de7d1690d53933f7cf" ]
[ "tensorflow_datasets/image/lsun.py" ]
[ "# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"LSUN dataset.\n\nLarge scene understanding dataset.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport os\nimport tensorflow as tf\n\nimport tensorflow_datasets.public_api as tfds\n\nLSUN_URL = \"http://dl.yf.io/lsun/scenes/%s_%s_lmdb.zip\"\n\n_CITATION = \"\"\"\\\n@article{journals/corr/YuZSSX15,\n added-at = {2018-08-13T00:00:00.000+0200},\n author = {Yu, Fisher and Zhang, Yinda and Song, Shuran and Seff, Ari and Xiao, Jianxiong},\n biburl = {https://www.bibsonomy.org/bibtex/2446d4ffb99a5d7d2ab6e5417a12e195f/dblp},\n ee = {http://arxiv.org/abs/1506.03365},\n interhash = {3e9306c4ce2ead125f3b2ab0e25adc85},\n intrahash = {446d4ffb99a5d7d2ab6e5417a12e195f},\n journal = {CoRR},\n keywords = {dblp},\n timestamp = {2018-08-14T15:08:59.000+0200},\n title = {LSUN: Construction of a Large-scale Image Dataset using Deep Learning with Humans in the Loop.},\n url = {http://dblp.uni-trier.de/db/journals/corr/corr1506.html#YuZSSX15},\n volume = {abs/1506.03365},\n year = 2015\n}\n\"\"\"\n\n\n# From http://dl.yf.io/lsun/categories.txt minus \"test\"\n_CATEGORIES = [\n \"classroom\",\n \"bedroom\",\n \"bridge\",\n \"church_outdoor\",\n \"conference_room\",\n \"dining_room\",\n \"kitchen\",\n \"living_room\",\n \"restaurant\",\n \"tower\",\n]\n\n\nclass Lsun(tfds.core.GeneratorBasedBuilder):\n \"\"\"Lsun dataset.\"\"\"\n\n BUILDER_CONFIGS = [\n tfds.core.BuilderConfig( # pylint: disable=g-complex-comprehension\n name=category,\n description=\"Images of category %s\" % category,\n version=\"0.1.1\",\n ) for category in _CATEGORIES\n ]\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=(\"Large scale images showing different objects \"\n \"from given categories like bedroom, tower etc.\"),\n features=tfds.features.FeaturesDict({\n \"image\": tfds.features.Image(encoding_format=\"jpeg\"),\n }),\n urls=[\"https://www.yf.io/p/lsun\"],\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n extracted_dirs = dl_manager.download_and_extract({\n \"train\": LSUN_URL % (self.builder_config.name, \"train\"),\n \"val\": LSUN_URL % (self.builder_config.name, \"val\")\n })\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n num_shards=40,\n gen_kwargs={\n \"extracted_dir\": extracted_dirs[\"train\"],\n \"file_path\": \"%s_%s_lmdb\" % (self.builder_config.name, \"train\")\n }),\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n num_shards=1,\n gen_kwargs={\n \"extracted_dir\": extracted_dirs[\"val\"],\n \"file_path\": \"%s_%s_lmdb\" % (self.builder_config.name, \"val\")\n }),\n ]\n\n def _generate_examples(self, extracted_dir, file_path):\n with tf.Graph().as_default():\n dataset = tf.contrib.data.LMDBDataset(\n os.path.join(extracted_dir, file_path, \"data.mdb\"))\n for _, jpeg_image in tfds.as_numpy(dataset):\n yield {\"image\": 
io.BytesIO(jpeg_image)}\n" ]
[ [ "tensorflow.Graph" ] ]
mli0603/lietorch
[ "9d8130bec3d01825591b505808bedbb0dffd4b72" ]
[ "examples/rgbdslam/viz.py" ]
[ "import time\nimport argparse\nimport torch\nimport scipy\nimport numpy as np\nimport open3d as o3d\n\nfrom queue import Empty\nfrom multiprocessing import Queue, Process\nfrom scipy.spatial.transform import Rotation\n\ndef pose_matrix_from_quaternion(pvec):\n \"\"\" convert 4x4 pose matrix to (t, q) \"\"\"\n pose = np.eye(4)\n pose[:3,:3] = Rotation.from_quat(pvec[3:]).as_matrix()\n pose[:3, 3] = pvec[:3]\n return pose\n\ndef create_camera_actor(is_gt=False, scale=0.05):\n \"\"\" build open3d camera polydata \"\"\"\n\n cam_points = scale * np.array([\n [ 0, 0, 0],\n [-1, -1, 1.5],\n [ 1, -1, 1.5],\n [ 1, 1, 1.5],\n [-1, 1, 1.5],\n [-0.5, 1, 1.5],\n [ 0.5, 1, 1.5],\n [ 0, 1.2, 1.5]])\n\n cam_lines = np.array([[1, 2], [2, 3], [3, 4], [4, 1],\n [1, 0], [0, 2], [3, 0], [0, 4], [5, 7], [7, 6]])\n\n camera_actor = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(cam_points),\n lines=o3d.utility.Vector2iVector(cam_lines))\n\n color = (0.0, 0.0, 0.0) if is_gt else (0.0, 0.8, 0.8)\n camera_actor.paint_uniform_color(color)\n\n return camera_actor\n\ndef create_point_cloud_actor(points, colors):\n \"\"\" open3d point cloud from numpy array \"\"\"\n\n point_cloud = o3d.geometry.PointCloud()\n point_cloud.points = o3d.utility.Vector3dVector(points)\n point_cloud.colors = o3d.utility.Vector3dVector(colors)\n\n return point_cloud\n\ndef draw_trajectory(queue):\n\n draw_trajectory.queue = queue\n draw_trajectory.cameras = {}\n draw_trajectory.points = {}\n draw_trajectory.ix = 0\n draw_trajectory.warmup = 8\n\n def animation_callback(vis):\n cam = vis.get_view_control().convert_to_pinhole_camera_parameters()\n while True:\n try:\n data = draw_trajectory.queue.get_nowait()\n if data[0] == 'pose':\n i, pose, is_gt = data[1:]\n \n # convert to 4x4 matrix\n pose = pose_matrix_from_quaternion(pose)\n\n if i in draw_trajectory.cameras:\n cam_actor, pose_prev = draw_trajectory.cameras[i]\n pose_change = pose @ np.linalg.inv(pose_prev)\n \n cam_actor.transform(pose_change)\n vis.update_geometry(cam_actor)\n\n if i in draw_trajectory.points:\n pc = draw_trajectory.points[i]\n pc.transform(pose_change)\n vis.update_geometry(pc)\n\n else:\n cam_actor = create_camera_actor(is_gt)\n cam_actor.transform(pose)\n vis.add_geometry(cam_actor)\n\n if not is_gt:\n draw_trajectory.cameras[i] = (cam_actor, pose)\n\n elif data[0] == 'points':\n i, points, colors = data[1:]\n point_actor = create_point_cloud_actor(points, colors)\n\n pose = draw_trajectory.cameras[i][1]\n point_actor.transform(pose)\n vis.add_geometry(point_actor)\n\n draw_trajectory.points[i] = point_actor\n\n elif data[0] == 'reset':\n draw_trajectory.warmup = -1\n \n for i in draw_trajectory.points:\n vis.remove_geometry(draw_trajectory.points[i])\n\n for i in draw_trajectory.cameras:\n vis.remove_geometry(draw_trajectory.cameras[i][0])\n\n draw_trajectory.cameras = {}\n draw_trajectory.points = {}\n\n except Empty:\n break\n\n # hack to allow interacting with vizualization during inference\n if len(draw_trajectory.cameras) >= draw_trajectory.warmup:\n cam = vis.get_view_control().convert_from_pinhole_camera_parameters(cam)\n\n vis.poll_events()\n vis.update_renderer()\n\n vis = o3d.visualization.Visualizer()\n\n vis.register_animation_callback(animation_callback)\n vis.create_window(height=540, width=960)\n vis.get_render_option().load_from_json(\"assets/renderoption.json\")\n\n vis.run()\n vis.destroy_window()\n\n\nclass SLAMFrontend:\n def __init__(self):\n self.queue = Queue()\n self.p = Process(target=draw_trajectory, args=(self.queue, ))\n\n 
def update_pose(self, index, pose, gt=False):\n if isinstance(pose, torch.Tensor):\n pose = pose.cpu().numpy()\n self.queue.put_nowait(('pose', index, pose, gt))\n\n def update_points(self, index, points, colors):\n if isinstance(points, torch.Tensor):\n points = points.cpu().numpy()\n self.queue.put_nowait(('points', index, points, colors))\n \n def reset(self):\n self.queue.put_nowait(('reset', ))\n\n def start(self):\n self.p.start()\n return self\n\n def join(self):\n self.p.join()\n\n\n\n" ]
[ [ "numpy.array", "scipy.spatial.transform.Rotation.from_quat", "numpy.linalg.inv", "numpy.eye" ] ]
thyamu/Neet
[ "cdc55fdb25700e44bcdb4f496b91d21a61a81c83" ]
[ "test/boolean/test_sensitivity.py" ]
[ "import unittest\nfrom neet.boolean import (LogicNetwork, WTNetwork)\nimport numpy as np\n\n\nclass TestSensitivity(unittest.TestCase):\n def test_sensitivity(self):\n net = WTNetwork([[1, -1], [0, 1]], [0.5, 0])\n self.assertEqual(1.0, net.sensitivity([0, 0]))\n\n def test_average_sensitivity_lengths(self):\n net = WTNetwork([[1, -1], [0, 1]], [0.5, 0])\n\n with self.assertRaises(ValueError):\n net.average_sensitivity(states=[[0, 0], [0, 1]], weights=[0, 1, 2])\n\n def test_average_sensitivity(self):\n net = WTNetwork([[1, -1], [0, 1]], [0.5, 0])\n self.assertEqual(1.0, net.average_sensitivity())\n\n def test_sensitivity_s_pombe(self):\n from neet.boolean.examples import s_pombe\n s = s_pombe.sensitivity([0, 0, 0, 0, 0, 1, 1, 0, 0])\n self.assertAlmostEqual(s, 1.0)\n\n def test_average_sensitivity_c_elegans(self):\n from neet.boolean.examples import c_elegans\n\n s = c_elegans.average_sensitivity()\n self.assertAlmostEqual(s, 1.265625)\n\n s = c_elegans.average_sensitivity(\n states=[[0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1]],\n weights=[9, 1])\n self.assertAlmostEqual(s, 1.7)\n\n def test_lambdaQ_c_elegans(self):\n from neet.boolean.examples import c_elegans\n self.assertAlmostEqual(c_elegans.lambdaQ(), 1.263099227661824)\n\n def test_average_sensitivity_logic_network(self):\n net = LogicNetwork([((1, 2), {'01', '10'}),\n ((0, 2), ('01', '10', '11')),\n ((0, 1), {'11'})])\n\n s = net.average_sensitivity()\n self.assertAlmostEqual(s, 1.3333333333333333)\n\n s = net.average_sensitivity(weights=np.ones(8))\n self.assertAlmostEqual(s, 1.3333333333333333)\n\n s = net.average_sensitivity(states=list(net))\n self.assertAlmostEqual(s, 1.3333333333333333)\n\n def test_lambdaQ_logic_network(self):\n net = LogicNetwork([((1, 2), {'01', '10'}),\n ((0, 2), ('01', '10', '11')),\n ((0, 1), {'11'})])\n self.assertAlmostEqual(net.lambdaQ(), 1.2807764064044149)\n\n def test_is_canalizing_logic_network(self):\n net = LogicNetwork([((1, 2), {'01', '10'}),\n ((0, 2), ('01', '10', '11')),\n ((0, 1), {'11'})])\n\n self.assertFalse(net.is_canalizing(0, 1))\n self.assertTrue(net.is_canalizing(1, 0))\n self.assertTrue(net.is_canalizing(2, 1))\n\n def test_canalizing(self):\n net = LogicNetwork([((1, 2), {'01', '10'}),\n ((0, 2), ('01', '10', '11')),\n ((0, 1), {'11'})])\n\n edges = net.canalizing_edges()\n self.assertEqual(edges, {(1, 0), (1, 2), (2, 0), (2, 1)})\n\n nodes = net.canalizing_nodes()\n self.assertEqual(nodes, {1, 2})\n\n def test_average_sensitivity_hgf(self):\n from neet.boolean.examples import hgf_signaling_in_keratinocytes\n self.assertAlmostEqual(hgf_signaling_in_keratinocytes.average_sensitivity(),\n 0.981618, places=6)\n\n def test_average_sensitivity_il_6(self):\n from neet.boolean.examples import il_6_signaling\n self.assertAlmostEqual(il_6_signaling.average_sensitivity(), 0.914971, places=6)\n" ]
[ [ "numpy.ones" ] ]
dphaas/pymeasure
[ "580c33bf5f1e409bb575c46bbd1df682bf27cfe1" ]
[ "pymeasure/instruments/keysight/keysightDSOX1102G.py" ]
[ "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2022 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\nimport logging\n\nimport numpy as np\n\nfrom pymeasure.instruments import Instrument\nfrom pymeasure.instruments.validators import strict_discrete_set, strict_range\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\nclass Channel():\n \"\"\" Implementation of a Keysight DSOX1102G Oscilloscope channel.\n\n Implementation modeled on Channel object of Tektronix AFG3152C instrument. \"\"\"\n\n BOOLS = {True: 1, False: 0}\n\n bwlimit = Instrument.control(\n \"BWLimit?\", \"BWLimit %d\",\n \"\"\" A boolean parameter that toggles 25 MHz internal low-pass filter.\"\"\",\n validator=strict_discrete_set,\n values=BOOLS,\n map_values=True\n )\n\n coupling = Instrument.control(\n \"COUPling?\", \"COUPling %s\",\n \"\"\" A string parameter that determines the coupling (\"ac\" or \"dc\").\"\"\",\n validator=strict_discrete_set,\n values={\"ac\": \"AC\", \"dc\": \"DC\"},\n map_values=True\n )\n\n display = Instrument.control(\n \"DISPlay?\", \"DISPlay %d\",\n \"\"\" A boolean parameter that toggles the display.\"\"\",\n validator=strict_discrete_set,\n values=BOOLS,\n map_values=True\n )\n\n invert = Instrument.control(\n \"INVert?\", \"INVert %d\",\n \"\"\" A boolean parameter that toggles the inversion of the input signal.\"\"\",\n validator=strict_discrete_set,\n values=BOOLS,\n map_values=True\n )\n\n label = Instrument.control(\n \"LABel?\", 'LABel \"%s\"',\n \"\"\" A string to label the channel. Labels with more than 10 characters are truncated to 10\n characters. May contain commonly used ASCII characters. Lower case characters are converted\n to upper case.\"\"\",\n get_process=lambda v: str(v[1:-1])\n )\n\n offset = Instrument.control(\n \"OFFSet?\", \"OFFSet %f\",\n \"\"\" A float parameter to set value that is represented at center of screen in\n Volts. The range of legal values varies depending on range and scale. If the specified\n value is outside of the legal range, the offset value is automatically set to the nearest\n legal value.\n \"\"\"\n )\n\n probe_attenuation = Instrument.control(\n \"PROBe?\", \"PROBe %f\",\n \"\"\" A float parameter that specifies the probe attenuation. 
The probe attenuation\n may be from 0.1 to 10000.\"\"\",\n validator=strict_range,\n values=[0.1, 10000]\n )\n\n range = Instrument.control(\n \"RANGe?\", \"RANGe %f\",\n \"\"\" A float parameter that specifies the full-scale vertical axis in Volts.\n When using 1:1 probe attenuation, legal values for the range are from 8 mV to 40 V.\"\"\"\n )\n\n scale = Instrument.control(\n \"SCALe?\", \"SCALe %f\",\n \"\"\" A float parameter that specifies the vertical scale, or units per division, in Volts.\"\"\"\n )\n\n def __init__(self, instrument, number):\n self.instrument = instrument\n self.number = number\n\n def values(self, command, **kwargs):\n \"\"\" Reads a set of values from the instrument through the adapter,\n passing on any key-word arguments.\n \"\"\"\n return self.instrument.values(\":channel%d:%s\" % (\n self.number, command), **kwargs)\n\n def ask(self, command):\n return self.instrument.ask(\":channel%d:%s\" % (self.number, command))\n\n def write(self, command):\n self.instrument.write(\":channel%d:%s\" % (self.number, command))\n\n def setup(self, bwlimit=None, coupling=None, display=None, invert=None, label=None, offset=None,\n probe_attenuation=None, vertical_range=None, scale=None):\n \"\"\" Setup channel. Unspecified settings are not modified. Modifying values such as\n probe attenuation will modify offset, range, etc. Refer to oscilloscope documentation and\n make multiple consecutive calls to setup() if needed.\n\n :param bwlimit: A boolean, which enables 25 MHz internal low-pass filter.\n :param coupling: \"ac\" or \"dc\".\n :param display: A boolean, which enables channel display.\n :param invert: A boolean, which enables input signal inversion.\n :param label: Label string with max. 10 commonly used ASCII characters.\n :param offset: Numerical value represented at center of screen, must be inside\n the legal range.\n :param probe_attenuation: Probe attenuation values from 0.1 to 10000.\n :param vertical_range: Full-scale vertical axis of the selected channel. When using 1:1\n probe attenuation, legal values for the range are from 8 mV to 40 V. If the probe\n attenuation is changed, the range value is multiplied by the probe attenuation factor.\n :param scale: Units per division. \"\"\"\n\n if vertical_range is not None and scale is not None:\n log.warning(\n 'Both \"vertical_range\" and \"scale\" are specified. 
Specified \"scale\" has priority.')\n\n if probe_attenuation is not None:\n self.probe_attenuation = probe_attenuation\n if bwlimit is not None:\n self.bwlimit = bwlimit\n if coupling is not None:\n self.coupling = coupling\n if display is not None:\n self.display = display\n if invert is not None:\n self.invert = invert\n if label is not None:\n self.label = label\n if offset is not None:\n self.offset = offset\n if vertical_range is not None:\n self.range = vertical_range\n if scale is not None:\n self.scale = scale\n\n @property\n def current_configuration(self):\n \"\"\" Read channel configuration as a dict containing the following keys:\n - \"CHAN\": channel number (int)\n - \"OFFS\": vertical offset (float)\n - \"RANG\": vertical range (float)\n - \"COUP\": \"dc\" or \"ac\" coupling (str)\n - \"IMP\": input impedance (str)\n - \"DISP\": currently displayed (bool)\n - \"BWL\": bandwidth limiting enabled (bool)\n - \"INV\": inverted (bool)\n - \"UNIT\": unit (str)\n - \"PROB\": probe attenuation (float)\n - \"PROB:SKEW\": skew factor (float)\n - \"STYP\": probe signal type (str)\n \"\"\"\n\n # Using the instrument's ask method because Channel.ask() adds the prefix \":channelX:\", and\n # to query the configuration details, we actually need to ask \":channelX?\", without a\n # second \":\"\n ch_setup_raw = self.instrument.ask(\":channel%d?\" % self.number).strip(\"\\n\")\n\n # ch_setup_raw has the following format:\n # :CHAN1:RANG +40.0E+00;OFFS +0.00000E+00;COUP DC;IMP ONEM;DISP 1;BWL 0;\n # INV 0;LAB \"1\";UNIT VOLT;PROB +10E+00;PROB:SKEW +0.00E+00;STYP SING\n\n # Cut out the \":CHANx:\" at beginning and split string\n ch_setup_splitted = ch_setup_raw[7:].split(\";\")\n\n # Create dict of setup parameters\n ch_setup_dict = dict(map(lambda v: v.split(\" \"), ch_setup_splitted))\n\n # Add \"CHAN\" key\n ch_setup_dict[\"CHAN\"] = ch_setup_raw[5]\n\n # Convert values to specific type\n to_str = [\"COUP\", \"IMP\", \"UNIT\", \"STYP\"]\n to_bool = [\"DISP\", \"BWL\", \"INV\"]\n to_float = [\"OFFS\", \"PROB\", \"PROB:SKEW\", \"RANG\"]\n to_int = [\"CHAN\"]\n for key in ch_setup_dict:\n if key in to_str:\n ch_setup_dict[key] = str(ch_setup_dict[key])\n elif key in to_bool:\n ch_setup_dict[key] = (ch_setup_dict[key] == \"1\")\n elif key in to_float:\n ch_setup_dict[key] = float(ch_setup_dict[key])\n elif key in to_int:\n ch_setup_dict[key] = int(ch_setup_dict[key])\n return ch_setup_dict\n\n\nclass KeysightDSOX1102G(Instrument):\n \"\"\" Represents the Keysight DSOX1102G Oscilloscope interface for interacting\n with the instrument.\n\n Refer to the Keysight DSOX1102G Oscilloscope Programmer's Guide for further details about\n using the lower-level methods to interact directly with the scope.\n\n .. code-block:: python\n\n scope = KeysightDSOX1102G(resource)\n scope.autoscale()\n ch1_data_array, ch1_preamble = scope.download_data(source=\"channel1\", points=2000)\n # ...\n scope.shutdown()\n\n Known issues:\n\n - The digitize command will be completed before the operation is. 
May lead to\n VI_ERROR_TMO (timeout) occurring when sending commands immediately after digitize.\n Current fix: if deemed necessary, add delay between digitize and follow-up command\n to scope.\n \"\"\"\n\n BOOLS = {True: 1, False: 0}\n\n def __init__(self, adapter, **kwargs):\n super().__init__(\n adapter, \"Keysight DSOX1102G Oscilloscope\", **kwargs\n )\n # Account for setup time for timebase_mode, waveform_points_mode\n self.adapter.connection.timeout = 6000\n self.ch1 = Channel(self, 1)\n self.ch2 = Channel(self, 2)\n\n #################\n # Channel setup #\n #################\n\n def autoscale(self):\n \"\"\" Autoscale displayed channels. \"\"\"\n self.write(\":autoscale\")\n\n ##################\n # Timebase Setup #\n ##################\n\n @property\n def timebase(self):\n \"\"\" Read timebase setup as a dict containing the following keys:\n - \"REF\": position on screen of timebase reference (str)\n - \"MAIN:RANG\": full-scale timebase range (float)\n - \"POS\": interval between trigger and reference point (float)\n - \"MODE\": mode (str)\"\"\"\n return self._timebase()\n\n timebase_mode = Instrument.control(\n \":TIMebase:MODE?\", \":TIMebase:MODE %s\",\n \"\"\" A string parameter that sets the current time base. Can be \"main\",\n \"window\", \"xy\", or \"roll\".\"\"\",\n validator=strict_discrete_set,\n values={\"main\": \"MAIN\", \"window\": \"WIND\", \"xy\": \"XY\", \"roll\": \"ROLL\"},\n map_values=True\n )\n\n timebase_offset = Instrument.control(\n \":TIMebase:POSition?\", \":TIMebase:REFerence CENTer;:TIMebase:POSition %f\",\n \"\"\" A float parameter that sets the time interval in seconds between the trigger\n event and the reference position (at center of screen by default).\"\"\"\n )\n\n timebase_range = Instrument.control(\n \":TIMebase:RANGe?\", \":TIMebase:RANGe %f\",\n \"\"\" A float parameter that sets the full-scale horizontal time in seconds for the\n main window.\"\"\"\n )\n\n timebase_scale = Instrument.control(\n \":TIMebase:SCALe?\", \":TIMebase:SCALe %f\",\n \"\"\" A float parameter that sets the horizontal scale (units per division) in seconds\n for the main window.\"\"\"\n )\n\n ###############\n # Acquisition #\n ###############\n\n acquisition_type = Instrument.control(\n \":ACQuire:TYPE?\", \":ACQuire:TYPE %s\",\n \"\"\" A string parameter that sets the type of data acquisition. Can be \"normal\", \"average\",\n \"hresolution\", or \"peak\".\"\"\",\n validator=strict_discrete_set,\n values={\"normal\": \"NORM\", \"average\": \"AVER\", \"hresolution\": \"HRES\", \"peak\": \"PEAK\"},\n map_values=True\n )\n\n acquisition_mode = Instrument.control(\n \":ACQuire:MODE?\", \":ACQuire:MODE %s\",\n \"\"\" A string parameter that sets the acquisition mode. Can be \"realtime\" or \"segmented\".\"\"\",\n validator=strict_discrete_set,\n values={\"realtime\": \"RTIM\", \"segmented\": \"SEGM\"},\n map_values=True\n )\n\n def run(self):\n \"\"\" Starts repetitive acquisitions.\n\n This is the same as pressing the Run key on the front panel.\n \"\"\"\n self.write(\":run\")\n\n def stop(self):\n \"\"\" Stops the acquisition. This is the same as pressing the Stop key on the front panel.\"\"\"\n self.write(\":stop\")\n\n def single(self):\n \"\"\" Causes the instrument to acquire a single trigger of data.\n This is the same as pressing the Single key on the front panel. 
\"\"\"\n self.write(\":single\")\n\n _digitize = Instrument.setting(\n \":DIGitize %s\",\n \"\"\" Acquire waveforms according to the settings of the :ACQuire commands and specified\n source, as a string parameter that can take the following values: \"channel1\", \"channel2\",\n \"function\", \"math\", \"fft\", \"abus\", or \"ext\". \"\"\",\n validator=strict_discrete_set,\n values={\"channel1\": \"CHAN1\", \"channel2\": \"CHAN2\", \"function\": \"FUNC\", \"math\": \"MATH\",\n \"fft\": \"FFT\", \"abus\": \"ABUS\", \"ext\": \"EXT\"},\n map_values=True\n )\n\n def digitize(self, source: str):\n \"\"\" Acquire waveforms according to the settings of the :ACQuire commands. Ensure a delay\n between the digitize operation and further commands, as timeout may be reached before\n digitize has completed.\n :param source: \"channel1\", \"channel2\", \"function\", \"math\", \"fft\", \"abus\", or \"ext\".\"\"\"\n self._digitize = source\n\n waveform_points_mode = Instrument.control(\n \":waveform:points:mode?\", \":waveform:points:mode %s\",\n \"\"\" A string parameter that sets the data record to be transferred with the waveform_data\n method. Can be \"normal\", \"maximum\", or \"raw\".\"\"\",\n validator=strict_discrete_set,\n values={\"normal\": \"NORM\", \"maximum\": \"MAX\", \"raw\": \"RAW\"},\n map_values=True\n )\n waveform_points = Instrument.control(\n \":waveform:points?\", \":waveform:points %d\",\n \"\"\" An integer parameter that sets the number of waveform points to be transferred with\n the waveform_data method. Can be any of the following values:\n 100, 250, 500, 1000, 2000, 5000, 10000, 20000, 50000, 62500.\n\n Note that the oscilloscope may provide fewer than the specified number of points. \"\"\",\n validator=strict_discrete_set,\n values=[100, 250, 500, 1000, 2000, 5000, 10000, 20000, 50000, 62500]\n )\n waveform_source = Instrument.control(\n \":waveform:source?\", \":waveform:source %s\",\n \"\"\" A string parameter that selects the analog channel, function, or reference waveform\n to be used as the source for the waveform methods. Can be \"channel1\", \"channel2\",\n \"function\", \"fft\", \"wmemory1\", \"wmemory2\", or \"ext\".\"\"\",\n validator=strict_discrete_set,\n values={\"channel1\": \"CHAN1\", \"channel2\": \"CHAN2\", \"function\": \"FUNC\", \"fft\": \"FFT\",\n \"wmemory1\": \"WMEM1\", \"wmemory2\": \"WMEM2\", \"ext\": \"EXT\"},\n map_values=True\n )\n waveform_format = Instrument.control(\n \":waveform:format?\", \":waveform:format %s\",\n \"\"\" A string parameter that controls how the data is formatted when sent from the\n oscilloscope. Can be \"ascii\", \"word\" or \"byte\". 
Words are transmitted in big endian by\n default.\"\"\",\n validator=strict_discrete_set,\n values={\"ascii\": \"ASC\", \"word\": \"WORD\", \"byte\": \"BYTE\"},\n map_values=True\n )\n\n @property\n def waveform_preamble(self):\n \"\"\" Get preamble information for the selected waveform source as a dict with the following keys:\n - \"format\": byte, word, or ascii (str)\n - \"type\": normal, peak detect, or average (str)\n - \"points\": nb of data points transferred (int)\n - \"count\": always 1 (int)\n - \"xincrement\": time difference between data points (float)\n - \"xorigin\": first data point in memory (float)\n - \"xreference\": data point associated with xorigin (int)\n - \"yincrement\": voltage difference between data points (float)\n - \"yorigin\": voltage at center of screen (float)\n - \"yreference\": data point associated with yorigin (int)\"\"\"\n return self._waveform_preamble()\n\n @property\n def waveform_data(self):\n \"\"\" Get the binary block of sampled data points transmitted using the IEEE 488.2 arbitrary\n block data format.\"\"\"\n # Other waveform formats raise UnicodeDecodeError\n self.waveform_format = \"ascii\"\n\n data = self.values(\":waveform:data?\")\n # Strip header from first data element\n data[0] = float(data[0][10:])\n\n return data\n\n ################\n # System Setup #\n ################\n\n @property\n def system_setup(self):\n \"\"\" A string parameter that sets up the oscilloscope. Must be in IEEE 488.2 format.\n It is recommended to only set a string previously obtained from this command.\"\"\"\n return self.ask(\":system:setup?\")\n\n @system_setup.setter\n def system_setup(self, setup_string):\n self.write(\":system:setup \" + setup_string)\n\n def ch(self, channel_number):\n if channel_number == 1:\n return self.ch1\n elif channel_number == 2:\n return self.ch2\n else:\n raise ValueError(\"Invalid channel number. Must be 1 or 2.\")\n\n def clear_status(self):\n \"\"\" Clear device status. \"\"\"\n self.write(\"*CLS\")\n\n def factory_reset(self):\n \"\"\" Factory default setup, no user settings remain unchanged. \"\"\"\n self.write(\"*RST\")\n\n def default_setup(self):\n \"\"\" Default setup, some user settings (like preferences) remain unchanged. \"\"\"\n self.write(\":SYSTem:PRESet\")\n\n def timebase_setup(self, mode=None, offset=None, horizontal_range=None, scale=None):\n \"\"\" Set up timebase. Unspecified parameters are not modified. Modifying a single parameter\n might impact other parameters. Refer to oscilloscope documentation and make multiple\n consecutive calls to channel_setup if needed.\n\n :param mode: Timebase mode, can be \"main\", \"window\", \"xy\", or \"roll\".\n :param offset: Offset in seconds between trigger and center of screen.\n :param horizontal_range: Full-scale range in seconds.\n :param scale: Units-per-division in seconds.\"\"\"\n\n if mode is not None:\n self.timebase_mode = mode\n if offset is not None:\n self.timebase_offset = offset\n if horizontal_range is not None:\n self.timebase_range = horizontal_range\n if scale is not None:\n self.timebase_scale = scale\n\n def download_image(self, format_=\"png\", color_palette=\"color\"):\n \"\"\" Get image of oscilloscope screen in bytearray of specified file format.\n\n :param format_: \"bmp\", \"bmp8bit\", or \"png\"\n :param color_palette: \"color\" or \"grayscale\"\n \"\"\"\n query = f\":DISPlay:DATA? 
{format_}, {color_palette}\"\n # Using binary_values query because default interface does not support binary transfer\n img = self.binary_values(query, header_bytes=10, dtype=np.uint8)\n return bytearray(img)\n\n def download_data(self, source, points=62500):\n \"\"\" Get data from specified source of oscilloscope. Returned objects are a np.ndarray of\n data values (no temporal axis) and a dict of the waveform preamble, which can be used to\n build the corresponding time values for all data points.\n\n Multimeter will be stopped for proper acquisition.\n\n :param source: measurement source, can be \"channel1\", \"channel2\", \"function\", \"fft\",\n \"wmemory1\", \"wmemory2\", or \"ext\".\n :param points: integer number of points to acquire. Note that oscilloscope may return fewer\n points than specified, this is not an issue of this library. Can be 100, 250, 500, 1000,\n 2000, 5000, 10000, 20000, 50000, or 62500.\n\n :return data_ndarray, waveform_preamble_dict: see waveform_preamble property for dict\n format.\n \"\"\"\n # TODO: Consider downloading from multiple sources at the same time.\n self.waveform_source = source\n self.waveform_points_mode = \"normal\"\n self.waveform_points = points\n\n preamble = self.waveform_preamble\n data_bytes = self.waveform_data\n return np.array(data_bytes), preamble\n\n def _timebase(self):\n \"\"\"\n Reads setup data from timebase and converts it to a more convenient dict of values.\n \"\"\"\n tb_setup_raw = self.ask(\":timebase?\").strip(\"\\n\")\n\n # tb_setup_raw hat the following format:\n # :TIM:MODE MAIN;REF CENT;MAIN:RANG +1.00E-03;POS +0.0E+00\n\n # Cut out the \":TIM:\" at beginning and split string\n tb_setup_splitted = tb_setup_raw[5:].split(\";\")\n\n # Create dict of setup parameters\n tb_setup = dict(map(lambda v: v.split(\" \"), tb_setup_splitted))\n\n # Convert values to specific type\n to_str = [\"MODE\", \"REF\"]\n to_float = [\"MAIN:RANG\", \"POS\"]\n for key in tb_setup:\n if key in to_str:\n tb_setup[key] = str(tb_setup[key])\n elif key in to_float:\n tb_setup[key] = float(tb_setup[key])\n\n return tb_setup\n\n def _waveform_preamble(self):\n \"\"\"\n Reads waveform preamble and converts it to a more convenient dict of values.\n \"\"\"\n vals = self.values(\":waveform:preamble?\")\n # Get values to dict\n vals_dict = dict(zip([\"format\", \"type\", \"points\", \"count\", \"xincrement\", \"xorigin\",\n \"xreference\", \"yincrement\", \"yorigin\", \"yreference\"], vals))\n # Map element values\n format_map = {0: \"BYTE\", 1: \"WORD\", 4: \"ASCII\"}\n type_map = {0: \"NORMAL\", 1: \"PEAK DETECT\", 2: \"AVERAGE\", 3: \"HRES\"}\n vals_dict[\"format\"] = format_map[int(vals_dict[\"format\"])]\n vals_dict[\"type\"] = type_map[int(vals_dict[\"type\"])]\n\n # Correct types\n to_int = [\"points\", \"count\", \"xreference\", \"yreference\"]\n to_float = [\"xincrement\", \"xorigin\", \"yincrement\", \"yorigin\"]\n for key in vals_dict:\n if key in to_int:\n vals_dict[key] = int(vals_dict[key])\n elif key in to_float:\n vals_dict[key] = float(vals_dict[key])\n\n return vals_dict\n" ]
[ [ "numpy.array" ] ]
AKSingh-Udacity/PySyft
[ "51679c1941f172713cd2ba0d080b531a1693a15e" ]
[ "syft/frameworks/torch/hook/hook_args.py" ]
[ "import torch\nimport syft as sy\nfrom syft.exceptions import RemoteTensorFoundError\nfrom syft.exceptions import PureTorchTensorFoundError\n\nfrom syft.exceptions import ResponseSignatureError\nfrom syft.frameworks.torch.tensors.interpreters import AutogradTensor\nfrom syft.frameworks.torch.tensors.interpreters import AbstractTensor\nfrom syft.frameworks.torch.tensors.interpreters import PointerTensor\nfrom syft.frameworks.torch.tensors.interpreters import TorchTensor\nfrom syft.frameworks.torch.tensors.interpreters import FixedPrecisionTensor\nfrom syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor\nfrom syft.frameworks.torch.tensors.interpreters import MultiPointerTensor\nfrom syft.frameworks.torch.tensors.decorators import LoggingTensor\n\nfrom typing import Callable\nfrom typing import Union\nfrom typing import Tuple\nfrom typing import List\n\n\nhook_method_args_functions = {}\nhook_method_response_functions = {}\nget_tensor_type_functions = {}\n\none = lambda _args: 1\n\n# dict to specify the action depending of the type found\ntype_rule = {\n list: lambda _args: [build_rule(a) for a in _args],\n tuple: lambda _args: tuple([build_rule(a) for a in _args]),\n dict: one, # FIXME This is for additiveShareTensor.child, it can be confusing and AST.child\n # should perhaps be of type ShareDict extending dict or something like this\n LoggingTensor: one,\n FixedPrecisionTensor: one,\n AutogradTensor: one,\n AdditiveSharingTensor: one,\n MultiPointerTensor: one,\n PointerTensor: one,\n torch.Tensor: one,\n torch.nn.Parameter: one,\n}\n\n# Dict to return the proper lambda function for the right torch or syft tensor type\nforward_func = {\n PointerTensor: lambda p: (_ for _ in ()).throw(RemoteTensorFoundError(p)),\n torch.Tensor: lambda i: i.child\n if hasattr(i, \"child\")\n else (_ for _ in ()).throw(PureTorchTensorFoundError),\n torch.nn.Parameter: lambda i: i.child\n if hasattr(i, \"child\")\n else (_ for _ in ()).throw(PureTorchTensorFoundError),\n LoggingTensor: lambda i: i.child,\n FixedPrecisionTensor: lambda i: i.child,\n AutogradTensor: lambda i: i.child,\n AdditiveSharingTensor: lambda i: i.child,\n MultiPointerTensor: lambda i: i.child,\n \"my_syft_tensor_type\": lambda i: i.child,\n}\n\n# Dict to return the proper lambda function for the right torch or syft tensor type\nbackward_func = {\n TorchTensor: lambda i: i.wrap(),\n torch.Tensor: lambda i: i.wrap(),\n torch.nn.Parameter: lambda i: torch.nn.Parameter(data=i),\n PointerTensor: lambda i: i,\n LoggingTensor: lambda i: LoggingTensor().on(i, wrap=False),\n FixedPrecisionTensor: lambda i, **kwargs: FixedPrecisionTensor(**kwargs).on(i, wrap=False),\n AutogradTensor: lambda i: AutogradTensor(data=i).on(i, wrap=False),\n AdditiveSharingTensor: lambda i, **kwargs: AdditiveSharingTensor(**kwargs).on(i, wrap=False),\n MultiPointerTensor: lambda i, **kwargs: MultiPointerTensor(**kwargs).on(i, wrap=False),\n \"my_syft_tensor_type\": lambda i, **kwargs: \"my_syft_tensor_type(**kwargs).on(i, wrap=False)\",\n}\n\n# methods that we really don't want to hook, for example because they have an arbitrary\n# number of tensors in args signature response\nexclude_methods = {\"__getitem__\", \"view\"}\nexclude_functions = {\"torch.unbind\", \"unbind\"}\n\n\ndef hook_method_args(attr, method_self, args, kwargs):\n \"\"\"Method arguments are sometimes simple types (such as strings or ints) but\n sometimes they are custom Syft tensors such as wrappers (torch.Tensor) or LoggingTensor\n or some other tensor type. 
Complex types (which have a .child attribute) need to\n have arguments converted from the arg to arg.child so that the types match as the\n method is being called down the chain. To make this efficient, we cache which args\n need to be replaced with their children in a dictionary called\n hook_method_args_functions. However, sometimes a method (an attr) has multiple\n different argument signatures, such that sometimes arguments have .child objects\n and other times they don't (such as x.div(), which can accept either a tensor or a\n float as an argument). This invalidates the cache, so we need to have a try/except\n which refreshes the cache if the signature triggers an error.\n\n Args:\n attr (str): the name of the method being called\n method_self: the tensor on which the method is being called\n args (list): the arguments being passed to the method\n kwargs (dict): the keyword arguments being passed to the function\n (these are not hooked, i.e. replaced with their .child attr)\n \"\"\"\n # Specify an id to distinguish methods from different classes\n # As they won't be used with the same arg types\n attr_id = type(method_self).__name__ + \".\" + attr\n\n try:\n assert attr not in exclude_methods\n\n # Load the utility function to transform the args\n hook_args = hook_method_args_functions[attr_id]\n # Try running it\n new_self, new_args = hook_args((method_self, args))\n\n except (IndexError, KeyError, AssertionError): # Update the function in case of an error\n args_hook_function, _ = build_hook_args_function((method_self, args))\n # Store this utility function in the registry\n hook_method_args_functions[attr_id] = args_hook_function\n # Run it\n new_self, new_args = args_hook_function((method_self, args))\n\n return new_self, new_args, kwargs\n\n\ndef hook_function_args(attr, args, kwargs, return_args_type=False):\n \"\"\"See hook_method_args for details\n\n Args:\n attr (str): the name of the function being called\n args (list): the arguments being passed to the function\n kwargs (dict): the keyword arguments being passed to the function\n (these are not hooked, i.e. replaced with their .child attr)\n return_args_type (bool): return the type of the tensors in the\n original arguments\n\n Returns:\n - the arguments where all tensors are replaced with their child\n - the type of this new child\n (- the type of the tensors in the arguments)\n \"\"\"\n try:\n # Load the utility function to transform the args\n # TODO rename registry or use another one than for methods\n hook_args = hook_method_args_functions[attr]\n get_tensor_type_function = get_tensor_type_functions[attr]\n # Try running it\n new_args = hook_args(args)\n\n except (IndexError, KeyError, AssertionError): # Update the function in case of an error\n args_hook_function, get_tensor_type_function = build_hook_args_function(\n args, return_tuple=True\n )\n # Store the utility functions in registries\n hook_method_args_functions[attr] = args_hook_function\n get_tensor_type_functions[attr] = get_tensor_type_function\n # Run it\n new_args = args_hook_function(args)\n\n new_type = get_tensor_type_function(new_args)\n if return_args_type:\n args_type = get_tensor_type_function(args)\n return new_args, kwargs, new_type, args_type\n else:\n return new_args, kwargs, new_type\n\n\ndef build_hook_args_function(args, return_tuple=False):\n \"\"\"\n Build the function f that hooks the arguments:\n f(args) = new_args\n \"\"\"\n # Inspect the call to find tensor arguments and return a rule whose\n # structure is the same as the args object, with 1 
where there was\n # (torch or syft) tensors and 0 when not (ex: number, str, ...)\n rule = build_rule(args)\n # Build a function with this rule to efficiently replace syft tensors\n # (but not pointer) with their child in the args objects\n args_hook_function = build_args_hook(args, rule, return_tuple)\n # Build a function with this rule to efficiently get the child type of the\n # tensor found in the args\n get_tensor_type_function = build_get_tensor_type(rule)\n return args_hook_function, get_tensor_type_function\n\n\ndef hook_response(attr, response, wrap_type, wrap_args={}, new_self=None):\n \"\"\"\n When executing a command, arguments are inspected and all tensors are replaced\n with their child attribute until a pointer or a torch tensor is found (for\n example an argument could be a torch wrapper with a child being a LoggingTensor, with\n a child being a torch tensor). When the result of the command is calculated,\n we need to rebuild this chain in the reverse order (in our example put back\n a LoggingTensor on top of the result and then a torch wrapper).\n To make this efficient, we cache which elements of the response (which can be more\n complicated with nested tuples for example) need to be wrapped in a dictionary called\n hook_method_response_functions. However, sometimes a method (an attr) has multiple\n different response signatures. This invalidates the cache, so we need to have a\n try/except which refreshes the cache if the signature triggers an error.\n\n Args:\n attr (str): the name of the method being called\n response (list or dict): the arguments being passed to the tensor\n wrap_type (type): the type of wrapper we'd like to have\n wrap_args (dict): options to give to the wrapper (for example the\n precision for the precision tensor)\n new_self: used for the case just below of inplace ops\n \"\"\"\n\n # inplace methods should just return new_self\n if \"__i\" == attr[0:3]:\n return new_self\n\n # TODO: Why do we need to cast it in a tuple? 
this is a (small) time waste\n response_is_tuple = isinstance(response, tuple)\n\n if wrap_type == torch.nn.Parameter:\n wrap_type = torch.Tensor\n\n # Add an artificial tuple\n if not response_is_tuple:\n response = (response, 1)\n\n hash_wrap_args = hash(frozenset(wrap_args.items()))\n attr_id = f\"{attr}@{wrap_type.__name__}.{response_is_tuple}.{hash_wrap_args}\"\n\n try:\n assert attr not in exclude_functions\n\n # Load the utility function to transform the args\n response_hook_function = hook_method_response_functions[attr_id]\n # Try running it\n new_response = response_hook_function(response)\n\n except (IndexError, KeyError, AssertionError): # Update the function in case of an error\n response_hook_function = build_hook_response_function(response, wrap_type, wrap_args)\n # Store this utility function in the registry\n hook_method_response_functions[attr_id] = response_hook_function\n # Run it\n new_response = response_hook_function(response)\n\n # Remove the artificial tuple\n if not response_is_tuple:\n new_response, _ = new_response\n\n return new_response\n\n\ndef build_hook_response_function(response, wrap_type, wrap_args):\n \"\"\"\n Build the function that hooks the response.\n\n Example:\n p is of type Pointer\n f is the hook_response_function\n then f(p) = (Wrapper)>Pointer\n \"\"\"\n # Inspect the call to find tensor arguments and return a rule whose\n # structure is the same as the response object, with 1 where there was\n # (torch or syft) tensors and 0 when not (ex: number, str, ...)\n rule = build_rule(response)\n # Build a function with this rule to efficiently replace syft tensors\n # (but not pointer) with their child in the args objects\n response_hook_function = build_response_hook(response, rule, wrap_type, wrap_args)\n return response_hook_function\n\n\ndef build_rule(args):\n \"\"\"\n Inspect the args object to find torch or syft tensor arguments and\n return a rule whose structure is the same as the args object,\n with 1 where there was (torch or syft) tensors and 0 when\n not (ex: number, str, ...)\n\n Example:\n in: ([tensor(1, 2), Pointer@bob], 42)\n out: ([1, 1], 0)\n \"\"\"\n\n type_args = type(args)\n if type_args in type_rule:\n return type_rule[type_args](args)\n else:\n return 0\n\n\ndef build_args_hook(args, rules, return_tuple=False):\n \"\"\"\n Build a function given some rules to efficiently replace in the args object\n syft tensors with their child (but not pointer as they don't have .child),\n and do nothing for other type of object including torch tensors, str,\n numbers, bool, etc.\n Pointers trigger an error which can be caught to get the location for\n forwarding the call.\n\n Args:\n args (tuple): the arguments given to the function / method\n rules (tuple): the same structure but with boolean, true when there is\n a tensor\n return_tuple (bool): force to return a tuple even with a single element\n\n Return:\n a function that replace syft arg in args with arg.child\n \"\"\"\n\n # get the transformation lambda for each args\n lambdas = [\n typed_identity(a) # return the same obj with an identity fct with a type check if needed\n if not r # if the rule is a number == 0.\n else build_args_hook(a, r, True) # If not, call recursively build_args_hook\n if isinstance(r, (list, tuple)) # if the rule is a list or tuple.\n # Last if not, rule is probably == 1 so use type to return the right transformation.\n else lambda i: forward_func[type(i)](i)\n for a, r in zip(args, rules) # And do this for all the args / rules provided\n ]\n\n # Instead of 
iterating, which is slow, we use a trick to efficiently\n # apply each lambda to each arg\n folds = {\n 0: zero_fold,\n 1: one_fold(return_tuple),\n 2: two_fold,\n 3: three_fold,\n 4: four_fold,\n 5: five_fold,\n 6: six_fold,\n 7: seven_fold,\n 8: eight_fold,\n }\n try:\n f = folds[len(lambdas)]\n except KeyError:\n f = many_fold\n\n return lambda x: f(lambdas, x)\n\n\ndef build_get_tensor_type(rules, layer=None):\n \"\"\"\n Build a function which uses some rules to efficiently find the first tensor in\n the args objects and return the type of its child.\n\n Args:\n rules (tuple): a skeleton object with the same structure as args but each tensor\n is replaced with a 1 and other types (int, str) with a 0\n layer (list or None): keep track of the path of inspection: each element in the list\n stands for one layer of depth into the object, and its value for the index\n in the current layer. See example for details\n\n Returns:\n a function returning a type\n\n Example:\n *Understanding the layer parameter*\n obj = (a, [b, (c, d)], e)\n the layer position is for:\n a: [0]\n b: [1, 0]\n c: [1, 1, 0]\n d: [1, 1, 1]\n e: [2]\n\n *Global behaviour example*\n rules = (0, [1, (0, 0)], 0)\n - First recursion level\n 0 found -> do nothing\n list found -> recursive call with layer = [1]\n - Second recursion level\n 1 found -> update layer to [1, 0]\n build the function x: type(x[1][0])\n break\n - Back to first recursion level\n save the function returned in the lambdas list\n 0 found -> do nothing\n exit loop\n return the first (and here unique) function\n\n\n \"\"\"\n # We keep note of the first layer or recursion level to return at the end\n # only one function and instantiate the layer list the first time\n first_layer = layer is None\n\n if first_layer:\n layer = []\n\n # Iteration through the rules object\n lambdas = []\n for i, r in enumerate(rules):\n if r == 1: # if a tensor is found\n layer.append(i)\n lambdas.append(\n # the layer object is given to build a getter to reach the\n # tensor position and then the type() is called on the obj found\n lambda a: type(get_element_at[len(layer)](*layer)(a))\n )\n # we only need one to get the type of all tensors as they should be the same\n break\n if isinstance(r, (list, tuple)): # we iterate recursively if necessary\n layer.append(i)\n lambdas += build_get_tensor_type(r, layer)\n\n if first_layer:\n return lambdas[0]\n else:\n return lambdas\n\n\n# Function helpers to convert [a, b, c, ...] 
-> obj[a][b][c][...]\ndef one_layer(idx1):\n return lambda l: l[idx1]\n\n\ndef two_layers(idx1, idx2):\n return lambda l: one_layer(idx2)(l[idx1])\n\n\ndef three_layers(idx1, *ids):\n return lambda l: two_layers(*ids)(l[idx1])\n\n\ndef four_layers(idx1, *ids):\n return lambda l: three_layers(*ids)(l[idx1])\n\n\nget_element_at = {1: one_layer, 2: two_layers, 3: three_layers, 4: four_layers}\n\n\ndef build_response_hook(response, rules, wrap_type, wrap_args, return_tuple=False):\n \"\"\"\n Build a function given some rules to efficiently replace in the response object\n syft or torch tensors with a wrapper, and do nothing for other types of object\n including str, numbers, bool, etc.\n\n Args:\n response: a response used to build the hook function\n rules: the same structure object but with booleans, true when it replaces\n a tensor\n return_tuple: force to return a tuple even with a single element\n\n Returns:\n a function to \"wrap\" the response\n \"\"\"\n\n # get the transformation lambda for each args\n lambdas = [\n (lambda i: i) # return the same object\n if not r # if the rule is a number == 0.\n else build_response_hook(\n a, r, wrap_type, wrap_args, True\n ) # If not, call recursively build_response_hook\n if isinstance(r, (list, tuple)) # if the rule is a list or tuple.\n # Last if not, rule is probably == 1 so use type to return the right transformation.\n else lambda i: backward_func[wrap_type](i, **wrap_args)\n for a, r in zip(response, rules) # And do this for all the responses / rules provided\n ]\n\n # Instead of iterating, which is slow, we use a trick to efficiently\n # apply each lambda to each arg\n folds = {\n 0: zero_fold,\n 1: one_fold(return_tuple),\n 2: two_fold,\n 3: three_fold,\n 4: four_fold,\n 5: five_fold,\n 6: six_fold,\n 7: seven_fold,\n 8: eight_fold,\n }\n try:\n f = folds[len(lambdas)]\n except KeyError:\n f = many_fold\n\n return lambda x: f(lambdas, x)\n\n\ndef zero_fold(*a, **k):\n return tuple()\n\n\ndef one_fold(return_tuple, **kwargs):\n def _one_fold(lambdas, args, **kwargs):\n return lambdas[0](args[0], **kwargs)\n\n def tuple_one_fold(lambdas, args):\n return (lambdas[0](args[0], **kwargs),)\n\n return {False: _one_fold, True: tuple_one_fold}[return_tuple]\n\n\ndef two_fold(lambdas, args, **kwargs):\n return lambdas[0](args[0], **kwargs), lambdas[1](args[1], **kwargs)\n\n\ndef three_fold(lambdas, args, **kwargs):\n return (\n lambdas[0](args[0], **kwargs),\n lambdas[1](args[1], **kwargs),\n lambdas[2](args[2], **kwargs),\n )\n\n\ndef four_fold(lambdas, args, **kwargs):\n return (\n lambdas[0](args[0], **kwargs),\n lambdas[1](args[1], **kwargs),\n lambdas[2](args[2], **kwargs),\n lambdas[3](args[3], **kwargs),\n )\n\n\ndef five_fold(lambdas, args, **kwargs):\n return (\n lambdas[0](args[0], **kwargs),\n lambdas[1](args[1], **kwargs),\n lambdas[2](args[2], **kwargs),\n lambdas[3](args[3], **kwargs),\n lambdas[4](args[4], **kwargs),\n )\n\n\ndef six_fold(lambdas, args, **kwargs):\n return (\n lambdas[0](args[0], **kwargs),\n lambdas[1](args[1], **kwargs),\n lambdas[2](args[2], **kwargs),\n lambdas[3](args[3], **kwargs),\n lambdas[4](args[4], **kwargs),\n lambdas[5](args[5], **kwargs),\n )\n\n\ndef seven_fold(lambdas, args, **kwargs):\n return (\n lambdas[0](args[0], **kwargs),\n lambdas[1](args[1], **kwargs),\n lambdas[2](args[2], **kwargs),\n lambdas[3](args[3], **kwargs),\n lambdas[4](args[4], **kwargs),\n lambdas[5](args[5], **kwargs),\n lambdas[6](args[6], **kwargs),\n )\n\n\ndef eight_fold(lambdas, args, **kwargs):\n return (\n 
lambdas[0](args[0], **kwargs),\n lambdas[1](args[1], **kwargs),\n lambdas[2](args[2], **kwargs),\n lambdas[3](args[3], **kwargs),\n lambdas[4](args[4], **kwargs),\n lambdas[5](args[5], **kwargs),\n lambdas[6](args[6], **kwargs),\n lambdas[7](args[7], **kwargs),\n )\n\n\ndef many_fold(lambdas, args, **kwargs):\n return tuple([lambdas[i](args[i], **kwargs) for i in range(len(lambdas))])\n\n\n# Add the possibility to make a type check in the identity function applied\n# On some arg which could be None or of another type.\n# Could add more checks but not sure it is needed so far.\n\n\ndef typed_identity(a):\n \"\"\"\n We need to add typed identity for arguments which can be either number\n or tensors. If the argument changes from an int to a tensor, the\n assertion error triggered by typed_identity will be caught and a\n new signature will be computed for the command.\n \"\"\"\n if a is None:\n\n def none_identity(i):\n assert i is None\n return i\n\n return none_identity\n\n elif type(a) in (int, float, bool):\n\n def number_identity(i):\n assert isinstance(i, type(a))\n return i\n\n return number_identity\n\n else:\n return lambda i: i\n\n\n# -- Fast way to register responses and transform tensors into pointers\n\nregister_response_functions = {}\n\n\ndef register_response(\n attr: str, response: object, response_ids: object, owner: sy.workers.AbstractWorker\n) -> object:\n \"\"\"\n When a remote worker executes a command sent by someone else, the response is\n inspected: all tensors are stored by this worker and a Pointer tensor is\n made for each of them.\n\n To make this efficient, we cache which elements of the response (which can be more\n complicated with nested tuples for example) need to be registered in the dict\n register_response_functions.\n\n However, sometimes a function (an attr) has multiple different response signatures.\n This invalidates the cache, so we need to have a try/except which refreshes the\n cache if the signature triggers an error.\n\n Args:\n attr (str): the name of the function being called\n response (object): the response of this function\n owner (BaseWorker): the worker which registers the tensors\n \"\"\"\n\n # TODO: Why do we need to cast it in a tuple? 
this is a (small) time waste\n response_is_tuple = isinstance(response, tuple)\n\n # Add an artificial tuple\n if not response_is_tuple:\n response = (response, 1)\n\n attr_id = \"{}\".format(attr)\n\n try:\n assert attr not in exclude_functions\n\n # Load the utility function to register the response and transform tensors into pointers\n register_response_function = register_response_functions[attr_id]\n # Try running it\n new_response = register_response_function(response, response_ids=response_ids, owner=owner)\n\n except (IndexError, KeyError, AssertionError): # Update the function in case of an error\n register_response_function = build_register_response_function(response)\n # Store this utility function in the registry\n register_response_functions[attr_id] = register_response_function\n # Run it\n new_response = register_response_function(response, response_ids=response_ids, owner=owner)\n\n # Remove the artificial tuple\n if not response_is_tuple:\n new_response, _ = new_response\n\n return new_response\n\n\ndef build_register_response_function(response: object) -> Callable:\n \"\"\"\n Build the function that registers the response and replaces tensors with pointers.\n\n Example:\n (1, tensor([1, 2])) is the response\n f is the register_response_function\n then f(p) = (1, (Wrapper)>Pointer)\n \"\"\"\n # Inspect the call to find tensor arguments and return a rule whose\n # structure is the same as the response object, with 1 where there was\n # (torch or syft) tensors and 0 when not (ex: number, str, ...)\n rule = build_rule(response)\n # Build a function with this rule to efficiently replace syft tensors\n # (but not pointer) with their child in the args objects\n response_hook_function = build_register_response(response, rule)\n return response_hook_function\n\n\ndef register_tensor(\n tensor: Union[torch.Tensor, AbstractTensor],\n response_ids: List = list(),\n owner: sy.workers.AbstractWorker = None,\n) -> None:\n \"\"\"\n Register a tensor\n\n Args:\n tensor: the tensor\n response_ids: list of ids where the tensor should be stored\n and each id is popped out when needed\n owner: the owner that makes the registration\n Returns:\n None (the tensor is registered on the owner in place)\n \"\"\"\n assert owner is not None\n tensor.owner = owner\n try:\n tensor.id = response_ids.pop(-1)\n except IndexError:\n raise ResponseSignatureError\n owner.register_obj(tensor)\n\n\ndef build_register_response(response: object, rules: Tuple, return_tuple: bool = False) -> Callable:\n \"\"\"\n Build a function given some rules to efficiently replace in the response object\n torch tensors with a pointer after they are registered, and do nothing for other\n types of object including str, numbers, bool, etc.\n\n Args:\n response: the response\n rules: the rule specifying where the tensors are\n return_tuple: force to return a tuple even with a single element\n Returns:\n The function to apply on generic responses\n \"\"\"\n\n # get the transformation lambda for each args\n lambdas = [\n (lambda i, **kwargs: i) # return the same object\n if not r # if the rule is a number == 0.\n else build_register_response(a, r, True) # If not, call recursively build_response_hook\n if isinstance(r, (list, tuple)) # if the rule is a list or tuple.\n # Last if not, rule is probably == 1 so use type to return the right transformation.\n else lambda i, **kwargs: register_tensor(i, **kwargs)\n for a, r in zip(response, rules) # And do this for all the responses / rules provided\n ]\n\n # Instead of iterating, which is slow, we use a trick to efficiently\n # apply 
each lambda to each arg\n folds = {\n 0: zero_fold,\n 1: one_fold(return_tuple),\n 2: two_fold,\n 3: three_fold,\n 4: four_fold,\n 5: five_fold,\n 6: six_fold,\n 7: seven_fold,\n 8: eight_fold,\n }\n try:\n f = folds[len(lambdas)]\n except KeyError:\n f = many_fold\n\n return lambda x, **kwargs: f(lambdas, x, **kwargs)\n" ]
[ [ "torch.nn.Parameter" ] ]
hnykda/kfsims
[ "0c814c1e6bc2881b72c74f519e693e74e9abde71" ]
[ "kfsims/network.py" ]
[ "import networkx as nx\nimport numpy as np\nimport types\n\nfrom kfsims.node import node_factory, observe_factory\nfrom kfsims.common import init_all\nfrom kfsims import noise\n\n\ndef make_node(measurements, cov, rho=0.9, tau=5, u=5):\n _, xk, P, _, _, _, _, H, F, Q, N = init_all()\n U = cov * (u - 3)\n nd = node_factory(xk, P, u, U, F, Q, H, rho, tau,\n observe_factory(measurements.T), 10)\n return nd\n\n\ndef gen_noise(N, trj, seed, mod=20, halves=3):\n np.random.seed(seed)\n s = noise.sin_noise(N, halves, shift=seed)\n n_sin = noise.static_noise(N, mod=mod) * s\n msrms = trj.Y + n_sin.T\n return msrms\n\n\ndef create_nodes(n_nodes, trj, cov_init, seed_mod=0):\n nodes = []\n for i in range(n_nodes):\n msrms = gen_noise(trj.X.shape[-1], trj, i + seed_mod)\n nd = make_node(msrms, cov_init)\n nodes.append(nd)\n return nodes\n\n\ndef _get_neighbors_att(cluster, prior_pref):\n res = []\n for ngh in cluster:\n res.append(getattr(ngh, prior_pref + '_prior').hp)\n return res\n\n\ndef fuse_parameters(params_):\n params = np.array(params_)\n return np.mean(params, axis=0)\n\n\ndef get_cluster_params(cluster):\n Ps = _get_neighbors_att(cluster, 'P')\n Rs = _get_neighbors_att(cluster, 'R')\n new_P = fuse_parameters(Ps)\n new_R = fuse_parameters(Rs)\n return new_P, new_R\n\n\ndef update_nodes_neighbors_cluster(G, in_queue):\n \"\"\"\n TODO: Seems, that the following fives same good results\n as when it's not commented out.\n \"\"\"\n node = in_queue.pop()\n cluster = (set(G.neighbors(node)) & in_queue) | {node}\n hyp_P, hyp_R = get_cluster_params(cluster)\n for n in cluster:\n if n.R_updated != True:\n n.R_prior.hp = hyp_R\n n.P_prior.hp = hyp_P\n n.log('R_post', n.R_prior.expect())\n n.log('P_post', n.P_prior.expect())\n n.R_updated = True\n return in_queue# - cluster\n\n\ndef update_hyperparams(self):\n in_queue = set(np.random.permutation(self))\n\n for nod in in_queue:\n nod.R_updated = False\n\n while in_queue:\n in_queue = update_nodes_neighbors_cluster(self, in_queue)\n\n\ndef single_kf_for_all(self):\n for node in self.nodes:\n node.single_kf(next(node))\n\n\ndef vbadkf_step(self):\n self._single_kf()\n self.diffuse_hyperparams()\n\n\ndef collect_rmse(self, true):\n mean = []\n std = []\n for node in self.nodes:\n mn, st = node.rmse_stats(true)\n mean.append(mn)\n std.append(st)\n return np.mean(mean, axis=0).round(4), np.mean(std, axis=0).round(4)\n\n\ndef kf_no_diffusion(net, trj):\n for i in range(trj.shape[1]):\n net._single_kf()\n return net.collect_rmse(trj)\n\n\ndef kf_no_diffusion2(self, trj):\n # init\n xs = []\n Ps = []\n\n for node in self.nodes:\n xs.append(node.last_state)\n Ps.append(node.P_prior.expect())\n\n invPs = [np.linalg.inv(P_s) for P_s in Ps]\n P = np.linalg.inv(np.sum(invPs, axis=0))\n\n consensus_x = []\n for i in range(trj.shape[1]):\n self._single_kf()\n\n xs = []\n Ps = []\n for node in self.nodes:\n xs.append(node.last_state)\n Ps.append(node.P_prior.expect())\n\n invPs = [np.linalg.inv(P_s) for P_s in Ps]\n x = P @ np.sum([P_s @ x_s for P_s, x_s in zip(invPs, xs)], axis=0)\n P = np.linalg.inv(np.sum(invPs, axis=0))\n consensus_x.append(x)\n\n return np.mean((np.array(consensus_x) - trj.X.T) ** 2, axis=0)\n\n\ndef kf_w_diffusion(net, trj):\n for i in range(trj.shape[1]):\n net.diffused_single_kf()\n return net.collect_rmse(trj)\n\n\ndef create_network(nodes, k=4):\n G_ = nx.random_regular_graph(k, len(nodes))\n G = nx.relabel_nodes(G_, {ix: nodes[ix] for ix in range(len(nodes))})\n\n G._single_kf = types.MethodType(single_kf_for_all, G)\n G.diffuse_hyperparams 
= types.MethodType(update_hyperparams, G)\n G.diffused_single_kf = types.MethodType(vbadkf_step, G)\n\n G.collect_rmse = types.MethodType(collect_rmse, G)\n\n G.kf_no_diffusion = types.MethodType(kf_no_diffusion, G)\n G.kf_no_diffusion2 = types.MethodType(kf_no_diffusion2, G)\n G.kf_w_diffusion = types.MethodType(kf_w_diffusion, G)\n\n return G\n\ndef create_w_nodes(n_nodes, trj, cov_init, seed_mod=0):\n nodes = create_nodes(n_nodes, trj, cov_init, seed_mod=seed_mod)\n G = create_network(nodes)\n return G" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.sum", "numpy.random.permutation", "numpy.mean", "numpy.linalg.inv" ] ]
philippeitis/versa
[ "36f1bba5171945bc7a7370d9a7adf380c3fd9806" ]
[ "src/shapenet.py" ]
[ "import os\n\nimport numpy as np\n\n\"\"\"\n Supporting methods for data handling\n\"\"\"\n\n\ndef shuffle_batch(images, labels):\n \"\"\"\n Return a shuffled batch of data\n \"\"\"\n permutation = np.random.permutation(images.shape[0])\n return images[permutation], labels[permutation]\n\n\ndef convert_index_to_angle(index, num_instances_per_item):\n \"\"\"\n Convert the index of an image to a representation of the angle\n :param index: index to be converted\n :param num_instances_per_item: number of images for each item\n :return: a biterion representation of the angle\n \"\"\"\n degrees_per_increment = 360. / num_instances_per_item\n angle = index * degrees_per_increment\n angle_radians = np.deg2rad(angle)\n return angle, np.sin(angle_radians), np.cos(angle_radians)\n\n\nclass ShapeNetData:\n \"\"\"\n Class to handle ShapeNet dataset. Loads from numpy data as saved in data folder.\n \"\"\"\n\n def __init__(self, path, num_instances_per_item, train_fraction, val_fraction, seed, mode):\n \"\"\"\n Initialize object to handle shapenet data\n :param path: directory of numpy file with preprocessed ShapeNet arrays.\n :param num_instances_per_item: Number of views of each model in the dataset.\n :param train_fraction: Fraction of models used for training.\n :param val_fraction: Fraction of models used for validation.\n :param seed: random seed for selecting data.\n :param mode: indicates either train or test.\n \"\"\"\n self.image_height = 32\n self.image_width = 32\n self.image_channels = 1\n self.angle_dimensionality = 3\n self.has_validation_set = True\n\n # concatenate all the categories\n categories = ['02691156', '02828884', '02933112', '02958343', '02992529', '03001627', '03211117',\n '03636649', '03691459', '04256520', '04379243', '04530566']\n\n data = None\n for category in categories:\n file = os.path.join(path, '{0:s}.npy'.format(category))\n if data is None: # first time through\n data = np.load(file)\n else:\n data = np.concatenate((data, np.load(file)), axis=0)\n\n self.instances_per_item = num_instances_per_item\n self.total_items = data.shape[0]\n self.mode = mode\n train_size = int(train_fraction * self.total_items)\n val_size = int(val_fraction * self.total_items)\n print(\"Training Set Size = {0:d}\".format(train_size))\n print(\"Validation Set Size = {0:d}\".format(val_size))\n print(\"Test Set Size = {0:d}\".format(self.total_items - train_size - val_size))\n np.random.seed(seed)\n np.random.shuffle(data)\n self.train_images, self.train_item_indices, self.train_item_angles = self.__extract_data(data[:train_size])\n self.validation_images, self.validation_item_indices, self.validation_item_angles = \\\n self.__extract_data(data[train_size:train_size + val_size])\n self.test_images, self.test_item_indices, self.test_item_angles = \\\n self.__extract_data(data[train_size + val_size:])\n self.train_item_sets = np.max(self.train_item_indices)\n self.validation_item_sets = np.max(self.validation_item_indices)\n self.test_item_sets = np.max(self.test_item_indices)\n if self.mode == 'test':\n self.test_item_permutation = np.random.permutation(self.test_item_sets)\n self.test_counter = 0\n\n def get_image_height(self):\n return self.image_height\n\n def get_image_width(self):\n return self.image_width\n\n def get_image_channels(self):\n return self.image_channels\n\n def get_angle_dimensionality(self):\n return self.angle_dimensionality\n\n def get_has_validation_set(self):\n return self.has_validation_set\n\n def get_batch(self, source, tasks_per_batch, shot):\n \"\"\"\n Wrapper 
function for batching in the model.\n :param source: train, validation or test (string).\n :param tasks_per_batch: number of tasks to include in batch.\n :param shot: number of training examples per class.\n :return: tuple of np arrays (train images, test images, train angles, test angles) representing a batch of tasks.\n \"\"\"\n if source == 'train':\n return self.__yield_random_task_batch(tasks_per_batch, self.train_images, self.train_item_angles,\n self.train_item_indices, shot)\n elif source == 'validation':\n return self.__yield_random_task_batch(tasks_per_batch, self.validation_images, self.validation_item_angles,\n self.validation_item_indices, shot)\n elif source == 'test':\n return self.__yield_random_task_batch(tasks_per_batch, self.test_images, self.test_item_angles,\n self.test_item_indices, shot)\n\n def __yield_random_task_batch(self, num_tasks_per_batch, images, angles, item_indices, num_train_instances):\n \"\"\"\n Generate a batch of tasks from the image set.\n :param num_tasks_per_batch: number of tasks per batch.\n :param images: image set to generate batch from.\n :param angles: associated angle for each image.\n :param item_indices: item index to which each image belongs.\n :param num_train_instances: number of training images per class.\n :return: a batch of tasks.\n \"\"\"\n train_images_to_return, test_images_to_return = [], []\n train_angles_to_return, test_angles_to_return = [], []\n for task in range(num_tasks_per_batch):\n images_train, images_test, labels_train, labels_test = \\\n self.__generate_random_task(images, angles, item_indices, num_train_instances)\n train_images_to_return.append(images_train)\n test_images_to_return.append(images_test)\n train_angles_to_return.append(labels_train)\n test_angles_to_return.append(labels_test)\n return np.array(train_images_to_return), np.array(test_images_to_return), \\\n np.array(train_angles_to_return), np.array(test_angles_to_return)\n\n def __generate_random_task(self, images, angles, item_indices, num_train_instances):\n \"\"\"\n Randomly generate a task from the image set.\n :param images: image set to generate batch from.\n :param angles: associated angle for each image.\n :param item_indices: item index to which each image belongs.\n :param num_train_instances: number of training images per class.\n :return: tuple containing train and test images and labels for a task.\n \"\"\"\n if self.mode == 'test':\n task_item = self.test_item_permutation[self.test_counter]\n self.test_counter = self.test_counter + 1\n else:\n task_item = np.random.choice(np.unique(item_indices))\n permutation = np.random.permutation(self.instances_per_item)\n item_images = images[np.where(item_indices == task_item)[0]][permutation]\n item_angles = angles[np.where(item_indices == task_item)[0]][permutation]\n train_images, train_angles = item_images[:num_train_instances], item_angles[:num_train_instances]\n test_images, test_angles = item_images[num_train_instances:], item_angles[num_train_instances:]\n train_images_to_return, train_angles_to_return = shuffle_batch(train_images, train_angles)\n test_images_to_return, test_angles_to_return = shuffle_batch(test_images, test_angles)\n return train_images_to_return, test_images_to_return, train_angles_to_return, test_angles_to_return\n\n def __extract_data(self, data):\n \"\"\"\n Unpack ShapeNet data.\n \"\"\"\n images, item_indices, item_angles = [], [], []\n for item_index, item in enumerate(data):\n for m, instance in enumerate(item):\n images.append(instance[0])\n item_indices.append(item_index)\n item_angles.append(convert_index_to_angle(instance[2], self.instances_per_item))\n images = 
np.reshape(np.array(images), (len(images), self.image_height, self.image_width, self.image_channels))\n indices, angles = np.array(item_indices), np.array(item_angles)\n return images, indices, angles\n" ]
[ [ "numpy.max", "numpy.sin", "numpy.array", "numpy.random.seed", "numpy.random.permutation", "numpy.load", "numpy.random.shuffle", "numpy.where", "numpy.cos", "numpy.deg2rad", "numpy.unique" ] ]
antonia-ms/NeuroLang-1
[ "131595437ae59056f1e10a6664bb88e80d9a6230" ]
[ "neurolang/probabilistic/cplogic/testing.py" ]
[ "import contextlib\nimport itertools\n\nimport numpy as np\n\nfrom ...expression_pattern_matching import add_match\nfrom ...expression_walker import PatternWalker\nfrom ...expressions import Constant, Symbol\nfrom ...relational_algebra import (\n ColumnStr,\n NamedRelationalAlgebraFrozenSet,\n NaturalJoin,\n Projection,\n RenameColumn,\n Selection,\n str2columnstr_constant,\n)\nfrom ...relational_algebra_provenance import (\n ProvenanceAlgebraSet,\n RelationalAlgebraProvenanceCountingSolver,\n)\nfrom .cplogic_to_gm import CPLogicGroundingToGraphicalModelTranslator\nfrom .gm_provenance_solver import (\n TRUE,\n CPLogicGraphicalModelProvenanceSolver,\n ProbabilityOperation,\n SelectionOutPusher,\n TupleEqualSymbol,\n TupleSymbol,\n UnionOverTuples,\n UnionRemover,\n rename_columns_for_args_to_match,\n)\nfrom .grounding import get_grounding_predicate, ground_cplogic_program\n\n\ndef build_gm(cpl_program):\n grounded = ground_cplogic_program(cpl_program)\n translator = CPLogicGroundingToGraphicalModelTranslator()\n gm = translator.walk(grounded)\n return gm\n\n\ndef get_named_relation_tuples(relation):\n if isinstance(relation, Constant):\n relation = relation.value\n return set(tuple(x) for x in relation)\n\n\ndef eq_prov_relations(pas1, pas2):\n assert isinstance(pas1, ProvenanceAlgebraSet)\n assert isinstance(pas2, ProvenanceAlgebraSet)\n assert (\n pas1.value.projection(*pas1.non_provenance_columns).to_unnamed()\n == pas2.value.projection(*pas2.non_provenance_columns).to_unnamed()\n )\n # ensure the prov col names are different so we can join the sets\n c1 = Symbol.fresh().name\n c2 = Symbol.fresh().name\n x1 = pas1.value.rename_column(pas1.provenance_column, c1)\n x2 = pas2.value.rename_column(pas2.provenance_column, c2)\n joined = x1.naturaljoin(x2)\n probs = list(joined.projection(*(c1, c2)))\n for p1, p2 in probs:\n if isinstance(p1, float) and isinstance(p2, float):\n if not np.isclose(p1, p2):\n return False\n elif p1 != p2:\n return False\n return True\n\n\ndef make_prov_set(iterable, columns):\n return ProvenanceAlgebraSet(\n NamedRelationalAlgebraFrozenSet(columns, iterable),\n ColumnStr(columns[0]),\n )\n\n\nclass TestRAPToLaTeXTranslator(PatternWalker):\n def __init__(self, cpl_program, graphical_model):\n self.cpl_program = cpl_program\n self.graphical_model = graphical_model\n self.fresh_symbol_renames = dict()\n self.fresh_symbol_rename_count = 1\n self.colors = itertools.cycle(\n [\"blue\", \"red\", \"pink\", \"teal\", \"olive\", \"magenta\", \"cyan\"]\n )\n\n def prettify(self, exp):\n name = exp.value if isinstance(exp, Constant) else exp.name\n if not name.startswith(\"fresh_\"):\n return name\n if name in self.fresh_symbol_renames:\n return self.fresh_symbol_renames[name]\n if isinstance(exp, TupleSymbol):\n prefix = \"\\\\nu\"\n elif isinstance(exp, Constant[ColumnStr]):\n prefix = \"c\"\n else:\n prefix = \"s\"\n new_name = \"{}_{{{}}}\".format(prefix, self.fresh_symbol_rename_count)\n self.fresh_symbol_renames[name] = new_name\n self.fresh_symbol_rename_count += 1\n return new_name\n\n @add_match(Projection)\n def projection(self, op):\n inner = self.walk(op.relation)\n inner = \"\\n\".join(\" \" + x for x in inner.split(\"\\n\"))\n return (\n \"\\\\pi_{\"\n + \", \".join(self.prettify(c) for c in op.attributes)\n + \"}\"\n + \"\\n\\\\left(\\n\"\n + inner\n + \"\\n\\\\right)\"\n )\n\n @add_match(RenameColumn)\n def rename_column(self, op):\n inner = self.walk(op.relation)\n inner = \"\\n\".join(\" \" + x for x in inner.split(\"\\n\"))\n return (\n \"\\\\rho_{\"\n + 
self.prettify(op.src)\n + \" / \"\n + self.prettify(op.dst)\n + \"}\"\n + \"\\n\\\\left(\\n\"\n + inner\n + \"\\n\\\\right)\"\n )\n\n @add_match(Selection(..., TupleEqualSymbol))\n def selection_by_tuple_symbol(self, op):\n inner = self.walk(op.relation)\n inner = \"\\n\".join(\" \" + x for x in inner.split(\"\\n\"))\n return (\n \"\\\\sigma_{\"\n + \"({}) = {}\".format(\n \", \".join(self.prettify(c) for c in op.formula.columns),\n self.prettify(op.formula.tuple_symbol),\n )\n + \"}\"\n + \"\\n\\\\left(\\n\"\n + inner\n + \"\\n\\\\right)\"\n )\n\n @add_match(Selection)\n def selection(self, op):\n inner = self.walk(op.relation)\n inner = \"\\n\".join(\" \" + x for x in inner.split(\"\\n\"))\n return (\n \"\\\\sigma_{\"\n + \"{} = {}\".format(\n self.prettify(op.formula.args[0]),\n self.prettify(op.formula.args[1]),\n )\n + \"}\"\n + \"\\n\\\\left(\\n\"\n + inner\n + \"\\n\\\\right)\"\n )\n\n @add_match(NaturalJoin)\n def naturaljoin(self, op):\n left = self.walk(op.relation_left)\n right = self.walk(op.relation_right)\n left = \"\\n\".join(\" \" + x for x in left.split(\"\\n\"))\n right = \"\\n\".join(\" \" + x for x in right.split(\"\\n\"))\n color1 = next(self.colors)\n color2 = next(self.colors)\n return (\n \"\\\\left[\\n\"\n + \"{\\\\color{\"\n + color1\n + \"}\\n\"\n + left\n + \"}\"\n + \"\\n\\\\right]\\n\"\n + \"\\\\bowtie\\n\"\n + \"\\\\left[\\n\"\n + \"{\\\\color{\"\n + color2\n + \"}\\n\"\n + right\n + \"}\"\n + \"\\n\\\\right]\"\n )\n\n @add_match(UnionOverTuples)\n def union_over_tuples(self, op):\n pred_symb = get_grounding_predicate(\n op.__debug_expression__\n ).functor.name\n inner = self.walk(op.relation)\n inner = \"\\n\".join(\" \" + x for x in inner.split(\"\\n\"))\n return (\n \"\\\\bigcup_{\"\n + self.prettify(op.tuple_symbol)\n + \"\\\\in \\\\mathcal{{{}}}}}\".format(pred_symb)\n + \"\\n\\\\left\\\\{\\n\"\n + inner\n + \"\\n\\\\right\\\\}\"\n )\n\n @add_match(ProvenanceAlgebraSet)\n def provenance_algebra_set(self, prov_set):\n if not hasattr(prov_set, \"__debug_expression__\"):\n raise RuntimeError(\n \"Cannot convert to LaTeX without expression information \"\n \"stored in __debug_expression__ attribute\"\n )\n pred = get_grounding_predicate(prov_set.__debug_expression__)\n string = f\"\\\\mathcal{{{pred.functor.name}}}\"\n if hasattr(prov_set, \"__debug_alway_true__\"):\n string += \"_1\"\n return string\n\n\ndef rap_expression_to_latex(exp, cpl_program, graphical_model):\n walker = TestRAPToLaTeXTranslator(cpl_program, graphical_model)\n latex = walker.walk(exp)\n return latex\n\n\ndef inspect_resolution(qpred, cpl_program, tex_out_path=None):\n grounded = ground_cplogic_program(cpl_program)\n translator = CPLogicGroundingToGraphicalModelTranslator()\n gm = translator.walk(grounded)\n qpred_symb = qpred.functor\n qpred_args = qpred.args\n solver = CPLogicGraphicalModelProvenanceSolver(gm)\n query_node = gm.get_node(qpred_symb)\n exp = solver.walk(ProbabilityOperation((query_node, TRUE), tuple()))\n result_args = get_grounding_predicate(query_node.expression).args\n exp = rename_columns_for_args_to_match(exp, result_args, qpred_args)\n gm = build_gm(cpl_program)\n spusher = SelectionOutPusher()\n sexp = spusher.walk(exp)\n uremover = UnionRemover()\n uexp = uremover.walk(sexp)\n if tex_out_path is not None:\n latex = rap_expression_to_latex(exp, cpl_program, gm)\n slatex = rap_expression_to_latex(sexp, cpl_program, gm)\n ulatex = rap_expression_to_latex(uexp, cpl_program, gm)\n with open(tex_out_path, \"w\") as f:\n f.write(\"\\n\\\\\\\\\\n\".join([latex, 
slatex, ulatex]) + \"\\n\")\n result = Projection(\n uexp, tuple(str2columnstr_constant(arg.name) for arg in qpred_args)\n )\n solver = RelationalAlgebraProvenanceCountingSolver()\n result = solver.walk(result)\n return exp, result\n\n\[email protected]\ndef temp_seed(seed):\n state = np.random.get_state()\n np.random.seed(seed)\n try:\n yield\n finally:\n np.random.set_state(state)\n" ]
[ [ "numpy.random.seed", "numpy.random.get_state", "numpy.isclose", "numpy.random.set_state" ] ]
ManuelVs/NeuralNetworks
[ "464a8acb9c019eb1591a0aa940a8bfc6f8c7121d" ]
[ "fen/classifier.py" ]
[ "import tensorflow as tf\n\n\nclass Classifier:\n\n def __init__(self, model, input_length, output_length):\n self.model = model\n self.input_length = input_length\n self.output_length = output_length\n\n def compile(self, batch_size=32):\n self._ds_x = tf.placeholder(tf.float32, [None, self.input_length])\n self._ds_y = tf.placeholder(tf.float32, [None, self.output_length])\n\n ds = tf.data.Dataset.from_tensor_slices((self._ds_x, self._ds_y))\n ds = ds.batch(batch_size)\n\n self._ds_it = ds.make_initializable_iterator()\n self._input, self._labels = self._ds_it.get_next()\n\n self._features = self.model(self._input)\n self._output = _create_dense_layer(self._features, self.output_length)\n\n self._create_acc_computations()\n self._create_backpropagation()\n\n self._session = tf.Session()\n self._session.run(tf.global_variables_initializer())\n self._session.run(tf.local_variables_initializer())\n\n def _create_acc_computations(self):\n self._predictions = tf.argmax(self._output, 1)\n labels = tf.argmax(self._labels, 1)\n self._accuracy = tf.reduce_mean(\n tf.cast(tf.equal(self._predictions, labels), 'float32'))\n\n def _create_backpropagation(self):\n losses = tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=self._output,\n labels=self._labels)\n self._loss = tf.reduce_mean(losses)\n\n optimizer = tf.train.AdamOptimizer(0.001)\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n grads_and_vars = optimizer.compute_gradients(self._loss)\n\n self._train_op = optimizer.apply_gradients(\n grads_and_vars, global_step=global_step)\n\n def summary(self):\n print('input:', self._input.shape)\n self.model.summary()\n print('output:', self._output.shape)\n\n def train(self, X_train, y_train, X_eval, y_eval, epochs=10):\n import time\n\n for e in range(epochs):\n start_time = time.time()\n loss, acc = self._train(X_train, y_train)\n duration = time.time() - start_time\n\n val_loss, val_acc = self._eval(X_eval, y_eval)\n\n output = 'Epoch: {}, loss = {:.4f}, acc = {:.4f}, val_loss = {:.4f}, val_acc = {:.4f}, Time = {:.2f}s'\n print(output.format(e + 1, loss, acc, val_loss, val_acc, duration))\n # endfor\n\n def _train(self, X_train, y_train):\n import numpy as np\n\n self._session.run(\n fetches=self._ds_it.initializer,\n feed_dict={\n self._ds_x: X_train,\n self._ds_y: y_train\n })\n loss, acc, = [], []\n while True:\n try:\n _, vloss, vacc = self._session.run(\n fetches=[self._train_op, self._loss, self._accuracy])\n\n loss.append(vloss)\n acc.append(vacc)\n except tf.errors.OutOfRangeError:\n break\n # endwhile\n\n loss, acc = np.mean(loss), np.mean(acc)\n return loss, acc\n\n def _eval(self, X_val, y_val):\n self._session.run(\n fetches=self._ds_it.initializer,\n feed_dict={\n self._ds_x: X_val,\n self._ds_y: y_val\n })\n\n loss, acc, = 0, 0\n while True:\n try:\n l, vloss, vacc = self._session.run(\n fetches=[self._labels, self._loss, self._accuracy])\n\n loss += vloss * len(l)\n acc += vacc * len(l)\n except tf.errors.OutOfRangeError:\n break\n\n return loss / len(X_val), acc / len(X_val)\n\n def predict(self, X):\n import numpy as np\n\n self._session.run(self._ds_it.initializer,\n feed_dict={\n self._ds_x: X,\n self._ds_y: np.empty((len(X), self.output_length))\n }\n )\n\n pred = list()\n while True:\n try:\n ppred = self._session.run(tf.nn.softmax(self._output))\n\n pred.extend(map(lambda l: l.tolist(), ppred))\n except tf.errors.OutOfRangeError:\n break\n\n return pred\n\ndef _create_dense_layer(x, output_length):\n '''Creates a dense layer\n '''\n input_size = 
x.shape[1].value\n W = tf.Variable(\n initial_value=tf.truncated_normal(\n shape=[input_size, output_length],\n stddev=0.1))\n b = tf.Variable(\n initial_value=tf.truncated_normal(\n shape=[output_length]))\n\n dense = tf.nn.xw_plus_b(x, W, b)\n\n return dense\n\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "tensorflow.local_variables_initializer", "tensorflow.train.AdamOptimizer", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.argmax", "tensorflow.Session", "tensorflow.Variable", "tensorflow.equal", "numpy.mean", "tensorflow.truncated_normal", "tensorflow.placeholder", "tensorflow.nn.softmax", "tensorflow.nn.xw_plus_b", "tensorflow.reduce_mean", "tensorflow.global_variables_initializer", "tensorflow.nn.softmax_cross_entropy_with_logits_v2" ] ]
hu-minghao/Whale_best_practices
[ "4300821500d8dfd386f3ba32506f06dd2b8009b7" ]
[ "inception_V3/retrain.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Simple transfer learning with Inception v3 or Mobilenet models.\n\nWith support for TensorBoard.\n\nThis example shows how to take a Inception v3 or Mobilenet model trained on\nImageNet images, and train a new top layer that can recognize other classes of\nimages.\n\nThe top layer receives as input a 2048-dimensional vector (1001-dimensional for\nMobilenet) for each image. We train a softmax layer on top of this\nrepresentation. Assuming the softmax layer contains N labels, this corresponds\nto learning N + 2048*N (or 1001*N) model parameters corresponding to the\nlearned biases and weights.\n\nHere's an example, which assumes you have a folder containing class-named\nsubfolders, each full of images for each label. The example folder flower_photos\nshould have a structure like this:\n\n~/flower_photos/daisy/photo1.jpg\n~/flower_photos/daisy/photo2.jpg\n...\n~/flower_photos/rose/anotherphoto77.jpg\n...\n~/flower_photos/sunflower/somepicture.jpg\n\nThe subfolder names are important, since they define what label is applied to\neach image, but the filenames themselves don't matter. Once your images are\nprepared, you can run the training with a command like this:\n\n\n```bash\nbazel build tensorflow/examples/image_retraining:retrain && \\\nbazel-bin/tensorflow/examples/image_retraining/retrain \\\n --image_dir ~/flower_photos\n```\n\nOr, if you have a pip installation of tensorflow, `retrain.py` can be run\nwithout bazel:\n\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos\n```\n\nYou can replace the image_dir argument with any folder containing subfolders of\nimages. The label for each image is taken from the name of the subfolder it's\nin.\n\nThis produces a new model file that can be loaded and run by any TensorFlow\nprogram, for example the label_image sample code.\n\nBy default this script will use the high accuracy, but comparatively large and\nslow Inception v3 model architecture. It's recommended that you start with this\nto validate that you have gathered good training data, but if you want to deploy\non resource-limited platforms, you can try the `--architecture` flag with a\nMobilenet model. For example:\n\nRun floating-point version of mobilenet:\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos --architecture mobilenet_1.0_224\n```\n\nRun quantized version of mobilenet:\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos/ --architecture mobilenet_1.0_224_quantized\n```\n\nThere are 32 different Mobilenet models to choose from, with a variety of file\nsize and latency options. 
The first number can be '1.0', '0.75', '0.50', or\n'0.25' to control the size, and the second controls the input image size, either\n'224', '192', '160', or '128', with smaller sizes running faster. See\nhttps://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\nfor more information on Mobilenet.\n\nTo use with TensorBoard:\n\nBy default, this script will log summaries to /tmp/retrain_logs directory\n\nVisualize the summaries with this command:\n\ntensorboard --logdir /tmp/retrain_logs\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport hashlib\nimport os.path\nimport random\nimport re\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow.contrib.quantize.python import quant_ops\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\nFLAGS = None\n\n# These are all parameters that are tied to the particular model architecture\n# we're using for Inception v3. These include things like tensor names and their\n# sizes. If you want to adapt this script to work with another model, you will\n# need to update these to reflect the values in the network you're using.\nMAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M\n\n\ndef create_image_lists(image_dir, testing_percentage, validation_percentage):\n \"\"\"Builds a list of training images from the file system.\n\n Analyzes the sub folders in the image directory, splits them into stable\n training, testing, and validation sets, and returns a data structure\n describing the lists of images for each label and their paths.\n\n Args:\n image_dir: String path to a folder containing subfolders of images.\n testing_percentage: Integer percentage of the images to reserve for tests.\n validation_percentage: Integer percentage of images reserved for validation.\n\n Returns:\n A dictionary containing an entry for each label subfolder, with images split\n into training, testing, and validation sets within each label.\n \"\"\"\n if not gfile.Exists(image_dir):\n tf.logging.error(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in gfile.Walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n if is_root_dir:\n is_root_dir = False\n continue\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(sub_dir)\n if dir_name == image_dir:\n continue\n tf.logging.info(\"Looking for images in '\" + dir_name + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(gfile.Glob(file_glob))\n if not file_list:\n tf.logging.warning('No files found')\n continue\n if len(file_list) < 20:\n tf.logging.warning(\n 'WARNING: Folder has less than 20 images, which may cause issues.')\n elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:\n tf.logging.warning(\n 'WARNING: Folder {} has more than {} images. 
Some images will '\n 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n training_images = []\n testing_images = []\n validation_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, so that the data set creator has\n # a way of grouping photos that are close variations of each other. For\n # example this is used in the plant disease data set to group multiple\n # pictures of the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_IMAGES_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_IMAGES_PER_CLASS))\n if percentage_hash < validation_percentage:\n validation_images.append(base_name)\n elif percentage_hash < (testing_percentage + validation_percentage):\n testing_images.append(base_name)\n else:\n training_images.append(base_name)\n result[label_name] = {\n 'dir': dir_name,\n 'training': training_images,\n 'testing': testing_images,\n 'validation': validation_images,\n }\n return result\n\n\ndef get_image_path(image_lists, label_name, index, image_dir, category):\n \"\"\"Returns a path to an image for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Int offset of the image we want. This will be modulo-ed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n\n \"\"\"\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path\n\n\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,\n category, architecture):\n \"\"\"Returns a path to a bottleneck file for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be modulo-ed by the\n available number of images for the label, so it can be arbitrarily large.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n category: Name string of set to pull images from - training, testing, or\n validation.\n architecture: The name of the model architecture.\n\n Returns:\n File system path string to the bottleneck file for the requested image.\n \"\"\"\n return get_image_path(image_lists, label_name, index, bottleneck_dir,\n category) + '_' + architecture + '.txt'\n\n\ndef create_model_graph(model_info):\n \"\"\"Creates a graph from saved GraphDef file and returns a Graph object.\n\n Args:\n model_info: Dictionary containing information about the model architecture.\n\n Returns:\n Graph holding the trained Inception network, and various tensors we'll be\n manipulating.\n \"\"\"\n with tf.Graph().as_default() as graph:\n model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])\n print('Model path: ', model_path)\n with gfile.FastGFile(model_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(\n graph_def,\n name='',\n return_elements=[\n model_info['bottleneck_tensor_name'],\n model_info['resized_input_tensor_name'],\n ]))\n return graph, bottleneck_tensor, resized_input_tensor\n\n\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Runs inference on an image to extract the 'bottleneck' summary layer.\n\n Args:\n sess: Current active TensorFlow Session.\n image_data: String of raw JPEG data.\n image_data_tensor: Input data layer in the graph.\n decoded_image_tensor: Output of initial image resizing and preprocessing.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: Layer before the final softmax.\n\n Returns:\n Numpy array of bottleneck values.\n \"\"\"\n # First decode the JPEG image, resize it, and rescale the pixel values.\n resized_input_values = sess.run(decoded_image_tensor,\n {image_data_tensor: image_data})\n # Then run it through the recognition network.\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: resized_input_values})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values\n\n\ndef maybe_download_and_extract(data_url):\n \"\"\"Download and extract model tar file.\n\n If the pretrained model we're using doesn't already exist, this function\n downloads it from the TensorFlow.org website and unpacks it into a directory.\n\n Args:\n data_url: Web location of the tar file containing the pretrained model.\n \"\"\"\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = data_url.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n tf.logging.info('Successfully downloaded %s %d bytes.', filename,\n statinfo.st_size)\n print('Extracting file from ', filepath)\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n else:\n print('Not extracting or downloading files, model already present on disk')\n\n\ndef 
ensure_dir_exists(dir_name):\n \"\"\"Makes sure the folder exists on disk.\n\n Args:\n dir_name: Path string to the folder we want to create.\n \"\"\"\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\nbottleneck_path_2_bottleneck_values = {}\n\n\ndef create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Create a single bottleneck file.\"\"\"\n tf.logging.info('Creating bottleneck at ' + bottleneck_path)\n image_path = get_image_path(image_lists, label_name, index,\n image_dir, category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n image_data = gfile.FastGFile(image_path, 'rb').read()\n try:\n bottleneck_values = run_bottleneck_on_image(\n sess, image_data, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor)\n except Exception as e:\n raise RuntimeError('Error during processing file %s (%s)' % (image_path,\n str(e)))\n bottleneck_string = ','.join(str(x) for x in bottleneck_values)\n with open(bottleneck_path, 'w') as bottleneck_file:\n bottleneck_file.write(bottleneck_string)\n\n\ndef get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,\n category, bottleneck_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor, architecture):\n \"\"\"Retrieves or calculates bottleneck values for an image.\n\n If a cached version of the bottleneck data exists on-disk, return that,\n otherwise calculate the data and save it to disk for future use.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be modulo-ed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of which set to pull images from - training, testing,\n or validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: The tensor to feed loaded jpeg data into.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The output tensor for the bottleneck values.\n architecture: The name of the model architecture.\n\n Returns:\n Numpy array of values produced by the bottleneck layer for the image.\n \"\"\"\n label_lists = image_lists[label_name]\n sub_dir = label_lists['dir']\n sub_dir_path = os.path.join(bottleneck_dir, sub_dir)\n ensure_dir_exists(sub_dir_path)\n bottleneck_path = get_bottleneck_path(image_lists, label_name, index,\n bottleneck_dir, category, architecture)\n if not os.path.exists(bottleneck_path):\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n did_hit_error = False\n try:\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n except ValueError:\n tf.logging.warning('Invalid float found, recreating bottleneck')\n did_hit_error = True\n if did_hit_error:\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n # Allow exceptions to propagate here, since they shouldn't happen after a\n # fresh creation\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n return bottleneck_values\n\n\ndef cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,\n jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture):\n \"\"\"Ensures all the training, testing, and validation bottlenecks are cached.\n\n Because we're likely to read the same image multiple times (if there are no\n distortions applied during training) it can speed things up a lot if we\n calculate the bottleneck layer values once for each image during\n preprocessing, and then just read those cached values repeatedly during\n training. 
Here we go through all the images we've found, calculate those\n values, and save them off.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n image_dir: Root folder string of the subfolders containing the training\n images.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: Input tensor for jpeg data from file.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The penultimate output layer of the graph.\n architecture: The name of the model architecture.\n\n Returns:\n Nothing.\n \"\"\"\n how_many_bottlenecks = 0\n ensure_dir_exists(bottleneck_dir)\n for label_name, label_lists in image_lists.items():\n for category in ['training', 'testing', 'validation']:\n category_list = label_lists[category]\n for index, unused_base_name in enumerate(category_list):\n get_or_create_bottleneck(\n sess, image_lists, label_name, index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture)\n\n how_many_bottlenecks += 1\n if how_many_bottlenecks % 100 == 0:\n tf.logging.info(\n str(how_many_bottlenecks) + ' bottleneck files created.')\n\n\ndef get_random_cached_bottlenecks(sess, image_lists, how_many, category,\n bottleneck_dir, image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor, architecture):\n \"\"\"Retrieves bottleneck values for cached images.\n\n If no distortions are being applied, this function can retrieve the cached\n bottleneck values directly from disk for images. It picks a random set of\n images from the specified category.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: If positive, a random sample of this size will be chosen.\n If negative, all bottlenecks will be retrieved.\n category: Name string of which set to pull from - training, testing, or\n validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n image_dir: Root folder string of the subfolders containing the training\n images.\n jpeg_data_tensor: The layer to feed jpeg image data into.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n architecture: The name of the model architecture.\n\n Returns:\n List of bottleneck arrays, their corresponding ground truths, and the\n relevant filenames.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n filenames = []\n if how_many >= 0:\n # Retrieve a random sample of bottlenecks.\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(\n sess, image_lists, label_name, image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture)\n bottlenecks.append(bottleneck)\n ground_truths.append(label_index)\n filenames.append(image_name)\n else:\n # Retrieve all bottlenecks.\n for label_index, label_name in 
enumerate(image_lists.keys()):\n for image_index, image_name in enumerate(\n image_lists[label_name][category]):\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(\n sess, image_lists, label_name, image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture)\n bottlenecks.append(bottleneck)\n ground_truths.append(label_index)\n filenames.append(image_name)\n return bottlenecks, ground_truths, filenames\n\n\ndef get_random_distorted_bottlenecks(\n sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,\n distorted_image, resized_input_tensor, bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for training images, after distortions.\n\n If we're training with distortions like crops, scales, or flips, we have to\n recalculate the full model for every image, and so we can't use cached\n bottleneck values. Instead we find random images for the requested category,\n run them through the distortion graph, and then the full graph to get the\n bottleneck results for each.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: The integer number of bottleneck values to return.\n category: Name string of which set of images to fetch - training, testing,\n or validation.\n image_dir: Root folder string of the subfolders containing the training\n images.\n input_jpeg_tensor: The input layer we feed the image data to.\n distorted_image: The output node of the distortion graph.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays and their corresponding ground truths.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_path = get_image_path(image_lists, label_name, image_index, image_dir,\n category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n jpeg_data = gfile.FastGFile(image_path, 'rb').read()\n # Note that we materialize the distorted_image_data as a numpy array before\n # running inference on the image. 
This involves 2 memory copies and\n # might be optimized in other implementations.\n distorted_image_data = sess.run(distorted_image,\n {input_jpeg_tensor: jpeg_data})\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: distorted_image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n bottlenecks.append(bottleneck_values)\n ground_truths.append(label_index)\n return bottlenecks, ground_truths\n\n\ndef should_distort_images(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Whether any distortions are enabled, from the input flags.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n\n Returns:\n Boolean value indicating whether any distortions should be applied.\n \"\"\"\n return (flip_left_right or (random_crop != 0) or (random_scale != 0) or\n (random_brightness != 0))\n\n\ndef add_input_distortions(flip_left_right, random_crop, random_scale,\n random_brightness, input_width, input_height,\n input_depth, input_mean, input_std):\n \"\"\"Creates the operations to apply the specified distortions.\n\n During training it can help to improve the results if we run the images\n through simple distortions like crops, scales, and flips. These reflect the\n kind of variations we expect in the real world, and so can help train the\n model to cope with natural data more effectively. Here we take the supplied\n parameters and construct a network of operations to apply them to an image.\n\n Cropping\n ~~~~~~~~\n\n Cropping is done by placing a bounding box at a random position in the full\n image. The cropping parameter controls the size of that box relative to the\n input image. If it's zero, then the box is the same size as the input and no\n cropping is performed. If the value is 50%, then the crop box will be half the\n width and height of the input. In a diagram it looks like this:\n\n < width >\n +---------------------+\n | |\n | width - crop% |\n | < > |\n | +------+ |\n | | | |\n | | | |\n | | | |\n | +------+ |\n | |\n | |\n +---------------------+\n\n Scaling\n ~~~~~~~\n\n Scaling is a lot like cropping, except that the bounding box is always\n centered and its size varies randomly within the given range. For example if\n the scale percentage is zero, then the bounding box is the same size as the\n input and no scaling is applied. 
If it's 50%, then the bounding box will vary at random\n between half the input's width and height and its full size.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n input_width: Horizontal size of expected input image to model.\n input_height: Vertical size of expected input image to model.\n input_depth: How many channels the expected input image should have.\n input_mean: Pixel value that should be zero in the image for the graph.\n input_std: How much to divide the pixel values by before recognition.\n\n Returns:\n The jpeg input layer and the distorted result tensor.\n \"\"\"\n\n jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n margin_scale = 1.0 + (random_crop / 100.0)\n resize_scale = 1.0 + (random_scale / 100.0)\n margin_scale_value = tf.constant(margin_scale)\n resize_scale_value = tf.random_uniform(tensor_shape.scalar(),\n minval=1.0,\n maxval=resize_scale)\n scale_value = tf.multiply(margin_scale_value, resize_scale_value)\n precrop_width = tf.multiply(scale_value, input_width)\n precrop_height = tf.multiply(scale_value, input_height)\n precrop_shape = tf.stack([precrop_height, precrop_width])\n precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)\n precropped_image = tf.image.resize_bilinear(decoded_image_4d,\n precrop_shape_as_int)\n precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])\n cropped_image = tf.random_crop(precropped_image_3d,\n [input_height, input_width, input_depth])\n if flip_left_right:\n flipped_image = tf.image.random_flip_left_right(cropped_image)\n else:\n flipped_image = cropped_image\n brightness_min = 1.0 - (random_brightness / 100.0)\n brightness_max = 1.0 + (random_brightness / 100.0)\n brightness_value = tf.random_uniform(tensor_shape.scalar(),\n minval=brightness_min,\n maxval=brightness_max)\n brightened_image = tf.multiply(flipped_image, brightness_value)\n offset_image = tf.subtract(brightened_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')\n return jpeg_data, distort_result\n\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\ndef add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor,\n bottleneck_tensor_size, quantize_layer):\n \"\"\"Adds a new softmax and fully-connected layer for training.\n\n We need to retrain the top layer to identify our new classes, so this function\n adds the right operations to the graph, along with some variables to hold the\n weights, and then sets up all the gradients for the backward pass.\n\n The set up for the softmax and fully-connected layers is based on:\n 
https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html\n\n Args:\n class_count: Integer of how many categories of things we're trying to\n recognize.\n final_tensor_name: Name string for the new final node that produces results.\n bottleneck_tensor: The output of the main CNN graph.\n bottleneck_tensor_size: How many entries in the bottleneck vector.\n quantize_layer: Boolean, specifying whether the newly added layer should be\n quantized.\n\n Returns:\n The tensors for the training and cross entropy results, and tensors for the\n bottleneck input and ground truth input.\n \"\"\"\n with tf.name_scope('input'):\n bottleneck_input = tf.placeholder_with_default(\n bottleneck_tensor,\n shape=[None, bottleneck_tensor_size],\n name='BottleneckInputPlaceholder')\n\n ground_truth_input = tf.placeholder(\n tf.int64, [None], name='GroundTruthInput')\n\n # Organizing the following ops as `final_training_ops` so they're easier\n # to see in TensorBoard\n layer_name = 'final_training_ops'\n with tf.name_scope(layer_name):\n with tf.name_scope('weights'):\n initial_value = tf.truncated_normal(\n [bottleneck_tensor_size, class_count], stddev=0.001)\n layer_weights = tf.Variable(initial_value, name='final_weights')\n if quantize_layer:\n quantized_layer_weights = quant_ops.MovingAvgQuantize(\n layer_weights, is_training=True)\n variable_summaries(quantized_layer_weights)\n\n variable_summaries(layer_weights)\n with tf.name_scope('biases'):\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\n if quantize_layer:\n quantized_layer_biases = quant_ops.MovingAvgQuantize(\n layer_biases, is_training=True)\n variable_summaries(quantized_layer_biases)\n\n variable_summaries(layer_biases)\n\n with tf.name_scope('Wx_plus_b'):\n if quantize_layer:\n logits = tf.matmul(bottleneck_input,\n quantized_layer_weights) + quantized_layer_biases\n logits = quant_ops.MovingAvgQuantize(\n logits,\n init_min=-32.0,\n init_max=32.0,\n is_training=True,\n num_bits=8,\n narrow_range=False,\n ema_decay=0.5)\n tf.summary.histogram('pre_activations', logits)\n else:\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\n tf.summary.histogram('pre_activations', logits)\n\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\n\n tf.summary.histogram('activations', final_tensor)\n\n with tf.name_scope('cross_entropy'):\n cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(\n labels=ground_truth_input, logits=logits)\n\n tf.summary.scalar('cross_entropy', cross_entropy_mean)\n\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n train_step = optimizer.minimize(cross_entropy_mean)\n\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,\n final_tensor)\n\n\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\n \"\"\"Inserts the operations we need to evaluate the accuracy of our results.\n\n Args:\n result_tensor: The new final node that produces results.\n ground_truth_tensor: The node we feed ground truth data\n into.\n\n Returns:\n Tuple of (evaluation step, prediction).\n \"\"\"\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n prediction = tf.argmax(result_tensor, 1)\n correct_prediction = tf.equal(prediction, ground_truth_tensor)\n with tf.name_scope('accuracy'):\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', evaluation_step)\n return evaluation_step, prediction\n\n\ndef 
save_graph_to_file(sess, graph, graph_file_name):\n output_graph_def = graph_util.convert_variables_to_constants(\n sess, graph.as_graph_def(), [FLAGS.final_tensor_name])\n\n with gfile.FastGFile(graph_file_name, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n return\n\n\ndef prepare_file_system():\n # Setup the directory we'll write summaries to for TensorBoard\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n if FLAGS.intermediate_store_frequency > 0:\n ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)\n return\n\n\ndef create_model_info(architecture):\n \"\"\"Given the name of a model architecture, returns information about it.\n\n There are different base image recognition pretrained models that can be\n retrained using transfer learning, and this function translates from the name\n of a model to the attributes that are needed to download and train with it.\n\n Args:\n architecture: Name of a model architecture.\n\n Returns:\n Dictionary of information about the model, or None if the name isn't\n recognized.\n\n Raises:\n ValueError: If architecture name is unknown.\n \"\"\"\n architecture = architecture.lower()\n is_quantized = False\n if architecture == 'inception_v3':\n # pylint: disable=line-too-long\n data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n # pylint: enable=line-too-long\n bottleneck_tensor_name = 'pool_3/_reshape:0'\n bottleneck_tensor_size = 2048\n input_width = 299\n input_height = 299\n input_depth = 3\n resized_input_tensor_name = 'Mul:0'\n model_file_name = 'classify_image_graph_def.pb'\n input_mean = 128\n input_std = 128\n elif architecture.startswith('mobilenet_'):\n parts = architecture.split('_')\n if len(parts) != 3 and len(parts) != 4:\n tf.logging.error(\"Couldn't understand architecture name '%s'\",\n architecture)\n return None\n version_string = parts[1]\n if (version_string != '1.0' and version_string != '0.75' and\n version_string != '0.50' and version_string != '0.25'):\n tf.logging.error(\n \"\"\"The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',\n but found '%s' for architecture '%s'\"\"\",\n version_string, architecture)\n return None\n size_string = parts[2]\n if (size_string != '224' and size_string != '192' and\n size_string != '160' and size_string != '128'):\n tf.logging.error(\n \"\"\"The Mobilenet input size should be '224', '192', '160', or '128',\n but found '%s' for architecture '%s'\"\"\",\n size_string, architecture)\n return None\n if len(parts) == 3:\n is_quantized = False\n else:\n if parts[3] != 'quantized':\n tf.logging.error(\n \"Couldn't understand architecture suffix '%s' for '%s'\", parts[3],\n architecture)\n return None\n is_quantized = True\n\n if is_quantized:\n data_url = 'http://download.tensorflow.org/models/mobilenet_v1_'\n data_url += version_string + '_' + size_string + '_quantized_frozen.tgz'\n bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'\n resized_input_tensor_name = 'Placeholder:0'\n model_dir_name = ('mobilenet_v1_' + version_string + '_' + size_string +\n '_quantized_frozen')\n model_base_name = 'quantized_frozen_graph.pb'\n\n else:\n data_url = 'http://download.tensorflow.org/models/mobilenet_v1_'\n data_url += version_string + '_' + size_string + '_frozen.tgz'\n bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'\n resized_input_tensor_name = 'input:0'\n model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string\n 
model_base_name = 'frozen_graph.pb'\n\n bottleneck_tensor_size = 1001\n input_width = int(size_string)\n input_height = int(size_string)\n input_depth = 3\n model_file_name = os.path.join(model_dir_name, model_base_name)\n input_mean = 127.5\n input_std = 127.5\n else:\n tf.logging.error(\"Couldn't understand architecture name '%s'\", architecture)\n raise ValueError('Unknown architecture', architecture)\n\n return {\n 'data_url': data_url,\n 'bottleneck_tensor_name': bottleneck_tensor_name,\n 'bottleneck_tensor_size': bottleneck_tensor_size,\n 'input_width': input_width,\n 'input_height': input_height,\n 'input_depth': input_depth,\n 'resized_input_tensor_name': resized_input_tensor_name,\n 'model_file_name': model_file_name,\n 'input_mean': input_mean,\n 'input_std': input_std,\n 'quantize_layer': is_quantized,\n }\n\n\ndef add_jpeg_decoding(input_width, input_height, input_depth, input_mean,\n input_std):\n \"\"\"Adds operations that perform JPEG decoding and resizing to the graph..\n\n Args:\n input_width: Desired width of the image fed into the recognizer graph.\n input_height: Desired width of the image fed into the recognizer graph.\n input_depth: Desired channels of the image fed into the recognizer graph.\n input_mean: Pixel value that should be zero in the image for the graph.\n input_std: How much to divide the pixel values by before recognition.\n\n Returns:\n Tensors for the node to feed JPEG data into, and the output of the\n preprocessing steps.\n \"\"\"\n jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n offset_image = tf.subtract(resized_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n return jpeg_data, mul_image\n\n\ndef main(_):\n # Needed to make sure the logging output is visible.\n # See https://github.com/tensorflow/tensorflow/issues/3047\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Prepare necessary directories that can be used during training\n prepare_file_system()\n\n # Gather information about the model architecture we'll be using.\n model_info = create_model_info(FLAGS.architecture)\n if not model_info:\n tf.logging.error('Did not recognize architecture flag')\n return -1\n\n # Set up the pre-trained graph.\n maybe_download_and_extract(model_info['data_url'])\n graph, bottleneck_tensor, resized_image_tensor = (\n create_model_graph(model_info))\n\n # Look at the folder structure, and create lists of all the images.\n image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,\n FLAGS.validation_percentage)\n class_count = len(image_lists.keys())\n if class_count == 0:\n tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)\n return -1\n if class_count == 1:\n tf.logging.error('Only one valid folder of images found at ' +\n FLAGS.image_dir +\n ' - multiple classes are needed for classification.')\n return -1\n\n # See if the command-line flags mean we're applying any distortions.\n do_distort_images = should_distort_images(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness)\n\n with tf.Session(graph=graph) as sess:\n # Set up the image decoding 
sub-graph.\n jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(\n model_info['input_width'], model_info['input_height'],\n model_info['input_depth'], model_info['input_mean'],\n model_info['input_std'])\n\n if do_distort_images:\n # We will be applying distortions, so setup the operations we'll need.\n (distorted_jpeg_data_tensor,\n distorted_image_tensor) = add_input_distortions(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness, model_info['input_width'],\n model_info['input_height'], model_info['input_depth'],\n model_info['input_mean'], model_info['input_std'])\n else:\n # We'll make sure we've calculated the 'bottleneck' image summaries and\n # cached them on disk.\n cache_bottlenecks(sess, image_lists, FLAGS.image_dir,\n FLAGS.bottleneck_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor,\n bottleneck_tensor, FLAGS.architecture)\n\n # Add the new layer that we'll be training.\n (train_step, cross_entropy, bottleneck_input, ground_truth_input,\n final_tensor) = add_final_training_ops(\n len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor,\n model_info['bottleneck_tensor_size'], model_info['quantize_layer'])\n\n # Create the operations we need to evaluate the accuracy of our new layer.\n evaluation_step, prediction = add_evaluation_step(\n final_tensor, ground_truth_input)\n\n # Merge all the summaries and write them out to the summaries_dir\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n\n validation_writer = tf.summary.FileWriter(\n FLAGS.summaries_dir + '/validation')\n\n # Set up all our weights to their initial default values.\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Run the training for as many cycles as requested on the command line.\n for i in range(FLAGS.how_many_training_steps):\n # Get a batch of input bottleneck values, either calculated fresh every\n # time with distortions applied, or from the cache stored on disk.\n if do_distort_images:\n (train_bottlenecks,\n train_ground_truth) = get_random_distorted_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.image_dir, distorted_jpeg_data_tensor,\n distorted_image_tensor, resized_image_tensor, bottleneck_tensor)\n else:\n (train_bottlenecks,\n train_ground_truth, _) = get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.architecture)\n # Feed the bottlenecks and ground truth into the graph, and run a training\n # step. 
Capture training summaries for TensorBoard with the `merged` op.\n train_summary, _ = sess.run(\n [merged, train_step],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n train_writer.add_summary(train_summary, i)\n\n # Every so often, print out how well the graph is training.\n is_last_step = (i + 1 == FLAGS.how_many_training_steps)\n if (i % FLAGS.eval_step_interval) == 0 or is_last_step:\n train_accuracy, cross_entropy_value = sess.run(\n [evaluation_step, cross_entropy],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %\n (datetime.now(), i, train_accuracy * 100))\n tf.logging.info('%s: Step %d: Cross entropy = %f' %\n (datetime.now(), i, cross_entropy_value))\n validation_bottlenecks, validation_ground_truth, _ = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.validation_batch_size, 'validation',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.architecture))\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n validation_summary, validation_accuracy = sess.run(\n [merged, evaluation_step],\n feed_dict={bottleneck_input: validation_bottlenecks,\n ground_truth_input: validation_ground_truth})\n validation_writer.add_summary(validation_summary, i)\n tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %\n (datetime.now(), i, validation_accuracy * 100,\n len(validation_bottlenecks)))\n\n # Store intermediate results\n intermediate_frequency = FLAGS.intermediate_store_frequency\n\n if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)\n and i > 0):\n intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +\n 'intermediate_' + str(i) + '.pb')\n tf.logging.info('Save intermediate result to : ' +\n intermediate_file_name)\n save_graph_to_file(sess, graph, intermediate_file_name)\n\n # We've completed all our training, so run a final test evaluation on\n # some new images we haven't used before.\n test_bottlenecks, test_ground_truth, test_filenames = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.test_batch_size, 'testing',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.architecture))\n test_accuracy, predictions = sess.run(\n [evaluation_step, prediction],\n feed_dict={bottleneck_input: test_bottlenecks,\n ground_truth_input: test_ground_truth})\n tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %\n (test_accuracy * 100, len(test_bottlenecks)))\n\n if FLAGS.print_misclassified_test_images:\n tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')\n for i, test_filename in enumerate(test_filenames):\n if predictions[i] != test_ground_truth[i]:\n tf.logging.info('%70s %s' %\n (test_filename,\n list(image_lists.keys())[predictions[i]]))\n\n # Write out the trained graph and labels with the weights stored as\n # constants.\n save_graph_to_file(sess, graph, FLAGS.output_graph)\n with gfile.FastGFile(FLAGS.output_labels, 'w') as f:\n f.write('\\n'.join(image_lists.keys()) + '\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--image_dir',\n type=str,\n default='./train',\n help='Path to folders of labeled images.'\n )\n parser.add_argument(\n '--output_graph',\n type=str,\n default='./tmp/output_graph.pb',\n help='Where to 
save the trained graph.'\n )\n parser.add_argument(\n '--intermediate_output_graphs_dir',\n type=str,\n default='/tmp/intermediate_graph/',\n help='Where to save the intermediate graphs.'\n )\n parser.add_argument(\n '--intermediate_store_frequency',\n type=int,\n default=0,\n help=\"\"\"\\\n How many steps between saves of intermediate graphs. If \"0\", intermediate\n graphs will not be stored.\\\n \"\"\"\n )\n parser.add_argument(\n '--output_labels',\n type=str,\n default='./tmp/output_labels.txt',\n help='Where to save the trained graph\\'s labels.'\n )\n parser.add_argument(\n '--summaries_dir',\n type=str,\n default='./tmp/retrain_logs',\n help='Where to save summary logs for TensorBoard.'\n )\n parser.add_argument(\n '--how_many_training_steps',\n type=int,\n default=200,\n help='How many training steps to run before ending.'\n )\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.01,\n help='How large a learning rate to use when training.'\n )\n parser.add_argument(\n '--testing_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a test set.'\n )\n parser.add_argument(\n '--validation_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a validation set.'\n )\n parser.add_argument(\n '--eval_step_interval',\n type=int,\n default=10,\n help='How often to evaluate the training results.'\n )\n parser.add_argument(\n '--train_batch_size',\n type=int,\n default=100,\n help='How many images to train on at a time.'\n )\n parser.add_argument(\n '--test_batch_size',\n type=int,\n default=-1,\n help=\"\"\"\\\n How many images to test on. This test set is only used once, to evaluate\n the final accuracy of the model after training completes.\n A value of -1 causes the entire test set to be used, which leads to more\n stable results across runs.\\\n \"\"\"\n )\n parser.add_argument(\n '--validation_batch_size',\n type=int,\n default=100,\n help=\"\"\"\\\n How many images to use in an evaluation batch. This validation set is\n used much more often than the test set, and is an early indicator of how\n accurate the model is during training.\n A value of -1 causes the entire validation set to be used, which leads to\n more stable results across training iterations, but may be slower on large\n training sets.\\\n \"\"\"\n )\n parser.add_argument(\n '--print_misclassified_test_images',\n default=False,\n help=\"\"\"\\\n Whether to print out a list of all misclassified test images.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--model_dir',\n type=str,\n default='/tmp/imagenet',\n help=\"\"\"\\\n Path to classify_image_graph_def.pb,\n imagenet_synset_to_human_label_map.txt, and\n imagenet_2012_challenge_label_map_proto.pbtxt.\\\n \"\"\"\n )\n parser.add_argument(\n '--bottleneck_dir',\n type=str,\n default='/tmp/bottleneck',\n help='Path to cache bottleneck layer values as files.'\n )\n parser.add_argument(\n '--final_tensor_name',\n type=str,\n default='final_result',\n help=\"\"\"\\\n The name of the output classification layer in the retrained graph.\\\n \"\"\"\n )\n parser.add_argument(\n '--flip_left_right',\n default=False,\n help=\"\"\"\\\n Whether to randomly flip half of the training images horizontally.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--random_crop',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much of a margin to randomly crop off the\n training images.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_scale',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly scale up the size of the\n training images by.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_brightness',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly multiply the training image\n input pixels up or down by.\\\n \"\"\"\n )\n parser.add_argument(\n '--architecture',\n type=str,\n default='inception_v3',\n help=\"\"\"\\\n Which model architecture to use. 'inception_v3' is the most accurate, but\n also the slowest. For faster or smaller models, choose a MobileNet with the\n form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,\n 'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224\n pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much\n less accurate, but smaller and faster network that's 920 KB on disk and\n takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\n for more information on Mobilenet.\\\n \"\"\")\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.image.resize_bilinear", "tensorflow.reduce_min", "tensorflow.image.random_flip_left_right", "tensorflow.matmul", "tensorflow.import_graph_def", "tensorflow.stack", "tensorflow.python.platform.gfile.Exists", "tensorflow.nn.softmax", "tensorflow.random_crop", "tensorflow.global_variables_initializer", "tensorflow.logging.warning", "tensorflow.image.decode_jpeg", "tensorflow.cast", "tensorflow.train.GradientDescentOptimizer", "tensorflow.subtract", "tensorflow.summary.histogram", "tensorflow.python.platform.gfile.FastGFile", "tensorflow.Variable", "tensorflow.logging.info", "tensorflow.argmax", "tensorflow.constant", "tensorflow.gfile.MakeDirs", "tensorflow.squeeze", "tensorflow.gfile.DeleteRecursively", "tensorflow.contrib.quantize.python.quant_ops.MovingAvgQuantize", "tensorflow.app.run", "tensorflow.python.platform.gfile.Walk", "tensorflow.logging.set_verbosity", "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.zeros", "tensorflow.expand_dims", "tensorflow.summary.scalar", "tensorflow.logging.error", "tensorflow.gfile.Exists", "tensorflow.Session", "tensorflow.GraphDef", "tensorflow.truncated_normal", "tensorflow.python.platform.gfile.Glob", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.summary.merge_all", "numpy.squeeze", "tensorflow.placeholder_with_default", "tensorflow.multiply", "tensorflow.Graph", "tensorflow.equal", "tensorflow.reduce_max", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.logging.fatal", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.square", "tensorflow.python.util.compat.as_bytes" ] ]
Tereshchenkolab/paper-ecg
[ "e51d4b4dc09db39f27cc9674729c8932f994daab" ]
[ "src/main/python/Conversion.py" ]
[ "from pathlib import Path\n\nimport numpy as np\nfrom numpy.lib.arraysetops import isin\n\nimport ecgdigitize\nimport ecgdigitize.signal\nimport ecgdigitize.image\nfrom ecgdigitize import common, visualization\nfrom ecgdigitize.image import ColorImage, Rectangle\n\nfrom model.InputParameters import InputParameters\n\n\ndef convertECGLeads(inputImage: ColorImage, parameters: InputParameters):\n # Apply rotation\n rotatedImage = ecgdigitize.image.rotated(inputImage, parameters.rotation)\n\n # Crop each lead\n leadImages = {\n leadId: ecgdigitize.image.cropped(rotatedImage, Rectangle(lead.x, lead.y, lead.width, lead.height))\n for leadId, lead in parameters.leads.items()\n }\n\n extractSignal = ecgdigitize.digitizeSignal\n extractGrid = ecgdigitize.digitizeGrid\n\n # Map all lead images to signal data\n signals = {\n leadId: extractSignal(leadImage)\n for leadId, leadImage in leadImages.items()\n }\n\n # If all signals failed -> Failure\n if all([isinstance(signal, common.Failure) for _, signal in signals.items()]):\n return None, None\n\n previews = {\n leadId: visualization.overlaySignalOnImage(signal, image)\n for (leadId, image), (_, signal) in zip(leadImages.items(), signals.items())\n }\n\n # Map leads to grid size estimates\n gridSpacings = {\n leadId: extractGrid(leadImage)\n for leadId, leadImage in leadImages.items()\n }\n # Just got successful spacings\n spacings = [spacing for spacing in gridSpacings.values() if not isinstance(spacing, common.Failure)]\n\n if len(spacings) == 0:\n return None, None\n\n samplingPeriodInPixels = gridHeightInPixels = common.mean(spacings)\n\n # Scale signals\n # TODO: Pass in the grid size in mm\n scaledSignals = {\n leadId: ecgdigitize.signal.verticallyScaleECGSignal(\n ecgdigitize.signal.zeroECGSignal(signal),\n gridHeightInPixels,\n parameters.voltScale, gridSizeInMillimeters=1.0\n )\n for leadId, signal in signals.items()\n }\n\n # TODO: Pass in the grid size in mm\n samplingPeriod = ecgdigitize.signal.ecgSignalSamplingPeriod(samplingPeriodInPixels, parameters.timeScale, gridSizeInMillimeters=1.0)\n\n # 3. 
Zero pad all signals on the left based on their start times and the samplingPeriod\n # take the max([len(x) for x in signals]) and zero pad all signals on the right\n paddedSignals = {\n leadId: common.padLeft(signal, int(parameters.leads[leadId].startTime / samplingPeriod))\n for leadId, signal in scaledSignals.items()\n }\n\n # (should already be handled by (3)) Replace any None signals with all zeros\n maxLength = max([len(s) for _, s in paddedSignals.items()])\n fullSignals = {\n leadId: common.padRight(signal, maxLength - len(signal))\n for leadId, signal in paddedSignals.items()\n }\n\n return fullSignals, previews\n\n\ndef exportSignals(leadSignals, filePath, separator='\\t'):\n \"\"\"Exports a dict of lead signals to file\n\n Args:\n leadSignals (Dict[str -> np.ndarray]): Dict mapping lead id's to np array of signal data (output from convertECGLeads)\n \"\"\"\n leads = common.zipDict(leadSignals)\n leads.sort(key=lambda pair: pair[0].value)\n\n assert len(leads) >= 1\n lengthOfFirst = len(leads[0][1])\n\n assert all([len(signal) == lengthOfFirst for key, signal in leads])\n\n collated = np.array([signal for _, signal in leads])\n output = np.swapaxes(collated, 0, 1)\n\n if not issubclass(type(filePath), Path):\n filePath = Path(filePath)\n\n if filePath.exists():\n print(\"Warning: Output file will be overwritten!\")\n\n outputLines = [\n separator.join(\n [str(val) for val in row]\n ) + \"\\n\"\n for row in output\n ]\n\n with open(filePath, 'w') as outputFile:\n outputFile.writelines(outputLines)\n" ]
[ [ "numpy.array", "numpy.swapaxes" ] ]
ishitamed19/few-shot-meta-baseline
[ "9b6e87e4fd52a7e6745d2a87f9399dddc13aaad4" ]
[ "models/classifier.py" ]
[ "import math\n\nimport torch\nimport torch.nn as nn\n\nimport models\nimport utils\nfrom .models import register\n\n\n@register('classifier')\nclass Classifier(nn.Module):\n \n def __init__(self, encoder, encoder_args,\n classifier, classifier_args):\n super().__init__()\n self.encoder = models.make(encoder, **encoder_args)\n classifier_args['in_dim'] = self.encoder.out_dim\n self.classifier = models.make(classifier, **classifier_args)\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.classifier(x)\n return x\n\n\n@register('linear-classifier')\nclass LinearClassifier(nn.Module):\n\n def __init__(self, in_dim, n_classes):\n super().__init__()\n self.linear = nn.Linear(in_dim, n_classes)\n\n def forward(self, x):\n return self.linear(x)\n\n\n@register('nn-classifier')\nclass NNClassifier(nn.Module):\n\n def __init__(self, in_dim, n_classes, metric='cos', temp=None):\n super().__init__()\n self.proto = nn.Parameter(torch.empty(n_classes, in_dim))\n nn.init.kaiming_uniform_(self.proto, a=math.sqrt(5))\n if temp is None:\n if metric == 'cos':\n temp = nn.Parameter(torch.tensor(10.))\n else:\n temp = 1.0\n self.metric = metric\n self.temp = temp\n\n def forward(self, x):\n return utils.compute_logits(x, self.proto, self.metric, self.temp)\n\n" ]
[ [ "torch.nn.Linear", "torch.empty", "torch.tensor" ] ]
tor4z/DBS
[ "1232cda2fc93d1a3b5c3c08dbef68f4d157f7b5f" ]
[ "resnet.py" ]
[ "import torch\nimport torch.nn as nn\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None):\n super(ResNet, self).__init__()\n self.expansion = block.expansion\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(1, 
self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n\ndef _resnet(arch, block, layers, **kwargs):\n model = ResNet(block, layers, **kwargs)\n return model\n\n\ndef resnet18(**kwargs):\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], **kwargs)\n\n\ndef resnet34(**kwargs):\n r\"\"\"ResNet-34 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], **kwargs)\n\n\ndef resnet50(**kwargs):\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download 
to stderr\n \"\"\"\n return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], **kwargs)\n\n\ndef resnet101(**kwargs):\n r\"\"\"ResNet-101 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], **kwargs)\n\n\ndef resnet152(**kwargs):\n r\"\"\"ResNet-152 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], **kwargs)\n\n\ndef resnext50_32x4d(**kwargs):\n r\"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 4\n return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], **kwargs)\n\n\ndef resnext101_32x8d(**kwargs):\n r\"\"\"ResNeXt-101 32x8d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 8\n return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], **kwargs)\n\n\ndef wide_resnet50_2(**kwargs):\n r\"\"\"Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], **kwargs)\n\n\ndef wide_resnet101_2(**kwargs):\n r\"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. 
last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], **kwargs)\n\n\nresnets = {\n 'resnet18': resnet18,\n 'resnet34': resnet34,\n 'resnet50': resnet50,\n 'resnet101': resnet101,\n 'resnet152': resnet152,\n 'resnext50_32x4d': resnext50_32x4d,\n 'resnext101_32x8d': resnext101_32x8d\n}\n\n\ndef resnet(opt):\n resnet = resnets[opt.backbone]\n kwargs = {'num_classes': opt.num_classes}\n return resnet(**kwargs)\n" ]
[ [ "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.init.kaiming_normal_", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d" ] ]
neverix/book-covers
[ "c9fed069f05100f2ea7cd794453fb24fa010bb7a" ]
[ "train.py" ]
[ "from model import data, model\nfrom pathlib import Path\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\n\n\nsave_path = Path(\"model.h5\")\n\n\nif __name__ == '__main__':\n train, val = data()\n if not save_path.exists():\n model = model()\n model.compile(\"adam\", \"categorical_crossentropy\", metrics=[\"acc\"])\n initial_epoch = 1\n else:\n model = load_model(save_path)\n initial_epoch = 101\n checkpoint = tf.keras.callbacks.ModelCheckpoint(save_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n model.fit(train, validation_data=val, epochs=200, callbacks=[checkpoint], initial_epoch=initial_epoch)\n" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.keras.callbacks.ModelCheckpoint" ] ]
patricknharris/IEX-ELK-TopsTransform
[ "a4e898dacf27b4002b35b5fd7584ac857eb8dc56" ]
[ "iex-elk.py" ]
[ "# monolithic simple module to convert IEX json to ELK json format\n# IEX Attribution: \n# https://iextrading.com/trading/market-data/\n# in middle of this module within transform function\n# simple convervsion to DF - DataFrame\n# TBD DataFrame Normalization and Naive Correlation functions TBD\n# Within ELK this enables seeing stocks within same context -1,0,+1 std. deviations\n#!/usr/bin/env python\n\nimport json\nimport pandas as pd\nimport pycurl\nimport requests\nimport sys\nfrom io import BytesIO\n\n# global variables for now\n#debug = 1 # if 1 then print to stdout \ndebug = 0 # if 0 then no print\n\ndef usage(nargs):\n # of if not at least 2 then exit\n print(nargs)\n print(sys.argv)\n print('usage: ')\n print('python iex-elk.py ELK-index_name')\n print('or usage: python iex-elk.py ELK-index_name src_file')\n print('or usage: python iex-elk.py ELK-index_name src_file dest_file')\n quit()\n\ndef init_context():\n\n # use command line args\n # since python doesn't have a switch statement and nargs is unique use if's\n nargs=len(sys.argv)\n if nargs == 1:\n # of if not at least 2 then exit\n usage(nargs)\n return_tuple = (\"blank\", \"blank\", \"blank\")\n return return_tuple\n\n if nargs == 2:\n # of if 2 then get from curl\n index_name = sys.argv[1] ## index name for ELK\n lines = get_tops_curl_lines()\n\n # return lines\n return_tuple = (lines,index_name,\"blank_dest_file\")\n x, y, z = return_tuple\n if debug == 1:\n print(x)\n print(y)\n print(z)\n\n return return_tuple\n\n if nargs == 3:\n # if 3 then get from saved tops src_file obtained from curl\n # of if not at least 2 then exit\n index_name = sys.argv[1] ## index name for ELK\n src_file = sys.argv[2] ## get from IEX tops endpoint\n if debug == 1:\n print(\"Src_file=\" + src_file)\n\n lines = get_tops_file_lines(src_file)\n\n # return lines \n return_tuple = (lines, index_name, \"blank_dest_file\")\n return return_tuple\n \n if nargs == 4:\n # if 4 then get from saved tops src_file obtained from curl\n index_name = sys.argv[1] ## index name for ELK\n src_file = sys.argv[2] ## get from IEX tops file \n dest_file = sys.argv[3] ## get from IEX tops endpoint\n\n if debug == 1:\n print(\"Src_file=\" + src_file)\n print(\"Dest_file=\" + dest_file)\n\n lines = get_tops_file_lines(src_file)\n\n # return lines and dest_file\n return_tuple = (lines, index_name, dest_file)\n return return_tuple\n \n # TBD extend to streaming api or zmq topic\n if nargs >= 4:\n # of if at greater than 4 then exit\n usage(nargs);\n return_tuple = (\"blank\", \"blank\", \"blank\")\n return return_tuple\n\n return_tuple = (\"blank\", \"blank\", \"blank\")\n return return_tuple\n \n\n\n\n# curl https://api.iextrading.com/1.0/tops\ndef get_tops_curl_lines():\n buffer = BytesIO()\n cg = pycurl.Curl()\n cg.setopt(cg.URL, 'https://api.iextrading.com/1.0/tops')\n cg.setopt(cg.WRITEDATA, buffer)\n cg.perform()\n\n lines = buffer.getvalue()\n if debug == 1:\n print(lines)\n cg.close() # close connection\n return lines\n\ndef get_tops_file_lines(src_file):\n with open(src_file) as open_file:\n lines = open_file.readlines()\n if debug == 1:\n print(lines)\n return lines\n\n\n# below is command line ; makes assumption that previous curl from IEX Tops endpoint in tops.json\n# curl -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/stocks1/_bulk?pretty' --data-binary @ptop1.json \ndef post_tops_elk(lines,index_name):\n\n # specify URL for ELK localhost\n _url = 'http://localhost:9200/' + index_name + '/_bulk?pretty'\n _headers = 
{'Content-Type':'application/x-ndjson'}\n\n\n # POST\n try:\n r = requests.post(_url, headers=_headers, data=lines)\n\n # Print Response and Code\n if debug == 1:\n print(r.text)\n print(r.status_code)\n\n except requests.exceptions.Timeout:\n print(\"elasticsearch not running(?) error\")\n print(\"Attempted to post to url:\", _url)\n quit()\n\n # Maybe set up for a retry, or continue in a retry loop\n except requests.exceptions.TooManyRedirects:\n print(\"elasticsearch not running(?) error\")\n print(\"Attempted to post to url:\", _url)\n quit()\n\n # Tell the user their URL was bad and try a different one\n except requests.exceptions.RequestException as e:\n print(\"elasticsearch not running(?) error\")\n print(\"Attempted to post to url:\", _url)\n quit()\n\n\n\n# Transform IEX json format to required ELK json bulk api format\ndef transform(lines, index_name):\n\n\n if debug == 1:\n print(type(lines)) # debug to validate that response is correct type\n\n if isinstance(lines, list):\n if debug == 1:\n print(\"isList\")\n\n a_string = ' '.join(lines)\n my_json = a_string\n #my_json = a_string.decode('utf8').replace(\"'\", '\"')\n else:\n if debug == 1:\n print(\"NOT LIST\")\n my_json = lines\n\n if debug == 1:\n print(my_json)\n print('- ' * 20)\n\n # Load the JSON to a Python list & dump it back out as formatted JSON\n data = json.loads(my_json)\n\n # could add formatting options to generate pretty print\n # don't do that for this application\n #s = json.dumps(data, indent=4, sort_keys=True)\n\n s = json.dumps(data)\n if debug == 1:\n print(\"hello json data string\")\n print(s)\n print(\"hello json data string\")\n\n # convert to DataFrame as side effect, of transform \n # to be used later to normalize values\n # and then add to ELK index stream\n df = pd.DataFrame.from_dict(data)\n\n # Debug lines, if needed for validation\n if debug == 1:\n print(\"DF****\")\n print(df)\n print(\"****DF\")\n\n # take out right bracket [\n s1 = s.replace('[', '') \n if debug == 1:\n print(\"hello take out left bracket\")\n print(s1)\n print(\"hello take out left bracket\")\n \n # take out left bracket [ and add a newline; newline required for post to ELK\n s2 = s1.replace(']', '\\n')\n if debug == 1:\n print(\"hello take out RIGHT bracket\")\n print(s2)\n print(\"hello take out RIGHT bracket\")\n \n # Replace index name from arg line into metadata \n metadata = '\\n{\"index\": {\"_index\":\"' + index_name + '\"}}\\n{'\n \n # insert metadata , metadata definition contains a newline\n s3 = s2.replace('{', metadata)\n if debug == 1:\n print(\"hello insert meta\")\n print(s3)\n print(\"hello insert meta\")\n \n # take out spaces ; minify\n s4 = s3.replace(' ', '')\n if debug == 1:\n print(\"hello remove spaces\")\n print(s4)\n print(\"hello remove spaces\")\n\n # remove comma at end of lines\n s5 = s4.replace('},', '}')\n if debug == 1:\n print(\"hello remove comma\")\n print(s5)\n print(\"hello remove comma\")\n\n # reassign transform to global lines\n lines = s5\n return lines\n\n\n# bulk api will fail if there is no newline at end\n# you can use curl to push file into elk\n# see URL\n# https://kb.objectrocket.com/elasticsearch/how-to-bulk-import-into-elasticsearch-using-curl\ndef post_tops_file(lines,dest_file):\n if debug == 1:\n print(\"hello write out file\")\n print(lines)\n print(\"hello write out file\")\n print(dest_file)\n\n with open(dest_file, 'w') as f:\n for each_line in lines:\n f.writelines(each_line)\n f.write('\\n')\n\ndef main():\n # get args, tuple contains lines and either real or 
blank dest_file name\n lines_tuple = init_context()\n\n (lines, index_name, dest_file) = lines_tuple\n if debug == 1:\n print(index_name)\n print(dest_file)\n\n # issue IEX get tops endpoint\n # get_tops() is called inside init_context() now \n # depending on nargs it either gets from curl IEX endpoint or is blank\n\n # transform from IEX json to ELK bulk api json format\n nlines = transform(lines,index_name)\n\n # POST transformed json to ELK localhost \n if dest_file == \"blank_dest_file\":\n post_tops_elk(nlines,index_name)\n\n if dest_file != \"blank_dest_file\":\n post_tops_file(nlines,dest_file)\n\n# \n# __name__\nif __name__ == \"__main__\":\n main()\n\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
godber/ginga
[ "acb32ed422aa604681c63c5a9494ffb0ad96cf2e" ]
[ "ginga/BaseImage.py" ]
[ "#\n# BaseImage.py -- Abstraction of an generic data image.\n#\n# Eric Jeschke ([email protected]) \n#\n# Copyright (c) Eric R. Jeschke. All rights reserved.\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\nimport math\nimport numpy\nimport logging\n\nfrom ginga.misc import Bunch, Callback\nfrom ginga import trcalc, AutoCuts\nfrom ginga.util.six.moves import map, zip\n\nclass ImageError(Exception):\n pass\n\nclass BaseImage(Callback.Callbacks):\n\n def __init__(self, data_np=None, metadata=None, logger=None):\n\n Callback.Callbacks.__init__(self)\n \n if logger != None:\n self.logger = logger\n else:\n self.logger = logging.Logger('BaseImage')\n if data_np == None:\n data_np = numpy.zeros((1, 1))\n self._data = data_np\n self.metadata = {}\n if metadata:\n self.update_metadata(metadata)\n self.order = ''\n\n self._set_minmax()\n\n self.autocuts = AutoCuts.Histogram(self.logger)\n\n # For callbacks\n for name in ('modified', ):\n self.enable_callback(name)\n\n @property\n def shape(self):\n return self._get_data().shape\n\n @property\n def width(self):\n # NOTE: numpy stores data in column-major layout\n return self.shape[1]\n\n @property\n def height(self):\n # NOTE: numpy stores data in column-major layout\n return self.shape[0]\n\n @property\n def depth(self):\n return self.get_depth()\n\n @property\n def ndim(self):\n return len(self.shape)\n\n def get_size(self):\n return (self.width, self.height)\n \n def get_depth(self):\n shape = self.shape\n if len(shape) > 2:\n return shape[2]\n return 1\n \n def get_shape(self):\n return self.shape\n\n def get_center(self):\n wd, ht = self.get_size()\n ctr_x, ctr_y = wd // 2, ht // 2\n return (ctr_x, ctr_y)\n\n def get_data(self):\n return self._data\n \n def _get_data(self):\n return self._data\n\n def _get_fast_data(self):\n \"\"\"\n Return an array similar to but possibly smaller than self._data,\n for fast calculation of the intensity distribution\n \"\"\"\n return self._data\n\n def copy_data(self):\n data = self._get_data()\n return data.copy()\n \n def get_data_xy(self, x, y):\n assert (x >= 0) and (y >= 0), \\\n ImageError(\"Indexes out of range: (x=%d, y=%d)\" % (\n x, y))\n view = numpy.s_[y, x]\n return self._slice(view)\n\n def _get_dims(self, data):\n height, width = data.shape[:2]\n return (width, height)\n\n def get_metadata(self):\n return self.metadata.copy()\n \n def get_header(self):\n return self.get('header', Header())\n \n def get(self, kwd, *args):\n if kwd in self.metadata:\n return self.metadata[kwd]\n else:\n # return a default if there is one\n if len(args) > 0:\n return args[0]\n raise KeyError(kwd)\n \n def get_list(self, *args):\n return list(map(self.get, args))\n \n def __getitem__(self, kwd):\n return self.metadata[kwd]\n \n def update(self, kwds):\n self.metadata.update(kwds)\n \n def set(self, **kwds):\n self.update(kwds)\n \n def __setitem__(self, kwd, value):\n self.metadata[kwd] = value\n \n def set_data(self, data_np, metadata=None, astype=None):\n \"\"\"Use this method to SHARE (not copy) the incoming array.\n \"\"\"\n if astype:\n data = data_np.astype(astype)\n else:\n data = data_np\n self._data = data\n\n if metadata:\n self.update_metadata(metadata)\n \n self._set_minmax()\n\n self.make_callback('modified')\n\n def get_slice(self, c):\n view = [slice(None)] * self.ndim\n view[-1] = self.order.index(c.upper())\n return self._slice(view)\n\n def has_slice(self, c):\n return c.upper() in self.order\n\n def get_array(self, order):\n order = 
order.upper()\n if order == self.order:\n return self._get_data()\n l = [self.get_slice(c) for c in order]\n return numpy.dstack(l)\n\n def set_order(self, order):\n self.order = order.upper()\n \n def get_order(self):\n return self.order\n \n def get_order_indexes(self, cs):\n cs = cs.upper()\n return [ self.order.index(c) for c in cs ]\n \n def _set_minmax(self):\n data = self._get_fast_data()\n try:\n self.maxval = numpy.nanmax(data)\n self.minval = numpy.nanmin(data)\n except Exception:\n self.maxval = 0\n self.minval = 0\n\n # TODO: see if there is a faster way to ignore infinity\n try:\n if numpy.isfinite(self.maxval):\n self.maxval_noinf = self.maxval\n else:\n self.maxval_noinf = numpy.nanmax(data[numpy.isfinite(data)])\n except:\n self.maxval_noinf = self.maxval\n \n try:\n if numpy.isfinite(self.minval):\n self.minval_noinf = self.minval\n else:\n self.minval_noinf = numpy.nanmin(data[numpy.isfinite(data)])\n except:\n self.minval_noinf = self.minval\n \n def get_minmax(self, noinf=False):\n if not noinf:\n return (self.minval, self.maxval)\n else:\n return (self.minval_noinf, self.maxval_noinf)\n\n def update_metadata(self, keyDict):\n for key, val in keyDict.items():\n self.metadata[key] = val\n\n def transfer(self, other, astype=None):\n data = self._get_data()\n other.set_data(data, metadata=self.metadata, astype=astype)\n \n def copy(self, astype=None):\n data = self.copy_data()\n metadata = self.get_metadata()\n other = self.__class__(data_np=data, metadata=metadata)\n return other\n\n def _slice(self, view):\n return self._get_data()[view]\n\n def cutout_data(self, x1, y1, x2, y2, xstep=1, ystep=1, astype=None):\n \"\"\"cut out data area based on coords. \n \"\"\"\n view = numpy.s_[y1:y2:ystep, x1:x2:xstep]\n data = self._slice(view)\n if astype:\n data = data.astype(astype)\n return data\n \n def cutout_adjust(self, x1, y1, x2, y2, xstep=1, ystep=1, astype=None):\n dx = x2 - x1\n dy = y2 - y1\n \n if x1 < 0:\n x1, x2 = 0, dx\n else:\n if x2 >= self.width:\n x2 = self.width\n x1 = x2 - dx\n \n if y1 < 0:\n y1, y2 = 0, dy\n else:\n if y2 >= self.height:\n y2 = self.height\n y1 = y2 - dy\n\n data = self.cutout_data(x1, y1, x2, y2, xstep=xstep, ystep=ystep,\n astype=astype)\n return (data, x1, y1, x2, y2)\n\n def cutout_radius(self, x, y, radius, xstep=1, ystep=1, astype=None):\n return self.cutout_adjust(x-radius, y-radius,\n x+radius+1, y+radius+1,\n xstep=xstep, ystep=ystep,\n astype=astype)\n\n def cutout_cross(self, x, y, radius):\n \"\"\"Cut two data subarrays that have a center at (x, y) and with\n radius (radius) from (image). 
Returns the starting pixel (x0, y0)\n of each cut and the respective arrays (xarr, yarr).\n \"\"\"\n n = radius\n wd, ht = self.get_size()\n x0, x1 = max(0, x - n), min(wd - 1, x + n)\n y0, y1 = max(0, y - n), min(ht - 1, y + n)\n\n xview = numpy.s_[y, x0:x1 + 1]\n yview = numpy.s_[y0:y1 + 1, x]\n\n xarr = self._slice(xview)\n yarr = self._slice(yview)\n\n return (x0, y0, xarr, yarr)\n\n def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht):\n\n shp = self.shape\n\n (view, (scale_x, scale_y)) = \\\n trcalc.get_scaled_cutout_wdht_view(shp, x1, y1, x2, y2,\n new_wd, new_ht)\n newdata = self._slice(view)\n\n res = Bunch.Bunch(data=newdata, scale_x=scale_x, scale_y=scale_y)\n return res\n\n def get_scaled_cutout_basic(self, x1, y1, x2, y2, scale_x, scale_y):\n new_wd = int(round(scale_x * (x2 - x1 + 1)))\n new_ht = int(round(scale_y * (y2 - y1 + 1)))\n return self.get_scaled_cutout_wdht(x1, y1, x2, y2, new_wd, new_ht)\n\n def get_scaled_cutout_by_dims(self, x1, y1, x2, y2, dst_wd, dst_ht,\n method='basic'):\n if method == 'basic':\n return self.get_scaled_cutout_wdht(x1, y1, x2, y2, dst_wd, dst_ht)\n\n raise ImageError(\"Method not supported: '%s'\" % (method))\n \n def get_scaled_cutout(self, x1, y1, x2, y2, scale_x, scale_y,\n method='basic'):\n if method == 'basic':\n return self.get_scaled_cutout_basic(x1, y1, x2, y2,\n scale_x, scale_y)\n\n raise ImageError(\"Method not supported: '%s'\" % (method))\n\n \n def get_pixels_on_line(self, x1, y1, x2, y2, getvalues=True):\n \"\"\"Uses Bresenham's line algorithm to enumerate the pixels along\n a line.\n (see http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm)\n \"\"\"\n dx = abs(x2 - x1)\n dy = abs(y2 - y1) \n if x1 < x2:\n sx = 1\n else:\n sx = -1\n if y1 < y2:\n sy = 1\n else:\n sy = -1\n err = dx - dy\n\n res = []\n x, y = x1, y1\n while True:\n if getvalues:\n try:\n val = self.get_data_xy(x, y)\n except Exception:\n val = numpy.NaN\n res.append(val)\n else:\n res.append((x, y))\n if (x == x2) and (y == y2):\n break\n e2 = 2 * err\n if e2 > -dy:\n err = err - dy\n x += sx\n if e2 < dx: \n err = err + dx\n y += sy\n\n return res\n\n def histogram(self, x1, y1, x2, y2, z=None, pct=1.0, numbins=2048):\n data = self._get_fast_data()\n if z != None:\n data = data[y1:y2, x1:x2, z]\n else:\n data = data[y1:y2, x1:x2]\n\n return self.autocuts.calc_histogram(data, pct=pct, numbins=numbins)\n\n def cut_levels(self, loval, hival, vmin=0.0, vmax=255.0):\n data = self._get_data()\n data = self.autocuts.cut_levels(data, loval, hival,\n vmin=vmin, vmax=vmax)\n self.set_data(data)\n\n def transform(self, flip_x=False, flip_y=False, swap_xy=False):\n data = self._get_data()\n\n data = trcalc.transform(data, flip_x=flip_x, flip_y=flip_y,\n swap_xy=swap_xy)\n self.set_data(data)\n \n def rotate(self, rot_deg):\n data = self._get_data()\n wd, ht = self._get_dims(data)\n # TODO: allow off-center rotations\n ocx, ocy = wd // 2, ht // 2\n\n # If there is no rotation, then we are done\n if rot_deg == 0.0:\n return\n\n # Make a square from the scaled cutout, with room to rotate\n side = int(math.sqrt(wd**2 + ht**2))\n new_wd = new_ht = side\n dims = (new_ht, new_wd) + data.shape[2:]\n # TODO: fill with a different value?\n newdata = numpy.zeros(dims)\n # Find center of new data array \n ncx, ncy = new_wd // 2, new_ht // 2\n\n # Overlay the old image on the new (blank) image\n ldx, rdx = min(ocx, ncx), min(wd - ocx, ncx)\n bdy, tdy = min(ocy, ncy), min(ht - ocy, ncy)\n\n newdata[ncy-bdy:ncy+tdy, ncx-ldx:ncx+rdx] = \\\n data[ocy-bdy:ocy+tdy, 
ocx-ldx:ocx+rdx]\n\n data = newdata\n wd, ht = self._get_dims(data)\n\n # Rotate the image as necessary\n rotctr_x, rotctr_y = wd // 2, ht // 2\n \n if rot_deg != 0:\n yi, xi = numpy.mgrid[0:ht, 0:wd]\n xi = xi - rotctr_x\n yi = yi - rotctr_y\n cos_t = numpy.cos(numpy.radians(-rot_deg))\n sin_t = numpy.sin(numpy.radians(-rot_deg))\n ap = (xi * cos_t) - (yi * sin_t) + rotctr_x\n bp = (xi * sin_t) + (yi * cos_t) + rotctr_y\n ## ap = numpy.rint(ap).astype('int').clip(0, wd-1)\n ## bp = numpy.rint(bp).astype('int').clip(0, ht-1)\n ap = ap.astype('int').clip(0, wd-1)\n bp = bp.astype('int').clip(0, ht-1)\n newdata = data[bp, ap]\n new_wd, new_ht = self._get_dims(newdata)\n\n assert (wd == new_wd) and (ht == new_ht), \\\n ImageError(\"rotated cutout is %dx%d original=%dx%d\" % (\n new_wd, new_ht, wd, ht))\n wd, ht, data = new_wd, new_ht, newdata\n \n self.set_data(data)\n\n def info_xy(self, data_x, data_y, settings):\n # Get the value under the data coordinates\n try:\n value = self.get_data_xy(int(data_x), int(data_y))\n\n except Exception as e:\n value = None\n\n info = Bunch.Bunch(itype='base', data_x=data_x, data_y=data_y,\n x=data_x, y=data_y,\n value=value)\n return info\n\n\nclass Header(dict):\n\n def __init__(self, *args, **kwdargs):\n super(Header, self).__init__(*args, **kwdargs)\n self.keyorder = []\n\n def __getitem__(self, key):\n bnch = super(Header, self).__getitem__(key)\n return bnch.value\n\n def __setitem__(self, key, value):\n try:\n bnch = super(Header, self).__getitem__(key)\n bnch.value = value\n except KeyError:\n bnch = Bunch.Bunch(key=key, value=value, comment='')\n self.keyorder.append(key)\n super(Header, self).__setitem__(key, bnch)\n return bnch\n\n def __delitem__(self, key):\n super(Header, self).__delitem__(key)\n self.keyorder.remove(key)\n\n def get_card(self, key):\n bnch = super(Header, self).__getitem__(key)\n return bnch\n \n def get_keyorder(self):\n return self.keyorder\n \n def keys(self):\n return self.keyorder\n \n def items(self):\n return [(key, self[key]) for key in self.keys()]\n \n def get(self, key, alt=None):\n try:\n return self.__getitem__(key)\n except KeyError:\n return alt\n\n def update(self, mapKind):\n for key, value in mapKind.items():\n self.__setitem__(key, value)\n \n def asdict(self):\n return dict([(key, self[key]) for key in self.keys()])\n\n#END\n" ]
[ [ "numpy.zeros", "numpy.nanmin", "numpy.radians", "numpy.isfinite", "numpy.dstack", "numpy.nanmax" ] ]
pskrunner14/neural-networks
[ "83d3b41ad4773ee375b8ef9bed736d7e3f2bf334" ]
[ "nn/loss/categorical_crossentropy.py" ]
[ "import numpy as np\nnp.random.seed(42)\n\nfrom .loss import Loss\n\nclass CategoricalCrossentropy(Loss):\n\n def __init__(self):\n super().__init__()\n\n def forward(self, probs, targets):\n pass\n\n def backward(self):\n pass\n\n# def softmax_crossentropy_with_logits(logits, targets):\n# \"\"\"Compute crossentropy from logits[batch,n_classes] and ids of correct answers\"\"\"\n# m = targets.shape[0]\n# p = stable_softmax(logits)\n# log_likelihood = -np.log(p[range(m), targets])\n# loss = np.sum(log_likelihood) / m\n# return loss\n\n# def grad_softmax_crossentropy_with_logits(logits, targets):\n# grad_softmax = grad(softmax_crossentropy_with_logits)\n# return grad_softmax(logits, targets)" ]
[ [ "numpy.random.seed" ] ]
ymei/MimosaBMRDO
[ "e73ce698376a7d38ed17486de217397e66bbc864" ]
[ "Software/util/mainpy.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 20 2017\nThis the main python script for datataking and processing\n@author: Dong Wang\n\"\"\"\nfrom ethernet_t import *\nfrom dataprocess_t import *\nfrom command import *\nfrom i2c_control import *\nimport sys\nimport os\nimport shlex\nimport socket\nimport time\nimport select\nfrom threading import Thread\nfrom multiprocessing import Process, Manager, Lock\nfrom multiprocessing import Value, Array\nfrom multiprocessing import Queue\nimport numpy\nfrom matplotlib import pyplot\nimport matplotlib as mpl\nimport matplotlib.animation as animation\n\nclass Disper():\n def __init__(self):\n self._running = True\n\n def terminate(self):\n self._running = False\n\n def run(self, q_pro_dis, lock):\n while self._running :\n # -- display processing\n fig = pyplot.figure()\n maps = numpy.zeros(shape=(928,960))\n cmap = mpl.colors.ListedColormap(['white','red'])\n bounds=[0,0.5,1]\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n img = pyplot.imshow(maps,interpolation='nearest',cmap = cmap,norm=norm)\n pyplot.colorbar(img,cmap=cmap,norm=norm,boundaries=bounds,ticks=[0,0.5,1])\n def update(*args) :\n global q_pro_dis\n with lock :\n check = not q_pro_dis.empty()\n if check :\n with lock :\n data_dis = q_pro_dis.get()\n img.set_data(data_dis)\n return img\n anim = animation.FuncAnimation(fig, update, interval=100)\n pyplot.show()\n\n\n\n# -- define fifo --\n# -- fifo between ethernet readout to dataprocessing unit\nq_the_pro = Queue()\n# -- fifo between dataprocessing unit to display unit\nq_pro_dis = Queue()\n\n# -- frame counter\nfcount=Value('d', 0)\n\n# -- ethernet processing thread\nlock = Lock()\nsign = Value('d', 1)\ns = socket.socket()\nhost = '192.168.2.3'\nport = 1024\ns.connect((host,port))\n\n# -- MAPS board initial\ncmd = Cmd()\n# -- set threshold\ni2c_ltc2635_thre_vchip(s,cmd,0x5ff)\ni2c_ltc2635_thre_vmim(s,cmd,0x5ff)\ntime.sleep(0.3)\n# -- reset latchup\ni2c_pcf8574_reset_latchup(s,cmd)\ni2c_pcf8574_read_latchup(s,cmd)\ntime.sleep(1)\n\n# write to JTAG\nret = cmd.cmd_write_register(3,0x4)\ns.sendall(ret)\n#ret = cmd.cmd_write_memory_file(\"/home/wangdong/mapstest/JTAG_files/S1_L2.dat\")\nret = cmd.cmd_write_memory_file(\"S1_L2.dat\")\ns.sendall(ret)\ntime.sleep(0.2)\nret = cmd.cmd_send_pulse(0x8)\ns.sendall(ret)\ntime.sleep(0.5)\n\n# -- disp data take processing\ndisper = Disper()\nt_disper = Process(target=disper.run, args=(q_pro_dis,lock))\nt_disper.start()\n# -- data processing Thread\ndataprocesser = Dataprocess()\nt_dataprocesser = Process(target=dataprocesser.run, args=(q_the_pro,q_pro_dis,fcount,lock))\nt_dataprocesser.start()\n# -- start ethernet thread\nsender = SendWorker()\nrecver = RecvWorker()\nt_sender = Process(target=sender.run, args=(s,lock,sign))\nt_recver = Process(target=recver.run, args=(s,lock,sign,q_the_pro))\nt_recver.start()\nt_sender.start()\n\n# for i in range(600):\n# #while True :\n# time.sleep(0.1)\n# if (not q_pro_dis.empty()) :\n# maps = q_pro_dis.get()\n# # img = pyplot.imshow(maps,interpolation='nearest',cmap = cmap,norm=norm)\n# # pyplot.pause(0.1)\n\n# -- Thread ending --\ntime.sleep(30)\nif t_dataprocesser.is_alive():\n dataprocesser.terminate()\n t_dataprocesser.join()\nif t_sender.is_alive():\n sender.terminate()\n time.sleep(1)\n t_sender.join()\ntime.sleep(4)\nif t_recver.is_alive():\n recver.terminate()\n time.sleep(1)\n t_recver.join()\nif t_disper.is_alive():\n disper.terminate()\n t_disper.join()\ns.close()\n" ]
[ [ "matplotlib.pyplot.colorbar", "matplotlib.animation.FuncAnimation", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.colors.BoundaryNorm", "matplotlib.pyplot.show", "matplotlib.colors.ListedColormap", "matplotlib.pyplot.imshow" ] ]
Priyansh-Kedia/drl-RPN-tf-TACC-
[ "9ecb3750e833a2908edb4eb67dc5233889f7b7df" ]
[ "lib/datasets/imdb.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport os.path as osp\nimport PIL\nfrom utils.cython_bbox import bbox_overlaps\nimport numpy as np\nimport scipy.sparse\nfrom model.config import cfg\n\n\nclass imdb(object):\n \"\"\"Image database.\"\"\"\n\n def __init__(self, name, classes=None):\n self._name = name\n self._num_classes = 0\n if not classes:\n self._classes = []\n else:\n self._classes = classes\n self._image_index = []\n self._obj_proposer = 'gt'\n self._roidb = None\n self._roidb_handler = self.default_roidb\n # Use this dict for storing dataset specific config options\n self.config = {}\n\n @property\n def name(self):\n return self._name\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n def set_proposal_method(self, method):\n method = eval('self.' + method + '_roidb')\n self.roidb_handler = method\n\n @property\n def roidb(self):\n # A roidb is a list of dictionaries, each with the following keys:\n # boxes\n # gt_overlaps\n # gt_classes\n # flipped\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n\n @property\n def num_images(self):\n return len(self.image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def default_roidb(self):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None, start_idx=None,\n end_idx=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n def _get_widths(self):\n print(\"path is\", [self.image_path_at(i) for i in range(self.num_images)])\n return [PIL.Image.open(self.image_path_at(i)).size[0]\n for i in range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes,\n 'gt_overlaps': self.roidb[i]['gt_overlaps'],\n 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n def evaluate_recall(self, candidate_boxes=None, thresholds=None,\n area='all', limit=None):\n \"\"\"Evaluate detection proposal recall metrics.\n\n Returns:\n results: dictionary of results with keys\n 'ar': average recall\n 'recalls': vector recalls at each IoU overlap threshold\n 
'thresholds': vector of IoU overlap thresholds\n 'gt_overlaps': vector of all ground-truth overlaps\n \"\"\"\n # Record max overlap value for each gt box\n # Return vector of overlap values\n areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,\n '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}\n area_ranges = [[0 ** 2, 1e5 ** 2], # all\n [0 ** 2, 32 ** 2], # small\n [32 ** 2, 96 ** 2], # medium\n [96 ** 2, 1e5 ** 2], # large\n [96 ** 2, 128 ** 2], # 96-128\n [128 ** 2, 256 ** 2], # 128-256\n [256 ** 2, 512 ** 2], # 256-512\n [512 ** 2, 1e5 ** 2], # 512-inf\n ]\n assert area in areas, 'unknown area range: {}'.format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = np.zeros(0)\n num_pos = 0\n for i in range(self.num_images):\n # Checking for max_overlaps == 1 avoids including crowd annotations\n # (...pretty hacking :/)\n max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)\n gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &\n (max_gt_overlaps == 1))[0]\n gt_boxes = self.roidb[i]['boxes'][gt_inds, :]\n gt_areas = self.roidb[i]['seg_areas'][gt_inds]\n valid_gt_inds = np.where((gt_areas >= area_range[0]) &\n (gt_areas <= area_range[1]))[0]\n gt_boxes = gt_boxes[valid_gt_inds, :]\n num_pos += len(valid_gt_inds)\n\n if candidate_boxes is None:\n # If candidate_boxes is not supplied, the default is to use the\n # non-ground-truth boxes from this roidb\n non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]\n boxes = self.roidb[i]['boxes'][non_gt_inds, :]\n else:\n boxes = candidate_boxes[i]\n if boxes.shape[0] == 0:\n continue\n if limit is not None and boxes.shape[0] > limit:\n boxes = boxes[:limit, :]\n\n overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n\n _gt_overlaps = np.zeros((gt_boxes.shape[0]))\n for j in range(gt_boxes.shape[0]):\n # find which proposal box maximally covers each gt box\n argmax_overlaps = overlaps.argmax(axis=0)\n # and get the iou amount of coverage for each gt box\n max_overlaps = overlaps.max(axis=0)\n # find which gt box is 'best' covered (i.e. 
'best' = most iou)\n gt_ind = max_overlaps.argmax()\n gt_ovr = max_overlaps.max()\n assert (gt_ovr >= 0)\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert (_gt_overlaps[j] == gt_ovr)\n # mark the proposal box and the gt box as used\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n # append recorded iou coverage level\n gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))\n\n gt_overlaps = np.sort(gt_overlaps)\n if thresholds is None:\n step = 0.05\n thresholds = np.arange(0.5, 0.95 + 1e-5, step)\n recalls = np.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,\n 'gt_overlaps': gt_overlaps}\n\n def create_roidb_from_box_list(self, box_list, gt_roidb):\n assert len(box_list) == self.num_images, \\\n 'Number of boxes must match number of ground-truth images'\n roidb = []\n for i in range(self.num_images):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)\n\n if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:\n gt_boxes = gt_roidb[i]['boxes']\n gt_classes = gt_roidb[i]['gt_classes']\n gt_overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n argmaxes = gt_overlaps.argmax(axis=1)\n maxes = gt_overlaps.max(axis=1)\n I = np.where(maxes > 0)[0]\n overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n roidb.append({\n 'boxes': boxes,\n 'gt_classes': np.zeros((num_boxes,), dtype=np.int32),\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': np.zeros((num_boxes,), dtype=np.float32),\n })\n return roidb\n\n @staticmethod\n def merge_roidbs(a, b):\n assert len(a) == len(b)\n for i in range(len(a)):\n a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))\n a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],\n b[i]['gt_classes']))\n a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],\n b[i]['gt_overlaps']])\n a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],\n b[i]['seg_areas']))\n return a\n\n def competition_mode(self, on):\n \"\"\"Turn competition mode on or off.\"\"\"\n pass\n" ]
[ [ "numpy.zeros_like", "numpy.zeros", "numpy.where", "numpy.arange", "numpy.sort", "numpy.hstack", "numpy.vstack" ] ]
tf2gan/vae-gan-code-for-reinforced-panel
[ "b4d407b999551ad5fd965cd2bb18c1e5bbba3449" ]
[ "vae_tfrecord_batch.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 9 19:08:30 2021\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport glob\r\nimport os\r\nimport tensorflow as tf\r\nfrom PIL import Image\r\nfrom math import floor\r\n# data_path='/data-input/pic2000'# windows里好像这个\\\\比较管用,与os相适配\r\ndata_path='I:\\\\zhangkunpeng\\\\bian_Cjin\\\\Generate_Images'# windows里好像这个\\\\比较管用,与os相适配\r\npic_list=glob.glob(os.path.join(data_path,'*.tiff'))\r\n\r\n\r\n\r\n# The following functions can be used to convert a value to a type compatible\r\n# with tf.Example.\r\n\r\ndef _bytes_feature(value):\r\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\r\n if isinstance(value, type(tf.constant(0))):\r\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\r\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\r\n\r\ndef _float_feature(value):\r\n \"\"\"Returns a float_list from a float / double.\"\"\"\r\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\r\n\r\ndef _int64_feature(value):\r\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\r\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\r\n\r\n\r\nbatch_size = 128\r\n# 当下任务,生成tfrecord批次文件,到时候每个batch直接一个文件放进去\r\nbatch_num = len(pic_list)//batch_size # 这里比较巧正好整除了,不用再加1\r\n# 循环生成batch文件\r\nfor i in range(batch_num):\r\n record_file = 'data/images_batch{}.tfrecords'.format(i)\r\n with tf.io.TFRecordWriter(record_file) as writer:\r\n for path in pic_list[i*batch_size:(i+1)*batch_size]:\r\n # image_string = open(path, 'rb').read() 这直接读好像对tiff格式的图片会出错,InvalidArgumentError: Input to reshape is a tensor with 2360 values, but the requested shape has 12288 [Op:Reshape]\r\n img=Image.open(path,'r')\r\n image_string = img.tobytes()\r\n image_shape = img.size # (64, 64)\r\n feature = {\r\n 'height': _int64_feature(image_shape[0]),\r\n 'width': _int64_feature(image_shape[1]),\r\n 'image_raw': _bytes_feature(image_string),}\r\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\r\n writer.write(tf_example.SerializeToString())\r\n writer.close()" ]
[ [ "tensorflow.train.BytesList", "tensorflow.train.FloatList", "tensorflow.train.Int64List", "tensorflow.train.Features", "tensorflow.constant", "tensorflow.io.TFRecordWriter" ] ]
jubick1337/NeMo
[ "9d50733ba0e698b98d0019e9e697686e0a24b90e" ]
[ "scripts/nlp_language_modeling/build_knn_map_index.py" ]
[ "# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis is the script to build KNN index map from Training dataset to Retrieval dataset.\nFor example, it maps chunk_id i from training dataset to K chunk ids in the nearest neighbor in the retrieval dataset.\n\nIt requires the training text data to be converted into `bin` and `idx` files by `preprocess_data_for_megatron.py` script.\nIt also requires the Faiss Index file for the Retrieval dataset built by `build_retrieval_index.py` script.\n\nHere is an example to using it:\n\n```python\npython scripts/nlp_language_modeling/build_knn_map_index.py \\\n --input_file=PATH_TO_INPUT_TRAINING_DATA \\\n --tokenizer-library=sentencepiece \\\n --tokenizer-model=tokenizer.model \\\n --process_chunk_size=51200 \\\n --K_neighbors=16 \\\n --faiss_index=PATH_TO_FAISS_INDEX_FILE \\\n --devices=0,1,2,3 \\\n --batch_size=1280 \\\n --remove_duplicate \\\n --output_file=knn_map.idx \n```\nUse `--remove_duplicate` flag if the data and retrieval dataset are the same. It will remove the neighbors from the same document.\nIt creates a knn_map.idx KNNIndex file.\nDuring training of RETRO model, it can look up the KNN chunk ids of the\nDB dataset given the input training data chunk id. 
\n\n\"\"\"\nimport argparse\nimport multiprocessing\n\nimport faiss\nimport numpy as np\nfrom numba import njit, prange\nfrom sentence_transformers import SentenceTransformer\n\nfrom nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (\n KNNIndex,\n MMapRetrievalIndexedDataset,\n)\nfrom nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer\nfrom nemo.utils import logging\n\nQUEUE_SIZE = 30\n\nqueue = multiprocessing.Queue(QUEUE_SIZE)\nemb_queue = multiprocessing.Queue(QUEUE_SIZE)\n\n\n@njit(parallel=True)\ndef build_map(chunk_start, result, total_chunks):\n \"\"\"\n build the map from chunk_id to document id\n \"\"\"\n size = len(chunk_start)\n for i in prange(size):\n beg = chunk_start[i]\n end = chunk_start[i + 1] if i < size - 1 else total_chunks\n result[beg:end] = i\n\n\n@njit(parallel=True)\ndef dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, chunk_id_start):\n \"\"\"\n deduplicate the KNN who are from the same document as the data chunks.\n chunk_id_to_doc_id_map is calculated by build_map function.\n I is original KNN search result from Faiss.\n chunk_id_start is the chunk_id offset.\n\n filtered KNN will be stored in the tmp_neighbors\n \"\"\"\n for cid in prange(len(I)):\n source_doc_id = chunk_id_to_doc_id_map[chunk_id_start + cid]\n position = 0\n for target_chunk_id in I[cid]:\n if chunk_id_start + cid == target_chunk_id:\n continue\n target_doc_id = chunk_id_to_doc_id_map[target_chunk_id]\n if source_doc_id != target_doc_id:\n tmp_neighbors[cid, position] = target_chunk_id\n position += 1\n\n\ndef get_tokenizer(args):\n tokenizer = get_nmt_tokenizer(\n library=args.tokenizer_library,\n model_name=args.tokenizer_type,\n tokenizer_model=args.tokenizer_model,\n vocab_file=args.vocab_file,\n merges_file=args.merge_file,\n delimiter=args.delimiter,\n )\n if not hasattr(tokenizer, \"pad_id\"):\n tokenizer.add_special_tokens({'pad_token': '<pad>'})\n elif hasattr(tokenizer, \"pad_id\") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):\n tokenizer.add_special_tokens({'pad_token': '<pad>'})\n return tokenizer\n\n\ndef process_sentence_chunks(ds: MMapRetrievalIndexedDataset, tokenizer, chunk_size: int):\n total_chunks = ds.chunks\n start = 0\n threshold = 0\n while start < total_chunks:\n if start / total_chunks > threshold:\n logging.info(f\"sentence processing {start / total_chunks} is done\")\n threshold += 0.1\n id_slices = ds.get_chunk(slice(start, min(start + chunk_size, total_chunks)), force_no_cont_ids=True)\n start = min(start + chunk_size, total_chunks)\n sentences = [tokenizer.ids_to_text(ids) for ids in id_slices]\n queue.put(sentences)\n queue.put(None)\n\n\ndef get_sentence_chunks():\n return queue.get()\n\n\ndef calculate_embedding(pool, batch_size):\n while True:\n sentences = get_sentence_chunks()\n if sentences is None:\n break\n emb = model.encode_multi_process(sentences=sentences, pool=pool, batch_size=batch_size)\n emb_queue.put(emb)\n emb_queue.put(None)\n\n\ndef get_emb():\n return emb_queue.get()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"build Faiss index\",)\n parser.add_argument(\n '--input_file', type=str, required=True, help='Input file',\n )\n parser.add_argument(\"--faiss_index\", type=str, required=True, help='faiss index file for retrieval dataset')\n parser.add_argument(\n '--process_chunk_size',\n type=int,\n default=10000,\n help='The sentences in chunks that is queries to build map index',\n )\n parser.add_argument(\n '--remove_duplicate',\n 
action='store_true',\n help='Remove the knn neighbors that is from the same document as the data.',\n )\n parser.add_argument(\n '--K_neighbors', type=int, default=16, help='The number of neighbors to query',\n )\n parser.add_argument(\n '--dedup_margin',\n type=int,\n default=2,\n help='extra neighbors to fill the spaces of the chunks in the duplicated documents',\n )\n parser.add_argument(\n '--sentence_transformer_model',\n type=str,\n default='bert-base-nli-mean-tokens',\n help='sentence transformer to load',\n )\n parser.add_argument(\n '--output_file', type=str, required=True, help='Output KNN Map index file',\n )\n parser.add_argument(\n '--devices', type=str, default=None, help='delimited list input with cuda devices. Specify like 0,1,2'\n )\n parser.add_argument(\n \"--batch_size\", type=int, default=4000, help=\"Batch size for encoding. Use max according to GPU MEM\"\n )\n group = parser.add_argument_group(title='tokenizer')\n group.add_argument(\n '--tokenizer-library',\n type=str,\n required=True,\n choices=['yttm', 'sentencepiece', 'megatron', 'huggingface', 'tabular'],\n help='What tokenizer library to use.',\n )\n group.add_argument(\n '--tokenizer-type', type=str, default=None, help='What type of tokenizer to use.',\n )\n group.add_argument(\n '--tokenizer-model', type=str, default=None, help='Path to tokenizer model.',\n )\n group.add_argument('--vocab-file', type=str, default=None, help='Path to the vocab file')\n group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).')\n group.add_argument('--delimiter', type=str, default=None, help='delimiter used for tabular tokenizer')\n\n args = parser.parse_args()\n model = SentenceTransformer(args.sentence_transformer_model)\n tokenizer = get_tokenizer(args)\n ds = MMapRetrievalIndexedDataset(args.input_file)\n\n index = faiss.read_index(args.faiss_index)\n\n process = multiprocessing.Process(target=process_sentence_chunks, args=(ds, tokenizer, args.process_chunk_size))\n process.start()\n\n if args.devices is None:\n device_list = None\n else:\n device_list = ['cuda:' + str(device) for device in args.devices.split(',')]\n\n pool = model.start_multi_process_pool(device_list)\n\n emb_process = multiprocessing.Process(target=calculate_embedding, args=(pool, args.batch_size))\n emb_process.start()\n\n if ds._index.retrieval_db and args.remove_duplicate:\n neighbors = args.K_neighbors + args.dedup_margin\n # build the id maps for quick dedup\n id_start = np.array(ds._index._chunk_id_start)\n chunk_id_to_doc_id_map = np.zeros((ds.chunks,), dtype=np.int64)\n build_map(id_start, chunk_id_to_doc_id_map, ds.chunks)\n else:\n neighbors = args.K_neighbors\n\n chunk_id_start = 0\n with KNNIndex.writer(args.output_file, args.K_neighbors) as w:\n while True:\n emb = get_emb()\n if emb is None:\n break\n D, I = index.search(emb, neighbors)\n if ds._index.retrieval_db and args.remove_duplicate:\n tmp_neighbors = np.ones_like(I) * -1\n dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, chunk_id_start)\n I = tmp_neighbors[:, : args.K_neighbors]\n chunk_id_start += len(I)\n w.write(I)\n\n process.join()\n emb_process.join()\n model.stop_multi_process_pool(pool)\n" ]
[ [ "numpy.array", "numpy.ones_like", "numpy.zeros" ] ]
FilthyFrankTheGoanimator/Voice
[ "0bf3570bd6b376c936ea9f04fc15f129e738b168" ]
[ "synthesis/vocoders/hifigan.py" ]
[ "import json\nimport torch\n\nfrom synthesis.vocoders.hifigan_model import Generator\nfrom synthesis.vocoders.vocoder import Vocoder, MAX_WAV_VALUE\n\n\nclass AttrDict(dict):\n \"\"\"\n Credit: https://github.com/jik876/hifi-gan\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n\nclass Hifigan(Vocoder):\n def __init__(self, model_path, config_path):\n with open(config_path) as f:\n data = f.read()\n\n # Use GPU if available\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n h = AttrDict(json.loads(data))\n self.model = Generator(h).to(device)\n\n checkpoint_dict = torch.load(model_path, map_location=device)\n self.model.load_state_dict(checkpoint_dict[\"generator\"])\n self.model.eval()\n self.model.remove_weight_norm()\n\n def generate_audio(self, mel_output):\n with torch.no_grad():\n if torch.cuda.is_available():\n mel_output = mel_output.type(torch.cuda.FloatTensor)\n\n y_g_hat = self.model(mel_output)\n audio = y_g_hat.squeeze()\n audio = audio * MAX_WAV_VALUE\n audio = audio.cpu().numpy().astype(\"int16\")\n return audio\n" ]
[ [ "torch.device", "torch.no_grad", "torch.cuda.is_available", "torch.load" ] ]
tusikalanse/Machine-Learning-for-Software-Engineers
[ "59415e2db98ba2a1d97a55560d1c0cb8567b1725" ]
[ "Data-Analysis/Fliters.py" ]
[ "import pandas as pd\n\ndf = pd.DataFrame({\n 'playerID': ['bettsmo01', 'canoro01', 'cruzne02', 'ortizda01', 'bettsmo01'],\n 'yearID': [2016, 2016, 2016, 2016, 2015],\n 'teamID': ['BOS', 'SEA', 'SEA', 'BOS', 'BOS'],\n 'HR': [31, 39, 43, 38, 18]})\n\nprint(df['HR'] > 40)\n# 0 False\n# 1 False\n# 2 True\n# 3 False\n# 4 False\n# Name: HR, dtype: bool\n\nprint(df[df['HR'] > 40])\n# HR playerID teamID yearID\n# 2 43 cruzne02 SEA 2016\n\n##==, !=, <, >, <=, >=, isna, notna, isin\n\nstr_f1 = df['playerID'].str.startswith('c')\nprint(str_f1)\n# 0 False\n# 1 True\n# 2 True\n# 3 False\n# 4 False\n# Name: playerID, dtype: bool\n##endswith, contains" ]
[ [ "pandas.DataFrame" ] ]
wannaphongcom/numfa_server
[ "717ed6cf673f93eb714e6d915d868d0acd30e85e" ]
[ "framework/new/dl2.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\nimport pickle\nfrom keras.models import Sequential\nfrom pythainlp.word_vector import thai2vec\nfrom keras.layers.recurrent import LSTM,SimpleRNN\nfrom sklearn.model_selection import train_test_split\n\nwith open(\"db.pickle\", 'rb') as f:\n vec_x,vec_y=pickle.load(f)\n\nvec_x=np.array(vec_x,dtype=np.float64)\nvec_y=np.array(vec_y,dtype=np.float64)\n\nx_train,x_test,y_train,y_test = train_test_split(vec_x,vec_y,test_size=0.2,random_state=1)\n\nmodel = Sequential()\nprint(x_train.shape)\nmodel.add(LSTM(output_dim=300,input_shape=x_train.shape[1:],return_sequences=True, init='glorot_normal', inner_init='glorot_normal', activation='sigmoid'))\nmodel.add(LSTM(output_dim=300,input_shape=x_train.shape[1:],return_sequences=True, init='glorot_normal', inner_init='glorot_normal', activation='sigmoid'))\nmodel.add(LSTM(output_dim=300,input_shape=x_train.shape[1:],return_sequences=True, init='glorot_normal', inner_init='glorot_normal', activation='sigmoid'))\nmodel.add(LSTM(output_dim=300,input_shape=x_train.shape[1:],return_sequences=True, init='glorot_normal', inner_init='glorot_normal', activation='sigmoid'))\nmodel.compile(loss='cosine_proximity', optimizer='adam', metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM500.h5')\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM1000.h5')\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM1500.h5')\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM2000.h5')\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM2500.h5')\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM3000.h5')\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM3500.h5')\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM4000.h5')\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM4500.h5')\nmodel.fit(x_train, y_train, nb_epoch=500,validation_data=(x_test, y_test))\nmodel.save('LSTM5000.h5') \npredictions=model.predict(x_test) \nmod = thai2vec.get_model()\n[mod.most_similar([predictions[10][i]])[0] for i in range(15)]" ]
[ [ "sklearn.model_selection.train_test_split", "numpy.array" ] ]
ssdavidson/reddit_incivility
[ "650e4ce9c5e1b1f37fe421eeab4f2643a5f3930e" ]
[ "log_regression/run_regression_large2.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model.logistic import LogisticRegression\n#from sklearn.externals import joblib\nfrom sklearn.model_selection import train_test_split, cross_val_score\nimport csv, pickle, time, sys\n#import spacy\n#from imblearn.over_sampling import ADASYN\n#from imblearn.over_sampling import SMOTE\nfrom CustomIterableDataset import CustomIterableDataset\nfrom torch.utils.data import DataLoader\n\nclassifier = pickle.load(open('models/model.pkl', mode='rb'))\nvectorizer = pickle.load(open('models/vectorizer.pkl', mode='rb'))\n\n#vectorizer = TfidfVectorizer(vocabulary=pickle.load(open('models/tfidf_vocab.pkl', mode='rb')))\n\nclassifier.n_jobs = 16\nvectorizer.n_jobs = 16\n\nbatch_size = 20000\n\ndataset = CustomIterableDataset(sys.argv[1])\ndataloader = DataLoader(dataset, batch_size = batch_size)\n\ncsv_writer = csv.writer(open('predictions/' + sys.argv[2], mode='w'), delimiter='\\t', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\nfor data in dataloader:\n\n print('Predicting labels for 5000 test sentences...')\n# print()\n# print(data)\n t0 = time.time()\n\n#encode inputs using BERT tokenizer\n labels = data[3]\n subreddit_list = data[1]\n subreddit_id_list = data[2]\n comments = data[0]\n\n# for comment in data:\n #print(comment)\n #print(\"BREAK\")\n\n# if comment[1] != '':\n# text = comment[0]\n# label = comment[3]\n# subreddit = comment[1]\n# subreddit_id = comment[2]\n\n# labels.append(label)\n# subreddit_list.append(subreddit)\n# subreddit_id_list.append(subreddit_id)\n# comments.append(text)\n# else:\n# continue\n\n #print(comments)\n\n data = {'comment':comments, 'subreddit_list':subreddit_list, 'subreddit_id_list':subreddit_id_list}\n# data = pd.DataFrame(zip(comments, subreddit_list, subreddit_id_list))\n data = pd.DataFrame(data)\n data.dropna(axis=0, inplace=True)\n\n# print(data)\n\n comments = data['comment']\n #print(comments)\n comments_lower = comments.str.lower()\n subreddit_list = data['subreddit_list']\n subreddit_id_list = data['subreddit_id_list']\n\n X_test = vectorizer.transform(comments_lower)\n predictions = classifier.predict(X_test)\n\n for comment, prediction, subreddit, subreddit_id in zip(comments, predictions, subreddit_list, subreddit_id_list):\n try:\n csv_writer.writerow([comment, prediction, subreddit, subreddit_id])\n except:\n continue\n\n print(' DONE.')\n print(\" Inference took: {:}\".format(time.time() - t0))\n\n\n#mapping function to process input\ndef line_mapper(self, line):\n\n #Splits the line into text and label and applies preprocessing to the text\n\n try:\n data = yaml.load(line, Loader=Loader)\n except:\n return ('','','',0)\n\n if data['author'] == 'AutoModerator':\n return ('', '', '', 0)\n\n text = data['body']\n subreddit = data['subreddit']\n subreddit_id = data['subreddit_id']\n text = self.preprocess(text)\n label = 0\n\n# print((text, subreddit, subreddit_id, label))\n\n return (text, subreddit, subreddit_id, label)\n" ]
[ [ "pandas.DataFrame", "torch.utils.data.DataLoader" ] ]
pgruening/dl_bio_example
[ "7af124df5b2dd4e6cc63d90f4e75680a187fc98c" ]
[ "experiments/model_search/exe_evaluate_model_search.py" ]
[ "from DLBio.helpers import MyDataFrame, search_rgx, load_json, check_mkdir\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom os.path import join\nfrom DLBio.kwargs_translator import get_kwargs\n\nBASE_FOLDER = 'experiments/model_search' # the folder of the experiment\nMODEL_FOLDER = join(BASE_FOLDER, 'trained_models')\nIMAGE_FOLDER = join(BASE_FOLDER, 'boxplots')\n\n# regular expression to find all folders that contain trained models\nRGX = r'\\d\\d\\d\\d'\n\n# Create groups that are separated by these keywords. Aggregation over all\n# seeds.\nGROUP_KEYS = ['lr', 'num_layers', 'kernel_size', 'hidden_dim']\n# aggregate these keywords: compute the mean and standard deviation\nAGG = {\n 'last_train_error': ('mean', 'std'), 'last_val_error': ('mean', 'std'),\n 'min_val_error': ('mean', 'std')\n}\n\n\ndef run():\n # find all folders matching the regular expression in model folder\n folders_ = search_rgx(RGX, MODEL_FOLDER)\n assert folders_\n\n # create a dataframe: a table with all results\n df = MyDataFrame()\n for folder in folders_:\n folder = join(MODEL_FOLDER, folder)\n df = update(df, folder)\n\n # convert to a pandas Dataframe\n df = df.get_df()\n # aggregate and sort by specific keys\n df = df.groupby(GROUP_KEYS, as_index=False).agg(AGG)\n df = df.sort_values(\n [('last_val_error', 'mean'), ('min_val_error', 'mean')]\n )\n create_boxplots(df)\n\n # save as comma-separated file\n df.to_csv(join(BASE_FOLDER, 'results.csv'))\n\n # write the ten best configurations as a markdown table that you can\n # copy and paste into the README.md file directly\n with open(join(BASE_FOLDER, 'table.md'), 'w') as file:\n file.write(df.head(10).to_markdown())\n\n\ndef create_boxplots(df):\n # create boxplots for different keys\n for y_key in ['last_train_error', 'last_val_error', 'min_val_error']:\n y_key = tuple([y_key, 'mean'])\n for x_key in ['lr', 'num_layers', 'kernel_size', 'hidden_dim']:\n plt.figure()\n\n sns.boxplot(data=df, x=x_key, y=y_key)\n plt.grid()\n plt.tight_layout()\n\n out_path = join(IMAGE_FOLDER, f'bp_{x_key}_{y_key}.png')\n check_mkdir(out_path)\n plt.savefig(out_path)\n plt.close()\n\n\ndef update(df, folder):\n log = load_json(join(folder, 'log.json'))\n if log is None:\n return df\n\n opt = load_json(join(folder, 'opt.json'))\n if opt is None:\n return df\n\n model_kw = get_kwargs(opt['model_kw'])\n\n df.update({\n 'lr': opt['lr'],\n 'num_layers': model_kw['num_layers'][0],\n 'kernel_size': model_kw['kernel_size'][0],\n 'hidden_dim': model_kw['hidden_dim'][0],\n 'last_train_error': log['er'][-1],\n 'last_val_error': log['val_er'][-1],\n 'min_val_error': min(log['val_er']),\n 'seed': opt['seed']\n })\n\n return df\n\n\nif __name__ == \"__main__\":\n run()\n" ]
[ [ "matplotlib.pyplot.grid", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout" ] ]
Sirish07/opencv
[ "4b6047e746f06ce3e595c886cf6c0266498c6a67" ]
[ "apps/opencv_stitching_tool/opencv_stitching/camera_adjuster.py" ]
[ "from collections import OrderedDict\nimport cv2 as cv\nimport numpy as np\n\nfrom .stitching_error import StitchingError\n\n\nclass CameraAdjuster:\n \"\"\"https://docs.opencv.org/master/d5/d56/classcv_1_1detail_1_1BundleAdjusterBase.html\"\"\" # noqa\n\n CAMERA_ADJUSTER_CHOICES = OrderedDict()\n CAMERA_ADJUSTER_CHOICES['ray'] = cv.detail_BundleAdjusterRay\n CAMERA_ADJUSTER_CHOICES['reproj'] = cv.detail_BundleAdjusterReproj\n CAMERA_ADJUSTER_CHOICES['affine'] = cv.detail_BundleAdjusterAffinePartial\n CAMERA_ADJUSTER_CHOICES['no'] = cv.detail_NoBundleAdjuster\n\n DEFAULT_CAMERA_ADJUSTER = list(CAMERA_ADJUSTER_CHOICES.keys())[0]\n DEFAULT_REFINEMENT_MASK = \"xxxxx\"\n\n def __init__(self,\n adjuster=DEFAULT_CAMERA_ADJUSTER,\n refinement_mask=DEFAULT_REFINEMENT_MASK):\n\n self.adjuster = CameraAdjuster.CAMERA_ADJUSTER_CHOICES[adjuster]()\n self.set_refinement_mask(refinement_mask)\n self.adjuster.setConfThresh(1)\n\n def set_refinement_mask(self, refinement_mask):\n mask_matrix = np.zeros((3, 3), np.uint8)\n if refinement_mask[0] == 'x':\n mask_matrix[0, 0] = 1\n if refinement_mask[1] == 'x':\n mask_matrix[0, 1] = 1\n if refinement_mask[2] == 'x':\n mask_matrix[0, 2] = 1\n if refinement_mask[3] == 'x':\n mask_matrix[1, 1] = 1\n if refinement_mask[4] == 'x':\n mask_matrix[1, 2] = 1\n self.adjuster.setRefinementMask(mask_matrix)\n\n def adjust(self, features, pairwise_matches, estimated_cameras):\n b, cameras = self.adjuster.apply(features,\n pairwise_matches,\n estimated_cameras)\n if not b:\n raise StitchingError(\"Camera parameters adjusting failed.\")\n\n return cameras\n" ]
[ [ "numpy.zeros" ] ]
jsa214/CogAlg
[ "ca7d03c392f423fd4e3be9fa7a546bf3c4098621" ]
[ "frame_2D_alg/intra_blob_debug.py" ]
[ "import numpy as np\nimport numpy.ma as ma\nfrom comp_angle import comp_angle\nfrom comp_deriv import comp_deriv\nfrom comp_range import comp_range\n# from comp_P_ import comp_P_\nfrom filters import get_filters\nget_filters(globals()) # imports all filters at once\nfrom generic_branch import master_blob\n\n'''\n this function is under revision\n intra_blob() evaluates for recursive frame_blobs() and comp_P() within each blob,\n combined with frame_blobs(), it forms a 2D version of 1st-level algorithm\n \n inter_sub_blob() will compare sub_blobs of same range and derivation within higher-level blob, bottom-up ) top-down:\n inter_level() will compare between blob levels, where lower composition level is integrated by inter_subb\n match between levels' edges may form composite blob, axis comp if sub_blobs within blob margin?\n inter_blob() comparison will be second-level 2D algorithm, and a prototype for recursive meta-level algorithm\n \n blob = \n typ, # blob types: g_angle = 0, ga_angle = 1, gg = 2, range_g = 3; typ = 0 is also default, for primary blobs \n sign, Y, X, \n Derts = \n [ Dert = Ly, L, I, Dx, Dy, G ], # +1 Dert per layer above root blob\n derts_ = \n [ derts = \n [ dert = i, dx, dy, g ] ], # +1 dert per layer above root blob\n sub_blobs = \n Derts, sub_blob_, # Derts and sub_blob structure is same as in root blob, optional \n layers = \n Derts, layer_, # array of horizontal slices across derivation tree per root blob, optional\n \n all Dert params are summed over params of all elements of a given structure \n typ def: ga_sign? ( Ga? der_blobs(a)) : G * -Ga? der_blobs(i) : G - (G * -Ga)? rng_blobs(i)\n'''\n\ndef intra_blob_root(frame): # simplified initial branch() and eval_layer() call\n\n for blob in frame.blob_:\n if blob.sign and blob.Derts[-1][-1] > ave_blob: # g > var_cost and G > fix_cost of hypot_g: noisy or directional gradient\n master_blob(blob, hypot_g, add_dert=False) # this branch only redefines g as hypot(dx, dy)\n\n if blob.Derts[-1][-1] > ave_blob * 2: # G > fixed costs of comp_angle\n val_ = []\n for sub_blob in blob.sub_blob_:\n if sub_blob.sign and sub_blob.Derts[-1][-1] > ave_blob * 2: # G > fixed costs of comp_angle\n master_blob( sub_blob, comp_angle) # converts sub_blob to master ablob, no type rdn: no eval_layer\n\n for ablob in sub_blob.sub_blob_: # eval / ablob: unique, def / ga sign, vs. 
rdn ablob_ || xblob_ if / gblob\n Ly, L, I, Dx, Dy, G = ablob.Derts[-2] # Derts include params of all higher layers\n Lya, La, A, Dxa, Dya, Ga = ablob.Derts[-1]\n\n val_angle = Ga # value of comp_ga -> gga, eval comp_angle(dax, day), next layer / aga_blob\n val_deriv = ((G + ave * L) / ave * L) * -Ga # relative G * -Ga: angle match, likely edge\n val_range = G - val_deriv # non-directional G: likely d reversal, distant-pixels match\n\n # estimated next-layer values per ablob:\n val_ += [(val_angle, 0, sub_blob), (val_deriv, 1, sub_blob), (val_range, 2, sub_blob)]\n if val_:\n eval_layer(val_, 2) # rdn = 2: + ablobs\n\n return frame # frame of 2D patterns is output to level 2\n\n\ndef branch(blob, typ): # compute branch, evaluate next-layer branches: comp_angle, comp_ga, comp_deriv, comp_range\n vals = []\n\n if typ == 0: master_blob(blob, comp_deriv, 1) # comp over ga_: 1 selects angle_Dert at Derts[1]\n elif typ == 1: master_blob(blob, comp_deriv, 0) # recursive comp over g_ with incremental derivation\n else: master_blob(blob, comp_range, 0) # recursive comp over i_ with incremental distance\n\n if blob.Derts[-1][-1] > ave_blob * 2: # G > fixed costs of comp_angle\n master_blob(blob, comp_angle) # converts blob into master ablob, no lateral xtype rdn: no eval_layer\n\n for ablob in blob.sub_blob_: # eval / ablob: unique, def / ga sign, vs. rdn ablob_ || xblob_ if / gblob\n Ly, L, I, Dx, Dy, G = ablob.Derts[-2] # Derts include params of all higher-slices\n Lya, La, A, Dxa, Dya, Ga = ablob.Derts[-1]\n\n val_angle = Ga # value of comp_ga -> gga, eval comp_angle(dax, day), next layer / aga_blob\n val_deriv = ((G + ave * L) / ave * L) * -Ga # relative G * -Ga: angle match, likely edge\n val_range = G - val_deriv # non-directional G: likely d reversal, distant-pixels match\n\n # estimated next-layer values per ablob:\n vals += [(val_angle, 0, blob), (val_deriv, 1, blob), (val_range, 2, blob)]\n\n return vals # blob is converted into master_blob with added Dert[-1]\n\n\ndef eval_layer(val_, rdn): # val_: estimated values of active branches in current layer across recursion tree per blob\n\n val_ = sorted(val_, key=lambda val: val[0])\n sub_val_ = [] # estimated branch values of deeper layer of recursion tree per blob\n map_ = [] # blob boxes + maps of stronger branches in val_, appended for next (lower) val evaluation\n\n while val_:\n val, typ, blob = val_.pop()\n for box, map in map_:\n olp = overlap(blob, box, map)\n rdn += 1 * (olp / blob.params[1]) # rdn += 1 * (olp / G): redundancy to previous + stronger overlapping blobs, * branch cost ratio?\n\n if val > ave * blob.params[1] * rdn + ave_blob: # val > ave * G * rdn + fix_cost: extend master blob syntax: += branch syntax\n for sub_blob in blob.sub_blob_: # sub_blobs are angle blobs\n\n sub_vals = branch(sub_blob, typ) # branch-specific recursion step\n if sub_vals: # not empty []\n sub_val_ += sub_vals\n\n map_.append((blob.box, blob.map))\n else:\n break\n\n if sub_val_: # not empty []\n rdn += 1 # ablob redundancy to default gblob, or rdn += 2 for additional cost of angle calc?\n eval_layer(sub_val_, rdn) # evaluation of each sub_val for recursion\n\n ''' \n comp_P_(val, 0, blob, rdn) -> (val_PP_, 4, blob), (val_aPP_, 5, blob),\n val_PP_ = \n L + I + G: proj P match Pm; Dx, Dy, abs_Dx, abs_Dy for scan-invariant hyp_g_P calc, comp, no indiv comp: rdn\n * L/ Ly / Ly: elongation: >ave Pm? ~ box elong: (xn - x0) / (yn - y0)? 
\n * Dy / Dx: dimensional variation bias \n * Ave - Ga: angle match\n \n g and ga are dderived, blob of min_g?\n val-= sub_blob and branch switch cost: added map? only after g,a calc: no rough g comp?\n '''\n # ---------- eval_layer() end ---------------------------------------------------------------------------------------\n\n\ndef hypot_g(blob): # redefine master blob by reduced g and increased ave * 2: variable costs of comp_angle\n\n mask = ~blob.map[:, :, np.newaxis].repeat(4, axis=2) # stack map 4 times to fit the shape of dert__: (width, height, number of params)\n blob.new_dert__[0] = ma.array(blob.dert__, mask=mask) # initialize dert__ with mask for selective comp\n\n # redefine g = hypot(dx, dy), ave * 2 assuming that added cost of angle calc = cost of hypot_g calc\n blob.new_dert__[0][:, :, 3] = np.hypot(blob.new_dert__[0][:, :, 1], blob.new_dert__[0][:, :, 2]) - ave * 2\n\n return 1 # comp rng\n\n# ---------- hypot_g() end -----------------------------------------------------------------------------------\n\ndef overlap(blob, box, map): # returns number of overlapping pixels between blob.map and map\n\n y0, yn, x0, xn = blob.box\n _y0, _yn, _x0, _xn = box\n\n olp_y0 = max(y0, _y0)\n olp_yn = min(yn, _yn)\n if olp_yn - olp_y0 <= 0: # no overlapping y coordinate span\n return 0\n olp_x0 = max(x0, _x0)\n olp_xn = min(xn, _xn)\n if olp_xn - olp_x0 <= 0: # no overlapping x coordinate span\n return 0\n\n # master_blob coordinates olp_y0, olp_yn, olp_x0, olp_xn are converted to local coordinates before slicing:\n\n map1 = box.map[(olp_y0 - y0):(olp_yn - y0), (olp_x0 - x0):(olp_xn - x0)]\n map2 = map[(olp_y0 - _y0):(olp_yn - _y0), (olp_x0 - _x0):(olp_xn - _x0)]\n\n olp = np.logical_and(map1, map2).sum() # compute number of overlapping pixels\n return olp\n\n # ---------- overlap() end ------------------------------------------------------------------------------------------" ]
[ [ "numpy.ma.array", "numpy.hypot", "numpy.logical_and" ] ]
andriiaprysiazhnyk/brats_competition
[ "c2fa999c3458a118ca5c5fe81a37a74ef664fef3", "c2fa999c3458a118ca5c5fe81a37a74ef664fef3" ]
[ "brats_competition/model_training/common/adapters/base.py", "brats_competition/model_training/common/models/models_3d/nnutils.py" ]
[ "import torch\nfrom collections import OrderedDict\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom ..models import get_network\nfrom ..metrics import get_metric\nfrom ..losses import get_loss\nfrom ..augmentations import denormalization\n\n__all__ = ['ModelAdapter']\n\n\nclass ModelAdapter:\n def __init__(self, config, log_path):\n self.device = config['devices'][0]\n\n self.log_path = log_path\n self.get_loss_function(config['model']['loss'])\n metrics_names = config['model']['metrics']\n self.metrics = OrderedDict([\n (metric_name, get_metric(metric_name, config['model']['classes'], self.device))\n for metric_name in metrics_names\n ])\n self.main_metric = metrics_names[0] if len(metrics_names) > 0 else 'loss'\n\n self.denormalize = denormalization[config['val']['transform']['images_normalization']]\n self.epoch = 0\n self.mode = 'train'\n self.writer = SummaryWriter(self.log_path)\n\n self.model = get_network(config['model'])\n self.model.to(self.device)\n self.model = torch.nn.DataParallel(\n self.model,\n device_ids=config['devices']\n )\n\n def get_loss_function(self, losses_config):\n if isinstance(losses_config, dict):\n self.criterion = {losses_config['name']: (get_loss(losses_config), 1.0)}\n elif isinstance(losses_config, list):\n self.criterion = {x['name']: (get_loss(x), x['weight']) for x in losses_config}\n\n for loss_name in self.criterion:\n self.criterion[loss_name][0].to(self.device)\n\n def set_epoch(self, epoch):\n assert epoch > 0\n self.epoch = epoch\n return self\n\n def forward(self, data):\n X = data[0]\n return self.model(X)\n\n def add_metrics(self, y_pred, data):\n \"\"\"Calculate metrics on given models output and targets\"\"\"\n y_true = data[1].to(self.device)\n for metric in self.metrics.values():\n metric.add(y_pred, y_true)\n\n def get_metrics(self):\n rv = OrderedDict([\n (metric_name, metric.get())\n for metric_name, metric in self.metrics.items()\n ])\n\n for metric in self.metrics.values():\n metric.reset()\n\n return rv\n\n def get_loss(self, y_pred, data):\n \"\"\"Calculate loss given models output and targets\"\"\"\n y_true = data[1].to(self.device)\n loss = 0\n for criterion, weight in self.criterion.values():\n loss += weight * criterion(y_pred, y_true)\n return loss\n\n def make_tensorboard_grid(self, batch_sample):\n \"\"\"Make grid of model inputs and outputs\"\"\"\n raise NotImplementedError()\n\n def write_to_tensorboard(self, epoch, train_loss, val_loss, batch_sample):\n # write train and val losses\n for scalar_prefix, loss in zip(('Train', 'Validation'), (train_loss, val_loss)):\n self.writer.add_scalar(f'{scalar_prefix}_Loss', loss, epoch)\n\n for metric in self.metrics.values():\n metric.write_to_tensorboard(self.writer, epoch)\n\n images_grid = self.make_tensorboard_grid(batch_sample)\n if images_grid is not None:\n self.writer.add_image('Images', images_grid, epoch)\n\n def zero_grad(self):\n self.model.module.zero_grad()\n\n def train(self):\n self.mode = 'train'\n return self.model.module.train()\n\n def eval(self):\n self.mode = 'val'\n return self.model.module.eval()\n\n def get_params_groups(self):\n return self.model.module.get_params_groups()\n\n def parameters(self):\n return self.model.module.parameters()\n\n def state_dict(self):\n return self.model.module.state_dict()\n", "import torch.nn as nn\nfrom collections import OrderedDict\n\n\nclass ResBlock3d(nn.Module):\n\n def __init__(self, channels, channels_per_group):\n super(ResBlock3d, self).__init__()\n\n self.block = nn.Sequential(OrderedDict([\n ('gn1', 
nn.GroupNorm(num_groups=channels // channels_per_group, num_channels=channels)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv1', nn.Conv3d(channels, channels, (3, 3, 3), padding=1)),\n ('gn2', nn.GroupNorm(num_groups=channels // channels_per_group, num_channels=channels)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv3d(channels, channels, (3, 3, 3), padding=1))\n ]))\n\n def forward(self, x):\n out = self.block(x)\n return out + x\n\n\nif __name__ == '__main__':\n from torchsummary import summary\n\n block = ResBlock3d(channels=32, channels_per_group=8)\n summary(block, input_size=(32, 128, 128, 128), batch_size=1, device='cpu')\n" ]
[ [ "torch.utils.tensorboard.SummaryWriter", "torch.nn.DataParallel" ], [ "torch.nn.ReLU", "torch.nn.GroupNorm", "torch.nn.Conv3d" ] ]
braveld/ConvNetWord
[ "6adf8788528fa262d8727763f147c5118b05048f" ]
[ "sentence_convnet_final.py" ]
[ "__author__ = 'mangate'\n\nimport cPickle\nfrom model import Model\nimport process_data_mr\nimport process_data_tweets\nimport process_data_sst1\nimport process_data_sst2\nimport process_data_subj\nimport process_data_trec\nimport process_data_politeness2\nimport process_data_opi\nimport process_data_irony\nimport tensorflow as tf\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n#Flags\ntf.flags.DEFINE_boolean(\"random\",False,\"Initialize with random word embeddings (default: False)\")\ntf.flags.DEFINE_boolean(\"static\",False,\"Keep the word embeddings static (default: False)\")\nFLAGS =tf.flags.FLAGS\n\ndef evaluate(x, num_classes = 2, k_fold = 10):\n revs, embedding, W2, word_idx_map, vocab, max_l = x[0], x[1], x[2], x[3], x[4], x[5]\n if FLAGS.random:\n embedding = W2\n embedding_dim = 300\n vocab_size = len(vocab) + 1\n filter_sizes = [3, 4, 5]\n num_filters = 100\n vector_length = max_l + 2 * 4\n cnn_model = Model()\n trainable = not FLAGS.static\n cnn_model.build_model(embedding_dim, vocab_size, filter_sizes, num_filters, vector_length, num_classes, trainable)\n cnn_model.run(revs, embedding, word_idx_map, max_l, k_fold)\n\n\ndef evaluate_mr():\n process_data_mr.process_data(\"data/processed/mr.p\")\n x = cPickle.load(open(\"data/processed/mr.p\", \"rb\"))\n evaluate(x, 2, 10)\n\n\ndef evaluate_tweets():\n process_data_tweets.process_data(\"data/processed/twitter.p\")\n x = cPickle.load(open(\"data/processed/twitter.p\", \"rb\"))\n evaluate(x, 10, 1)\n\n\ndef evaluate_sst1():\n process_data_sst1.process_data(\"data/processed/sst1.p\")\n x = cPickle.load(open(\"data/processed/sst1.p\", \"rb\"))\n evaluate(x, 5, 1)\n\n\ndef evaluate_sst2():\n process_data_sst2.process_data(\"data/processed/sst2.p\")\n x = cPickle.load(open(\"data/processed/sst2.p\", \"rb\"))\n evaluate(x, 2, 1)\n\n\ndef evaluate_subj():\n process_data_subj.process_data(\"data/processed/subj.p\")\n x = cPickle.load(open(\"data/processed/subj.p\", \"rb\"))\n evaluate(x, 2, 10)\n\n\ndef evaluate_trec():\n process_data_trec.process_data(\"data/processed/trec.p\")\n x = cPickle.load(open(\"data/processed/trec.p\", \"rb\"))\n evaluate(x, 6, 1)\n\n\ndef evaluate_cr():\n # couldn't find the dataset...\n pass\n\n\ndef evaluate_mpqa():\n # too complicated..\n pass\n\n\ndef evaluate_politeness():\n process_data_politeness2.process_data(\"data/processed/politeness.p\")\n x = cPickle.load(open(\"data/processed/politeness.p\", \"rb\"))\n evaluate(x, 2, 10)\n\ndef evaluate_opi():\n process_data_opi.process_data(\"data/processed/opi.p\")\n x = cPickle.load(open(\"data/processed/opi.p\", \"rb\"))\n evaluate(x, 6, 10)\n\ndef evaluate_irony():\n process_data_irony.process_data(\"data/processed/irony.p\")\n x = cPickle.load(open(\"data/processed/irony.p\", \"rb\"))\n evaluate(x, 2, 10)\n\nif __name__==\"__main__\":\n #evaluate_mr()\n #evaluate_tweets()\n #evaluate_sst1()\n #evaluate_sst2()\n #evaluate_subj()\n #evaluate_trec()\n #evaluate_politeness()\n evaluate_irony()\n\n\n\n\n" ]
[ [ "tensorflow.flags.DEFINE_boolean" ] ]
worldlove521/Paddle
[ "c7f1f3ed0c897073cc7ae8ec60a13a8217dffe7d" ]
[ "python/paddle/fluid/executor.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport multiprocessing\nimport numpy as np\nfrom .wrapped_decorator import signature_safe_contextmanager\nimport six\nfrom .framework import Program, default_main_program, Variable\nfrom . import core\nfrom . import compiler\nfrom .. import compat as cpt\n\n__all__ = ['Executor', 'global_scope', 'scope_guard']\n\ng_scope = core.Scope()\nInferNativeConfig = core.NativeConfig\nInferAnalysisConfig = core.AnalysisConfig\n\n\ndef global_scope():\n \"\"\"\n Get the global/default scope instance. There are a lot of APIs use\n :code:`global_scope` as its default value, e.g., :code:`Executor.run`\n\n Returns:\n Scope: The global/default scope instance.\n \"\"\"\n return g_scope\n\n\ndef _switch_scope(scope):\n global g_scope\n ex = g_scope\n g_scope = scope\n return ex\n\n\n@signature_safe_contextmanager\ndef scope_guard(scope):\n \"\"\"\n Change the global/default scope instance by Python `with` statement. All\n variable in runtime will assigned to the new scope.\n\n Examples:\n >>> import paddle.fluid as fluid\n >>> new_scope = fluid.Scope()\n >>> with fluid.scope_guard(new_scope):\n >>> ...\n\n Args:\n scope: The new global/default scope.\n \"\"\"\n ex = _switch_scope(scope)\n yield\n _switch_scope(ex)\n\n\ndef as_numpy(tensor):\n \"\"\"\n Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information.\n For higher dimensional sequence data, please use LoDTensor directly.\n Examples:\n >>> import paddle.fluid as fluid\n >>> outs = executor.run(...)\n >>> np_outs = map(lambda x: as_numpy(x), outs)\n >>> ...\n\n Args:\n tensor(Variable): a instance of Tensor\n\n Returns:\n numpy.ndarray\n \"\"\"\n if isinstance(tensor, core.LoDTensorArray):\n return [as_numpy(t) for t in tensor]\n if isinstance(tensor, list):\n return [as_numpy(t) for t in tensor]\n assert isinstance(tensor, core.LoDTensor)\n lod = tensor.lod()\n if len(lod) > 0:\n raise RuntimeError(\"Some of your fetched tensors hold LoD information. \\\n They can not be completely cast to Python ndarray. \\\n Please set the parameter 'return_numpy' as 'False' to \\\n return LoDTensor itself directly.\")\n return np.array(tensor)\n\n\ndef has_feed_operators(block, feed_targets, feed_holder_name):\n \"\"\" Check whether the block already has feed operators.\n\n Return false if the block does not have any feed operators.\n If some feed operators have been prepended to the block, check that\n the info contained in these feed operators matches the feed_targets\n and feed_holder_name. Raise exception when any mismatch is found.\n Return true when the block has feed operators with matching info.\n\n Args:\n block: a block instance (typically global block of a program)\n feed_targets: a dictionary of {feed_target_name: feed_target_data}\n feed_holder_name: the name of the variable that holds the data of\n all feed targets. 
The type of this feed_holder variable is\n FEED_MINIBATCH, which is essentially vector<LoDTensor>.\n\n Returns:\n A boolean value that indicates whether a block has feed operators\n that match the info contained in feed_targets and feed_holder_name.\n \"\"\"\n\n feed_count = 0\n for op in block.ops:\n if op.desc.type() == 'feed':\n feed_count += 1\n assert op.desc.input('X')[0] == feed_holder_name\n feed_target_name = op.desc.output('Out')[0]\n if feed_target_name not in feed_targets:\n raise Exception(\"'feed_targets' does not have {} variable\".\n format(feed_target_name))\n else:\n break\n if feed_count > 0 and feed_count != len(feed_targets):\n raise Exception(\n \"Feed operators in program desc do not match 'feed_targets'\")\n return feed_count > 0\n\n\ndef has_fetch_operators(block, fetch_targets, fetch_holder_name):\n \"\"\" Check whether the block already has fetch operators.\n\n Return false if the block does not have any fetch operators.\n If some fetch operators have been appended to the block, check that\n the info contained in these fetch operators matches the fetch_targets\n and fetch_holder_name. Raise exception when any mismatch is found.\n Return true when the block has fetch operators with matching info.\n\n Args:\n block: a block instance (typically global block of a program)\n fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}\n fetch_holder_name: the name of the variable that holds the data of\n all fetch targets. The type of this fetch_holder variable is\n FETCH_LIST, which is essentially vector<LoDTensor>.\n\n Return:\n A boolean value that indicates whether a block has fetch operators\n that match the info contained in fetch_targets and fetch_holder_name.\n \"\"\"\n\n fetch_count = 0\n for op in block.ops:\n if op.desc.type() == 'fetch':\n fetch_count += 1\n assert op.desc.output('Out')[0] == fetch_holder_name\n fetch_target_name = op.desc.input('X')[0]\n if fetch_target_name not in [\n var.desc.name() for var in fetch_targets\n ]:\n raise Exception(\"'fetch_targets' does not have {} variable\".\n format(fetch_target_name))\n idx = op.desc.attr('col')\n assert fetch_target_name == fetch_targets[idx].desc.name()\n if fetch_count > 0 and fetch_count != len(fetch_targets):\n raise Exception(\n \"Fetch operators in program desc do not match 'fetch_targets'\")\n return fetch_count > 0\n\n\ndef _fetch_var(name, scope=None, return_numpy=True):\n \"\"\"\n Fetch the value of the variable with the given name from the\n given scope.\n\n Args:\n name(str): name of the variable. Typically, only persistable variables\n can be found in the scope used for running the program.\n scope(core.Scope|None): scope object. It should be the scope where\n you pass to Executor.run() when running your program.\n If None, global_scope() will be used. Default None.\n return_numpy(bool): whether convert the tensor to numpy.ndarray.\n Default True.\n\n Returns:\n LodTensor|numpy.ndarray\n \"\"\"\n assert isinstance(name, str)\n if scope is None:\n scope = global_scope()\n assert isinstance(scope, core._Scope)\n\n var = scope.find_var(name)\n assert var is not None, (\n \"Cannot find \" + name + \" in scope. 
Perhaps you need to make the\"\n \" variable persistable by using var.persistable = True in your\"\n \" program.\")\n tensor = var.get_tensor()\n if return_numpy:\n tensor = as_numpy(tensor)\n return tensor\n\n\ndef _to_name_str(var):\n if isinstance(var, Variable):\n return var.desc.name()\n elif isinstance(var, str):\n return var\n elif isinstance(var, six.string_types):\n return str(var)\n else:\n raise TypeError(str(var) + \" should be Variable or str\")\n\n\ndef _get_program_cache_key(feed, fetch_list):\n feed_var_names = list(feed.keys())\n fetch_var_names = list(map(_to_name_str, fetch_list))\n\n return str(feed_var_names + fetch_var_names)\n\n\ndef _as_lodtensor(data, place):\n \"\"\"\n Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.\n For higher dimensional sequence data, please use LoDTensor directly.\n\n Examples:\n >>> import paddle.fluid as fluid\n >>> place = fluid.CPUPlace()\n >>> exe = fluid.executor(place)\n >>> data = np.array(size=(100, 200, 300))\n >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)\n >>> ...\n\n Args:\n data(numpy.ndarray): a instance of array\n\n Returns:\n LoDTensor\n \"\"\"\n if isinstance(data, list):\n raise RuntimeError(\"Some of your feed data hold LoD information. \\\n They can not be completely cast from a list of Python \\\n ndarray to LoDTensor. Please convert data to LoDTensor \\\n directly before feeding the data.\\\n \")\n # single tensor case\n tensor = core.LoDTensor()\n tensor.set(data, place)\n return tensor\n\n\nclass Executor(object):\n \"\"\"\n An Executor in Python, supports single/multiple-GPU running, and single/multiple-CPU running.\n Python executor takes a program, adds feed operators and fetch operators to this program according\n to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides\n the variables(or names) that user wants to get after program runs. Note: the executor will run all\n operators in the program but not only the operators dependent by the fetch_list.\n It stores the global variables into the global scope, and creates a local scope for the temporary\n variables. The contents in local scope may be discarded after every minibatch forward/backward\n finished. But the global scope variables will be persistent through different runs.\n\n\n Example:\n\n .. code-block:: python\n\n # First create the Executor.\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n # Run the startup program once and only once.\n # Not need to optimize/compile the startup program.\n exe.run(fluid.default_startup_program())\n\n # Run the main program directly without compile.\n loss, = exe.run(fluid.default_main_program(),\n feed=feed_dict,\n fetch_list=[loss.name])\n # Or, compiled the program and run. 
See `CompiledProgram` for more detail.\n compiled_prog = compiler.CompiledProgram(\n fluid.default_main_program()).with_data_parallel(\n loss_name=loss.name)\n loss, = exe.run(compiled_prog,\n feed=feed_dict,\n fetch_list=[loss.name])\n\n Args:\n place(core.CPUPlace|core.CUDAPlace(n)): the device on which the executor runs\n \"\"\"\n\n def __init__(self, place):\n self.place = place\n self.program_caches = dict()\n p = core.Place()\n p.set_place(self.place)\n self._default_executor = core.Executor(p)\n self._closed = False\n\n def _get_program_cache(self, program_cache_key):\n return self.program_caches.get(program_cache_key, None)\n\n def _add_program_cache(self, program_cache_key, program):\n self.program_caches[program_cache_key] = program\n\n def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,\n fetch_var_name):\n tmp_program = program.clone()\n\n global_block = tmp_program.global_block()\n\n if feed_var_name in global_block.vars:\n feed_var = global_block.var(feed_var_name)\n else:\n feed_var = global_block.create_var(\n name=feed_var_name,\n type=core.VarDesc.VarType.FEED_MINIBATCH,\n persistable=True)\n\n if fetch_var_name in global_block.vars:\n fetch_var = global_block.var(fetch_var_name)\n else:\n fetch_var = global_block.create_var(\n name=fetch_var_name,\n type=core.VarDesc.VarType.FETCH_LIST,\n persistable=True)\n\n # prepend feed operators\n if not has_feed_operators(global_block, feed, feed_var_name):\n for i, name in enumerate(feed):\n out = global_block.var(name)\n global_block._prepend_op(\n type='feed',\n inputs={'X': [feed_var]},\n outputs={'Out': [out]},\n attrs={'col': i})\n\n # append fetch operators\n if not has_fetch_operators(global_block, fetch_list, fetch_var_name):\n for i, var in enumerate(fetch_list):\n assert isinstance(var, Variable) or isinstance(\n var, six.string_types), (\n \"Wrong type for fetch_list[%s]: %s\" % (i, type(var)))\n global_block.append_op(\n type='fetch',\n inputs={'X': [var]},\n outputs={'Out': [fetch_var]},\n attrs={'col': i})\n\n return tmp_program\n\n def _feed_data(self, program, feed, feed_var_name, scope):\n # feed variables into the framework\n for op in program.global_block().ops:\n if op.desc.type() == 'feed':\n feed_target_name = op.desc.output('Out')[0]\n cur_feed = feed[feed_target_name]\n if not isinstance(cur_feed, core.LoDTensor):\n cur_feed = _as_lodtensor(cur_feed, self.place)\n idx = op.desc.attr('col')\n core.set_feed_variable(scope, cur_feed, feed_var_name, idx)\n else:\n break\n\n def _fetch_data(self, fetch_list, fetch_var_name, scope):\n outs = [\n core.get_fetch_variable(scope, fetch_var_name, i)\n for i in six.moves.range(len(fetch_list))\n ]\n return outs\n\n '''\n TODO(typhoonzero): Define \"no longer use\" meaning? 
Can user create\n a new Executor for the same program and run?\n TODO(panyx0718): Why ParallelExecutor doesn't have close?\n '''\n\n def close(self):\n \"\"\"\n Close this executor.\n\n You can no longer use this executor after calling this method.\n For distributed training, this method frees the resources on PServers related to\n the current Trainer.\n\n Example:\n >>> cpu = core.CPUPlace()\n >>> exe = Executor(cpu)\n >>> ...\n >>> exe.close()\n \"\"\"\n if not self._closed:\n self._default_executor.close()\n self._closed = True\n\n def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,\n return_numpy):\n exe = program._executor\n if isinstance(feed, dict):\n feed_tensor_dict = dict()\n for feed_name in feed:\n feed_tensor = feed[feed_name]\n if not isinstance(feed_tensor, core.LoDTensor):\n feed_tensor = core.LoDTensor()\n # always set to CPU place, since the tensor needs to be split,\n # and this is fast on CPU\n feed_tensor.set(feed[feed_name], core.CPUPlace())\n feed_tensor_dict[feed_name] = feed_tensor\n\n exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)\n elif isinstance(feed, list) or isinstance(feed, tuple):\n if len(feed) != len(program._places):\n raise ValueError(\n \"Feed a list of tensor, the list should be the same size as places\"\n )\n\n res = list()\n for i, each in enumerate(feed):\n if not isinstance(each, dict):\n raise TypeError(\n \"Each element of feed list should be a dict\")\n res_dict = dict()\n for feed_name in each:\n tensor = each[feed_name]\n if not isinstance(tensor, core.LoDTensor):\n tmp = core.LoDTensor()\n tmp.set(tensor, program._places[i])\n tensor = tmp\n res_dict[feed_name] = tensor\n res.append(res_dict)\n exe.feed_tensors_into_local_scopes(res)\n\n fetch_var_names = list(map(_to_name_str, fetch_list))\n exe.run(fetch_var_names, fetch_var_name)\n arr = scope.find_var(fetch_var_name).get_lod_tensor_array()\n\n if return_numpy:\n return as_numpy(arr)\n return [arr[i] for i in range(len(arr))]\n\n def run(self,\n program=None,\n feed=None,\n fetch_list=None,\n feed_var_name='feed',\n fetch_var_name='fetch',\n scope=None,\n return_numpy=True,\n use_program_cache=False):\n \"\"\"\n Run a program by this Executor. Feed data via the feed map and fetch results via fetch_list.\n The Python executor takes a program and adds feed and fetch operators to it according\n to the feed map and fetch_list. The feed map provides input data for the program; fetch_list provides\n the variables (or names) that the user wants to get after the program runs.\n\n Note: the executor will run all\n operators in the program, not only the operators that the fetch_list depends on.\n\n Args:\n program(Program|CompiledProgram): the program that needs to run;\n if not provided, default_main_program (not compiled) will be used.\n feed(dict): feed variable map, e.g. {\"image\": ImageData, \"label\": LabelData}\n fetch_list(list): a list of variables or variable names that the user \n wants to get; this method will return them according to this list.\n feed_var_name(str): the name for the input variable of \n the feed operator.\n fetch_var_name(str): the name for the output variable of \n the fetch operator.\n scope(Scope): the scope used to run this program; you can switch \n it to a different scope. Default is global_scope.\n return_numpy(bool): whether to convert the fetched tensors to numpy\n use_program_cache(bool): whether to use the cached program \n settings across batches. 
Setting it to True is faster, \n but only when (1) the program is not compiled with data parallel, \n and (2) the program, feed variable names, and fetch_list variable \n names have not changed compared to the last step. \n \n Returns:\n\n list(numpy.array): fetch result according to fetch_list.\n\n\n Examples:\n\n >>> data = fluid.layers.data(name='X', shape=[1], dtype='float32')\n >>> out = fluid.layers.create_tensor(dtype='float32')\n >>> hidden = fluid.layers.fc(input=data, size=10)\n >>> fluid.layers.assign(hidden, out)\n >>> loss = fluid.layers.mean(out)\n >>> adam = fluid.optimizer.Adam()\n >>> adam.minimize(loss)\n\n >>> cpu = core.CPUPlace()\n >>> exe = fluid.Executor(cpu)\n >>> exe.run(fluid.default_startup_program())\n\n >>> x = numpy.random.random(size=(10, 1)).astype('float32')\n >>> outs = exe.run(\n >>> feed={'X': x},\n >>> fetch_list=[loss.name])\n \"\"\"\n\n if self._closed:\n raise RuntimeError(\"Attempted to use a closed Executor\")\n\n if scope is None:\n scope = global_scope()\n if fetch_list is None:\n fetch_list = []\n\n compiled = isinstance(program, compiler.CompiledProgram)\n # For backward compatibility, run directly.\n if not compiled:\n return self._run(\n program,\n self._default_executor,\n feed=feed,\n fetch_list=fetch_list,\n feed_var_name=feed_var_name,\n fetch_var_name=fetch_var_name,\n scope=scope,\n return_numpy=return_numpy,\n use_program_cache=use_program_cache)\n\n program._compile(scope, self.place)\n if program._is_data_parallel:\n return self._run_parallel(\n program,\n scope=scope,\n feed=feed,\n fetch_list=fetch_list,\n fetch_var_name=fetch_var_name,\n return_numpy=return_numpy)\n elif program._is_inference:\n return self._run_inference(program._executor, feed)\n else:\n # TODO(panyx0718): Can compile program to optimize executor\n # performance.\n # TODO(panyx0718): executor should be able to run graph.\n assert program._program, \"CompiledProgram is compiled from graph, can only run with_data_parallel.\"\n return self._run(\n program._program,\n self._default_executor,\n feed=feed,\n fetch_list=fetch_list,\n feed_var_name=feed_var_name,\n fetch_var_name=fetch_var_name,\n scope=scope,\n return_numpy=return_numpy,\n use_program_cache=use_program_cache)\n\n def _run(self, program, exe, feed, fetch_list, feed_var_name,\n fetch_var_name, scope, return_numpy, use_program_cache):\n\n if feed is None:\n feed = {}\n if not isinstance(feed, dict):\n raise TypeError(\n \"feed requires dict as its Parameter. But you passed in %s\" %\n (type(feed)))\n if program is None:\n program = default_main_program()\n\n if not isinstance(program, Program):\n raise TypeError(\n \"Executor requires Program as its Parameter. 
But you passed in %s\"\n % (type(program)))\n\n cache_key = _get_program_cache_key(feed, fetch_list)\n if use_program_cache:\n cached_program = self._get_program_cache(cache_key)\n if cached_program is None:\n cached_program = self._add_feed_fetch_ops(\n program=program,\n feed=feed,\n fetch_list=fetch_list,\n feed_var_name=feed_var_name,\n fetch_var_name=fetch_var_name)\n self._add_program_cache(cache_key, cached_program)\n program = cached_program\n else:\n self.program_caches.pop(cache_key, None)\n program = self._add_feed_fetch_ops(\n program=program,\n feed=feed,\n fetch_list=fetch_list,\n feed_var_name=feed_var_name,\n fetch_var_name=fetch_var_name)\n\n self._feed_data(program, feed, feed_var_name, scope)\n exe.run(program.desc, scope, 0, True, True, fetch_var_name)\n outs = self._fetch_data(fetch_list, fetch_var_name, scope)\n if return_numpy:\n outs = as_numpy(outs)\n return outs\n\n def _run_inference(self, exe, feed):\n return exe.run(feed)\n" ]
[ [ "numpy.array" ] ]
podondra/roboschool-rl
[ "2e6d6b1302eaa9aea12ebd81e2ad7a22d29a8d69" ]
[ "classic/cem.py" ]
[ "# inspired by\n# http://rl-gym-doc.s3-website-us-west-2.amazonaws.com/mlss/lab1.html\nimport numpy\n\n\nclass LinearPolicy:\n def __init__(self, theta, env):\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.n\n self.W = theta[:obs_dim * act_dim].reshape(obs_dim, act_dim)\n self.b = theta[obs_dim * act_dim:]\n\n def act(self, observation):\n y = numpy.dot(observation, self.W) + self.b\n return y.argmax()\n\n\ndef run_episode(policy, env, n_timesteps, render=False):\n total_reward = 0\n S = env.reset()\n for t in range(n_timesteps):\n a = policy.act(S)\n S, R, done, _ = env.step(a)\n total_reward += R\n if render:\n env.render()\n if done:\n break\n return total_reward\n\n\ndef noisy_evaluation(theta, env, n_timesteps):\n policy = LinearPolicy(theta, env)\n return run_episode(policy, env, n_timesteps)\n\n\ndef cross_entropy_method(\n env, n_iteration, n_timesteps, batch_size=25, elite=0.2, render=True\n ):\n theta_dim = (env.observation_space.shape[0] + 1) * env.action_space.n\n theta_mean = numpy.zeros(theta_dim)\n theta_std = numpy.ones(theta_dim)\n n_elite = int(batch_size * elite)\n\n for iteration in range(n_iteration):\n # sample parameter vectors\n thetas = numpy.random.normal(\n loc=theta_mean,\n scale=theta_std,\n size=(batch_size, theta_dim)\n )\n rewards = numpy.zeros(batch_size)\n for i, theta in enumerate(thetas):\n rewards[i] = noisy_evaluation(theta, env, n_timesteps)\n # get elite parameters\n elite_idxs = numpy.argsort(rewards)[-n_elite:]\n elite_thetas = thetas[elite_idxs]\n theta_mean = elite_thetas.mean(axis=0)\n theta_std = elite_thetas.std(axis=0)\n print('iteration:{:9d} mean reward: {:f} max reward: {:f}'.format(\n iteration, numpy.mean(rewards), numpy.max(rewards)\n ))\n policy = LinearPolicy(theta_mean, env)\n run_episode(policy, env, n_timesteps, render)\n" ]
[ [ "numpy.max", "numpy.random.normal", "numpy.dot", "numpy.zeros", "numpy.ones", "numpy.mean", "numpy.argsort" ] ]
marcobarilari/pybids
[ "ce98ad8fee0ed19518a7b9ba48aec5eed13a4783" ]
[ "bids/layout/tests/test_models.py" ]
[ "\"\"\"Tests of functionality in the models module.\"\"\"\n\nimport sys\nimport os\nimport pytest\nimport copy\nimport json\nfrom pathlib import Path\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport numpy as np\n\nfrom bids.layout.models import (BIDSFile, Entity, Tag, Base, Config,\n FileAssociation, BIDSImageFile, LayoutInfo)\nfrom bids.tests import get_test_data_path\n\n\n\ndef create_session():\n engine = create_engine('sqlite://')\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n return Session()\n\n\[email protected]\ndef sample_bidsfile(tmpdir):\n testfile = 'sub-03_ses-2_task-rest_acq-fullbrain_run-2_bold.nii.gz'\n fn = tmpdir.mkdir(\"tmp\").join(testfile)\n fn.write('###')\n return BIDSFile(os.path.join(str(fn)))\n\n\[email protected](scope='module')\ndef subject_entity():\n return Entity('subject', r\"[/\\\\\\\\]sub-([a-zA-Z0-9]+)\", mandatory=False,\n directory=\"{subject}\", dtype='str')\n\n\ndef test_layoutinfo_init():\n args = dict(root='/made/up/path', validate=True,\n absolute_paths=True, index_metadata=False,\n derivatives=True, ignore=['code/', 'blergh/'],\n force_index=None)\n with pytest.raises(ValueError) as exc:\n LayoutInfo(**args)\n assert str(exc.value).startswith(\"Missing mandatory\")\n args['config'] = ['bids', 'derivatives']\n info = LayoutInfo(**args)\n assert info.derivatives == True\n assert info._derivatives == 'true'\n\n\ndef test_entity_initialization():\n e = Entity('avaricious', r'aardvark-(\\d+)')\n assert e.name == 'avaricious'\n assert e.pattern == r'aardvark-(\\d+)'\n assert not e.mandatory\n assert e.directory is None\n assert e.files == {}\n\n\ndef test_entity_init_all_args(subject_entity):\n ent = subject_entity\n assert ent.name == 'subject'\n assert ent.pattern == r\"[/\\\\\\\\]sub-([a-zA-Z0-9]+)\"\n assert ent.mandatory == False\n assert ent.directory == \"{subject}\"\n\n\ndef test_entity_init_with_bad_dtype():\n with pytest.raises(ValueError) as exc:\n ent = Entity('test', dtype='superfloat')\n assert str(exc.value).startswith(\"Invalid dtype\")\n\n\ndef test_entity_matches(tmpdir):\n filename = \"aardvark-4-reporting-for-duty.txt\"\n tmpdir.mkdir(\"tmp\").join(filename).write(\"###\")\n f = BIDSFile(os.path.join(str(tmpdir), filename))\n e = Entity('avaricious', r'aardvark-(\\d+)')\n result = e.match_file(f)\n assert result == '4'\n\n\ndef test_entity_deepcopy(subject_entity):\n e = subject_entity\n clone = copy.deepcopy(subject_entity)\n for attr in ['name', 'pattern', 'mandatory', 'directory', 'regex']:\n assert getattr(e, attr) == getattr(clone, attr)\n assert e != clone\n\n\ndef test_file_associations():\n session = create_session()\n img = BIDSFile('sub-03/func/sub-03_task-rest_run-2_bold.nii.gz')\n md1 = BIDSFile('sub-03/func/sub-03_task-rest_run-2_bold.json')\n md2 = BIDSFile('task-rest_run-2_bold.json')\n assocs = [\n FileAssociation(src=md1.path, dst=img.path, kind=\"MetadataFor\"),\n FileAssociation(src=img.path, dst=md1.path, kind=\"MetadataIn\"),\n FileAssociation(src=md1.path, dst=md2.path, kind=\"Child\"),\n FileAssociation(src=md2.path, dst=md1.path, kind=\"Parent\"),\n FileAssociation(src=md2.path, dst=img.path, kind=\"Informs\")\n ]\n session.add_all([img, md1, md2] + assocs)\n session.commit()\n assert img._associations == [md1, md2] == img.get_associations()\n assert md2._associations == [md1]\n assert img.get_associations(kind='MetadataFor') == []\n assert img.get_associations(kind='MetadataIn') == [md1]\n results = img.get_associations(kind='MetadataIn', 
include_parents=True)\n assert set(results) == {md1, md2}\n\n\ndef test_tag_init(sample_bidsfile, subject_entity):\n f, e = sample_bidsfile, subject_entity\n tag = Tag(f, e, 'zzz')\n rep = str(tag)\n assert rep.startswith(\"<Tag file:\") and f.path in rep and 'zzz' in rep\n\n\ndef test_tag_dtype(sample_bidsfile, subject_entity):\n f, e = sample_bidsfile, subject_entity\n # Various ways of initializing--should all give same result\n tags = [\n Tag(f, e, 4, int),\n Tag(f, e, '4', 'int'),\n Tag(f, e, '4', int),\n Tag(f, e, 4),\n Tag(file=f, entity=e, dtype=int, value='4')\n ]\n assert all([t.dtype == int for t in tags])\n\n\ndef test_entity_add_file(sample_bidsfile):\n session = create_session()\n bf = sample_bidsfile\n e = Entity('prop', r'-(\\d+)')\n t = Tag(file=bf, entity=e, value=4)\n session.add_all([t, e, bf])\n session.commit()\n assert e.files[bf.path] == 4\n\n\ndef test_config_init_with_args():\n session = create_session()\n ents = [\n {\n \"name\": \"task\",\n \"pattern\": \"[_/\\\\\\\\]task-([a-zA-Z0-9]+)\"\n },\n {\n \"name\": \"acquisition\",\n \"pattern\": \"[_/\\\\\\\\]acq-([a-zA-Z0-9]+)\"\n }\n ]\n patterns = ['this_will_never_match_anything', 'and_neither_will_this']\n config = Config('custom', entities=ents, default_path_patterns=patterns)\n assert config.name == 'custom'\n target = {'task', 'acquisition'}\n assert set(ent.name for ent in config.entities.values()) == target\n assert config.default_path_patterns == patterns\n\n\ndef test_load_existing_config():\n session = create_session()\n first = Config('dummy')\n session.add(first)\n session.commit()\n\n second = Config.load({\"name\": \"dummy\"}, session=session)\n assert first == second\n session.add(second)\n session.commit()\n\n from sqlalchemy.orm.exc import FlushError\n with pytest.raises(FlushError):\n second = Config.load({\"name\": \"dummy\"})\n session.add(second)\n session.commit()\n\n\ndef test_bidsfile_get_df_from_tsv_gz(layout_synthetic):\n bf = layout_synthetic.get(suffix='physio', extension='tsv.gz')[0]\n\n # With onsets\n df1 = bf.get_df()\n df2 = bf.get_df(include_timing=True)\n assert df1.equals(df2)\n assert df1.shape == (1600, 3)\n assert set(df1.columns) == {'onset', 'respiratory', 'cardiac'}\n assert df1.iloc[0, 0] == 0.\n assert df1.iloc[1, 0] - df1.iloc[0, 0] == 0.1\n\n # With onsets and time shifted\n df3 = bf.get_df(adjust_onset=True)\n assert df1.iloc[:, 1:].equals(df3.iloc[:, 1:])\n assert np.allclose(df3.iloc[:,0], df1.iloc[:, 0] + 22.8)\n\n\ndef test_bidsdatafile_enforces_dtype(layout_synthetic):\n bf = layout_synthetic.get(suffix='participants', extension='tsv')[0]\n df = bf.get_df(enforce_dtypes=False)\n assert df.shape[0] == 5\n assert df.loc[:, 'subject_id'].dtype == int\n assert df.loc[:, 'subject_id'][0] == 1\n df = bf.get_df(enforce_dtypes=True)\n assert df.loc[:, 'subject_id'].dtype == 'O'\n assert df.loc[:, 'subject_id'][0] == '001'\n assert df.loc[:, 'subject_id'][1] == '2'\n\n\ndef test_bidsimagefile_get_image():\n path = \"synthetic/sub-01/ses-01/func/sub-01_ses-01_task-nback_run-01_bold.nii.gz\"\n path = path.split('/')\n path = os.path.join(get_test_data_path(), *path)\n bf = BIDSImageFile(path)\n assert bf.get_image() is not None\n assert bf.get_image().shape == (64, 64, 64, 64)\n\n\ndef test_bidsjsonfile(layout_synthetic):\n jf = layout_synthetic.get(suffix='bold', extension='json')[0]\n d = jf.get_dict()\n assert isinstance(d, dict)\n assert d['RepetitionTime'] == 2.5\n j = jf.get_json()\n assert isinstance(j, str)\n assert 'RepetitionTime' in j\n assert json.loads(j) == 
d\n\n\ndef test_bidsfile_get_metadata(layout_synthetic):\n bf = layout_synthetic.get(suffix='physio', extension='tsv.gz')[0]\n md = bf.get_metadata()\n assert set(md.keys()) == {'Columns', 'SamplingFrequency', 'StartTime'}\n\n\ndef test_bidsfile_get_entities(layout_synthetic):\n md_ents = {'Columns', 'SamplingFrequency', 'StartTime'}\n file_ents = {'datatype', 'extension', 'run', 'session', 'subject',\n 'suffix', 'task'}\n bf = layout_synthetic.get(suffix='physio', extension='tsv.gz')[10]\n # metadata=True and values='tags'; this is equivalent to get_metadata()\n md = bf.get_entities(metadata=True)\n assert md == bf.get_metadata()\n assert set(md.keys()) == md_ents\n assert md['StartTime'] == 22.8\n # metadata=True and values='objects'\n md = bf.get_entities(metadata=True, values='obj')\n assert set(md.keys()) == md_ents\n assert all([isinstance(v, Entity) for v in md.values()])\n # metadata=False and values='tags'\n md = bf.get_entities(metadata=False, values='tags')\n assert set(md.keys()) == file_ents\n assert md['session'] == '02'\n assert md['task'] == 'nback'\n # metadata=False and values='obj'\n md = bf.get_entities(metadata=False, values='objects')\n assert set(md.keys()) == file_ents\n assert all([isinstance(v, Entity) for v in md.values()])\n # No metadata constraint\n md = bf.get_entities(metadata='all')\n md2 = bf.get_entities(metadata=None)\n assert md == md2\n assert set(md.keys()) == md_ents | file_ents\n\n\ndef test_bidsfile_relpath(layout_synthetic):\n bf = layout_synthetic.get(suffix='physio', extension='tsv.gz')[10]\n assert bf.path != bf.relpath\n assert layout_synthetic.root in bf.path\n assert bf.relpath.startswith('sub')\n assert bf.relpath == str(Path(bf.path).relative_to(layout_synthetic.root))\n\n\[email protected](sys.version_info < (3, 6), reason=\"os.PathLike introduced in Python 3.6\")\ndef test_bidsfile_fspath(sample_bidsfile):\n bf = sample_bidsfile\n bf_path = Path(bf)\n assert bf_path == Path(bf.path)\n assert bf_path.read_text() == '###'\n" ]
[ [ "numpy.allclose" ] ]
sgkang/empymod
[ "c8fd726fdbae0788da2cdbb88e77c8b81edd37dc" ]
[ "tests/create_data/kernel.py" ]
[ "\"\"\"Create data for kernel tests. Kernel tests are just securing status quo.\"\"\"\nimport numpy as np\nfrom copy import deepcopy\nfrom scipy.constants import mu_0, epsilon_0\nfrom empymod import kernel, filters\n\n# All possible (ab, msrc, mrec) combinations\npab = (np.arange(1, 7)[None, :] + np.array([10, 20, 30])[:, None]).ravel()\niab = {}\nfor mrec in [False, True]:\n for ab in pab:\n if ab == 36:\n continue\n if ab % 10 > 3:\n msrc = True\n else:\n msrc = False\n if mrec:\n msrc = not msrc\n iab[ab] = (msrc, mrec)\n\n# # A -- ANGLE # #\n\nangres = []\nangle = np.array([1., 2., 4., 5.])\nfor key, val in iab.items():\n inp = {'angle': angle, 'ab': key, 'msrc': val[0], 'mrec': val[1]}\n res = kernel.angle_factor(angle, key, val[0], val[1])\n angres.append({'inp': inp, 'res': res})\n\n# # B -- WAVENUMBER # #\n\n# Example: 6-layer model; source in second layer, receiver in last\nfreq = np.array([0.003, 2.5, 1e6])\nres = np.array([3, .3, 10, 4, 3, 1])\naniso = np.array([1, .5, 3, 1, 2, 1])\nepermH = np.array([80, 100, 3, 8, 1, 1])\nepermV = np.array([100, 30, 1, 10, 68, 9])\nmpermH = np.array([.5, 100, 30, 1, 30, 1])\nmpermV = np.array([2, 1, 30, 9, 50, 1])\netaH = 1/res + np.outer(2j*np.pi*freq, epermH*epsilon_0)\netaV = 1/(res*aniso*aniso) + np.outer(2j*np.pi*freq, epermV*epsilon_0)\nzetaH = np.outer(2j*np.pi*freq, mpermH*mu_0)\nzetaV = np.outer(2j*np.pi*freq, mpermV*mu_0)\nlambd = filters.key_51_2012().base/np.array([0.001, 1, 100, 10000])[:, None]\ndepth = np.array([-np.infty, 0, 150, 300, 500, 600])\ninp1 = {'zsrc': np.array([100]),\n 'zrec': np.array([650]),\n 'lsrc': np.array(1),\n 'lrec': np.array(5),\n 'depth': depth,\n 'etaH': etaH,\n 'etaV': etaV,\n 'zetaH': zetaH,\n 'zetaV': zetaV,\n 'lambd': lambd,\n 'xdirect': False,\n 'use_ne_eval': False}\nwave = {}\nfor key, val in iab.items():\n res = kernel.wavenumber(ab=key, msrc=val[0], mrec=val[1], **inp1)\n wave[key] = (key, val[0], val[1], inp1, res)\n\n# # C -- GREENFCT # #\n\n# Standard example\ninp2 = deepcopy(inp1)\n# Source and receiver in same layer (last)\ninp3 = deepcopy(inp1)\ninp3['zsrc'] = np.array([610])\ninp3['lsrc'] = np.array(5)\n# Receiver in first layer\ninp4 = deepcopy(inp1)\ninp4['zrec'] = np.array([-30])\ninp4['lrec'] = np.array(0)\ngreen = {}\nfor key, val in iab.items():\n res1 = kernel.greenfct(ab=key, msrc=val[0], mrec=val[1], **inp2)\n res2 = kernel.greenfct(ab=key, msrc=val[0], mrec=val[1], **inp3)\n res3 = kernel.greenfct(ab=key, msrc=val[0], mrec=val[1], **inp4)\n\n green[key] = (key, val[0], val[1], inp2, res1, inp3, res2, inp4, res3)\n\n\n# # D -- REFLECTIONS # #\nrefl = {}\n# Standard example\nGam = np.sqrt((etaH/etaV)[:, None, :, None] *\n (lambd**2)[None, :, None, :] + (zetaH**2)[:, None, :, None])\ninp5 = {'depth': depth,\n 'e_zH': etaH,\n 'Gam': Gam,\n 'lrec': inp1['lrec'],\n 'lsrc': inp1['lsrc'],\n 'use_ne_eval': False}\nRp1, Rm1 = kernel.reflections(**inp5)\nrefl[0] = (inp5, Rp1, Rm1)\n# Source and receiver in same layer, but not last\ninp6 = {'depth': inp2['depth'],\n 'e_zH': etaH,\n 'Gam': Gam,\n 'lrec': np.array(3),\n 'lsrc': np.array(3),\n 'use_ne_eval': False}\nRp2, Rm2 = kernel.reflections(**inp6)\nrefl[1] = (inp6, Rp2, Rm2)\n\n# # E -- FIELDS # #\n# Standard example\ninp7 = {'depth': depth,\n 'Rp': Rp1,\n 'Rm': Rm1,\n 'Gam': Gam,\n 'lrec': inp5['lrec'],\n 'lsrc': inp5['lsrc'],\n 'zsrc': inp1['zsrc'],\n 'use_ne_eval': False}\n# Source and receiver in same layer, but not last\ninp8 = {'depth': depth,\n 'Rp': Rp2,\n 'Rm': Rm2,\n 'Gam': Gam,\n 'lrec': inp6['lrec'],\n 'lsrc': 
inp6['lsrc'],\n 'zsrc': np.array([350]),\n 'use_ne_eval': False}\n\n# Source and receiver in same layer (last)\nRp4, Rm4 = kernel.reflections(depth, etaH, Gam, np.array(5),\n np.array(5), False)\ninp10 = {'depth': depth,\n 'Rp': Rp4,\n 'Rm': Rm4,\n 'Gam': Gam,\n 'lrec': np.array(5),\n 'lsrc': np.array(5),\n 'zsrc': np.array([700]),\n 'use_ne_eval': False}\n\n# Receiver in first layer, source in last\nRp3, Rm3 = kernel.reflections(depth, etaH, Gam, np.array(0),\n np.array(5), False)\ninp9 = {'depth': depth,\n 'Rp': Rp3,\n 'Rm': Rm3,\n 'Gam': Gam,\n 'lrec': np.array(0),\n 'lsrc': np.array(5),\n 'zsrc': np.array([700]),\n 'use_ne_eval': False}\n\n# Source in first layer, receiver in last\nRp5, Rm5 = kernel.reflections(depth, etaH, Gam, np.array(5),\n np.array(0), False)\ninp11 = {'depth': depth,\n 'Rp': Rp5,\n 'Rm': Rm5,\n 'Gam': Gam,\n 'lrec': np.array(5),\n 'lsrc': np.array(0),\n 'zsrc': np.array([-30]),\n 'use_ne_eval': False}\n\n\nfields = {}\nfor TM in [False, True]:\n for ab in pab:\n if TM and ab in [16, 26]:\n continue\n elif not TM and ab in [13, 23, 31, 32, 33, 34, 35]:\n continue\n elif ab == 36:\n continue\n\n out1 = kernel.fields(ab=ab, TM=TM, **inp7)\n out2 = kernel.fields(ab=ab, TM=TM, **inp8)\n out3 = kernel.fields(ab=ab, TM=TM, **inp9)\n out4 = kernel.fields(ab=ab, TM=TM, **inp10)\n out5 = kernel.fields(ab=ab, TM=TM, **inp11)\n fields[ab] = (ab, TM, inp7, out1, inp8, out2, inp9, out3, inp10, out4,\n inp11, out5)\n\n# # F -- Store data # #\nnp.savez_compressed('../data/kernel.npz', angres=angres, wave=wave,\n green=green, refl=refl, fields=fields)\n" ]
[ [ "numpy.array", "numpy.savez_compressed", "numpy.arange", "numpy.sqrt", "numpy.outer" ] ]