repo_name (string, length 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---
tthhee/tensorpack | [
"199da52172c7ec1343619dc3177e34d227be24ab"
]
| [
"tensorpack/train/trainers.py"
]
| [
"# -*- coding: utf-8 -*-\n# File: trainers.py\n\nimport multiprocessing as mp\nimport os\nimport sys\nimport tensorflow as tf\n\nfrom ..callbacks import CallbackFactory, RunOp\nfrom ..graph_builder.distributed import DistributedParameterServerBuilder, DistributedReplicatedBuilder\nfrom ..graph_builder.training import (\n AsyncMultiGPUBuilder, SyncMultiGPUParameterServerBuilder, SyncMultiGPUReplicatedBuilder)\nfrom ..graph_builder.utils import override_to_local_variable\nfrom ..input_source import FeedfreeInput, QueueInput\nfrom ..tfutils import get_global_step_var\nfrom ..tfutils.distributed import get_distributed_session_creator\nfrom ..tfutils.sesscreate import NewSessionCreator\nfrom ..tfutils.tower import TrainTowerContext\nfrom ..utils import logger\nfrom ..utils.argtools import map_arg\nfrom ..utils.develop import HIDE_DOC\nfrom .tower import SingleCostTrainer\n\n__all__ = ['NoOpTrainer', 'SimpleTrainer',\n 'QueueInputTrainer',\n 'SyncMultiGPUTrainer',\n 'SyncMultiGPUTrainerReplicated',\n 'SyncMultiGPUTrainerParameterServer',\n 'AsyncMultiGPUTrainer',\n 'DistributedTrainerParameterServer',\n 'DistributedTrainerReplicated',\n 'HorovodTrainer']\n\n\ndef _int_to_range(x):\n if isinstance(x, int):\n assert x > 0, \"Argument cannot be {}!\".format(x)\n return list(range(x))\n return x\n\n\nclass SimpleTrainer(SingleCostTrainer):\n \"\"\"\n Single-GPU single-cost single-tower trainer.\n \"\"\"\n def _setup_graph(self, input, get_cost_fn, get_opt_fn):\n logger.info(\"Building graph for a single training tower ...\")\n with TrainTowerContext(''):\n grads = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)()\n opt = get_opt_fn()\n self.train_op = opt.apply_gradients(grads, name='train_op')\n return []\n\n\nclass NoOpTrainer(SimpleTrainer):\n \"\"\"\n A special trainer that builds the graph (if given a tower function)\n and does nothing in each step.\n It is used to only run the callbacks.\n\n Note that `steps_per_epoch` and `max_epochs` are still valid options.\n \"\"\"\n def run_step(self):\n self.hooked_sess.run([])\n\n\n# Only exists for type check & back-compatibility\nclass QueueInputTrainer(SimpleTrainer):\n def _setup_graph(self, input, get_cost_fn, get_opt_fn):\n assert isinstance(input, QueueInput), input\n return super(QueueInputTrainer, self)._setup_graph(input, get_cost_fn, get_opt_fn)\n\n\nclass SyncMultiGPUTrainerParameterServer(SingleCostTrainer):\n\n __doc__ = SyncMultiGPUParameterServerBuilder.__doc__\n\n devices = None\n \"\"\"\n List of GPU ids.\n \"\"\"\n\n @map_arg(gpus=_int_to_range)\n def __init__(self, gpus, ps_device=None):\n \"\"\"\n Args:\n gpus ([int]): list of GPU ids.\n ps_device: either 'gpu' or 'cpu', where variables are stored.\n The default value is subject to change.\n \"\"\"\n self.devices = gpus\n if ps_device is None:\n ps_device = 'gpu' if len(gpus) <= 2 else 'cpu'\n self._builder = SyncMultiGPUParameterServerBuilder(gpus, ps_device)\n super(SyncMultiGPUTrainerParameterServer, self).__init__()\n\n def _setup_graph(self, input, get_cost_fn, get_opt_fn):\n if len(self.devices) > 1:\n assert isinstance(input, FeedfreeInput), input\n tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)\n grad_list = self._builder.call_for_each_tower(tower_fn)\n self.train_op = self._builder.build(grad_list, get_opt_fn)\n return []\n\n\ndef SyncMultiGPUTrainer(gpus):\n \"\"\"\n Return a default multi-GPU trainer, if you don't care about the details.\n It may not be the most efficient one for your task.\n\n Args:\n gpus (list[int]): list of GPU ids.\n 
\"\"\"\n return SyncMultiGPUTrainerParameterServer(gpus, ps_device='cpu')\n\n\nclass AsyncMultiGPUTrainer(SingleCostTrainer):\n\n __doc__ = AsyncMultiGPUBuilder.__doc__\n\n devices = None\n \"\"\"\n List of GPU ids.\n \"\"\"\n\n @map_arg(gpus=_int_to_range)\n def __init__(self, gpus, scale_gradient=True):\n \"\"\"\n Args:\n gpus ([int]): list of GPU ids.\n scale_gradient (bool): if True, will scale each gradient by ``1.0/nr_gpu``.\n \"\"\"\n self.devices = gpus\n self._builder = AsyncMultiGPUBuilder(gpus, scale_gradient)\n super(AsyncMultiGPUTrainer, self).__init__()\n\n def _setup_graph(self, input, get_cost_fn, get_opt_fn):\n if len(self.devices) > 1:\n assert isinstance(input, FeedfreeInput), input\n tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)\n grad_list = self._builder.call_for_each_tower(tower_fn)\n self.train_op = self._builder.build(grad_list, get_opt_fn)\n return []\n\n\nclass SyncMultiGPUTrainerReplicated(SingleCostTrainer):\n\n __doc__ = SyncMultiGPUReplicatedBuilder.__doc__\n\n devices = None\n \"\"\"\n List of GPU ids.\n \"\"\"\n\n BROADCAST_EVERY_EPOCH = True\n \"\"\"\n Whether to broadcast the variables every epoch.\n Theoretically this is a no-op (because the variables\n are supposed to be in-sync).\n But this cheap operation may help prevent\n certain numerical issues in practice.\n \"\"\"\n\n @map_arg(gpus=_int_to_range)\n def __init__(self, gpus, average=True, mode=None):\n \"\"\"\n Args:\n gpus (int or [int]): list of GPU ids.\n average (bool): whether to average or sum gradients.\n mode (str or None): Gradient aggregation mode.\n Supported values: ['nccl', 'hierarchical', 'cpu'].\n Default to pick automatically by heuristics.\n These modes may have slight (within 5%) differences in speed.\n \"hierarchical\" mode was designed for DGX-like 8GPU machines.\n \"\"\"\n self.devices = gpus\n\n if mode is None:\n mode = 'hierarchical' if len(gpus) == 8 else 'nccl'\n mode = mode.lower()\n\n self._builder = SyncMultiGPUReplicatedBuilder(gpus, average, mode)\n super(SyncMultiGPUTrainerReplicated, self).__init__()\n\n def _setup_graph(self, input, get_cost_fn, get_opt_fn):\n if len(self.devices) > 1:\n assert isinstance(input, FeedfreeInput), input\n tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)\n grad_list = self._builder.call_for_each_tower(tower_fn)\n self.train_op, post_init_op = self._builder.build(grad_list, get_opt_fn)\n\n cb = RunOp(\n post_init_op,\n run_before=True,\n run_as_trigger=self.BROADCAST_EVERY_EPOCH,\n verbose=True)\n cb.name_scope = \"SyncVariables\"\n return [cb]\n\n\nclass DistributedTrainerBase(SingleCostTrainer):\n\n devices = None\n\n def __init__(self, gpus, server):\n super(DistributedTrainerBase, self).__init__()\n self.devices = gpus\n self.server = server\n self.job_name = server.server_def.job_name\n logger.info(\"Distributed training on cluster:\\n\" + str(server.server_def.cluster))\n\n def join(self):\n logger.info(\"Calling server.join() on {}:{}\".format(self.job_name, self.server.server_def.task_index))\n logger.info(\"Kill me with 'kill {}'\".format(os.getpid()))\n self.server.join() # this function will never return tensorflow#4713\n raise RuntimeError(\"This is a bug. Server.join() for should never return!\")\n\n @HIDE_DOC\n def initialize(self, session_creator, session_init):\n if not isinstance(session_creator, NewSessionCreator) or \\\n session_creator.user_provided_config:\n raise ValueError(\n \"You are not allowed to set session_creator or session_config for distributed training! 
\"\n \"To use a custom session config, pass it to tf.train.Server.\")\n super(DistributedTrainerBase, self).initialize(\n get_distributed_session_creator(self.server), session_init)\n\n\nclass DistributedTrainerParameterServer(DistributedTrainerBase):\n\n __doc__ = DistributedParameterServerBuilder.__doc__\n\n @map_arg(gpus=_int_to_range)\n def __init__(self, gpus, server, caching_device='cpu'):\n \"\"\"\n Args:\n gpus ([int]): list of GPU ids.\n server (tf.train.Server): the server with ps and workers.\n caching_device (str): either 'cpu' or 'gpu'. The device to cache variables copied from PS\n \"\"\"\n super(DistributedTrainerParameterServer, self).__init__(gpus, server)\n assert self.job_name in ['ps', 'worker'], self.job_name\n if self.job_name == 'ps':\n self.join()\n\n self._builder = DistributedParameterServerBuilder(gpus, server, caching_device)\n self.is_chief = self._builder.is_chief\n\n def _setup_graph(self, input, get_cost_fn, get_opt_fn):\n assert isinstance(input, FeedfreeInput), input\n self.train_op = self._builder.build(\n self._make_get_grad_fn(input, get_cost_fn, get_opt_fn), get_opt_fn)\n return []\n\n\nclass DistributedTrainerReplicated(DistributedTrainerBase):\n\n __doc__ = DistributedReplicatedBuilder.__doc__\n\n @map_arg(gpus=_int_to_range)\n def __init__(self, gpus, server):\n \"\"\"\n Args:\n gpus (list[int]): list of GPU ids.\n server (tf.train.Server): the server with ps and workers.\n \"\"\"\n super(DistributedTrainerReplicated, self).__init__(gpus, server)\n assert self.job_name in ['ps', 'worker'], self.job_name\n if self.job_name == 'ps':\n self.join()\n\n self._builder = DistributedReplicatedBuilder(gpus, server)\n self.is_chief = self._builder.is_chief\n\n def _setup_input(self, input_signature, input):\n with override_to_local_variable():\n get_global_step_var() # gs should be local\n # input source may create variable (queue size summary)\n # TODO This is not good because we don't know from here\n # whether something should be global or local. We now assume\n # they should be local.\n assert not input.setup_done()\n return input.setup(input_signature)\n\n def _setup_graph(self, input, get_cost_fn, get_opt_fn):\n assert isinstance(input, FeedfreeInput), input\n self.train_op, initial_sync_op, model_sync_op = self._builder.build(\n self._make_get_grad_fn(input, get_cost_fn, get_opt_fn), get_opt_fn)\n\n callbacks = []\n # Initial syncing vars from PS\n cb = RunOp(lambda: initial_sync_op,\n run_before=True, run_as_trigger=False, verbose=True)\n cb.chief_only = False\n callbacks.append(cb)\n\n # Sync model_variables to PS, only chief needs to do this\n if model_sync_op:\n cb = RunOp(lambda: model_sync_op,\n run_before=False, run_as_trigger=True, verbose=True)\n logger.warn(\"For efficiency, local MODEL_VARIABLES are only synced to PS once \"\n \"every epoch. Be careful if you save the model more frequently than this.\")\n callbacks.append(cb)\n return callbacks\n\n @property\n def _main_tower_vs_name(self):\n return \"tower0\"\n\n\nclass HorovodTrainer(SingleCostTrainer):\n \"\"\"\n Horovod trainer, support both multi-GPU and distributed training.\n\n To use for multi-GPU training:\n\n .. code-block:: bash\n\n # First, change trainer to HorovodTrainer(), then\n CUDA_VISIBLE_DEVICES=0,1,2,3 NCCL_DEBUG=INFO mpirun -np 4 --output-filename mylog python train.py\n\n To use for distributed training:\n\n .. 
code-block:: bash\n\n # First, change trainer to HorovodTrainer(), then\n mpirun -np 8 -H server1:4,server2:4 \\\\\n -bind-to none -map-by slot \\\\\n --output-filename mylog -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH \\\\\n python train.py\n # Add other environment variables you need by -x, e.g. PYTHONPATH, PATH.\n # If using all GPUs, you can always skip the `CUDA_VISIBLE_DEVICES` option.\n # There are other MPI options that can potentially improve performance especially on special hardwares.\n\n Note:\n 1. To reach the maximum speed in your system, there are many options to tune\n for Horovod installation and in the MPI command line.\n See Horovod docs for details.\n\n 2. Due to a TF bug (#8136), you must not initialize CUDA context before the trainer starts training.\n Therefore TF functions like `is_gpu_available()` or `list_local_devices()`\n must be avoided.\n You can, however, use `tf.config.experimental.list_physical_devices('GPU')`, introduced in TF 1.14.\n\n 2. MPI does not like `fork()`. If your dataflow contains multiprocessing, it may cause problems.\n\n 3. MPI sometimes fails to kill all processes in the end. Be sure to check it afterwards.\n\n 4. Keep in mind that there is one process running the script per GPU, therefore:\n\n + Make sure your InputSource has reasonable randomness.\n\n + If your data processing is heavy, doing it in a single dedicated process might be\n a better choice than doing them repeatedly in each process.\n\n + You need to make sure log directories in each process won't conflict.\n You can set it only for the chief process, or set a different one for each process.\n\n + Callbacks have an option to be run only in the chief process, or in all processes.\n See :meth:`Callback.set_chief_only()`. Most callbacks have a reasonable\n default already, but certain callbacks may not behave properly by default. Report an issue if you find any.\n\n + You can use Horovod API such as `hvd.rank()` to know which process you are and choose\n different code path. Chief process has rank 0.\n\n 5. Due to these caveats, see\n `ResNet-Horovod <https://github.com/tensorpack/benchmarks/tree/master/ResNet-Horovod>`_\n for a full example which has handled these common issues.\n This example can train ImageNet in roughly an hour following the paper's setup.\n \"\"\"\n def __init__(self, average=True, compression=None):\n \"\"\"\n Args:\n average (bool): whether to average or sum the gradients across processes.\n compression: `hvd.Compression.fp16` or `hvd.Compression.none`\n \"\"\"\n if 'pyarrow' in sys.modules:\n logger.warn(\"Horovod and pyarrow may conflict due to pyarrow bugs. 
\"\n \"Uninstall pyarrow and use msgpack instead.\")\n # lazy import\n import horovod.tensorflow as _hvd\n import horovod\n global hvd\n hvd = _hvd\n hvd_version = tuple(map(int, horovod.__version__.split('.')))\n\n hvd.init()\n self.is_chief = hvd.rank() == 0\n self._local_rank = hvd.local_rank()\n self._rank = hvd.rank()\n self._average = average\n self._compression = compression\n self._has_compression = hvd_version >= (0, 15, 0)\n logger.info(\"[HorovodTrainer] local rank={}\".format(self._local_rank))\n super(HorovodTrainer, self).__init__()\n\n def allreduce(self, grads):\n if hvd.size() == 1:\n return grads\n # copied from https://github.com/uber/horovod/blob/master/horovod/tensorflow/__init__.py\n averaged_gradients = []\n with tf.name_scope(\"HVDAllReduce\"):\n for grad, var in grads:\n if grad is not None:\n if self._compression is not None and self._has_compression:\n avg_grad = hvd.allreduce(grad, average=self._average, compression=self._compression)\n else:\n avg_grad = hvd.allreduce(grad, average=self._average)\n averaged_gradients.append((avg_grad, var))\n else:\n averaged_gradients.append((None, var))\n return averaged_gradients\n\n def _setup_graph(self, input, get_cost_fn, get_opt_fn):\n with TrainTowerContext(''):\n grads = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)()\n grads = self.allreduce(grads)\n\n opt = get_opt_fn()\n self.train_op = opt.apply_gradients(grads, name='train_op')\n\n def broadcast(self):\n logger.info(\"Running horovod broadcast ...\")\n # the op will be created later in initialize()\n self.trainer._broadcast_op.run()\n\n # TODO provide a way to sync manually\n cb = CallbackFactory(before_train=broadcast).set_chief_only(False)\n return [cb]\n\n @HIDE_DOC\n def initialize(self, session_creator, session_init):\n # broadcast_op should be the last setup_graph: it needs to be created\n # \"right before\" the graph is finalized,\n # because it needs to capture all the variables (which may be created by callbacks).\n with tf.name_scope('horovod_broadcast'):\n self._broadcast_op = hvd.broadcast_global_variables(0)\n\n # it's important that our NewSessionCreator does not finalize the graph\n if not isinstance(session_creator, NewSessionCreator):\n raise ValueError(\n \"session_creator has to be `NewSessionCreator` for horovod training! \")\n # NOTE It will fail if GPU was already detected before initializing the session\n # https://github.com/tensorflow/tensorflow/issues/8136\n session_creator.config.gpu_options.visible_device_list = str(self._local_rank)\n try:\n session_creator.config.inter_op_parallelism_threads = mp.cpu_count() // hvd.local_size()\n except AttributeError: # old horovod does not have local_size\n pass\n super(HorovodTrainer, self).initialize(session_creator, session_init)\n\n # This broadcast belongs to the \"intialize\" stage\n # It should not be delayed to the \"before_train\" stage.\n # TODO:\n # 1. a allgather helper to concat strings\n # 2. check variables on each rank match each other, print warnings, and broadcast the common set.\n if self.is_chief:\n logger.info(\"Broadcasting initialized variables ...\")\n else:\n logger.info(\"Rank {} waiting for initialization broadcasting ...\".format(self._rank))\n self.sess.run(self._broadcast_op)\n\n\n# for lazy import\nhvd = None\n"
]
| [
[
"tensorflow.name_scope"
]
]
|
LongerVision/h2o-3 | [
"e19a1e27cb46714e102e86fbc2421ddb551a1932"
]
| [
"h2o-py/tests/testdir_jira/pyunit_pubdev_5336.py"
]
| [
"import h2o\nimport pandas as pd\nfrom tests import pyunit_utils\n\n\ndef pubdev_5336():\n data = pd.DataFrame({'Origin': ['SFO', 'SAN', 'SFO', 'NYC', None],\n 'Dest': ['SFO', 'SFO', 'SAN', 'SAN', None]})\n frame = h2o.H2OFrame(data)\n frame['Origin'].asfactor()\n frame['Dest'].asfactor()\n\n # First column has one more categorical variable\n assert frame['Origin'].nlevels() == [3]\n assert frame['Origin'].levels() == [['NYC', 'SAN', 'SFO']]\n assert frame['Dest'].nlevels() == [2]\n assert frame['Dest'].levels() == [['SAN', 'SFO']]\n frame['eq'] = frame['Origin'] == frame['Dest']\n assert frame['eq'][0,0] == 1\n assert frame['eq'][1,0] == 0\n assert frame['eq'][2,0] == 0\n assert frame['eq'][3,0] == 0\n\n # Compare in inverse order (tests one more categorical variable in first column)\n frame['eqInv'] = frame['Dest'] == frame['Origin']\n assert frame['eqInv'][0,0] == 1\n assert frame['eqInv'][1,0] == 0\n assert frame['eqInv'][2,0] == 0\n assert frame['eqInv'][3,0] == 0\n\n train = h2o.import_file(path=pyunit_utils.locate(\"smalldata/testng/airlines_train.csv\"))\n train['Origin'].asfactor()\n train['Dest'].asfactor()\n train['eq'] = train['Origin'] == train['Dest']\n assert train[train['eq'] == 1].nrows == 0\n\n missing = h2o.import_file(path=pyunit_utils.locate(\"smalldata/logreg/prostate_missing.csv\"))\n missing['GLEASON'] = missing['GLEASON'].asfactor()\n missing['DPROS'] = missing['DPROS'].asfactor()\n missing['eq'] = missing['GLEASON'] == missing['DPROS']\n # Both columns have NA on this row\n assert missing['eq'][1,0] == 1\n # One NA on this in GLEASON column\n assert missing['eq'][7,0] == 0\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(pubdev_5336)\nelse:\n pubdev_5336\n"
]
| [
[
"pandas.DataFrame"
]
]
|
epfml/byzantine-robust-decentralized-optimizer | [
"5387d600354a0fbf137c3e4346c4441b2f427407"
]
| [
"codes/tasks/quadratics.py"
]
| [
"import contextlib\nimport copy\nimport math\nimport numpy as np\nimport torch\n\n\nclass LinearModel(torch.nn.Module):\n def __init__(self, d):\n super(LinearModel, self).__init__()\n self.layer = torch.nn.Linear(d, 1, bias=False)\n\n def forward(self, x):\n return self.layer(x)\n\n\ndef generate_A_with_L_mu(n, d, L, mu=-1):\n \"\"\"\n Generate a data matrix A for\n f(x) = \\frac{1}{n} || A x - b ||_2^2\n with L-smoothnes and mu-convexity.\n\n The L-smoothness is the largest eigenvalue of the hessian of f\n hessian(f)(x) = \\frac{2}{n} A^T A\n \"\"\"\n assert mu <= L\n\n # Generate unitary matrix U and V\n dummy_matrix = torch.randn(n, d)\n U, _, V = torch.linalg.svd(dummy_matrix)\n\n # Construct matrix S such that S.T @ S has largest elements L\n # and smallest elements mu.\n smallest = math.sqrt(abs(mu) * n / 2)\n largest = math.sqrt(L * n / 2)\n diag = torch.linspace(start=smallest, end=largest, steps=min(n, d))\n S = torch.zeros(n, d)\n S[list(range(d)), list(range(d))] = diag\n\n # Reconstruct A\n return U @ S @ V.T\n\n\ndef generate_synthetic_dataset(n, d, L, mu, sigma):\n \"\"\"\n The optimum model is zeros.\n \"\"\"\n A = generate_A_with_L_mu(n, d, L, mu)\n x_opt = torch.zeros(d)\n\n def _add_noise_to_target():\n # Generate a random noise and compute its variance\n b = torch.randn(n)\n _2Atb = 2 * A.T @ torch.diag(b)\n # The variance over `n` and sum over `d`.\n b_sigma = _2Atb.var(axis=1).sum().sqrt()\n # Rescale the noise to the specific sigma\n return sigma / b_sigma * b\n\n b = _add_noise_to_target()\n return A, b, x_opt\n\n\ndef generate_synthetic_distributed_dataset(m, n, d, L, mu, sigma, zeta):\n \"\"\"\n Create datasets for `m` workers, each having `n` samples.\n\n Note the L and mu is for each worker not for all workers.\n \"\"\"\n # The `A` here\n A = torch.cat([generate_A_with_L_mu(n, d, L, mu) for _ in range(m)])\n\n # Create noise as heterogeneity\n b = []\n xi_stars = []\n for i in range(m):\n Ai = A[i * n: (i + 1) * n, :]\n xi_star = torch.randn(d) + i\n bi = Ai @ xi_star\n b.append(bi)\n xi_stars.append(xi_star)\n b = torch.cat(b)\n\n x_opt = torch.Tensor(np.linalg.solve(A.T @ A, A.T @ b))\n zeta2_ = 0\n for i in range(m):\n Ai = A[i * n: (i + 1) * n, :]\n xi_star = xi_stars[i]\n zeta_i2 = (2 / n * Ai.T @ Ai @ (x_opt - xi_star)).norm() ** 2\n zeta2_ += zeta_i2\n zeta2_ /= m\n\n scale = zeta / zeta2_.sqrt()\n x_opt = scale * x_opt\n xi_stars = [xi_star * scale for xi_star in xi_stars]\n b = scale * b\n\n # Adding sigma2 noise\n sigma_noises = []\n for i in range(m):\n xi = torch.randn(n)\n Ai = A[i * n: (i + 1) * n, :]\n _2Atb = 2 * Ai.T @ torch.diag(xi)\n\n # The variance over `n` and sum over `d`.\n b_sigma = _2Atb.var(axis=1).sum().sqrt()\n # Rescale the noise to the specific sigma\n sigma_noise = sigma / b_sigma * xi\n sigma_noises.append(sigma_noise)\n\n sigma_noises = torch.cat(sigma_noises)\n b += sigma_noises\n\n return (\n [A[i * n: (i + 1) * n, :] for i in range(m)],\n [b[i * n: (i + 1) * n] for i in range(m)],\n x_opt,\n )\n\n\nclass Quadratics(torch.utils.data.Dataset):\n def __init__(self, n_samples, n_features, L, mu, sigma=0, seed=0):\n self.n_samples = n_samples\n self.n_features = n_features\n self.sigma = sigma\n\n with fork_with_seed(seed=seed):\n self._A, self._b, self._x_opt = generate_synthetic_dataset(\n n=self.n_samples,\n d=self.n_features,\n L=L,\n mu=mu,\n sigma=sigma,\n )\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n return self.A[idx, :], self.b[idx]\n\n 
@property\n def A(self):\n return self._A\n\n @property\n def b(self):\n return self._b\n\n @property\n def x_opt(self):\n return self._x_opt\n\n\[email protected]\ndef fork_with_seed(seed):\n if seed is None:\n yield\n else:\n with torch.random.fork_rng(devices=[]):\n torch.manual_seed(seed)\n yield\n\n\nclass QuadraticTask(object):\n def __init__(\n self,\n n_samples,\n n_features,\n batch_size=None,\n L=10,\n mu=1,\n r0=1,\n sigma=0,\n seed=0,\n ):\n self.r0 = r0\n self._model = self._initialize_model(n_features, r0)\n self._dataset = Quadratics(n_samples, n_features, L, mu, sigma, seed)\n self.n_samples = n_samples\n self.batch_size = batch_size or n_samples\n\n def _initialize_model(self, d, r0):\n model = LinearModel(d)\n model.layer.weight.data /= model.layer.weight.data.norm() / r0\n return model\n\n def loss_func(self):\n return torch.nn.MSELoss(reduction=\"mean\")\n\n @property\n def model(self):\n return self._model\n\n def model_class(self):\n return LinearModel\n\n def metrics(self):\n return {}\n\n def train_loader(self):\n return torch.utils.data.DataLoader(\n self._dataset,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n def test_loader(self):\n raise NotImplementedError()\n\n\nclass DistributedQuadratics(torch.utils.data.Dataset):\n def __init__(self, A, b):\n self._A = A\n self._b = b\n self.n = A.shape[0]\n\n def __len__(self):\n return self.n\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n return self.A[idx, :], self.b[idx]\n\n @property\n def A(self):\n return self._A\n\n @property\n def b(self):\n return self._b\n\n\nclass DistributedQuadraticsTask(object):\n def __init__(self, A, b, batch_size, model):\n self.A = A\n self.b = b\n self.batch_size = batch_size\n self._model = model\n self.n_samples = A.shape[0]\n self._dataset = DistributedQuadratics(A, b)\n\n def loss_func(self):\n return torch.nn.MSELoss(reduction=\"mean\")\n\n @property\n def model(self):\n return self._model\n\n def model_class(self):\n return LinearModel\n\n def metrics(self):\n return {}\n\n def train_loader(self):\n return torch.utils.data.DataLoader(\n self._dataset,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n def test_loader(self):\n return torch.utils.data.DataLoader(\n self._dataset,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n\ndef get_distributed_quadratics_tasks(m, n, d, b, L, mu, r0, sigma, zeta, seed):\n with fork_with_seed(seed=seed):\n As, bs, _ = generate_synthetic_distributed_dataset(\n m=m, n=n, d=d, L=L, mu=mu, sigma=sigma, zeta=zeta\n )\n model = LinearModel(d)\n model.layer.weight.data /= model.layer.weight.data.norm() / r0\n\n tasks = []\n for i in range(m):\n worker_task = DistributedQuadraticsTask(\n A=As[i],\n b=bs[i],\n batch_size=b,\n model=copy.deepcopy(model),\n )\n tasks.append(worker_task)\n\n main_task = DistributedQuadraticsTask(\n A=torch.cat(As), b=torch.cat(bs), batch_size=m * n, model=None\n )\n return tasks, main_task\n"
]
| [
[
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.MSELoss",
"torch.linalg.svd",
"torch.is_tensor",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"numpy.linalg.solve",
"torch.random.fork_rng",
"torch.diag",
"torch.randn"
]
]
|
XuchanBao/ICCV2019-LearningToPaint | [
"777effbd496567c619c6f9896a2f349c1a842850"
]
| [
"baseline_modelfree/env.py"
]
| [
"import sys\nimport json\nimport torch\nimport numpy as np\nimport argparse\nimport torchvision.transforms as transforms\nimport cv2\nfrom DRL.ddpg import decode\nfrom Renderer.quadratic_gen import get_initialization, generate_quadratic_heatmap\nfrom utils.util import *\nfrom PIL import Image\nfrom torchvision import transforms, utils\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\naug = transforms.Compose(\n [transforms.ToPILImage(),\n transforms.RandomHorizontalFlip(),\n ])\n\nwidth = 128\nconvas_area = width * width\n\nimg_train = []\nimg_test = []\ntrain_num = 0\ntest_num = 0\n\nclass Paint:\n def __init__(self, obs_dim, act_dim, batch_size, max_step):\n self.batch_size = batch_size\n self.max_step = max_step\n self.action_space = act_dim\n self.observation_space = (self.batch_size, width, width, obs_dim)\n self.test = False\n\n self.parameters = None\n \n def load_data(self):\n # CelebA\n global train_num, test_num\n for i in range(200000):\n img_id = '%06d' % (i + 1)\n try:\n img = cv2.imread('../data/img_align_celeba/' + img_id + '.jpg', cv2.IMREAD_UNCHANGED)\n img = cv2.resize(img, (width, width))\n if i > 2000:\n train_num += 1\n img_train.append(img)\n else:\n test_num += 1\n img_test.append(img)\n finally:\n if (i + 1) % 10000 == 0:\n print('loaded {} images'.format(i + 1))\n print('finish loading data, {} training images, {} testing images'.format(str(train_num), str(test_num)))\n \n def pre_data(self, id, test):\n if test:\n img = img_test[id]\n else:\n img = img_train[id]\n if not test:\n img = aug(img)\n img = np.asarray(img)\n return np.transpose(img, (2, 0, 1))\n \n def reset(self, test=False, begin_num=False):\n self.test = test\n self.imgid = [0] * self.batch_size\n self.gt = torch.zeros([self.batch_size, 3, width, width], dtype=torch.uint8).to(device)\n for i in range(self.batch_size):\n if test:\n id = (i + begin_num) % test_num\n else:\n id = np.random.randint(train_num)\n self.imgid[i] = id\n self.gt[i] = torch.tensor(self.pre_data(id, test))\n self.tot_reward = ((self.gt.float() / 255) ** 2).mean(1).mean(1).mean(1)\n self.stepnum = 0\n\n self.parameters = get_initialization(self.batch_size)\n self.canvas, _, __ = generate_quadratic_heatmap(self.parameters)\n self.canvas = self.canvas.reshape(-1, 3, width, width).to(device)\n\n self.parameters = self.parameters.unsqueeze(-1).unsqueeze(-1).expand(self.parameters.size() + (128, 128))\n self.parameters = self.parameters.to(device)\n\n self.lastdis = self.ini_dis = self.cal_dis()\n return self.observation()\n \n def observation(self):\n # canvas B * 3 * width * width\n # gt B * 3 * width * width\n # params B * 2 * width * width\n # T B * 1 * width * width\n # Total: (B, 9, width, width)\n T = torch.ones([self.batch_size, 1, width, width], dtype=torch.uint8) * self.stepnum\n return torch.cat((self.canvas.float(), self.gt.float(), self.parameters, T.float().to(device)), 1) # canvas, gt_img, params, T\n\n def cal_trans(self, s, t):\n return (s.transpose(0, 3) * t).transpose(0, 3)\n \n def step(self, action):\n self.canvas, self.parameters = decode(action, self.parameters)\n self.canvas = (self.canvas * 255).byte()\n\n self.stepnum += 1\n ob = self.observation()\n done = (self.stepnum == self.max_step)\n reward = self.cal_reward() # np.array([0.] 
* self.batch_size)\n return ob.detach(), reward, np.array([done] * self.batch_size), None\n\n def cal_dis(self):\n return (((self.canvas.float() - self.gt.float()) / 255) ** 2).mean(1).mean(1).mean(1)\n \n def cal_reward(self):\n dis = self.cal_dis()\n reward = (self.lastdis - dis) / (self.ini_dis + 1e-8)\n self.lastdis = dis\n return to_numpy(reward)\n"
]
| [
[
"torch.zeros",
"numpy.array",
"numpy.asarray",
"torch.ones",
"torch.cuda.is_available",
"numpy.transpose",
"numpy.random.randint"
]
]
|
tansey/hrt | [
"f6d271a34590d073a08f0fc40f40e898f38cdf97"
]
| [
"benchmarks/liang/sim_liang_agg.py"
]
| [
"import matplotlib\nmatplotlib.use('Agg')\nimport os\nimport numpy as np\nimport matplotlib.pylab as plt\nimport seaborn as sns\nfrom pyhrt.utils import bh_predictions, tpr, fdr, pretty_str\n\ndef p_plot(p_values, S, start=0, end=1):\n plt.close()\n with sns.axes_style('white', {'legend.frameon': True}):\n plt.rc('font', weight='bold')\n plt.rc('grid', lw=3)\n plt.rc('lines', lw=2)\n plt.rc('axes', lw=2)\n for p, label in zip([p_values[:,:S], p_values[:,S:]], ['Alternative', 'Null']):\n p = p.flatten()\n p = p[~np.isnan(p)]\n p = np.sort(p)\n x = np.concatenate([[0],p,[1]])\n y = np.concatenate([[0],(np.arange(p.shape[0])+1.)/p.shape[0],[1]])\n plt.plot(x, y, label=label, lw=2)\n plt.plot([0,1], [0,1], color='black', ls='--', lw=3, label='U(0,1)', alpha=0.7)\n plt.xlim([start,end])\n plt.ylim([start,end])\n plt.xlabel('p-value', fontsize=18, weight='bold')\n plt.ylabel('Empirical CDF', fontsize=18, weight='bold')\n plt.legend(loc='lower right')\n\ndef bounds_plot(bounds):\n plt.close()\n with sns.axes_style('white', {'legend.frameon': True}):\n plt.rc('font', weight='bold')\n plt.rc('grid', lw=3)\n plt.rc('lines', lw=2)\n plt.rc('axes', lw=2)\n lower = bounds[:,:,0][~np.isnan(bounds[:,:,0])].flatten()\n upper = bounds[:,:,1][~np.isnan(bounds[:,:,1])].flatten()\n plt.hist(lower, label='Lower band', color='blue', bins=np.linspace(0,50,51), normed=True)\n plt.hist(upper, label='Upper band', color='orange', bins=np.linspace(50,100,51), normed=True)\n plt.xlabel('Band value', fontsize=18, weight='bold')\n plt.ylabel('Proportion', fontsize=18, weight='bold')\n plt.legend(loc='upper right')\n\ndef results_plot(tpr_vals, fdr_vals, names, fdr_threshold):\n import pandas as pd\n plt.close()\n with sns.axes_style('white', {'legend.frameon': True}):\n plt.rc('font', weight='bold')\n plt.rc('grid', lw=3)\n plt.rc('lines', lw=2)\n plt.rc('axes', lw=2)\n plt.figure(figsize=(12,5))\n rates = []\n labels = []\n models = []\n for t, f, n in zip(tpr_vals, fdr_vals, names):\n rates.extend(t)\n rates.extend(f)\n labels.extend(['TPR']*len(t))\n labels.extend(['FDP']*len(f))\n models.extend([n]*(len(t)+len(f)))\n df = pd.DataFrame({'value': rates, 'Rate': labels, 'Model': models})\n ax = sns.boxplot(x='Model', y='value', hue='Rate', data=df) # RUN PLOT\n plt.xlabel('', fontsize=18, weight='bold')\n plt.ylabel('Power and FDP', fontsize=18, weight='bold')\n plt.axhline(fdr_threshold, color='red', lw=2, ls='--')\n # ax.tick_params(labelsize=10)\n plt.legend(loc='upper right')\n sns.despine(offset=10, trim=True)\n\n\ndef main():\n N = 500 # total number of samples\n P = 500 # number of features\n S = 40 # number of signal features\n nperms = 5000\n nbootstraps = 100\n fdr_threshold = 0.1\n ntrials = 100\n names = ['Holdout\\nPermutation', 'Calibrated\\nHRT\\n(linear)', 'Uncalibrated\\nHRT',\n 'CV\\nPermutation', 'Calibrated\\nCV-HRT\\n(linear)', 'Uncalibrated\\nCV-HRT',\n 'Calibrated\\nHRT\\n(linear)', 'Calibrated\\nHRT',\n 'Calibrated\\nCV-HRT\\n(linear)', 'Calibrated\\nCV-HRT']\n prefixes = ['perm', 'linear', 'nonlinear',\n 'cv_perm', 'cv_linear', 'cv_nonlinear',\n 'robust_linear', 'robust_nonlinear',\n 'cv_robust_linear', 'cv_robust_nonlinear']\n p_values = np.full((len(prefixes), ntrials, P), np.nan)\n tpr_vals, fdr_vals = np.full((len(prefixes), ntrials), np.nan), np.full((len(prefixes), ntrials), np.nan)\n\n for idx, prefix in enumerate(prefixes):\n for trial in range(ntrials):\n if (trial % 25) == 0:\n print('{} trial: {}'.format(prefix, trial))\n TRUTH_PATH = 'data/{}/truth.csv'.format(trial)\n truth = 
np.loadtxt(TRUTH_PATH, delimiter=',')\n\n P_VALUE_PATH = 'data/{}/{}_p_values.npy'.format(trial, prefix)\n if os.path.exists(P_VALUE_PATH):\n p_values[idx, trial] = np.load(P_VALUE_PATH)\n \n clean_up_needed = False\n if np.any(np.isnan(p_values[idx,trial])):\n clean_up_needed = True\n for feature in range(P):\n Pi_PATH = 'data/{}/{}_p_values_{}.npy'.format(trial, prefix, feature)\n if np.isnan(p_values[idx, trial, feature]) and os.path.exists(Pi_PATH):\n try:\n p_values[idx,trial,feature] = np.load(Pi_PATH)\n except:\n os.remove(Pi_PATH)\n \n # p_values[idx, trial] = p_values[idx, trial] * nperms / (nperms+1)\n missing = np.isnan(p_values[idx, trial])\n pred = bh_predictions(p_values[idx, trial][~missing], fdr_threshold)\n tpr_vals[idx, trial] = tpr(truth[~missing], pred)\n fdr_vals[idx, trial] = fdr(truth[~missing], pred)\n\n if not np.any(np.isnan(p_values[idx,trial])):\n # clean up\n if clean_up_needed:\n np.save(P_VALUE_PATH, p_values[idx,trial])\n for feature in range(P):\n Pi_PATH = 'data/{}/{}_p_values_{}.npy'.format(trial, prefix, feature)\n if os.path.exists(Pi_PATH):\n # print('Would delete {}'.format((idx, trial, feature)))\n os.remove(Pi_PATH)\n else:\n print('Trial {} Nulls: {}'.format(trial, np.where(np.isnan(p_values[idx, trial]))[0]))\n\n if 'robust' in prefix:\n # Get the distribution of confidence intervals\n bounds = np.full((ntrials, P, 2), np.nan)\n for trial in range(ntrials):\n BOUNDS_PATH = 'data/{}/{}_bounds.npy'.format(trial, prefix)\n if os.path.exists(BOUNDS_PATH):\n bounds[trial] = np.load(BOUNDS_PATH)\n \n clean_up_needed = False\n if np.any(np.isnan(bounds[trial])):\n clean_up_needed = True\n for feature in range(P):\n BOUNDS_i_PATH = 'data/{}/{}_bounds_{}.npy'.format(trial, prefix, feature)\n if np.any(np.isnan(bounds[trial, feature])) and os.path.exists(BOUNDS_i_PATH):\n bounds[trial,feature] = np.load(BOUNDS_i_PATH)\n \n if not np.any(np.isnan(bounds[trial])):\n # clean up\n if clean_up_needed:\n np.save(BOUNDS_PATH, bounds[trial])\n for feature in range(P):\n BOUNDS_i_PATH = 'data/{}/{}_bounds_{}.npy'.format(trial, prefix, feature)\n if os.path.exists(BOUNDS_i_PATH):\n # print('Would delete {}'.format(BOUNDS_i_PATH))\n os.remove(BOUNDS_i_PATH)\n\n bounds_plot(bounds)\n plt.savefig('plots/liang-bounds-{}.pdf'.format(prefix.replace('_', '-')), bbox_inches='tight')\n\n\n print('*** {} model ({} trials) ***'.format(names[idx], (~np.isnan(tpr_vals[idx])).sum()))\n print('TPR: {:.2f}%'.format(np.nanmean(tpr_vals[idx], axis=0)*100))\n print('FDR: {:.2f}%'.format(np.nanmean(fdr_vals[idx], axis=0)*100))\n print('')\n \n p_plot(p_values[idx], S)\n plt.savefig('plots/liang-p-values-{}.pdf'.format(prefix.replace('_','-')), bbox_inches='tight')\n\n selected = np.array([0,3,2,5,7,9])\n results_plot([tpr_vals[i] for i in selected],\n [fdr_vals[i] for i in selected],\n [names[i] for i in selected],\n fdr_threshold)\n plt.savefig('plots/liang-tpr-fdr.pdf', bbox_inches='tight')\n\nif __name__ == '__main__':\n main()\n\n'''\n*** Holdout Permutation model (100 trials) ***\nTPR: 31.72%\nFDR: 47.17%\n\n*** Calibrated HRT (linear) model (100 trials) ***\nTPR: 1.62%\nFDR: 24.23%\n\n*** Uncalibrated HRT model (100 trials) ***\nTPR: 6.42%\nFDR: 23.82%\n\n*** CV Permutation model (100 trials) ***\nTPR: 69.88%\nFDR: 69.30%\n\n*** Calibrated CV-HRT (linear) model (100 trials) ***\nTPR: 12.47%\nFDR: 13.57%\n\n*** Uncalibrated CV-HRT model (100 trials) ***\nTPR: 38.23%\nFDR: 18.46%\n\n*** Calibrated HRT (linear) model (100 trials) ***\nTPR: 0.85%\nFDR: 11.70%\n\n*** Calibrated HRT 
model (100 trials) ***\nTPR: 4.65%\nFDR: 18.89%\n\n*** Calibrated CV-HRT (linear) model (100 trials) ***\nTPR: 9.90%\nFDR: 12.54%\n\n*** Calibrated CV-HRT model (100 trials) ***\nTPR: 35.27%\nFDR: 14.48%\n\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
]
| [
[
"matplotlib.pylab.savefig",
"matplotlib.pylab.ylabel",
"numpy.load",
"numpy.nanmean",
"numpy.sort",
"matplotlib.pylab.xlim",
"numpy.concatenate",
"numpy.full",
"pandas.DataFrame",
"matplotlib.pylab.close",
"numpy.save",
"numpy.arange",
"matplotlib.pylab.plot",
"matplotlib.pylab.rc",
"matplotlib.use",
"numpy.array",
"matplotlib.pylab.axhline",
"numpy.loadtxt",
"matplotlib.pylab.ylim",
"numpy.isnan",
"matplotlib.pylab.legend",
"matplotlib.pylab.figure",
"matplotlib.pylab.xlabel",
"numpy.linspace"
]
]
|
kasmith/OptimTools | [
"1212a66b57e335cb5a4563f23c936aba9dede56f"
]
| [
"OptimTools/producer_consumer.py"
]
| [
"from __future__ import division, print_function\nfrom multiprocessing import Process, Condition, Event, Queue, cpu_count\nimport numpy as np\nimport time\nimport random\n\n# A Producer that initializes with loaded data then waits for further processing on that data\nclass _Producer(Process):\n def __init__(self, init_function, init_data, process_function, queues, conds):\n super(_Producer, self).__init__()\n\n self._init_fn = init_function\n self._init_dat = init_data\n self._proc_fn = process_function\n self._set_q, self._get_q = queues\n self._set_cond, self._get_cond = conds\n self._stop = Event()\n\n def run(self):\n # Initialize random number generators here\n random.seed()\n np.random.seed()\n # Run the initial function\n out = [self._init_fn(id) for id in self._init_dat]\n while not self._stop.is_set():\n # Pop parameters off the queue and run the process function\n self._set_cond.acquire()\n if self._set_q.empty():\n self._set_cond.wait()\n if self._stop.is_set():\n return\n params = self._set_q.get()\n self._set_cond.release()\n\n # Run the function\n ret = [self._proc_fn(params, o) for o in out]\n\n # Put the results back onto the queue\n self._get_cond.acquire()\n self._get_q.put(ret)\n self._get_cond.notify()\n self._get_cond.release()\n\n def stop(self):\n self._stop.set()\n\n# The class that will be used -- takes in initialization functions / data, slices them up, then calls a\n# process_function in a segmented way\nclass ProducerConsumer(object):\n def __init__(self, init_function, init_data, process_function, n_cores = cpu_count(), timeout = None):\n self._ncore = n_cores\n self._split_dat = [init_data[i::n_cores] for i in range(n_cores)]\n self._producer_list = []\n self._init_fn = init_function\n self._proc_fn = process_function\n self._timeout = timeout\n self._lastparams = None\n for i in range(n_cores):\n self._producer_list.append(self._make_producer(i))\n\n #set_q = Queue()\n #get_q = Queue()\n #set_cond = Condition()\n #get_cond = Condition()\n #producer = _Producer(init_function, self._split_dat[i], process_function, [set_q, get_q], [set_cond, get_cond])\n #self._producer_list.append([producer, set_q, set_cond, get_q, get_cond])\n #producer.start()\n\n def run(self, params):\n self._set_params(params)\n return self._get_outcomes()\n\n def shut_down(self):\n for p, _, c, _, _ in self._producer_list:\n c.acquire()\n p.stop()\n c.notify()\n c.release()\n self._producer_list = []\n\n def __del__(self):\n self.shut_down()\n\n def _make_producer(self, index):\n set_q = Queue()\n get_q = Queue()\n set_cond = Condition()\n get_cond = Condition()\n producer = _Producer(self._init_fn, self._split_dat[index], self._proc_fn, [set_q, get_q], [set_cond, get_cond])\n producer.start()\n return producer, set_q, set_cond, get_q, get_cond\n\n def _set_params(self, params):\n self._lastparams = params\n for p, q, c, _, _ in self._producer_list:\n c.acquire()\n q.put(params)\n c.notify()\n c.release()\n\n def _get_outcomes(self):\n starttime = time.time()\n agg = []\n for i, (p, setq, setcond, q, c) in enumerate(self._producer_list):\n c.acquire()\n if q.empty():\n if self._timeout:\n remwait = max(self._timeout + starttime - time.time(), 1.) 
# Always give it a small chance to load\n c.wait(remwait)\n # If the queue remains empty, replace the process and try again with the last parameter set\n while q.empty():\n setcond.acquire()\n p.stop()\n setcond.notify()\n setcond.release()\n p.terminate()\n print (\"Process exceeded timeout limit\")\n print (\"Init data:\", self._split_dat[i])\n print (\"Parameters:\", self._lastparams)\n print (\"Timeout:\", self._timeout)\n print (\"\\n\")\n pgroup = self._make_producer(i)\n p, setq, setcond, q, c = pgroup\n self._producer_list[i] = pgroup\n setcond.acquire()\n setq.put(self._lastparams)\n setcond.notify()\n setcond.release()\n c.acquire()\n if q.empty():\n r = random.randint(0, 100)\n print (\"Start wait time:\", r, time.time())\n c.wait(self._timeout)\n print (\"End wait time:\", r, time.time())\n else:\n c.wait()\n from_q = q.get()\n agg += from_q\n c.release()\n return agg\n\nif __name__ == '__main__':\n from scipy.stats import norm\n import random\n def initfn(s):\n print (\"initialized\")\n return s\n\n def procfn(arg, s):\n tst = 3*random.random()\n wait = s + tst\n print (\"Waiting for\", wait, \"seconds\")\n time.sleep(wait)\n return wait\n\n procon = ProducerConsumer(initfn, [1.,1.2,2.,1.6, 1.7], procfn, 3, timeout = 5)\n print (\"Done\")\n print (procon.run(2))\n del procon\n"
]
| [
[
"numpy.random.seed"
]
]
|
Hirosaji/bert | [
"74b9d39044c4123e0851e7adc1d5182a0d720a3b"
]
| [
"server/bert_script/tokenization.py"
]
| [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tokenization classes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport re\nimport unicodedata\nimport sentencepiece as sp\nimport six\nimport tensorflow as tf\n\n\ndef validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-training. 
If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))\n\n\ndef convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.io.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab\n\n\ndef convert_by_vocab(vocab, items, unk_info):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n for item in items:\n if item in vocab:\n output.append(vocab[item])\n else:\n output.append(unk_info)\n return output\n\n\ndef convert_tokens_to_ids(vocab, tokens):\n return convert_by_vocab(vocab, tokens, unk_info=0)\n\n\ndef convert_ids_to_tokens(inv_vocab, ids):\n return convert_by_vocab(inv_vocab, ids, unk_info=\"<unk>\")\n\n\ndef whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens\n\n\nclass FullTokenizer(object):\n \"\"\"Runs end-to-end tokenziation.\"\"\"\n\n def __init__(self, vocab_file, model_file, do_lower_case=True):\n self.tokenizer = SentencePieceTokenizer(model_file, do_lower_case=do_lower_case)\n self.vocab = load_vocab(vocab_file)\n self.inv_vocab = {v: k for k, v in self.vocab.items()}\n # self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n # self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)\n\n def tokenize(self, text):\n split_tokens = self.tokenizer.tokenize(text)\n return split_tokens\n\n # split_tokens = []\n # for token in self.basic_tokenizer.tokenize(text):\n # for sub_token in self.wordpiece_tokenizer.tokenize(token):\n # split_tokens.append(sub_token)\n\n return split_tokens\n\n def convert_tokens_to_ids(self, tokens):\n return convert_by_vocab(self.vocab, tokens, unk_info=0)\n\n def convert_ids_to_tokens(self, ids):\n return convert_by_vocab(self.inv_vocab, ids, unk_info=\"<unk>\")\n\n\nclass SentencePieceTokenizer(object):\n \"\"\"Runs SentencePiece tokenization 
(from raw text to tokens list)\"\"\"\n\n def __init__(self, model_file, do_lower_case=True):\n \"\"\"Constructs a SentencePieceTokenizer.\"\"\"\n self.tokenizer = sp.SentencePieceProcessor()\n if self.tokenizer.Load(model_file):\n print(\"Loaded a trained SentencePiece model.\")\n else:\n print(\"You have to give a path of trained SentencePiece model.\")\n sys.exit(1)\n self.do_lower_case = do_lower_case\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n if self.do_lower_case:\n text = text.lower()\n output_tokens = self.tokenizer.EncodeAsPieces(text)\n return output_tokens\n\n\nclass BasicTokenizer(object):\n \"\"\"Runs basic tokenization (punctuation splitting, lower casing, etc.).\"\"\"\n\n def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens\n\n def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)\n\n def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]\n\n def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False\n\n def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n\nclass WordpieceTokenizer(object):\n \"\"\"Runs WordPiece tokenziation.\"\"\"\n\n def __init__(self, vocab, unk_token=\"[UNK]\", max_input_chars_per_word=200):\n self.vocab = vocab\n self.unk_token = unk_token\n self.max_input_chars_per_word = max_input_chars_per_word\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens\n\n\ndef _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False\n\n\ndef _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False\n\n\ndef _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if 
cat.startswith(\"P\"):\n return True\n return False\n"
]
| [
[
"tensorflow.io.gfile.GFile"
]
]
|
Ronlin1/Sentiment_Analysis | [
"96c9da01931b57cf861fd97c32e2947cb63fb09c"
]
| [
"vectoriser.py"
]
| [
"# importing the modules\nfrom sklearn.feature_extraction.text import HashingVectorizer\nimport re\nimport os\nimport pickle\n\n# now we open the stopwords we created before\nstop = pickle.load(open('stopwords.pkl', 'rb'))\n\n\ndef tokenizer(text):\n text = re.sub('<[^>]*>', '', text)\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)',\n text.lower())\n text = re.sub('[\\W]+', ' ', text.lower()) \\\n + ' '.join(emoticons).replace('-', '')\n tokenized = [w for w in text.split() if w not in stop]\n return tokenized\n\n\nvect = HashingVectorizer(decode_error='ignore',\n n_features=2 ** 21,\n preprocessor=None,\n tokenizer=tokenizer)\n"
]
| [
[
"sklearn.feature_extraction.text.HashingVectorizer"
]
]
|
welkin-feng/cs285-homework-2020 | [
"ce2511acd7233e0ecf9ffc030f7d76e1f8919745"
]
| [
"hw3/cs285/policies/argmax_policy.py"
]
| [
"import numpy as np\n\n\nclass ArgMaxPolicy(object):\n\n def __init__(self, critic):\n self.critic = critic\n\n def get_action(self, obs):\n \"\"\"\n Assert action is discrete.\n\n Args:\n obs (np.ndarray): size [N, ob_dim]\n\n Returns:\n actions (np.ndarray): size [N, ]\n\n \"\"\"\n if len(obs.shape) > 3:\n observation = obs\n else:\n observation = obs[None]\n\n # DONE: return the action that maxinmizes the Q-value\n # at the current observation as the output\n assert self.critic\n q_values_na = self.critic.qa_values(observation)\n\n actions = np.argmax(q_values_na, axis=-1).squeeze()\n\n return actions\n"
]
| [
[
"numpy.argmax"
]
]
|
autoih/addons | [
"b59f0e89ca09837808331be1eee8ae8df8eb9355"
]
| [
"tensorflow_addons/activations/tanhshrink_test.py"
]
| [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport sys\n\nimport pytest\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_addons.activations import tanhshrink\nfrom tensorflow_addons.activations.tanhshrink import _tanhshrink_py\nfrom tensorflow_addons.utils import test_utils\n\n\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_same_as_py_func(dtype):\n np.random.seed(1234)\n for _ in range(20):\n verify_funcs_are_equivalent(dtype)\n\n\ndef verify_funcs_are_equivalent(dtype):\n x_np = np.random.uniform(-10, 10, size=(4, 4)).astype(dtype)\n x = tf.convert_to_tensor(x_np)\n with tf.GradientTape(persistent=True) as t:\n t.watch(x)\n y_native = tanhshrink(x)\n y_py = _tanhshrink_py(x)\n test_utils.assert_allclose_according_to_type(y_native, y_py)\n grad_native = t.gradient(y_native, x)\n grad_py = t.gradient(y_py, x)\n test_utils.assert_allclose_according_to_type(grad_native, grad_py)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__]))\n"
]
| [
[
"numpy.random.seed",
"tensorflow.convert_to_tensor",
"numpy.random.uniform",
"tensorflow.GradientTape"
]
]
|
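The test above checks the compiled activation against a Python reference. Tanhshrink itself is simply f(x) = x - tanh(x) with derivative tanh(x)^2; a quick NumPy sanity check of that derivative (values and tolerances chosen arbitrarily):

```python
# Tanhshrink: f(x) = x - tanh(x); analytic derivative is tanh(x)**2.
import numpy as np

x = np.linspace(-3.0, 3.0, 7)
y = x - np.tanh(x)
grad = np.tanh(x) ** 2                             # since d/dx tanh(x) = 1 - tanh(x)**2

eps = 1e-6                                         # forward finite-difference check
num_grad = ((x + eps) - np.tanh(x + eps) - y) / eps
print(np.max(np.abs(num_grad - grad)))             # small (~1e-6 or below)
```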
gridgentoo/h2o-tree | [
"92478be7633fcf3f4b550fe4cbf69bf85112391e"
]
| [
"h2o-py/tests/testdir_munging/unop/pyunit_vec_math_ops.py"
]
| [
"from builtins import zip\nfrom builtins import range\nimport sys\nsys.path.insert(1,\"../../../\")\nimport h2o\nfrom tests import pyunit_utils\nimport numpy as np\nimport random\nimport math\nimport scipy.special\n\n\ndef vec_math_ops():\n\n sin_cos_tan_atan_sinh_cosh_tanh_asinh_data = [[random.uniform(-10,10) for r in range(10)] for c in range(10)]\n asin_acos_atanh_data = [[random.uniform(-1,1) for r in range(10)] for c in range(10)]\n acosh_data = [[random.uniform(1,10) for r in range(10)] for c in range(10)]\n abs_data = [[random.uniform(-100000,0) for r in range(10)] for c in range(10)]\n zero_one_data = [random.randint(0,1) for c in range(10)]\n zero_one_data = [zero_one_data, zero_one_data]\n\n h2o_data1 = h2o.H2OFrame(sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)\n h2o_data2 = h2o.H2OFrame(asin_acos_atanh_data)\n h2o_data3 = h2o.H2OFrame(acosh_data)\n h2o_data4 = h2o.H2OFrame(abs_data)\n h2o_data5 = h2o.H2OFrame(zero_one_data)\n\n np_data1 = np.array(sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)\n np_data2 = np.array(asin_acos_atanh_data)\n np_data3 = np.array(acosh_data)\n np_data4 = np.array(abs_data)\n np_data5 = np.array(zero_one_data)\n\n row, col = h2o_data1.dim\n\n c = random.randint(0,col-1)\n for d in range(1,6):\n h2o_signif = h2o_data5[c].signif(digits=d)\n h2o_round = h2o_data5[c].round(digits=d+4)\n s = h2o_signif[0]\n r = h2o_round[0]\n assert (s == r).all(), \"Expected these to be equal, but signif: {0}, round: {1}\".format(s, r)\n h2o_transposed = h2o_data1[c].transpose()\n x, y = h2o_transposed.dim\n assert x == 1 and y == 10, \"Expected 1 row and 10 columns, but got {0} rows and {1} columns\".format(x,y)\n pyunit_utils.np_comparison_check(h2o_data1[:,c].cos(), np.cos(np_data1[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data1[:,c].sin(), np.sin(np_data1[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data1[:,c].tan(), np.tan(np_data1[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data2[:,c].acos(), np.arccos(np_data2[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data2[:,c].asin(), np.arcsin(np_data2[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data1[:,c].atan(), np.arctan(np_data1[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data1[:,c].cosh(), np.cosh(np_data1[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data1[c].sinh(), np.sinh(np_data1[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data1[c].tanh(), np.tanh(np_data1[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data3[c].acosh(), np.arccosh(np_data3[:,c]), 10)\n pyunit_utils.np_comparison_check(h2o_data1[c].asinh(), np.arcsinh(np_data1[:,c]), 10)\n h2o_val = h2o_data3[c].gamma()[5,:].flatten()\n num_val = math.gamma(h2o_data3[5,c])\n assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \\\n \"check unsuccessful! h2o computed {0} and math computed {1}. expected equal gamma values between h2o and\" \\\n \"math\".format(h2o_val,num_val)\n h2o_val = h2o_data3[c].lgamma()[5,:].flatten()\n num_val = math.lgamma(h2o_data3[5,c])\n assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \\\n \"check unsuccessful! h2o computed {0} and math computed {1}. expected equal lgamma values between h2o and \" \\\n \"math\".format(h2o_val,num_val)\n h2o_val = h2o_data3[c].digamma()[5,:].flatten()\n num_val = scipy.special.polygamma(0,h2o_data3[5,c])\n assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \\\n \"check unsuccessful! h2o computed {0} and math computed {1}. 
expected equal digamma values between h2o and \" \\\n \"math\".format(h2o_val,num_val)\n h2o_val = h2o_data3[c].trigamma()[5,:].flatten()\n num_val = scipy.special.polygamma(1,h2o_data3[5,c])\n assert abs(h2o_val - float(num_val)) < max(abs(h2o_val), abs(num_val)) * 1e-6, \\\n \"check unsuccessful! h2o computed {0} and math computed {1}. expected equal trigamma values between h2o and \" \\\n \"math\".format(h2o_val,num_val)\n # for c in range(col):\n # h2o_val = h2o_data5[c].all()\n # num_val = True if np.all(np_data5[:,c]) else False\n # assert h2o_val == num_val, \"check unsuccessful! h2o computed {0} and numpy computed {1}. expected equal \" \\\n # \"values between h2o and numpy\".format(h2o_val,num_val)\n\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(vec_math_ops)\nelse:\n vec_math_ops()\n"
]
| [
[
"numpy.array",
"numpy.arccos",
"numpy.sin",
"numpy.arcsin",
"numpy.cosh",
"numpy.tan",
"numpy.arccosh",
"numpy.tanh",
"numpy.arctan",
"numpy.sinh",
"numpy.cos",
"numpy.arcsinh"
]
]
|
Neptune-Trojans/ACTOR | [
"26d548e4e7004a8e8a2a476433fc8397d5dc5435"
]
| [
"src/visualize/visualize_dataset.py"
]
| [
"import matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport plotly.express as px\n\nfrom src.datasets.get_dataset import get_dataset\nfrom src.parser.visualize import parser\nfrom src.visualize.visualize import viz_dataset\n\nplt.switch_backend('agg')\n\n\ndef build_dataset_dist(dataset):\n frames_by_class = zip(dataset._actions, dataset._num_frames_in_video)\n frames_by_class_df = pd.DataFrame(frames_by_class, columns=['action', 'frames'])\n frames_by_class_df.replace({\"action\": dataset._action_classes}, inplace=True)\n fig = px.histogram(frames_by_class_df, x=\"frames\", color=\"action\", barmode='overlay', title='frames by action')\n fig.write_html(os.path.join(\"datavisualize\", 'HumanAct12_frames_by_action.html'))\n\n\nif __name__ == '__main__':\n # parse options\n # parameters = optutils.visualize_dataset_parser()\n parameters = parser(checkpoint=False)\n parameters['num_frames'] = 120\n parameters['fps'] = 10\n parameters['dataset'] = 'datagen'\n parameters['pose_rep'] = 'xyz'\n parameters[\"num_actions_to_sample\"] = 1\n # parameters['pose_rep'] = 'rot6d'\n # get device\n device = parameters[\"device\"]\n\n # get data\n DATA = get_dataset(name=parameters[\"dataset\"])\n dataset = DATA(split=\"train\", **parameters)\n # build_dataset_dist(dataset)\n # add specific parameters from the dataset loading\n dataset.update_parameters(parameters)\n\n name = f\"{parameters['dataset']}_{parameters['pose_rep']}\"\n folder = os.path.join(\"datavisualize\", name)\n viz_dataset(dataset, parameters, folder)\n"
]
| [
[
"matplotlib.pyplot.switch_backend",
"pandas.DataFrame"
]
]
|
BRutan/DynamicETLDashboard | [
"8a40e6f51e53f084d6103ba41cd675916505652f"
]
| [
"DynamicETLDashboard/DynamicETL_Dashboard/ETL/DataComparer.py"
]
| [
"#####################################\n# DataComparer.py\n#####################################\n# Description:\n# * Generate report comparing two datasets.\n\nimport os\nfrom pandas import DataFrame, MultiIndex\nimport xlsxwriter\n\nclass DataComparer(object):\n \"\"\"\n * Compare two DataFrames.\n \"\"\"\n __headerFormat = {'bold': True, 'font_color': 'white', 'bg_color' : 'black'}\n def __init__(self):\n \"\"\"\n * Create empty object that can generate reports.\n \"\"\"\n pass\n ####################\n # Interface Methods:\n ####################\n @classmethod\n def GenerateComparisonReport(cls, reportPath, data_test, data_valid, ignoreCols = None, pKey = None):\n \"\"\"\n * Generate report detailing differences between table rows\n and file rows.\n Inputs:\n * reportPath: Path to output report. Must point to xlsx file.\n * data_test: DataFrame containing test data.\n * data_valid: DataFrame containing valid data to compare against.\n Optional:\n * ignoreCols: Iterable of columns (strings) to ignore when comparing. \n * pKey: String or iterable to determine which rows to compare.\n \"\"\"\n # Validate parameters:\n DataComparer.__Validate(reportPath, data_test, data_valid, ignoreCols, pKey)\n # Generate report:\n compData, missingColsMsg = DataComparer.__Compare(data_test, data_valid, ignoreCols, pKey)\n reportWB = xlsxwriter.Workbook(reportPath)\n DataComparer.__GenerateSummaryPage(compData, reportWB, missingColsMsg)\n DataComparer.__GenerateDiffPage(compData, reportWB)\n reportWB.close()\n\n ####################\n # Private Helpers:\n ####################\n @classmethod\n def __GenerateSummaryPage(cls, compData, wb, missingColsMsg):\n \"\"\"\n * Generate summary page in workbook.\n \"\"\"\n headerFormat = wb.add_format(DataComparer.__headerFormat)\n summarySheet = wb.add_worksheet('Summary')\n summarySheet.write(0, 0, 'Missing Columns', headerFormat)\n summarySheet.write(0, 1, 'None' if not missingColsMsg else missingColsMsg)\n summarySheet.write(1, 0, '# of Differing Rows', headerFormat)\n summarySheet.write(1, 1, len(compData), headerFormat)\n # Write # of differences for each column:\n summarySheet.write(2, 0, '# of Differences by Column', headerFormat)\n for num, col in enumerate(compData.columns):\n summarySheet.write(3, num, col, headerFormat)\n summarySheet.write(4, num, len([val for val in compData[col] if val]))\n\n @classmethod\n def __GenerateDiffPage(cls, compData, wb):\n \"\"\"\n * Generate sheet detailing specific differences in \n column values.\n \"\"\"\n # Do not generate sheet if no differences occurred.\n if len(compData) == 0:\n return\n headerFormat = wb.add_format(DataComparer.__headerFormat)\n diffSheet = wb.add_worksheet('Differences')\n # Write all rows with differing column values:\n for rowNum in range(0, len(compData) + 1):\n for colNum, col in enumerate(compData.columns):\n if rowNum != 0:\n diffSheet.write(rowNum, colNum, compData[col][rowNum - 1])\n else:\n # Write headers:\n diffSheet.write(rowNum, colNum, col, headerFormat)\n @classmethod\n def __Compare(cls, data_test, data_valid, ignoreCols, pKey):\n \"\"\"\n * Return dataframe containing rows where datasets differ.\n \"\"\"\n missingColsMsg = None\n colMap = {col.lower() : col for col in data_test.columns}\n data_test = data_test.rename(columns={col : col.lower() for col in data_test.columns}).fillna('')\n data_valid = data_valid.rename(columns={col : col.lower() for col in data_valid.columns}).fillna('')\n if not ignoreCols is None:\n ignoreCols = set([col.lower() for col in ignoreCols])\n 
data_test = data_test[[col for col in data_test.columns if not col.lower() in ignoreCols]]\n data_valid = data_valid[[col for col in data_valid.columns if not col.lower() in ignoreCols]]\n columnOrder = data_test.columns\n # Match testing dtypes to valid dtypes:\n for col in data_test.columns:\n if data_valid[col].dtype != data_test[col].dtype:\n data_test[col] = data_test[col].astype(data_valid[col].dtype)\n # Remove columns in data_valid not in data_test:\n missingCols = set(data_valid.columns) - set(data_test.columns)\n if missingCols:\n missingColsMsg = ','.join(missingCols)\n data_valid = data_valid[[col for col in data_valid if not col in missingCols]]\n # Remove duplicate rows from each dataset:\n data_test = data_test.drop_duplicates()\n data_valid = data_valid.drop_duplicates()\n # Perform comparison:\n if not pKey is None:\n # Compare using primary key(s):\n pKey = [pKey.lower()] if isinstance(pKey, str) else [key.lower() for key in pKey]\n data_test = data_test.set_index(list(pKey))\n data_valid = data_valid.set_index(list(pKey))\n diff = { col : [] for col in data_test.columns }\n diff.update({col : [] for col in data_test.index.names})\n matches_test = data_test.loc[data_valid.index]\n matches_valid = data_valid.loc[data_test.index]\n non_matches = data_test.loc[data_test.index == set(data_test.index) - set(data_valid.index)]\n # Compare rows where primary key is the same:\n for row in range(0, len(matches_test)):\n test = matches_test.iloc[row]\n valid = matches_valid.loc[test.name]\n rowDiff = { col : None for col in diff }\n appendDiff = False\n for col in matches_test.columns:\n if test[col] != valid[col]:\n rowDiff[col] = '%s vs %s' % (test[col], valid[col])\n appendDiff = True\n # Append differing values if differences occurred:\n if appendDiff:\n # Add the index value to difference to denote unique identifier where issue occurred:\n if isinstance(matches_test.index, MultiIndex):\n for num, col in enumerate(matches_test.index.names):\n rowDiff[col] = matches_test.index[row][num]\n else:\n col = matches_test.index.names[0]\n rowDiff[col] = matches_test.index[row]\n for col in rowDiff:\n diff[col].append(rowDiff[col])\n # Write entire row for each data_test pkey value not present in data_valid:\n for row in range(0, len(non_matches)):\n target = non_matches.iloc[row]\n for col in diff:\n diff[col].append(data_test.iloc[row][col]) \n df = DataFrame(diff)\n else:\n # Compare rows as they appear in descending order:\n test_tuples = set(data_test.itertuples())\n valid_tuples = set(data_valid.itertuples())\n diff = valid_tuples - test_tuples\n df = DataFrame(diff)\n # Reorder columns to match original order:\n df = df[columnOrder].rename(columns = colMap)\n return df, missingColsMsg\n\n @classmethod\n def __Validate(cls, reportPath, data_test, data_valid, ignoreCols, pKey):\n \"\"\"\n * Validate parameters for main function.\n \"\"\"\n errs = []\n if not isinstance(reportPath, str):\n errs.append('reportPath must be a string.')\n elif not reportPath.endswith('.xlsx'):\n errs.append('reportPath must point to xlsx file.')\n if not isinstance(data_test, DataFrame):\n errs.append('data_test must be a DataFrame.')\n if not isinstance(data_valid, DataFrame):\n errs.append('data_valid must be a DataFrame.')\n if not ignoreCols is None and not hasattr(ignoreCols, '__iter__'):\n errs.append('ignoreCols must be an iterable of strings if provided.')\n elif not ignoreCols is None and any([not isinstance(val, str) for val in ignoreCols]):\n errs.append('ignoreCols must be an iterable of 
strings if provided.')\n if not pKey is None and not (isinstance(pKey, str) or hasattr(pKey, '__iter__')):\n errs.append('pKey must be a string/iterable of strings if provided.')\n elif isinstance(pKey, str):\n if hasattr(ignoreCols, '__iter__') and pKey in ignoreCols:\n errs.append('pKey cannot be in ignoreCols.')\n if isinstance(data_test, DataFrame) and not pKey.lower() in [col.lower() for col in data_test.columns]:\n errs.append('pKey not present in data_test.')\n if isinstance(data_valid, DataFrame) and not pKey.lower() in [col.lower() for col in data_valid.columns]:\n errs.append('pKey not present in data_valid.')\n elif hasattr(pKey, '__iter__'):\n if any([not isinstance(key, str) for key in pKey]):\n errs.append('pKey must only contain strings if an iterable.')\n else:\n if hasattr(ignoreCols, '__iter__'):\n overlap = set(pKey).intersection(set(ignoreCols))\n if overlap:\n errs.append('The following pKey columns overlap with ignoreCols: %s' % overlap)\n if isinstance(data_test, DataFrame):\n missing = set([key.lower() for key in pKey]) - set([col.lower() for col in data_test.columns])\n if missing:\n errs.append('The following pkeys are missing from data_test: %s' % ','.join(missing))\n elif isinstance(data_valid, DataFrame):\n missing = set([key.lower() for key in pKey]) - set([col.lower() for col in data_valid.columns])\n if missing:\n errs.append('The following pkeys are missing from data_valid: %s' % ','.join(missing))\n if errs:\n raise Exception('\\n'.join(errs))"
]
| [
[
"pandas.DataFrame"
]
]
|
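The `__Compare` helper above aligns both frames on a primary key and records a "test vs valid" string per differing cell. For the keyed case, recent pandas can produce a similar cell-level diff directly; the sketch below is only an illustration of that alternative, with invented two-row frames, not the class's own logic.

```python
# Hedged alternative for the keyed comparison: pandas' DataFrame.compare
# (available since pandas 1.1) returns a self/other cell-level diff.
import pandas as pd

data_test  = pd.DataFrame({'id': [1, 2], 'qty': [10, 5]}).set_index('id')
data_valid = pd.DataFrame({'id': [1, 2], 'qty': [10, 7]}).set_index('id')

diff = data_test.compare(data_valid)    # columns: ('qty', 'self'), ('qty', 'other')
print(diff)                             # only row id=2 differs: 5 vs 7
```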
neylsoncrepalde/sysidentpy | [
"d1af4243e7c3d2c0b456fb9b4fe120965a7ededc"
]
| [
"sysidentpy/polynomial_basis/tests/test_simulation.py"
]
| [
"from numpy.testing._private.utils import assert_allclose\nfrom sysidentpy.polynomial_basis import PolynomialNarmax\nfrom sysidentpy.utils.generate_data import get_miso_data, get_siso_data\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_array_equal\nfrom numpy.testing import assert_raises\nfrom sysidentpy.polynomial_basis import SimulatePolynomialNarmax\n\n\ndef test_get_index_from_regressor_code():\n s = SimulatePolynomialNarmax()\n model = np.array(\n [\n [1001, 0], # y(k-1)\n [2001, 1001], # x1(k-1)y(k-1)\n [2002, 0], # x1(k-2)\n ]\n )\n\n regressor_space = np.array(\n [\n [0, 0],\n [1001, 0],\n [2001, 0],\n [2002, 0],\n [1001, 1001],\n [2001, 1001],\n [2002, 1001],\n [2001, 2001],\n [2002, 2001],\n [2002, 2002],\n ]\n )\n index = s._get_index_from_regressor_code(\n regressor_code=regressor_space, model_code=model\n )\n\n assert (index == np.array([1, 3, 5])).all()\n\n\ndef test_list_output_regressor():\n s = SimulatePolynomialNarmax()\n model = np.array(\n [\n [1001, 0], # y(k-1)\n [2001, 1001], # x1(k-1)y(k-1)\n [2002, 0], # x1(k-2)\n ]\n )\n\n y_code = s._list_output_regressor_code(model)\n assert (y_code == np.array([1001, 1001])).all()\n\n\ndef test_list_input_regressor():\n s = SimulatePolynomialNarmax()\n model = np.array(\n [\n [1001, 0], # y(k-1)\n [2001, 1001], # x1(k-1)y(k-1)\n [2002, 0], # x1(k-2)\n ]\n )\n\n x_code = s._list_input_regressor_code(model)\n assert (x_code == np.array([2001, 2002])).all()\n\n\ndef test_get_lag_from_regressor_code():\n s = SimulatePolynomialNarmax()\n list_regressor1 = np.array([2001, 2002])\n list_regressor2 = np.array([1004, 1002])\n max_lag1 = s._get_lag_from_regressor_code(list_regressor1)\n max_lag2 = s._get_lag_from_regressor_code(list_regressor2)\n\n assert max_lag1 == 2\n assert max_lag2 == 4\n\n\ndef test_simulate():\n x_train, x_valid, y_train, y_valid = get_siso_data(\n n=1000, colored_noise=False, sigma=0.001, train_percentage=90\n )\n\n s = SimulatePolynomialNarmax()\n\n # the model must be a numpy array\n model = np.array(\n [\n [1001, 0], # y(k-1)\n [2001, 1001], # x1(k-1)y(k-1)\n [2002, 0], # x1(k-2)\n ]\n )\n # theta must be a numpy array of shape (n, 1) where n is the number of regressors\n theta = np.array([[0.2, 0.9, 0.1]]).T\n\n yhat, results = s.simulate(\n X_test=x_valid, y_test=y_valid, model_code=model, theta=theta, plot=False\n )\n assert yhat.shape == (100, 1)\n assert len(results) == 3\n\n\ndef test_simulate_theta():\n x_train, x_valid, y_train, y_valid = get_siso_data(\n n=1000, colored_noise=False, sigma=0.001, train_percentage=90\n )\n\n s = SimulatePolynomialNarmax(estimate_parameter=True)\n\n # the model must be a numpy array\n model = np.array(\n [\n [1001, 0], # y(k-1)\n [2001, 1001], # x1(k-1)y(k-1)\n [2002, 0], # x1(k-2)\n ]\n )\n\n yhat, results = s.simulate(\n X_train=x_train,\n y_train=y_train,\n X_test=x_valid,\n y_test=y_valid,\n model_code=model,\n plot=False,\n )\n theta = np.array([[0.2, 0.9, 0.1]]).T\n assert_almost_equal(s.theta, theta, decimal=1)\n\n\ndef test_estimate_parameter():\n assert_raises(TypeError, SimulatePolynomialNarmax, estimmate_parameter=1)\n"
]
| [
[
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.testing.assert_raises"
]
]
|
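The integer regressor codes in these tests follow a positional convention that the inline comments hint at: the thousands digit selects the signal (1 for y, 2 for x1, and so on) and the remainder is the lag, so 1001 is y(k-1) and 2002 is x1(k-2), with 0 padding the constant slot. A small decoder inferred from those comments:

```python
# Decoder for the regressor codes above, inferred from the comments
# (1001 -> y(k-1), 2001 -> x1(k-1), 2002 -> x1(k-2)).
def decode(code):
    if code == 0:
        return "const"                      # 0 entries pad / mark the constant term
    signal, lag = divmod(code, 1000)
    name = "y" if signal == 1 else f"x{signal - 1}"
    return f"{name}(k-{lag})"

for c in (1001, 2001, 2002, 1004):
    print(c, "->", decode(c))               # y(k-1), x1(k-1), x1(k-2), y(k-4)
```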
iminders/multiagent-particle-envs | [
"6a1341dff68520bd61d3ab97f89dbd32e70dd664"
]
| [
"multiagent/scenarios/simple_speaker_listener.py"
]
| [
"import numpy as np\n\nfrom multiagent.core import Agent, Landmark, World\nfrom multiagent.scenario import BaseScenario\n\n\nclass Scenario(BaseScenario):\n def make_world(self, num_agents=2):\n world = World()\n # set any world properties first\n world.dim_c = 3\n num_landmarks = 3\n world.collaborative = True\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = False\n agent.size = 0.075\n # speaker\n world.agents[0].movable = False\n # listener\n world.agents[1].silent = True\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n landmark.size = 0.04\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # assign goals to agents\n for agent in world.agents:\n agent.goal_a = None\n agent.goal_b = None\n # want listener to go to the goal landmark\n world.agents[0].goal_a = world.agents[1]\n world.agents[0].goal_b = np.random.choice(world.landmarks)\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = np.array([0.25, 0.25, 0.25])\n # random properties for landmarks\n world.landmarks[0].color = np.array([0.65, 0.15, 0.15])\n world.landmarks[1].color = np.array([0.15, 0.65, 0.15])\n world.landmarks[2].color = np.array([0.15, 0.15, 0.65])\n # special colors for goals\n world.agents[0].goal_a.color = world.agents[0].goal_b.color + \\\n np.array([0.45, 0.45, 0.45])\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def benchmark_data(self, agent, world):\n # returns data for benchmarking purposes\n return self.reward(agent, reward)\n\n def reward(self, agent, world):\n # squared distance from listener to landmark\n a = world.agents[0]\n dist2 = np.sum(np.square(a.goal_a.state.p_pos - a.goal_b.state.p_pos))\n return -dist2\n\n def observation(self, agent, world):\n # goal color\n goal_color = np.zeros(world.dim_color)\n if agent.goal_b is not None:\n goal_color = agent.goal_b.color\n\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n\n # communication of all other agents\n comm = []\n for other in world.agents:\n if other is agent or (other.state.c is None):\n continue\n comm.append(other.state.c)\n\n # speaker\n if not agent.movable:\n return np.concatenate([goal_color])\n # listener\n if agent.silent:\n return np.concatenate([agent.state.p_vel] + entity_pos + comm)\n"
]
| [
[
"numpy.square",
"numpy.concatenate",
"numpy.array",
"numpy.random.choice",
"numpy.zeros",
"numpy.random.uniform"
]
]
|
maloyan/jina | [
"93404a9545b3ca1870df61ac8b34910277770f38"
]
| [
"tests/integration/rolling_update/test_rolling_update.py"
]
| [
"import collections\nimport multiprocessing\nimport os\nimport threading\nimport time\n\nimport numpy as np\nimport pytest\n\nfrom jina import Document, Flow, Executor, requests, Client\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\nexposed_port = 12345\n\n\[email protected]\ndef config(tmpdir):\n os.environ['JINA_REPLICA_DIR'] = str(tmpdir)\n yield\n del os.environ['JINA_REPLICA_DIR']\n\n\[email protected]\ndef docs():\n return [\n Document(id=str(i), text=f'doc {i}', embedding=np.array([i] * 5))\n for i in range(20)\n ]\n\n\nclass DummyMarkExecutor(Executor):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.metas.name = 'dummy'\n\n @requests\n def foo(self, docs, *args, **kwargs):\n for doc in docs:\n doc.tags['replica'] = os.getpid()\n doc.tags['shard'] = self.runtime_args.shard_id\n\n def close(self) -> None:\n import os\n\n os.makedirs(self.workspace, exist_ok=True)\n\n\ndef test_normal(docs):\n NUM_REPLICAS = 3\n NUM_SHARDS = 2\n doc_id_path = collections.OrderedDict()\n\n def handle_search_result(resp):\n for doc in resp.data.docs:\n if int(doc.id) not in doc_id_path:\n doc_id_path[int(doc.id)] = []\n doc_id_path[int(doc.id)].append((doc.tags['replica'], doc.tags['shard']))\n\n flow = Flow().add(\n name='executor1',\n uses=DummyMarkExecutor,\n replicas=NUM_REPLICAS,\n shards=NUM_SHARDS,\n )\n with flow:\n flow.search(inputs=docs, request_size=1, on_done=handle_search_result)\n\n assert len(doc_id_path.keys()) == len(docs)\n\n replica_shards = [\n tag_item for tag_items in doc_id_path.values() for tag_item in tag_items\n ]\n replicas = [r for r, s in replica_shards]\n shards = [s for r, s in replica_shards]\n\n assert len(set(replicas)) == NUM_REPLICAS\n # shard results are reduced\n assert len(set(shards)) == 1\n\n\[email protected](60)\ndef test_simple_run(docs):\n flow = Flow().add(\n name='executor1',\n replicas=2,\n shards=3,\n )\n with flow:\n # test rolling update does not hang\n flow.search(docs)\n flow.rolling_update('executor1', None)\n flow.search(docs)\n\n\[email protected]()\ndef docker_image():\n import docker\n\n client = docker.from_env()\n client.images.build(path=os.path.join(cur_dir), tag='test_rolling_update_docker')\n client.close()\n yield\n time.sleep(2)\n client = docker.from_env()\n client.containers.prune()\n client.close()\n\n\n# TODO: this should be repodtable, but its not due to head/gateway not being containerized\n# @pytest.mark.repodt(5)\[email protected](60)\[email protected]('uses', ['docker://test_rolling_update_docker'])\ndef test_search_while_updating(docs, reraise, docker_image, uses):\n request_count = 50\n shards = 2\n\n def update_rolling(flow, deployment_name, start_event):\n start_event.wait()\n with reraise:\n flow.rolling_update(deployment_name)\n\n with Flow().add(\n uses=uses,\n name='executor1',\n replicas=2,\n shards=shards,\n timeout_ready=5000,\n ) as flow:\n start_event = multiprocessing.Event()\n result_queue = multiprocessing.Queue()\n\n client_process = multiprocessing.Process(\n target=send_requests,\n args=(\n flow.port_expose,\n start_event,\n result_queue,\n len(docs),\n request_count,\n ),\n )\n client_process.start()\n update_rolling(flow, 'executor1', start_event)\n client_process.join()\n\n total_docs = 0\n while not result_queue.empty():\n total_docs += len(result_queue.get())\n assert total_docs == len(docs) * request_count\n\n\n# TODO: this should be repodtable, but its not due to head/gateway not being containerized\n# @pytest.mark.repodt(5)\[email protected](60)\ndef 
test_vector_indexer_thread(config, docs, reraise):\n def update_rolling(flow, deployment_name, start_event):\n start_event.wait()\n with reraise:\n flow.rolling_update(deployment_name)\n\n with Flow().add(\n name='executor1',\n uses=DummyMarkExecutor,\n replicas=2,\n shards=3,\n timeout_ready=5000,\n ) as flow:\n start_event = multiprocessing.Event()\n\n client_process = multiprocessing.Process(\n target=send_requests,\n args=(flow.port_expose, start_event, multiprocessing.Queue(), len(docs), 5),\n )\n client_process.start()\n client_process.join()\n result_queue = multiprocessing.Queue()\n client_process = multiprocessing.Process(\n target=send_requests,\n args=(flow.port_expose, start_event, result_queue, len(docs), 40),\n )\n client_process.start()\n update_rolling(flow, 'executor1', start_event)\n client_process.join()\n\n total_docs = 0\n while not result_queue.empty():\n total_docs += len(result_queue.get())\n assert total_docs == len(docs) * 40\n\n\ndef test_workspace(config, tmpdir, docs):\n with Flow().add(\n name='executor1',\n uses=DummyMarkExecutor,\n workspace=str(tmpdir),\n shards=3,\n ) as flow:\n # in practice, we don't send index requests to the deployment this is just done to test the workspaces\n for i in range(10):\n flow.index(docs)\n\n # validate created workspaces\n assert set(os.listdir(str(tmpdir))) == {'dummy'}\n assert set(os.listdir(os.path.join(tmpdir, 'dummy'))) == {'0', '1', '2'}\n\n\ndef test_num_pods(config):\n with Flow().add(\n name='executor1',\n uses='!DummyMarkExecutor',\n replicas=3,\n shards=4,\n ) as flow:\n assert flow.num_pods == (\n 4 * 3 + 1 + 1 # shards 4 # replicas 3 # deployment head # gateway\n )\n\n\nclass UpdateExecutor(Executor):\n def __init__(\n self,\n dump_path: str = '/tmp/dump_path1/',\n argument1: str = 'version1',\n argument2: str = 'version1',\n *args,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n self._dump_path = dump_path\n self._argument1 = argument1\n self._argument2 = argument2\n\n @requests\n def run(self, docs, **kwargs):\n for doc in docs:\n doc.tags['dump_path'] = self._dump_path\n doc.tags['arg1'] = self._argument1\n doc.tags['arg2'] = self._argument2\n\n\[email protected](60)\ndef test_override_uses_with(docs):\n flow = Flow(port_expose=exposed_port).add(\n name='executor1',\n uses=UpdateExecutor,\n replicas=2,\n parallel=3,\n )\n with flow:\n # test rolling update does not hang\n ret1 = Client(port=exposed_port, return_responses=True).search(docs)\n flow.rolling_update(\n 'executor1',\n uses_with={\n 'dump_path': '/tmp/dump_path2/',\n 'argument1': 'version2',\n 'argument2': 'version2',\n },\n )\n ret2 = Client(port=exposed_port, return_responses=True).search(docs)\n\n assert len(ret1) > 0\n assert len(ret1[0].docs) > 0\n for doc in ret1[0].docs:\n assert doc.tags['dump_path'] == '/tmp/dump_path1/'\n assert doc.tags['arg1'] == 'version1'\n assert doc.tags['arg2'] == 'version1'\n\n assert len(ret2) > 0\n assert len(ret2[0].docs) > 0\n for doc in ret2[0].docs:\n assert doc.tags['dump_path'] == '/tmp/dump_path2/'\n assert doc.tags['arg1'] == 'version2'\n assert doc.tags['arg2'] == 'version2'\n\n\[email protected](60)\[email protected](\n 'replicas, scale_to',\n [(2, 3), (3, 2)],\n)\ndef test_scale_after_rolling_update(docs, replicas, scale_to):\n flow = Flow(port_expose=exposed_port).add(\n name='executor1',\n uses=DummyMarkExecutor,\n replicas=replicas,\n )\n with flow:\n ret1 = Client(port=exposed_port, return_responses=True).search(\n docs, request_size=1\n )\n flow.rolling_update('executor1', None)\n 
flow.scale('executor1', replicas=scale_to)\n ret2 = Client(port=exposed_port, return_responses=True).search(\n docs, request_size=1\n )\n\n replicas_before = set()\n for r in ret1:\n for replica in r.docs[:, 'tags__replica']:\n replicas_before.add(replica)\n\n assert len(replicas_before) == replicas\n\n replicas_after = set()\n for r in ret2:\n for replica in r.docs[:, 'tags__replica']:\n replicas_after.add(replica)\n assert len(replicas_after) == scale_to\n\n\ndef send_requests(\n port_expose,\n start_rolling_update_event: multiprocessing.Event,\n result_queue: multiprocessing.Queue,\n doc_count: int,\n request_count: int,\n):\n client = Client(port=port_expose, return_responses=True)\n for i in range(request_count):\n responses = client.search(\n [Document(id=f'{idx}', text=f'doc{idx}') for idx in range(doc_count)],\n request_size=10,\n )\n for r in responses:\n result_queue.put(r.docs.texts)\n if i == 5:\n start_rolling_update_event.set()\n # give the rolling update some time to kick in\n time.sleep(0.1)\n"
]
| [
[
"numpy.array"
]
]
|
FelixFu520/yolov1 | [
"8bf464702d75b03ec09878cdf4090a4442cdac34"
]
| [
"utils/dataset.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2020/08/07 15:07\n@Author : FelixFu\n@File : train.py\n@Noice :\n@Modificattion : txt描述文件 image_name.jpg x y w h c x y w h c 这样就是说一张图片中有两个目标\n @Author :\n @Time :\n @Detail :\n\"\"\"\n\nimport os\nimport os.path\n\nimport random\nimport numpy as np\n\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\n\nimport cv2\nimport matplotlib.pyplot as plt\n\n\nclass yoloDataset(data.Dataset):\n image_size = 448\n\n def __init__(self, root, list_file, train, transform):\n self.root = root # 数据集根目录 datasests\n self.train = train # 是否为训练\n self.transform = transform # 转换\n self.fnames = [] # 文件名s [001.jpg, 002.jpg]\n self.boxes = [] # boxes [ [box], [[x1,y1,x2,y2], ...], ... ]\n self.labels = [] # labels [ [1], [2], ... ]\n self.mean = (123, 117, 104) # RGB\n self.num_samples = 0 # 样本总数\n\n if isinstance(list_file, list):\n # Cat multiple list files together.\n # This is especially useful for voc07/voc12 combination.\n tmp_file = os.path.join(root, 'images.txt')\n list_file = [os.path.join(root, list_file[0]), os.path.join(root, list_file[1])]\n os.system('cat %s > %s' % (' '.join(list_file), tmp_file))\n list_file = tmp_file\n else:\n list_file = os.path.join(root, list_file)\n\n # 处理标签\n with open(list_file) as f:\n lines = f.readlines()\n for line in lines:\n splited = line.strip().split() # ['005246.jpg', '84', '48', '493', '387', '2']\n self.fnames.append(splited[0])\n num_boxes = (len(splited) - 1) // 5\n box = []\n label = []\n for i in range(num_boxes):\n x = float(splited[1+5*i])\n y = float(splited[2+5*i])\n x2 = float(splited[3+5*i])\n y2 = float(splited[4+5*i])\n c = splited[5+5*i]\n box.append([x, y, x2, y2])\n label.append(int(c)+1)\n self.boxes.append(torch.Tensor(box))\n self.labels.append(torch.LongTensor(label))\n self.num_samples = len(self.boxes)\n\n def __getitem__(self, idx):\n fname = self.fnames[idx]\n img = cv2.imread(os.path.join(self.root, \"images\", fname))\n boxes = self.boxes[idx].clone()\n labels = self.labels[idx].clone()\n\n # 数据增强\n # if self.train:\n # img = self.random_bright(img)\n # img, boxes = self.random_flip(img, boxes)\n # img, boxes = self.randomScale(img, boxes)\n # img = self.randomBlur(img)\n # img = self.RandomBrightness(img)\n # img = self.RandomHue(img)\n # img = self.RandomSaturation(img)\n # img, boxes, labels = self.randomShift(img, boxes, labels)\n # img, boxes, labels = self.randomCrop(img, boxes, labels)\n\n # # debug\n # box_show = boxes.numpy().reshape(-1)\n # print(box_show)\n # img_show = self.BGR2RGB(img)\n # pt1 = (int(box_show[0]), int(box_show[1]))\n # pt2 = (int(box_show[2]), int(box_show[3]))\n # cv2.rectangle(img_show, pt1=pt1, pt2=pt2, color=(0, 255, 0), thickness=1)\n # print(type(img_show))\n # plt.figure()\n # plt.imshow(img_show)\n # plt.show()\n # plt.savefig(\"a.png\")\n # #debug\n h, w, _ = img.shape\n boxes /= torch.Tensor([w, h, w, h]).expand_as(boxes)\n img = self.BGR2RGB(img) # because pytorch pretrained model use RGB\n img = self.subMean(img, self.mean) # 减去均值\n img = cv2.resize(img, (self.image_size, self.image_size))\n target = self.encoder(boxes, labels) # 7x7x30\n for t in self.transform:\n img = t(img)\n\n return img, target\n\n def __len__(self):\n return self.num_samples\n\n def encoder(self, boxes, labels):\n \"\"\"\n boxes (tensor) [[x1,y1,x2,y2],[]]\n labels (tensor) [...]\n return 14x14x30\n \"\"\"\n grid_num = 14\n target = torch.zeros((grid_num, grid_num, 30))\n cell_size = 1./grid_num\n wh = boxes[:, 2:] - boxes[:, :2] # 
这张图片,有n=8个box,每个box的wh torch.Size([8, 2])\n cxcy = (boxes[:, 2:] + boxes[:, :2]) / 2 # n个box的中心xy torch.Size([8, 2])\n for i in range(cxcy.size()[0]):\n cxcy_sample = cxcy[i]\n ij = (cxcy_sample/cell_size).ceil()-1 # 将cxcy_sample定位到cell(网格)中\n target[int(ij[1]), int(ij[0]), 4] = 1 # 将target中box设为1\n target[int(ij[1]), int(ij[0]), 9] = 1 # 将target中box设为1\n target[int(ij[1]), int(ij[0]), int(labels[i])+9] = 1 # 将target中class设为1\n xy = ij*cell_size # 匹配到的网格的左上角相对坐标\n delta_xy = (cxcy_sample - xy)/cell_size\n target[int(ij[1]), int(ij[0]), 2:4] = wh[i]\n target[int(ij[1]), int(ij[0]), :2] = delta_xy\n target[int(ij[1]), int(ij[0]), 7:9] = wh[i]\n target[int(ij[1]), int(ij[0]), 5:7] = delta_xy\n return target\n\n def BGR2RGB(self, img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n def BGR2HSV(self, img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n def HSV2BGR(self, img):\n return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)\n \n def RandomBrightness(self, bgr):\n if random.random() < 0.5:\n hsv = self.BGR2HSV(bgr)\n h, s, v = cv2.split(hsv)\n adjust = random.choice([0.5, 1.5])\n v = v*adjust\n v = np.clip(v, 0, 255).astype(hsv.dtype)\n hsv = cv2.merge((h, s, v))\n bgr = self.HSV2BGR(hsv)\n return bgr\n\n def RandomSaturation(self, bgr):\n if random.random() < 0.5:\n hsv = self.BGR2HSV(bgr)\n h, s, v = cv2.split(hsv)\n adjust = random.choice([0.5, 1.5])\n s = s*adjust\n s = np.clip(s, 0, 255).astype(hsv.dtype)\n hsv = cv2.merge((h, s, v))\n bgr = self.HSV2BGR(hsv)\n return bgr\n\n def RandomHue(self, bgr):\n if random.random() < 0.5:\n hsv = self.BGR2HSV(bgr)\n h, s, v = cv2.split(hsv)\n adjust = random.choice([0.5, 1.5])\n h = h*adjust\n h = np.clip(h, 0, 255).astype(hsv.dtype)\n hsv = cv2.merge((h, s, v))\n bgr = self.HSV2BGR(hsv)\n return bgr\n\n def randomBlur(self, bgr):\n if random.random() < 0.5:\n bgr = cv2.blur(bgr, (5, 5))\n return bgr\n\n def randomShift(self, bgr, boxes, labels):\n # 平移变换\n center = (boxes[:, 2:]+boxes[:, :2])/2\n if random.random() < 0.5:\n height, width, c = bgr.shape\n after_shfit_image = np.zeros((height, width, c), dtype=bgr.dtype)\n after_shfit_image[:, :, :] = (104, 117, 123) # bgr\n shift_x = random.uniform(-width*0.2, width*0.2)\n shift_y = random.uniform(-height*0.2, height*0.2)\n # print(bgr.shape,shift_x,shift_y)\n # 原图像的平移\n if shift_x >= 0 and shift_y >= 0:\n after_shfit_image[int(shift_y):, int(shift_x):, :] = bgr[:height-int(shift_y), :width-int(shift_x), :]\n elif shift_x >= 0 and shift_y < 0:\n after_shfit_image[:height+int(shift_y), int(shift_x):, :] = bgr[-int(shift_y):, :width-int(shift_x), :]\n elif shift_x < 0 and shift_y >= 0:\n after_shfit_image[int(shift_y):, :width+int(shift_x), :] = bgr[:height-int(shift_y), -int(shift_x):, :]\n elif shift_x < 0 and shift_y < 0:\n after_shfit_image[:height+int(shift_y), :width+int(shift_x), :] = bgr[-int(shift_y):, -int(shift_x):, :]\n\n shift_xy = torch.FloatTensor([[int(shift_x),int(shift_y)]]).expand_as(center)\n center = center + shift_xy\n mask1 = (center[:, 0] > 0) & (center[:, 0] < width)\n mask2 = (center[:, 1] > 0) & (center[: ,1] < height)\n mask = (mask1 & mask2).view(-1, 1)\n boxes_in = boxes[mask.expand_as(boxes)].view(-1,4)\n if len(boxes_in) == 0:\n return bgr, boxes, labels\n box_shift = torch.FloatTensor([[int(shift_x), int(shift_y),\n int(shift_x), int(shift_y)]]).expand_as(boxes_in)\n boxes_in = boxes_in+box_shift\n labels_in = labels[mask.view(-1)]\n return after_shfit_image, boxes_in, labels_in\n return bgr, boxes, labels\n\n def randomScale(self, bgr, boxes):\n # 
固定住高度,以0.8-1.2伸缩宽度,做图像形变\n if random.random() < 0.5:\n scale = random.uniform(0.8, 1.2)\n height, width, c = bgr.shape\n bgr = cv2.resize(bgr, (int(width*scale), height))\n scale_tensor = torch.FloatTensor([[scale, 1, scale, 1]]).expand_as(boxes)\n boxes = boxes * scale_tensor\n return bgr, boxes\n return bgr, boxes\n\n def randomCrop(self, bgr, boxes, labels):\n if random.random() < 0.5:\n center = (boxes[:, 2:]+boxes[:, :2])/2\n height, width, c = bgr.shape\n h = random.uniform(0.6*height, height)\n w = random.uniform(0.6*width, width)\n x = random.uniform(0, width-w)\n y = random.uniform(0, height-h)\n x, y, h, w = int(x), int(y), int(h), int(w)\n\n center = center - torch.FloatTensor([[x, y]]).expand_as(center)\n mask1 = (center[:, 0] > 0) & (center[:, 0] < w)\n mask2 = (center[:, 1] > 0) & (center[:, 1] < h)\n mask = (mask1 & mask2).view(-1, 1)\n\n boxes_in = boxes[mask.expand_as(boxes)].view(-1, 4)\n if(len(boxes_in) == 0):\n return bgr, boxes, labels\n box_shift = torch.FloatTensor([[x, y, x, y]]).expand_as(boxes_in)\n\n boxes_in = boxes_in - box_shift\n boxes_in[:, 0] = boxes_in[:, 0].clamp_(min=0, max=w)\n boxes_in[:, 2] = boxes_in[:, 2].clamp_(min=0, max=w)\n boxes_in[:, 1] = boxes_in[:, 1].clamp_(min=0, max=h)\n boxes_in[:, 3] = boxes_in[:, 3].clamp_(min=0, max=h)\n\n labels_in = labels[mask.view(-1)]\n img_croped = bgr[y:y+h, x:x+w, :]\n return img_croped, boxes_in, labels_in\n return bgr, boxes, labels\n\n def subMean(self, bgr, mean):\n mean = np.array(mean, dtype=np.float32)\n bgr = bgr - mean\n return bgr\n\n def random_flip(self, im, boxes):\n if random.random() < 0.5:\n im_lr = np.fliplr(im).copy()\n h, w, _ = im.shape\n xmin = w - boxes[:, 2]\n xmax = w - boxes[:, 0]\n boxes[:, 0] = xmin\n boxes[:, 2] = xmax\n return im_lr, boxes\n return im, boxes\n\n def random_bright(self, im, delta=16):\n alpha = random.random()\n if alpha > 0.3:\n im = im * alpha + random.randrange(-delta, delta)\n im = im.clip(min=0, max=255).astype(np.uint8)\n return im\n\n\nif __name__ == '__main__':\n from torch.utils.data import DataLoader\n import torchvision.transforms as transforms\n file_root = \"../datasets\"\n # train_dataset = yoloDataset(root=file_root, list_file=['voc2012.txt', 'voc2007.txt'],\n # train=True, transform=[transforms.ToTensor()])\n train_dataset = yoloDataset(root=file_root, list_file='images.txt',\n train=True, transform=[transforms.ToTensor()])\n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=0)\n train_iter = iter(train_loader)\n for i in range(1):\n img, target = next(train_iter)\n print(img.shape, target.shape)\n print(train_dataset.num_samples)\n\n\n"
]
| [
[
"torch.zeros",
"numpy.array",
"numpy.zeros",
"torch.FloatTensor",
"torch.LongTensor",
"torch.utils.data.DataLoader",
"numpy.clip",
"torch.Tensor",
"numpy.fliplr"
]
]
|
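The `encoder` method above maps each normalized box center to one cell of a grid and writes confidence, class one-hot, cell-relative offset and width/height into a (grid, grid, 30) target. The cell/offset arithmetic, stripped out of the class with an example box and the same grid size:

```python
# Cell assignment used by the encoder above, reduced to the arithmetic only.
import torch

grid_num = 14
cell_size = 1.0 / grid_num
box = torch.tensor([0.20, 0.30, 0.40, 0.50])      # x1, y1, x2, y2, already / (w, h)

cxcy = (box[2:] + box[:2]) / 2                    # box center: (0.30, 0.40)
wh = box[2:] - box[:2]                            # width/height: (0.20, 0.20)
ij = (cxcy / cell_size).ceil() - 1                # grid cell indices: (4, 5)
delta_xy = (cxcy - ij * cell_size) / cell_size    # offset of the center inside that cell

print(ij.tolist(), delta_xy.tolist(), wh.tolist())
```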
ajmal017/ralph-usa | [
"41a7f910da04cfa88f603313fad2ff44c82b9dd4"
]
| [
"algDev/run.py"
]
| [
"from algDev.preprocessing.feature_generation import *\nfrom algDev.preprocessing import data_generator\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom algDev.models.equity import Equity\nfrom algDev.algorithms.cnn import CNN\nfrom algDev.algorithms.svm import SVM\nfrom algDev.API.indicators import get_indicator_value\nfrom algDev.db.wrapper import *\nfrom algDev.tests import trading_alg_test, asset_alloc_test, test_svm\nfrom algDev.db.populate_models_table import build_example_model, get_tas, test_add_model, test_add_model_collection\nfrom algDev.API.models import loadTradingAlgorithm\nfrom algDev.tests.test_backtest import run_test\n\ndef test_one():\n eq = Equity('QCOM')\n print(eq.opens)\n print(eq.dates)\n print(getTickers())\n\ndef test_two():\n eq = Equity('AAPL')\n feature_set = ['prings']\n length = 10\n threshold = 0.015\n period = 10\n fig, ax = plt.subplots()\n ax = plot_features(eq, feature_set, ax, 255)\n # # ax = plot_labels(eq, 10, .015, ax, range=255)\n plt.show()\n\n # X,y = data_generator.gen_svm_data(eq, feature_set, length, threshold, period)\n\n # svm = SVM(X, y)\n # svm.train([0.8,0.2])\n # cnn.train_model(X_train,y_train,X_test,y_test)\n\n print(get_indicator_value('AAPL', 'lowerBol'))\n\ndef test_three():\n \n trading_alg_test.run_test_one()\n\ndef test_four():\n asset_alloc_test.run_test_one()\n\ndef test_five():\n trading_alg_test.build_confusion_matrix()\n\ndef test_six():\n trading_alg_test.test_conf_matrix_model_coll()\n\ndef test_seven():\n trading_alg_test.hyper_param_tuning()\n\ndef test_eight():\n test_svm.run_2()\n\ndef test_nine():\n build_example_model()\n # test_add_model()\n # test_add_model_collection()\n\ndef test_ten():\n print(get_tas())\n\ndef test_eleven():\n trading_alg_test.grid_search()\n\ndef test_twelve():\n ta_entity = getTradingAlgorithms()\n ta_id = ta_entity[0][0]\n trading_alg = loadTradingAlgorithm(ta_id)\n\n print(trading_alg)\n\ndef backtest():\n run_test()\n\ndef getMCs():\n print(getModelCollections())\n"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
]
|
niyoj/AcademicContent | [
"e943b48719f06e5fe60874b84a4b8b8491ed9e99"
]
| [
"Events and Hacks/AI Hackathon/Code and Data/5_image_recogniser.py"
]
| [
"import scipy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.externals._pilutil import imread\nimport os\nos.chdir('data/images_part1')\n\ntrainimage = []\nfor i in range(11):\n A = imread(str(i) + '.png', flatten = True)\n B, c, D = np.linalg.svd(A)\n trainimage.append({'original': A, 'singular': c[:10]}) \n\ntestimage = trainimage[10] \n\nrecognisedimage = min(trainimage[:10], key=lambda e: sum((e['singular']-testimage['singular'])**2))\n\nplt.imshow(recognisedimage['original'], interpolation='nearest', cmap=plt.cm.Greys_r)\nplt.show()\n\n\n\n"
]
| [
[
"matplotlib.pyplot.show",
"numpy.linalg.svd",
"matplotlib.pyplot.imshow"
]
]
|
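The script above recognises an image by ranking training images on the squared distance between their leading singular values. The same matching step on synthetic matrices, so it runs without the PNG files (the random matrices and noise level are placeholders):

```python
# Nearest-singular-spectrum matching as in the script above, on random matrices.
import numpy as np

rng = np.random.default_rng(0)
train = [rng.normal(size=(64, 64)) for _ in range(10)]
test = train[3] + 0.01 * rng.normal(size=(64, 64))       # noisy copy of "image" 3

sv = [np.linalg.svd(a, compute_uv=False)[:10] for a in train]
sv_test = np.linalg.svd(test, compute_uv=False)[:10]

best = min(range(10), key=lambda i: np.sum((sv[i] - sv_test) ** 2))
print(best)                                              # expected: 3
```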
JLivingston01/py_research | [
"928f74287039a933d27c5a5dc3df8db4cb79c152",
"928f74287039a933d27c5a5dc3df8db4cb79c152"
]
| [
"scripts/kmeans_dem0.py",
"scripts/pytorch_word_embedding.py"
]
| [
"\n\nimport numpy as np\n\nd1 = np.random.normal(3,.5,(10,3))\nd2=np.random.normal(5,.5,(8,3))\n\nd3=np.random.normal(7,.5,(8,3))\n\n\nd = np.vstack((d1,d2,d3))\n\ncentroids=3\n\nc=np.random.normal(np.mean(d),np.std(d),(centroids,d.shape[1]))\n\n\ndef kmeans(dat,centroids,max_iter):\n \n d = dat \n c=np.random.normal(np.mean(d),np.std(d),(centroids,d.shape[1]))\n \n def mydist(d,c):\n distarray=[]\n for i in range(c.shape[0]):\n distarray.append(np.sum((d-c[i])**2,axis=1)**.5)\n \n distarray=np.array(distarray)\n return distarray \n \n for j in range(16):\n dists = mydist(d,c).T\n clusts=np.argmin(dists,axis=1)\n for i in range(centroids):\n c[i]=np.mean(d[clusts==i],axis=0)\n \n return clusts\n \nkmeans(d,3,16)",
"\nimport pandas as pd\nimport numpy as np\n\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import TensorDataset\n#from torchvision import datasets\n#from torchvision.transforms import ToTensor, Lambda, Compose\nimport matplotlib.pyplot as plt\n\nsentences=[\n \"what is the weather\",\n \"the weather outside is frightful\",\n \"i have a basketball game today\",\n \"the weather is cold today\",\n \"the weather is hot today\",\n \"hot weather is not enjoyable\",\n \"when is the weather going to get better\",\n \"the basketball game was long\",\n \"how much are the basketball tickets\",\n \"what does the fox say\"\n ]\n\ntokens = [i.split() for i in sentences]\n\nall_tokens=[]\nfor i in tokens:\n all_tokens=all_tokens+i\n \nall_tokens=list(set(all_tokens))\n\n\n\n\nall_pairs1=[[[j[i],j[i+1]] for i in range(len(j)-1)] for j in tokens]\nall_pairs2=[[[j[i+1],j[i]] for i in range(len(j)-1)] for j in tokens]\n\n\n\ntoken_cooccur=all_pairs1+all_pairs2\n\ntoken_cooccur[1]\n\nall_pairs=[]\nfor i in token_cooccur:\n for j in i:\n all_pairs.append(j)\n \n\nX=pd.DataFrame()\n\nX['pairssss']=all_pairs\n\nfor i in all_tokens:\n X[i]=X['pairssss'].apply(lambda x: i==x[0])\n \nX.drop('pairssss',axis=1,inplace=True)\n\nX=pd.DataFrame(np.where(X,1,0),columns=X.columns)\n\n\nunique_X=X.drop_duplicates()\n\nY=pd.DataFrame()\n\nY['pairssss']=all_pairs\n\nfor i in all_tokens:\n Y[i]=Y['pairssss'].apply(lambda x: i==x[1])\n \nY.drop('pairssss',axis=1,inplace=True)\n\nY=pd.DataFrame(np.where(Y,1,0),columns=Y.columns)\n\n\n\n\nX=np.array(X)\nY=np.array(Y)\n\n\nX = X.astype(np.float32)\nY = Y.astype(np.float32)\n\n\nX_torch = torch.from_numpy(X)\nY_torch = torch.from_numpy(Y)\n\ntrain_ds = TensorDataset(X_torch, Y_torch)\n\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(\"Using {} device\".format(device))\n\nlen(all_tokens)\n\nembeddings= 10\nconfiguration = nn.Sequential(\n nn.Linear(len(all_tokens), embeddings),\n nn.Sigmoid(),\n nn.Linear(embeddings, len(all_tokens)),\n nn.Sigmoid()\n )\n\n\n# Define model\nclass NeuralNetwork(nn.Module):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n self.stack = configuration\n\n def forward(self, x):\n logits = self.stack(x)\n return logits\n\nmodel = NeuralNetwork().to(device)\nprint(model)\n\n\n\nloss_fn = nn.MSELoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-2)\n\n\n\ndef train(dataloader, model, loss_fn, optimizer):\n size = len(dataloader.dataset)\n for batch, (X, y) in enumerate(dataloader):\n X, y = X.to(device), y.to(device)\n\n # Compute prediction error\n pred = model(X)\n loss = loss_fn(pred, y)\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch % 100 == 0:\n loss, current = loss.item(), batch * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n\n\n\ntrain_dataloader = DataLoader(train_ds, batch_size=200)\n\n\n\nepochs = 1000000\nfor t in range(epochs):\n if t%100==0:\n print(f\"Epoch {t+1}\\n-------------------------------\")\n train(train_dataloader, model, loss_fn, optimizer)\nprint(\"Done!\")\n\n\n\n\nweights=list(model.parameters())[0]\nbias=list(model.parameters())[1]\n\n\n\n\[email protected]+bias\n\n\nunique_X.reset_index(drop=True,inplace=True)\n\n\nunique_X_array=np.array(unique_X).astype(np.float32)\nunique_X_torch = torch.from_numpy(unique_X_array)\n\[email 
protected]+bias\n\n\nembedding_array=embedding.detach().numpy()\nembedding_df=pd.DataFrame(embedding_array)\n\nunique_X_embeddings=unique_X.merge(embedding_df,left_index=True,right_index=True)\n\n\ncols = [i for i in unique_X_embeddings.columns if i not in range(0,embeddings)]\n\nunique_X_embeddings['argmax']=np.argmax(np.array(unique_X_embeddings[cols]),axis=1)\nunique_X_embeddings.sort_values(by='argmax',inplace=True,ascending=True)\n\nunique_X_embeddings['word']=unique_X_embeddings['argmax'].apply(lambda x:\n unique_X_embeddings.columns[x])\n \nemb=unique_X_embeddings[list(range(0,embeddings))+['word']]\n\n\nfrom sklearn.decomposition import PCA\n\np=PCA(n_components=2).fit_transform(emb[list(range(0,embeddings))])\n\n\nfig=plt.figure(figsize=(8,8))\nplt.scatter(p[:,0],p[:,1])\n\nfor i in range(len(emb)):\n plt.annotate(emb['word'].values[i],(p[i,0],p[i,1]))\n\n\n\n\n\n\n\n\n"
]
| [
[
"numpy.random.normal",
"numpy.array",
"numpy.argmin",
"numpy.sum",
"numpy.mean",
"numpy.std",
"numpy.vstack"
],
[
"numpy.array",
"matplotlib.pyplot.annotate",
"torch.nn.MSELoss",
"torch.nn.Sigmoid",
"pandas.DataFrame",
"torch.from_numpy",
"matplotlib.pyplot.figure",
"numpy.where",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.scatter",
"sklearn.decomposition.PCA",
"torch.utils.data.TensorDataset"
]
]
|
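The tail of the second script reads the learned word embedding straight off the first `nn.Linear` layer; the matrix-product lines appear garbled in the listing ("[email protected]" is obfuscation residue), and the intended computation is presumably input @ weight.T + bias, which is exactly what the layer itself applies. A hedged reconstruction with an arbitrary vocabulary size:

```python
# Likely intent of the garbled "... + bias" lines: extract the embedding by
# applying the first Linear layer's weight and bias by hand.
import torch
from torch import nn

vocab_size, embeddings = 31, 10                  # sizes chosen for illustration
first = nn.Linear(vocab_size, embeddings)

X_torch = torch.eye(vocab_size)                  # one-hot rows, like unique_X
weights = list(first.parameters())[0]            # shape (embeddings, vocab_size)
bias = list(first.parameters())[1]               # shape (embeddings,)

embedding = X_torch @ weights.T + bias           # same result as first(X_torch)
assert torch.allclose(embedding, first(X_torch), atol=1e-6)
```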
j1c/graspologic | [
"ff34382d1ffa0b7ea5f0e005525b7364f977e86f",
"ff34382d1ffa0b7ea5f0e005525b7364f977e86f"
]
| [
"graspologic/align/base.py",
"tests/cluster/test_divisive_cluster.py"
]
| [
"# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom abc import abstractmethod\nfrom typing import TypeVar\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator\nfrom sklearn.utils import check_array\n\nfrom graspologic.types import Tuple\n\nSelf = TypeVar(\"Self\", bound=\"BaseAlign\")\n\n\nclass BaseAlign(BaseEstimator):\n \"\"\"\n Base class for align tasks such as sign flipping, procrustes and seedless\n procrustes.\n\n Attributes\n ----------\n Q_ : array, size (d, d)\n Final orthogonal matrix, used to modify ``X`` passed to transform\n\n \"\"\"\n\n def __init__(self) -> None:\n pass\n\n def _check_datasets(\n self, X: np.ndarray, Y: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Ensures that the datasets are numpy, 2d, finite, and have the same\n number of components. Does not check for same number of vertices.\n Returns copies of these datasets.\n \"\"\"\n # check for numpy-ness\n if not isinstance(X, np.ndarray):\n msg = f\"First dataset is a {type(X)}, not an np.ndarray! \"\n raise TypeError(msg)\n if not isinstance(Y, np.ndarray):\n msg = f\"Second dataset is a {type(Y)}, not an np.ndarray! \"\n raise TypeError(msg)\n # check for 2-dness and finiteness\n X = check_array(X, copy=True)\n Y = check_array(Y, copy=True)\n # check for equal components\n if X.shape[1] != Y.shape[1]:\n msg = \"Two datasets have different number of components!\"\n raise ValueError(msg)\n return X, Y\n\n @abstractmethod\n def fit(self: Self, X: np.ndarray, Y: np.ndarray) -> Self:\n \"\"\"\n Uses the two datasets to learn the matrix :attr:`~graspologic.align.BaseAlign.Q_` that aligns the\n first dataset with the second.\n\n Parameters\n ----------\n X : np.ndarray, shape (n, d)\n Dataset to be mapped to ``Y``, must have same number of dimensions\n (axis 1) as ``Y``.\n\n Y : np.ndarray, shape (m, d)\n Target dataset, must have same number of dimensions (axis 1) as ``X``.\n\n Returns\n -------\n self : returns an instance of self\n \"\"\"\n pass\n\n def transform(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Transforms the dataset ``X`` using the learned matrix :attr:`~graspologic.align.BaseAlign.Q_`. This may\n be the same as the first dataset as in :func:`~graspologic.align.BaseAlign.fit`, or a new dataset.\n For example, additional samples from the same dataset.\n\n Parameters\n ----------\n X : np.ndarray, shape(m, d)\n Dataset to be transformed, must have same number of dimensions\n (axis 1) as ``X`` and ``Y`` that were passed to fit.\n\n Returns\n -------\n X_prime : np.ndarray, shape (n, d)\n First dataset of vectors, aligned to second. Equal to\n ``X`` @ :attr:`~graspologic.align.BaseAlign.Q_`.\n \"\"\"\n if not isinstance(X, np.ndarray):\n msg = f\"Dataset is a {type(X)}, not an np.ndarray! \"\n raise TypeError(msg)\n X = check_array(X)\n if not X.shape[1] == self.Q_.shape[0]:\n msg = (\n \"Dataset needs to have the same number of dimensions, d, \"\n \"as datasets X and Y used in fit. Currently, vectors in \"\n f\"the dataset to transform have {X.shape[1]} dimensions, \"\n f\"while vectors in fit had {self.Q_.shape[0]} dimensions.\"\n )\n raise ValueError(msg)\n result: np.ndarray = X @ self.Q_\n return result\n\n def fit_transform(self, X: np.ndarray, Y: np.ndarray) -> np.ndarray:\n \"\"\"\n Uses the two datasets to learn the matrix :attr:`~graspologic.align.BaseAlign.Q_` that aligns the\n first dataset with the second. 
Then, transforms the first dataset ``X``\n using the learned matrix :attr:`~graspologic.align.BaseAlign.Q_`.\n\n Parameters\n ----------\n X : np.ndarray, shape (n, d)\n Dataset to be mapped to ``Y``, must have same number of dimensions\n (axis 1) as ``Y``.\n\n Y : np.ndarray, shape (m, d)\n Target dataset, must have same number of dimensions (axis 1) as ``X``.\n\n Returns\n -------\n X_prime : np.ndarray, shape (n, d)\n First dataset of vectors, aligned to second. Equal to\n ``X`` @ :attr:`~graspologic.align.BaseAlign.Q_`.\n \"\"\"\n self.fit(X, Y)\n return self.transform(X)\n",
"# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport unittest\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_less, assert_equal\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.metrics import adjusted_rand_score\n\nfrom graspologic.cluster import DivisiveCluster\n\n\ndef _test_hierarchical_four_class(**kws):\n \"\"\"\n Clustering above hierarchical data with gmm\n \"\"\"\n # Easily separable hierarchical data with 2 levels\n # of four gaussians\n np.random.seed(1)\n n = 100\n d = 3\n\n X11 = np.random.normal(-5, 0.1, size=(n, d))\n X21 = np.random.normal(-2, 0.1, size=(n, d))\n X12 = np.random.normal(2, 0.1, size=(n, d))\n X22 = np.random.normal(5, 0.1, size=(n, d))\n X = np.vstack((X11, X21, X12, X22))\n\n # true labels of 2 levels\n y_lvl1 = np.repeat([0, 1], 2 * n)\n y_lvl2 = np.repeat([0, 1, 2, 3], n)\n\n np.random.seed(1)\n dc = DivisiveCluster(max_components=2, **kws)\n pred = dc.fit_predict(X, fcluster=True)\n\n # Assert that the 2-cluster model is the best at level 1\n assert_equal(np.max(pred[:, 0]) + 1, 2)\n # Assert that the 4-cluster model is the best at level 1\n assert_equal(len(np.unique(pred[:, 1])), 4)\n\n # Assert that we get perfect clustering at level 1\n ari_lvl1 = adjusted_rand_score(y_lvl1, pred[:, 0])\n assert_allclose(ari_lvl1, 1)\n\n # Assert that we get perfect clustering at level 2\n ari_lvl2 = adjusted_rand_score(y_lvl2, pred[:, 1])\n assert_allclose(ari_lvl2, 1)\n\n\nclass TestDivisiveCluster(unittest.TestCase):\n def test_inputs(self):\n # Generate random data\n X = np.random.normal(0, 1, size=(100, 3))\n\n # min_components < 1\n with self.assertRaises(ValueError):\n dc = DivisiveCluster(min_components=0)\n\n # min_components not integer\n with self.assertRaises(TypeError):\n dc = DivisiveCluster(min_components=\"1\")\n\n # max_components < min_components\n with self.assertRaises(ValueError):\n dc = DivisiveCluster(min_components=1, max_components=0)\n\n # max_components not integer\n with self.assertRaises(TypeError):\n dc = DivisiveCluster(max_components=\"1\")\n\n # cluster_method not in ['gmm', 'kmeans']\n with self.assertRaises(ValueError):\n dc = DivisiveCluster(cluster_method=\"graspologic\")\n\n # delta_criter negative\n with self.assertRaises(ValueError):\n dc = DivisiveCluster(delta_criter=-1)\n\n # cluster_kws not a dict\n with self.assertRaises(TypeError):\n dc = DivisiveCluster(cluster_kws=0)\n\n # max_components > n_sample\n with self.assertRaises(ValueError):\n dc = DivisiveCluster(max_components=101)\n dc.fit(X)\n\n # level not an int\n with self.assertRaises(TypeError):\n rc = DivisiveCluster(max_components=2)\n rc.fit_predict(X, fcluster=True, level=\"1\")\n\n with self.assertRaises(TypeError):\n rc = DivisiveCluster(max_components=2)\n rc.fit(X)\n rc.predict(X, fcluster=True, level=\"1\")\n\n # level not positive\n with self.assertRaises(ValueError):\n rc = DivisiveCluster(max_components=2)\n rc.fit_predict(X, fcluster=True, level=0)\n\n with self.assertRaises(ValueError):\n rc = DivisiveCluster(max_components=2)\n rc.fit(X)\n rc.predict(X, fcluster=True, level=0)\n\n # level exceeds n_level\n with self.assertRaises(ValueError):\n dc = DivisiveCluster(max_components=2)\n dc.fit_predict(X, fcluster=True, level=100)\n\n with self.assertRaises(ValueError):\n dc = DivisiveCluster(max_components=2)\n dc.fit(X)\n dc.predict(X, fcluster=True, level=100)\n\n # level is given but fcluster disabled\n with self.assertRaises(ValueError):\n dc = 
DivisiveCluster(max_components=2)\n dc.fit_predict(X, level=1)\n\n with self.assertRaises(ValueError):\n dc = DivisiveCluster(max_components=2)\n dc.fit(X)\n dc.predict(X, level=1)\n\n def test_predict_without_fit(self):\n # Generate random data\n X = np.random.normal(0, 1, size=(100, 3))\n\n with self.assertRaises(NotFittedError):\n dc = DivisiveCluster(max_components=2)\n dc.predict(X)\n\n def test_predict_on_nonfitted_data_gmm(self):\n # Generate random data to fit on\n np.random.seed(1)\n n = 100\n d = 3\n X1 = np.random.normal(1, 0.1, size=(n, d))\n X2 = np.random.normal(2, 0.1, size=(n, d))\n X = np.vstack((X1, X2))\n y = np.repeat([0, 1], n)\n\n dc = DivisiveCluster(max_components=2)\n pred1 = dc.fit_predict(X)\n\n # Generate random data to predict on\n np.random.seed(2)\n n = 50\n d = 3\n X1_new = np.random.normal(1, 0.1, size=(n, d))\n X2_new = np.random.normal(2, 0.1, size=(n, d))\n X_new = np.vstack((X1_new, X2_new))\n y_new = np.repeat([0, 1], n)\n\n pred2 = dc.predict(X_new)\n\n # Assert that both predictions have the same depth\n assert_equal(pred1.shape[1], pred2.shape[1])\n\n # Assert that both predictions represent a perfect clustering\n # of 2 clusters\n assert_equal(np.max(pred1) + 1, 2)\n ari_1 = adjusted_rand_score(y, pred1[:, 0])\n assert_allclose(ari_1, 1)\n\n assert_equal(np.max(pred2) + 1, 2)\n ari_2 = adjusted_rand_score(y_new, pred2[:, 0])\n assert_allclose(ari_2, 1)\n\n def test_predict_on_nonfitted_data_kmeans(self):\n # Generate random data to fit on\n np.random.seed(1)\n n = 100\n d = 3\n X1 = np.random.normal(1, 0.1, size=(n, d))\n X2 = np.random.normal(2, 0.1, size=(n, d))\n X = np.vstack((X1, X2))\n y = np.repeat([0, 1], n)\n\n dc = DivisiveCluster(max_components=2, cluster_method=\"kmeans\")\n pred1 = dc.fit_predict(X)\n\n # Generate random data to predict on\n np.random.seed(2)\n n = 50\n d = 3\n X1_new = np.random.normal(1, 0.1, size=(n, d))\n X2_new = np.random.normal(2, 0.1, size=(n, d))\n X_new = np.vstack((X1_new, X2_new))\n y_new = np.repeat([0, 1], n)\n\n pred2 = dc.predict(X_new)\n\n # Assert that both predictions have the same depth\n assert_equal(pred1.shape[1], pred2.shape[1])\n\n # Assert that both predictions represent a perfect clustering\n # of 2 clusters at 1st level\n assert_equal(np.max(pred1[:, 0]) + 1, 2)\n ari_1 = adjusted_rand_score(y, pred1[:, 0])\n assert_allclose(ari_1, 1)\n\n assert_equal(np.max(pred2[:, 0]) + 1, 2)\n ari_2 = adjusted_rand_score(y_new, pred2[:, 0])\n assert_allclose(ari_2, 1)\n\n # Assert that predictions on new data have the same or fewer\n # clusters than those on the fitted data at each level\n for lvl in range(pred1.shape[1]):\n n_cluster1 = np.max(pred1[:, lvl]) + 1\n n_cluster2 = np.max(pred2[:, lvl]) + 1\n assert_array_less(n_cluster2, n_cluster1 + 1)\n\n def test_hierarchical_four_class_gmm(self):\n _test_hierarchical_four_class(cluster_method=\"gmm\")\n\n def test_hierarchical_four_class_aic(self):\n _test_hierarchical_four_class(cluster_kws=dict(selection_criteria=\"aic\"))\n\n def test_hierarchical_four_class_kmeans(self):\n _test_hierarchical_four_class(cluster_method=\"kmeans\")\n\n def test_hierarchical_six_class_delta_criter(self):\n \"\"\"\n Clustering on less easily separable hierarchical data with 2 levels\n of six gaussians\n \"\"\"\n\n np.random.seed(1)\n\n n = 100\n d = 3\n\n X11 = np.random.normal(-4, 0.8, size=(n, d))\n X21 = np.random.normal(-3, 0.8, size=(n, d))\n X31 = np.random.normal(-2, 0.8, size=(n, d))\n X12 = np.random.normal(2, 0.8, size=(n, d))\n X22 = np.random.normal(3, 0.8, 
size=(n, d))\n X32 = np.random.normal(4, 0.8, size=(n, d))\n X = np.vstack((X11, X21, X31, X12, X22, X32))\n\n y_lvl1 = np.repeat([0, 1], 3 * n)\n y_lvl2 = np.repeat([0, 1, 2, 3, 4, 5], n)\n\n # Perform clustering without setting delta_criter\n dc = DivisiveCluster(max_components=2)\n pred = dc.fit_predict(X, fcluster=True)\n\n # Perform clustering while setting delta_criter\n dc = DivisiveCluster(max_components=2, delta_criter=10)\n pred_delta_criter = dc.fit_predict(X, fcluster=True)\n\n # Assert that pred has more levels than pred_delta_criter\n assert_equal(pred.shape[1] - 1, pred_delta_criter.shape[1])\n\n # Assert that both pred_delta_criter and pred represent\n # perfect clustering at the first level\n ari_lvl1 = adjusted_rand_score(y_lvl1, pred[:, 0])\n assert_allclose(ari_lvl1, 1)\n ari_delta_criter_lvl1 = adjusted_rand_score(y_lvl1, pred_delta_criter[:, 0])\n assert_allclose(ari_delta_criter_lvl1, 1)\n\n # Assert that pred_delta_criter leads to a clustering as good as\n # pred at the second level\n ari_lvl2 = adjusted_rand_score(y_lvl2, pred[:, 1])\n ari_delta_criter_lvl2 = adjusted_rand_score(y_lvl2, pred_delta_criter[:, 1])\n assert_allclose(ari_delta_criter_lvl2, ari_lvl2)\n\n # Assert that pred suggests oversplitting at the last level (level 3)\n # which leads to a worse clustering than the last level\n # of pred_delta_criter (level 2)\n ari_lvl3 = adjusted_rand_score(y_lvl2, pred[:, -1])\n assert_array_less(ari_lvl3, ari_delta_criter_lvl2)\n"
]
| [
[
"sklearn.utils.check_array"
],
[
"numpy.max",
"numpy.random.normal",
"numpy.testing.assert_allclose",
"numpy.testing.assert_equal",
"numpy.random.seed",
"numpy.testing.assert_array_less",
"sklearn.metrics.adjusted_rand_score",
"numpy.unique",
"numpy.repeat",
"numpy.vstack"
]
]
|
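The DivisiveCluster tests above hinge on one idea: recursively split the data in two and keep a split only when a model-selection criterion prefers it, recording the labels level by level. Below is a minimal sketch of that idea using scikit-learn's GaussianMixture with BIC selection; it is not graspologic's implementation, and `bisect_if_better` / `divisive_labels` are illustrative helpers rather than library functions.

```python
import numpy as np
from sklearn.metrics import adjusted_rand_score
from sklearn.mixture import GaussianMixture


def bisect_if_better(X, seed=0):
    """Return a 2-way split if a 2-component GMM beats 1 component on BIC, else None."""
    gmm1 = GaussianMixture(n_components=1, random_state=seed).fit(X)
    gmm2 = GaussianMixture(n_components=2, random_state=seed).fit(X)
    return gmm2.predict(X) if gmm2.bic(X) < gmm1.bic(X) else None


def divisive_labels(X, max_depth=3, seed=0):
    """Recursively split X and return an (n_samples, n_levels) array of labels."""
    n = X.shape[0]
    levels = []
    frontier = [np.arange(n)]                      # index sets still eligible for splitting
    for _ in range(max_depth):
        labels = levels[-1].copy() if levels else np.zeros(n, dtype=int)
        next_frontier, n_splits = [], 0
        for idx in frontier:
            sub = bisect_if_better(X[idx], seed)
            if sub is None:                        # criterion prefers one cluster: stop splitting here
                next_frontier.append(idx)
                continue
            n_splits += 1
            labels[idx[sub == 1]] = labels.max() + 1   # fresh label for one half of the split
            next_frontier.extend([idx[sub == 0], idx[sub == 1]])
        if n_splits == 0:
            break
        levels.append(labels)
        frontier = next_frontier
    return np.column_stack(levels)


gen = np.random.default_rng(1)
X = np.vstack([gen.normal(m, 0.1, size=(100, 3)) for m in (-5, -2, 2, 5)])
y_lvl1, y_lvl2 = np.repeat([0, 1], 200), np.repeat([0, 1, 2, 3], 100)
pred = divisive_labels(X)
print(adjusted_rand_score(y_lvl1, pred[:, 0]), adjusted_rand_score(y_lvl2, pred[:, 1]))
```

On data as cleanly separated as in `_test_hierarchical_four_class`, both printed ARI values should come out at (or very near) 1.0.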
sakibh/qfit-3.0 | [
"fcc9d56b21d2d16ffb2796da0d48003649a31909"
]
| [
"src/qfit/backbone.py"
]
| [
"'''\nExcited States software: qFit 3.0\n\nContributors: Saulo H. P. de Oliveira, Gydo van Zundert, and Henry van den Bedem.\nContact: [email protected]\n\nCopyright (C) 2009-2019 Stanford University\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThis entire text, including the above copyright notice and this permission notice\nshall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n'''\n\nimport numpy as np\nimport scipy as sp\n\nfrom .samplers import BackboneRotator\n\n\ndef adp_ellipsoid_axes(U_ij):\n \"\"\"Calculate principal axes of ADP ellipsoid.\n\n Args:\n U_ij (np.ndarray[np.float32]): square symmetric matrix of anisotropic\n displacement parameters (ADPs) in cartesian coordinates.\n In a PDB, ANISOU cards contain the parameters\n [u_11, u_22, u_33, u_12, u_13, u_23] / 1e-4 Å^2.\n\n Returns:\n List[np.ndarray[np.float32]]: principal axes of the anisotropic\n displacement ellipsoid, from largest to smallest.\n \"\"\"\n # Unscale ADP parameters to Å^2\n # U_ij = U_ij * 1e-4\n\n # Eigendecompose U_ij matrix with lower-triangle eigh\n eigvals, eigvecs = np.linalg.eigh(U_ij)\n\n # Scale unit eigenvectors by associated eigenvalues to return principal axes\n # TODO: Should we? 
This would mean -bba/-bbs would not behave as expected.\n # directions = [e for e in (eigvals * eigvecs).T]\n directions = [e for e in eigvecs.T]\n\n return directions\n\n\ndef compute_jacobian5d(bb_coor):\n \"\"\"Compute the 5D Jacobian for null space computation.\n\n bb_coor : Coordinates of sequential N, CA, and C atoms.\n \"\"\"\n\n nresidues = bb_coor.shape[0] // 3\n N_coor = bb_coor[::3]\n CA_coor = bb_coor[1::3]\n C_coor = bb_coor[2::3]\n\n # Use notations as used in Budday, Lyendecker and Van den Bedem (2016).\n fh = bb_coor[0]\n fd = bb_coor[1]\n fa = bb_coor[-1]\n faa = bb_coor[-2]\n fh_fa = fh + fa\n ndofs = nresidues * 2\n #jacobian = np.zeros((ndofs, 5), dtype=np.float64)\n jacobian = np.zeros((5, ndofs), dtype=np.float64).T\n norm = np.linalg.norm\n # N -> CA rotations\n # Relative distance constraints\n r = CA_coor - N_coor\n r /= norm(r, axis=1).reshape(-1, 1)\n jacobian[::2, :3] = np.cross(r, fh_fa - N_coor)\n # Orientation constraints\n f = np.asmatrix(fa - fh)\n dfh_dq = np.cross(r, fh - N_coor)\n dfd_dq = np.cross(r, fd - N_coor)\n jacobian[::2, 3] = f * np.asmatrix(dfh_dq - dfd_dq).T\n\n f = np.asmatrix(fa - faa)\n dfa_dq = np.cross(r, fa - N_coor)\n jacobian[::2, 4] = f * np.asmatrix(dfh_dq - dfa_dq).T\n\n # C -> CA rotations\n # Relative distance constraints\n r = C_coor - CA_coor\n r /= norm(r, axis=1).reshape(-1, 1)\n jacobian[1::2, :3] = np.cross(r, fh_fa - C_coor)\n # Orientation constraints\n f = np.asmatrix(fa - fh)\n dfh_dq = np.cross(r, fh - CA_coor)\n dfd_dq = np.cross(r, fd - CA_coor)\n jacobian[1::2, 3] = f * np.asmatrix(dfh_dq - dfd_dq).T\n\n f = np.asmatrix(fa - faa)\n dfa_dq = np.cross(r, fa - CA_coor)\n jacobian[1::2, 4] = f * np.asmatrix(dfh_dq - dfa_dq).T\n\n return jacobian.T\n\n\ndef compute_jacobian(bb_coor):\n \"\"\"Compute the 6D Jacobian for null space computation.\n\n bb_coor : Coordinates of sequential N, CA, and C atoms.\n \"\"\"\n\n nresidues = bb_coor.shape[0] // 3\n N_coor = bb_coor[::3]\n CA_coor = bb_coor[1::3]\n C_coor = bb_coor[2::3]\n\n ndofs = nresidues * 2\n jacobian = np.zeros((6, ndofs), dtype=np.float64).T\n norm = np.linalg.norm\n # N -> CA rotations\n # Relative distance constraints\n t1 = CA_coor - N_coor\n t1 /= norm(t1, axis=1).reshape(-1, 1)\n c1 = np.cross(t1, bb_coor[-1] - CA_coor)\n jacobian[::2, :3] = t1\n jacobian[::2, 3:] = c1\n\n # C -> CA rotations\n # Relative distance constraints\n t1 = C_coor - CA_coor\n t1 /= norm(t1, axis=1).reshape(-1, 1)\n c1 = np.cross(t1, bb_coor[-1] - C_coor)\n jacobian[1::2, :3] = t1\n jacobian[1::2, 3:] = c1\n\n return jacobian.T\n\n\ndef project_on_null_space(null_space, gradients):\n null_space = np.asmatrix(null_space)\n projection = null_space * null_space.T\n return projection * gradients\n\n\nclass AtomMoveFunctional:\n\n \"\"\"Functional for obtaining energy and gradient to move a CB atom\"\"\"\n\n def __init__(self, segment, residue_index, atom_name, endpoint):\n self.segment = segment\n self.residue_index = residue_index\n residue = self.segment[residue_index]\n self._atom_index = residue.select('name', atom_name)[0]\n self.endpoint = endpoint\n\n def target(self):\n\n current = self.segment._coor[self._atom_index]\n diff = current - self.endpoint\n energy = np.dot(diff, diff)\n return energy\n\n def gradient(self):\n\n \"\"\"Return the gradient on the CB atom.\"\"\"\n\n current = self.segment._coor[self._atom_index]\n diff = current - self.endpoint\n return 2 * diff\n\n def target_and_gradient(self):\n current = self.segment._coor[self._atom_index]\n diff = current - 
self.endpoint\n energy = np.dot(diff, diff)\n gradient = 2 * diff\n return energy, gradient\n\n def target_and_gradients_phi_psi(self):\n\n \"\"\"Return the gradients by rotating along each phi and psi backbone angle.\"\"\"\n\n target, gradient = self.target_and_gradient()\n normal = gradient / np.linalg.norm(gradient)\n gradients = np.zeros((len(self.segment) * 2, 3), float)\n current = self.segment._coor[self._atom_index]\n for n, residue in enumerate(self.segment.residues):\n # Residues after the selected CB residue have no impact on the CB\n # position. The backbone torsion gradients will be zero.\n if n > self.residue_index:\n continue\n N = residue.extract('name', 'N')\n CA = residue.extract('name', 'CA')\n C = residue.extract('name', 'C')\n origin = N.coor[0]\n phi_axis = CA.coor[0] - origin\n phi_axis /= np.linalg.norm(phi_axis)\n phi_gradient = np.cross(phi_axis, current - origin)\n phi_gradient_unit = phi_gradient / np.linalg.norm(phi_gradient)\n gradients[2 * n] = np.dot(phi_gradient_unit, normal) * phi_gradient\n\n if n == self.residue_index:\n continue\n origin = CA.coor[0]\n psi_axis = C.coor[0] - origin\n psi_axis /= np.linalg.norm(psi_axis)\n psi_gradient = np.cross(psi_axis, current - origin)\n psi_gradient_unit = psi_gradient / np.linalg.norm(phi_gradient)\n gradients[2 * n + 1] = np.dot(psi_gradient_unit, normal) * psi_gradient\n return target, gradients\n\n\nclass NullSpaceOptimizer:\n\n def __init__(self, segment):\n\n self.segment = segment\n self.ndofs = len(segment) * 2\n self.rotator = BackboneRotator(segment)\n self._bb_selection = np.sort(self.segment.select('name', ('N', 'CA', 'C')))\n self._starting_coor = self.segment.coor\n\n def optimize(self, atom_name, endpoint):\n residue_index = int(len(self.segment) / 2.0)\n self._functional = AtomMoveFunctional(self.segment, residue_index, atom_name, endpoint)\n\n torsions = np.zeros(self.ndofs, float)\n\n options = {}\n minimize = sp.optimize.minimize\n result = minimize(self.target_and_gradient, torsions,\n method='L-BFGS-B', jac=True, options=options)\n return result\n\n def target_and_gradient(self, torsions):\n\n self.rotator(torsions)\n #target, gradient = self._functional.target_and_gradients_phi_psi()\n target = self._functional.target()\n\n tmp = torsions.copy()\n delta = 1e-4\n gradients = np.zeros(torsions.size)\n for n in range(torsions.size):\n tmp[n] += delta\n self.rotator(tmp)\n fp = self._functional.target()\n\n tmp[n] -= 2 * delta\n self.rotator(tmp)\n fn = self._functional.target()\n\n gradients[n] = (fp - fn) / (2 * delta)\n tmp[n] += delta\n\n bb_coor = self.segment._coor[self._bb_selection]\n jacobian = compute_jacobian(bb_coor)\n null_space = sp.linalg.null_space(jacobian)\n null_space = np.asmatrix(null_space)\n projector = np.asarray(null_space * null_space.T)\n null_space_gradients = np.dot(projector, gradients)\n self.segment.coor = self._starting_coor\n return target, null_space_gradients\n"
]
| [
[
"numpy.asmatrix",
"numpy.dot",
"numpy.linalg.norm",
"numpy.asarray",
"numpy.zeros",
"numpy.linalg.eigh",
"scipy.linalg.null_space",
"numpy.cross"
]
]
|
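The `NullSpaceOptimizer` above combines a finite-difference gradient over the backbone torsions with a projection onto the null space of the constraint Jacobian, so that a step along the projected gradient leaves the endpoint constraints unchanged to first order. A minimal sketch of just that projection step, with a random 3x6 Jacobian standing in for the output of `compute_jacobian`:

```python
import numpy as np
from scipy.linalg import null_space

rng = np.random.default_rng(0)
jacobian = rng.normal(size=(3, 6))   # 3 constraint rows on 6 torsional degrees of freedom
gradient = rng.normal(size=6)        # unconstrained gradient with respect to those torsions

N = null_space(jacobian)             # orthonormal basis of the Jacobian's null space
projector = N @ N.T                  # projection matrix onto that subspace
projected = projector @ gradient

# First-order check: the projected gradient lies in the null space, so it does not
# violate the constraints, while the raw gradient generally does.
print(np.allclose(jacobian @ projected, 0.0))   # True
print(np.allclose(jacobian @ gradient, 0.0))    # almost surely False for random data
```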
cbilodeau2/chemprop | [
"08903b0af0f62fdb09c497fd2e21fbbe2b5261ae"
]
| [
"chemprop/train/loss_funcs.py"
]
| [
"\"\"\"Custom loss functions.\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nclass ContrastiveLoss(nn.Module):\n\n def __init__(self):\n \"\"\"\n Initializes contrastive loss with no reduction.\n \"\"\"\n super(ContrastiveLoss, self).__init__()\n self.logsoftmax=nn.LogSoftmax(dim=0)\n\n def get_lengths(self, targets):\n ret, size = [], 0\n for target in targets.flatten().tolist():\n if target:\n ret.append(size)\n size = 1\n else:\n size += 1\n ret.append(size)\n return ret[1:] # Adds dummy size in the very beginning\n\n def forward(self, preds, targets):\n \"\"\"\n Returns contrastive loss. Assumes that batches are marked by one positive pair.\n\n :param preds: Scoring output by the model.\n :param targets: Truth for cmpds.\n :return: Contrastive loss for scores.\n \"\"\"\n lengths = self.get_lengths(targets)\n ret, start = [], 0\n\n for size in lengths:\n ret.append(self.logsoftmax( preds[start:start+size,:] ))\n start += size\n return -torch.cat(ret)\n"
]
| [
[
"torch.nn.LogSoftmax",
"torch.cat"
]
]
|
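A small usage sketch for the `ContrastiveLoss` above, assuming this chemprop fork is installed so that `chemprop.train.loss_funcs` is importable (otherwise paste the class definition directly): predictions are grouped into blocks, each block beginning at a row whose target is positive, and the module returns the unreduced negative log-softmax of the scores within each block, leaving any selection or reduction to the caller.

```python
import torch

from chemprop.train.loss_funcs import ContrastiveLoss  # path as in this repository snapshot

loss_fn = ContrastiveLoss()

# Two blocks of three compounds each; a positive target marks the start of a block.
preds = torch.tensor([[2.0], [0.1], [-1.0],
                      [1.5], [0.3], [0.2]])
targets = torch.tensor([[1], [0], [0],
                        [1], [0], [0]])

per_row = loss_fn(preds, targets)        # shape (6, 1): -log_softmax within each block of 3
loss = per_row[targets.bool()].mean()    # one possible reduction: average over the positive rows
print(per_row.shape, loss.item())
```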
wen0618/simple-faster-rcnn-pytorch | [
"b5c41eeaf9f0641f65bdd6fe0d7f1301bd1cabf3"
]
| [
"model/region_proposal_network.py"
]
| [
"import numpy as np\nfrom torch.nn import functional as F\nimport torch as t\nfrom torch import nn\n\nfrom model.utils.bbox_tools import generate_anchor_base\nfrom model.utils.creator_tool import ProposalCreator\n\n\nclass RegionProposalNetwork(nn.Module):\n \"\"\"Region Proposal Network introduced in Faster R-CNN.\n\n This is Region Proposal Network introduced in Faster R-CNN [#]_.\n This takes features extracted from images and propose\n class agnostic bounding boxes around \"objects\".\n\n .. [#] Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun. \\\n Faster R-CNN: Towards Real-Time Object Detection with \\\n Region Proposal Networks. NIPS 2015.\n\n Args:\n in_channels (int): The channel size of input.\n mid_channels (int): The channel size of the intermediate tensor.\n ratios (list of floats): This is ratios of width to height of\n the anchors.\n anchor_scales (list of numbers): This is areas of anchors.\n Those areas will be the product of the square of an element in\n :obj:`anchor_scales` and the original area of the reference\n window.\n feat_stride (int): Stride size after extracting features from an\n image.\n initialW (callable): Initial weight value. If :obj:`None` then this\n function uses Gaussian distribution scaled by 0.1 to\n initialize weight.\n May also be a callable that takes an array and edits its values.\n proposal_creator_params (dict): Key valued paramters for\n :class:`model.utils.creator_tools.ProposalCreator`.\n\n .. seealso::\n :class:`~model.utils.creator_tools.ProposalCreator`\n\n \"\"\"\n\n def __init__(\n self, in_channels=512, mid_channels=512, ratios=[0.5, 1, 2],\n anchor_scales=[8, 16, 32], feat_stride=16,\n proposal_creator_params=dict(),\n ):\n super(RegionProposalNetwork, self).__init__()\n self.anchor_base = generate_anchor_base(\n anchor_scales=anchor_scales, ratios=ratios)\n self.feat_stride = feat_stride\n self.proposal_layer = ProposalCreator(self, **proposal_creator_params)\n n_anchor = self.anchor_base.shape[0]\n self.conv1 = nn.Conv2d(in_channels, mid_channels, 3, 1, 1)\n self.score = nn.Conv2d(mid_channels, n_anchor * 2, 1, 1, 0)\n self.loc = nn.Conv2d(mid_channels, n_anchor * 4, 1, 1, 0)\n normal_init(self.conv1, 0, 0.01)\n normal_init(self.score, 0, 0.01)\n normal_init(self.loc, 0, 0.01)\n\n def forward(self, x, img_size, scale=1.):\n \"\"\"Forward Region Proposal Network.\n\n Here are notations.\n\n * :math:`N` is batch size.\n * :math:`C` channel size of the input.\n * :math:`H` and :math:`W` are height and witdh of the input feature.\n * :math:`A` is number of anchors assigned to each pixel.\n\n Args:\n x (~torch.autograd.Variable): The Features extracted from images.\n Its shape is :math:`(N, C, H, W)`.\n img_size (tuple of ints): A tuple :obj:`height, width`,\n which contains image size after scaling.\n scale (float): The amount of scaling done to the input images after\n reading them from files.\n\n Returns:\n (~torch.autograd.Variable, ~torch.autograd.Variable, array, array, array):\n\n This is a tuple of five following values.\n\n * **rpn_locs**: Predicted bounding box offsets and scales for \\\n anchors. Its shape is :math:`(N, H W A, 4)`.\n * **rpn_scores**: Predicted foreground scores for \\\n anchors. Its shape is :math:`(N, H W A, 2)`.\n * **rois**: A bounding box array containing coordinates of \\\n proposal boxes. This is a concatenation of bounding box \\\n arrays from multiple images in the batch. \\\n Its shape is :math:`(R', 4)`. 
Given :math:`R_i` predicted \\\n bounding boxes from the :math:`i` th image, \\\n :math:`R' = \\\\sum _{i=1} ^ N R_i`.\n * **roi_indices**: An array containing indices of images to \\\n which RoIs correspond to. Its shape is :math:`(R',)`.\n * **anchor**: Coordinates of enumerated shifted anchors. \\\n Its shape is :math:`(H W A, 4)`.\n\n \"\"\"\n n, _, hh, ww = x.shape # feature map\n anchor = _enumerate_shifted_anchor( # slide anchor_base over the feature map to generate all 9*hh*ww anchors\n np.array(self.anchor_base),\n self.feat_stride, hh, ww)\n\n n_anchor = anchor.shape[0] // (hh * ww) # number of anchors per pixel = 9\n h = F.relu(self.conv1(x)) # first conv layer + relu\n\n rpn_locs = self.loc(h) # location offsets, shape (1, A:36, H, W); H, W: feature map size, A: 4*9 (9 anchors per pixel)\n # UNNOTE: check whether need contiguous\n # A: Yes\n rpn_locs = rpn_locs.permute(0, 2, 3, 1).contiguous().view(n, -1, 4) # (1, A:36, H, W) to (1, H, W, A) to (1, HW9, 4)\n rpn_scores = self.score(h) # conv output (1, 2*9, H, W)\n rpn_scores = rpn_scores.permute(0, 2, 3, 1).contiguous() # (1, H, W, 2*9)\n rpn_softmax_scores = F.softmax(rpn_scores.view(n, hh, ww, n_anchor, 2), dim=4) # (1, H, W, 9, 2), softmax over the binary fg/bg classes\n rpn_fg_scores = rpn_softmax_scores[:, :, :, :, 1].contiguous() # foreground scores\n rpn_fg_scores = rpn_fg_scores.view(n, -1)\n rpn_scores = rpn_scores.view(n, -1, 2) # rpn scores\n\n rois = list()\n roi_indices = list()\n for i in range(n): # batch size n = 1\n roi = self.proposal_layer( # call ProposalCreator to obtain the RoIs\n rpn_locs[i].cpu().data.numpy(),\n rpn_fg_scores[i].cpu().data.numpy(),\n anchor, img_size,\n scale=scale)\n batch_index = i * np.ones((len(roi),), dtype=np.int32) # batch index of each roi; with batch=1 this has no effect\n rois.append(roi)\n roi_indices.append(batch_index)\n\n rois = np.concatenate(rois, axis=0)\n roi_indices = np.concatenate(roi_indices, axis=0)\n return rpn_locs, rpn_scores, rois, roi_indices, anchor\n\n\ndef _enumerate_shifted_anchor(anchor_base, feat_stride, height, width): # map anchors back onto the original image\n # Enumerate all shifted anchors:\n #\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n # return (K*A, 4)\n\n # !TODO: add support for torch.CudaTensor\n # xp = cuda.get_array_module(anchor_base)\n # it seems that it can't be boosted using GPU\n import numpy as xp\n shift_y = xp.arange(0, height * feat_stride, feat_stride) # with 16x downsampling, one step on the feature map equals 16 pixels in the original image\n shift_x = xp.arange(0, width * feat_stride, feat_stride)\n shift_x, shift_y = xp.meshgrid(shift_x, shift_y) # build the grid of coordinates (like MATLAB's meshgrid): every point\n # in the rectangle spanned by shift_x and shift_y (edges included), spaced feat_stride=16 apart; the paired shift_x, shift_y values form the coordinates\n\n shift = xp.stack((shift_y.ravel(), shift_x.ravel(), # flatten y, x and stack them into a shift of shape (K, 4) = (K, (y, x, y, x))\n shift_y.ravel(), shift_x.ravel()), axis=1)\n\n A = anchor_base.shape[0] # A (=9) anchors per grid point\n K = shift.shape[0] # K grid points in total\n anchor = anchor_base.reshape((1, A, 4)) + \\\n shift.reshape((1, K, 4)).transpose((1, 0, 2)) # (1, A, 4) + (K, 1, 4) broadcasts over all K x A combinations, giving all (K, A, 4) anchors\n anchor = anchor.reshape((K * A, 4)).astype(np.float32)\n return anchor\n\n\ndef _enumerate_shifted_anchor_torch(anchor_base, feat_stride, height, width): # pytorch version\n # Enumerate all shifted anchors:\n #\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n # return (K*A, 4)\n\n # !TODO: add support for torch.CudaTensor\n # xp = cuda.get_array_module(anchor_base)\n import torch as t\n \n# shift_y / shift_x = xp.arange(0, height * feat_stride, feat_stride); feat_stride=16 is the upscaling factor,\n# so both directions are scaled back up 16x to the original image size; shift_x, shift_y = xp.meshgrid(shift_x, shift_y)\n# builds a matrix of vertical/horizontal offsets, so every feature-map point can be mapped to its exact position in the original image\n shift_y = t.arange(0, height * feat_stride, feat_stride)\n shift_x = t.arange(0, width * feat_stride, feat_stride)\n shift_x, shift_y = xp.meshgrid(shift_x, shift_y)\n shift = xp.stack((shift_y.ravel(), shift_x.ravel(),\n shift_y.ravel(), shift_x.ravel()), axis=1)\n\n A = anchor_base.shape[0]\n K = shift.shape[0]\n anchor = anchor_base.reshape((1, A, 4)) + \\\n shift.reshape((1, K, 4)).transpose((1, 0, 2))\n # first map every feature-map point to its position in the original image, then generate nine anchors at each position\n anchor = anchor.reshape((K * A, 4)).astype(np.float32)\n return anchor\n\n\ndef normal_init(m, mean, stddev, truncated=False): # normal initialization of the parameters\n \"\"\"\n weight initializer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"torch.arange",
"torch.nn.Conv2d",
"numpy.arange",
"numpy.meshgrid"
]
]
|
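The anchor enumeration in `_enumerate_shifted_anchor` above reduces to broadcasting a small set of base anchors against a grid of feature-map offsets. The following self-contained numpy sketch reproduces just that step on a toy 2x3 feature map with a hand-written two-anchor base set (the real base set comes from `generate_anchor_base`):

```python
import numpy as np

feat_stride, height, width = 16, 2, 3           # tiny 2x3 feature map
anchor_base = np.array([[-8., -8., 8., 8.],     # (A, 4) toy base anchors as (y1, x1, y2, x2)
                        [-16., -16., 16., 16.]])

# Offsets of every feature-map cell in image coordinates, feat_stride pixels apart.
shift_y = np.arange(0, height * feat_stride, feat_stride)
shift_x = np.arange(0, width * feat_stride, feat_stride)
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shift = np.stack((shift_y.ravel(), shift_x.ravel(),
                  shift_y.ravel(), shift_x.ravel()), axis=1)    # (K, 4)

# Broadcasting (1, A, 4) + (K, 1, 4) enumerates every (cell, base anchor) pair.
A, K = anchor_base.shape[0], shift.shape[0]
anchors = (anchor_base.reshape(1, A, 4) +
           shift.reshape(1, K, 4).transpose(1, 0, 2)).reshape(K * A, 4)
print(anchors.shape)   # (12, 4): 6 grid cells x 2 base anchors
```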
vijaykiran/mlflow | [
"4edde91d0fa9909f5894bf84529b3416d52d83f6"
]
| [
"tests/spark/test_spark_model_export.py"
]
| [
"import os\n\nimport json\nimport pandas as pd\nimport pyspark\nfrom pyspark.ml.classification import LogisticRegression\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.pipeline import Pipeline\nfrom pyspark.ml.wrapper import JavaModel\nfrom pyspark.ml.util import _jvm\nfrom pyspark.version import __version__ as pyspark_version\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import DateType\nimport pytest\nfrom sklearn import datasets\nimport shutil\nfrom collections import namedtuple\n\nimport mlflow\nimport mlflow.tracking\nfrom mlflow import active_run, pyfunc, mleap\nfrom mlflow import spark as sparkm\nfrom mlflow.models import Model\nfrom mlflow.utils.file_utils import TempDir\n\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom tests.helper_functions import score_model_in_sagemaker_docker_container\n\nfrom tests.pyfunc.test_spark import score_model_as_udf\n\n\[email protected]\ndef spark_conda_env(tmpdir):\n conda_env = os.path.join(str(tmpdir), \"conda_env.yml\")\n _mlflow_conda_env(conda_env, additional_pip_deps=[\"pyspark=={}\".format(pyspark_version)])\n return conda_env\n\n\nSparkModelWithData = namedtuple(\"SparkModelWithData\",\n [\"model\", \"spark_df\", \"pandas_df\", \"predictions\"])\n\n\n# Specify `autouse=True` to ensure that a context is created\n# before any tests are executed. This ensures that the Hadoop filesystem\n# does not create its own SparkContext without the MLeap libraries required by\n# other tests.\[email protected](scope=\"session\", autouse=True)\ndef spark_context():\n conf = pyspark.SparkConf()\n conf.set(key=\"spark.jars.packages\",\n value='ml.combust.mleap:mleap-spark-base_2.11:0.12.0,'\n 'ml.combust.mleap:mleap-spark_2.11:0.12.0')\n conf.set(key=\"spark_session.python.worker.reuse\", value=True)\n sc = pyspark.SparkContext(master=\"local-cluster[2, 1, 1024]\", conf=conf).getOrCreate()\n return sc\n\n\[email protected](scope=\"session\")\ndef spark_model_iris(spark_context):\n iris = datasets.load_iris()\n X = iris.data # we only take the first two features.\n y = iris.target\n feature_names = [\"0\", \"1\", \"2\", \"3\"]\n pandas_df = pd.DataFrame(X, columns=feature_names) # to make spark_udf work\n pandas_df['label'] = pd.Series(y)\n spark_session = pyspark.sql.SparkSession(spark_context)\n spark_df = spark_session.createDataFrame(pandas_df)\n assembler = VectorAssembler(inputCols=feature_names, outputCol=\"features\")\n lr = LogisticRegression(maxIter=50, regParam=0.1, elasticNetParam=0.8)\n pipeline = Pipeline(stages=[assembler, lr])\n # Fit the model\n model = pipeline.fit(spark_df)\n preds_df = model.transform(spark_df)\n preds = [x.prediction for x in preds_df.select(\"prediction\").collect()]\n return SparkModelWithData(model=model,\n spark_df=spark_df,\n pandas_df=pandas_df,\n predictions=preds)\n\n\[email protected]\ndef model_path(tmpdir):\n return str(tmpdir.mkdir(\"model\"))\n\n\ndef test_hadoop_filesystem(tmpdir):\n # copy local dir to and back from HadoopFS and make sure the results match\n from mlflow.spark import _HadoopFileSystem as FS\n test_dir_0 = os.path.join(str(tmpdir), \"expected\")\n test_file_0 = os.path.join(test_dir_0, \"root\", \"file_0\")\n test_dir_1 = os.path.join(test_dir_0, \"root\", \"subdir\")\n test_file_1 = os.path.join(test_dir_1, \"file_1\")\n os.makedirs(os.path.dirname(test_file_0))\n with open(test_file_0, \"w\") as f:\n f.write(\"test0\")\n os.makedirs(os.path.dirname(test_file_1))\n with open(test_file_1, \"w\") as f:\n f.write(\"test1\")\n remote = 
\"/tmp/mlflow/test0\"\n # File should not be copied in this case\n assert os.path.abspath(test_dir_0) == FS.maybe_copy_from_local_file(test_dir_0, remote)\n FS.copy_from_local_file(test_dir_0, remote, remove_src=False)\n local = os.path.join(str(tmpdir), \"actual\")\n FS.copy_to_local_file(remote, local, remove_src=True)\n assert sorted(os.listdir(os.path.join(local, \"root\"))) == sorted([\n \"subdir\", \"file_0\", \".file_0.crc\"])\n assert sorted(os.listdir(os.path.join(local, \"root\", \"subdir\"))) == sorted([\n \"file_1\", \".file_1.crc\"])\n # compare the files\n with open(os.path.join(test_dir_0, \"root\", \"file_0\")) as expected_f:\n with open(os.path.join(local, \"root\", \"file_0\")) as actual_f:\n assert expected_f.read() == actual_f.read()\n with open(os.path.join(test_dir_0, \"root\", \"subdir\", \"file_1\")) as expected_f:\n with open(os.path.join(local, \"root\", \"subdir\", \"file_1\")) as actual_f:\n assert expected_f.read() == actual_f.read()\n\n # make sure we cleanup\n assert not os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix\n FS.copy_from_local_file(test_dir_0, remote, remove_src=False)\n assert os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix\n FS.delete(remote)\n assert not os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix\n\n\ndef test_model_export(spark_model_iris, model_path, spark_conda_env):\n sparkm.save_model(spark_model_iris.model, path=model_path,\n conda_env=spark_conda_env)\n # 1. score and compare reloaded sparkml model\n reloaded_model = sparkm.load_model(path=model_path)\n preds_df = reloaded_model.transform(spark_model_iris.spark_df)\n preds1 = [x.prediction for x in preds_df.select(\"prediction\").collect()]\n assert spark_model_iris.predictions == preds1\n m = pyfunc.load_pyfunc(model_path)\n # 2. score and compare reloaded pyfunc\n preds2 = m.predict(spark_model_iris.pandas_df)\n assert spark_model_iris.predictions == preds2\n # 3. score and compare reloaded pyfunc Spark udf\n preds3 = score_model_as_udf(model_path, run_id=None, pandas_df=spark_model_iris.pandas_df)\n assert spark_model_iris.predictions == preds3\n assert os.path.exists(sparkm.DFS_TMP)\n\n\[email protected]\ndef test_model_deployment(spark_model_iris, model_path, spark_conda_env):\n sparkm.save_model(spark_model_iris.model, path=model_path,\n conda_env=spark_conda_env,\n # Test both spark ml and mleap\n sample_input=spark_model_iris.spark_df)\n\n # 1. score and compare pyfunc deployed in Sagemaker docker container\n preds1 = score_model_in_sagemaker_docker_container(model_path=model_path,\n data=spark_model_iris.pandas_df,\n flavor=mlflow.pyfunc.FLAVOR_NAME)\n assert spark_model_iris.predictions == preds1\n # 2. 
score and compare mleap deployed in Sagemaker docker container\n preds2 = score_model_in_sagemaker_docker_container(model_path=model_path,\n data=spark_model_iris.pandas_df,\n flavor=mlflow.mleap.FLAVOR_NAME)\n assert spark_model_iris.predictions == preds2\n\n\ndef test_sparkml_model_log(tmpdir, spark_model_iris):\n # Print the coefficients and intercept for multinomial logistic regression\n old_tracking_uri = mlflow.get_tracking_uri()\n cnt = 0\n # should_start_run tests whether or not calling log_model() automatically starts a run.\n for should_start_run in [False, True]:\n for dfs_tmp_dir in [None, os.path.join(str(tmpdir), \"test\")]:\n print(\"should_start_run =\", should_start_run, \"dfs_tmp_dir =\", dfs_tmp_dir)\n try:\n tracking_dir = os.path.abspath(str(tmpdir.join(\"mlruns\")))\n mlflow.set_tracking_uri(\"file://%s\" % tracking_dir)\n if should_start_run:\n mlflow.start_run()\n artifact_path = \"model%d\" % cnt\n cnt += 1\n sparkm.log_model(artifact_path=artifact_path, spark_model=spark_model_iris.model,\n dfs_tmpdir=dfs_tmp_dir)\n run_id = active_run().info.run_uuid\n # test reloaded model\n reloaded_model = sparkm.load_model(artifact_path, run_id=run_id,\n dfs_tmpdir=dfs_tmp_dir)\n preds_df = reloaded_model.transform(spark_model_iris.spark_df)\n preds = [x.prediction for x in preds_df.select(\"prediction\").collect()]\n assert spark_model_iris.predictions == preds\n finally:\n mlflow.end_run()\n mlflow.set_tracking_uri(old_tracking_uri)\n x = dfs_tmp_dir or sparkm.DFS_TMP\n shutil.rmtree(x)\n shutil.rmtree(tracking_dir)\n\n\ndef test_mleap_model_log(spark_model_iris):\n artifact_path = \"model\"\n sparkm.log_model(spark_model=spark_model_iris.model,\n sample_input=spark_model_iris.spark_df,\n artifact_path=artifact_path)\n rid = active_run().info.run_uuid\n model_path = mlflow.tracking.utils._get_model_log_dir(model_name=artifact_path, run_id=rid)\n config_path = os.path.join(model_path, \"MLmodel\")\n mlflow_model = Model.load(config_path)\n assert sparkm.FLAVOR_NAME in mlflow_model.flavors\n assert mleap.FLAVOR_NAME in mlflow_model.flavors\n\n\ndef test_mleap_output_json_format(spark_model_iris, model_path):\n mlflow_model = Model()\n mleap.save_model(spark_model=spark_model_iris.model,\n path=model_path,\n sample_input=spark_model_iris.spark_df,\n mlflow_model=mlflow_model)\n mleap_conf = mlflow_model.flavors[mleap.FLAVOR_NAME]\n schema_path_sub = mleap_conf[\"input_schema\"]\n schema_path_full = os.path.join(model_path, schema_path_sub)\n with open(schema_path_full, \"r\") as f:\n json_schema = json.load(f)\n\n assert \"fields\" in json_schema.keys()\n assert len(json_schema[\"fields\"]) > 0\n assert type(json_schema[\"fields\"][0]) == dict\n assert \"name\" in json_schema[\"fields\"][0]\n\n\ndef test_spark_module_model_save_with_mleap_and_unsupported_transformer_raises_exception(\n spark_model_iris, model_path):\n class CustomTransformer(JavaModel):\n def _transform(self, dataset):\n return dataset\n\n unsupported_pipeline = Pipeline(stages=[CustomTransformer()])\n unsupported_model = unsupported_pipeline.fit(spark_model_iris.spark_df)\n\n with pytest.raises(mleap.MLeapSerializationException):\n sparkm.save_model(spark_model=unsupported_model,\n path=model_path,\n sample_input=spark_model_iris.spark_df)\n\n\ndef test_spark_module_model_save_with_relative_path_and_valid_sample_input_produces_mleap_flavor(\n spark_model_iris):\n with TempDir(chdr=True) as tmp:\n model_path = os.path.basename(tmp.path(\"model\"))\n mlflow_model = Model()\n 
sparkm.save_model(spark_model=spark_model_iris.model,\n path=model_path,\n sample_input=spark_model_iris.spark_df,\n mlflow_model=mlflow_model)\n assert mleap.FLAVOR_NAME in mlflow_model.flavors\n\n config_path = os.path.join(model_path, \"MLmodel\")\n assert os.path.exists(config_path)\n config = Model.load(config_path)\n assert mleap.FLAVOR_NAME in config.flavors\n\n\ndef test_mleap_module_model_save_with_relative_path_and_valid_sample_input_produces_mleap_flavor(\n spark_model_iris):\n with TempDir(chdr=True) as tmp:\n model_path = os.path.basename(tmp.path(\"model\"))\n mlflow_model = Model()\n mleap.save_model(spark_model=spark_model_iris.model,\n path=model_path,\n sample_input=spark_model_iris.spark_df,\n mlflow_model=mlflow_model)\n assert mleap.FLAVOR_NAME in mlflow_model.flavors\n\n config_path = os.path.join(model_path, \"MLmodel\")\n assert os.path.exists(config_path)\n config = Model.load(config_path)\n assert mleap.FLAVOR_NAME in config.flavors\n\n\ndef test_mleap_module_model_save_with_absolute_path_and_valid_sample_input_produces_mleap_flavor(\n spark_model_iris, model_path):\n model_path = os.path.abspath(model_path)\n mlflow_model = Model()\n mleap.save_model(spark_model=spark_model_iris.model,\n path=model_path,\n sample_input=spark_model_iris.spark_df,\n mlflow_model=mlflow_model)\n assert mleap.FLAVOR_NAME in mlflow_model.flavors\n\n config_path = os.path.join(model_path, \"MLmodel\")\n assert os.path.exists(config_path)\n config = Model.load(config_path)\n assert mleap.FLAVOR_NAME in config.flavors\n\n\ndef test_mleap_module_model_save_with_invalid_sample_input_type_raises_exception(\n spark_model_iris, model_path):\n with pytest.raises(Exception):\n invalid_input = pd.DataFrame()\n sparkm.save_model(spark_model=spark_model_iris.model,\n path=model_path,\n sample_input=invalid_input)\n\n\ndef test_mleap_module_model_save_with_unsupported_transformer_raises_serialization_exception(\n spark_model_iris, model_path):\n class CustomTransformer(JavaModel):\n def _transform(self, dataset):\n return dataset\n\n unsupported_pipeline = Pipeline(stages=[CustomTransformer()])\n unsupported_model = unsupported_pipeline.fit(spark_model_iris.spark_df)\n\n with pytest.raises(mleap.MLeapSerializationException):\n mleap.save_model(spark_model=unsupported_model,\n path=model_path,\n sample_input=spark_model_iris.spark_df)\n\n\ndef test_save_with_sample_input_containing_unsupported_data_type_raises_serialization_exception(\n spark_context, model_path):\n sql_context = SQLContext(spark_context)\n unsupported_df = sql_context.createDataFrame([(1, \"2016-09-30\"), (2, \"2017-02-27\")])\n unsupported_df = unsupported_df.withColumn(\"_2\", unsupported_df._2.cast(DateType()))\n pipeline = Pipeline(stages=[])\n model = pipeline.fit(unsupported_df)\n # The Spark `DateType` is not supported by MLeap, so we expect serialization to fail.\n with pytest.raises(mleap.MLeapSerializationException):\n sparkm.save_model(spark_model=model, path=model_path, sample_input=unsupported_df)\n"
]
| [
[
"pandas.DataFrame",
"sklearn.datasets.load_iris",
"pandas.Series"
]
]
|
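A condensed sketch of the save-and-reload round trip exercised by `test_model_export` above, using the same `mlflow.spark` calls as this repository snapshot (newer MLflow releases take `model_uri`-style arguments, so treat the exact signatures as belonging to this snapshot; a working local Spark installation is assumed):

```python
import os
import tempfile

import pandas as pd
import pyspark
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.pipeline import Pipeline
from sklearn import datasets

import mlflow.spark as sparkm

spark = pyspark.sql.SparkSession.builder.master("local[2]").getOrCreate()

# Same iris setup as the spark_model_iris fixture, trimmed down.
iris = datasets.load_iris()
pdf = pd.DataFrame(iris.data, columns=["0", "1", "2", "3"])
pdf["label"] = iris.target
sdf = spark.createDataFrame(pdf)

pipeline = Pipeline(stages=[
    VectorAssembler(inputCols=["0", "1", "2", "3"], outputCol="features"),
    LogisticRegression(maxIter=10, regParam=0.1),
])
model = pipeline.fit(sdf)

# Save the Spark ML flavor to a fresh local directory and reload it.
model_path = os.path.join(tempfile.mkdtemp(), "model")
sparkm.save_model(model, path=model_path)
reloaded = sparkm.load_model(path=model_path)

preds = [row.prediction for row in reloaded.transform(sdf).select("prediction").collect()]
print(len(preds))   # one prediction per row of the training frame
```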
Jacc0027/pvlib-python | [
"65782fdf24c96cc092329942e29904e78b0b3cce"
]
| [
"pvlib/clearsky.py"
]
| [
"\"\"\"\nThe ``clearsky`` module contains several methods\nto calculate clear sky GHI, DNI, and DHI.\n\"\"\"\n\nimport os\nfrom collections import OrderedDict\nimport calendar\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize_scalar\nfrom scipy.linalg import hankel\nimport tables\n\nfrom pvlib import atmosphere, tools\n\n\ndef ineichen(apparent_zenith, airmass_absolute, linke_turbidity,\n altitude=0, dni_extra=1364., perez_enhancement=False):\n '''\n Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model.\n\n Implements the Ineichen and Perez clear sky model for global\n horizontal irradiance (GHI), direct normal irradiance (DNI), and\n calculates the clear-sky diffuse horizontal (DHI) component as the\n difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A\n report on clear sky models found the Ineichen/Perez model to have\n excellent performance with a minimal input data set [3].\n\n Default values for monthly Linke turbidity provided by SoDa [4, 5].\n\n Parameters\n -----------\n apparent_zenith : numeric\n Refraction corrected solar zenith angle in degrees.\n\n airmass_absolute : numeric\n Pressure corrected airmass.\n\n linke_turbidity : numeric\n Linke Turbidity.\n\n altitude : numeric, default 0\n Altitude above sea level in meters.\n\n dni_extra : numeric, default 1364\n Extraterrestrial irradiance. The units of ``dni_extra``\n determine the units of the output.\n\n perez_enhancement : bool, default False\n Controls if the Perez enhancement factor should be applied.\n Setting to True may produce spurious results for times when\n the Sun is near the horizon and the airmass is high.\n See https://github.com/pvlib/pvlib-python/issues/435\n\n Returns\n -------\n clearsky : DataFrame (if Series input) or OrderedDict of arrays\n DataFrame/OrderedDict contains the columns/keys\n ``'dhi', 'dni', 'ghi'``.\n\n See also\n --------\n lookup_linke_turbidity\n pvlib.location.Location.get_clearsky\n\n References\n ----------\n .. [1] P. Ineichen and R. Perez, \"A New airmass independent formulation for\n the Linke turbidity coefficient\", Solar Energy, vol 73, pp. 151-157,\n 2002.\n\n .. [2] R. Perez et. al., \"A New Operational Model for Satellite-Derived\n Irradiances: Description and Validation\", Solar Energy, vol 73, pp.\n 307-317, 2002.\n\n .. [3] M. Reno, C. Hansen, and J. Stein, \"Global Horizontal Irradiance\n Clear Sky Models: Implementation and Analysis\", Sandia National\n Laboratories, SAND2012-2389, 2012.\n\n .. [4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained\n July 17, 2012).\n\n .. [5] J. Remund, et. al., \"Worldwide Linke Turbidity Information\", Proc.\n ISES Solar World Congress, June 2003. Goteborg, Sweden.\n '''\n\n # ghi is calculated using either the equations in [1] by setting\n # perez_enhancement=False (default behavior) or using the model\n # in [2] by setting perez_enhancement=True.\n\n # The NaN handling is a little subtle. The AM input is likely to\n # have NaNs that we'll want to map to 0s in the output. However, we\n # want NaNs in other inputs to propagate through to the output. This\n # is accomplished by judicious use and placement of np.maximum,\n # np.minimum, and np.fmax\n\n # use max so that nighttime values will result in 0s instead of\n # negatives. 
propagates nans.\n cos_zenith = np.maximum(tools.cosd(apparent_zenith), 0)\n\n tl = linke_turbidity\n\n fh1 = np.exp(-altitude/8000.)\n fh2 = np.exp(-altitude/1250.)\n cg1 = 5.09e-05 * altitude + 0.868\n cg2 = 3.92e-05 * altitude + 0.0387\n\n ghi = np.exp(-cg2*airmass_absolute*(fh1 + fh2*(tl - 1)))\n\n # https://github.com/pvlib/pvlib-python/issues/435\n if perez_enhancement:\n ghi *= np.exp(0.01*airmass_absolute**1.8)\n\n # use fmax to map airmass nans to 0s. multiply and divide by tl to\n # reinsert tl nans\n ghi = cg1 * dni_extra * cos_zenith * tl / tl * np.fmax(ghi, 0)\n\n # From [1] (Following [2] leads to 0.664 + 0.16268 / fh1)\n # See https://github.com/pvlib/pvlib-python/pull/808\n b = 0.664 + 0.163/fh1\n # BncI = \"normal beam clear sky radiation\"\n bnci = b * np.exp(-0.09 * airmass_absolute * (tl - 1))\n bnci = dni_extra * np.fmax(bnci, 0)\n\n # \"empirical correction\" SE 73, 157 & SE 73, 312.\n bnci_2 = ((1 - (0.1 - 0.2*np.exp(-tl))/(0.1 + 0.882/fh1)) /\n cos_zenith)\n bnci_2 = ghi * np.fmin(np.fmax(bnci_2, 0), 1e20)\n\n dni = np.minimum(bnci, bnci_2)\n\n dhi = ghi - dni*cos_zenith\n\n irrads = OrderedDict()\n irrads['ghi'] = ghi\n irrads['dni'] = dni\n irrads['dhi'] = dhi\n\n if isinstance(dni, pd.Series):\n irrads = pd.DataFrame.from_dict(irrads)\n\n return irrads\n\n\ndef lookup_linke_turbidity(time, latitude, longitude, filepath=None,\n interp_turbidity=True):\n \"\"\"\n Look up the Linke Turibidity from the ``LinkeTurbidities.h5``\n data file supplied with pvlib.\n\n Parameters\n ----------\n time : pandas.DatetimeIndex\n\n latitude : float or int\n\n longitude : float or int\n\n filepath : None or string, default None\n The path to the ``.h5`` file.\n\n interp_turbidity : bool, default True\n If ``True``, interpolates the monthly Linke turbidity values\n found in ``LinkeTurbidities.h5`` to daily values.\n\n Returns\n -------\n turbidity : Series\n \"\"\"\n\n # The .h5 file 'LinkeTurbidities.h5' contains a single 2160 x 4320 x 12\n # matrix of type uint8 called 'LinkeTurbidity'. The rows represent global\n # latitudes from 90 to -90 degrees; the columns represent global longitudes\n # from -180 to 180; and the depth (third dimension) represents months of\n # the year from January (1) to December (12). 
To determine the Linke\n # turbidity for a position on the Earth's surface for a given month do the\n # following: LT = LinkeTurbidity(LatitudeIndex, LongitudeIndex, month).\n # Note that the numbers within the matrix are 20 * Linke Turbidity,\n # so divide the number from the file by 20 to get the\n # turbidity.\n\n # The nodes of the grid are 5' (1/12=0.0833[arcdeg]) apart.\n # From Section 8 of Aerosol optical depth and Linke turbidity climatology\n # http://www.meteonorm.com/images/uploads/downloads/ieashc36_report_TL_AOD_climatologies.pdf\n # 1st row: 89.9583 S, 2nd row: 89.875 S\n # 1st column: 179.9583 W, 2nd column: 179.875 W\n\n if filepath is None:\n pvlib_path = os.path.dirname(os.path.abspath(__file__))\n filepath = os.path.join(pvlib_path, 'data', 'LinkeTurbidities.h5')\n\n latitude_index = _degrees_to_index(latitude, coordinate='latitude')\n longitude_index = _degrees_to_index(longitude, coordinate='longitude')\n\n with tables.open_file(filepath) as lt_h5_file:\n lts = lt_h5_file.root.LinkeTurbidity[latitude_index,\n longitude_index, :]\n\n if interp_turbidity:\n linke_turbidity = _interpolate_turbidity(lts, time)\n else:\n months = time.month - 1\n linke_turbidity = pd.Series(lts[months], index=time)\n\n linke_turbidity /= 20.\n\n return linke_turbidity\n\n\ndef _is_leap_year(year):\n \"\"\"Determine if a year is leap year.\n\n Parameters\n ----------\n year : numeric\n\n Returns\n -------\n isleap : array of bools\n \"\"\"\n isleap = ((np.mod(year, 4) == 0) &\n ((np.mod(year, 100) != 0) | (np.mod(year, 400) == 0)))\n return isleap\n\n\ndef _interpolate_turbidity(lts, time):\n \"\"\"\n Interpolated monthly Linke turbidity onto daily values.\n\n Parameters\n ----------\n lts : np.array\n Monthly Linke turbidity values.\n time : pd.DatetimeIndex\n Times to be interpolated onto.\n\n Returns\n -------\n linke_turbidity : pd.Series\n The interpolated turbidity.\n \"\"\"\n # Data covers 1 year. Assume that data corresponds to the value at the\n # middle of each month. 
This means that we need to add previous Dec and\n # next Jan to the array so that the interpolation will work for\n # Jan 1 - Jan 15 and Dec 16 - Dec 31.\n lts_concat = np.concatenate([[lts[-1]], lts, [lts[0]]])\n\n # handle leap years\n try:\n isleap = time.is_leap_year\n except AttributeError:\n year = time.year\n isleap = _is_leap_year(year)\n\n dayofyear = time.dayofyear\n days_leap = _calendar_month_middles(2016)\n days_no_leap = _calendar_month_middles(2015)\n\n # Then we map the month value to the day of year value.\n # Do it for both leap and non-leap years.\n lt_leap = np.interp(dayofyear, days_leap, lts_concat)\n lt_no_leap = np.interp(dayofyear, days_no_leap, lts_concat)\n linke_turbidity = np.where(isleap, lt_leap, lt_no_leap)\n\n linke_turbidity = pd.Series(linke_turbidity, index=time)\n\n return linke_turbidity\n\n\ndef _calendar_month_middles(year):\n \"\"\"List of middle day of each month, used by Linke turbidity lookup\"\"\"\n # remove mdays[0] since January starts at mdays[1]\n # make local copy of mdays since we need to change\n # February for leap years\n mdays = np.array(calendar.mdays[1:])\n ydays = 365\n # handle leap years\n if calendar.isleap(year):\n mdays[1] = mdays[1] + 1\n ydays = 366\n middles = np.concatenate(\n [[-calendar.mdays[-1] / 2.0], # Dec last year\n np.cumsum(mdays) - np.array(mdays) / 2., # this year\n [ydays + calendar.mdays[1] / 2.0]]) # Jan next year\n return middles\n\n\ndef _degrees_to_index(degrees, coordinate):\n \"\"\"Transform input degrees to an output index integer. The Linke\n turbidity lookup tables have three dimensions, latitude, longitude, and\n month. Specify a degree value and either 'latitude' or 'longitude' to get\n the appropriate index number for the first two of these index numbers.\n\n Parameters\n ----------\n degrees : float or int\n Degrees of either latitude or longitude.\n coordinate : string\n Specify whether degrees arg is latitude or longitude. Must be set to\n either 'latitude' or 'longitude' or an error will be raised.\n\n Returns\n -------\n index : np.int16\n The latitude or longitude index number to use when looking up values\n in the Linke turbidity lookup table.\n \"\"\"\n # Assign inputmin, inputmax, and outputmax based on degree type.\n if coordinate == 'latitude':\n inputmin = 90\n inputmax = -90\n outputmax = 2160\n elif coordinate == 'longitude':\n inputmin = -180\n inputmax = 180\n outputmax = 4320\n else:\n raise IndexError(\"coordinate must be 'latitude' or 'longitude'.\")\n\n inputrange = inputmax - inputmin\n scale = outputmax/inputrange # number of indices per degree\n center = inputmin + 1 / scale / 2 # shift to center of index\n outputmax -= 1 # shift index to zero indexing\n index = (degrees - center) * scale\n err = IndexError('Input, %g, is out of range (%g, %g).' 
%\n (degrees, inputmin, inputmax))\n\n # If the index is still out of bounds after rounding, raise an error.\n # 0.500001 is used in comparisons instead of 0.5 to allow for a small\n # margin of error which can occur when dealing with floating point numbers.\n if index > outputmax:\n if index - outputmax <= 0.500001:\n index = outputmax\n else:\n raise err\n elif index < 0:\n if -index <= 0.500001:\n index = 0\n else:\n raise err\n # If the index wasn't set to outputmax or 0, round it and cast it as an\n # integer so it can be used in integer-based indexing.\n else:\n index = int(np.around(index))\n\n return index\n\n\ndef haurwitz(apparent_zenith):\n '''\n Determine clear sky GHI using the Haurwitz model.\n\n Implements the Haurwitz clear sky model for global horizontal\n irradiance (GHI) as presented in [1, 2]. A report on clear\n sky models found the Haurwitz model to have the best performance\n in terms of average monthly error among models which require only\n zenith angle [3].\n\n Parameters\n ----------\n apparent_zenith : Series\n The apparent (refraction corrected) sun zenith angle\n in degrees.\n\n Returns\n -------\n ghi : DataFrame\n The modeled global horizonal irradiance in W/m^2 provided\n by the Haurwitz clear-sky model.\n\n References\n ----------\n\n .. [1] B. Haurwitz, \"Insolation in Relation to Cloudiness and Cloud\n Density,\" Journal of Meteorology, vol. 2, pp. 154-166, 1945.\n\n .. [2] B. Haurwitz, \"Insolation in Relation to Cloud Type,\" Journal of\n Meteorology, vol. 3, pp. 123-124, 1946.\n\n .. [3] M. Reno, C. Hansen, and J. Stein, \"Global Horizontal Irradiance\n Clear Sky Models: Implementation and Analysis\", Sandia National\n Laboratories, SAND2012-2389, 2012.\n '''\n\n cos_zenith = tools.cosd(apparent_zenith.values)\n clearsky_ghi = np.zeros_like(apparent_zenith.values)\n cos_zen_gte_0 = cos_zenith > 0\n clearsky_ghi[cos_zen_gte_0] = (1098.0 * cos_zenith[cos_zen_gte_0] *\n np.exp(-0.059/cos_zenith[cos_zen_gte_0]))\n\n df_out = pd.DataFrame(index=apparent_zenith.index,\n data=clearsky_ghi,\n columns=['ghi'])\n\n return df_out\n\n\ndef simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1.,\n pressure=101325., dni_extra=1364.):\n \"\"\"\n Calculate the clear sky GHI, DNI, and DHI according to the\n simplified Solis model.\n\n Reference [1]_ describes the accuracy of the model as being 15, 20,\n and 18 W/m^2 for the beam, global, and diffuse components. Reference\n [2]_ provides comparisons with other clear sky models.\n\n Parameters\n ----------\n apparent_elevation : numeric\n The apparent elevation of the sun above the horizon (deg).\n\n aod700 : numeric, default 0.1\n The aerosol optical depth at 700 nm (unitless).\n Algorithm derived for values between 0 and 0.45.\n\n precipitable_water : numeric, default 1.0\n The precipitable water of the atmosphere (cm).\n Algorithm derived for values between 0.2 and 10 cm.\n Values less than 0.2 will be assumed to be equal to 0.2.\n\n pressure : numeric, default 101325.0\n The atmospheric pressure (Pascals).\n Algorithm derived for altitudes between sea level and 7000 m,\n or 101325 and 41000 Pascals.\n\n dni_extra : numeric, default 1364.0\n Extraterrestrial irradiance. The units of ``dni_extra``\n determine the units of the output.\n\n Returns\n -------\n clearsky : DataFrame (if Series input) or OrderedDict of arrays\n DataFrame/OrderedDict contains the columns/keys\n ``'dhi', 'dni', 'ghi'``.\n\n References\n ----------\n .. [1] P. 
Ineichen, \"A broadband simplified version of the\n Solis clear sky model,\" Solar Energy, 82, 758-762 (2008).\n\n .. [2] P. Ineichen, \"Validation of models that estimate the clear\n sky global and beam solar irradiance,\" Solar Energy, 132,\n 332-344 (2016).\n \"\"\"\n\n p = pressure\n\n w = precipitable_water\n\n # algorithm fails for pw < 0.2\n w = np.maximum(w, 0.2)\n\n # this algorithm is reasonably fast already, but it could be made\n # faster by precalculating the powers of aod700, the log(p/p0), and\n # the log(w) instead of repeating the calculations as needed in each\n # function\n\n i0p = _calc_i0p(dni_extra, w, aod700, p)\n\n taub = _calc_taub(w, aod700, p)\n b = _calc_b(w, aod700)\n\n taug = _calc_taug(w, aod700, p)\n g = _calc_g(w, aod700)\n\n taud = _calc_taud(w, aod700, p)\n d = _calc_d(aod700, p)\n\n # this prevents the creation of nans at night instead of 0s\n # it's also friendly to scalar and series inputs\n sin_elev = np.maximum(1.e-30, np.sin(np.radians(apparent_elevation)))\n\n dni = i0p * np.exp(-taub/sin_elev**b)\n ghi = i0p * np.exp(-taug/sin_elev**g) * sin_elev\n dhi = i0p * np.exp(-taud/sin_elev**d)\n\n irrads = OrderedDict()\n irrads['ghi'] = ghi\n irrads['dni'] = dni\n irrads['dhi'] = dhi\n\n if isinstance(dni, pd.Series):\n irrads = pd.DataFrame.from_dict(irrads)\n\n return irrads\n\n\ndef _calc_i0p(i0, w, aod700, p):\n \"\"\"Calculate the \"enhanced extraterrestrial irradiance\".\"\"\"\n p0 = 101325.\n io0 = 1.08 * w**0.0051\n i01 = 0.97 * w**0.032\n i02 = 0.12 * w**0.56\n i0p = i0 * (i02*aod700**2 + i01*aod700 + io0 + 0.071*np.log(p/p0))\n\n return i0p\n\n\ndef _calc_taub(w, aod700, p):\n \"\"\"Calculate the taub coefficient\"\"\"\n p0 = 101325.\n tb1 = 1.82 + 0.056*np.log(w) + 0.0071*np.log(w)**2\n tb0 = 0.33 + 0.045*np.log(w) + 0.0096*np.log(w)**2\n tbp = 0.0089*w + 0.13\n\n taub = tb1*aod700 + tb0 + tbp*np.log(p/p0)\n\n return taub\n\n\ndef _calc_b(w, aod700):\n \"\"\"Calculate the b coefficient.\"\"\"\n\n b1 = 0.00925*aod700**2 + 0.0148*aod700 - 0.0172\n b0 = -0.7565*aod700**2 + 0.5057*aod700 + 0.4557\n\n b = b1 * np.log(w) + b0\n\n return b\n\n\ndef _calc_taug(w, aod700, p):\n \"\"\"Calculate the taug coefficient\"\"\"\n p0 = 101325.\n tg1 = 1.24 + 0.047*np.log(w) + 0.0061*np.log(w)**2\n tg0 = 0.27 + 0.043*np.log(w) + 0.0090*np.log(w)**2\n tgp = 0.0079*w + 0.1\n taug = tg1*aod700 + tg0 + tgp*np.log(p/p0)\n\n return taug\n\n\ndef _calc_g(w, aod700):\n \"\"\"Calculate the g coefficient.\"\"\"\n\n g = -0.0147*np.log(w) - 0.3079*aod700**2 + 0.2846*aod700 + 0.3798\n\n return g\n\n\ndef _calc_taud(w, aod700, p):\n \"\"\"Calculate the taud coefficient.\"\"\"\n\n # isscalar tests needed to ensure that the arrays will have the\n # right shape in the tds calculation.\n # there's probably a better way to do this.\n\n if np.isscalar(w) and np.isscalar(aod700):\n w = np.array([w])\n aod700 = np.array([aod700])\n elif np.isscalar(w):\n w = np.full_like(aod700, w)\n elif np.isscalar(aod700):\n aod700 = np.full_like(w, aod700)\n\n # set up nan-tolerant masks\n aod700_lt_0p05 = np.full_like(aod700, False, dtype='bool')\n np.less(aod700, 0.05, where=~np.isnan(aod700), out=aod700_lt_0p05)\n aod700_mask = np.array([aod700_lt_0p05, ~aod700_lt_0p05], dtype=np.int)\n\n # create tuples of coefficients for\n # aod700 < 0.05, aod700 >= 0.05\n td4 = 86*w - 13800, -0.21*w + 11.6\n td3 = -3.11*w + 79.4, 0.27*w - 20.7\n td2 = -0.23*w + 74.8, -0.134*w + 15.5\n td1 = 0.092*w - 8.86, 0.0554*w - 5.71\n td0 = 0.0042*w + 3.12, 0.0057*w + 2.94\n tdp = -0.83*(1+aod700)**(-17.2), 
-0.71*(1+aod700)**(-15.0)\n\n tds = (np.array([td0, td1, td2, td3, td4, tdp]) * aod700_mask).sum(axis=1)\n\n p0 = 101325.\n taud = (tds[4]*aod700**4 + tds[3]*aod700**3 + tds[2]*aod700**2 +\n tds[1]*aod700 + tds[0] + tds[5]*np.log(p/p0))\n\n # be polite about matching the output type to the input type(s)\n if len(taud) == 1:\n taud = taud[0]\n\n return taud\n\n\ndef _calc_d(aod700, p):\n \"\"\"Calculate the d coefficient.\"\"\"\n\n p0 = 101325.\n dp = 1/(18 + 152*aod700)\n d = -0.337*aod700**2 + 0.63*aod700 + 0.116 + dp*np.log(p/p0)\n\n return d\n\n\ndef _calc_stats(data, samples_per_window, sample_interval, H):\n \"\"\" Calculates statistics for each window, used by Reno-style clear\n sky detection functions. Does not return the line length statistic\n which is provided by _calc_windowed_stat and _line_length.\n\n Calculations are done on a sliding window defined by the Hankel matrix H.\n Columns in H define the indices for each window. Each window contains\n samples_per_window index values. The first window starts with index 0;\n the last window ends at the last index position in data.\n\n In the calculation of data_slope_nstd, a choice is made here where [1]_ is\n ambiguous. data_slope_nstd is the standard deviation of slopes divided by\n the mean GHI for each interval; see [1]_ Eq. 11. For intervals containing\n e.g. 10 values, there are 9 slope values in the standard deviation, and the\n mean is calculated using all 10 values. Eq. 11 in [1]_ is ambiguous if\n the mean should be calculated using 9 points (left ends of each slope)\n or all 10 points.\n\n Parameters\n ----------\n data : Series\n samples_per_window : int\n Number of data points in each window\n sample_interval : float\n Time in minutes in each sample interval\n H : 2D ndarray\n Hankel matrix defining the indices for each window.\n\n Returns\n -------\n data_mean : Series\n mean of data in each window\n data_max : Series\n maximum of data in each window\n data_slope_nstd : Series\n standard deviation of difference between data points in each window\n data_slope : Series\n difference between successive data points\n\n References\n ----------\n .. [1] Reno, M.J. and C.W. Hansen, \"Identification of periods of clear\n sky irradiance in time series of GHI measurements\" Renewable Energy,\n v90, p. 520-531, 2016.\n \"\"\"\n\n data_mean = data.values[H].mean(axis=0)\n data_mean = _to_centered_series(data_mean, data.index, samples_per_window)\n data_max = data.values[H].max(axis=0)\n data_max = _to_centered_series(data_max, data.index, samples_per_window)\n # shift to get forward difference, .diff() is backward difference instead\n data_diff = data.diff().shift(-1)\n data_slope = data_diff / sample_interval\n data_slope_nstd = _slope_nstd_windowed(data_slope.values[:-1], data, H,\n samples_per_window, sample_interval)\n data_slope_nstd = data_slope_nstd\n\n return data_mean, data_max, data_slope_nstd, data_slope\n\n\ndef _slope_nstd_windowed(slopes, data, H, samples_per_window, sample_interval):\n with np.errstate(divide='ignore', invalid='ignore'):\n nstd = slopes[H[:-1, ]].std(ddof=1, axis=0) \\\n / data.values[H].mean(axis=0)\n return _to_centered_series(nstd, data.index, samples_per_window)\n\n\ndef _max_diff_windowed(data, H, samples_per_window):\n raw = np.diff(data)\n raw = np.abs(raw[H[:-1, ]]).max(axis=0)\n return _to_centered_series(raw, data.index, samples_per_window)\n\n\ndef _line_length_windowed(data, H, samples_per_window,\n sample_interval):\n raw = np.sqrt(np.diff(data)**2. 
+ sample_interval**2.)\n raw = np.sum(raw[H[:-1, ]], axis=0)\n return _to_centered_series(raw, data.index, samples_per_window)\n\n\ndef _to_centered_series(vals, idx, samples_per_window):\n vals = np.pad(vals, ((0, len(idx) - len(vals)),), mode='constant',\n constant_values=np.nan)\n shift = samples_per_window // 2 # align = 'center' only\n return pd.Series(index=idx, data=vals).shift(shift)\n\n\ndef _get_sample_intervals(times, win_length):\n \"\"\" Calculates time interval and samples per window for Reno-style clear\n sky detection functions\n \"\"\"\n deltas = np.diff(times.values) / np.timedelta64(1, '60s')\n\n # determine if we can proceed\n if times.inferred_freq and len(np.unique(deltas)) == 1:\n sample_interval = times[1] - times[0]\n sample_interval = sample_interval.seconds / 60 # in minutes\n samples_per_window = int(win_length / sample_interval)\n return sample_interval, samples_per_window\n else:\n raise NotImplementedError('algorithm does not yet support unequal '\n 'times. consider resampling your data.')\n\n\ndef _clear_sample_index(clear_windows, samples_per_window, align, H):\n \"\"\"\n Returns indices of clear samples in clear windows\n \"\"\"\n # H contains indices for each window, e.g. indices for the first window\n # are in first column of H.\n # clear_windows contains one boolean for each window and is aligned\n # by 'align', default to center\n # shift clear_windows.index to be aligned left (e.g. first value in the\n # left-most position) to line up with the first column of H.\n\n # commented if/else block for future align='left', 'right' capability\n # if align == 'right':\n # shift = 1 - samples_per_window\n # elif align == 'center':\n # shift = - (samples_per_window // 2)\n # else:\n # shift = 0\n shift = -(samples_per_window // 2)\n idx = clear_windows.shift(shift)\n # drop rows at the end corresponding to windows past the end of data\n idx = idx.drop(clear_windows.index[1 - samples_per_window:])\n idx = idx.astype(bool) # shift changed type to object\n clear_samples = np.unique(H[:, idx])\n return clear_samples\n\n\ndef detect_clearsky(measured, clearsky, times=None, window_length=10,\n mean_diff=75, max_diff=75,\n lower_line_length=-5, upper_line_length=10,\n var_diff=0.005, slope_dev=8, max_iterations=20,\n return_components=False):\n \"\"\"\n Detects clear sky times according to the algorithm developed by Reno\n and Hansen for GHI measurements. The algorithm [1]_ was designed and\n validated for analyzing GHI time series only. Users may attempt to\n apply it to other types of time series data using different filter\n settings, but should be skeptical of the results.\n\n The algorithm detects clear sky times by comparing statistics for a\n measured time series and an expected clearsky time series.\n Statistics are calculated using a sliding time window (e.g., 10\n minutes). An iterative algorithm identifies clear periods, uses the\n identified periods to estimate bias in the clearsky data, scales the\n clearsky data and repeats.\n\n Clear times are identified by meeting 5 criteria. Default values for\n these thresholds are appropriate for 10 minute windows of 1 minute\n GHI data.\n\n Parameters\n ----------\n measured : array or Series\n Time series of measured GHI. [W/m2]\n clearsky : array or Series\n Time series of the expected clearsky GHI. [W/m2]\n times : DatetimeIndex or None, default None.\n Times of measured and clearsky values. If None the index of measured\n will be used.\n window_length : int, default 10\n Length of sliding time window in minutes. 
Must be greater than 2\n periods.\n mean_diff : float, default 75\n Threshold value for agreement between mean values of measured\n and clearsky in each interval, see Eq. 6 in [1]. [W/m2]\n max_diff : float, default 75\n Threshold value for agreement between maxima of measured and\n clearsky values in each interval, see Eq. 7 in [1]. [W/m2]\n lower_line_length : float, default -5\n Lower limit of line length criterion from Eq. 8 in [1].\n Criterion satisfied when lower_line_length < line length difference\n < upper_line_length.\n upper_line_length : float, default 10\n Upper limit of line length criterion from Eq. 8 in [1].\n var_diff : float, default 0.005\n Threshold value in Hz for the agreement between normalized\n standard deviations of rate of change in irradiance, see Eqs. 9\n through 11 in [1].\n slope_dev : float, default 8\n Threshold value for agreement between the largest magnitude of\n change in successive values, see Eqs. 12 through 14 in [1].\n max_iterations : int, default 20\n Maximum number of times to apply a different scaling factor to\n the clearsky and redetermine clear_samples. Must be 1 or larger.\n return_components : bool, default False\n Controls if additional output should be returned. See below.\n\n Returns\n -------\n clear_samples : array or Series\n Boolean array or Series of whether or not the given time is\n clear. Return type is the same as the input type.\n\n components : OrderedDict, optional\n Dict of arrays of whether or not the given time window is clear\n for each condition. Only provided if return_components is True.\n\n alpha : scalar, optional\n Scaling factor applied to the clearsky_ghi to obtain the\n detected clear_samples. Only provided if return_components is\n True.\n\n Raises\n ------\n ValueError\n If measured is not a Series and times is not provided\n NotImplementedError\n If timestamps are not equally spaced\n\n References\n ----------\n .. [1] Reno, M.J. and C.W. Hansen, \"Identification of periods of clear\n sky irradiance in time series of GHI measurements\" Renewable Energy,\n v90, p. 520-531, 2016.\n\n Notes\n -----\n Initial implementation in MATLAB by Matthew Reno. Modifications for\n computational efficiency by Joshua Patrick and Curtis Martin. 
Ported\n to Python by Will Holmgren, Tony Lorenzo, and Cliff Hansen.\n\n Differences from MATLAB version:\n\n * no support for unequal times\n * automatically determines sample_interval\n * requires a reference clear sky series instead calculating one\n from a user supplied location and UTCoffset\n * parameters are controllable via keyword arguments\n * option to return individual test components and clearsky scaling\n parameter\n * uses centered windows (Matlab function uses left-aligned windows)\n \"\"\"\n\n if times is None:\n try:\n times = measured.index\n except AttributeError:\n raise ValueError(\"times is required when measured is not a Series\")\n\n # be polite about returning the same type as was input\n ispandas = isinstance(measured, pd.Series)\n\n # for internal use, need a Series\n if not ispandas:\n meas = pd.Series(measured, index=times)\n else:\n meas = measured\n\n if not isinstance(clearsky, pd.Series):\n clear = pd.Series(clearsky, index=times)\n else:\n clear = clearsky\n\n sample_interval, samples_per_window = _get_sample_intervals(times,\n window_length)\n\n # generate matrix of integers for creating windows with indexing\n H = hankel(np.arange(samples_per_window),\n np.arange(samples_per_window-1, len(times)))\n\n # calculate measurement statistics\n meas_mean, meas_max, meas_slope_nstd, meas_slope = _calc_stats(\n meas, samples_per_window, sample_interval, H)\n meas_line_length = _line_length_windowed(\n meas, H, samples_per_window, sample_interval)\n\n # calculate clear sky statistics\n clear_mean, clear_max, _, clear_slope = _calc_stats(\n clear, samples_per_window, sample_interval, H)\n\n # find a scaling factor for the clear sky time series that minimizes the\n # RMSE between the clear times identified in the measured data and the\n # scaled clear sky time series. 
Optimization to determine the scaling\n # factor considers all identified clear times, which is different from [1]\n # where the scaling factor was determined from clear times on days with\n # at least 50% of the day being identified as clear.\n alpha = 1\n for iteration in range(max_iterations):\n scaled_clear = alpha * clear\n clear_line_length = _line_length_windowed(\n scaled_clear, H, samples_per_window, sample_interval)\n\n line_diff = meas_line_length - clear_line_length\n slope_max_diff = _max_diff_windowed(\n meas - scaled_clear, H, samples_per_window)\n # evaluate comparison criteria\n c1 = np.abs(meas_mean - alpha*clear_mean) < mean_diff\n c2 = np.abs(meas_max - alpha*clear_max) < max_diff\n c3 = (line_diff > lower_line_length) & (line_diff < upper_line_length)\n c4 = meas_slope_nstd < var_diff\n c5 = slope_max_diff < slope_dev\n c6 = (clear_mean != 0) & ~np.isnan(clear_mean)\n clear_windows = c1 & c2 & c3 & c4 & c5 & c6\n\n # create array to return\n clear_samples = np.full_like(meas, False, dtype='bool')\n # find the samples contained in any window classified as clear\n idx = _clear_sample_index(clear_windows, samples_per_window, 'center',\n H)\n clear_samples[idx] = True\n\n # find a new alpha\n previous_alpha = alpha\n clear_meas = meas[clear_samples]\n clear_clear = clear[clear_samples]\n\n def rmse(alpha):\n return np.sqrt(np.mean((clear_meas - alpha*clear_clear)**2))\n\n alpha = minimize_scalar(rmse).x\n if round(alpha*10000) == round(previous_alpha*10000):\n break\n else:\n import warnings\n warnings.warn('rescaling failed to converge after %s iterations'\n % max_iterations, RuntimeWarning)\n\n # be polite about returning the same type as was input\n if ispandas:\n clear_samples = pd.Series(clear_samples, index=times)\n\n if return_components:\n components = OrderedDict()\n components['mean_diff_flag'] = c1\n components['max_diff_flag'] = c2\n components['line_length_flag'] = c3\n components['slope_nstd_flag'] = c4\n components['slope_max_flag'] = c5\n components['mean_nan_flag'] = c6\n components['windows'] = clear_windows\n\n components['mean_diff'] = np.abs(meas_mean - alpha * clear_mean)\n components['max_diff'] = np.abs(meas_max - alpha * clear_max)\n components['line_length'] = meas_line_length - clear_line_length\n components['slope_nstd'] = meas_slope_nstd\n components['slope_max'] = slope_max_diff\n\n return clear_samples, components, alpha\n else:\n return clear_samples\n\n\ndef bird(zenith, airmass_relative, aod380, aod500, precipitable_water,\n ozone=0.3, pressure=101325., dni_extra=1364., asymmetry=0.85,\n albedo=0.2):\n \"\"\"\n Bird Simple Clear Sky Broadband Solar Radiation Model\n\n Based on NREL Excel implementation by Daryl R. Myers [1, 2].\n\n Bird and Hulstrom define the zenith as the \"angle between a line to\n the sun and the local zenith\". There is no distinction in the paper\n between solar zenith and apparent (or refracted) zenith, but the\n relative airmass is defined using the Kasten 1966 expression, which\n requires apparent zenith. Although the formulation for calculated\n zenith is never explicitly defined in the report, since the purpose\n was to compare existing clear sky models with \"rigorous radiative\n transfer models\" (RTM) it is possible that apparent zenith was\n obtained as output from the RTM. 
However, the implentation presented\n in PVLIB is tested against the NREL Excel implementation by Daryl\n Myers which uses an analytical expression for solar zenith instead\n of apparent zenith.\n\n Parameters\n ----------\n zenith : numeric\n Solar or apparent zenith angle in degrees - see note above\n airmass_relative : numeric\n Relative airmass\n aod380 : numeric\n Aerosol optical depth [cm] measured at 380[nm]\n aod500 : numeric\n Aerosol optical depth [cm] measured at 500[nm]\n precipitable_water : numeric\n Precipitable water [cm]\n ozone : numeric\n Atmospheric ozone [cm], defaults to 0.3[cm]\n pressure : numeric\n Ambient pressure [Pa], defaults to 101325[Pa]\n dni_extra : numeric\n Extraterrestrial radiation [W/m^2], defaults to 1364[W/m^2]\n asymmetry : numeric\n Asymmetry factor, defaults to 0.85\n albedo : numeric\n Albedo, defaults to 0.2\n\n Returns\n -------\n clearsky : DataFrame (if Series input) or OrderedDict of arrays\n DataFrame/OrderedDict contains the columns/keys\n ``'dhi', 'dni', 'ghi', 'direct_horizontal'`` in [W/m^2].\n\n See also\n --------\n pvlib.atmosphere.bird_hulstrom80_aod_bb\n pvlib.atmosphere.get_relative_airmass\n\n References\n ----------\n .. [1] R. E. Bird and R. L Hulstrom, \"A Simplified Clear Sky model for\n Direct and Diffuse Insolation on Horizontal Surfaces\" SERI Technical\n Report SERI/TR-642-761, Feb 1981. Solar Energy Research Institute,\n Golden, CO.\n\n .. [2] Daryl R. Myers, \"Solar Radiation: Practical Modeling for Renewable\n Energy Applications\", pp. 46-51 CRC Press (2013)\n\n .. [3] `NREL Bird Clear Sky Model <http://rredc.nrel.gov/solar/models/\n clearsky/>`_\n\n .. [4] `SERI/TR-642-761 <http://rredc.nrel.gov/solar/pubs/pdfs/\n tr-642-761.pdf>`_\n\n .. [5] `Error Reports <http://rredc.nrel.gov/solar/models/clearsky/\n error_reports.html>`_\n \"\"\"\n etr = dni_extra # extraradiation\n ze_rad = np.deg2rad(zenith) # zenith in radians\n airmass = airmass_relative\n # Bird clear sky model\n am_press = atmosphere.get_absolute_airmass(airmass, pressure)\n t_rayleigh = (\n np.exp(-0.0903 * am_press ** 0.84 * (\n 1.0 + am_press - am_press ** 1.01\n ))\n )\n am_o3 = ozone*airmass\n t_ozone = (\n 1.0 - 0.1611 * am_o3 * (1.0 + 139.48 * am_o3) ** -0.3034 -\n 0.002715 * am_o3 / (1.0 + 0.044 * am_o3 + 0.0003 * am_o3 ** 2.0)\n )\n t_gases = np.exp(-0.0127 * am_press ** 0.26)\n am_h2o = airmass * precipitable_water\n t_water = (\n 1.0 - 2.4959 * am_h2o / (\n (1.0 + 79.034 * am_h2o) ** 0.6828 + 6.385 * am_h2o\n )\n )\n bird_huldstrom = atmosphere.bird_hulstrom80_aod_bb(aod380, aod500)\n t_aerosol = np.exp(\n -(bird_huldstrom ** 0.873) *\n (1.0 + bird_huldstrom - bird_huldstrom ** 0.7088) * airmass ** 0.9108\n )\n taa = 1.0 - 0.1 * (1.0 - airmass + airmass ** 1.06) * (1.0 - t_aerosol)\n rs = 0.0685 + (1.0 - asymmetry) * (1.0 - t_aerosol / taa)\n id_ = 0.9662 * etr * t_aerosol * t_water * t_gases * t_ozone * t_rayleigh\n ze_cos = np.where(zenith < 90, np.cos(ze_rad), 0.0)\n id_nh = id_ * ze_cos\n ias = (\n etr * ze_cos * 0.79 * t_ozone * t_gases * t_water * taa *\n (0.5 * (1.0 - t_rayleigh) + asymmetry * (1.0 - (t_aerosol / taa))) / (\n 1.0 - airmass + airmass ** 1.02\n )\n )\n gh = (id_nh + ias) / (1.0 - albedo * rs)\n diffuse_horiz = gh - id_nh\n # TODO: be DRY, use decorator to wrap methods that need to return either\n # OrderedDict or DataFrame instead of repeating this boilerplate code\n irrads = OrderedDict()\n irrads['direct_horizontal'] = id_nh\n irrads['ghi'] = gh\n irrads['dni'] = id_\n irrads['dhi'] = diffuse_horiz\n if 
isinstance(irrads['dni'], pd.Series):\n irrads = pd.DataFrame.from_dict(irrads)\n return irrads\n"
]
| [
[
"numpy.minimum",
"numpy.exp",
"scipy.optimize.minimize_scalar",
"numpy.mean",
"numpy.where",
"numpy.radians",
"numpy.cos",
"numpy.cumsum",
"numpy.deg2rad",
"numpy.full_like",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.log",
"pandas.DataFrame",
"numpy.fmax",
"numpy.interp",
"numpy.arange",
"numpy.around",
"numpy.mod",
"numpy.array",
"numpy.diff",
"numpy.isscalar",
"numpy.timedelta64",
"numpy.isnan",
"numpy.errstate",
"pandas.DataFrame.from_dict",
"numpy.sum",
"numpy.abs",
"pandas.Series",
"numpy.unique",
"numpy.maximum"
]
]
|
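The row that ends above records pvlib's clearsky module, whose `detect_clearsky` docstring describes the Reno/Hansen clear-sky detection algorithm. As a minimal usage sketch — assuming only the `detect_clearsky` signature shown in that docstring plus standard numpy/pandas, with an assumed import path and purely illustrative series names and values — one might run the detector on an evenly sampled GHI series like this:

# Assumed import path for the module recorded above (pvlib's clearsky module).
from pvlib.clearsky import detect_clearsky
import numpy as np
import pandas as pd

# Evenly spaced 1-minute timestamps; detect_clearsky requires a regular index.
times = pd.date_range('2024-06-01 06:00', periods=600, freq='1min', tz='UTC')
# Illustrative clear-sky reference and a "measured" series that is mostly clear.
clearsky_ghi = pd.Series(800 * np.sin(np.linspace(0, np.pi, 600)), index=times)
measured_ghi = clearsky_ghi * (1 + 0.01 * np.random.randn(600))

# Boolean Series marking samples that fall in windows classified as clear.
clear_mask = detect_clearsky(measured_ghi, clearsky_ghi, window_length=10)
print(int(clear_mask.sum()), 'of', len(clear_mask), 'samples flagged clear')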
dodobill/pytorch_tutorial | [
"33b76aa47e2fdba072b049159c0f56512562c203"
]
| [
"classifier/second.py"
]
| [
"'''\ntraining a classifier\n1. 加载并规范化测试数据\n2. 定义一个卷积网络\n3. 在训练数据上训练神经网络\n4. 在测试数据山测试网络\n'''\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])#input[channel] = (input[channel] - mean[channel]) / std[channel]\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=4,\n shuffle=False, num_workers=2)\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# functions to show an image\n\n\ndef imshow(img):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n\n\n# get some random training images\ndataiter = iter(trainloader)\nimages, labels = dataiter.next()\n\n# show images\nimshow(torchvision.utils.make_grid(images))\n# print labels\nprint(' '.join('%5s' % classes[labels[j]] for j in range(4)))\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16 * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nnet = Net()\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n\nfor epoch in range(2): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n\nprint('Finished Training')\n\nPATH = './cifar_net.pth'#save model\ntorch.save(net.state_dict(),PATH)\n\nnet = Net()#load model\nnet.load_state_dict(torch.load(PATH))\n\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Conv2d",
"numpy.transpose",
"torch.utils.data.DataLoader",
"torch.load",
"matplotlib.pyplot.show",
"torch.nn.CrossEntropyLoss"
]
]
|
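The classifier/second.py entry above trains a small CIFAR-10 CNN, saves it, and reloads it, but never evaluates it on the test set. A minimal evaluation sketch, assuming the `net`, `testloader`, and `classes` objects defined in that script and using only standard PyTorch calls:

import torch

correct = 0
total = 0
net.eval()  # switch to eval mode (no dropout/batch-norm layers here, but good practice)
with torch.no_grad():  # gradients are not needed for evaluation
    for images, labels in testloader:
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)  # index of the highest logit per image
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy on the test set: %.1f %%' % (100.0 * correct / total))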
d-sot/hdmf | [
"6df64fe9f2f8163d1c688561c4a5a7ae96ae7284"
]
| [
"tests/unit/test_io_hdf5_h5tools.py"
]
| [
"import os\nimport unittest\nimport tempfile\nimport warnings\nimport numpy as np\n\nfrom hdmf.utils import docval, getargs\nfrom hdmf.data_utils import DataChunkIterator, InvalidDataIOError\nfrom hdmf.backends.hdf5.h5tools import HDF5IO, ROOT_NAME\nfrom hdmf.backends.hdf5 import H5DataIO\nfrom hdmf.backends.io import UnsupportedOperation\nfrom hdmf.build import GroupBuilder, DatasetBuilder, BuildManager, TypeMap, ObjectMapper\nfrom hdmf.spec.namespace import NamespaceCatalog\nfrom hdmf.spec.spec import AttributeSpec, DatasetSpec, GroupSpec, ZERO_OR_MANY, ONE_OR_MANY\nfrom hdmf.spec.namespace import SpecNamespace\nfrom hdmf.spec.catalog import SpecCatalog\nfrom hdmf.container import Container\nfrom hdmf.testing import TestCase\n\nfrom h5py import SoftLink, HardLink, ExternalLink, File\nfrom h5py import filters as h5py_filters\n\nfrom tests.unit.utils import Foo, FooBucket, CORE_NAMESPACE\n\n\nclass FooFile(Container):\n\n @docval({'name': 'buckets', 'type': list, 'doc': 'the FooBuckets in this file', 'default': list()})\n def __init__(self, **kwargs):\n buckets = getargs('buckets', kwargs)\n super().__init__(name=ROOT_NAME) # name is not used - FooFile should be the root container\n self.__buckets = buckets\n for f in self.__buckets:\n f.parent = self\n\n def __eq__(self, other):\n return set(self.buckets) == set(other.buckets)\n\n def __str__(self):\n foo_str = \"[\" + \",\".join(str(f) for f in self.buckets) + \"]\"\n return 'buckets=%s' % foo_str\n\n @property\n def buckets(self):\n return self.__buckets\n\n\ndef get_temp_filepath():\n # On Windows, h5py cannot truncate an open file in write mode.\n # The temp file will be closed before h5py truncates it and will be removed during the tearDown step.\n temp_file = tempfile.NamedTemporaryFile()\n temp_file.close()\n return temp_file.name\n\n\nclass H5IOTest(TestCase):\n \"\"\"Tests for h5tools IO tools\"\"\"\n\n def setUp(self):\n self.path = get_temp_filepath()\n self.io = HDF5IO(self.path, mode='a')\n self.f = self.io._file\n\n def tearDown(self):\n self.io.close()\n os.remove(self.path)\n\n ##########################################\n # __chunked_iter_fill__(...) tests\n ##########################################\n def test__chunked_iter_fill(self):\n \"\"\"Matrix test of HDF5IO.__chunked_iter_fill__ using a DataChunkIterator with different parameters\"\"\"\n data_opts = {'iterator': range(10),\n 'numpy': np.arange(30).reshape(5, 2, 3),\n 'list': np.arange(30).reshape(5, 2, 3).tolist(),\n 'sparselist1': [1, 2, 3, None, None, None, None, 8, 9, 10],\n 'sparselist2': [None, None, 3],\n 'sparselist3': [1, 2, 3, None, None], # note: cannot process None in ndarray\n 'nanlist': [[[1, 2, 3, np.nan, np.nan, 6], [np.nan, np.nan, 3, 4, np.nan, np.nan]],\n [[10, 20, 30, 40, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]]]}\n buffer_size_opts = [1, 2, 3, 4] # data is divisible by some of these, some not\n for data_type, data in data_opts.items():\n iter_axis_opts = [0, 1, 2]\n if data_type == 'iterator' or data_type.startswith('sparselist'):\n iter_axis_opts = [0] # only one dimension\n\n for iter_axis in iter_axis_opts:\n for buffer_size in buffer_size_opts:\n with self.subTest(data_type=data_type, iter_axis=iter_axis, buffer_size=buffer_size):\n with warnings.catch_warnings(record=True) as w:\n dci = DataChunkIterator(data=data, buffer_size=buffer_size, iter_axis=iter_axis)\n if len(w) <= 1:\n # init may throw UserWarning for iterating over not-first dim of a list. 
ignore here\n pass\n\n dset_name = '%s, %d, %d' % (data_type, iter_axis, buffer_size)\n my_dset = HDF5IO.__chunked_iter_fill__(self.f, dset_name, dci)\n\n if data_type == 'iterator':\n self.assertListEqual(my_dset[:].tolist(), list(data))\n elif data_type == 'numpy':\n self.assertTrue(np.all(my_dset[:] == data))\n self.assertTupleEqual(my_dset.shape, data.shape)\n elif data_type == 'list' or data_type == 'nanlist':\n data_np = np.array(data)\n np.testing.assert_array_equal(my_dset[:], data_np)\n self.assertTupleEqual(my_dset.shape, data_np.shape)\n elif data_type.startswith('sparselist'):\n # replace None in original data with default hdf5 fillvalue 0\n data_zeros = np.where(np.equal(np.array(data), None), 0, data)\n np.testing.assert_array_equal(my_dset[:], data_zeros)\n self.assertTupleEqual(my_dset.shape, data_zeros.shape)\n\n ##########################################\n # write_dataset tests: scalars\n ##########################################\n def test_write_dataset_scalar(self):\n a = 10\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTupleEqual(dset.shape, ())\n self.assertEqual(dset[()], a)\n\n def test_write_dataset_string(self):\n a = 'test string'\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTupleEqual(dset.shape, ())\n # self.assertEqual(dset[()].decode('utf-8'), a)\n self.assertEqual(dset[()], a)\n\n ##########################################\n # write_dataset tests: lists\n ##########################################\n def test_write_dataset_list(self):\n a = np.arange(30).reshape(5, 2, 3)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a.tolist(), attributes={}))\n dset = self.f['test_dataset']\n self.assertTrue(np.all(dset[:] == a))\n\n def test_write_dataset_list_compress_gzip(self):\n a = H5DataIO(np.arange(30).reshape(5, 2, 3),\n compression='gzip',\n compression_opts=5,\n shuffle=True,\n fletcher32=True)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTrue(np.all(dset[:] == a.data))\n self.assertEqual(dset.compression, 'gzip')\n self.assertEqual(dset.compression_opts, 5)\n self.assertEqual(dset.shuffle, True)\n self.assertEqual(dset.fletcher32, True)\n\n @unittest.skipIf(\"lzf\" not in h5py_filters.encode,\n \"LZF compression not supported in this h5py library install\")\n def test_write_dataset_list_compress_lzf(self):\n warn_msg = (\"lzf compression may not be available on all installations of HDF5. Use of gzip is \"\n \"recommended to ensure portability of the generated HDF5 files.\")\n with self.assertWarnsWith(UserWarning, warn_msg):\n a = H5DataIO(np.arange(30).reshape(5, 2, 3),\n compression='lzf',\n shuffle=True,\n fletcher32=True)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTrue(np.all(dset[:] == a.data))\n self.assertEqual(dset.compression, 'lzf')\n self.assertEqual(dset.shuffle, True)\n self.assertEqual(dset.fletcher32, True)\n\n @unittest.skipIf(\"szip\" not in h5py_filters.encode,\n \"SZIP compression not supported in this h5py library install\")\n def test_write_dataset_list_compress_szip(self):\n warn_msg = (\"szip compression may not be available on all installations of HDF5. 
Use of gzip is \"\n \"recommended to ensure portability of the generated HDF5 files.\")\n with self.assertWarnsWith(UserWarning, warn_msg):\n a = H5DataIO(np.arange(30).reshape(5, 2, 3),\n compression='szip',\n compression_opts=('ec', 16),\n shuffle=True,\n fletcher32=True)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTrue(np.all(dset[:] == a.data))\n self.assertEqual(dset.compression, 'szip')\n self.assertEqual(dset.shuffle, True)\n self.assertEqual(dset.fletcher32, True)\n\n def test_write_dataset_list_compress_available_int_filters(self):\n a = H5DataIO(np.arange(30).reshape(5, 2, 3),\n compression=1,\n shuffle=True,\n fletcher32=True,\n allow_plugin_filters=True)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTrue(np.all(dset[:] == a.data))\n self.assertEqual(dset.compression, 'gzip')\n self.assertEqual(dset.shuffle, True)\n self.assertEqual(dset.fletcher32, True)\n\n def test_write_dataset_list_enable_default_compress(self):\n a = H5DataIO(np.arange(30).reshape(5, 2, 3),\n compression=True)\n self.assertEqual(a.io_settings['compression'], 'gzip')\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTrue(np.all(dset[:] == a.data))\n self.assertEqual(dset.compression, 'gzip')\n\n def test_write_dataset_list_disable_default_compress(self):\n with warnings.catch_warnings(record=True) as w:\n a = H5DataIO(np.arange(30).reshape(5, 2, 3),\n compression=False,\n compression_opts=5)\n self.assertEqual(len(w), 1) # We expect a warning that compression options are being ignored\n self.assertFalse('compression_ops' in a.io_settings)\n self.assertFalse('compression' in a.io_settings)\n\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTrue(np.all(dset[:] == a.data))\n self.assertEqual(dset.compression, None)\n\n def test_write_dataset_list_chunked(self):\n a = H5DataIO(np.arange(30).reshape(5, 2, 3),\n chunks=(1, 1, 3))\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTrue(np.all(dset[:] == a.data))\n self.assertEqual(dset.chunks, (1, 1, 3))\n\n def test_write_dataset_list_fillvalue(self):\n a = H5DataIO(np.arange(20).reshape(5, 4), fillvalue=-1)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))\n dset = self.f['test_dataset']\n self.assertTrue(np.all(dset[:] == a.data))\n self.assertEqual(dset.fillvalue, -1)\n\n ##########################################\n # write_dataset tests: tables\n ##########################################\n def test_write_table(self):\n cmpd_dt = np.dtype([('a', np.int32), ('b', np.float64)])\n data = np.zeros(10, dtype=cmpd_dt)\n data['a'][1] = 101\n data['b'][1] = 0.1\n dt = [{'name': 'a', 'dtype': 'int32', 'doc': 'a column'},\n {'name': 'b', 'dtype': 'float64', 'doc': 'b column'}]\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', data, attributes={}, dtype=dt))\n dset = self.f['test_dataset']\n self.assertEqual(dset['a'].tolist(), data['a'].tolist())\n self.assertEqual(dset['b'].tolist(), data['b'].tolist())\n\n def test_write_table_nested(self):\n b_cmpd_dt = np.dtype([('c', np.int32), ('d', np.float64)])\n cmpd_dt = np.dtype([('a', np.int32), ('b', b_cmpd_dt)])\n data = np.zeros(10, dtype=cmpd_dt)\n data['a'][1] = 101\n data['b']['c'] = 
202\n data['b']['d'] = 10.1\n b_dt = [{'name': 'c', 'dtype': 'int32', 'doc': 'c column'},\n {'name': 'd', 'dtype': 'float64', 'doc': 'd column'}]\n dt = [{'name': 'a', 'dtype': 'int32', 'doc': 'a column'},\n {'name': 'b', 'dtype': b_dt, 'doc': 'b column'}]\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', data, attributes={}, dtype=dt))\n dset = self.f['test_dataset']\n self.assertEqual(dset['a'].tolist(), data['a'].tolist())\n self.assertEqual(dset['b'].tolist(), data['b'].tolist())\n\n ##########################################\n # write_dataset tests: Iterable\n ##########################################\n def test_write_dataset_iterable(self):\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', range(10), attributes={}))\n dset = self.f['test_dataset']\n self.assertListEqual(dset[:].tolist(), list(range(10)))\n\n def test_write_dataset_iterable_multidimensional_array(self):\n a = np.arange(30).reshape(5, 2, 3)\n aiter = iter(a)\n daiter = DataChunkIterator.from_iterable(aiter, buffer_size=2)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', daiter, attributes={}))\n dset = self.f['test_dataset']\n self.assertListEqual(dset[:].tolist(), a.tolist())\n\n def test_write_multi_dci_oaat(self):\n \"\"\"\n Test writing multiple DataChunkIterators, one at a time\n \"\"\"\n a = np.arange(30).reshape(5, 2, 3)\n b = np.arange(30, 60).reshape(5, 2, 3)\n aiter = iter(a)\n biter = iter(b)\n daiter1 = DataChunkIterator.from_iterable(aiter, buffer_size=2)\n daiter2 = DataChunkIterator.from_iterable(biter, buffer_size=2)\n builder = GroupBuilder(\"root\")\n builder.add_dataset('test_dataset1', daiter1, attributes={})\n builder.add_dataset('test_dataset2', daiter2, attributes={})\n self.io.write_builder(builder)\n dset1 = self.f['test_dataset1']\n self.assertListEqual(dset1[:].tolist(), a.tolist())\n dset2 = self.f['test_dataset2']\n self.assertListEqual(dset2[:].tolist(), b.tolist())\n\n def test_write_multi_dci_conc(self):\n \"\"\"\n Test writing multiple DataChunkIterators, concurrently\n \"\"\"\n a = np.arange(30).reshape(5, 2, 3)\n b = np.arange(30, 60).reshape(5, 2, 3)\n aiter = iter(a)\n biter = iter(b)\n daiter1 = DataChunkIterator.from_iterable(aiter, buffer_size=2)\n daiter2 = DataChunkIterator.from_iterable(biter, buffer_size=2)\n builder = GroupBuilder(\"root\")\n builder.add_dataset('test_dataset1', daiter1, attributes={})\n builder.add_dataset('test_dataset2', daiter2, attributes={})\n self.io.write_builder(builder)\n dset1 = self.f['test_dataset1']\n self.assertListEqual(dset1[:].tolist(), a.tolist())\n dset2 = self.f['test_dataset2']\n self.assertListEqual(dset2[:].tolist(), b.tolist())\n\n def test_write_dataset_iterable_multidimensional_array_compression(self):\n a = np.arange(30).reshape(5, 2, 3)\n aiter = iter(a)\n daiter = DataChunkIterator.from_iterable(aiter, buffer_size=2)\n wrapped_daiter = H5DataIO(data=daiter,\n compression='gzip',\n compression_opts=5,\n shuffle=True,\n fletcher32=True)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', wrapped_daiter, attributes={}))\n dset = self.f['test_dataset']\n self.assertEqual(dset.shape, a.shape)\n self.assertListEqual(dset[:].tolist(), a.tolist())\n self.assertEqual(dset.compression, 'gzip')\n self.assertEqual(dset.compression_opts, 5)\n self.assertEqual(dset.shuffle, True)\n self.assertEqual(dset.fletcher32, True)\n\n #############################################\n # write_dataset tests: data chunk iterator\n #############################################\n def 
test_write_dataset_data_chunk_iterator(self):\n dci = DataChunkIterator(data=np.arange(10), buffer_size=2)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', dci, attributes={}, dtype=dci.dtype))\n dset = self.f['test_dataset']\n self.assertListEqual(dset[:].tolist(), list(range(10)))\n self.assertEqual(dset[:].dtype, dci.dtype)\n\n def test_write_dataset_data_chunk_iterator_with_compression(self):\n dci = DataChunkIterator(data=np.arange(10), buffer_size=2)\n wrapped_dci = H5DataIO(data=dci,\n compression='gzip',\n compression_opts=5,\n shuffle=True,\n fletcher32=True,\n chunks=(2,))\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', wrapped_dci, attributes={}))\n dset = self.f['test_dataset']\n self.assertListEqual(dset[:].tolist(), list(range(10)))\n self.assertEqual(dset.compression, 'gzip')\n self.assertEqual(dset.compression_opts, 5)\n self.assertEqual(dset.shuffle, True)\n self.assertEqual(dset.fletcher32, True)\n self.assertEqual(dset.chunks, (2,))\n\n def test_pass_through_of_recommended_chunks(self):\n\n class DC(DataChunkIterator):\n def recommended_chunk_shape(self):\n return (5, 1, 1)\n\n dci = DC(data=np.arange(30).reshape(5, 2, 3))\n wrapped_dci = H5DataIO(data=dci,\n compression='gzip',\n compression_opts=5,\n shuffle=True,\n fletcher32=True)\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', wrapped_dci, attributes={}))\n dset = self.f['test_dataset']\n self.assertEqual(dset.chunks, (5, 1, 1))\n self.assertEqual(dset.compression, 'gzip')\n self.assertEqual(dset.compression_opts, 5)\n self.assertEqual(dset.shuffle, True)\n self.assertEqual(dset.fletcher32, True)\n\n def test_dci_h5dataset(self):\n data = np.arange(30).reshape(5, 2, 3)\n dci1 = DataChunkIterator(data=data, buffer_size=1, iter_axis=0)\n HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci1)\n dset = self.f['test_dataset']\n dci2 = DataChunkIterator(data=dset, buffer_size=2, iter_axis=2)\n\n chunk = dci2.next()\n self.assertTupleEqual(chunk.shape, (5, 2, 2))\n chunk = dci2.next()\n self.assertTupleEqual(chunk.shape, (5, 2, 1))\n\n # TODO test chunk data, shape, selection\n\n self.assertTupleEqual(dci2.recommended_data_shape(), data.shape)\n self.assertIsNone(dci2.recommended_chunk_shape())\n\n def test_dci_h5dataset_sparse_matched(self):\n data = [1, 2, 3, None, None, None, None, 8, 9, 10]\n dci1 = DataChunkIterator(data=data, buffer_size=3)\n HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci1)\n dset = self.f['test_dataset']\n dci2 = DataChunkIterator(data=dset, buffer_size=2)\n # dataset is read such that Nones in original data were not written, but are read as 0s\n\n self.assertTupleEqual(dci2.maxshape, (10,))\n self.assertEqual(dci2.dtype, np.dtype(int))\n count = 0\n for chunk in dci2:\n self.assertEqual(len(chunk.selection), 1)\n if count == 0:\n self.assertListEqual(chunk.data.tolist(), [1, 2])\n self.assertEqual(chunk.selection[0], slice(0, 2))\n elif count == 1:\n self.assertListEqual(chunk.data.tolist(), [3, 0])\n self.assertEqual(chunk.selection[0], slice(2, 4))\n elif count == 2:\n self.assertListEqual(chunk.data.tolist(), [0, 0])\n self.assertEqual(chunk.selection[0], slice(4, 6))\n elif count == 3:\n self.assertListEqual(chunk.data.tolist(), [0, 8])\n self.assertEqual(chunk.selection[0], slice(6, 8))\n elif count == 4:\n self.assertListEqual(chunk.data.tolist(), [9, 10])\n self.assertEqual(chunk.selection[0], slice(8, 10))\n count += 1\n\n self.assertEqual(count, 5)\n self.assertTupleEqual(dci2.recommended_data_shape(), (10,))\n 
self.assertIsNone(dci2.recommended_chunk_shape())\n\n def test_dci_h5dataset_sparse_unmatched(self):\n data = [1, 2, 3, None, None, None, None, 8, 9, 10]\n dci1 = DataChunkIterator(data=data, buffer_size=3)\n HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci1)\n dset = self.f['test_dataset']\n dci2 = DataChunkIterator(data=dset, buffer_size=4)\n # dataset is read such that Nones in original data were not written, but are read as 0s\n\n self.assertTupleEqual(dci2.maxshape, (10,))\n self.assertEqual(dci2.dtype, np.dtype(int))\n count = 0\n for chunk in dci2:\n self.assertEqual(len(chunk.selection), 1)\n if count == 0:\n self.assertListEqual(chunk.data.tolist(), [1, 2, 3, 0])\n self.assertEqual(chunk.selection[0], slice(0, 4))\n elif count == 1:\n self.assertListEqual(chunk.data.tolist(), [0, 0, 0, 8])\n self.assertEqual(chunk.selection[0], slice(4, 8))\n elif count == 2:\n self.assertListEqual(chunk.data.tolist(), [9, 10])\n self.assertEqual(chunk.selection[0], slice(8, 10))\n count += 1\n\n self.assertEqual(count, 3)\n self.assertTupleEqual(dci2.recommended_data_shape(), (10,))\n self.assertIsNone(dci2.recommended_chunk_shape())\n\n def test_dci_h5dataset_scalar(self):\n data = [1]\n dci1 = DataChunkIterator(data=data, buffer_size=3)\n HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci1)\n dset = self.f['test_dataset']\n dci2 = DataChunkIterator(data=dset, buffer_size=4)\n # dataset is read such that Nones in original data were not written, but are read as 0s\n\n self.assertTupleEqual(dci2.maxshape, (1,))\n self.assertEqual(dci2.dtype, np.dtype(int))\n count = 0\n for chunk in dci2:\n self.assertEqual(len(chunk.selection), 1)\n if count == 0:\n self.assertListEqual(chunk.data.tolist(), [1])\n self.assertEqual(chunk.selection[0], slice(0, 1))\n count += 1\n\n self.assertEqual(count, 1)\n self.assertTupleEqual(dci2.recommended_data_shape(), (1,))\n self.assertIsNone(dci2.recommended_chunk_shape())\n\n #############################################\n # H5DataIO general\n #############################################\n def test_warning_on_non_gzip_compression(self):\n # Make sure no warning is issued when using gzip\n with warnings.catch_warnings(record=True) as w:\n dset = H5DataIO(np.arange(30),\n compression='gzip')\n self.assertEqual(len(w), 0)\n self.assertEqual(dset.io_settings['compression'], 'gzip')\n # Make sure a warning is issued when using szip (even if installed)\n if \"szip\" in h5py_filters.encode:\n with warnings.catch_warnings(record=True) as w:\n dset = H5DataIO(np.arange(30),\n compression='szip',\n compression_opts=('ec', 16))\n self.assertEqual(len(w), 1)\n self.assertEqual(dset.io_settings['compression'], 'szip')\n else:\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression='szip', compression_opts=('ec', 16))\n # Make sure a warning is issued when using lzf compression\n with warnings.catch_warnings(record=True) as w:\n dset = H5DataIO(np.arange(30),\n compression='lzf')\n self.assertEqual(len(w), 1)\n self.assertEqual(dset.io_settings['compression'], 'lzf')\n\n def test_error_on_unsupported_compression_filter(self):\n # Make sure gzip does not raise an error\n try:\n H5DataIO(np.arange(30), compression='gzip', compression_opts=5)\n except ValueError:\n self.fail(\"Using gzip compression raised a ValueError when it should not\")\n # Make sure szip raises an error if not installed (or does not raise an error if installed)\n warn_msg = (\"szip compression may not be available on all installations of HDF5. 
Use of gzip is \"\n \"recommended to ensure portability of the generated HDF5 files.\")\n if \"szip\" not in h5py_filters.encode:\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression='szip', compression_opts=('ec', 16))\n else:\n try:\n with self.assertWarnsWith(UserWarning, warn_msg):\n H5DataIO(np.arange(30), compression='szip', compression_opts=('ec', 16))\n except ValueError:\n self.fail(\"SZIP is installed but H5DataIO still raises an error\")\n # Test error on illegal (i.e., a made-up compressor)\n with self.assertRaises(ValueError):\n warn_msg = (\"unknown compression may not be available on all installations of HDF5. Use of gzip is \"\n \"recommended to ensure portability of the generated HDF5 files.\")\n with self.assertWarnsWith(UserWarning, warn_msg):\n H5DataIO(np.arange(30), compression=\"unknown\")\n # Make sure passing int compression filter raise an error if not installed\n if not h5py_filters.h5z.filter_avail(h5py_filters.h5z.FILTER_MAX):\n with self.assertRaises(ValueError):\n warn_msg = (\"%i compression may not be available on all installations of HDF5. Use of gzip is \"\n \"recommended to ensure portability of the generated HDF5 files.\"\n % h5py_filters.h5z.FILTER_MAX)\n with self.assertWarnsWith(UserWarning, warn_msg):\n H5DataIO(np.arange(30), compression=h5py_filters.h5z.FILTER_MAX, allow_plugin_filters=True)\n # Make sure available int compression filters raise an error without passing allow_plugin_filters=True\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression=h5py_filters.h5z.FILTER_DEFLATE)\n\n def test_value_error_on_incompatible_compression_opts(self):\n # Make sure we warn when gzip with szip compression options is used\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression='gzip', compression_opts=('ec', 16))\n # Make sure we warn if gzip with a too high agression is used\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression='gzip', compression_opts=100)\n # Make sure we warn if lzf with gzip compression option is used\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression='lzf', compression_opts=5)\n # Make sure we warn if lzf with szip compression option is used\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression='lzf', compression_opts=('ec', 16))\n # Make sure we warn if szip with gzip compression option is used\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression='szip', compression_opts=4)\n # Make sure szip raises a ValueError if bad options are used (odd compression option)\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression='szip', compression_opts=('ec', 3))\n # Make sure szip raises a ValueError if bad options are used (bad methos)\n with self.assertRaises(ValueError):\n H5DataIO(np.arange(30), compression='szip', compression_opts=('bad_method', 16))\n\n def test_warning_on_linking_of_regular_array(self):\n with warnings.catch_warnings(record=True) as w:\n dset = H5DataIO(np.arange(30),\n link_data=True)\n self.assertEqual(len(w), 1)\n self.assertEqual(dset.link_data, False)\n\n def test_warning_on_setting_io_options_on_h5dataset_input(self):\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', np.arange(10), attributes={}))\n with warnings.catch_warnings(record=True) as w:\n H5DataIO(self.f['test_dataset'],\n compression='gzip',\n compression_opts=4,\n fletcher32=True,\n shuffle=True,\n maxshape=(10, 20),\n chunks=(10,),\n fillvalue=100)\n 
self.assertEqual(len(w), 7)\n\n def test_h5dataio_array_conversion_numpy(self):\n # Test that H5DataIO.__array__ is working when wrapping an ndarray\n test_speed = np.array([10., 20.])\n data = H5DataIO((test_speed))\n self.assertTrue(np.all(np.isfinite(data))) # Force call of H5DataIO.__array__\n\n def test_h5dataio_array_conversion_list(self):\n # Test that H5DataIO.__array__ is working when wrapping a python list\n test_speed = [10., 20.]\n data = H5DataIO(test_speed)\n self.assertTrue(np.all(np.isfinite(data))) # Force call of H5DataIO.__array__\n\n def test_h5dataio_array_conversion_datachunkiterator(self):\n # Test that H5DataIO.__array__ is working when wrapping a python list\n test_speed = DataChunkIterator(data=[10., 20.])\n data = H5DataIO(test_speed)\n with self.assertRaises(NotImplementedError):\n np.isfinite(data) # Force call of H5DataIO.__array__\n\n #############################################\n # Copy/Link h5py.Dataset object\n #############################################\n def test_link_h5py_dataset_input(self):\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', np.arange(10), attributes={}))\n self.io.write_dataset(self.f, DatasetBuilder('test_softlink', self.f['test_dataset'], attributes={}))\n self.assertTrue(isinstance(self.f.get('test_softlink', getlink=True), SoftLink))\n\n def test_copy_h5py_dataset_input(self):\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', np.arange(10), attributes={}))\n self.io.write_dataset(self.f,\n DatasetBuilder('test_copy', self.f['test_dataset'], attributes={}),\n link_data=False)\n self.assertTrue(isinstance(self.f.get('test_copy', getlink=True), HardLink))\n self.assertListEqual(self.f['test_dataset'][:].tolist(),\n self.f['test_copy'][:].tolist())\n\n def test_link_h5py_dataset_h5dataio_input(self):\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', np.arange(10), attributes={}))\n self.io.write_dataset(self.f, DatasetBuilder('test_softlink',\n H5DataIO(data=self.f['test_dataset'],\n link_data=True),\n attributes={}))\n self.assertTrue(isinstance(self.f.get('test_softlink', getlink=True), SoftLink))\n\n def test_copy_h5py_dataset_h5dataio_input(self):\n self.io.write_dataset(self.f, DatasetBuilder('test_dataset', np.arange(10), attributes={}))\n self.io.write_dataset(self.f,\n DatasetBuilder('test_copy',\n H5DataIO(data=self.f['test_dataset'],\n link_data=False), # Force dataset copy\n attributes={}),\n link_data=True) # Make sure the default behavior is set to link the data\n self.assertTrue(isinstance(self.f.get('test_copy', getlink=True), HardLink))\n self.assertListEqual(self.f['test_dataset'][:].tolist(),\n self.f['test_copy'][:].tolist())\n\n def test_list_fill_empty(self):\n dset = self.io.__list_fill__(self.f, 'empty_dataset', [], options={'dtype': int, 'io_settings': {}})\n self.assertTupleEqual(dset.shape, (0,))\n\n def test_list_fill_empty_no_dtype(self):\n with self.assertRaisesRegex(Exception, r\"cannot add \\S+ to [/\\S]+ - could not determine type\"):\n self.io.__list_fill__(self.f, 'empty_dataset', [])\n\n\ndef _get_manager():\n\n foo_spec = GroupSpec('A test group specification with a data type',\n data_type_def='Foo',\n datasets=[DatasetSpec('an example dataset',\n 'int',\n name='my_data',\n attributes=[AttributeSpec('attr2',\n 'an example integer attribute',\n 'int')])],\n attributes=[AttributeSpec('attr1', 'an example string attribute', 'text'),\n AttributeSpec('attr3', 'an example float attribute', 'float')])\n\n tmp_spec = GroupSpec('A subgroup for Foos',\n 
name='foo_holder',\n groups=[GroupSpec('the Foos in this bucket', data_type_inc='Foo', quantity=ZERO_OR_MANY)])\n\n bucket_spec = GroupSpec('A test group specification for a data type containing data type',\n data_type_def='FooBucket',\n groups=[tmp_spec])\n\n class FooMapper(ObjectMapper):\n def __init__(self, spec):\n super().__init__(spec)\n my_data_spec = spec.get_dataset('my_data')\n self.map_spec('attr2', my_data_spec.get_attribute('attr2'))\n\n class BucketMapper(ObjectMapper):\n def __init__(self, spec):\n super().__init__(spec)\n foo_holder_spec = spec.get_group('foo_holder')\n self.unmap(foo_holder_spec)\n foo_spec = foo_holder_spec.get_data_type('Foo')\n self.map_spec('foos', foo_spec)\n\n file_spec = GroupSpec(\"A file of Foos contained in FooBuckets\",\n data_type_def='FooFile',\n groups=[GroupSpec('Holds the FooBuckets',\n name='buckets',\n groups=[GroupSpec(\"One or more FooBuckets\",\n data_type_inc='FooBucket',\n quantity=ONE_OR_MANY)])])\n\n class FileMapper(ObjectMapper):\n def __init__(self, spec):\n super().__init__(spec)\n bucket_spec = spec.get_group('buckets').get_data_type('FooBucket')\n self.map_spec('buckets', bucket_spec)\n\n spec_catalog = SpecCatalog()\n spec_catalog.register_spec(foo_spec, 'test.yaml')\n spec_catalog.register_spec(bucket_spec, 'test.yaml')\n spec_catalog.register_spec(file_spec, 'test.yaml')\n namespace = SpecNamespace(\n 'a test namespace',\n CORE_NAMESPACE,\n [{'source': 'test.yaml'}],\n version='0.1.0',\n catalog=spec_catalog)\n namespace_catalog = NamespaceCatalog()\n namespace_catalog.add_namespace(CORE_NAMESPACE, namespace)\n type_map = TypeMap(namespace_catalog)\n\n type_map.register_container_type(CORE_NAMESPACE, 'Foo', Foo)\n type_map.register_container_type(CORE_NAMESPACE, 'FooBucket', FooBucket)\n type_map.register_container_type(CORE_NAMESPACE, 'FooFile', FooFile)\n\n type_map.register_map(Foo, FooMapper)\n type_map.register_map(FooBucket, BucketMapper)\n type_map.register_map(FooFile, FileMapper)\n\n manager = BuildManager(type_map)\n return manager\n\n\nclass TestRoundTrip(TestCase):\n\n def setUp(self):\n self.manager = _get_manager()\n self.path = get_temp_filepath()\n\n def tearDown(self):\n if os.path.exists(self.path):\n os.remove(self.path)\n\n def test_roundtrip_basic(self):\n # Setup all the data we need\n foo1 = Foo('foo1', [1, 2, 3, 4, 5], \"I am foo1\", 17, 3.14)\n foobucket = FooBucket('test_bucket', [foo1])\n foofile = FooFile([foobucket])\n\n with HDF5IO(self.path, manager=self.manager, mode='w') as io:\n io.write(foofile)\n\n with HDF5IO(self.path, manager=self.manager, mode='r') as io:\n read_foofile = io.read()\n self.assertListEqual(foofile.buckets[0].foos[0].my_data,\n read_foofile.buckets[0].foos[0].my_data[:].tolist())\n\n def test_roundtrip_empty_dataset(self):\n foo1 = Foo('foo1', [], \"I am foo1\", 17, 3.14)\n foobucket = FooBucket('test_bucket', [foo1])\n foofile = FooFile([foobucket])\n\n with HDF5IO(self.path, manager=self.manager, mode='w') as io:\n io.write(foofile)\n\n with HDF5IO(self.path, manager=self.manager, mode='r') as io:\n read_foofile = io.read()\n self.assertListEqual([], read_foofile.buckets[0].foos[0].my_data[:].tolist())\n\n def test_roundtrip_empty_group(self):\n foobucket = FooBucket('test_bucket', [])\n foofile = FooFile([foobucket])\n\n with HDF5IO(self.path, manager=self.manager, mode='w') as io:\n io.write(foofile)\n\n with HDF5IO(self.path, manager=self.manager, mode='r') as io:\n read_foofile = io.read()\n self.assertListEqual([], read_foofile.buckets[0].foos)\n\n\nclass 
TestHDF5IO(TestCase):\n\n def setUp(self):\n self.manager = _get_manager()\n self.path = get_temp_filepath()\n\n foo1 = Foo('foo1', [1, 2, 3, 4, 5], \"I am foo1\", 17, 3.14)\n foobucket = FooBucket('test_bucket', [foo1])\n self.foofile = FooFile([foobucket])\n\n self.file_obj = None\n\n def tearDown(self):\n if os.path.exists(self.path):\n os.remove(self.path)\n\n if self.file_obj is not None:\n fn = self.file_obj.filename\n self.file_obj.close()\n if os.path.exists(fn):\n os.remove(fn)\n\n def test_constructor(self):\n with HDF5IO(self.path, manager=self.manager, mode='w') as io:\n self.assertEqual(io.manager, self.manager)\n self.assertEqual(io.source, self.path)\n\n def test_set_file_mismatch(self):\n self.file_obj = File(get_temp_filepath(), 'w')\n err_msg = (\"You argued %s as this object's path, but supplied a file with filename: %s\"\n % (self.path, self.file_obj.filename))\n with self.assertRaisesWith(ValueError, err_msg):\n HDF5IO(self.path, manager=self.manager, mode='w', file=self.file_obj)\n\n\nclass TestCacheSpec(TestCase):\n\n def setUp(self):\n self.manager = _get_manager()\n self.path = get_temp_filepath()\n\n def tearDown(self):\n if os.path.exists(self.path):\n os.remove(self.path)\n\n def test_cache_spec(self):\n foo1 = Foo('foo1', [0, 1, 2, 3, 4], \"I am foo1\", 17, 3.14)\n foo2 = Foo('foo2', [5, 6, 7, 8, 9], \"I am foo2\", 34, 6.28)\n foobucket = FooBucket('test_bucket', [foo1, foo2])\n foofile = FooFile([foobucket])\n\n with HDF5IO(self.path, manager=self.manager, mode='w') as io:\n io.write(foofile)\n\n ns_catalog = NamespaceCatalog()\n HDF5IO.load_namespaces(ns_catalog, self.path)\n self.assertEqual(ns_catalog.namespaces, (CORE_NAMESPACE,))\n source_types = self.__get_types(io.manager.namespace_catalog)\n read_types = self.__get_types(ns_catalog)\n self.assertSetEqual(source_types, read_types)\n\n def test_double_cache_spec(self):\n # Setup all the data we need\n foo1 = Foo('foo1', [0, 1, 2, 3, 4], \"I am foo1\", 17, 3.14)\n foo2 = Foo('foo2', [5, 6, 7, 8, 9], \"I am foo2\", 34, 6.28)\n foobucket = FooBucket('test_bucket', [foo1, foo2])\n foofile = FooFile([foobucket])\n\n with HDF5IO(self.path, manager=self.manager, mode='w') as io:\n io.write(foofile)\n\n with HDF5IO(self.path, manager=self.manager, mode='a') as io:\n io.write(foofile)\n\n def __get_types(self, catalog):\n types = set()\n for ns_name in catalog.namespaces:\n ns = catalog.get_namespace(ns_name)\n for source in ns['schema']:\n types.update(catalog.get_types(source['source']))\n return types\n\n\nclass TestNoCacheSpec(TestCase):\n\n def setUp(self):\n self.manager = _get_manager()\n self.path = get_temp_filepath()\n\n def tearDown(self):\n if os.path.exists(self.path):\n os.remove(self.path)\n\n def test_no_cache_spec(self):\n # Setup all the data we need\n foo1 = Foo('foo1', [0, 1, 2, 3, 4], \"I am foo1\", 17, 3.14)\n foo2 = Foo('foo2', [5, 6, 7, 8, 9], \"I am foo2\", 34, 6.28)\n foobucket = FooBucket('test_bucket', [foo1, foo2])\n foofile = FooFile([foobucket])\n\n with HDF5IO(self.path, manager=self.manager, mode='w') as io:\n io.write(foofile, cache_spec=False)\n\n with File(self.path, 'r') as f:\n self.assertNotIn('specifications', f)\n\n\nclass HDF5IOMultiFileTest(TestCase):\n \"\"\"Tests for h5tools IO tools\"\"\"\n\n def setUp(self):\n numfiles = 3\n base_name = \"test_multifile_hdf5_%d.h5\"\n self.test_temp_files = [base_name % i for i in range(numfiles)]\n\n # On Windows h5py cannot truncate an open file in write mode.\n # The temp file will be closed before h5py truncates it\n # and will 
be removed during the tearDown step.\n self.io = [HDF5IO(i, mode='a', manager=_get_manager()) for i in self.test_temp_files]\n self.f = [i._file for i in self.io]\n\n def tearDown(self):\n # Close all the files\n for i in self.io:\n i.close()\n del(i)\n self.io = None\n self.f = None\n # Make sure the files have been deleted\n for tf in self.test_temp_files:\n try:\n os.remove(tf)\n except OSError:\n pass\n self.test_temp_files = None\n\n def test_copy_file_with_external_links(self):\n # Create the first file\n foo1 = Foo('foo1', [0, 1, 2, 3, 4], \"I am foo1\", 17, 3.14)\n bucket1 = FooBucket('test_bucket1', [foo1])\n\n foofile1 = FooFile(buckets=[bucket1])\n\n # Write the first file\n self.io[0].write(foofile1)\n\n # Create the second file\n bucket1_read = self.io[0].read()\n foo2 = Foo('foo2', bucket1_read.buckets[0].foos[0].my_data, \"I am foo2\", 34, 6.28)\n bucket2 = FooBucket('test_bucket2', [foo2])\n foofile2 = FooFile(buckets=[bucket2])\n # Write the second file\n self.io[1].write(foofile2)\n self.io[1].close()\n self.io[0].close() # Don't forget to close the first file too\n\n # Copy the file\n self.io[2].close()\n HDF5IO.copy_file(source_filename=self.test_temp_files[1],\n dest_filename=self.test_temp_files[2],\n expand_external=True,\n expand_soft=False,\n expand_refs=False)\n\n # Test that everything is working as expected\n # Confirm that our original data file is correct\n f1 = File(self.test_temp_files[0], 'r')\n self.assertIsInstance(f1.get('/buckets/test_bucket1/foo_holder/foo1/my_data', getlink=True), HardLink)\n # Confirm that we successfully created and External Link in our second file\n f2 = File(self.test_temp_files[1], 'r')\n self.assertIsInstance(f2.get('/buckets/test_bucket2/foo_holder/foo2/my_data', getlink=True), ExternalLink)\n # Confirm that we successfully resolved the External Link when we copied our second file\n f3 = File(self.test_temp_files[2], 'r')\n self.assertIsInstance(f3.get('/buckets/test_bucket2/foo_holder/foo2/my_data', getlink=True), HardLink)\n\n\nclass HDF5IOInitNoFileTest(TestCase):\n \"\"\" Test if file does not exist, init with mode (r, r+) throws error, all others succeed \"\"\"\n\n def test_init_no_file_r(self):\n self.path = \"test_init_nofile_r.h5\"\n with self.assertRaisesWith(UnsupportedOperation,\n \"Unable to open file %s in 'r' mode. File does not exist.\" % self.path):\n HDF5IO(self.path, mode='r')\n\n def test_init_no_file_rplus(self):\n self.path = \"test_init_nofile_rplus.h5\"\n with self.assertRaisesWith(UnsupportedOperation,\n \"Unable to open file %s in 'r+' mode. File does not exist.\" % self.path):\n HDF5IO(self.path, mode='r+')\n\n def test_init_no_file_ok(self):\n # test that no errors are thrown\n modes = ('w', 'w-', 'x', 'a')\n for m in modes:\n self.path = \"test_init_nofile.h5\"\n with HDF5IO(self.path, mode=m):\n pass\n if os.path.exists(self.path):\n os.remove(self.path)\n\n\nclass HDF5IOInitFileExistsTest(TestCase):\n \"\"\" Test if file exists, init with mode w-/x throws error, all others succeed \"\"\"\n\n def setUp(self):\n self.path = get_temp_filepath()\n temp_io = HDF5IO(self.path, mode='w')\n temp_io.close()\n self.io = None\n\n def tearDown(self):\n if self.io is not None:\n self.io.close()\n del(self.io)\n if os.path.exists(self.path):\n os.remove(self.path)\n\n def test_init_wminus_file_exists(self):\n with self.assertRaisesWith(UnsupportedOperation,\n \"Unable to open file %s in 'w-' mode. 
File already exists.\" % self.path):\n self.io = HDF5IO(self.path, mode='w-')\n\n def test_init_x_file_exists(self):\n with self.assertRaisesWith(UnsupportedOperation,\n \"Unable to open file %s in 'x' mode. File already exists.\" % self.path):\n self.io = HDF5IO(self.path, mode='x')\n\n def test_init_file_exists_ok(self):\n # test that no errors are thrown\n modes = ('r', 'r+', 'w', 'a')\n for m in modes:\n with HDF5IO(self.path, mode=m):\n pass\n\n\nclass HDF5IOReadNoDataTest(TestCase):\n \"\"\" Test if file exists and there is no data, read with mode (r, r+, a) throws error \"\"\"\n\n def setUp(self):\n self.path = get_temp_filepath()\n temp_io = HDF5IO(self.path, mode='w')\n temp_io.close()\n self.io = None\n\n def tearDown(self):\n if self.io is not None:\n self.io.close()\n del(self.io)\n\n if os.path.exists(self.path):\n os.remove(self.path)\n\n def test_read_no_data_r(self):\n self.io = HDF5IO(self.path, mode='r')\n with self.assertRaisesWith(UnsupportedOperation,\n \"Cannot read data from file %s in mode 'r'. There are no values.\" % self.path):\n self.io.read()\n\n def test_read_no_data_rplus(self):\n self.io = HDF5IO(self.path, mode='r+')\n with self.assertRaisesWith(UnsupportedOperation,\n \"Cannot read data from file %s in mode 'r+'. There are no values.\" % self.path):\n self.io.read()\n\n def test_read_no_data_a(self):\n self.io = HDF5IO(self.path, mode='a')\n with self.assertRaisesWith(UnsupportedOperation,\n \"Cannot read data from file %s in mode 'a'. There are no values.\" % self.path):\n self.io.read()\n\n\nclass HDF5IOReadData(TestCase):\n \"\"\" Test if file exists and there is no data, read in mode (r, r+, a) is ok\n and read in mode w throws error\n \"\"\"\n\n def setUp(self):\n self.path = get_temp_filepath()\n foo1 = Foo('foo1', [0, 1, 2, 3, 4], \"I am foo1\", 17, 3.14)\n bucket1 = FooBucket('test_bucket1', [foo1])\n self.foofile1 = FooFile(buckets=[bucket1])\n\n with HDF5IO(self.path, manager=_get_manager(), mode='w') as temp_io:\n temp_io.write(self.foofile1)\n self.io = None\n\n def tearDown(self):\n if self.io is not None:\n self.io.close()\n del(self.io)\n if os.path.exists(self.path):\n os.remove(self.path)\n\n def test_read_file_ok(self):\n modes = ('r', 'r+', 'a')\n for m in modes:\n with HDF5IO(self.path, manager=_get_manager(), mode=m) as io:\n io.read()\n\n def test_read_file_w(self):\n with HDF5IO(self.path, manager=_get_manager(), mode='w') as io:\n with self.assertRaisesWith(UnsupportedOperation,\n \"Cannot read from file %s in mode 'w'. 
Please use mode 'r', 'r+', or 'a'.\"\n % self.path):\n read_foofile1 = io.read()\n self.assertListEqual(self.foofile1.buckets[0].foos[0].my_data,\n read_foofile1.buckets[0].foos[0].my_data[:].tolist())\n\n\nclass HDF5IOWriteNoFile(TestCase):\n \"\"\" Test if file does not exist, write in mode (w, w-, x, a) is ok \"\"\"\n\n def setUp(self):\n foo1 = Foo('foo1', [0, 1, 2, 3, 4], \"I am foo1\", 17, 3.14)\n bucket1 = FooBucket('test_bucket1', [foo1])\n self.foofile1 = FooFile(buckets=[bucket1])\n self.path = 'test_write_nofile.h5'\n\n def tearDown(self):\n if os.path.exists(self.path):\n os.remove(self.path)\n\n def test_write_no_file_w_ok(self):\n self.__write_file('w')\n\n def test_write_no_file_wminus_ok(self):\n self.__write_file('w-')\n\n def test_write_no_file_x_ok(self):\n self.__write_file('x')\n\n def test_write_no_file_a_ok(self):\n self.__write_file('a')\n\n def __write_file(self, mode):\n with HDF5IO(self.path, manager=_get_manager(), mode=mode) as io:\n io.write(self.foofile1)\n\n with HDF5IO(self.path, manager=_get_manager(), mode='r') as io:\n read_foofile = io.read()\n self.assertListEqual(self.foofile1.buckets[0].foos[0].my_data,\n read_foofile.buckets[0].foos[0].my_data[:].tolist())\n\n\nclass HDF5IOWriteFileExists(TestCase):\n \"\"\" Test if file exists, write in mode (r+, w, a) is ok and write in mode r throws error \"\"\"\n\n def setUp(self):\n self.path = get_temp_filepath()\n\n foo1 = Foo('foo1', [0, 1, 2, 3, 4], \"I am foo1\", 17, 3.14)\n bucket1 = FooBucket('test_bucket1', [foo1])\n self.foofile1 = FooFile(buckets=[bucket1])\n\n foo2 = Foo('foo2', [0, 1, 2, 3, 4], \"I am foo2\", 17, 3.14)\n bucket2 = FooBucket('test_bucket2', [foo2])\n self.foofile2 = FooFile(buckets=[bucket2])\n\n with HDF5IO(self.path, manager=_get_manager(), mode='w') as io:\n io.write(self.foofile1)\n self.io = None\n\n def tearDown(self):\n if self.io is not None:\n self.io.close()\n del(self.io)\n if os.path.exists(self.path):\n os.remove(self.path)\n\n def test_write_rplus(self):\n with HDF5IO(self.path, manager=_get_manager(), mode='r+') as io:\n # even though foofile1 and foofile2 have different names, writing a\n # root object into a file that already has a root object, in r+ mode\n # should throw an error\n with self.assertRaisesWith(ValueError, \"Unable to create group (name already exists)\"):\n io.write(self.foofile2)\n\n def test_write_a(self):\n with HDF5IO(self.path, manager=_get_manager(), mode='a') as io:\n # even though foofile1 and foofile2 have different names, writing a\n # root object into a file that already has a root object, in r+ mode\n # should throw an error\n with self.assertRaisesWith(ValueError, \"Unable to create group (name already exists)\"):\n io.write(self.foofile2)\n\n def test_write_w(self):\n # mode 'w' should overwrite contents of file\n with HDF5IO(self.path, manager=_get_manager(), mode='w') as io:\n io.write(self.foofile2)\n\n with HDF5IO(self.path, manager=_get_manager(), mode='r') as io:\n read_foofile = io.read()\n self.assertListEqual(self.foofile2.buckets[0].foos[0].my_data,\n read_foofile.buckets[0].foos[0].my_data[:].tolist())\n\n def test_write_r(self):\n with HDF5IO(self.path, manager=_get_manager(), mode='r') as io:\n with self.assertRaisesWith(UnsupportedOperation,\n (\"Cannot write to file %s in mode 'r'. 
\"\n \"Please use mode 'r+', 'w', 'w-', 'x', or 'a'\") % self.path):\n io.write(self.foofile2)\n\n\nclass H5DataIOValid(TestCase):\n\n def setUp(self):\n self.paths = [get_temp_filepath(), ]\n\n self.foo1 = Foo('foo1', H5DataIO([1, 2, 3, 4, 5]), \"I am foo1\", 17, 3.14)\n bucket1 = FooBucket('test_bucket1', [self.foo1])\n foofile1 = FooFile(buckets=[bucket1])\n\n with HDF5IO(self.paths[0], manager=_get_manager(), mode='w') as io:\n io.write(foofile1)\n\n def tearDown(self):\n for path in self.paths:\n if os.path.exists(path):\n os.remove(path)\n\n def test_valid(self):\n self.assertTrue(self.foo1.my_data.valid)\n\n def test_read_valid(self):\n \"\"\"Test that h5py.H5Dataset.id.valid works as expected\"\"\"\n with HDF5IO(self.paths[0], manager=_get_manager(), mode='r') as io:\n read_foofile1 = io.read()\n self.assertTrue(read_foofile1.buckets[0].foos[0].my_data.id.valid)\n\n self.assertFalse(read_foofile1.buckets[0].foos[0].my_data.id.valid)\n\n def test_link(self):\n \"\"\"Test that wrapping of linked data within H5DataIO \"\"\"\n with HDF5IO(self.paths[0], manager=_get_manager(), mode='r') as io:\n read_foofile1 = io.read()\n\n self.foo2 = Foo('foo2', H5DataIO(data=read_foofile1.buckets[0].foos[0].my_data), \"I am foo2\", 17, 3.14)\n bucket2 = FooBucket('test_bucket2', [self.foo2])\n foofile2 = FooFile(buckets=[bucket2])\n\n self.paths.append(get_temp_filepath())\n\n with HDF5IO(self.paths[1], manager=_get_manager(), mode='w') as io:\n io.write(foofile2)\n\n self.assertTrue(self.foo2.my_data.valid) # test valid\n self.assertEqual(len(self.foo2.my_data), 5) # test len\n self.assertEqual(self.foo2.my_data.shape, (5,)) # test getattr with shape\n self.assertTrue(np.array_equal(np.array(self.foo2.my_data), [1, 2, 3, 4, 5])) # test array conversion\n\n # test loop through iterable\n match = [1, 2, 3, 4, 5]\n for (i, j) in zip(self.foo2.my_data, match):\n self.assertEqual(i, j)\n\n # test iterator\n my_iter = iter(self.foo2.my_data)\n self.assertEqual(next(my_iter), 1)\n\n # foo2.my_data dataset is now closed\n self.assertFalse(self.foo2.my_data.valid)\n\n with self.assertRaisesWith(InvalidDataIOError, \"Cannot get length of data. Data is not valid.\"):\n len(self.foo2.my_data)\n\n with self.assertRaisesWith(InvalidDataIOError, \"Cannot get attribute 'shape' of data. Data is not valid.\"):\n self.foo2.my_data.shape\n\n with self.assertRaisesWith(InvalidDataIOError, \"Cannot convert data to array. Data is not valid.\"):\n np.array(self.foo2.my_data)\n\n with self.assertRaisesWith(InvalidDataIOError, \"Cannot iterate on data. Data is not valid.\"):\n for i in self.foo2.my_data:\n pass\n\n with self.assertRaisesWith(InvalidDataIOError, \"Cannot iterate on data. 
Data is not valid.\"):\n iter(self.foo2.my_data)\n\n # re-open the file with the data linking to other file (still closed)\n with HDF5IO(self.paths[1], manager=_get_manager(), mode='r') as io:\n read_foofile2 = io.read()\n read_foo2 = read_foofile2.buckets[0].foos[0]\n\n # note that read_foo2 dataset does not have an attribute 'valid'\n self.assertEqual(len(read_foo2.my_data), 5) # test len\n self.assertEqual(read_foo2.my_data.shape, (5,)) # test getattr with shape\n self.assertTrue(np.array_equal(np.array(read_foo2.my_data), [1, 2, 3, 4, 5])) # test array conversion\n\n # test loop through iterable\n match = [1, 2, 3, 4, 5]\n for (i, j) in zip(read_foo2.my_data, match):\n self.assertEqual(i, j)\n\n # test iterator\n my_iter = iter(read_foo2.my_data)\n self.assertEqual(next(my_iter), 1)\n\n\nclass TestReadLink(TestCase):\n def setUp(self):\n self.target_path = get_temp_filepath()\n self.link_path = get_temp_filepath()\n self.root1 = GroupBuilder(name='root')\n self.subgroup = self.root1.add_group('test_group')\n self.dataset = self.subgroup.add_dataset('test_dataset', data=[1, 2, 3, 4])\n\n self.root2 = GroupBuilder(name='root')\n self.group_link = self.root2.add_link(self.subgroup, 'link_to_test_group')\n self.dataset_link = self.root2.add_link(self.dataset, 'link_to_test_dataset')\n\n with HDF5IO(self.target_path, manager=_get_manager(), mode='w') as io:\n io.write_builder(self.root1)\n self.root1.source = self.target_path\n\n with HDF5IO(self.link_path, manager=_get_manager(), mode='w') as io:\n io.write_builder(self.root2)\n self.root2.source = self.link_path\n\n def test_set_link_loc(self):\n \"\"\"\n Test that Builder location is set when it is read as a link\n \"\"\"\n read_io = HDF5IO(self.link_path, manager=_get_manager(), mode='r')\n bldr = read_io.read_builder()\n self.assertEqual(bldr['link_to_test_group'].builder.location, '/')\n self.assertEqual(bldr['link_to_test_dataset'].builder.location, '/test_group')\n read_io.close()\n\n def test_link_to_link(self):\n \"\"\"\n Test that link to link gets written and read properly\n \"\"\"\n link_to_link_path = get_temp_filepath()\n read_io1 = HDF5IO(self.link_path, manager=_get_manager(), mode='r')\n bldr1 = read_io1.read_builder()\n root3 = GroupBuilder(name='root')\n root3.add_link(bldr1['link_to_test_group'].builder, 'link_to_link')\n with HDF5IO(link_to_link_path, manager=_get_manager(), mode='w') as io:\n io.write_builder(root3)\n read_io1.close()\n\n read_io2 = HDF5IO(link_to_link_path, manager=_get_manager(), mode='r')\n bldr2 = read_io2.read_builder()\n self.assertEqual(bldr2['link_to_link'].builder.source, self.target_path)\n read_io2.close()\n"
]
| [
[
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.isfinite",
"numpy.all",
"numpy.dtype"
]
]
|
wangronin/Bayesian-Optimization | [
"ffbcf4c8813dfa603b9065355e20eda0ccb99e30"
]
| [
"unittest/test_warmdata.py"
]
| [
"import sys\n\nimport numpy as np\n\nsys.path.insert(0, \"../\")\n\nfrom bayes_optim import BO, DiscreteSpace, IntegerSpace, RealSpace\nfrom bayes_optim.surrogate import GaussianProcess, RandomForest\n\nnp.random.seed(42)\n\n\ndef obj_fun(x):\n x_r, x_i, x_d = np.array(x[:2]), x[2], x[3]\n if x_d == \"OK\":\n tmp = 0\n else:\n tmp = 1\n return np.sum((x_r + np.array([2, 2])) ** 2) + abs(x_i - 10) * 10 + tmp\n\n\ndef test_warm_data_with_GPR():\n dim = 2\n lb, ub = -5, 5\n\n def fitness(x):\n x = np.asarray(x)\n return np.sum(x ** 2)\n\n X = np.random.rand(5, dim) * (ub - lb) + lb\n y = [fitness(x) for x in X]\n space = RealSpace([lb, ub]) * dim\n\n thetaL = 1e-10 * (ub - lb) * np.ones(dim)\n thetaU = 10 * (ub - lb) * np.ones(dim)\n theta0 = np.random.rand(dim) * (thetaU - thetaL) + thetaL\n\n model = GaussianProcess(\n theta0=theta0,\n thetaL=thetaL,\n thetaU=thetaU,\n nugget=0,\n noise_estim=False,\n optimizer=\"BFGS\",\n wait_iter=3,\n random_start=dim,\n likelihood=\"concentrated\",\n eval_budget=100 * dim,\n )\n opt = BO(\n search_space=space,\n obj_fun=fitness,\n model=model,\n warm_data=(X, y),\n max_FEs=10,\n verbose=True,\n n_point=1,\n )\n assert np.all(np.asarray(opt.data) == np.asarray(opt.warm_data))\n assert opt.model.is_fitted\n opt.run()\n\n\ndef test_warm_data_with_RF():\n space = (\n RealSpace([-10, 10]) * 2\n + IntegerSpace([5, 15])\n + DiscreteSpace([\"OK\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\"])\n )\n\n X = space.sample(10)\n y = [obj_fun(x) for x in X]\n\n model = RandomForest(levels=space.levels)\n opt = BO(\n search_space=space,\n obj_fun=obj_fun,\n model=model,\n minimize=True,\n eval_type=\"list\",\n max_FEs=5,\n verbose=True,\n acquisition_fun=\"EI\",\n warm_data=(X, y),\n )\n opt.run()\n assert opt.data.shape[0] == 15\n"
]
| [
[
"numpy.array",
"numpy.random.rand",
"numpy.asarray",
"numpy.random.seed",
"numpy.sum",
"numpy.ones"
]
]
|
bartek-wojcik/graph_classification | [
"313f71bd04c3aed889e29921ff19590c6827dde4"
]
| [
"src/models/modules/graph_sage.py"
]
| [
"from torch import nn\nfrom torch_geometric.nn import (\n SAGEConv,\n global_add_pool,\n global_max_pool,\n global_mean_pool,\n)\n\n\nclass GraphSAGE(nn.Module):\n \"\"\"Flexible GraphSAGE network.\"\"\"\n\n def __init__(self, hparams: dict):\n super().__init__()\n self.hparams = hparams\n\n for param_name in [\n \"num_node_features\",\n \"num_conv_layers\",\n \"conv_size\",\n \"lin1_size\",\n \"lin2_size\",\n \"output_size\",\n ]:\n if not isinstance(hparams[param_name], int):\n raise Exception(\"Wrong hyperparameter type!\")\n\n if hparams[\"num_conv_layers\"] < 1:\n raise Exception(\"Invalid number of layers!\")\n\n if hparams[\"activation\"] == \"relu\":\n activation = nn.ReLU\n elif hparams[\"activation\"] == \"prelu\":\n activation = nn.PReLU\n else:\n raise Exception(\"Invalid activation function name.\")\n\n if hparams[\"pool_method\"] == \"add\":\n self.pooling_method = global_add_pool\n elif hparams[\"pool_method\"] == \"mean\":\n self.pooling_method = global_mean_pool\n elif hparams[\"pool_method\"] == \"max\":\n self.pooling_method = global_max_pool\n else:\n raise Exception(\"Invalid pooling method name\")\n\n self.conv_modules = nn.ModuleList()\n self.activ_modules = nn.ModuleList()\n\n normalize = hparams.get(\"normalize\", False)\n\n self.conv_modules.append(\n SAGEConv(hparams[\"num_node_features\"], hparams[\"conv_size\"], normalize=normalize)\n )\n self.activ_modules.append(activation())\n\n for _ in range(hparams[\"num_conv_layers\"] - 1):\n self.conv_modules.append(\n SAGEConv(hparams[\"conv_size\"], hparams[\"conv_size\"], normalize=normalize)\n )\n self.activ_modules.append(activation())\n\n self.lin1 = nn.Linear(hparams[\"conv_size\"], hparams[\"lin1_size\"])\n self.activ_lin1 = activation()\n\n self.lin2 = nn.Linear(hparams[\"lin1_size\"], hparams[\"lin2_size\"])\n self.activ_lin2 = activation()\n\n self.output = nn.Linear(hparams[\"lin2_size\"], hparams[\"output_size\"])\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n\n for layer, activation in zip(self.conv_modules, self.activ_modules):\n x = layer(x, edge_index)\n x = activation(x)\n\n x = self.pooling_method(x, batch)\n\n x = self.lin1(x)\n x = self.activ_lin1(x)\n\n x = self.lin2(x)\n x = self.activ_lin2(x)\n\n return self.output(x)\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.ModuleList"
]
]
|
plewandowska777/QuIT | [
"1e1ea4d57e16a6074c123ed01b22ad190384329c"
]
| [
"tests/test_quit.py"
]
| [
"import numpy as np\nimport pytest\nfrom scipy import linalg\n\nfrom quit.basicfunction import (\n basis,\n bell_state,\n bell_state_density,\n bra,\n braket,\n dagger,\n ket,\n ketbra,\n proj,\n res,\n unres,\n unvec,\n vec,\n)\n\n\[email protected](\"phi\", [np.pi, np.sqrt(2), 2j, 1 + 1j, 1])\ndef test_ket_with_different_types_of_entries(phi):\n np.testing.assert_array_equal(np.array([[1.0], [phi]]), ket([1, phi]))\n\n\[email protected](\"dim\", [4, 8])\[email protected](\"index\", [2, 3])\ndef test_if_basis_is_correctly_defined(dim, index):\n np.testing.assert_array_equal(basis(dim, index), np.identity(dim)[index])\n\n\[email protected](\"phi\", [np.pi, np.sqrt(2), 2j, 1 + 1j, 1])\ndef test_bra_with_different_types_of_entries(phi):\n np.testing.assert_array_equal(np.array([[1, np.conjugate(phi)]]), bra([1, phi]))\n\n\[email protected](\"phi\", [[np.sqrt(2), 1j + 2]])\[email protected](\"psi\", [[np.pi, 1, 2j]])\ndef test_ketbra_is_equal_outer_product(phi, psi):\n np.testing.assert_almost_equal(ketbra(phi, psi), np.outer(phi, np.conjugate(psi)))\n\n\[email protected](\"phi\", [[np.sqrt(2), 1j + 2]])\[email protected](\"psi\", [[np.pi, 2j]])\ndef test_braket(phi, psi):\n np.testing.assert_almost_equal(braket(phi, psi), np.inner(np.conjugate(phi), psi))\n\n\[email protected](\"vector\", [[1, 2], [1, -1]])\ndef test_projector_is_idemotent(vector):\n np.testing.assert_almost_equal(proj(vector) @ proj(vector), proj(vector))\n\n\nsx = np.array([[0, 1], [1, 0]])\n\n\[email protected](\n \"symmatrix\", [np.identity(3), np.asarray(linalg.hadamard(2, dtype=complex)), np.kron(sx, sx)]\n)\ndef test_dagger_for_hermitian_matrices_is_equal_its_dagger_operation(symmatrix):\n np.testing.assert_array_equal(dagger(symmatrix), symmatrix)\n\n\ndef test_function_res_naive():\n matrix = np.array([[1, 1 - 1j], [0, 1], [np.pi, -1]])\n np.testing.assert_array_equal(res(matrix), ket(np.asarray([1, 1 - 1j, 0, 1, np.pi, -1])))\n\n\ndef test_function_res_by_using_telegraphic_notation():\n\n a = np.array([[1, 1], [2, 2]])\n b = np.identity(2)\n c = np.array([[-1, 1], [0, 2]])\n\n np.testing.assert_array_equal(np.kron(a, b) @ res(c), res(a @ c @ np.transpose(b)))\n\n\ndef test_function_res_for_rank_one_operator():\n x = [1, 1 - 1j]\n y = [np.pi, -1]\n np.testing.assert_array_equal(res(ketbra(x, y)), ket(np.kron(x, y)))\n\n\ndef test_function_res_for_vector():\n x = [1, 1 - 1j, np.pi, -1]\n np.testing.assert_array_equal(res(x), ket(x))\n\n\ndef test_function_vec_naive():\n matrix = np.array([[1, 1 - 1j], [0, 1], [np.pi, -1]])\n np.testing.assert_array_equal(vec(matrix), ket(np.asarray([1, 0, np.pi, 1 - 1j, 1, -1])))\n\n\ndef test_function_vec_as_transposition_of_res():\n matrix = np.array([[1, 1 - 1j], [0, 1], [np.pi, -1]])\n np.testing.assert_array_equal(vec(matrix), res(np.transpose(matrix)))\n\n\nsx = np.array([[0, 1], [1, 0]])\n\n\[email protected](\n \"symmatrix\", [np.identity(3), np.asarray(linalg.hadamard(2, dtype=complex)), np.kron(sx, sx)]\n)\ndef test_if_vec_and_res_is_equal_on_symmetric_matrix(symmatrix):\n np.testing.assert_array_equal(vec(symmatrix), res(symmatrix))\n\n\ndef test_unres():\n\n vector = np.array([[1, 1 - 1j, 0, 1, np.pi, -1]])\n matrix = np.array([[1, 1 - 1j], [0, 1], [np.pi, -1]])\n\n np.testing.assert_array_equal(unres(vector, (3, 2)), matrix)\n\n\ndef test_unvec():\n\n vector = np.array([[1, 1 - 1j, 0, 1, np.pi, -1]])\n matrix = np.array([[1, 1 - 1j], [0, 1], [np.pi, -1]])\n\n np.testing.assert_array_equal(unvec(vector, (3, 2)), matrix.transpose())\n\n\ndef test_bell_state():\n\n 
np.testing.assert_array_almost_equal(bell_state(2), 1 / np.sqrt(2) * ket([1, 0, 0, 1]))\n\n\n@pytest.mark.parametrize(\"dim\", [2, 3, 4])\ndef test_bell_state_density(dim):\n    np.testing.assert_array_almost_equal(\n        bell_state_density(dim),\n        ketbra(vec(np.identity(dim) / np.sqrt(dim)), vec(np.identity(dim) / np.sqrt(dim))),\n    )\n"
]
| [
[
"numpy.array",
"numpy.asarray",
"numpy.identity",
"scipy.linalg.hadamard",
"numpy.transpose",
"numpy.sqrt",
"numpy.conjugate",
"numpy.kron"
]
]
|
RomuloSouza/elections-data-visualization | [
"f089757a5dd2d80d4f5c0835332ab62cd34b74c3"
]
| [
"create_insert_candidate.py"
]
| [
"import pandas as pd\nimport bisect\n\nMAX_CANDIDATES = 1000\n\nINSERT_CANDIDATE = \"\"\"\nINSERT INTO CANDIDATO (cpf, nomeUrna, sexo, nomeCandidato, dtNascimento)\nVALUES ('{}', '{}', '{}', '{}', {});\n\"\"\"\n\n\ndef create_insert_string(row):\n dt = str(row[6])\n dt = f\"'{dt[-4:]}-{dt[2:4]}-{dt[0:2]}'\"\n if len(dt) != 12:\n dt = \"NULL\"\n\n nome_urna = str(row[3]).replace(\"'\", \"\")\n nome_candidato = str(row[5]).replace(\"'\", \"\")\n\n insert_sql = INSERT_CANDIDATE.format(\n row[2],\n nome_urna,\n row[4],\n nome_candidato,\n dt\n )\n\n return insert_sql\n\n\ndef binary_search(array, target):\n lower = 0\n upper = len(array)\n while lower < upper:\n x = lower + (upper - lower) // 2\n val = array[x]\n if target == val:\n return True\n elif target > val:\n if lower == x:\n break\n lower = x\n elif target < val:\n upper = x\n return False\n\n\ndef create_cpfs_file(cpfs):\n f = open('cpfs.txt', 'w+')\n\n for cpf in cpfs:\n f.write(str(cpf)+'\\n')\n\n f.close()\n\n\ndef add_lines_to_file(lines):\n f = open('./sql_scripts/popula_candidatos.sql', 'w+')\n f.write('USE eleicoes;\\n')\n for line in lines:\n f.write(line)\n\n f.close()\n\n\nif __name__ == '__main__':\n filename = 'new_candidates.csv'\n\n data = pd.read_csv(filename, parse_dates=['dtNascimento'])\n counter = 0\n lines = []\n cpfs = []\n for row in data.itertuples():\n # if counter < MAX_CANDIDATES:\n cpf = row[2]\n if not binary_search(cpfs, cpf):\n bisect.insort(cpfs, cpf)\n\n counter += 1\n line = create_insert_string(row)\n lines.append(line)\n # else:\n # break\n\n print('tamanho = ', counter)\n create_cpfs_file(set(cpfs))\n add_lines_to_file(lines)\n"
]
| [
[
"pandas.read_csv"
]
]
|
rhasspy/tacotron2-train | [
"9fcd05d6eb10e352d257ab158ccc56e84d2aa616"
]
| [
"tacotron2_train/dataset.py"
]
| [
"\"\"\"Classes and methods for loading phonemes and mel spectrograms\"\"\"\nimport csv\nimport json\nimport logging\nimport random\nimport typing\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch.utils.data\n\n_LOGGER = logging.getLogger(\"tacotron2_train.dataset\")\n\n# -----------------------------------------------------------------------------\n\n\nclass PhonemeMelLoader(torch.utils.data.Dataset):\n def __init__(\n self,\n id_phonemes: typing.Dict[str, torch.IntTensor],\n id_mels: typing.Dict[str, torch.FloatTensor],\n mels_dir: typing.Optional[typing.Union[str, Path]] = None,\n ):\n self.id_phonemes = id_phonemes\n self.id_mels = id_mels\n self.mels_dir = Path(mels_dir) if mels_dir else None\n\n if self.id_mels:\n self.ids = list(\n set.intersection(set(id_phonemes.keys()), set(id_mels.keys()))\n )\n assert self.ids, \"No shared utterance ids between phonemes and mels\"\n else:\n # Assume all ids will be present in mels_dir\n self.ids = list(id_phonemes.keys())\n\n random.shuffle(self.ids)\n\n def __getitem__(self, index):\n utt_id = self.ids[index]\n text = self.id_phonemes[utt_id]\n mel = self.id_mels.get(utt_id)\n\n if mel is None:\n assert self.mels_dir, f\"Missing mel for id {utt_id}, but no mels_dir\"\n mel_path = self.mels_dir / (utt_id + \".npy\")\n\n # TODO: Verify shape\n mel = torch.FloatTensor(np.load(mel_path, allow_pickle=True))\n\n # Cache mel\n self.id_mels[utt_id] = mel\n\n # phonemes, mels, lengths\n return (text, mel, len(text))\n\n def __len__(self):\n return len(self.ids)\n\n\nclass PhonemeMelCollate:\n def __init__(self, n_frames_per_step=1):\n self.n_frames_per_step = n_frames_per_step\n\n def __call__(self, batch):\n # Right zero-pad all one-hot text sequences to max input length\n input_lengths, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([len(x[0]) for x in batch]), dim=0, descending=True\n )\n max_input_len = input_lengths[0]\n\n text_padded = torch.LongTensor(len(batch), max_input_len)\n text_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n text = batch[ids_sorted_decreasing[i]][0]\n text_padded[i, : text.size(0)] = text\n\n # Right zero-pad mel-spec\n num_mels = batch[0][1].size(0)\n max_target_len = max([x[1].size(1) for x in batch])\n if (max_target_len % self.n_frames_per_step) != 0:\n max_target_len += self.n_frames_per_step - (\n max_target_len % self.n_frames_per_step\n )\n assert max_target_len % self.n_frames_per_step == 0\n\n # include mel padded and gate padded\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\n mel_padded.zero_()\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\n gate_padded.zero_()\n output_lengths = torch.LongTensor(len(batch))\n for i in range(len(ids_sorted_decreasing)):\n mel = batch[ids_sorted_decreasing[i]][1]\n mel_padded[i, :, : mel.size(1)] = mel\n gate_padded[i, mel.size(1) - 1 :] = 1\n output_lengths[i] = mel.size(1)\n\n # count number of items - characters in text\n len_x = [x[2] for x in batch]\n len_x = torch.Tensor(len_x)\n return (\n text_padded,\n input_lengths,\n mel_padded,\n gate_padded,\n output_lengths,\n len_x,\n )\n\n\n# -----------------------------------------------------------------------------\n\n\ndef load_phonemes(csv_file: typing.TextIO) -> typing.Dict[str, torch.IntTensor]:\n phonemes = {}\n reader = csv.reader(csv_file, delimiter=\"|\")\n for row in reader:\n utt_id, phoneme_str = row[0], row[1]\n phoneme_ids = [int(p) for p in phoneme_str.strip().split()]\n\n if not phoneme_ids:\n _LOGGER.warning(\"No phonemes 
for %s, dropping utterance\", utt_id)\n continue\n\n phonemes[utt_id] = torch.IntTensor(phoneme_ids)\n\n return phonemes\n\n\ndef load_mels(jsonl_file: typing.TextIO) -> typing.Dict[str, torch.FloatTensor]:\n mels = {}\n for line in jsonl_file:\n line = line.strip()\n if not line:\n continue\n\n mel_obj = json.loads(line)\n utt_id = mel_obj[\"id\"]\n mels[utt_id] = torch.FloatTensor(mel_obj[\"mel\"])\n\n return mels\n"
]
| [
[
"torch.IntTensor",
"torch.FloatTensor",
"torch.Tensor",
"numpy.load"
]
]
|
Quinn-5/FRC-2020-Vision-Tests | [
"ed4845856ac141f457d27e1457f9223ceb95e4d9"
]
| [
"Final Products/frc_vision_final.py"
]
| [
"# Contains most of the vision processing, though the most complete is under Offline_Filter\n# Likely to become the final product file for vision processing\n# The exposure options are likely needed to be adjusted per competition\n# They are currently tuned to the testing area\n# Tuning for the RealSense camera can be done easily through the ReaslSense Viewer app in a GUI\n# One thing I do still need to figure out is assigning the camera to a specific IO port number\n# Testing on my laptop, it is only visible when using one port\n\nfrom math import degrees, radians\nimport pyrealsense2 as rs2\nimport cv2\nimport numpy as np\nimport time\nfrom networktables import NetworkTables\n\n# Takes in slopes x and y, tests if they are equal to each other or any previously verified line\ndef unequal(new, old_list):\n variance = 5\n for i in old_list:\n x3, y3, x4, y4 = i[0]\n old_slope = degrees(np.arctan((y4 - y3)/(x4 - x3)))\n min_val = old_slope - variance\n max_val = old_slope + variance\n if min_val < new < max_val:\n return False\n return True\n\nNetworkTables.initialize(server='roborio-166-frc.local')\n\nsd = NetworkTables.getTable('SmartDashboard')\n# sd.putNumber('someNumber', 1234)\n# otherNumber = sd.getNumber('otherNumber')\n\nWIDTH = 640\nHEIGHT = 480\nPOINT_SAMPLES = 5\n\npipe = rs2.pipeline() # The camera's API sucks, but at least I can guarantee setings\nconfig = rs2.config()\nconfig.enable_stream(rs2.stream.color, WIDTH, HEIGHT, rs2.format.bgr8, 60)\nconfig.enable_stream(rs2.stream.depth, WIDTH, HEIGHT, rs2.format.z16, 60)\nprofile = pipe.start(config)\ns = profile.get_device().query_sensors()[1]\ns.set_option(rs2.option.brightness, 0)\ns.set_option(rs2.option.contrast, 100)\ns.set_option(rs2.option.exposure, 45)\ns.set_option(rs2.option.gain, 75)\ns.set_option(rs2.option.gamma, 100)\ns.set_option(rs2.option.hue, 0)\ns.set_option(rs2.option.saturation, 50)\ns.set_option(rs2.option.sharpness, 0)\ns.set_option(rs2.option.white_balance, 2800)\n\nX_VALS = []\nY_VALS = []\n\npointer = 0\n\n\nwhile True:\n start_time = time.time()\n\n frames = rs2.composite_frame(pipe.wait_for_frames())\n frame = rs2.video_frame(frames.get_color_frame())\n if not frame:\n continue\n\n IMG = np.asanyarray(frame.get_data())\n\n # Convert from RGB to HSV, helps with filltering\n HSV = cv2.cvtColor(IMG, cv2.COLOR_BGR2HSV)\n\n # Define upper and lower bounds for HSV variables\n LOWER_COLOR = np.array([70, 80, 255])\n UPPER_COLOR = np.array([95, 180, 255])\n\n # Create mask within hsv range\n MASK = cv2.inRange(HSV, LOWER_COLOR, UPPER_COLOR)\n\n # Various blur method testings\n BLUR = cv2.GaussianBlur(MASK, (3, 3), 0)\n MEDIAN = cv2.medianBlur(MASK, 3)\n\n # Edge detection on each test for use in line detection\n BLUR_EDGES = cv2.Canny(BLUR, 100, 200)\n MASK_EDGES = cv2.Canny(MASK, 100, 200)\n MED_EDGES = cv2.Canny(MEDIAN, 50, 150)\n\n # Empty image for drawing lines (testing)\n FILTERED_LINE_IMG = np.zeros((HEIGHT, WIDTH, 3), np.uint8)\n LINE_IMG = np.zeros((HEIGHT, WIDTH, 3), np.uint8)\n\n # Find lines in selected image\n LINES = cv2.HoughLinesP(MASK_EDGES, 1, radians(.5), 25, maxLineGap=25)\n\n if LINES is not None:\n NUM_LINES = len(LINES)\n FILTERED_LINES = []\n X_TOTAL = 0\n Y_TOTAL = 0\n for NEW_LINE in LINES:\n x1, y1, x2, y2 = NEW_LINE[0]\n new_slope = degrees(np.arctan((y2 - y1)/(x2 - x1)))\n if FILTERED_LINES:\n if (new_slope < -40 or new_slope > 40) and unequal(new_slope, FILTERED_LINES):\n FILTERED_LINES.append(NEW_LINE)\n cv2.line(FILTERED_LINE_IMG, (x1, y1), (x2, y2), (0, 255, 0), 1)\n X_TOTAL += 
x1 + x2\n Y_TOTAL += y1 + y2\n else:\n if new_slope < -40 or new_slope > 40:\n FILTERED_LINES.append(NEW_LINE)\n cv2.line(FILTERED_LINE_IMG, (x1, y1), (x2, y2), (0, 255, 0), 1)\n X_TOTAL += x1 + x2\n Y_TOTAL += y1 + y2\n\n NUM_LINES = len(FILTERED_LINES)\n if FILTERED_LINES:\n X_AVG = 0\n Y_AVG = 0\n if len(X_VALS) == POINT_SAMPLES:\n X_VALS[pointer] = X_TOTAL/(2*NUM_LINES)\n Y_VALS[pointer] = Y_TOTAL/(2*NUM_LINES)\n for i in range(len(X_VALS)):\n X_AVG += X_VALS[i]\n Y_AVG += Y_VALS[i]\n X_AVG /= POINT_SAMPLES\n Y_AVG /= POINT_SAMPLES\n\n cv2.circle(FILTERED_LINE_IMG, (int(X_AVG), int(Y_AVG)), 5, [255, 255, 255], -1)\n else:\n X_VALS.append(int(X_TOTAL/(2*NUM_LINES)))\n Y_VALS.append(int(Y_TOTAL/(2*NUM_LINES)))\n\n for LINE in LINES:\n x1, y1, x2, y2 = LINE[0]\n cv2.line(LINE_IMG, (x1, y1), (x2, y2), (0, 255, 0), 1)\n end_time = time.time()\n\n cv2.imshow(\"og lines\", LINE_IMG)\n cv2.imshow(\"lines\", FILTERED_LINE_IMG)\n cv2.imshow('OG', IMG) # Open the gallery of all my filtered works\n cv2.imshow('Mask', MASK)\n cv2.imshow('blur', BLUR_EDGES)\n cv2.imshow('median', MEDIAN)\n cv2.imshow('med', MED_EDGES)\n cv2.imshow('Mask Edges', MASK_EDGES)\n\n if pointer == POINT_SAMPLES - 1:\n pointer = 0\n else:\n pointer += 1\n\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n end_time = time.time()\n print(end_time - start_time)\n\ncv2.destroyAllWindows()\npipe.stop()\n"
]
| [
[
"numpy.array",
"numpy.arctan",
"numpy.zeros"
]
]
|
RnoB/fly-matrix | [
"50733b1be715fccb386c1a4bb9e57f19d82a0078"
]
| [
"dbGen/zebraDB.py"
]
| [
"import sqlite3\r\nimport itertools\r\nimport numpy as np\r\nfrom random import shuffle\r\n\r\nprojectDB = 'zebraProjects.db'\r\nexpDB = 'zebraExperiments.db'\r\n\r\nproject = 'DecisionGeometry'\r\n\r\nnPosts = 10\r\nnCubes = 3\r\n\r\nposts = range(1,2)\r\nposts = list(itertools.chain.from_iterable(itertools.repeat(x, 10) for x in posts))\r\ndistances = [5.0]\r\nstart_ang_split = 8\r\nangles2 = [np.pi/3, 7*np.pi/18, np.pi]\r\nangles3 = [5*np.pi/18, 5*np.pi/18, 2*np.pi/3]\r\n\r\n# creates empty database\r\ndef FirstGen():\r\n\t# establish a connection to the project database\r\n\tconn = sqlite3.connect(projectDB)\r\n\t# connect a cursor that goes through the project database\r\n\tcursorProject = conn.cursor()\r\n\t# create a table with specified column names and datatypes\r\n\tcursorProject.execute('''CREATE TABLE projects (project text, exp integer,\r\n\t\t\t\t\t\t\t\t\t\treplicate integer,\r\n\t\t\t\t\t\t\t\t\t\ttExp int,tSwitch integer, \r\n\t\t\t\t\t\t\t\t\t\tnSwitch integer,\r\n\t\t\t\t\t\t\t\t\t\tnStimuli integer, \r\n\t\t\t\t\t\t\t\t\t\tpost0 text,post1 text, \r\n\t\t\t\t\t\t\t\t\t\tpost2 text,post3 text, \r\n\t\t\t\t\t\t\t\t\t\tpost4 text,post5 text, \r\n\t\t\t\t\t\t\t\t\t\tpost6 text,post7 text, \r\n\t\t\t\t\t\t\t\t\t\tpost8 text,post9 text,\r\n\t\t\t\t\t\t\t\t\t\tcube0 text,cube1 text,\r\n\t\t\t\t\t\t\t\t\t\tcube2 text)''')\r\n\t# commit and close connection\r\n\tconn.commit()\r\n\tconn.close()\r\n\t\r\n\t# establish a connection to the experiment database\r\n\tconn = sqlite3.connect(expDB)\r\n\t# connectr a cursor that goes through the experiment database\r\n\tcursorExperiment = conn.cursor()\r\n\t# create a table with specified column names and datatypes\r\n\tcursorExperiment.execute('''CREATE TABLE experiments (project text, exp integer,\r\n\t\t\t\t\t\t\t\t\t\treplicate integer,\r\n\t\t\t\t\t\t\t\t\t\tdate text, tStart text, tEnd text, \r\n\t\t\t\t\t\t\t\t\t\tnameExperimenter text,expId text)''')\r\n\t# commit and close connection\r\n\tconn.commit()\r\n\tconn.close()\r\n\r\n\r\n# creates a single post fixation control\r\ndef dataController():\r\n\tdata=[]\r\n\tfor j in range(0,nPosts):\r\n\t\tdataStimuli = 'None'\r\n\t\tdata.append(str(dataStimuli))\r\n\r\n\tcube_id = np.random.randint(0,nCubes)\r\n\tfor j in range(0,nCubes):\r\n\t\tif j == cube_id:\r\n\t\t\tdataStimuli = {'position' : (0.0,0.0)}\r\n\t\telse:\r\n\t\t\tdataStimuli = 'None'\r\n\t\tdata.append(str(dataStimuli))\r\n\r\n\treturn data \r\n\r\n\r\n# define stimuli based on experimental condition\r\n# the expType parameter defines which parameter is randomised for a given fly\r\n# the other parameter is randomised between flies\r\ndef defineStimuli(expType, nSwitch, nReplicates=2, N=2, d=1.0, ang=np.pi/6, picked=[]):\r\n\tdataReplicates = []\r\n\tdataControl = dataController()\r\n\tdata = []\r\n\t\r\n\t# define stimuli nSwitch-2 times since we have two control stimuli - one in the beginning; other in the end\r\n\tfor k in range(0,nSwitch-2):\r\n\t\tdata.append([])\r\n\t\t# pick a random start angle (one of six angles obtained by splitting angle of symmetry for N posts in six parts)\r\n\t\tstart_ang = 2*np.pi*(np.random.randint(start_ang_split)+1) / start_ang_split\r\n\t\t# pick a random angle that will be the angle between successive posts\r\n\t\tang = -1.0\r\n\t\twhile ang in picked or ang < 0.0:\r\n\t\t\tang = np.random.randint(3)\r\n\t\tpicked.append(ang)\r\n\r\n\t\tcolumn_id = np.random.randint(0,4)\r\n\t\tcube_id = np.random.randint(0,nCubes)\r\n\r\n\t\tfor j in range(0,nPosts):\r\n\t\t\tif j == 
column_id:\r\n\t\t\t\tr = d\r\n\t\t\t\tangle = angles2[ang] if N == 2 else angles3[ang]\r\n\t\t\t\ttheta = start_ang + j*angle\r\n\t\t\t\tx = r*np.cos(theta)\r\n\t\t\t\ty = r*np.sin(theta)\r\n\t\t\t\tdataStimuli = {'position' : (x,y), 'distance' : r, 'angle' : angle}\r\n\t\t\telse:\r\n\t\t\t\tdataStimuli = 'None'\r\n\t\t\tdata[-1].append(str(dataStimuli))\r\n\r\n\t\tfor j in range(0,nCubes):\r\n\t\t\tif j == cube_id:\r\n\t\t\t\tdataStimuli = {'position' : (0.0,0.0)}\r\n\t\t\telse:\r\n\t\t\t\tdataStimuli = 'None'\r\n\t\t\tdata[-1].append(str(dataStimuli))\r\n\r\n\t# permute replicates before adding them to the database\r\n\t# sandwich permutations between controls\r\n\r\n\tfor k in range(0,nReplicates):\r\n\t\tdataReplicates.append([])\r\n\t\tdataReplicates[-1].append(dataControl)\r\n\t\tprint(data)\r\n\t\tshuffle(data)\r\n\t\tfor idx, dataStimulus in enumerate(data):\r\n\t\t\tdataReplicates[-1].append(dataStimulus)\r\n\t\tdataReplicates[-1].append(dataControl)\r\n\r\n\treturn dataReplicates\r\n\r\n\r\n# write defined stimuli to database\r\ndef writeStimuli(cursor,projects,exp,nReplicate,tExp,tSwitch,nSwitch,data):\r\n\r\n\tfor perm in range(0, nReplicate):\r\n\t\tfor k in range(0, nSwitch):\r\n\t\t\tvalues = [projects, exp, perm, tExp, tSwitch, nSwitch, k, str(data[perm][k][0]), str(data[perm][k][1]), str(data[perm][k][2]), str(data[perm][k][3]), str(data[perm][k][4]), str(data[perm][k][5]), str(data[perm][k][6]), str(data[perm][k][7]), str(data[perm][k][8]), str(data[perm][k][9]), str(data[perm][k][10]), str(data[perm][k][11]), str(data[perm][k][12])]\r\n\t\t\tcursor.execute(\"INSERT INTO projects VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",values)\r\n\r\n\r\n# fill database created by FirstGen\r\ndef main():\r\n\t# open the database\r\n\tconn = sqlite3.connect(projectDB)\r\n\tcursorProject = conn.cursor()\r\n\r\n\t# check the number of experiments in the project\r\n\tcursorProject.execute(\"Select exp from projects where project = ? \",(project,))\r\n\tfetched = cursorProject.fetchall()\r\n\texpType = np.unique(fetched)\r\n\tprint(expType)\r\n\tprint(len(expType))\r\n\r\n\tif len(expType) == 0:\r\n\t\texp = -1\r\n\telse:\r\n\t\texp = int(np.amax(expType))\r\n\r\n\r\n\t# define expType based on what variable needs randomisation within individual i.e. your experimental parameter\r\n\ttSwitch = 3\r\n\tnSwitch = 5\r\n\ttExp = tSwitch*nSwitch \r\n\tnReplicates = 5\r\n\r\n\tN = 2\r\n\td = 1.0\r\n\tang = np.pi/6\r\n\r\n\tfor N in posts:\r\n\t\tfor d in distances:\r\n\t\t\tpicked_angs = []\r\n\t\t\t# write your new stimuli\r\n\t\t\texp += 1\r\n\t\t\tdata = defineStimuli(expType, nSwitch, nReplicates, N=N, d=d, ang=ang, picked=picked_angs)\r\n\t\t\twriteStimuli(cursorProject, project, exp, nReplicate = nReplicates, tExp = tExp, tSwitch = tSwitch, nSwitch = nSwitch, data=data)\r\n\r\n\r\n\t# commit and close connection\r\n\tconn.commit()\r\n\tconn.close()\r\n\r\nif __name__ == '__main__':\r\n\tmain()"
]
| [
[
"numpy.sin",
"numpy.random.randint",
"numpy.amax",
"numpy.cos",
"numpy.unique"
]
]
|
michael7198/deeplenstronomy_tests | [
"e310684669f403969e169843185255a468c299d9"
]
| [
"exploded_setup_old/test/test_ImSim/test_plot_sims.py"
]
| [
"from deeplenstronomy.ImSim import image_sim\nfrom deeplenstronomy.ImSim import plot_sim\nfrom deeplenstronomy.PopSim.population import Population\nimport pytest\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy.testing as npt\n\n\nclass TestPlotSim(object):\n\n def setup(self):\n pass\n\n def test_image_sim(self):\n pop = Population()\n kwargs_params, kwargs_model = pop.draw_model(with_lens_light=True, with_quasar=True)\n\n kwargs_band = {'read_noise': 10,\n 'pixel_scale': 0.263,\n 'ccd_gain': 4.5,\n 'exposure_time': 90.,\n 'magnitude_zero_point': 30,\n 'num_exposures': 10,\n 'psf_type': 'GAUSSIAN',\n 'seeing': 1.0,\n 'sky_brightness': 21}\n numpix = 10\n # print(kwargs_params['kwargs_lens'])\n image = image_sim.sim_image(numpix, kwargs_band, kwargs_model, kwargs_params, kwargs_numerics={})\n f, ax = plt.subplots(1, 1, figsize=(4, 4))\n plot_sim.plot_single_band(ax, image)\n plt.close()\n\n\nif __name__ == '__main__':\n pytest.main()"
]
| [
[
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots"
]
]
|
tdeboer-ilmn/hail | [
"98fffc9b4e13cd5d5ced8322112894361d0b7052"
]
| [
"hail/python/hail/stats/linear_mixed_model.py"
]
| [
"import numpy as np\nimport pandas as pd\n\nimport hail as hl\nfrom hail.linalg import BlockMatrix\nfrom hail.linalg.utils import _check_dims\nfrom hail.table import Table\nfrom hail.typecheck import typecheck_method, nullable, tupleof, oneof, numeric\nfrom hail.utils.java import Env, info\nfrom hail.utils.misc import plural\n\n\nclass LinearMixedModel(object):\n r\"\"\"Class representing a linear mixed model.\n\n .. include:: ../_templates/experimental.rst\n\n :class:`LinearMixedModel` represents a linear model of the form\n\n .. math::\n\n y \\sim \\mathrm{N}(X \\beta, \\, \\sigma^2 K + \\tau^2 I)\n\n where\n\n - :math:`\\mathrm{N}` is a :math:`n`-dimensional normal distribution.\n - :math:`y` is a known vector of :math:`n` observations.\n - :math:`X` is a known :math:`n \\times p` design matrix for :math:`p` fixed effects.\n - :math:`K` is a known :math:`n \\times n` positive semi-definite kernel.\n - :math:`I` is the :math:`n \\times n` identity matrix.\n - :math:`\\beta` is a :math:`p`-parameter vector of fixed effects.\n - :math:`\\sigma^2` is the variance parameter on :math:`K`.\n - :math:`\\tau^2` is the variance parameter on :math:`I`.\n\n In particular, the residuals for the :math:`i^\\mathit{th}` and :math:`j^\\mathit{th}`\n observations have covariance :math:`\\sigma^2 K_{ij}` for :math:`i \\neq j`.\n\n This model is equivalent to a\n `mixed model <https://en.wikipedia.org/wiki/Mixed_model>`__\n of the form\n\n .. math::\n\n y = X \\beta + Z u + \\epsilon\n\n by setting :math:`K = ZZ^T` where\n\n - :math:`Z` is a known :math:`n \\times r` design matrix for :math:`r` random effects.\n - :math:`u` is a :math:`r`-vector of random effects drawn from :math:`\\mathrm{N}(0, \\sigma^2 I)`.\n - :math:`\\epsilon` is a :math:`n`-vector of random errors drawn from :math:`\\mathrm{N}(0, \\tau^2 I)`.\n\n However, :class:`LinearMixedModel` does not itself realize :math:`K` as a linear kernel\n with respect to random effects, nor does it take :math:`K` explicitly as input. Rather,\n via the eigendecomposion :math:`K = U S U^T`, the the class leverages a third, decorrelated\n form of the model\n\n .. math::\n\n Py \\sim \\mathrm{N}(PX \\beta, \\, \\sigma^2 (\\gamma S + I))\n\n where\n\n - :math:`P = U^T: \\mathbb{R}^n \\rightarrow \\mathbb{R}^n` is an orthonormal transformation\n that decorrelates the observations. The rows of :math:`P` are an eigenbasis for :math:`K`.\n - :math:`S` is the :math:`n \\times n` diagonal matrix of corresponding eigenvalues.\n - :math:`\\gamma = \\frac{\\sigma^2}{\\tau^2}` is the ratio of variance parameters.\n\n Hence, the triple :math:`(Py, PX, S)` determines the probability\n of the observations for any choice of model parameters, and is\n therefore sufficient for inference.\n This triple, with S encoded as a vector, is the default\n (\"full-rank\") initialization of the class.\n\n :class:`LinearMixedModel` also provides an efficient strategy to fit the\n model above with :math:`K` replaced by its rank-:math:`r` approximation\n :math:`K_r = P_r^T S_r P_r` where\n\n - :math:`P_r: \\mathbb{R}^n \\rightarrow \\mathbb{R}^r` has orthonormal rows\n consisting of the top :math:`r` eigenvectors of :math:`K`.\n - :math:`S_r` is the :math:`r \\times r` diagonal matrix of corresponding\n non-zero eigenvalues.\n\n For this low-rank model, the quintuple :math:`(P_r y, P_r X, S_r, y, X)`\n is similarly sufficient for inference and corresponds to the \"low-rank\"\n initialization of the class. 
Morally, :math:`y` and :math:`X` are\n required for low-rank inference because the diagonal :math:`\\gamma S + I`\n is always full-rank.\n\n If :math:`K` actually has rank :math:`r`, then :math:`K = K_r`\n and the low-rank and full-rank models are equivalent.\n Hence low-rank inference provides a more efficient, equally-exact\n algorithm for fitting the full-rank model.\n This situation arises, for example, when :math:`K` is the linear kernel\n of a mixed model with fewer random effects than observations.\n\n Even when :math:`K` has full rank, using a lower-rank approximation may\n be an effective from of regularization, in addition to boosting\n computational efficiency.\n\n **Initialization**\n\n The class may be initialized directly or with one of two methods:\n\n - :meth:`from_kinship` takes :math:`y`, :math:`X`, and :math:`K` as ndarrays.\n The model is always full-rank.\n\n - :meth:`from_random_effects` takes :math:`y` and :math:`X` as ndarrays and\n :math:`Z` as an ndarray or block matrix. The model is full-rank if and\n only if :math:`n \\leq m`.\n\n Direct full-rank initialization takes :math:`Py`, :math:`PX`, and :math:`S`\n as ndarrays. The following class attributes are set:\n\n .. list-table::\n :header-rows: 1\n\n * - Attribute\n - Type\n - Value\n * - `low_rank`\n - bool\n - ``False``\n * - `n`\n - int\n - Number of observations :math:`n`\n * - `f`\n - int\n - Number of fixed effects :math:`p`\n * - `r`\n - int\n - Effective number of random effects, must equal :math:`n`\n * - `py`\n - ndarray\n - Rotated response vector :math:`P y` with shape :math:`(n)`\n * - `px`\n - ndarray\n - Rotated design matrix :math:`P X` with shape :math:`(n, p)`\n * - `s`\n - ndarray\n - Eigenvalues vector :math:`S` of :math:`K` with shape :math:`(n)`\n * - `p_path`\n - str\n - Path at which :math:`P` is stored as a block matrix\n\n Direct low-rank initialization takes :math:`P_r y`, :math:`P_r X`, :math:`S_r`,\n :math:`y`, and :math:`X` as ndarrays. The following class attributes are set:\n\n .. list-table::\n :header-rows: 1\n\n * - Attribute\n - Type\n - Value\n * - `low_rank`\n - bool\n - ``True``\n * - `n`\n - int\n - Number of observations :math:`n`\n * - `f`\n - int\n - Number of fixed effects :math:`p`\n * - `r`\n - int\n - Effective number of random effects, must be less than :math:`n`\n * - `py`\n - ndarray\n - Projected response vector :math:`P_r y` with shape :math:`(r)`\n * - `px`\n - ndarray\n - Projected design matrix :math:`P_r X` with shape :math:`(r, p)`\n * - `s`\n - ndarray\n - Eigenvalues vector :math:`S_r` of :math:`K_r` with shape :math:`(r)`\n * - `y`\n - ndarray\n - Response vector with shape :math:`(n)`\n * - `x`\n - ndarray\n - Design matrix with shape :math:`(n, p)`\n * - `p_path`\n - str\n - Path at which :math:`P` is stored as a block matrix\n\n **Fitting the model**\n\n :meth:`fit` uses `restricted maximum likelihood\n <https://en.wikipedia.org/wiki/Restricted_maximum_likelihood>`__ (REML)\n to estimate :math:`(\\beta, \\sigma^2, \\tau^2)`.\n\n This is done by numerical optimization of the univariate function\n :meth:`compute_neg_log_reml`, which itself optimizes REML constrained to a\n fixed ratio of variance parameters. Each evaluation of\n :meth:`compute_neg_log_reml` has computational complexity\n\n .. math::\n\n \\mathit{O}(rp^2 + p^3).\n\n :meth:`fit` adds the following attributes at this estimate.\n\n .. 
list-table::\n :header-rows: 1\n\n * - Attribute\n - Type\n - Value\n * - `beta`\n - ndarray\n - :math:`\\beta`\n * - `sigma_sq`\n - float\n - :math:`\\sigma^2`\n * - `tau_sq`\n - float\n - :math:`\\tau^2`\n * - `gamma`\n - float\n - :math:`\\gamma = \\frac{\\sigma^2}{\\tau^2}`\n * - `log_gamma`\n - float\n - :math:`\\log{\\gamma}`\n * - `h_sq`\n - float\n - :math:`\\mathit{h}^2 = \\frac{\\sigma^2}{\\sigma^2 + \\tau^2}`\n * - `h_sq_standard_error`\n - float\n - asymptotic estimate of :math:`\\mathit{h}^2` standard error\n\n **Testing alternative models**\n\n The model is also equivalent to its augmentation\n\n .. math::\n\n y \\sim \\mathrm{N}\\left(x_\\star\\beta_\\star + X \\beta, \\, \\sigma^2 K + \\tau^2 I\\right)\n\n by an additional covariate of interest :math:`x_\\star` under the\n null hypothesis that the corresponding fixed effect parameter\n :math:`\\beta_\\star` is zero. Similarly to initialization, full-rank testing\n of the alternative hypothesis :math:`\\beta_\\star \\neq 0` requires\n :math:`P x_\\star`, whereas the low-rank testing requires :math:`P_r x_\\star`\n and :math:`x_\\star`.\n\n After running :meth:`fit` to fit the null model, one can test each of a\n collection of alternatives using either of two implementations of the\n likelihood ratio test:\n\n - :meth:`fit_alternatives_numpy` takes one or two ndarrays. It is a pure Python\n method that evaluates alternatives serially on leader (master).\n\n - :meth:`fit_alternatives` takes one or two paths to block matrices. It\n evaluates alternatives in parallel on the workers.\n\n Per alternative, both have computational complexity\n\n .. math::\n\n \\mathit{O}(rp + p^3).\n\n Parameters\n ----------\n py: :class:`numpy.ndarray`\n Projected response vector :math:`P_r y` with shape :math:`(r)`.\n px: :class:`numpy.ndarray`\n Projected design matrix :math:`P_r X` with shape :math:`(r, p)`.\n s: :class:`numpy.ndarray`\n Eigenvalues vector :math:`S` with shape :math:`(r)`.\n y: :class:`numpy.ndarray`, optional\n Response vector with shape :math:`(n)`.\n Include for low-rank inference.\n x: :class:`numpy.ndarray`, optional\n Design matrix with shape :math:`(n, p)`.\n Include for low-rank inference.\n p_path: :class:`str`, optional\n Path at which :math:`P` has been stored as a block matrix.\n \"\"\"\n @typecheck_method(py=np.ndarray,\n px=np.ndarray,\n s=np.ndarray,\n y=nullable(np.ndarray),\n x=nullable(np.ndarray),\n p_path=nullable(str))\n def __init__(self, py, px, s, y=None, x=None, p_path=None):\n if y is None and x is None:\n low_rank = False\n elif y is not None and x is not None:\n low_rank = True\n else:\n raise ValueError('for low-rank, set both y and x; for full-rank, do not set y or x.')\n\n _check_dims(py, 'py', 1)\n _check_dims(px, 'px', 2)\n _check_dims(s, 's', 1)\n\n r = s.size\n f = px.shape[1]\n\n if py.size != r:\n raise ValueError(\"py and s must have the same size\")\n if px.shape[0] != r:\n raise ValueError(\"px must have the same number of rows as the size of s\")\n if low_rank:\n _check_dims(y, 'y', 1)\n _check_dims(x, 'x', 2)\n n = y.size\n if n <= r:\n raise ValueError(\"size of y must be larger than the size of s\")\n if x.shape[0] != n:\n raise ValueError(\"x must have the same number of rows as the size of y\")\n if x.shape[1] != f:\n raise ValueError(\"px and x must have the same number columns\")\n else:\n n = r\n\n if p_path is not None:\n n_rows, n_cols = BlockMatrix.read(p_path).shape\n if n_cols != n:\n raise ValueError(\"LinearMixedModel: Number of columns in the block \"\n f\"matrix at 
'p_path' ({n_cols}) must equal \"\n f\"the size of 'y' ({n})\")\n if n_rows != r:\n raise ValueError(\"LinearMixedModel: Number of rows in the block \"\n f\"matrix at 'p_path' ({n_rows}) must equal \"\n f\"the size of 'py' ({r})\")\n\n self.low_rank = low_rank\n self.n = n\n self.f = f\n self.r = r\n self.py = py\n self.px = px\n self.s = s\n self.y = y\n self.x = x\n self.p_path = p_path\n\n self._check_dof()\n\n self.beta = None\n self.sigma_sq = None\n self.tau_sq = None\n self.gamma = None\n self.log_gamma = None\n self.h_sq = None\n self.h_sq_standard_error = None\n self.optimize_result = None\n\n self._fitted = False\n\n if low_rank:\n self._yty = y @ y\n self._xty = x.T @ y\n self._xtx = x.T @ x\n\n self._dof = n - f\n self._d = None\n self._ydy = None\n self._xdy = None\n self._xdx = None\n\n self._dof_alt = n - (f + 1)\n self._d_alt = None\n self._ydy_alt = None\n self._xdy_alt = np.zeros(f + 1)\n self._xdx_alt = np.zeros((f + 1, f + 1))\n\n self._residual_sq = None\n\n self._scala_model = None\n\n def _reset(self):\n self._fitted = False\n\n self.beta = None\n self.sigma_sq = None\n self.tau_sq = None\n self.gamma = None\n self.log_gamma = None\n self.h_sq = None\n self.h_sq_standard_error = None\n self.optimize_result = None\n\n def compute_neg_log_reml(self, log_gamma, return_parameters=False):\n r\"\"\"Compute negative log REML constrained to a fixed value\n of :math:`\\log{\\gamma}`.\n\n This function computes the triple :math:`(\\beta, \\sigma^2, \\tau^2)` with\n :math:`\\gamma = \\frac{\\sigma^2}{\\tau^2}` at which the restricted\n likelihood is maximized and returns the negative of the restricted log\n likelihood at these parameters (shifted by the constant defined below).\n\n The implementation has complexity :math:`\\mathit{O}(rp^2 + p^3)` and is\n inspired by `FaST linear mixed models for genome-wide association studies (2011)\n <https://www.nature.com/articles/nmeth.1681>`__.\n\n The formulae follow from `Bayesian Inference for Variance Components Using Only Error Contrasts (1974)\n <http://faculty.dbmi.pitt.edu/day/Bioinf2132-advanced-Bayes-and-R/previousDocuments/Bioinf2132-documents-2016/2016-11-22/Harville-1974.pdf>`__.\n Harville derives that for fixed covariance :math:`V`, the restricted\n likelihood of the variance parameter :math:`V` in the model\n\n .. math::\n\n y \\sim \\mathrm{N}(X \\beta, \\, V)\n\n is given by\n\n .. math::\n\n (2\\pi)^{-\\frac{1}{2}(n - p)}\n \\det(X^T X)^\\frac{1}{2}\n \\det(V)^{-\\frac{1}{2}}\n \\det(X^T V^{-1} X)^{-\\frac{1}{2}}\n e^{-\\frac{1}{2}(y - X\\hat\\beta)^T V^{-1}(y - X\\hat\\beta)}.\n\n with\n\n .. math::\n\n \\hat\\beta = (X^T V^{-1} X)^{-1} X^T V^{-1} y.\n\n In our case, the variance is\n\n .. math::\n\n V = \\sigma^2 K + \\tau^2 I = \\sigma^2 (K + \\gamma^{-1} I)\n\n which is determined up to scale by any fixed value of the ratio\n :math:`\\gamma`. So for input :math:`\\log \\gamma`, the\n negative restricted log likelihood is minimized at\n :math:`(\\hat\\beta, \\hat\\sigma^2)` with :math:`\\hat\\beta` as above and\n\n .. math::\n\n \\hat\\sigma^2 = \\frac{1}{n - p}(y - X\\hat\\beta)^T (K + \\gamma^{-1} I)^{-1}(y - X\\hat\\beta).\n\n For :math:`\\hat V` at this :math:`(\\hat\\beta, \\hat\\sigma^2, \\gamma)`,\n the exponent in the likelihood reduces to :math:`-\\frac{1}{2}(n-p)`, so\n the negative restricted log likelihood may be expressed as\n\n .. math::\n\n \\frac{1}{2}\\left(\\log \\det(\\hat V) + \\log\\det(X^T \\hat V^{-1} X)\\right) + C\n\n where\n\n .. 
math::\n\n C = \\frac{1}{2}\\left(n - p + (n - p)\\log(2\\pi) - \\log\\det(X^T X)\\right)\n\n only depends on :math:`X`. :meth:`compute_neg_log_reml` returns the value of\n the first term, omitting the constant term.\n\n Parameters\n ----------\n log_gamma: :obj:`float`\n Value of :math:`\\log{\\gamma}`.\n return_parameters:\n If ``True``, also return :math:`\\beta`, :math:`\\sigma^2`,\n and :math:`\\tau^2`.\n\n Returns\n -------\n :obj:`float` or (:obj:`float`, :class:`numpy.ndarray`, :obj:`float`, :obj:`float`)\n If `return_parameters` is ``False``, returns (shifted) negative log REML.\n Otherwise, returns (shifted) negative log REML, :math:`\\beta`, :math:`\\sigma^2`,\n and :math:`\\tau^2`.\n \"\"\"\n from scipy.linalg import solve, LinAlgError\n\n gamma = np.exp(log_gamma)\n d = 1 / (self.s + 1 / gamma)\n logdet_d = np.sum(np.log(d)) + (self.n - self.r) * log_gamma\n\n if self.low_rank:\n d -= gamma\n dpy = d * self.py\n ydy = self.py @ dpy + gamma * self._yty\n xdy = self.px.T @ dpy + gamma * self._xty\n xdx = (self.px.T * d) @ self.px + gamma * self._xtx\n else:\n dpy = d * self.py\n ydy = self.py @ dpy\n xdy = self.px.T @ dpy\n xdx = (self.px.T * d) @ self.px\n\n try:\n beta = solve(xdx, xdy, assume_a='pos')\n residual_sq = ydy - xdy.T @ beta\n sigma_sq = residual_sq / self._dof\n tau_sq = sigma_sq / gamma\n neg_log_reml = (np.linalg.slogdet(xdx)[1] - logdet_d + self._dof * np.log(sigma_sq)) / 2\n\n self._d, self._ydy, self._xdy, self._xdx = d, ydy, xdy, xdx # used in fit\n\n if return_parameters:\n return neg_log_reml, beta, sigma_sq, tau_sq\n else:\n return neg_log_reml\n except LinAlgError as e:\n raise Exception('linear algebra error while solving for REML estimate') from e\n\n @typecheck_method(log_gamma=nullable(numeric), bounds=tupleof(numeric), tol=float, maxiter=int)\n def fit(self, log_gamma=None, bounds=(-8.0, 8.0), tol=1e-8, maxiter=500):\n r\"\"\"Find the triple :math:`(\\beta, \\sigma^2, \\tau^2)` maximizing REML.\n\n This method sets the attributes `beta`, `sigma_sq`, `tau_sq`, `gamma`,\n `log_gamma`, `h_sq`, and `h_sq_standard_error` as described in the\n top-level class documentation.\n\n If `log_gamma` is provided, :meth:`fit` finds the REML solution\n with :math:`\\log{\\gamma}` constrained to this value. 
In this case,\n `h_sq_standard_error` is ``None`` since `h_sq` is not estimated.\n\n Otherwise, :meth:`fit` searches for the value of :math:`\\log{\\gamma}`\n that minimizes :meth:`compute_neg_log_reml`, and also sets the attribute\n `optimize_result` of type `scipy.optimize.OptimizeResult\n <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html>`__.\n\n Parameters\n ----------\n log_gamma: :obj:`float`, optional\n If provided, the solution is constrained to have this value of\n :math:`\\log{\\gamma}`.\n bounds: :obj:`float`, :obj:`float`\n Lower and upper bounds for :math:`\\log{\\gamma}`.\n tol: :obj:`float`\n Absolute tolerance for optimizing :math:`\\log{\\gamma}`.\n maxiter: :obj:`float`\n Maximum number of iterations for optimizing :math:`\\log{\\gamma}`.\n \"\"\"\n if self._fitted:\n self._reset()\n\n fit_log_gamma = True if log_gamma is None else False\n\n if fit_log_gamma:\n from scipy.optimize import minimize_scalar\n\n self.optimize_result = minimize_scalar(\n self.compute_neg_log_reml,\n method='bounded',\n bounds=bounds,\n options={'xatol': tol, 'maxiter': maxiter})\n\n if self.optimize_result.success:\n if self.optimize_result.x - bounds[0] < 0.001:\n raise Exception(\"failed to fit log_gamma: optimum within 0.001 of lower bound.\")\n elif bounds[1] - self.optimize_result.x < 0.001:\n raise Exception(\"failed to fit log_gamma: optimum within 0.001 of upper bound.\")\n else:\n self.log_gamma = self.optimize_result.x\n else:\n raise Exception(f'failed to fit log_gamma:\\n {self.optimize_result}')\n else:\n self.log_gamma = log_gamma\n\n _, self.beta, self.sigma_sq, self.tau_sq = self.compute_neg_log_reml(self.log_gamma, return_parameters=True)\n\n self.gamma = np.exp(self.log_gamma)\n self.h_sq = self.sigma_sq / (self.sigma_sq + self.tau_sq)\n\n self._residual_sq = self.sigma_sq * self._dof\n self._d_alt = self._d\n self._ydy_alt = self._ydy\n self._xdy_alt[1:] = self._xdy\n self._xdx_alt[1:, 1:] = self._xdx\n\n if fit_log_gamma:\n self.h_sq_standard_error = self._estimate_h_sq_standard_error()\n\n self._fitted = True\n\n def _estimate_h_sq_standard_error(self):\n epsilon = 1e-4 # parabolic interpolation radius in log_gamma space\n lg = self.log_gamma + np.array([-epsilon, 0.0, epsilon])\n h2 = 1 / (1 + np.exp(-lg))\n nll = [self.compute_neg_log_reml(lgi) for lgi in lg]\n\n if nll[1] > nll[0] or nll[1] > nll[2]:\n i = 0 if nll[1] > nll[0] else 2\n raise Exception(f'Minimum of negative log likelihood fit as {nll[1]} at log_gamma={lg[1]},'\n f'\\n but found smaller value of {nll[i]} at log_gamma={lg[i]}.'\n f'\\n Investigate by plotting the negative log likelihood function.')\n\n # Asymptotically near MLE, nLL = a * h2^2 + b * h2 + c with a = 1 / (2 * se^2)\n # By Lagrange interpolation:\n a = ((h2[2] * (nll[1] - nll[0]) + h2[1] * (nll[0] - nll[2]) + h2[0] * (nll[2] - nll[1]))\n / ((h2[1] - h2[0]) * (h2[0] - h2[2]) * (h2[2] - h2[1])))\n\n return 1 / np.sqrt(2 * a)\n\n def h_sq_normalized_lkhd(self):\n r\"\"\"Estimate the normalized likelihood of :math:`\\mathit{h}^2` over the\n discrete grid of percentiles.\n\n Examples\n --------\n Plot the estimated normalized likelihood function:\n\n >>> import matplotlib.pyplot as plt # doctest: +SKIP\n >>> plt.plot(range(101), model.h_sq_normalized_lkhd()) # doctest: +SKIP\n\n Notes\n -----\n This method may be used to visualize the approximate posterior on\n :math:`\\mathit{h}^2` under a flat prior.\n\n The resulting ndarray ``a`` has length 101 with ``a[i]`` equal to the\n maximum likelihood over all 
:math:`\\beta` and :math:`\\sigma^2` with\n :math:`\\mathit{h}^2` constrained to ``i / 100``. The values for\n ``1 <= i <= 99`` are normalized to sum to 1, and ``a[0]`` and ``a[100]``\n are set to ``nan``.\n\n Returns\n -------\n :class:`numpy.ndarray` of :obj:`float`\n Normalized likelihood values for :math:`\\mathit{h}^2`.\n \"\"\"\n log_lkhd = np.zeros(101, dtype=np.float64)\n log_lkhd[0], log_lkhd[100] = np.nan, np.nan\n\n for h2 in range(1, 100):\n gamma = h2 / (100.0 - h2)\n log_lkhd[h2] = -self.compute_neg_log_reml(np.log(gamma))\n\n log_lkhd -= np.max(log_lkhd[1:-1])\n lkhd = np.exp(log_lkhd)\n lkhd /= np.sum(lkhd[1:-1])\n return lkhd\n\n @typecheck_method(pa_t_path=str,\n a_t_path=nullable(str),\n partition_size=nullable(int))\n def fit_alternatives(self, pa_t_path, a_t_path=None, partition_size=None):\n r\"\"\"Fit and test alternative model for each augmented design matrix in parallel.\n\n Notes\n -----\n The alternative model is fit using REML constrained to the value of\n :math:`\\gamma` set by :meth:`fit`.\n\n The likelihood ratio test of fixed effect parameter :math:`\\beta_\\star`\n uses (non-restricted) maximum likelihood:\n\n .. math::\n\n \\chi^2 = 2 \\log\\left(\\frac{\n \\max_{\\beta_\\star, \\beta, \\sigma^2}\\mathrm{N}\n (y \\, | \\, x_\\star \\beta_\\star + X \\beta; \\sigma^2(K + \\gamma^{-1}I)}\n {\\max_{\\beta, \\sigma^2} \\mathrm{N}\n (y \\, | \\, x_\\star \\cdot 0 + X \\beta; \\sigma^2(K + \\gamma^{-1}I)}\n \\right)\n\n The p-value is given by the tail probability under a chi-squared\n distribution with one degree of freedom.\n\n The resulting table has the following fields:\n\n .. list-table::\n :header-rows: 1\n\n * - Field\n - Type\n - Value\n * - `idx`\n - int64\n - Index of augmented design matrix.\n * - `beta`\n - float64\n - :math:`\\beta_\\star`\n * - `sigma_sq`\n - float64\n - :math:`\\sigma^2`\n * - `chi_sq`\n - float64\n - :math:`\\chi^2`\n * - `p_value`\n - float64\n - p-value\n\n :math:`(P_r A)^T` and :math:`A^T` (if given) must have the same number\n of rows (augmentations). These rows are grouped into partitions for\n parallel processing. The number of partitions equals the ceiling of\n ``n_rows / partition_size``, and should be at least the number or cores\n to make use of all cores. By default, there is one partition per row of\n blocks in :math:`(P_r A)^T`. Setting the partition size to an exact\n (rather than approximate) divisor or multiple of the block size reduces\n superfluous shuffling of data.\n\n The number of columns in each block matrix must be less than :math:`2^{31}`.\n\n Warning\n -------\n The block matrices must be stored in row-major format, as results\n from :meth:`.BlockMatrix.write` with ``force_row_major=True`` and from\n :meth:`.BlockMatrix.write_from_entry_expr`. 
Otherwise, this method\n will produce an error message.\n\n Parameters\n ----------\n pa_t_path: :class:`str`\n Path to block matrix :math:`(P_r A)^T` with shape :math:`(m, r)`.\n Each row is a projected augmentation :math:`P_r x_\\star` of :math:`P_r X`.\n a_t_path: :class:`str`, optional\n Path to block matrix :math:`A^T` with shape :math:`(m, n)`.\n Each row is an augmentation :math:`x_\\star` of :math:`X`.\n Include for low-rank inference.\n partition_size: :obj:`int`, optional\n Number of rows to process per partition.\n Default given by block size of :math:`(P_r A)^T`.\n\n Returns\n -------\n :class:`.Table`\n Table of results for each augmented design matrix.\n \"\"\"\n from hail.table import Table\n\n self._check_dof(self.f + 1)\n\n if self.low_rank and a_t_path is None:\n raise ValueError('model is low-rank so a_t is required.')\n elif not (self.low_rank or a_t_path is None):\n raise ValueError('model is full-rank so a_t must not be set.')\n\n if self._scala_model is None:\n self._set_scala_model()\n\n backend = Env.spark_backend('LinearMixedModel.fit_alternatives')\n jfs = backend.fs._jfs\n\n if partition_size is None:\n block_size = Env.hail().linalg.BlockMatrix.readMetadata(jfs, pa_t_path).blockSize()\n partition_size = block_size\n elif partition_size <= 0:\n raise ValueError(f'partition_size must be positive, found {partition_size}')\n\n jpa_t = Env.hail().linalg.RowMatrix.readBlockMatrix(jfs, pa_t_path, partition_size)\n\n if a_t_path is None:\n maybe_ja_t = None\n else:\n maybe_ja_t = Env.hail().linalg.RowMatrix.readBlockMatrix(jfs, a_t_path, partition_size)\n\n return Table._from_java(backend._jbackend.pyFitLinearMixedModel(\n self._scala_model, jpa_t, maybe_ja_t))\n\n @typecheck_method(pa=np.ndarray, a=nullable(np.ndarray), return_pandas=bool)\n def fit_alternatives_numpy(self, pa, a=None, return_pandas=False):\n r\"\"\"Fit and test alternative model for each augmented design matrix.\n\n Notes\n -----\n This Python-only implementation runs serially on leader (master). See\n the scalable implementation :meth:`fit_alternatives` for documentation\n of the returned table.\n\n Parameters\n ----------\n pa: :class:`numpy.ndarray`\n Projected matrix :math:`P_r A` of alternatives with shape :math:`(r, m)`.\n Each column is a projected augmentation :math:`P_r x_\\star` of :math:`P_r X`.\n a: :class:`numpy.ndarray`, optional\n Matrix :math:`A` of alternatives with shape :math:`(n, m)`.\n Each column is an augmentation :math:`x_\\star` of :math:`X`.\n Required for low-rank inference.\n return_pandas: :obj:`bool`\n If true, return pandas dataframe. If false, return Hail table.\n\n Returns\n -------\n :class:`.Table` or :class:`.pandas.DataFrame`\n Table of results for each augmented design matrix.\n \"\"\"\n self._check_dof(self.f + 1)\n\n if not self._fitted:\n raise Exception(\"null model is not fit. 
Run 'fit' first.\")\n\n n_cols = pa.shape[1]\n assert pa.shape[0] == self.r\n\n if self.low_rank:\n assert a.shape[0] == self.n and a.shape[1] == n_cols\n data = [(i,) + self._fit_alternative_numpy(pa[:, i], a[:, i]) for i in range(n_cols)]\n else:\n data = [(i,) + self._fit_alternative_numpy(pa[:, i], None) for i in range(n_cols)]\n\n df = pd.DataFrame.from_records(data, columns=['idx', 'beta', 'sigma_sq', 'chi_sq', 'p_value'])\n\n if return_pandas:\n return df\n else:\n return Table.from_pandas(df, key='idx')\n\n def _fit_alternative_numpy(self, pa, a):\n from scipy.linalg import solve, LinAlgError\n from scipy.stats.distributions import chi2\n\n gamma = self.gamma\n dpa = self._d_alt * pa\n\n # single thread => no need to copy\n ydy = self._ydy_alt\n xdy = self._xdy_alt\n xdx = self._xdx_alt\n\n if self.low_rank:\n xdy[0] = self.py @ dpa + gamma * (self.y @ a)\n xdx[0, 0] = pa @ dpa + gamma * (a @ a)\n xdx[0, 1:] = self.px.T @ dpa + gamma * (self.x.T @ a)\n else:\n xdy[0] = self.py @ dpa\n xdx[0, 0] = pa @ dpa\n xdx[0, 1:] = self.px.T @ dpa\n\n try:\n beta = solve(xdx, xdy, assume_a='pos') # only uses upper triangle\n residual_sq = ydy - xdy.T @ beta\n sigma_sq = residual_sq / self._dof_alt\n chi_sq = self.n * np.log(self._residual_sq / residual_sq) # division => precision\n p_value = chi2.sf(chi_sq, 1)\n\n return beta[0], sigma_sq, chi_sq, p_value\n except LinAlgError:\n return tuple(4 * [float('nan')])\n\n def _set_scala_model(self):\n from hail.utils.java import Env\n from hail.linalg import _jarray_from_ndarray, _breeze_from_ndarray\n\n if not self._fitted:\n raise Exception(\"null model is not fit. Run 'fit' first.\")\n\n self._scala_model = Env.hail().stats.LinearMixedModel.pyApply(\n self.gamma,\n self._residual_sq,\n _jarray_from_ndarray(self.py),\n _breeze_from_ndarray(self.px),\n _jarray_from_ndarray(self._d_alt),\n self._ydy_alt,\n _jarray_from_ndarray(self._xdy_alt),\n _breeze_from_ndarray(self._xdx_alt),\n _jarray_from_ndarray(self.y) if self.low_rank else None,\n _breeze_from_ndarray(self.x) if self.low_rank else None\n )\n\n def _check_dof(self, f=None):\n if f is None:\n f = self.f\n dof = self.n - f\n if dof <= 0:\n raise ValueError(f\"{self.n} {plural('observation', self.n)} with {f} fixed {plural('effect', f)} \"\n f\"implies {dof} {plural('degree', dof)} of freedom. Must be positive.\")\n\n @classmethod\n @typecheck_method(y=np.ndarray,\n x=np.ndarray,\n k=np.ndarray,\n p_path=nullable(str),\n overwrite=bool)\n def from_kinship(cls, y, x, k, p_path=None, overwrite=False):\n r\"\"\"Initializes a model from :math:`y`, :math:`X`, and :math:`K`.\n\n Examples\n --------\n >>> from hail.stats import LinearMixedModel\n >>> y = np.array([0.0, 1.0, 8.0, 9.0])\n >>> x = np.array([[1.0, 0.0],\n ... [1.0, 2.0],\n ... [1.0, 1.0],\n ... [1.0, 4.0]])\n >>> k = np.array([[ 1. , -0.8727875 , 0.96397335, 0.94512946],\n ... [-0.8727875 , 1. , -0.93036112, -0.97320323],\n ... [ 0.96397335, -0.93036112, 1. , 0.98294169],\n ... [ 0.94512946, -0.97320323, 0.98294169, 1. 
]])\n >>> model, p = LinearMixedModel.from_kinship(y, x, k)\n >>> model.fit()\n >>> model.h_sq # doctest: +SKIP_OUTPUT_CHECK\n 0.2525148830695317\n\n >>> model.s # doctest: +SKIP_OUTPUT_CHECK\n array([3.83501295, 0.13540343, 0.02454114, 0.00504248])\n\n Truncate to a rank :math:`r=2` model:\n\n >>> r = 2\n >>> s_r = model.s[:r]\n >>> p_r = p[:r, :]\n >>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x)\n >>> model.fit()\n >>> model.h_sq # doctest: +SKIP_OUTPUT_CHECK\n 0.25193197591429695\n\n Notes\n -----\n This method eigendecomposes :math:`K = P^T S P` on the leader (master)\n and returns ``LinearMixedModel(p @ y, p @ x, s)`` and ``p``.\n\n The performance of eigendecomposition depends critically on the number\n of leader (master) cores and the NumPy / SciPy configuration, viewable\n with ``np.show_config()``. For Intel machines, we recommend installing\n the `MKL <https://anaconda.org/anaconda/mkl>`__ package for Anaconda.\n\n `k` must be positive semi-definite; symmetry is not checked as only the\n lower triangle is used.\n\n Parameters\n ----------\n y: :class:`numpy.ndarray`\n :math:`n` vector of observations.\n x: :class:`numpy.ndarray`\n :math:`n \\times p` matrix of fixed effects.\n k: :class:`numpy.ndarray`\n :math:`n \\times n` positive semi-definite kernel :math:`K`.\n p_path: :class:`str`, optional\n Path at which to write :math:`P` as a block matrix.\n overwrite: :obj:`bool`\n If ``True``, overwrite an existing file at `p_path`.\n\n Returns\n -------\n model: :class:`LinearMixedModel`\n Model constructed from :math:`y`, :math:`X`, and :math:`K`.\n p: :class:`numpy.ndarray`\n Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.\n \"\"\"\n _check_dims(y, \"y\", 1)\n _check_dims(x, \"x\", 2)\n _check_dims(k, \"k\", 2)\n\n n = k.shape[0]\n if k.shape[1] != n:\n raise ValueError(\"from_kinship: 'k' must be a square matrix\")\n if y.shape[0] != n:\n raise ValueError(\"from_kinship: 'y' and 'k' must have the same \"\n \"number of rows\")\n if x.shape[0] != n:\n raise ValueError(\"from_kinship: 'x' and 'k' must have the same \"\n \"number of rows\")\n\n s, u = hl.linalg._eigh(k)\n if s[0] < -1e12 * s[-1]:\n raise Exception(\"from_kinship: smallest eigenvalue of 'k' is\"\n f\"negative: {s[0]}\")\n\n # flip singular values to descending order\n s = np.flip(s, axis=0)\n u = np.fliplr(u)\n p = u.T\n if p_path:\n BlockMatrix.from_numpy(p).write(p_path, overwrite=overwrite)\n\n model = LinearMixedModel(p @ y, p @ x, s, p_path=p_path)\n return model, p\n\n @classmethod\n @typecheck_method(y=np.ndarray,\n x=np.ndarray,\n z=oneof(np.ndarray, hl.linalg.BlockMatrix),\n p_path=nullable(str),\n overwrite=bool,\n max_condition_number=float,\n complexity_bound=int)\n def from_random_effects(cls, y, x, z,\n p_path=None,\n overwrite=False,\n max_condition_number=1e-10,\n complexity_bound=8192):\n r\"\"\"Initializes a model from :math:`y`, :math:`X`, and :math:`Z`.\n\n Examples\n --------\n >>> from hail.stats import LinearMixedModel\n >>> y = np.array([0.0, 1.0, 8.0, 9.0])\n >>> x = np.array([[1.0, 0.0],\n ... [1.0, 2.0],\n ... [1.0, 1.0],\n ... [1.0, 4.0]])\n >>> z = np.array([[0.0, 0.0, 1.0],\n ... [0.0, 1.0, 2.0],\n ... [1.0, 2.0, 4.0],\n ... [2.0, 4.0, 8.0]])\n >>> model, p = LinearMixedModel.from_random_effects(y, x, z)\n >>> model.fit()\n >>> model.h_sq # doctest: +SKIP_OUTPUT_CHECK\n 0.38205307244271675\n\n Notes\n -----\n If :math:`n \\leq m`, the returned model is full rank.\n\n If :math:`n > m`, the returned model is low rank. 
In this case only,\n eigenvalues less than or equal to `max_condition_number` times the top\n eigenvalue are dropped from :math:`S`, with the corresponding\n eigenvectors dropped from :math:`P`. This guards against precision\n loss on left eigenvectors computed via the right gramian :math:`Z^T Z`\n in :meth:`.BlockMatrix.svd`.\n\n In either case, one can truncate to a rank :math:`r` model as follows.\n If `p` is an ndarray:\n\n >>> p_r = p[:r, :] # doctest: +SKIP\n >>> s_r = model.s[:r] # doctest: +SKIP\n >>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x) # doctest: +SKIP\n\n If `p` is a block matrix:\n\n >>> p[:r, :].write(p_r_path) # doctest: +SKIP\n >>> p_r = BlockMatrix.read(p_r_path) # doctest: +SKIP\n >>> s_r = model.s[:r] # doctest: +SKIP\n >>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x, p_r_path) # doctest: +SKIP\n\n This method applies no standardization to `z`.\n\n Warning\n -------\n If `z` is a block matrix, then ideally `z` should be the result of\n directly reading from disk (and possibly a transpose). This is most\n critical if :math:`n > m`, because in this case multiplication by `z`\n will result in all preceding transformations being repeated\n ``n / block_size`` times, as explained in :class:`.BlockMatrix`.\n\n At least one dimension must be less than or equal to 46300.\n See the warning in :meth:`.BlockMatrix.svd` for performance\n considerations.\n\n Parameters\n ----------\n y: :class:`numpy.ndarray`\n :math:`n` vector of observations :math:`y`.\n x: :class:`numpy.ndarray`\n :math:`n \\times p` matrix of fixed effects :math:`X`.\n z: :class:`numpy.ndarray` or :class:`.BlockMatrix`\n :math:`n \\times m` matrix of random effects :math:`Z`.\n p_path: :class:`str`, optional\n Path at which to write :math:`P` as a block matrix.\n Required if `z` is a block matrix.\n overwrite: :obj:`bool`\n If ``True``, overwrite an existing file at `p_path`.\n max_condition_number: :obj:`float`\n Maximum condition number. 
Must be greater than 1e-16.\n complexity_bound: :obj:`int`\n Complexity bound for :meth:`.BlockMatrix.svd` when `z` is a block\n matrix.\n\n Returns\n -------\n model: :class:`LinearMixedModel`\n Model constructed from :math:`y`, :math:`X`, and :math:`Z`.\n p: :class:`numpy.ndarray` or :class:`.BlockMatrix`\n Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.\n The type is block matrix if `z` is a block matrix and\n :meth:`.BlockMatrix.svd` of `z` returns :math:`U` as a block matrix.\n \"\"\"\n z_is_bm = isinstance(z, BlockMatrix)\n\n if z_is_bm and p_path is None:\n raise ValueError(\"from_random_effects: 'p_path' required when 'z'\"\n \"is a block matrix.\")\n\n if max_condition_number < 1e-16:\n raise ValueError(\"from_random_effects: 'max_condition_number' must \"\n f\"be at least 1e-16, found {max_condition_number}\")\n\n _check_dims(y, \"y\", 1)\n _check_dims(x, \"x\", 2)\n _check_dims(z, \"z\", 2)\n\n n, m = z.shape\n\n if y.shape[0] != n:\n raise ValueError(\"from_random_effects: 'y' and 'z' must have the \"\n \"same number of rows\")\n if x.shape[0] != n:\n raise ValueError(\"from_random_effects: 'x' and 'z' must have the \"\n \"same number of rows\")\n\n if z_is_bm:\n u, s0, _ = z.svd(complexity_bound=complexity_bound)\n p = u.T\n p_is_bm = isinstance(p, BlockMatrix)\n else:\n u, s0, _ = hl.linalg._svd(z, full_matrices=False)\n p = u.T\n p_is_bm = False\n\n s = s0 ** 2\n\n low_rank = n > m\n\n if low_rank:\n assert np.all(np.isfinite(s))\n r = int(np.searchsorted(-s, -max_condition_number * s[0]))\n if r < m:\n info(f'from_random_effects: model rank reduced from {m} to {r} '\n f'due to ill-condition.'\n f'\\n Largest dropped eigenvalue was {s[r]}.')\n s = s[:r]\n p = p[:r, :]\n\n if p_path is not None:\n if p_is_bm:\n p.write(p_path, overwrite=overwrite)\n p = BlockMatrix.read(p_path)\n else:\n BlockMatrix.from_numpy(p).write(p_path, overwrite=overwrite)\n if p_is_bm:\n py, px = (p @ y.reshape(n, 1)).to_numpy().flatten(), (p @ x).to_numpy()\n else:\n py, px = p @ y, p @ x\n\n if low_rank:\n model = LinearMixedModel(py, px, s, y, x, p_path)\n else:\n model = LinearMixedModel(py, px, s, p_path=p_path)\n\n return model, p\n\n # checks agreement of model initialization\n def _same(self, other, tol=1e-6, up_to_sign=True):\n def same_rows_up_to_sign(a, b, atol):\n assert a.shape[0] == b.shape[0]\n return all(np.allclose(a[i], b[i], atol=atol)\n or np.allclose(-a[i], b[i], atol=atol)\n for i in range(a.shape[0]))\n\n close = same_rows_up_to_sign if up_to_sign else np.allclose\n\n if self.low_rank != other.low_rank:\n print(f'different low_rank: {self.low_rank}, {other.low_rank}')\n return False\n\n same = True\n if not close(self.py, other.py, atol=tol):\n print(f'different py:\\n{self.py}\\n{other.py}')\n same = False\n if not close(self.px, other.px, atol=tol):\n print(f'different px:\\n{self.px}\\n{other.px}')\n same = False\n if not np.allclose(self.s, other.s, atol=tol):\n print(f'different s:\\n{self.s}\\n{other.s}')\n same = False\n if self.low_rank and not close(self.y, other.y, atol=tol):\n print(f'different y:\\n{self.y}\\n{other.y}')\n same = False\n if self.low_rank and not close(self.x, other.x, atol=tol):\n print(f'different x\\n{self.x}\\n{other.x}')\n same = False\n if self.p_path != other.p_path:\n print(f'different p_path:\\n{self.p_path}\\n{other.p_path}')\n same = False\n return same\n"
]
| [
[
"numpy.max",
"pandas.DataFrame.from_records",
"numpy.array",
"numpy.linalg.slogdet",
"numpy.zeros",
"numpy.log",
"numpy.sum",
"numpy.exp",
"scipy.optimize.minimize_scalar",
"scipy.linalg.solve",
"scipy.stats.distributions.chi2.sf",
"numpy.allclose",
"numpy.sqrt",
"numpy.isfinite",
"numpy.searchsorted",
"numpy.flip",
"numpy.fliplr"
]
]
|
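The row above embeds Hail's `LinearMixedModel`, whose docstring describes evaluating the restricted likelihood of :math:`h^2` on the grid ``i / 100`` and normalizing the interior values to sum to 1. Below is a minimal NumPy-only sketch of just that normalization step; `neg_log_reml` is a hypothetical stand-in for the model's `compute_neg_log_reml`, and the quadratic surrogate passed in at the end is toy data, not real Hail output.

```python
import numpy as np

def normalized_h_sq_lkhd(neg_log_reml):
    # evaluate -log REML on the grid h^2 = i/100, i = 1..99 (endpoints stay nan)
    log_lkhd = np.full(101, np.nan)
    for h2 in range(1, 100):
        gamma = h2 / (100.0 - h2)                 # gamma = h^2 / (1 - h^2)
        log_lkhd[h2] = -neg_log_reml(np.log(gamma))
    log_lkhd[1:100] -= np.nanmax(log_lkhd[1:100])  # stabilize before exponentiating
    lkhd = np.exp(log_lkhd)
    lkhd[1:100] /= lkhd[1:100].sum()               # interior values sum to 1
    return lkhd

# toy surrogate for the negative restricted log likelihood
lkhd = normalized_h_sq_lkhd(lambda log_gamma: (log_gamma - 0.3) ** 2)
print(np.nansum(lkhd))  # ~1.0
```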
google/alligator2 | [
"d97d4691b292d970b5b0e0e1f0eb243538fc6392"
]
| [
"topic_clustering.py"
]
| [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import silhouette_score\nimport tensorflow.compat.v2 as tf\nimport tensorflow_hub as hub\nimport tensorflow_text # pylint: disable=unused-import\n\n# This flag disables GPU usage. Comment to use GPU with tensorflow.\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\nCLUSTER_LABELS_FILE = \"cluster_labels.txt\"\n\n\nclass TopicClustering(object):\n \"\"\"Handles the clustering of reviews into topics.\"\"\"\n\n def __init__(self):\n # Reduce verbosity of tensorflow\n tf.get_logger().setLevel(\"ERROR\")\n default_folder = os.path.dirname(os.path.realpath(__file__))\n self.cluster_labels_file_location = os.path.join(\n default_folder, CLUSTER_LABELS_FILE)\n\n self.model = hub.load(\n \"https://tfhub.dev/google/universal-sentence-encoder-multilingual/3\")\n\n self.candidate_cluster_names = []\n\n if os.path.isfile(self.cluster_labels_file_location):\n with open(self.cluster_labels_file_location, \"r\") as labels_file:\n self.candidate_cluster_names = labels_file.read().splitlines()\n logging.info(\"Found cluster labels file. %d labels loaded.\",\n len(self.candidate_cluster_names))\n\n labels_file.close()\n\n def recommend_topics(self, nouns):\n \"\"\"Recommends a list of topics for a given set of nouns based on repetition.\n\n Args:\n nouns: a list with nouns for each review.\n\n Returns:\n The recommended list of topics.\n \"\"\"\n nouns = [s.replace(\"translated by google\", \" \") for s in nouns]\n candidate_cluster_names = pd.Series(\n \" \".join(nouns).split()).value_counts()[0:150].index.to_list()\n\n with open(self.cluster_labels_file_location, \"w\") as labels_file:\n for label in candidate_cluster_names:\n labels_file.write(label + \"\\n\")\n\n labels_file.close()\n\n return candidate_cluster_names\n\n def determine_topics(self, reviews):\n \"\"\"Determines the topic for a given set of reviews.\n\n Args:\n reviews: the full set of reviews to classify. This is modified to add\n a topic field with the calculated topic for each review.\n\n Returns:\n Nothing.\n \"\"\"\n nouns = [\n self.extract_tokens(review[\"annotation\"][\"tokens\"], \"NOUN\")\n for review in reviews\n ]\n\n if not self.candidate_cluster_names:\n self.candidate_cluster_names = self.recommend_topics(nouns)\n\n topics = self.modelling_pipeline(pd.DataFrame(nouns), [5, 10])\n\n topics = topics.to_list()\n for review in reviews:\n review[\"topic\"] = topics.pop(0)\n return\n\n def extract_tokens(self, token_syntax, tag):\n \"\"\"Extracts specified token type for API request.\n\n Args:\n token_syntax: API request return from Language API\n tag: type of token to return e.g. 
\"NOUN\" or \"ADJ\"\n\n Returns:\n string containing only words of specified syntax in API request\n \"\"\"\n return \" \".join([\n s[\"lemma\"].lower()\n for s in token_syntax\n if s[u\"partOfSpeech\"][u\"tag\"] == tag\n ])\n\n def modelling_pipeline(self, reviews, num_clusters_list, max_iterations=10):\n \"\"\"Runs the clustering modelling pipeline with k-means.\n\n Args:\n reviews: pandas series of strings to assign to clusters\n num_clusters_list: a list of the number of clusters to attempt. The\n modelling pipeline will select the number with the best silhoutte\n coefficient\n max_iterations: the maximum number of iterations for k-means to perform\n\n Returns:\n numpy array containing the cluster names corresponding to reviews.\n \"\"\"\n if not isinstance(num_clusters_list, list):\n raise ValueError(\"num_clusters_list is not a list\")\n vectors = self.model(reviews)\n\n scores = [\n self.generate_silhouette_score(vectors, k, max_iterations)\n for k in num_clusters_list\n ]\n\n scores = dict(zip(num_clusters_list, scores))\n best_silhouette_score = max(scores, key=scores.get)\n logging.info(\"Optimal clusters is {} with silhouette score {}\".format(\n best_silhouette_score, scores[best_silhouette_score]))\n\n cluster_indices, cluster_centers = self.generate_clusters(\n vectors, best_silhouette_score, max_iterations)\n\n index = self.return_most_similar_index(\n cluster_centers, self.model(self.candidate_cluster_names))\n\n cluster_names = dict(\n zip(\n np.arange(len(cluster_centers)),\n [self.candidate_cluster_names[i] for i in list(index)]))\n\n return pd.Series(cluster_indices).map(cluster_names)\n\n def generate_silhouette_score(self,\n vectors,\n num_clusters,\n max_iterations=10,\n seed=32):\n \"\"\"Generates the silhouette score of the clustering model.\n\n The Silhouette Coefficient is calculated using the mean intra-cluster\n distance (a) and the mean nearest-cluster distance (b) for each sample.\n The best value is 1 and the worst value is -1. Values near 0 indicate\n overlapping clusters. 
Negative values generally indicate that a sample has\n been assigned to the wrong cluster, as a different cluster is more similar.\n\n Args:\n vectors: Tensor containing the embeddings of the review\n num_clusters: the number of clusters to use\n max_iterations: the maximum number of iterations for k-means to perform\n seed: seed\n\n Returns:\n silhouette score as a float\n \"\"\"\n cluster_indices, _ = self.generate_clusters(\n vectors, num_clusters, max_iterations=max_iterations, seed=seed)\n\n score = silhouette_score(vectors.numpy(), np.array(cluster_indices))\n\n logging.info(\"{} clusters yields {} silhouette score\".format(\n num_clusters, score))\n return score\n\n def generate_clusters(self,\n vectors,\n num_clusters,\n max_iterations=10,\n seed=32):\n \"\"\"Generates clusters using vectors using K-means on cosine distance.\n\n Args:\n vectors: Tensor containing the embeddings of the reviews\n num_clusters: the number of clusters to use\n max_iterations: the maximum number of iterations for k-means to perform\n seed: seed\n\n Returns:\n df with named topics\n \"\"\"\n kmeans = tf.compat.v1.estimator.experimental.KMeans(\n num_clusters=num_clusters,\n use_mini_batch=False,\n seed=seed,\n distance_metric=tf.compat.v1.estimator.experimental.KMeans\n .COSINE_DISTANCE)\n\n def input_fn():\n return tf.compat.v1.train.limit_epochs(\n # first convert to numpy due to v1 & eager incompatability\n tf.convert_to_tensor(vectors.numpy(), dtype=tf.float32),\n num_epochs=1)\n\n previous_centers = None\n score = 0\n\n for i in range(max_iterations):\n kmeans.train(input_fn)\n cluster_centers = kmeans.cluster_centers()\n if previous_centers is not None:\n previous_centers = cluster_centers\n new_score = kmeans.score(input_fn) # sum of squared distances\n # break if score improves by less than (arbitrary) 10%\n logging.debug(\"Iteration %d - Sum of squared distances: %.0f\", i,\n new_score)\n if np.divide(score, new_score) > 1.1 or score == 0:\n score = new_score\n else:\n break\n\n return list(kmeans.predict_cluster_index(input_fn)), cluster_centers\n\n def return_most_similar_index(self, a, b, limit_cosine_similarity=0):\n \"\"\"Returns the elements in b with the highest cosine similarity in a.\n\n limit_cosine_similarity sets a lower bound limit on the cosine similarity\n for an element to be returned (and returns -1 for these values).\n\n Args:\n a: Tensor of vectors\n b: Tensor of vectors\n limit_cosine_similarity: integer between 0 and 1\n \"\"\"\n similarity = tf.reduce_sum(a[:, tf.newaxis] * b, axis=-1)\n\n similarity = tf.math.divide(\n similarity,\n tf.norm(a[:, tf.newaxis], axis=-1) * tf.norm(b, axis=-1))\n\n indices = tf.math.argmax(similarity, axis=1).numpy()\n if limit_cosine_similarity > 0:\n max_cosine_similarity = tf.math.reduce_max(similarity, axis=1).numpy()\n indices[max_cosine_similarity < limit_cosine_similarity] = -1\n\n return indices\n"
]
| [
[
"numpy.divide",
"tensorflow.compat.v2.get_logger",
"numpy.array",
"tensorflow.compat.v2.math.reduce_max",
"pandas.DataFrame",
"tensorflow.compat.v2.compat.v1.estimator.experimental.KMeans",
"tensorflow.compat.v2.math.argmax",
"pandas.Series",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.norm"
]
]
|
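The `modelling_pipeline` in the row above picks the cluster count with the best silhouette score before naming clusters. A small sketch of that selection idea, with scikit-learn's `KMeans` standing in for the TensorFlow estimator used in the file (an assumption made only to keep the example self-contained) and random vectors standing in for the Universal Sentence Encoder embeddings:

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def best_k_by_silhouette(vectors, candidate_ks, seed=32):
    """Return the candidate cluster count with the highest silhouette score."""
    scores = {}
    for k in candidate_ks:
        labels = KMeans(n_clusters=k, n_init=10, random_state=seed).fit_predict(vectors)
        scores[k] = silhouette_score(vectors, labels)
    return max(scores, key=scores.get), scores

# stand-in for sentence embeddings of the review nouns
vectors = np.random.default_rng(0).normal(size=(200, 16))
best_k, scores = best_k_by_silhouette(vectors, [5, 10])
print(best_k, scores)
```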
selvanponraj/qtpylib | [
"f67e9b2de3f8ea124f899386a696c61201332feb"
]
| [
"qtpylib/broker.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# QTPyLib: Quantitative Trading Python Library\n# https://github.com/ranaroussi/qtpylib\n#\n# Copyright 2016-2018 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport atexit\nimport hashlib\nimport logging\nimport os\nimport time\nimport sys\n\n# from decimal import *\nimport decimal\n\nfrom abc import ABCMeta, abstractmethod\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\nimport pymysql\nimport ezibpy\n\nfrom qtpylib.instrument import Instrument\nfrom qtpylib import (\n tools, sms\n)\nfrom qtpylib.blotter import (\n Blotter, load_blotter_args\n)\n\ndecimal.getcontext().prec = 5\n\n\n# =============================================\n# check min, python version\nif sys.version_info < (3, 4):\n raise SystemError(\"QTPyLib requires Python version >= 3.4\")\n\n# =============================================\ntools.createLogger(__name__)\n# =============================================\n\n\nclass Broker():\n \"\"\"Broker class initilizer (abstracted, parent class of ``Algo``)\n\n :Parameters:\n\n instruments : list\n List of IB contract tuples\n ibclient : int\n IB TWS/GW Port to use (default: 4001)\n ibport : int\n IB TWS/GW Client ID (default: 998)\n ibserver : string\n IB TWS/GW Server hostname (default: localhost)\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self, instruments, ibclient=998, ibport=4001, ibserver=\"localhost\"):\n\n # detect running strategy\n self.strategy = str(self.__class__).split('.')[-1].split(\"'\")[0]\n\n # initilize class logger\n self.log_broker = logging.getLogger(__name__)\n\n # -----------------------------------\n # assign default vals if not propogated from algo\n if not hasattr(self, 'timezone'):\n self.timezone = \"UTC\"\n if not hasattr(self, 'tick_window'):\n self.tick_window = 1000\n if not hasattr(self, 'bar_window'):\n self.bar_window = 100\n if not hasattr(self, 'last_price'):\n self.last_price = {}\n if not hasattr(self, 'backtest'):\n self.backtest = False\n if not hasattr(self, 'sms_numbers'):\n self.sms_numbers = []\n if not hasattr(self, 'trade_log_dir'):\n self.trade_log_dir = None\n if not hasattr(self, 'blotter_name'):\n self.blotter_name = None\n\n # -----------------------------------\n # connect to IB\n self.ibclient = int(ibclient)\n self.ibport = int(ibport)\n self.ibserver = str(ibserver)\n\n self.ibConn = ezibpy.ezIBpy()\n self.ibConn.ibCallback = self.ibCallback\n # self.ibConnect()\n\n connection_tries = 0\n while not self.ibConn.connected:\n self.ibConn.connect(clientId=self.ibclient,\n port=self.ibport, host=self.ibserver)\n time.sleep(1)\n if not self.ibConn.connected:\n # print('*', end=\"\", flush=True)\n connection_tries += 1\n if connection_tries > 10:\n self.log_broker.info(\n \"Cannot connect to Interactive Brokers...\")\n sys.exit(0)\n\n self.log_broker.info(\"Connection established...\")\n\n # -----------------------------------\n # create contracts\n instrument_tuples_dict = {}\n for instrument in 
instruments:\n try:\n if isinstance(instrument, ezibpy.utils.Contract):\n instrument = self.ibConn.contract_to_tuple(instrument)\n else:\n instrument = tools.create_ib_tuple(instrument)\n contractString = self.ibConn.contractString(instrument)\n instrument_tuples_dict[contractString] = instrument\n self.ibConn.createContract(instrument)\n except Exception as e:\n pass\n\n self.instruments = instrument_tuples_dict\n self.symbols = list(self.instruments.keys())\n self.instrument_combos = {}\n\n # -----------------------------------\n # track orders & trades\n self.active_trades = {}\n self.trades = []\n\n # shortcut\n self.account = self.ibConn.account\n\n # use: self.orders.pending...\n self.orders = tools.make_object(\n by_tickerid=self.ibConn.orders,\n by_symbol=self.ibConn.symbol_orders,\n pending_ttls={},\n pending={},\n filled={},\n active={},\n history={},\n nextId=1,\n recent={}\n )\n\n # -----------------------------------\n self.dbcurr = None\n self.dbconn = None\n\n # -----------------------------------\n # load blotter settings\n self.blotter_args = load_blotter_args(\n self.blotter_name, logger=self.log_broker)\n self.blotter = Blotter(**self.blotter_args)\n\n # connect to mysql using blotter's settings\n if not self.blotter_args['dbskip']:\n self.dbconn = pymysql.connect(\n host=str(self.blotter_args['dbhost']),\n port=int(self.blotter_args['dbport']),\n user=str(self.blotter_args['dbuser']),\n passwd=str(self.blotter_args['dbpass']),\n db=str(self.blotter_args['dbname']),\n autocommit=True\n )\n self.dbcurr = self.dbconn.cursor()\n # -----------------------------------\n # do stuff on exit\n atexit.register(self._on_exit)\n\n # ---------------------------------------\n def add_instruments(self, *instruments):\n \"\"\" add instruments after initialization \"\"\"\n for instrument in instruments:\n if isinstance(instrument, ezibpy.utils.Contract):\n instrument = self.ibConn.contract_to_tuple(instrument)\n contractString = self.ibConn.contractString(instrument)\n self.instruments[contractString] = instrument\n self.ibConn.createContract(instrument)\n\n self.symbols = list(self.instruments.keys())\n\n # ---------------------------------------\n\n @abstractmethod\n def on_fill(self, instrument, order):\n pass\n\n # ---------------------------------------\n \"\"\"\n instrument group methods\n used with spreads to get the group members (contratc legs) as symbols\n \"\"\"\n\n def register_combo(self, parent, legs):\n \"\"\" add contracts to groups \"\"\"\n parent = self.ibConn.contractString(parent)\n legs_dict = {}\n for leg in legs:\n leg = self.ibConn.contractString(leg)\n legs_dict[leg] = self.get_instrument(leg)\n self.instrument_combos[parent] = legs_dict\n\n def get_combo(self, symbol):\n \"\"\" get group by child symbol \"\"\"\n for parent, legs in self.instrument_combos.items():\n if symbol == parent or symbol in legs.keys():\n return {\n \"parent\": self.get_instrument(parent),\n \"legs\": legs,\n }\n return {\n \"parent\": None,\n \"legs\": {},\n }\n\n # -------------------------------------------\n def _on_exit(self):\n self.log_broker.info(\"Algo stopped...\")\n\n if self.ibConn is not None:\n self.log_broker.info(\"Disconnecting...\")\n self.ibConn.disconnect()\n\n self.log_broker.info(\"Disconnecting from MySQL...\")\n try:\n self.dbcurr.close()\n self.dbconn.close()\n except Exception as e:\n pass\n\n # ---------------------------------------\n def ibConnect(self):\n self.ibConn.connect(clientId=self.ibclient,\n host=self.ibserver, port=self.ibport)\n 
self.ibConn.requestPositionUpdates(subscribe=True)\n self.ibConn.requestAccountUpdates(subscribe=True)\n\n # ---------------------------------------\n # @abstractmethod\n def ibCallback(self, caller, msg, **kwargs):\n\n if caller == \"handleHistoricalData\":\n # transmit \"as-is\" to blotter for handling\n self.blotter.ibCallback(\"handleHistoricalData\", msg, **kwargs)\n\n if caller == \"handleConnectionClosed\":\n self.log_broker.info(\"Lost conncetion to Interactive Brokers...\")\n\n while not self.ibConn.connected:\n self.ibConnect()\n time.sleep(1.3)\n if not self.ibConn.connected:\n print('*', end=\"\", flush=True)\n\n self.log_broker.info(\"Connection established...\")\n\n elif caller == \"handleOrders\":\n if not hasattr(self, \"orders\"):\n return\n\n if msg.typeName == ezibpy.utils.dataTypes[\"MSG_TYPE_OPEN_ORDER_END\"]:\n return\n\n # order canceled? do some cleanup\n if hasattr(msg, 'status') and \"CANCELLED\" in msg.status.upper():\n if msg.orderId in self.orders.recent.keys():\n symbol = self.orders.recent[msg.orderId]['symbol']\n try:\n del self.orders.pending_ttls[msg.orderId]\n except Exception as e:\n pass\n try:\n del self.orders.recent[msg.orderId]\n except Exception as e:\n pass\n try:\n if self.orders.pending[symbol]['orderId'] == msg.orderId:\n del self.orders.pending[symbol]\n except Exception as e:\n pass\n return\n\n # continue...\n\n order = self.ibConn.orders[msg.orderId]\n\n # print(\"***********************\\n\\n\", order, \"\\n\\n***********************\")\n orderId = msg.orderId\n symbol = order[\"symbol\"]\n\n try:\n try:\n quantity = self.orders.history[symbol][orderId]['quantity']\n except Exception as e:\n quantity = self.orders.history[symbol][order['parentId']]['quantity']\n # ^^ for child orders auto-created by ezibpy\n except Exception as e:\n quantity = 1\n\n # update pending order to the time actually submitted\n if order[\"status\"] in [\"OPENED\", \"SUBMITTED\"]:\n if orderId in self.orders.pending_ttls:\n self._update_pending_order(symbol, orderId,\n self.orders.pending_ttls[orderId],\n quantity)\n\n elif order[\"status\"] == \"FILLED\":\n self._update_order_history(\n symbol, orderId, quantity, filled=True)\n self._expire_pending_order(symbol, orderId)\n self._cancel_orphan_orders(orderId)\n self._register_trade(order)\n\n # filled\n time.sleep(0.005)\n self.on_fill(self.get_instrument(order['symbol']), order)\n\n # ---------------------------------------\n def _register_trade(self, order):\n \"\"\" constructs trade info from order data \"\"\"\n if order['id'] in self.orders.recent:\n orderId = order['id']\n else:\n orderId = order['parentId']\n # entry / exit?\n symbol = order[\"symbol\"]\n order_data = self.orders.recent[orderId]\n position = self.get_positions(symbol)['position']\n\n if position != 0:\n # entry\n order_data['action'] = \"ENTRY\"\n order_data['position'] = position\n order_data['entry_time'] = tools.datetime_to_timezone(\n order['time'])\n order_data['exit_time'] = None\n order_data['entry_order'] = order_data['order_type']\n order_data['entry_price'] = order['avgFillPrice']\n order_data['exit_price'] = 0\n order_data['exit_reason'] = None\n\n else:\n order_data['action'] = \"EXIT\"\n order_data['position'] = 0\n order_data['exit_time'] = tools.datetime_to_timezone(order['time'])\n order_data['exit_price'] = order['avgFillPrice']\n\n # target / stop?\n if order['id'] == order_data['targetOrderId']:\n order_data['exit_reason'] = \"TARGET\"\n elif order['id'] == order_data['stopOrderId']:\n order_data['exit_reason'] = 
\"STOP\"\n else:\n order_data['exit_reason'] = \"SIGNAL\"\n\n # remove from collection\n del self.orders.recent[orderId]\n\n if order_data is None:\n return None\n\n # trade identifier\n tradeId = self.strategy.upper() + '_' + symbol.upper()\n tradeId = hashlib.sha1(tradeId.encode()).hexdigest()\n\n # existing trade?\n if tradeId not in self.active_trades:\n self.active_trades[tradeId] = {\n \"strategy\": self.strategy,\n \"action\": order_data['action'],\n \"quantity\": abs(order_data['position']),\n \"position\": order_data['position'],\n \"symbol\": order_data[\"symbol\"].split('_')[0],\n \"direction\": order_data['direction'],\n \"entry_time\": None,\n \"exit_time\": None,\n \"duration\": \"0s\",\n \"exit_reason\": order_data['exit_reason'],\n \"order_type\": order_data['order_type'],\n \"market_price\": order_data['price'],\n \"target\": order_data['target'],\n \"stop\": order_data['initial_stop'],\n \"entry_price\": 0,\n \"exit_price\": order_data['exit_price'],\n \"realized_pnl\": 0\n }\n if \"entry_time\" in order_data:\n self.active_trades[tradeId][\"entry_time\"] = order_data['entry_time']\n if \"entry_price\" in order_data:\n self.active_trades[tradeId][\"entry_price\"] = order_data['entry_price']\n else:\n # self.active_trades[tradeId]['direction'] = order_data['direction']\n self.active_trades[tradeId]['action'] = order_data['action']\n self.active_trades[tradeId]['position'] = order_data['position']\n self.active_trades[tradeId]['exit_price'] = order_data['exit_price']\n self.active_trades[tradeId]['exit_reason'] = order_data['exit_reason']\n self.active_trades[tradeId]['exit_time'] = order_data['exit_time']\n\n # calculate trade duration\n try:\n delta = int((self.active_trades[tradeId]['exit_time'] -\n self.active_trades[tradeId]['entry_time']).total_seconds())\n days, remainder = divmod(delta, 86400)\n hours, remainder = divmod(remainder, 3600)\n minutes, seconds = divmod(remainder, 60)\n duration = ('%sd %sh %sm %ss' %\n (days, hours, minutes, seconds))\n self.active_trades[tradeId]['duration'] = duration.replace(\n \"0d \", \"\").replace(\"0h \", \"\").replace(\"0m \", \"\")\n except Exception as e:\n pass\n\n trade = self.active_trades[tradeId]\n if trade['entry_price'] > 0 and trade['position'] == 0:\n if trade['direction'] == \"SELL\":\n pnl = trade['entry_price'] - trade['exit_price']\n else:\n pnl = trade['exit_price'] - trade['entry_price']\n\n pnl = tools.to_decimal(pnl)\n # print(\"1)\", pnl)\n self.active_trades[tradeId]['realized_pnl'] = pnl\n\n # print(\"\\n\\n-----------------\")\n # print(self.active_trades[tradeId])\n # print(\"-----------------\\n\\n\")\n\n # get trade\n trade = self.active_trades[tradeId].copy()\n\n # sms trades\n sms._send_trade(trade, self.sms_numbers, self.timezone)\n\n # rename trade direction\n trade['direction'] = trade['direction'].replace(\n \"BUY\", \"LONG\").replace(\"SELL\", \"SHORT\")\n\n # log\n self.log_trade(trade)\n\n # remove from active trades and add to trade\n if trade['action'] == \"EXIT\":\n del self.active_trades[tradeId]\n self.trades.append(trade)\n\n # return trade\n return trade\n\n # ---------------------------------------\n def log_trade(self, trade):\n\n # first trade is an exit?\n if trade['entry_time'] is None:\n return\n\n # connection established\n if (self.dbconn is not None) & (self.dbcurr is not None):\n\n sql = \"\"\"INSERT INTO trades (\n `algo`, `symbol`, `direction`,`quantity`,\n `entry_time`, `exit_time`, `exit_reason`,\n `order_type`, `market_price`, `target`, `stop`,\n `entry_price`, 
`exit_price`, `realized_pnl`)\n VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\n ON DUPLICATE KEY UPDATE\n `algo`=%s, `symbol`=%s, `direction`=%s, `quantity`=%s,\n `entry_time`=%s, `exit_time`=%s, `exit_reason`=%s,\n `order_type`=%s, `market_price`=%s, `target`=%s, `stop`=%s,\n `entry_price`=%s, `exit_price`=%s, `realized_pnl`=%s\n \"\"\"\n\n try:\n trade['entry_time'] = trade['entry_time'].strftime(\n \"%Y-%m-%d %H:%M:%S.%f\")\n except Exception as e:\n pass\n\n try:\n trade['exit_time'] = trade['exit_time'].strftime(\n \"%Y-%m-%d %H:%M:%S.%f\")\n except Exception as e:\n pass\n\n # all strings\n for k, v in trade.items():\n if v is not None:\n trade[k] = str(v)\n\n self.dbcurr.execute(sql, (\n trade['strategy'], trade['symbol'], trade['direction'], trade['quantity'],\n trade['entry_time'], trade['exit_time'], trade['exit_reason'],\n trade['order_type'], trade['market_price'], trade['target'], trade['stop'],\n trade['entry_price'], trade['exit_price'], trade['realized_pnl'],\n trade['strategy'], trade['symbol'], trade['direction'], trade['quantity'],\n trade['entry_time'], trade['exit_time'], trade['exit_reason'],\n trade['order_type'], trade['market_price'], trade['target'], trade['stop'],\n trade['entry_price'], trade['exit_price'], trade['realized_pnl']\n ))\n\n # commit\n try:\n self.dbconn.commit()\n except Exception as e:\n pass\n\n if self.trade_log_dir:\n self.trade_log_dir = (self.trade_log_dir + '/').replace('//', '/')\n trade_log_path = self.trade_log_dir + self.strategy.lower() + \"_\" + \\\n datetime.now().strftime('%Y%m%d') + \".csv\"\n\n # convert None to empty string !!\n trade.update((k, '') for k, v in trade.items() if v is None)\n\n # create df\n trade_df = pd.DataFrame(index=[0], data=trade)[[\n 'strategy', 'symbol', 'direction', 'quantity', 'entry_time',\n 'exit_time', 'exit_reason', 'order_type', 'market_price', 'target',\n 'stop', 'entry_price', 'exit_price', 'realized_pnl'\n ]]\n\n if os.path.exists(trade_log_path):\n trades = pd.read_csv(trade_log_path, header=0)\n trades = trades.append(trade_df, ignore_index=True, sort=True)\n trades.drop_duplicates(['entry_time', 'symbol', 'strategy'],\n keep=\"last\", inplace=True)\n trades.to_csv(trade_log_path, header=True, index=False)\n tools.chmod(trade_log_path)\n else:\n trade_df.to_csv(trade_log_path, header=True, index=False)\n tools.chmod(trade_log_path)\n\n # ---------------------------------------\n def active_order(self, symbol, order_type=\"STOP\"):\n if symbol in self.orders.history:\n for orderId in self.orders.history[symbol]:\n order = self.orders.history[symbol][orderId]\n if order['order_type'].upper() == order_type.upper():\n return order\n return None\n\n # ---------------------------------------\n @staticmethod\n def _get_locals(local_params):\n del local_params['self']\n return local_params\n\n # ---------------------------------------\n def _create_order(self, symbol, direction, quantity, order_type=\"\",\n limit_price=0, expiry=0, orderId=0, target=0,\n initial_stop=0, trail_stop_at=0, trail_stop_by=0,\n stop_limit=False, trail_stop_type='percent', **kwargs):\n\n # fix prices to comply with contract's min-tick\n ticksize = self.get_contract_details(symbol)['m_minTick']\n limit_price = tools.round_to_fraction(limit_price, ticksize)\n target = tools.round_to_fraction(target, ticksize)\n initial_stop = tools.round_to_fraction(initial_stop, ticksize)\n trail_stop_at = tools.round_to_fraction(trail_stop_at, ticksize)\n trail_stop_by = tools.round_to_fraction(trail_stop_by, ticksize)\n trail_stop_type = 
\"amount\" if trail_stop_type == \"amount\" else \"percent\"\n\n self.log_broker.debug('CREATE ORDER: %s %4d %s %s', direction,\n quantity, symbol, dict(locals(), **kwargs))\n\n # force BUY/SELL (not LONG/SHORT)\n direction = direction.replace(\"LONG\", \"BUY\").replace(\"SHORT\", \"SELL\")\n\n # modify order?\n if order_type.upper() == \"MODIFY\":\n self.modify_order(symbol, orderId, quantity, limit_price)\n return\n\n # continue...\n\n if \"stoploss\" in kwargs and initial_stop == 0:\n initial_stop = kwargs['stoploss']\n\n order_type = \"MARKET\" if limit_price == 0 else \"LIMIT\"\n fillorkill = kwargs[\"fillorkill\"] if \"fillorkill\" in kwargs else False\n iceberg = kwargs[\"iceberg\"] if \"iceberg\" in kwargs else False\n tif = kwargs[\"tif\"] if \"tif\" in kwargs else \"DAY\"\n\n # clear expired pending orders\n self._cancel_expired_pending_orders()\n\n # don't submit order if a pending one is waiting\n if symbol in self.orders.pending:\n self.log_broker.warning(\n 'Not submitting %s order, orders pending: %s', symbol,\n self.orders.pending)\n return\n\n # continue...\n order_quantity = abs(quantity)\n if direction.upper() == \"SELL\":\n order_quantity = -order_quantity\n\n contract = self.get_contract(symbol)\n\n # is bracket order\n bracket = (target > 0) | (initial_stop > 0) | (\n trail_stop_at > 0) | (trail_stop_by > 0)\n\n # create & submit order\n if not bracket:\n # simple order\n order = self.ibConn.createOrder(order_quantity, limit_price,\n fillorkill=fillorkill,\n iceberg=iceberg,\n tif=tif)\n\n orderId = self.ibConn.placeOrder(contract, order)\n self.log_broker.debug('PLACE ORDER: %s %s', tools.contract_to_dict(\n contract), tools.order_to_dict(order))\n elif stop_limit:\n bracket = False\n # stop limit order\n order = self.ibConn.createStopOrder(order_quantity, price=limit_price, stop=limit_price,\n stop_limit=True,\n fillorkill=fillorkill,\n iceberg=iceberg,\n tif=tif\n )\n\n orderId = self.ibConn.placeOrder(contract, order)\n self.log_broker.debug('PLACE ORDER: %s %s', tools.contract_to_dict(\n contract), tools.order_to_dict(order))\n\n else:\n # bracket order\n order = self.ibConn.createBracketOrder(contract, order_quantity,\n entry=limit_price,\n target=target,\n stop=initial_stop,\n stop_limit=stop_limit,\n fillorkill=fillorkill,\n iceberg=iceberg,\n tif=tif)\n orderId = order[\"entryOrderId\"]\n\n # triggered trailing stop?\n if trail_stop_by != 0 and trail_stop_at != 0:\n trail_stop_params = {\n \"symbol\": symbol,\n \"quantity\": -order_quantity,\n \"triggerPrice\": trail_stop_at,\n \"parentId\": order[\"entryOrderId\"],\n \"stopOrderId\": order[\"stopOrderId\"]\n }\n if trail_stop_type.lower() == 'amount':\n trail_stop_params[\"trailAmount\"] = trail_stop_by\n else:\n trail_stop_params[\"trailPercent\"] = trail_stop_by\n self.ibConn.createTriggerableTrailingStop(**trail_stop_params)\n\n # add all orders to history\n self._update_order_history(symbol=symbol,\n orderId=order[\"entryOrderId\"],\n quantity=order_quantity,\n order_type='ENTRY')\n\n self._update_order_history(symbol=symbol,\n orderId=order[\"targetOrderId\"],\n quantity=-order_quantity,\n order_type='TARGET',\n parentId=order[\"entryOrderId\"])\n\n self._update_order_history(symbol=symbol,\n orderId=order[\"stopOrderId\"],\n quantity=-order_quantity,\n order_type='STOP',\n parentId=order[\"entryOrderId\"])\n\n # have original params available for FILL event\n self.orders.recent[orderId] = self._get_locals(locals())\n self.orders.recent[orderId]['targetOrderId'] = 0\n 
self.orders.recent[orderId]['stopOrderId'] = 0\n\n if bracket:\n self.orders.recent[orderId]['targetOrderId'] = order[\"targetOrderId\"]\n self.orders.recent[orderId]['stopOrderId'] = order[\"stopOrderId\"]\n\n # append market price at the time of order\n try:\n self.orders.recent[orderId]['price'] = self.last_price[symbol]\n except Exception as e:\n self.orders.recent[orderId]['price'] = 0\n\n # add orderId / ttl to (auto-adds to history)\n expiry = expiry * 1000 if expiry > 0 else 60000 # 1min\n self._update_pending_order(symbol, orderId, expiry, order_quantity)\n time.sleep(0.1)\n\n # ---------------------------------------\n def _cancel_order(self, orderId):\n if orderId is not None and orderId > 0:\n self.ibConn.cancelOrder(orderId)\n\n # ---------------------------------------\n def modify_order_group(self, symbol, orderId, entry=None,\n target=None, stop=None, quantity=None):\n\n order_group = self.orders.recent[orderId]['order']\n\n if entry is not None:\n self.modify_order(\n symbol, orderId, limit_price=entry, quantity=quantity)\n\n if target is not None:\n self.modify_order(symbol, order_group['targetOrderId'],\n limit_price=target, quantity=quantity)\n if stop is not None:\n stop_quantity = quantity * -1 if quantity is not None else None\n self.modify_order(symbol, order_group['stopOrderId'],\n limit_price=stop, quantity=stop_quantity)\n\n # ---------------------------------------\n def modify_order(self, symbol, orderId, quantity=None, limit_price=None):\n if quantity is None and limit_price is None:\n return\n\n if symbol in self.orders.history:\n for historyOrderId in self.orders.history[symbol]:\n if historyOrderId == orderId:\n order_quantity = self.orders.history[symbol][orderId]['quantity']\n if quantity is not None:\n order_quantity = quantity\n\n order = self.orders.history[symbol][orderId]\n if order['order_type'] == \"STOP\":\n new_order = self.ibConn.createStopOrder(\n quantity=order_quantity,\n parentId=order['parentId'],\n stop=limit_price,\n trail=None,\n transmit=True\n )\n else:\n new_order = self.ibConn.createOrder(\n order_quantity, limit_price)\n\n # child order?\n if \"parentId\" in order:\n new_order.parentId = order['parentId']\n\n # send order\n contract = self.get_contract(symbol)\n self.ibConn.placeOrder(\n contract, new_order, orderId=orderId)\n break\n\n # ---------------------------------------\n @staticmethod\n def _milliseconds_delta(delta):\n return delta.days * 86400000 + delta.seconds * 1000 + delta.microseconds / 1000\n\n # ---------------------------------------\n def _cancel_orphan_orders(self, orderId):\n \"\"\" cancel child orders when parent is gone \"\"\"\n orders = self.ibConn.orders\n for order in orders:\n order = orders[order]\n if order['parentId'] != orderId:\n self.ibConn.cancelOrder(order['id'])\n\n # ---------------------------------------\n def _cancel_expired_pending_orders(self):\n \"\"\" expires pending orders \"\"\"\n # use a copy to prevent errors\n pending = self.orders.pending.copy()\n for symbol in pending:\n orderId = pending[symbol][\"orderId\"]\n expiration = pending[symbol][\"expires\"]\n\n delta = expiration - datetime.now()\n delta = self._milliseconds_delta(delta)\n\n # cancel order if expired\n if delta < 0:\n self.ibConn.cancelOrder(orderId)\n if orderId in self.orders.pending_ttls:\n if orderId in self.orders.pending_ttls:\n del self.orders.pending_ttls[orderId]\n if symbol in self.orders.pending:\n if self.orders.pending[symbol]['orderId'] == orderId:\n del self.orders.pending[symbol]\n\n # 
---------------------------------------------------------\n def _expire_pending_order(self, symbol, orderId):\n self.ibConn.cancelOrder(orderId)\n\n if orderId in self.orders.pending_ttls:\n del self.orders.pending_ttls[orderId]\n\n if symbol in self.orders.pending:\n if self.orders.pending[symbol]['orderId'] == orderId:\n del self.orders.pending[symbol]\n\n # ---------------------------------------------------------\n def _update_pending_order(self, symbol, orderId, expiry, quantity):\n self.orders.pending[symbol] = {\n \"orderId\": orderId,\n \"quantity\": quantity,\n # \"created\": datetime.now(),\n \"expires\": datetime.now() + timedelta(milliseconds=expiry)\n }\n\n # ibCallback needs this to update with submittion time\n self.orders.pending_ttls[orderId] = expiry\n self._update_order_history(\n symbol=symbol, orderId=orderId, quantity=quantity)\n\n # ---------------------------------------------------------\n def _update_order_history(self, symbol, orderId, quantity,\n order_type='entry', filled=False, parentId=0):\n if symbol not in self.orders.history:\n self.orders.history[symbol] = {}\n\n self.orders.history[symbol][orderId] = {\n \"orderId\": orderId,\n \"quantity\": quantity,\n \"order_type\": order_type.upper(),\n \"filled\": filled,\n \"parentId\": parentId\n }\n\n # ---------------------------------------\n # UTILITY FUNCTIONS\n # ---------------------------------------\n def get_instrument(self, symbol):\n \"\"\"\n A string subclass that provides easy access to misc\n symbol-related methods and information using shorthand.\n Refer to the `Instruments API <#instrument-api>`_\n for available methods and properties\n\n Call from within your strategy:\n ``instrument = self.get_instrument(\"SYMBOL\")``\n\n :Parameters:\n symbol : string\n instrument symbol\n\n \"\"\"\n instrument = Instrument(self.get_symbol(symbol))\n instrument._set_parent(self)\n instrument._set_windows(ticks=self.tick_window, bars=self.bar_window)\n\n return instrument\n\n # ---------------------------------------\n @staticmethod\n def get_symbol(symbol):\n if not isinstance(symbol, str):\n if isinstance(symbol, dict):\n symbol = symbol['symbol']\n elif isinstance(symbol, pd.DataFrame):\n symbol = symbol[:1]['symbol'].values[0]\n\n return symbol\n\n # ---------------------------------------\n def get_account(self):\n return self.ibConn.account\n\n # ---------------------------------------\n def get_contract(self, symbol):\n return self.ibConn.contracts[self.ibConn.tickerId(symbol)]\n\n # ---------------------------------------\n def get_contract_details(self, symbol):\n return self.ibConn.contractDetails(symbol)\n\n # ---------------------------------------\n def get_tickerId(self, symbol):\n return self.ibConn.tickerId(symbol)\n\n # ---------------------------------------\n def get_orders(self, symbol):\n symbol = self.get_symbol(symbol)\n\n self.orders.by_symbol = self.ibConn.group_orders(\"symbol\")\n if symbol in self.orders.by_symbol:\n return self.orders.by_symbol[symbol]\n\n return {}\n\n # ---------------------------------------\n def get_positions(self, symbol):\n symbol = self.get_symbol(symbol)\n\n if self.backtest:\n position = 0\n avgCost = 0.0\n\n if self.datastore.recorded is not None:\n data = self.datastore.recorded\n col = symbol.upper() + '_POSITION'\n position = data[col].values[-1]\n if position != 0:\n pos = data[col].diff()\n avgCost = data[data.index.isin(pos[pos != 0][-1:].index)\n ][symbol.upper() + '_OPEN'].values[-1]\n return {\n \"symbol\": symbol,\n \"position\": position,\n 
\"avgCost\": avgCost,\n \"account\": \"Backtest\"\n }\n\n elif symbol in self.ibConn.positions:\n return self.ibConn.positions[symbol]\n\n return {\n \"symbol\": symbol,\n \"position\": 0,\n \"avgCost\": 0.0,\n \"account\": None\n }\n\n # ---------------------------------------\n def get_portfolio(self, symbol=None):\n if symbol is not None:\n symbol = self.get_symbol(symbol)\n\n if symbol in self.ibConn.portfolio:\n portfolio = self.ibConn.portfolio[symbol]\n if \"symbol\" in portfolio:\n return portfolio\n\n return {\n \"symbol\": symbol,\n \"position\": 0.0,\n \"marketPrice\": 0.0,\n \"marketValue\": 0.0,\n \"averageCost\": 0.0,\n \"unrealizedPNL\": 0.0,\n \"realizedPNL\": 0.0,\n \"totalPNL\": 0.0,\n \"account\": None\n }\n\n return self.ibConn.portfolio\n\n # ---------------------------------------\n def get_pending_orders(self, symbol=None):\n if symbol is not None:\n symbol = self.get_symbol(symbol)\n if symbol in self.orders.pending:\n return self.orders.pending[symbol]\n return {}\n\n return self.orders.pending\n\n # ---------------------------------------\n def get_trades(self, symbol=None):\n\n # closed trades\n trades = pd.DataFrame(self.trades)\n if not trades.empty:\n trades.loc[:, 'closed'] = True\n\n # ongoing trades\n active_trades = pd.DataFrame(list(self.active_trades.values()))\n if not active_trades.empty:\n active_trades.loc[:, 'closed'] = False\n\n # combine dataframes\n df = pd.concat([trades, active_trades], sort=True).reset_index()\n\n # set last price\n if not df.empty:\n\n # conert values to floats\n df['entry_price'] = df['entry_price'].astype(float)\n df['exit_price'] = df['exit_price'].astype(float)\n df['market_price'] = df['market_price'].astype(float)\n df['realized_pnl'] = df['realized_pnl'].astype(float)\n df['stop'] = df['stop'].astype(float)\n df['target'] = df['target'].astype(float)\n df['quantity'] = df['quantity'].astype(int)\n\n try:\n df.loc[:, 'last'] = self.last_price[symbol]\n except Exception as e:\n df.loc[:, 'last'] = 0\n\n # calc unrealized pnl\n df['unrealized_pnl'] = np.where(df['direction'] == \"SHORT\",\n df['entry_price'] - df['last'],\n df['last'] - df['entry_price'])\n\n df.loc[df['closed'], 'unrealized_pnl'] = 0\n\n # drop index column\n df.drop('index', axis=1, inplace=True)\n\n # get single symbol\n if symbol is not None:\n df = df[df['symbol'] == symbol.split(\"_\")[0]]\n df.loc[:, 'symbol'] = symbol\n\n # return\n return df\n"
]
| [
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.where",
"pandas.concat"
]
]
|
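`_register_trade` in the broker above derives a human-readable trade duration and a direction-aware realized PnL when a position closes. The sketch below isolates those two calculations on toy values; the helper names are illustrative and not part of QTPyLib's API.

```python
from datetime import datetime, timedelta

def format_duration(entry_time, exit_time):
    # same divmod breakdown as _register_trade, with zero leading units dropped
    delta = int((exit_time - entry_time).total_seconds())
    days, rem = divmod(delta, 86400)
    hours, rem = divmod(rem, 3600)
    minutes, seconds = divmod(rem, 60)
    text = "%sd %sh %sm %ss" % (days, hours, minutes, seconds)
    return text.replace("0d ", "").replace("0h ", "").replace("0m ", "")

def realized_pnl(direction, entry_price, exit_price):
    # a SELL (short) entry profits when price falls; a BUY profits when it rises
    if direction == "SELL":
        return entry_price - exit_price
    return exit_price - entry_price

entry = datetime(2018, 1, 2, 9, 30)
print(format_duration(entry, entry + timedelta(hours=2, minutes=5)))  # '2h 5m 0s'
print(realized_pnl("SELL", 101.25, 100.75))                           # 0.5
```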
TimO96/NLP2 | [
"83f65a385457f68397c641f38b53df0110282578"
]
| [
"Assignment1/model.py"
]
| [
"# Copyright (c) 2018-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport torch.nn as nn\nimport torch.utils.data.dataloader\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\n ntoken: vocab size\n nip: embedding size\n \"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]\n except KeyError:\n raise ValueError( \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\")\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n if nhid != ninp:\n raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n #print(output)\n decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n if self.rnn_type == 'LSTM':\n return (weight.new(self.nlayers, bsz, self.nhid).zero_(),\n weight.new(self.nlayers, bsz, self.nhid).zero_())\n else:\n return weight.new(self.nlayers, bsz, self.nhid).zero_()\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.nn.RNN"
]
]
|
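A short usage sketch of the `RNNModel` in the row above, assuming `Assignment1/model.py` is importable as `model`; shapes follow the `[seq_len, batch]` convention its forward pass expects.

```python
import torch
from model import RNNModel  # assumes Assignment1/model.py is on the import path

ntoken, ninp, nhid, nlayers = 1000, 64, 128, 2
rnn_lm = RNNModel('LSTM', ntoken, ninp, nhid, nlayers, dropout=0.5)

seq_len, bsz = 35, 20
tokens = torch.randint(0, ntoken, (seq_len, bsz))   # [seq_len, batch] of token ids
hidden = rnn_lm.init_hidden(bsz)                    # (h0, c0) for the LSTM
logits, hidden = rnn_lm(tokens, hidden)
print(logits.shape)  # torch.Size([35, 20, 1000])
```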
xiaotaw/faster-lio | [
"ff0c9092989da5dc3f1f66e798915d648b31b695"
]
| [
"result/plot_time.py"
]
| [
"# coding=\"utf8\"\nimport sys\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n\ndef remove_outlier(x):\n q1 = x.quantile(0.25)\n q3 = x.quantile(0.75)\n m = (x <= q3 + (q3 - q1) * 1.5).all(axis=1)\n return x[m]\n\n\ndef time_plot(log_file):\n # read time log\n # log_file = \"20130110.time.log\"\n df = pd.read_csv(log_file)\n print(\"Read \" + str(df.shape[0]) + \" rows from \" + log_file)\n # compute average\n for c in df:\n x = df[c]\n x = x[x.notna()] # remove nan\n fmt = \"%-35s: num=%d, ave=%f, std=%f, max=%f, min=%f\"\n print(fmt % (c, len(x), x.mean(), x.std(), x.max(), x.min()))\n # plot\n c = [\" Laser Mapping Single Run\", \" IEKF Solve and Update\"]\n x = df[c]\n x = x[x.apply(pd.notna).all(axis=1)] # remove nan\n x = remove_outlier(x)\n y = x.rolling(7, center=True).mean() # sliding average, window size = 7\n fig = plt.figure(num=log_file)\n # _ = plt.plot(x)\n # fig = plt.figure(num=log_file + \"(moving average = 7)\")\n _ = plt.plot(y)\n plt.legend(y.columns)\n plt.ylabel(\"time/ms\")\n plt.xlabel(\"laser scan\")\n plt.savefig(log_file.replace(\".log\", \".png\"))\n plt.show()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: python2 plot_time.py 20120110.time.log\")\n else:\n log_file = sys.argv[1]\n time_plot(log_file)\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
]
|
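`remove_outlier` and `time_plot` in the row above drop rows beyond the Q3 + 1.5*IQR fence and then smooth with a centred rolling mean of width 7. A small pandas sketch of just those two steps on toy timings (the real log has columns such as " Laser Mapping Single Run"):

```python
import pandas as pd

# toy per-scan timings in milliseconds, with one obvious outlier
df = pd.DataFrame({"run_time_ms": [3.1, 3.3, 3.0, 25.0, 3.2, 3.4, 3.1, 3.3, 3.2, 3.0]})

q1, q3 = df.quantile(0.25), df.quantile(0.75)
mask = (df <= q3 + (q3 - q1) * 1.5).all(axis=1)    # keep rows at or below the upper fence
filtered = df[mask]

smoothed = filtered.rolling(7, center=True).mean()  # centred window of 7, as in time_plot
print(len(df), len(filtered), int(smoothed.notna().sum().iloc[0]))
```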
xuehaouwa/VMZ | [
"44492f03c7c43a2add6572a927af6637ddb02f38"
]
| [
"app/tools/test_net.py"
]
| [
"# Copyright 2018-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport logging\nimport numpy as np\nimport argparse\n\nfrom caffe2.python import workspace, cnn, core\nfrom caffe2.python import data_parallel_model\nimport lib.models.model_builder as model_builder\nimport lib.utils.model_helper as model_helper\nimport lib.utils.model_loader as model_loader\nimport lib.utils.metric as metric\n\nfrom caffe2.proto import caffe2_pb2\n\nlogging.basicConfig()\nlog = logging.getLogger(\"test_net\")\nlog.setLevel(logging.INFO)\n\n\ndef PredictionAggregation(preds, method):\n if method == 0: # average pooling\n return np.mean(preds, axis=0)\n elif method == 1: # max pooling\n return np.max(preds, axis=0)\n else:\n log.info('Unknown aggregation method')\n return []\n\n\ndef Test(args):\n if args.gpus is not None:\n gpus = [int(x) for x in args.gpus.split(',')]\n num_gpus = len(gpus)\n else:\n gpus = range(args.num_gpus)\n num_gpus = args.num_gpus\n\n if num_gpus > 0:\n total_batch_size = args.batch_size * num_gpus\n log.info(\"Running on GPUs: {}\".format(gpus))\n log.info(\"total_batch_size: {}\".format(total_batch_size))\n else:\n total_batch_size = args.batch_size\n log.info(\"Running on CPU\")\n log.info(\"total_batch_size: {}\".format(total_batch_size))\n\n # Model building functions\n def create_model_ops(model, loss_scale):\n return model_builder.build_model(\n model=model,\n model_name=args.model_name,\n model_depth=args.model_depth,\n num_labels=args.num_labels,\n num_channels=args.num_channels,\n crop_size=args.crop_size,\n clip_length=(\n args.clip_length_of if args.input_type == 1\n else args.clip_length_rgb\n ),\n loss_scale=loss_scale,\n is_test=1,\n pred_layer_name=args.pred_layer_name,\n )\n\n test_model = cnn.CNNModelHelper(\n order=\"NCHW\",\n name=\"video_model_test\",\n use_cudnn=(True if args.use_cudnn == 1 else False),\n cudnn_exhaustive_search=True,\n )\n\n test_reader, number_of_examples = model_builder.create_data_reader(\n test_model,\n name=\"test_reader\",\n input_data=args.test_data,\n )\n\n if args.num_iter <= 0:\n num_iter = int(number_of_examples / total_batch_size)\n else:\n num_iter = args.num_iter\n\n def test_input_fn(model):\n model_helper.AddVideoInput(\n test_model,\n test_reader,\n batch_size=args.batch_size,\n clip_per_video=args.clip_per_video,\n decode_type=1,\n length_rgb=args.clip_length_rgb,\n sampling_rate_rgb=args.sampling_rate_rgb,\n scale_h=args.scale_h,\n scale_w=args.scale_w,\n crop_size=args.crop_size,\n num_decode_threads=4,\n num_of_class=args.num_labels,\n random_mirror=False,\n random_crop=False,\n input_type=args.input_type,\n length_of=args.clip_length_of,\n sampling_rate_of=args.sampling_rate_of,\n frame_gap_of=args.frame_gap_of,\n do_flow_aggregation=args.do_flow_aggregation,\n flow_data_type=args.flow_data_type,\n get_rgb=(args.input_type == 0),\n 
get_optical_flow=(args.input_type == 1),\n get_video_id=args.get_video_id,\n use_local_file=args.use_local_file,\n )\n\n if num_gpus > 0:\n data_parallel_model.Parallelize_GPU(\n test_model,\n input_builder_fun=test_input_fn,\n forward_pass_builder_fun=create_model_ops,\n param_update_builder_fun=None,\n devices=gpus\n )\n else:\n test_model._device_type = caffe2_pb2.CPU\n test_model._devices = [0]\n device_opt = core.DeviceOption(test_model._device_type, 0)\n with core.DeviceScope(device_opt):\n # Because our loaded models are named with \"gpu_x\", keep the naming for now.\n # TODO: Save model using `data_parallel_model.ExtractPredictorNet`\n # to extract the model for \"gpu_0\". It also renames\n # the input and output blobs by stripping the \"gpu_x/\" prefix\n with core.NameScope(\"{}_{}\".format(\"gpu\", 0)):\n test_input_fn(test_model)\n create_model_ops(test_model, 1.0)\n\n workspace.RunNetOnce(test_model.param_init_net)\n workspace.CreateNet(test_model.net)\n\n if args.db_type == 'minidb':\n if num_gpus > 0:\n model_helper.LoadModel(args.load_model_path, args.db_type)\n data_parallel_model.FinalizeAfterCheckpoint(test_model)\n else:\n with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):\n model_helper.LoadModel(args.load_model_path, args.db_type)\n elif args.db_type == 'pickle':\n if num_gpus > 0:\n model_loader.LoadModelFromPickleFile(\n test_model,\n args.load_model_path,\n use_gpu=True,\n root_gpu_id=gpus[0]\n )\n data_parallel_model.FinalizeAfterCheckpoint(test_model)\n else:\n model_loader.LoadModelFromPickleFile(\n test_model,\n args.load_model_path,\n use_gpu=False\n )\n else:\n log.warning(\"Unsupported db_type: {}\".format(args.db_type))\n\n\n # metric counters for classification\n clip_acc = 0\n video_top1 = 0\n video_topk = 0\n video_count = 0\n clip_count = 0\n\n for i in range(num_iter):\n workspace.RunNet(test_model.net.Proto().name)\n num_devices = 1 # default for cpu\n if num_gpus > 0:\n num_devices = num_gpus\n\n for g in range(num_devices):\n # get labels\n label = workspace.FetchBlob(\n \"gpu_{}\".format(g) + '/label'\n )\n # get predictions\n predicts = workspace.FetchBlob(\"gpu_{}\".format(g) + '/softmax')\n assert predicts.shape[0] == args.batch_size * args.clip_per_video\n\n for j in range(args.batch_size):\n # get label for one video\n sample_label = label[j * args.clip_per_video]\n # get clip accuracy\n for k in range(args.clip_per_video):\n c1, _ = metric.accuracy_metric(\n predicts[j * args.clip_per_video + k, :],\n label[j * args.clip_per_video + k])\n clip_acc = clip_acc + c1\n # get all clip predictions for one video\n all_clips = predicts[\n j * args.clip_per_video:(j + 1) * args.clip_per_video, :]\n # aggregate predictions into one\n video_pred = PredictionAggregation(all_clips, args.aggregation)\n c1, ck = metric.accuracy_metric(\n video_pred, sample_label, args.top_k)\n video_top1 = video_top1 + c1\n video_topk = video_topk + ck\n\n video_count = video_count + args.batch_size\n clip_count = clip_count + label.shape[0]\n\n if i > 0 and i % args.display_iter == 0:\n log.info('Iter {}/{}: clip: {}, top1: {}, top 5: {}'.format(\n i,\n num_iter,\n clip_acc / clip_count,\n video_top1 / video_count,\n video_topk / video_count))\n\n log.info(\"Test accuracy: clip: {}, top 1: {}, top{}: {}\".format(\n clip_acc / clip_count,\n video_top1 / video_count,\n args.top_k,\n video_topk / video_count\n ))\n\n if num_gpus > 0:\n flops, params = model_helper.GetFlopsAndParams(test_model, gpus[0])\n else:\n flops, params = 
model_helper.GetFlopsAndParams(test_model)\n log.info('FLOPs: {}, params: {}'.format(flops, params))\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"test_net\"\n )\n parser.add_argument(\"--test_data\", type=str, default=None,\n help=\"Path to test data\")\n parser.add_argument(\"--db_type\", type=str, default='pickle',\n help=\"Db type of the testing model\")\n parser.add_argument(\"--model_depth\", type=int, default=18,\n help=\"Model depth\")\n parser.add_argument(\"--model_name\", type=str, default='r2plus1d',\n help=\"Model name\")\n parser.add_argument(\"--gpus\", type=str, default=None,\n help=\"Comma separated list of GPU devices to use\")\n parser.add_argument(\"--num_gpus\", type=int, default=1,\n help=\"Number of GPU devices (instead of --gpus)\")\n parser.add_argument(\"--scale_h\", type=int, default=128,\n help=\"Scale image height to\")\n parser.add_argument(\"--scale_w\", type=int, default=171,\n help=\"Scale image width to\")\n parser.add_argument(\"--num_iter\", type=int, default=0,\n help=\"Number of test iterations; \" +\n \"0: test the whole set\")\n parser.add_argument(\"--crop_size\", type=int, default=112,\n help=\"Input image size (to crop to)\")\n parser.add_argument(\"--clip_length_rgb\", type=int, default=16,\n help=\"Length of input clips\")\n parser.add_argument(\"--sampling_rate_rgb\", type=int, default=1,\n help=\"Frame sampling rate\")\n parser.add_argument(\"--num_labels\", type=int, default=101,\n help=\"Number of labels\")\n parser.add_argument(\"--num_channels\", type=int, default=3,\n help=\"Number of channels\")\n parser.add_argument(\"--batch_size\", type=int, default=6,\n help=\"Batch size, total over all GPUs\")\n parser.add_argument(\"--clip_per_video\", type=int, default=10,\n help=\"Number of clips to be sampled from a video\")\n parser.add_argument(\"--top_k\", type=int, default=5,\n help=\"Top k video accuracy output\")\n parser.add_argument(\"--aggregation\", type=int, default=0,\n help=\"0: avergage pool, 1: max pooling\")\n parser.add_argument(\"--load_model_path\", type=str, default=None,\n help=\"Load saved model for testing\")\n parser.add_argument(\"--use_cudnn\", type=int, default=1,\n help=\"Use CuDNN\")\n parser.add_argument(\"--pred_layer_name\", type=str, default=None,\n help=\"the prediction layer name\")\n parser.add_argument(\"--display_iter\", type=int, default=10,\n help=\"Display information every # of iterations.\")\n parser.add_argument(\"--clip_length_of\", type=int, default=8,\n help=\"Frames of optical flow data\")\n parser.add_argument(\"--sampling_rate_of\", type=int, default=2,\n help=\"Optical flow sampling rate (in frames)\")\n parser.add_argument(\"--frame_gap_of\", type=int, default=2,\n help=\"\")\n parser.add_argument(\"--do_flow_aggregation\", type=int, default=0,\n help=\"whether to aggregate optical flow across \" +\n \" multiple frames\")\n parser.add_argument(\"--flow_data_type\", type=int, default=0,\n help=\"0=Flow2C, 1=Flow3C, 2=FlowWithGray, \" +\n \"3=FlowWithRGB\")\n parser.add_argument(\"--input_type\", type=int, default=0,\n help=\"False=rgb, True=optical flow\")\n parser.add_argument(\"--get_video_id\", type=int, default=0,\n help=\"Output video id\")\n parser.add_argument(\"--use_dropout\", type=int, default=0,\n help=\"Use dropout at the prediction layer\")\n parser.add_argument(\"--use_local_file\", type=int, default=0,\n help=\"Use lmdb as a list of local filenames\")\n\n args = parser.parse_args()\n\n log.info(args)\n assert model_builder.model_validation(\n 
args.model_name,\n args.model_depth,\n args.clip_length_of if args.input_type == 1 else args.clip_length_rgb,\n args.crop_size\n )\n\n Test(args)\n\n\nif __name__ == '__main__':\n workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])\n main()\n"
]
| [
[
"numpy.max",
"numpy.mean"
]
]
|
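The `app/tools/test_net.py` record above scores every clip sampled from a video and then folds the per-clip softmax outputs into one video-level prediction via average or max pooling (`PredictionAggregation`), which is why `numpy.mean` and `numpy.max` appear in its API column. Below is a minimal, self-contained sketch of just that aggregation step; the clip count and the 101-class size (the record's default `num_labels`) are used only as illustrative shapes, and the random scores are not from any real model.

```python
import numpy as np

def aggregate_clip_predictions(clip_scores, method="mean"):
    """Collapse (clips_per_video, num_classes) softmax scores into one video-level score."""
    if method == "mean":   # average pooling over clips
        return np.mean(clip_scores, axis=0)
    if method == "max":    # max pooling over clips
        return np.max(clip_scores, axis=0)
    raise ValueError("unknown aggregation method: {}".format(method))

# Illustrative only: 10 clips sampled from one video, 101 classes.
rng = np.random.default_rng(0)
clip_scores = rng.random((10, 101))
clip_scores /= clip_scores.sum(axis=1, keepdims=True)  # fake per-clip softmax rows

video_score = aggregate_clip_predictions(clip_scores, method="mean")
print("predicted label:", int(np.argmax(video_score)))
```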
jlaneve/astro | [
"4528162c7582f3860d1d21de7af954f20c9f9a6a"
]
| [
"tests/operators/transform/test_transform.py"
]
| [
"import pathlib\n\nimport pandas as pd\nimport pytest\nfrom airflow.decorators import task\n\nfrom astro import sql as aql\nfrom astro.dataframe import dataframe as adf\nfrom astro.sql.table import Table\nfrom tests.operators import utils as test_utils\n\ncwd = pathlib.Path(__file__).parent\n\n\[email protected](\n \"sql_server\",\n [\n \"snowflake\",\n \"postgres\",\n \"bigquery\",\n \"sqlite\",\n ],\n indirect=True,\n)\ndef test_dataframe_transform(sql_server, sample_dag, test_table):\n print(\"test_dataframe_to_database\")\n\n @adf\n def get_dataframe():\n return pd.DataFrame({\"numbers\": [1, 2, 3], \"colors\": [\"red\", \"white\", \"blue\"]})\n\n @aql.transform\n def sample_pg(input_table: Table):\n return \"SELECT * FROM {{input_table}}\"\n\n @adf\n def validate_dataframe(df: pd.DataFrame):\n df.columns = df.columns.str.lower()\n df = df.sort_values(by=df.columns.tolist()).reset_index(drop=True)\n assert df.equals(\n pd.DataFrame({\"numbers\": [1, 2, 3], \"colors\": [\"red\", \"white\", \"blue\"]})\n )\n\n with sample_dag:\n my_df = get_dataframe(output_table=test_table)\n pg_df = sample_pg(my_df)\n validate_dataframe(pg_df)\n test_utils.run_dag(sample_dag)\n\n\[email protected](\n \"sql_server\",\n [\n \"snowflake\",\n \"postgres\",\n \"bigquery\",\n \"sqlite\",\n ],\n indirect=True,\n)\ndef test_transform(sql_server, sample_dag, test_table):\n @aql.transform\n def sample_function(input_table: Table):\n return \"SELECT * FROM {{input_table}} LIMIT 10\"\n\n @adf\n def validate_table(df: pd.DataFrame):\n assert len(df) == 10\n\n with sample_dag:\n homes_file = aql.load_file(\n path=str(cwd) + \"/../../data/homes.csv\",\n output_table=test_table,\n )\n first_model = sample_function(\n input_table=homes_file,\n )\n inherit_model = sample_function(\n input_table=first_model,\n )\n validate_table(inherit_model)\n test_utils.run_dag(sample_dag)\n\n\[email protected](\n \"sql_server\",\n [\n \"snowflake\",\n \"postgres\",\n \"bigquery\",\n \"sqlite\",\n ],\n indirect=True,\n)\ndef test_raw_sql(sql_server, sample_dag, test_table):\n @aql.run_raw_sql\n def raw_sql_query(my_input_table: Table, created_table: Table, num_rows: int):\n return \"SELECT * FROM {{my_input_table}} LIMIT {{num_rows}}\"\n\n @task\n def validate_raw_sql(cur):\n print(cur)\n\n with sample_dag:\n homes_file = aql.load_file(\n path=str(cwd) + \"/../../data/homes.csv\",\n output_table=test_table,\n )\n raw_sql_result = (\n raw_sql_query(\n my_input_table=homes_file,\n created_table=test_table.to_table(\n sample_dag.dag_id + \"_RAW_SQL_CREATE\"\n ),\n num_rows=5,\n handler=lambda cur: cur.fetchall(),\n ),\n )\n validate_raw_sql(raw_sql_result)\n test_utils.run_dag(sample_dag)\n"
]
| [
[
"pandas.DataFrame"
]
]
|
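The `test_transform.py` record above validates data that has made a round trip through a SQL backend by normalising the returned `pandas.DataFrame` before comparing it: columns are lower-cased, rows are sorted, and the index is reset. The following sketch isolates that normalise-then-compare idiom outside of any Airflow or database context; the example frames are invented here and are not the test fixtures from the record.

```python
import pandas as pd

def frames_equivalent(result: pd.DataFrame, expected: pd.DataFrame) -> bool:
    """Compare two frames ignoring column case, row order, and index labels."""
    result = result.copy()
    result.columns = result.columns.str.lower()
    result = result.sort_values(by=result.columns.tolist()).reset_index(drop=True)
    expected = expected.sort_values(by=expected.columns.tolist()).reset_index(drop=True)
    return result.equals(expected)

# Illustrative round trip: the "database" shuffled rows and upper-cased column names.
expected = pd.DataFrame({"numbers": [1, 2, 3], "colors": ["red", "white", "blue"]})
returned = pd.DataFrame({"NUMBERS": [3, 1, 2], "COLORS": ["blue", "red", "white"]})
print(frames_equivalent(returned, expected))  # True
```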
omkarmali/dsmp-pre-work | [
"7032b0af8a8571a638915ac88a9b9760814aa25c"
]
| [
"Loan-defaulters/code.py"
]
| [
"# --------------\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# code starts here\ndf=pd.read_csv(path)\n\ntotal_length=len(df)\nprint(total_length)\n\nfico=0\nfor i in df['fico'] > 700:\n fico+=i\nprint(fico)\n\np_a=fico/total_length\nprint(p_a)\n\ndebt=0\n\nfor i in df['purpose']=='debt_consolidation':\n debt+=i\nprint(debt)\n\np_b=debt/total_length\nprint(p_b)\n\ndf1=pd.Series(df.purpose == 'debt_consolidation')\nprint(df1)\n\np_a_b=(p_a*p_b)/p_a\nprint(p_a_b)\n\np_b_a=(p_a*p_b)/p_b\nprint(p_b_a)\n\nresult=((p_a_b*p_b_a)/p_b_a == p_a)\nprint(result)\n# code ends here\n\n\n# --------------\n# code starts here\nprob_lp=df[df['paid.back.loan']=='Yes'].shape[0]/df.shape[0]\nprint(prob_lp)\n\nprob_cs=df[df['credit.policy']=='Yes'].shape[0]/df.shape[0]\nprint(prob_cs)\n\nnew_df=df[df['paid.back.loan']=='Yes']\nprint(new_df)\n\nprob_pd_cs=new_df[new_df['credit.policy']=='Yes'].shape[0]/new_df.shape[0]\nprint(p_a_b)\n\nbayes=(prob_pd_cs*prob_lp)/prob_cs\nprint(bayes)\n\n# code ends here\n\n\n# --------------\n# code starts here\nplt.bar(df['purpose'].index,df['purpose'])\n\ndf1=df[df['paid.back.loan']=='No']\nprint(df1)\n\nplt.bar(df1['purpose'].index,df1['purpose'])\n\n\n# code ends here\n\n\n# --------------\n# code starts here\ninst_median = df['installment'].median()\ninst_mean = df['installment'].mean()\n\n\n# histogram for installment\ndf['installment'].hist(normed = True, bins=50)\nplt.axvline(x=inst_median,color='r')\nplt.axvline(x=inst_mean,color='g')\n\nplt.show()\n\n#histogram for log anual income\ndf['log.annual.inc'].hist(normed = True, bins=50)\nplt.show()\n\n\n\n# code ends here\n\n\n"
]
| [
[
"matplotlib.pyplot.show",
"pandas.Series",
"matplotlib.pyplot.axvline",
"pandas.read_csv",
"matplotlib.pyplot.bar"
]
]
|
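The `Loan-defaulters/code.py` record above estimates event probabilities from boolean masks on a loans dataframe and combines them with Bayes' rule (`bayes = prob_pd_cs * prob_lp / prob_cs`). The sketch below shows the same empirical calculation on a tiny synthetic frame; the column names `paid.back.loan` and `credit.policy` are reused from the record, but the rows are made up for illustration.

```python
import pandas as pd

# Tiny synthetic stand-in for the loans dataset used in the record.
df = pd.DataFrame({
    "paid.back.loan": ["Yes", "Yes", "No", "Yes", "No", "Yes"],
    "credit.policy":  ["Yes", "No",  "No", "Yes", "Yes", "Yes"],
})

p_paid = (df["paid.back.loan"] == "Yes").mean()                # P(A)
p_policy = (df["credit.policy"] == "Yes").mean()               # P(B)
paid = df[df["paid.back.loan"] == "Yes"]
p_policy_given_paid = (paid["credit.policy"] == "Yes").mean()  # P(B | A)

# Bayes' rule: P(A | B) = P(B | A) * P(A) / P(B)
p_paid_given_policy = p_policy_given_paid * p_paid / p_policy
print(round(p_paid_given_policy, 3))  # 0.75 for this toy frame
```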
wadhikar/napari | [
"954fc69a5c6939c473021994d41ff14a384399f4"
]
| [
"napari/_qt/layer_controls/qt_image_controls_base.py"
]
| [
"from contextlib import suppress\nfrom functools import partial\n\nimport numpy as np\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtGui import QImage, QPixmap\nfrom qtpy.QtWidgets import QLabel, QPushButton, QSlider\n\nfrom ...utils.colormaps import AVAILABLE_COLORMAPS\nfrom ...utils.translations import trans\nfrom ..utils import qt_signals_blocked\nfrom ..widgets.qt_range_slider import QHRangeSlider\nfrom ..widgets.qt_range_slider_popup import QRangeSliderPopup\nfrom .qt_colormap_combobox import QtColormapComboBox\nfrom .qt_layer_controls_base import QtLayerControls\n\n\nclass QtBaseImageControls(QtLayerControls):\n \"\"\"Superclass for classes requiring colormaps, contrast & gamma sliders.\n\n This class is never directly instantiated anywhere.\n It is subclassed by QtImageControls and QtSurfaceControls.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n An instance of a napari layer.\n\n Attributes\n ----------\n clim_pop : napari._qt.qt_range_slider_popup.QRangeSliderPopup\n Popup widget launching the contrast range slider.\n colorbarLabel : qtpy.QtWidgets.QLabel\n Label text of colorbar widget.\n colormapComboBox : qtpy.QtWidgets.QComboBox\n Dropdown widget for selecting the layer colormap.\n contrastLimitsSlider : qtpy.QtWidgets.QHRangeSlider\n Contrast range slider widget.\n gammaSlider : qtpy.QtWidgets.QSlider\n Gamma adjustment slider widget.\n layer : napari.layers.Layer\n An instance of a napari layer.\n\n \"\"\"\n\n def __init__(self, layer):\n super().__init__(layer)\n\n self.layer.events.colormap.connect(self._on_colormap_change)\n self.layer.events.gamma.connect(self._on_gamma_change)\n self.layer.events.contrast_limits.connect(\n self._on_contrast_limits_change\n )\n\n comboBox = QtColormapComboBox(self)\n comboBox.setObjectName(\"colormapComboBox\")\n comboBox._allitems = set(self.layer.colormaps)\n\n for name, cm in AVAILABLE_COLORMAPS.items():\n if name in self.layer.colormaps:\n comboBox.addItem(cm._display_name, name)\n\n comboBox.activated[str].connect(self.changeColor)\n self.colormapComboBox = comboBox\n\n # Create contrast_limits slider\n self.contrastLimitsSlider = QHRangeSlider(\n self.layer.contrast_limits,\n self.layer.contrast_limits_range,\n parent=self,\n )\n self.contrastLimitsSlider.mousePressEvent = self._clim_mousepress\n set_clim = partial(setattr, self.layer, 'contrast_limits')\n set_climrange = partial(setattr, self.layer, 'contrast_limits_range')\n self.contrastLimitsSlider.valuesChanged.connect(set_clim)\n self.contrastLimitsSlider.rangeChanged.connect(set_climrange)\n\n # gamma slider\n sld = QSlider(Qt.Horizontal, parent=self)\n sld.setFocusPolicy(Qt.NoFocus)\n sld.setMinimum(2)\n sld.setMaximum(200)\n sld.setSingleStep(2)\n sld.setValue(100)\n sld.valueChanged.connect(self.gamma_slider_changed)\n self.gammaSlider = sld\n self._on_gamma_change()\n\n self.colorbarLabel = QLabel(parent=self)\n self.colorbarLabel.setObjectName('colorbar')\n self.colorbarLabel.setToolTip(trans._('Colorbar'))\n\n self._on_colormap_change()\n\n def changeColor(self, text):\n \"\"\"Change colormap on the layer model.\n\n Parameters\n ----------\n text : str\n Colormap name.\n \"\"\"\n self.layer.colormap = self.colormapComboBox.currentData()\n\n def _clim_mousepress(self, event):\n \"\"\"Update the slider, or, on right-click, pop-up an expanded slider.\n\n The expanded slider provides finer control, directly editable values,\n and the ability to change the available range of the sliders.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari 
event that triggered this method.\n \"\"\"\n if event.button() == Qt.RightButton:\n self.clim_pop = create_range_popup(\n self.layer, 'contrast_limits', parent=self\n )\n self.clim_pop.finished.connect(self.clim_pop.deleteLater)\n reset, fullrange = create_clim_reset_buttons(self.layer)\n self.clim_pop.layout.addWidget(reset)\n if fullrange is not None:\n self.clim_pop.layout.addWidget(fullrange)\n self.clim_pop.move_to('top', min_length=650)\n self.clim_pop.show()\n else:\n return QHRangeSlider.mousePressEvent(\n self.contrastLimitsSlider, event\n )\n\n def _on_contrast_limits_change(self, event=None):\n \"\"\"Receive layer model contrast limits change event and update slider.\n\n Parameters\n ----------\n event : napari.utils.event.Event, optional\n The napari event that triggered this method, by default None.\n \"\"\"\n with qt_signals_blocked(self.contrastLimitsSlider):\n self.contrastLimitsSlider.setRange(\n self.layer.contrast_limits_range\n )\n self.contrastLimitsSlider.setValues(self.layer.contrast_limits)\n\n # clim_popup will throw an AttributeError if not yet created\n # and a RuntimeError if it has already been cleaned up.\n # we only want to update the slider if it's active\n with suppress(AttributeError, RuntimeError):\n self.clim_pop.slider.setRange(self.layer.contrast_limits_range)\n with qt_signals_blocked(self.clim_pop.slider):\n clims = self.layer.contrast_limits\n self.clim_pop.slider.setValues(clims)\n self.clim_pop._on_values_change(clims)\n\n def _on_colormap_change(self, event=None):\n \"\"\"Receive layer model colormap change event and update dropdown menu.\n\n Parameters\n ----------\n event : napari.utils.event.Event, optional\n The napari event that triggered this method, by default None.\n \"\"\"\n name = self.layer.colormap.name\n if name not in self.colormapComboBox._allitems:\n cm = AVAILABLE_COLORMAPS.get(name)\n if cm:\n self.colormapComboBox._allitems.add(name)\n self.colormapComboBox.addItem(cm._display_name, name)\n\n if name != self.colormapComboBox.currentData():\n index = self.colormapComboBox.findData(name)\n self.colormapComboBox.setCurrentIndex(index)\n\n # Note that QImage expects the image width followed by height\n cbar = self.layer.colormap.colorbar\n image = QImage(\n cbar,\n cbar.shape[1],\n cbar.shape[0],\n QImage.Format_RGBA8888,\n )\n self.colorbarLabel.setPixmap(QPixmap.fromImage(image))\n\n def gamma_slider_changed(self, value):\n \"\"\"Change gamma value on the layer model.\n\n Parameters\n ----------\n value : float\n Gamma adjustment value.\n https://en.wikipedia.org/wiki/Gamma_correction\n \"\"\"\n self.layer.gamma = value / 100\n\n def _on_gamma_change(self, event=None):\n \"\"\"Receive the layer model gamma change event and update the slider.\n\n Parameters\n ----------\n event : napari.utils.event.Event, optional\n The napari event that triggered this method, by default None.\n \"\"\"\n with qt_signals_blocked(self.gammaSlider):\n self.gammaSlider.setValue(int(self.layer.gamma * 100))\n\n def closeEvent(self, event):\n self.deleteLater()\n event.accept()\n\n\ndef create_range_popup(layer, attr, parent=None):\n \"\"\"Create a QRangeSliderPopup linked to a specific layer attribute.\n\n This assumes the layer has an attribute named both `attr` and `attr`_range.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n probably an instance of Image or Surface layer\n attr : str\n the attribute to control with the slider.\n parent : QWidget\n probably an instance of QtLayerControls. 
important for styling.\n\n Returns\n -------\n QRangeSliderPopup\n\n Raises\n ------\n AttributeError\n if `layer` does not have an attribute named `{attr}_range`\n \"\"\"\n range_attr = f'{attr}_range'\n if not hasattr(layer, range_attr):\n raise AttributeError(\n trans._(\n 'Layer {layer} must have attribute {range_attr} '\n 'to use \"create_range_popup\"',\n deferred=True,\n layer=layer,\n range_attr=range_attr,\n )\n )\n is_integer_type = np.issubdtype(layer.dtype, np.integer)\n\n d_range = getattr(layer, range_attr)\n popup = QRangeSliderPopup(\n initial_values=getattr(layer, attr),\n data_range=d_range,\n collapsible=False,\n precision=(\n 0\n if is_integer_type\n # scale precision with the log of the data range order of magnitude\n # eg. 0 - 1 (0 order of mag) -> 3 decimal places\n # 0 - 10 (1 order of mag) -> 2 decimals\n # 0 - 100 (2 orders of mag) -> 1 decimal\n # ≥ 3 orders of mag -> no decimals\n else int(max(3 - np.log10(max(d_range[1] - d_range[0], 0.01)), 0))\n ),\n parent=parent,\n )\n\n set_values = partial(setattr, layer, attr)\n set_range = partial(setattr, layer, range_attr)\n popup.slider.valuesChanged.connect(set_values)\n popup.slider.rangeChanged.connect(set_range)\n return popup\n\n\ndef create_clim_reset_buttons(layer):\n \"\"\"Create contrast limits reset and full range buttons.\n\n Important: consumers of this function should check whether range_btn is\n not None before adding the widget to a layout. Adding None to a layout\n can cause a segmentation fault.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n Image or Surface Layer\n\n Returns\n -------\n 2-tuple\n If layer data type is integer type, returns (reset_btn, range_btn).\n Else, returns (reset_btn, None)\n \"\"\"\n\n def reset():\n layer.reset_contrast_limits()\n layer.contrast_limits_range = layer.contrast_limits\n\n def reset_range():\n layer.reset_contrast_limits_range()\n\n reset_btn = QPushButton(\"reset\")\n reset_btn.setObjectName(\"reset_clims_button\")\n reset_btn.setToolTip(trans._(\"autoscale contrast to data range\"))\n reset_btn.setFixedWidth(40)\n reset_btn.clicked.connect(reset)\n\n range_btn = None\n # the \"full range\" button doesn't do anything if it's not an\n # unsigned integer type (it's unclear what range should be set)\n # so we don't show create it at all.\n if np.issubdtype(layer.dtype, np.integer):\n range_btn = QPushButton(\"full range\")\n range_btn.setObjectName(\"full_clim_range_button\")\n range_btn.setToolTip(trans._(\"set contrast range to full bit-depth\"))\n range_btn.setFixedWidth(65)\n range_btn.clicked.connect(reset_range)\n\n return reset_btn, range_btn\n"
]
| [
[
"numpy.issubdtype"
]
]
|
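In the napari record above, `create_range_popup` chooses how many decimal places the contrast-limits slider shows from the layer dtype (`numpy.issubdtype` against `np.integer`) and the order of magnitude of the data range. Here is a minimal reimplementation of just that heuristic, detached from any Qt widget; the function name is my own, while the clamping constants mirror the record.

```python
import numpy as np

def slider_precision(dtype, data_range):
    """Decimal places for a range slider: 0 for integer data, otherwise scale
    precision inversely with the log10 of the data range (3 .. 0 decimals)."""
    if np.issubdtype(dtype, np.integer):
        return 0
    lo, hi = data_range
    span = max(hi - lo, 0.01)                # avoid log10 of zero or negative spans
    return int(max(3 - np.log10(span), 0))

print(slider_precision(np.uint8, (0, 255)))         # 0 (integer data)
print(slider_precision(np.float32, (0.0, 1.0)))     # 3
print(slider_precision(np.float32, (0.0, 100.0)))   # 1
print(slider_precision(np.float32, (0.0, 5000.0)))  # 0
```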
yxoos/CorrelationMatrix | [
"556443949be007fef9739c8a25866ea2e8fdee58"
]
| [
"Correlation_matrix.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 3 14:58:50 2019\n\n@author: Amirh\n\ncorrelation matrix, collinearity problem\n\n\"\"\"\nfrom scipy.stats import pearsonr\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import norm\nfrom scipy import interpolate\n#%%\ndef correlation_plot(data):\n \"\"\"\n data: is MxN numpy array where M is the number of samples and N is the \n number of features per sample.\n \n \"\"\"\n data = data.T\n ds = data.shape\n \n fig,ax = plt.subplots(nrows=ds[0], ncols=ds[0],figsize=(ds[0],ds[0]))\n \n # Changing the number of ticks per subplot\n for axi in ax.flat:\n axi.xaxis.set_major_locator(plt.MaxNLocator(2))\n axi.yaxis.set_major_locator(plt.MaxNLocator(2)) \n \n # plotting each subplot \n for i in range(ds[0]):\n for j in range(ds[0]):\n if i == j:\n # plotting histograms of each variable\n n, bins, patches=ax[i,j].hist(data[i],density=True)\n \n # plotting distribution function and using it to fit a gaussian\n mu, std = norm.fit(data[i])\n p = norm.pdf(bins, mu, std)\n ax[i,j].plot(bins, p, 'r--', linewidth=2)\n ax[i,j].set_xticks([])\n ax[i,j].set_yticks([])\n if j == ds[0]-1:\n ax[i,j].set_ylabel(\"var_%s\"%(i+1),fontsize=11).set_color(\"red\")\n ax[i,j].yaxis.set_label_position(\"right\")\n \n if i == 0 and j == 0:\n ax[i,j].set_title(\"var_%s\"%(i+1),fontsize=11).set_color(\"red\")\n \n elif i < j:\n prs=pearsonr(data[i],data[j])[0]\n if prs >= 0.5 or prs <= -0.5:\n ax[i,j].text(0.5,0.5,str(prs)[0:4],fontsize=24,horizontalalignment='center',verticalalignment='center') \n ax[i,j].text(0.8,0.8,\"***\",color='r',fontsize=16,horizontalalignment='center',verticalalignment='center') \n elif (prs <= -0.45 and prs >= -0.50) or (prs >= 0.45 and prs <= 0.50):\n ax[i,j].text(0.5,0.5,str(prs)[0:4],fontsize=18,horizontalalignment='center',verticalalignment='center') \n ax[i,j].text(0.8,0.8,\"**\",color='r',fontsize=16,horizontalalignment='center',verticalalignment='center') \n elif (prs <= -0.4 and prs > -0.45) or (prs >= 0.4 and prs < 0.45):\n ax[i,j].text(0.5,0.5,str(prs)[0:4],fontsize=16,horizontalalignment='center',verticalalignment='center') \n ax[i,j].text(0.8,0.8,\"*\",color='r',fontsize=16,horizontalalignment='center',verticalalignment='center')\n else: \n ax[i,j].text(0.5,0.5,str(pearsonr(data[i],data[j])[0])[0:4],fontsize=10,horizontalalignment='center',verticalalignment='center') \n \n ax[i,j].set_xticks([])\n ax[i,j].set_yticks([])\n \n if i ==0:\n ax[i,j].set_title(\"var_%s\"%(j+1),fontsize=11).set_color(\"red\")\n ax[i,j].set_xticks([])\n ax[i,j].set_yticks([])\n if j == ds[0]-1:\n ax[i,j].set_ylabel(\"var_%s\"%(i+1),fontsize=11).set_color(\"red\")\n ax[i,j].yaxis.set_label_position(\"right\")\n \n elif i > j:\n ax[i,j].scatter(data[i],data[j],s=10,c='k') \n rnge= data[i].max()-data[i].min()\n ax[i,j].set_ylim(-0.2*rnge,1.2*rnge)\n ax[i,j].set_xlim(-0.2*rnge,1.2*rnge) \n \n if i!=0 and i!=ds[0]-1:\n if j==0:\n ax[i,j].set_xticks([])\n elif j!=0:\n ax[i,j].set_xticks([])\n ax[i,j].set_yticks([])\n \n if j!=0 and j!=ds[0]-1 and i==ds[0]-1:\n ax[i,j].set_yticks([])\n \n plt.subplots_adjust(wspace=0, hspace=0)\n\n \n"
]
| [
[
"scipy.stats.norm.pdf",
"matplotlib.pyplot.MaxNLocator",
"matplotlib.pyplot.subplots",
"scipy.stats.pearsonr",
"scipy.stats.norm.fit",
"matplotlib.pyplot.subplots_adjust"
]
]
|
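The `Correlation_matrix.py` record above annotates the upper-triangle panels of a scatter matrix with Pearson coefficients and stars pairs whose |r| crosses the 0.4/0.45/0.5 thresholds. Since most of that file is Matplotlib layout code, the sketch below isolates only the correlation-and-flagging logic on random data; the 0.5 cutoff is taken from the record, the data is synthetic.

```python
import numpy as np
from scipy.stats import pearsonr

def flag_correlations(data, threshold=0.5):
    """data: (M samples, N features). Return (i, j, r) for feature pairs with |r| >= threshold."""
    n_features = data.shape[1]
    flagged = []
    for i in range(n_features):
        for j in range(i + 1, n_features):
            r, _ = pearsonr(data[:, i], data[:, j])
            if abs(r) >= threshold:
                flagged.append((i, j, round(float(r), 3)))
    return flagged

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 3))
x[:, 2] = 0.9 * x[:, 0] + 0.1 * rng.normal(size=200)  # make features 0 and 2 collinear
print(flag_correlations(x))  # expect the (0, 2) pair to be flagged
```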
xiao-aBoy/Microbiome-based-disease-prediction-with-prototypical-network | [
"a981b7fa4cb4770a090d341c806d284d21b00d0c"
]
| [
"net/AM1.py"
]
| [
"# -*- coding: utf-8 -*-\r\nimport warnings\r\nimport torch\r\nimport torch.nn as nn\r\nimport random\r\nimport torch.nn.functional as F\r\nfrom sklearn.feature_selection import f_classif, SelectKBest\r\nfrom sklearn.metrics import accuracy_score, roc_auc_score, f1_score\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom torch import optim\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\ntorch.manual_seed(1)\r\ntorch.cuda.manual_seed(1)\r\nnp.random.seed(1)\r\nrandom.seed(1)\r\n\r\n\r\n\"\"\"\r\n\r\nAdaptive_Cross_Modal_PN is the implementation of AMPN in the paper\r\n\r\n\"\"\"\r\n\r\nclass Adaptive_Cross_Modal_PN(nn.Module):\r\n def __init__(self, species_in_feature, pathway_in_feature,num_class, embedding_dim,\r\n support_num, query_num, distance='euclidean', mixup_data=False):\r\n super(Adaptive_Cross_Modal_PN, self).__init__()\r\n\r\n self.num_class = num_class\r\n self.embedding_dim = embedding_dim\r\n self.support_num = support_num\r\n self.query_num = query_num\r\n self.distance = distance\r\n self.prototypical = None\r\n self.prototypes = []\r\n self.mixup_data = mixup_data\r\n\r\n self.species_feature_extraction = nn.Sequential(\r\n nn.Linear(in_features=species_in_feature, out_features=1024),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(p=0.5),\r\n nn.Linear(in_features=1024, out_features=embedding_dim),\r\n )\r\n\r\n self.pathway_feature_extraction = nn.Sequential(\r\n nn.Linear(in_features=pathway_in_feature, out_features=pathway_in_feature * 2),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(p=0.5),\r\n nn.Linear(in_features=pathway_in_feature * 2, out_features=embedding_dim)\r\n )\r\n\r\n self.fusion_factor = nn.Sequential(\r\n nn.Linear(in_features=embedding_dim, out_features= embedding_dim * 2),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(p=0.5),\r\n nn.Linear(in_features=embedding_dim * 2, out_features=embedding_dim),\r\n )\r\n\r\n def forward(self, species_support_input,pathway_support_input,\r\n species_query_input,pathway_query_input):\r\n\r\n species_support_embedding = self.species_feature_extraction(species_support_input)\r\n species_query_embedding = self.species_feature_extraction(species_query_input)\r\n pathway_support_embedding = self.pathway_feature_extraction(pathway_support_input)\r\n # pathway_query_embedding = self.pathway_feature_extraction(pathway_query_input)\r\n support_size = species_support_embedding.shape[0]\r\n every_class_num = support_size // self.num_class\r\n\r\n negtive_pathway_prototype = torch.mean(pathway_support_embedding[0:every_class_num, :], dim=0)\r\n postive_pathway_prototype= torch.mean(pathway_support_embedding[every_class_num:, :], dim=0)\r\n\r\n negtive_factor = self.fusion_factor(negtive_pathway_prototype)\r\n postive_factor = self.fusion_factor(postive_pathway_prototype)\r\n\r\n negtive_factor = 1 / (1 + torch.exp(-1 * negtive_factor))\r\n postive_factor = 1 / (1 + torch.exp(-1 * postive_factor))\r\n\r\n class_meta_dict = {}\r\n\r\n negtive_species_prototype= torch.mean(species_support_embedding[0 : every_class_num, :], dim=0)\r\n\r\n\r\n class_meta_dict[0] = postive_factor* negtive_species_prototype + \\\r\n (1 - postive_factor) * negtive_pathway_prototype\r\n\r\n postive_species_prototype = torch.mean(species_support_embedding[every_class_num:, :], dim=0)\r\n\r\n class_meta_dict[1] = negtive_factor * postive_species_prototype + \\\r\n (1 - negtive_factor) * postive_pathway_prototype\r\n\r\n\r\n class_meta_information = 
torch.zeros(size=[len(class_meta_dict), species_support_embedding.shape[1]])\r\n\r\n for key, item in class_meta_dict.items():\r\n class_meta_information[key, :] = class_meta_dict[key]\r\n\r\n self.prototypical = class_meta_information\r\n\r\n self.prototypes.append(class_meta_information.detach().numpy())\r\n\r\n N_query = species_query_embedding.shape[0]\r\n result = torch.zeros(size=[N_query, self.num_class])\r\n\r\n\r\n for i in range(0, N_query):\r\n temp_value = species_query_embedding[i].repeat(self.num_class, 1)\r\n dist_value = 0\r\n if self.distance == 'euclidean':\r\n dist_value = F.pairwise_distance(self.prototypical, temp_value, p=2)\r\n elif self.distance == 'cosine':\r\n dist_value = torch.cosine_similarity(self.prototypical, temp_value, dim=1)\r\n dist_value = 1 - dist_value\r\n result[i] = -1 * dist_value\r\n\r\n return result\r\n\r\n def randomGenerate(self, species_X,pathway_X, Y):\r\n\r\n postive_index = np.where(Y == 1)[0]\r\n negtive_index = np.where(Y == 0)[0]\r\n\r\n pos_support_index = np.random.choice(postive_index, self.support_num // 2, replace=False)\r\n\r\n neg_support_index = np.random.choice(negtive_index, self.support_num // 2, replace=False)\r\n\r\n support_index = np.concatenate((neg_support_index, pos_support_index), axis=0)\r\n species_support_input = species_X[support_index, :]\r\n pathway_support_input = pathway_X[support_index, :]\r\n support_label = Y[support_index]\r\n\r\n pos_query_index = np.random.choice([index for index in postive_index if index not in pos_support_index],\r\n self.query_num // 2, replace=False)\r\n neg_query_index = np.random.choice([index for index in negtive_index if index not in neg_support_index],\r\n self.query_num // 2, replace=False)\r\n query_index = np.concatenate((neg_query_index, pos_query_index), axis=0)\r\n species_query_input = species_X[query_index,:]\r\n pathway_query_input = pathway_X[query_index,:]\r\n query_label = Y[query_index]\r\n\r\n species_support_input = torch.tensor(species_support_input, dtype=torch.float)\r\n pathway_support_input = torch.tensor(pathway_support_input, dtype=torch.float)\r\n species_query_input = torch.tensor(species_query_input, dtype=torch.float)\r\n pathway_query_input = torch.tensor(pathway_query_input, dtype=torch.float)\r\n support_label = torch.tensor(support_label, dtype=torch.long)\r\n query_label = torch.tensor(query_label, dtype=torch.long)\r\n\r\n return species_support_input,pathway_support_input, \\\r\n species_query_input,pathway_query_input, support_label, query_label\r\n\r\n def fit(self, species_X,pathway_X, Y, optimizer, criterion, EPOCH):\r\n loss_list = []\r\n for epoch in range(EPOCH):\r\n self.train()\r\n optimizer.zero_grad()\r\n species_support_input, pathway_support_input, \\\r\n species_query_input, pathway_query_input, support_label, query_label \\\r\n = self.randomGenerate(species_X,pathway_X, Y)\r\n\r\n output = self.forward(species_support_input,pathway_support_input,\r\n species_query_input,pathway_query_input)\r\n loss = criterion(output, query_label)\r\n\r\n loss.backward()\r\n optimizer.step()\r\n loss_list.append(loss.item())\r\n print(\"Epoch number:{},Current loss:{:.4f}\\n\".format(epoch, loss.item()))\r\n\r\n return loss_list\r\n\r\n\r\n def predict(self, specie_X_test):\r\n self.eval()\r\n specie_X_test = torch.tensor(specie_X_test, dtype=torch.float)\r\n species_embedding = self.species_feature_extraction(specie_X_test)\r\n result = torch.zeros(size=[species_embedding.shape[0], self.num_class])\r\n for i in range(0, 
species_embedding.shape[0]):\r\n temp_value = species_embedding[i].repeat(self.num_class, 1)\r\n dist_value = 0\r\n if self.distance == 'euclidean':\r\n dist_value = F.pairwise_distance(self.prototypical, temp_value, p=2)\r\n elif self.distance == 'cosine':\r\n dist_value = torch.cosine_similarity(self.prototypical, temp_value, dim=1)\r\n dist_value = 1 - dist_value\r\n result[i] = -1 * dist_value\r\n result = F.softmax(result, dim=1)\r\n pre_Y = result[:, 0] < result[:, 1]\r\n pre_Y = pre_Y.detach().numpy().astype(int)\r\n prob_Y = result[:, 1].detach().numpy()\r\n return pre_Y, prob_Y\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n pathway_df = pd.read_csv(\"../../data/curatedMetagenomicData/NielsenHB_2014/NielsenHB_2014.pathcoverage.csv\",\r\n sep=',', index_col=0).T\r\n\r\n label_df = pd.read_table(\"../../data/curatedMetagenomicData/NielsenHB_2014/NielsenHB_2014_pData.csv\",\r\n sep=\",\", index_col=0)[['disease']]\r\n label_df.loc[label_df[\"disease\"] == \"healthy\", \"disease\"] = 0\r\n label_df.loc[label_df[\"disease\"] == \"IBD\", \"disease\"] = 1\r\n\r\n data_df1 = pathway_df.iloc[:, 2:].join(label_df).dropna()\r\n\r\n data_arr1 = np.array(data_df1)\r\n\r\n pathway_X = data_arr1[:, 2:-1].astype(np.float)\r\n\r\n species_df = pd.read_table(\"../../data/curatedMetagenomicData/NielsenHB_2014/counts/NielsenHB_2014_counts_species.csv\",\r\n sep=\",\", index_col=0).dropna(axis=1)\r\n\r\n log_df = species_df.apply(np.log1p, axis=1).T\r\n\r\n\r\n data_df2 = log_df.join(label_df).dropna()\r\n\r\n data_arr2 = np.array(data_df2)\r\n\r\n species_X = data_arr2[:, :-1].astype(np.float)\r\n y = data_arr2[:, -1].astype(np.int)\r\n\r\n acc = []\r\n auc = []\r\n f1 = []\r\n\r\n for i in range(3):\r\n kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=random.randint(0, 10 ** 9))\r\n cur_acc = []\r\n cur_auc = []\r\n cur_f1 = []\r\n for train_index, test_index in kf.split(species_X, y):\r\n\r\n species_X_train, species_X_test = species_X[train_index], species_X[test_index]\r\n pathway_X_train, pathway_X_test = pathway_X[train_index], pathway_X[test_index]\r\n y_train, y_test = y[train_index], y[test_index]\r\n\r\n s_filter_column = np.sum((species_X_train != 0), axis=0) > species_X_train.shape[0] * 0.2\r\n species_X_train = species_X_train[:, s_filter_column]\r\n species_X_test = species_X_test[:, s_filter_column]\r\n\r\n p_filter_column = np.sum((pathway_X_train != 0), axis=0) > pathway_X_train.shape[0] * 0.2\r\n pathway_X_train = pathway_X_train[:, p_filter_column]\r\n pathway_X_test = pathway_X_test[:, p_filter_column]\r\n\r\n\r\n\r\n s_std = StandardScaler()\r\n s_std.fit(species_X_train,y_train)\r\n species_X_train = s_std.transform(species_X_train)\r\n species_X_test = s_std.transform(species_X_test)\r\n\r\n p_std = StandardScaler()\r\n p_std.fit(pathway_X_train,y_train)\r\n pathway_X_train = p_std.transform(pathway_X_train)\r\n pathway_X_test = p_std.transform(pathway_X_test)\r\n\r\n skb = SelectKBest(f_classif, k=256)\r\n # skb = UnivariateFilter(f_ratio_measure, select_k_best(256))\r\n skb.fit(pathway_X_train, y_train)\r\n pathway_X_train = skb.transform(pathway_X_train)\r\n pathway_X_test = skb.transform(pathway_X_test)\r\n\r\n acmp = Adaptive_Cross_Modal_PN(species_X_train.shape[1],\r\n pathway_X_train.shape[1],2,64,100,100)\r\n optimer = optim.Adam(acmp.parameters(), lr=0.001, weight_decay=0.001)\r\n criterion = nn.CrossEntropyLoss()\r\n acmp.fit(species_X_train,pathway_X_train,y_train,optimer,criterion,100)\r\n pre_y, prob_y = 
acmp.predict(species_X_test)\r\n\r\n\r\n cur_acc.append(accuracy_score(pre_y, y_test))\r\n cur_auc.append(roc_auc_score(y_test, prob_y))\r\n cur_f1.append(f1_score(y_test, pre_y))\r\n\r\n\r\n acc.append(np.mean(np.array(cur_acc)))\r\n auc.append(np.mean(np.array(cur_auc)))\r\n f1.append(np.mean(np.array(cur_f1)))\r\n\r\n print(\"IPrototypical Net ACC:\" + str(np.mean(np.array(acc))) + \" AUC:\" + str(np.mean(np.array(auc))) +\r\n \" F1-micro:\" + str(np.mean(np.array(f1))))\r\n\r\n"
]
| [
[
"torch.nn.Linear",
"torch.cuda.manual_seed",
"numpy.random.choice",
"numpy.where",
"sklearn.feature_selection.SelectKBest",
"pandas.read_csv",
"sklearn.metrics.f1_score",
"torch.exp",
"torch.nn.CrossEntropyLoss",
"numpy.concatenate",
"pandas.read_table",
"torch.nn.functional.pairwise_distance",
"sklearn.metrics.accuracy_score",
"torch.manual_seed",
"torch.tensor",
"torch.zeros",
"torch.cosine_similarity",
"numpy.array",
"torch.nn.ReLU",
"torch.nn.functional.softmax",
"sklearn.metrics.roc_auc_score",
"torch.nn.Dropout",
"sklearn.preprocessing.StandardScaler",
"numpy.random.seed",
"numpy.sum",
"torch.mean"
]
]
|
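The `net/AM1.py` record above is built around the standard prototypical-network step: average the embedded support samples of each class into a prototype (`torch.mean`), then score each query by its negative distance to every prototype (`F.pairwise_distance`, or cosine). The sketch below reproduces just that step with random embeddings; the embedding size, class count, and support/query splits are illustrative assumptions, not the record's fusion model.

```python
import torch
import torch.nn.functional as F

def prototype_logits(support_emb, support_labels, query_emb, num_classes=2):
    """Return (num_query, num_classes) logits = negative euclidean distance to each class prototype."""
    prototypes = torch.stack([
        support_emb[support_labels == c].mean(dim=0) for c in range(num_classes)
    ])                                            # (num_classes, dim)
    logits = torch.zeros(query_emb.shape[0], num_classes)
    for i, q in enumerate(query_emb):
        dists = F.pairwise_distance(prototypes, q.repeat(num_classes, 1), p=2)
        logits[i] = -dists                        # closer prototype -> larger logit
    return logits

torch.manual_seed(0)
support = torch.randn(20, 64)                     # 10 negative + 10 positive support embeddings
labels = torch.cat([torch.zeros(10), torch.ones(10)]).long()
query = torch.randn(5, 64)
pred = prototype_logits(support, labels, query).softmax(dim=1).argmax(dim=1)
print(pred)
```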
supervisely-ecosystem/ritm-training | [
"5fc9617b21449f147d99383f501e75f2067ac721"
]
| [
"isegm/data/datasets/coco_lvis.py"
]
| [
"from pathlib import Path\nimport pickle\nimport random\nimport numpy as np\nimport json\nimport cv2\nfrom copy import deepcopy\nfrom isegm.data.base import ISDataset\nfrom isegm.data.sample import DSample\n\n\nclass CocoLvisDataset(ISDataset):\n def __init__(self, dataset_path, split='train', stuff_prob=0.0,\n allow_list_name=None, anno_file='hannotation.pickle', **kwargs):\n super(CocoLvisDataset, self).__init__(**kwargs)\n dataset_path = Path(dataset_path)\n self._split_path = dataset_path / split\n self.split = split\n self._images_path = self._split_path / 'images'\n self._masks_path = self._split_path / 'masks'\n self.stuff_prob = stuff_prob\n\n with open(self._split_path / anno_file, 'rb') as f:\n self.dataset_samples = sorted(pickle.load(f).items())\n\n if allow_list_name is not None:\n allow_list_path = self._split_path / allow_list_name\n with open(allow_list_path, 'r') as f:\n allow_images_ids = json.load(f)\n allow_images_ids = set(allow_images_ids)\n\n self.dataset_samples = [sample for sample in self.dataset_samples\n if sample[0] in allow_images_ids]\n\n def get_sample(self, index) -> DSample:\n image_id, sample = self.dataset_samples[index]\n image_path = self._images_path / f'{image_id}.jpg'\n\n image = cv2.imread(str(image_path))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n packed_masks_path = self._masks_path / f'{image_id}.pickle'\n with open(packed_masks_path, 'rb') as f:\n encoded_layers, objs_mapping = pickle.load(f)\n layers = [cv2.imdecode(x, cv2.IMREAD_UNCHANGED) for x in encoded_layers]\n layers = np.stack(layers, axis=2)\n\n instances_info = deepcopy(sample['hierarchy'])\n for inst_id, inst_info in list(instances_info.items()):\n if inst_info is None:\n inst_info = {'children': [], 'parent': None, 'node_level': 0}\n instances_info[inst_id] = inst_info\n inst_info['mapping'] = objs_mapping[inst_id]\n\n if self.stuff_prob > 0 and random.random() < self.stuff_prob:\n for inst_id in range(sample['num_instance_masks'], len(objs_mapping)):\n instances_info[inst_id] = {\n 'mapping': objs_mapping[inst_id],\n 'parent': None,\n 'children': []\n }\n else:\n for inst_id in range(sample['num_instance_masks'], len(objs_mapping)):\n layer_indx, mask_id = objs_mapping[inst_id]\n layers[:, :, layer_indx][layers[:, :, layer_indx] == mask_id] = 0\n\n return DSample(image, layers, objects=instances_info)\n"
]
| [
[
"numpy.stack"
]
]
|
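The `coco_lvis.py` record above decodes a list of per-layer instance masks, packs them into a single (H, W, L) array with `numpy.stack`, and zeroes out mask ids that are not kept as instances. Here is a small sketch of that stack-and-filter step using synthetic mask layers in place of the pickled, `cv2.imdecode`-encoded inputs of the original.

```python
import numpy as np

# Two synthetic 4x4 mask layers; non-zero values are per-layer object ids.
layer0 = np.array([[1, 1, 0, 0],
                   [1, 1, 0, 0],
                   [0, 0, 2, 2],
                   [0, 0, 2, 2]], dtype=np.uint8)
layer1 = np.zeros((4, 4), dtype=np.uint8)
layer1[1:3, 1:3] = 1

layers = np.stack([layer0, layer1], axis=2)       # (H, W, num_layers), as in get_sample()

# Drop one object (layer 0, mask id 2) the way the loader drops non-instance masks.
layer_idx, mask_id = 0, 2
layers[:, :, layer_idx][layers[:, :, layer_idx] == mask_id] = 0
print(np.unique(layers[:, :, 0]))                 # [0 1]
```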
trzhang0116/HRAC | [
"205ec4c68ee6a0b9a7e162f1d91ae60cbc6b7bb8"
]
| [
"discrete/agent.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport os\nimport time\nimport datetime\nimport json\nimport numpy as np\nimport pandas as pd\n\nfrom env import MazeEnv, KeyChestEnv\nfrom model import HRLNet, ANet\nfrom memory import HighLevelReplayBuffer, LowLevelMemory, TrajectoryMemory\nfrom metric import train_anet\nimport utils\n\n\nclass Agent:\n\n def __init__(self, args):\n self.algo = args.algo\n self.k = args.manager_propose_freq\n self.goal_loss_coeff = args.goal_loss_coeff\n self.n_noisy_goals = args.n_noisy_goals\n self.man_discount = args.man_discount\n self.ctrl_discount = args.ctrl_discount\n\n self.r_margin_pos = args.r_margin_pos\n self.r_margin_neg = args.r_margin_neg\n self.r_init_epochs = args.r_init_epochs\n self.r_training_epochs = args.r_training_epochs\n self.r_training_freq = args.r_training_freq\n self.r_batch_size = args.r_batch_size\n\n self.man_noise_sigma = args.man_noise_sigma\n self.man_policy_update_freq = args.man_policy_update_freq\n\n self.ctrl_entropy = args.ctrl_entropy\n\n self.man_rew_scale = args.man_rew_scale\n self.ctrl_rew_scale = args.ctrl_rew_scale\n\n self.eval_freq = args.eval_freq\n self.eval_episodes = args.eval_episodes\n self.save_models = args.save_models\n self.model_save_freq = 10000\n self.log_freq = 1\n\n self.device = torch.device('cuda:{}'.format(args.gid))\n\n if args.env_name == 'Maze':\n self.env = MazeEnv(step_limit=200, seed=args.seed)\n self.eval_env = MazeEnv(step_limit=200, seed=args.seed+100)\n self.x_range = self.env.h\n self.y_range = self.env.w\n self.action_scale_l = None\n self.total_training_frames = 1000000\n self.random_start = False\n elif args.env_name == 'KeyChest':\n self.env = KeyChestEnv(step_limit=500, random_start=True, seed=args.seed)\n self.eval_env = KeyChestEnv(step_limit=500, random_start=True, seed=args.seed+100)\n self.x_range = self.env.h\n self.y_range = self.env.w\n self.action_scale_l = None\n self.total_training_frames = 2000000\n self.random_start = True\n else:\n raise NotImplementedError\n\n self.obs_dim = self.env.obs_dim\n self.action_dim = self.env.action_dim\n self.goal_dim = 2\n\n self.origin_time = datetime.datetime.now().strftime('%m%d-%H%M')\n output_path = os.path.join('output', args.env_name, self.origin_time)\n self.model_load_path = os.path.join('trained_models', args.env_name)\n self.model_save_path = os.path.join(output_path, 'models')\n self.log_path = os.path.join(output_path, 'log')\n self.result_path = os.path.join(output_path, 'results')\n self.output_data = {'frames': [], 'reward': []}\n self.output_filename = '{}_{}_{}.csv'.format(args.env_name, self.algo, args.seed)\n\n if self.save_models:\n utils.make_path(self.model_save_path)\n utils.make_path(self.log_path)\n utils.make_path(self.result_path)\n self.summary_writer = SummaryWriter(self.log_path)\n with open(os.path.join(self.result_path, 'params.json'), 'w') as json_file:\n json.dump(vars(args), json_file, indent=0)\n\n self.replay_buffer_h = HighLevelReplayBuffer(args.man_buffer_size, args.man_batch_size,\n self.obs_dim, self.goal_dim)\n self.memory_l = LowLevelMemory()\n\n self.build_model(args)\n if args.load_model:\n self.load_model()\n\n self.n_states = 0\n self.state_list = []\n self.state_dict = {}\n self.adj_mat = np.diag(np.ones(500, dtype=np.uint8))\n self.traj_memory = TrajectoryMemory(capacity=args.r_init_steps)\n\n self._episodes = 0\n self._frames = 0\n self.policy_update_it = 0\n self.loss_l = 
None\n self.loss_h = None\n self.mean_int_reward = None\n\n def build_model(self, args):\n self.net = HRLNet(self.obs_dim, self.goal_dim, self.action_dim, args.hidden_dim,\n self.x_range, self.y_range, self.k, self.n_noisy_goals, args.man_soft_sync_rate,\n low_action_scale=self.action_scale_l)\n self.net.to(self.device)\n\n self.a_net = ANet(self.goal_dim, args.r_hidden_dim, args.r_embedding_dim)\n self.a_net.to(self.device)\n\n self.optimizer_r = optim.Adam(self.a_net.parameters(), lr=args.lr_r)\n self.optimizer_actor_h = optim.Adam(self.net.actor_h.parameters(), lr=args.man_act_lr)\n self.optimizer_critic_h = optim.Adam(self.net.critic_h.parameters(), lr=args.man_crit_lr, weight_decay=args.weight_decay_critic)\n\n self.optimizer_l = optim.Adam(self.net.policy_l.parameters(), lr=args.ctrl_lr)\n\n def load_model(self):\n filename = os.path.join(self.model_load_path, '{}.pth'.format(self.algo))\n print('Loading the trained model: {}...'.format(filename))\n self.net.load_state_dict(torch.load(filename))\n\n def save_model(self, episode=None):\n if episode is not None:\n filename = os.path.join(self.model_save_path, '{}_{}.pth'.format(self.algo, episode))\n else:\n filename = os.path.join(self.model_save_path, '{}.pth'.format(self.algo))\n torch.save(self.net.state_dict(), filename)\n print('************** Model {} saved. **************'.format(episode))\n\n def train(self):\n print(\"===================== Training {} starts =====================\".format(self.algo))\n self.start_time = time.time()\n\n print('Pre-training adjacency network...')\n self.env.pure_exploration = True\n self.env.random_start = True\n while not self.traj_memory.full():\n self._episodes += 1\n self.interact_one_episode(train=False)\n # print('Gathered samples: {} / {}'.format(self.traj_memory.size(), self.traj_memory._capacity))\n self.update_adj_mat()\n train_anet(self.a_net, self.state_list, self.adj_mat[:self.n_states, :self.n_states], self.optimizer_r,\n self.r_margin_pos, self.r_margin_neg, n_epochs=self.r_init_epochs, batch_size=self.r_batch_size,\n device=self.device, verbose=False)\n self.test_adjacency_acc()\n self.env.pure_exploration = False\n self.env.random_start = self.random_start\n\n self.traj_memory.reset()\n self.traj_memory.set_capacity(self.r_training_freq)\n\n while self._frames <= self.total_training_frames:\n if self._episodes == 0:\n self.test()\n self.interact_one_episode()\n self.train_one_episode()\n self._episodes += 1\n if self.traj_memory.full():\n print('Training adjacency network...')\n self.update_adj_mat()\n train_anet(self.a_net, self.state_list, self.adj_mat[:self.n_states, :self.n_states], self.optimizer_r,\n self.r_margin_pos, self.r_margin_neg, n_epochs=self.r_training_epochs,\n batch_size=self.r_batch_size, device=self.device, verbose=False)\n self.test_adjacency_acc()\n self.traj_memory.reset()\n\n if self._episodes % self.log_freq == 0:\n self.log_train()\n if self._episodes % self.eval_freq == 0:\n self.test()\n if self._episodes % self.model_save_freq == 0 and self.save_models:\n self.save_model(self._episodes)\n\n if self.save_models:\n self.save_model('last')\n r_filename = os.path.join(self.model_save_path, 'a_network.pth'.format(self._episodes))\n torch.save(self.a_net.state_dict(), r_filename)\n self.summary_writer.close()\n output_df = pd.DataFrame(self.output_data)\n output_df.to_csv(os.path.join(self.result_path, self.output_filename), float_format='%.4f', index=False)\n\n print(\"======================= Training {} ends 
=======================\".format(self.algo))\n\n def test(self):\n print(\"[@@ {} @@] ************** Testing at episode {} **************\".format(self.algo, self._episodes))\n reward_total = 0.\n dist_total = 0.\n for i in range(self.eval_episodes):\n reward, dist = self.test_one_episode()\n reward_total += reward\n dist_total += dist\n reward_avg = reward_total / self.eval_episodes\n dist_avg = dist_total / self.eval_episodes\n print('Average reward: {:.4f}, average dist: {:.4f}'.format(reward_avg, dist_avg))\n self.output_data['frames'].append(self._frames)\n self.output_data['reward'].append(reward_avg)\n # self.output_data['dist'].append(dist_avg)\n self.summary_writer.add_scalar('average test reward', reward_avg, self._frames)\n self.summary_writer.add_scalar('average test dist', dist_avg, self._frames)\n\n def interact_one_episode(self, train=True):\n self.env.new_episode()\n obs_curr = self.env.get_state()\n self.memory_l.reset()\n self.traj_memory.create_new_trajectory()\n last_state = None\n last_goal = None\n\n if train:\n r_horizon = 0.\n start_flag = self.replay_buffer_h.start()\n while True:\n obs_var = utils.single_input_transform(obs_curr, device=self.device)\n flag = (self.env.get_current_step() % self.k == 0)\n\n action, last_goal, last_state, (prob, log_prob, log_prob_act, value_l) = self.step(\n obs_var, last_state, last_goal, flag, start_flag=start_flag)\n\n action_copy = action.copy()\n r = self.man_rew_scale * self.env.make_action(action_copy)\n obs_new = self.env.get_state()\n done = self.env.is_episode_finished()\n last_goal_np = last_goal.detach().squeeze().cpu().numpy()\n\n if (self.env.get_current_step() - 1) % self.k != 0:\n r_horizon += r\n else:\n if (self.env.get_current_step() - 1) != 0:\n self.replay_buffer_h.append(state_store, goal_store, r_horizon, obs_curr, done)\n goal_store = last_goal.detach().squeeze().cpu().numpy()\n state_store = obs_curr\n r_horizon = r\n\n info_low = dict(prob=prob, log_prob=log_prob, log_prob_act=log_prob_act, value_l=value_l)\n self.memory_l.append(last_state, last_goal, action, info_low)\n self.traj_memory.append(obs_curr)\n obs_curr = obs_new\n\n if done:\n self.replay_buffer_h.append(state_store, goal_store, r_horizon, obs_curr, done)\n obs_var = utils.single_input_transform(obs_curr, device=self.device)\n self.memory_l.states.append(obs_var)\n self.traj_memory.append(obs_curr)\n self._frames += self.env.get_current_step()\n self.curr_reward = self.env.get_total_reward()\n break\n else:\n while True:\n obs_var = utils.single_input_transform(obs_curr, device=self.device)\n flag = (self.env.get_current_step() % self.k == 0)\n self.traj_memory.append(obs_curr)\n\n action, last_goal, last_state, _ = self.step(\n obs_var, last_state, last_goal, flag, start_flag=False, evaluate=False)\n self.env.make_action(action)\n obs_new = self.env.get_state()\n done = self.env.is_episode_finished()\n obs_curr = obs_new\n if done:\n self.traj_memory.append(obs_curr)\n self._frames += self.env.get_current_step()\n break\n\n def step(self, state, last_state, last_goal, flag, start_flag=True, evaluate=False):\n if evaluate:\n goal, prob, log_prob, value_l = self.net(\n state, last_state, last_goal, flag,\n self.man_noise_sigma, evaluate=True)\n action = prob.multinomial(1)\n log_prob_act = log_prob.gather(1, action).squeeze()\n else:\n if not start_flag:\n if flag:\n goal = torch.rand(self.goal_dim).unsqueeze(0).to(self.device)\n scale = torch.FloatTensor([self.x_range, self.y_range]).expand_as(goal).to(self.device)\n goal = goal * scale\n 
else:\n goal = self.goal_transition(state, last_state, last_goal)\n policy, value_l = self.net.policy_l(state, goal)\n prob = F.softmax(policy, dim=-1)\n log_prob = F.log_softmax(policy, dim=-1)\n action = prob.multinomial(1)\n log_prob_act = log_prob.gather(1, action).squeeze()\n else:\n if flag:\n goals = self.net.actor_h(state, self.man_noise_sigma, evaluate=False)\n if self.n_noisy_goals > 0:\n noised_goals = goals[:self.n_noisy_goals]\n raw_goal = goals[-1].unsqueeze(0)\n goal = self.sample_adjacent_subgoal(noised_goals, state)\n if goal is None:\n goal = raw_goal\n else:\n goal = goals\n else:\n goal = self.goal_transition(state, last_state, last_goal)\n policy, value_l = self.net.policy_l(state, goal)\n prob = F.softmax(policy, dim=-1)\n log_prob = F.log_softmax(policy, dim=-1)\n action = prob.multinomial(1)\n log_prob_act = log_prob.gather(1, action).squeeze()\n return action.squeeze().detach().cpu().numpy(), goal, state, (prob, log_prob, log_prob_act, value_l)\n\n def sample_adjacent_subgoal(self, goals, state):\n # Randomly sample an adjacent subgoal from a goal list\n inputs = torch.cat((state[:, :self.goal_dim], goals), dim=0)\n outputs = self.a_net(inputs)\n s_embedding = outputs[0].unsqueeze(0)\n goal_embeddings = outputs[1:]\n dists = utils.euclidean_dist(s_embedding, goal_embeddings).squeeze()\n idx = (dists < (self.r_margin_neg + self.r_margin_pos) / 2).nonzero().squeeze()\n if idx.size() and len(idx) == 0:\n return None\n elif not idx.size(): # one index\n sel_goal = goals[idx]\n else:\n sample_idx = np.random.randint(len(idx))\n sel_goal = goals[idx[sample_idx]]\n sel_goal = sel_goal.unsqueeze(0)\n return sel_goal\n\n def goal_transition(self, state, last_state, last_goal):\n return last_goal\n\n def train_one_episode(self):\n # train low level\n if self.replay_buffer_h.start() and self.memory_l.size() > 0:\n self.loss_policy_l, self.loss_value_l, self.loss_entropy_l = self.train_low_level_a2c()\n self.loss_l = self.loss_policy_l + self.loss_value_l + self.ctrl_entropy * self.loss_entropy_l\n # train high level\n if self.replay_buffer_h.start():\n self.loss_policy_h = 0.\n self.loss_value_h = 0.\n self.loss_goal_h = 0.\n high_train_steps = max(self.env.get_current_step() // self.k, 1)\n for _ in range(high_train_steps):\n loss_policy_h, loss_value_h, loss_goal_h = self.train_high_level()\n self.loss_policy_h += loss_policy_h\n self.loss_value_h += loss_value_h\n self.loss_goal_h += loss_goal_h\n self.loss_policy_h /= high_train_steps\n self.loss_value_h /= high_train_steps\n self.loss_goal_h /= high_train_steps\n self.loss_h = self.loss_policy_h + self.loss_value_h + self.goal_loss_coeff * self.loss_goal_h\n\n if self.loss_h is not None and self.loss_l is not None:\n self.loss = self.loss_h + self.loss_l\n\n def train_high_level(self):\n states, goals, rewards, next_states, dones = self.replay_buffer_h.sample()\n states = torch.from_numpy(states).float().to(self.device)\n goals = torch.from_numpy(goals).float().to(self.device)\n rewards = torch.from_numpy(rewards).float().to(self.device)\n next_states = torch.from_numpy(next_states).float().to(self.device)\n dones = torch.from_numpy(dones).bool()\n\n next_goals = self.net.actor_h_tgt(next_states, explore_sigma=0.)\n next_vals_1, next_vals_2 = self.net.critic_h_tgt(next_states, next_goals)\n next_vals = torch.min(next_vals_1, next_vals_2)\n next_vals[dones] = 0.\n ref_vals = rewards + next_vals * self.man_discount\n vals_1, vals_2 = self.net.critic_h(states, goals)\n loss_value_h = F.mse_loss(vals_1, ref_vals.detach()) + 
F.mse_loss(vals_2, ref_vals.detach())\n\n self.optimizer_critic_h.zero_grad()\n loss_value_h.backward()\n self.optimizer_critic_h.step()\n\n curr_goals = self.net.actor_h(states, explore_sigma=0.)\n loss_policy_h = torch.tensor(0.).to(self.device)\n if self.policy_update_it % self.man_policy_update_freq == 0:\n loss_policy_h = -self.net.critic_h.value(states, curr_goals).mean()\n loss_goal_h = torch.clamp(F.pairwise_distance(self.a_net(states[:, :self.goal_dim]), self.a_net(curr_goals)) \\\n - (self.r_margin_neg + self.r_margin_pos) / 2, min=0.).mean()\n loss_actor_h = loss_policy_h + self.goal_loss_coeff * loss_goal_h\n\n self.optimizer_actor_h.zero_grad()\n loss_actor_h.backward()\n self.optimizer_actor_h.step()\n\n self.net.soft_sync_high()\n self.policy_update_it += 1\n return loss_policy_h.item(), loss_value_h.item(), loss_goal_h.item()\n\n def train_low_level_a2c(self):\n states, goals, actions, info = self.memory_l.get_experience()\n next_states = states[1:]\n states = states[:-1]\n states = torch.cat(states, dim=0)\n next_states = torch.cat(next_states, dim=0)\n goals = torch.cat(goals, dim=0)\n rewards = self.compute_int_reward(states, goals, next_states)\n returns = self.compute_return(rewards, self.ctrl_discount, horizon=self.k)\n\n loss_policy_l = 0.\n loss_value_l = 0.\n loss_entropy_l = 0.\n for i in range(self.memory_l.size()):\n adv = returns[i] - info['value_l'][i]\n loss_policy_l -= adv.detach() * info['log_prob_act'][i]\n loss_value_l += adv.pow(2)\n entropy = -(info['prob'][i] * info['log_prob'][i]).sum(-1)\n loss_entropy_l -= entropy\n loss_policy_l /= self.memory_l.size()\n loss_value_l /= self.memory_l.size()\n loss_entropy_l /= self.memory_l.size()\n\n loss = loss_policy_l + loss_value_l + self.ctrl_entropy * loss_entropy_l\n self.optimizer_l.zero_grad()\n loss.backward()\n self.optimizer_l.step()\n return loss_policy_l.item(), loss_value_l.item(), loss_entropy_l.item()\n\n def compute_state_goal_similarity(self, states, next_states, goals):\n return -F.pairwise_distance(next_states[:, :self.goal_dim], goals)\n\n def compute_goal_reaching_reward(self, states, next_states, goals):\n diff = (next_states[:, :self.goal_dim] - goals).abs()\n return (diff <= 0.5 * torch.ones(self.goal_dim).to(self.device)).prod(dim=1).float()\n\n def compute_int_reward(self, states, goals, next_states):\n rewards = self.ctrl_rew_scale * self.compute_goal_reaching_reward(states, next_states, goals)\n self.mean_int_reward = float(rewards.mean())\n return rewards\n\n def compute_return(self, rewards, gamma=0.99, horizon=None):\n episode_length = len(rewards)\n returns = np.zeros(episode_length)\n if horizon is None:\n returns[episode_length - 1] = rewards[episode_length - 1]\n for i in reversed(range(episode_length - 1)):\n returns[i] = rewards[i] + returns[i + 1] * gamma\n else:\n for i in range(horizon, episode_length, horizon):\n returns[i-1] = rewards[i-1]\n for j in range(1, horizon):\n returns[i-1-j] = rewards[i-1-j] + returns[i-j] * gamma\n returns[episode_length - 1] = rewards[episode_length - 1]\n j = 1\n while (episode_length - j) % horizon != 0:\n returns[episode_length - 1 - j] = rewards[episode_length - 1 - j] + returns[episode_length - j] * gamma\n j += 1\n return returns\n\n def test_one_episode(self):\n self.eval_env.new_episode()\n last_state = None\n last_goal = None\n goal_list = []\n last_obs_list = []\n obs_list = []\n done = False\n start_flag = self.replay_buffer_h.start()\n while True:\n flag = (self.eval_env.get_current_step() % self.k == 0)\n obs = 
self.eval_env.get_state()\n obs_var = utils.single_input_transform(obs, device=self.device)\n if flag:\n if self.eval_env.get_current_step() != 0:\n goal_list.append(last_goal)\n obs_list.append(obs_var)\n else:\n last_obs_list.append(obs_var)\n\n action, last_goal, last_state, _ = self.step(\n obs_var, last_state, last_goal, flag, start_flag=start_flag, evaluate=True)\n \n self.eval_env.make_action(action)\n if self.eval_env.is_episode_finished():\n reward = self.eval_env.get_total_reward()\n break\n last_obs_list.extend(obs_list[:-1])\n goals = torch.cat(goal_list, dim=0)\n last_obs = torch.cat(last_obs_list, dim=0)\n obs = torch.cat(obs_list, dim=0)\n dist = -self.compute_state_goal_similarity(last_obs, obs, goals).mean().item()\n return reward, dist\n\n def update_adj_mat(self):\n for traj in self.traj_memory.get_trajectory():\n for i in range(len(traj)):\n for j in range(1, min(self.k, len(traj) - i)):\n s1 = tuple(np.round(traj[i][:self.goal_dim]).astype(np.int32))\n s2 = tuple(np.round(traj[i+j][:self.goal_dim]).astype(np.int32))\n if s1 not in self.state_list:\n self.state_list.append(s1)\n self.state_dict[s1] = self.n_states\n self.n_states += 1\n if s2 not in self.state_list:\n self.state_list.append(s2)\n self.state_dict[s2] = self.n_states\n self.n_states += 1\n # assume that the environment is symmetric\n self.adj_mat[self.state_dict[s1], self.state_dict[s2]] = 1\n self.adj_mat[self.state_dict[s2], self.state_dict[s1]] = 1\n\n def test_adjacency_acc(self):\n states = torch.tensor(self.eval_env.states_all).float().to(self.device)\n self.a_net.eval()\n embeddings = self.a_net(states)\n dists = utils.euclidean_dist(embeddings, embeddings).detach().cpu().numpy()\n n_correct = 0\n n_total = 0\n r_dists = []\n for i in range(self.eval_env.n_states):\n r_dist = self.eval_env.calc_r_dist(self.eval_env.states_all[i])\n for j in range(i+1, self.eval_env.n_states):\n y = r_dist[tuple(self.eval_env.states_all[j])] <= self.k\n pred = dists[i, j] <= (self.r_margin_neg + self.r_margin_pos) / 2\n if pred == y:\n n_correct += 1\n n_total += 1\n print('Adjacency acc = {:.4f}'.format(n_correct / n_total))\n self.a_net.train()\n\n def log_train(self):\n print(\"[@@ {} @@] ************** Training at episode {} **************\".format(self.algo, self._episodes))\n utils.print_localtime()\n self.end_time = time.time()\n print(' Frames {}, Episode #{} (( costs {:.2f} s ))'.format(self._frames, self._episodes, self.end_time-self.start_time))\n if self.mean_int_reward is None:\n print(' Gets (( {:.3f} reward )) in (( {} steps ))'.format(self.curr_reward, self.env.get_current_step()))\n else:\n print(' Gets (( {:.3f} reward, {:.5f} mean int reward )) in (( {} steps ))'.format(\n self.curr_reward, self.mean_int_reward, self.env.get_current_step()))\n self.start_time = self.end_time\n if self.loss_h is not None:\n print(\" High-level (( Policy loss = {:5f} || Value loss = {:5f} ))\".format(self.loss_policy_h, self.loss_value_h))\n print(\" (( Goal loss = {:.5f} ))\".format(self.loss_goal_h))\n self.summary_writer.add_scalar('high-level/policy loss', self.loss_policy_h, self._frames)\n self.summary_writer.add_scalar('high-level/value loss', self.loss_value_h, self._frames)\n self.summary_writer.add_scalar('high-level/goal loss', self.loss_goal_h, self._frames)\n else:\n print(\" High-level (( Has not started training yet. 
Current replay size = {} ))\".format(self.replay_buffer_h.size()))\n if self.loss_l is not None:\n print(\" Low-level (( Policy loss = {:5f} || Value loss = {:5f} ))\".format(self.loss_policy_l, self.loss_value_l))\n self.summary_writer.add_scalar('low-level/policy loss', self.loss_policy_l, self._frames)\n self.summary_writer.add_scalar('low-level/value loss', self.loss_value_l, self._frames)\n self.summary_writer.add_scalar('low-level/mean int reward', self.mean_int_reward, self._frames)\n print(\" (( Entropy = {:5f} ))\".format(-self.loss_entropy_l))\n self.summary_writer.add_scalar('low-level/entropy', -self.loss_entropy_l, self._frames)\n else:\n print(\" Low-level (( Has not started training yet. ))\")\n\n if self.loss_h is not None and self.loss_l is not None:\n print(\" Total loss = {:5f}\".format(self.loss))\n"
]
| [
[
"torch.rand",
"torch.cat",
"torch.min",
"numpy.zeros",
"torch.nn.functional.pairwise_distance",
"pandas.DataFrame",
"numpy.round",
"numpy.ones",
"torch.FloatTensor",
"torch.nn.functional.log_softmax",
"torch.from_numpy",
"torch.ones",
"torch.tensor",
"torch.load",
"torch.nn.functional.softmax",
"torch.utils.tensorboard.SummaryWriter"
]
]
|
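The low-level controller in the entry above computes its discounted returns in segments of length k, restarting the bootstrap at every high-level decision step. Below is a minimal, self-contained sketch of that general idea in plain NumPy; the function name, horizon value, and reward values are illustrative assumptions, not code from the repository.

```python
import numpy as np

def segmented_returns(rewards, gamma=0.99, horizon=10):
    """Discounted returns computed independently inside each horizon-length segment."""
    rewards = np.asarray(rewards, dtype=np.float64)
    returns = np.zeros_like(rewards)
    for start in range(0, len(rewards), horizon):
        end = min(start + horizon, len(rewards))
        running = 0.0
        for t in reversed(range(start, end)):
            running = rewards[t] + gamma * running   # bootstrap only within the segment
            returns[t] = running
    return returns

if __name__ == "__main__":
    # made-up intrinsic rewards for a 7-step episode, with a horizon of 3 steps
    print(segmented_returns([0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0], gamma=0.9, horizon=3))
```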
19ahmed99/l2rpn_opponent_modelling | [
"5a04f74fe065e2b3788d3aa8378acd06ee3d2426"
]
| [
"Agents/D3QN_baseline_nn_concat/DoubleDuelingDQN.py"
]
| [
"# Copyright (c) 2020, RTE (https://www.rte-france.com)\n# See AUTHORS.txt\n# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.\n# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,\n# you can obtain one at http://mozilla.org/MPL/2.0/.\n# SPDX-License-Identifier: MPL-2.0\n# This file is part of L2RPN Baselines, L2RPN Baselines a repository to host baselines for l2rpn competitions.\n\nimport os\nimport json\nimport math\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom grid2op.Agent import AgentWithConverter\nfrom grid2op.Converter import IdToAct\n\nfrom DoubleDuelingDQNConfig import DoubleDuelingDQNConfig as cfg\nfrom DoubleDuelingDQN_NN import DoubleDuelingDQN_NN\nfrom prioritized_replay_buffer import PrioritizedReplayBuffer\n\n\n\nclass DoubleDuelingDQN(AgentWithConverter):\n def __init__(self,\n observation_space,\n action_space,\n name=__name__,\n is_training=False):\n # Call parent constructor\n AgentWithConverter.__init__(self, action_space,\n action_space_converter=IdToAct)\n self.obs_space = observation_space\n\n # Filter\n #print(\"Actions filtering...\")\n self.action_space.filter_action(self._filter_action)\n #print(\"..Done\")\n\n self.action_path = \"./allactions.npy\"\n self.converter = IdToAct(self.action_space)\n self.converter.init_converter()\n self.converter.save(*os.path.split(self.action_path))\n self.all_actions = np.array(self.converter.all_actions).tolist()\n self.all_acts_dict = {tuple(el.to_vect().tolist()): i for i, el in enumerate(self.all_actions)}\n\n # Store constructor params\n self.name = name\n self.num_frames = cfg.N_FRAMES\n self.is_training = is_training\n self.batch_size = cfg.BATCH_SIZE\n self.lr = cfg.LR\n \n # Declare required vars\n self.Qmain = None\n self.obs = None\n self.state = []\n self.frames = []\n\n # Declare training vars\n self.per_buffer = None\n self.done = False\n self.frames2 = None\n self.epoch_rewards = None\n self.epoch_rewards_moving_avg = None\n self.losses = None\n self.epoch_alive = None\n self.Qtarget = None\n self.epsilon = 0.0\n\n # Store the current opponent action\n self.opponent_action = None\n # List of all opponent actions\n self.all_opponent_actions = []\n # Stores opponent next action\n self.opponent_next_action = None\n # List of all opponent next actions\n self.all_opponent_next_actions = []\n self.count_non_do_nothing_opp_act = 0 \n\n # Compute dimensions from intial spaces\n self.observation_size = self.obs_space.size_obs()\n self.action_size = self.action_space.size()\n self.action_vect_size = 258\n \n # Load network graph\n self.Qmain = DoubleDuelingDQN_NN(self.action_size,\n self.observation_size,\n num_frames=self.num_frames,\n learning_rate=self.lr,\n learning_rate_decay_steps=cfg.LR_DECAY_STEPS,\n learning_rate_decay_rate=cfg.LR_DECAY_RATE)\n # Setup training vars if needed\n if self.is_training:\n self._init_training()\n\n def _filter_action(self, action):\n MAX_ELEM = 2\n act_dict = action.impact_on_objects()\n elem = 0\n elem += act_dict[\"force_line\"][\"reconnections\"][\"count\"]\n elem += act_dict[\"force_line\"][\"disconnections\"][\"count\"]\n elem += act_dict[\"switch_line\"][\"count\"]\n elem += len(act_dict[\"topology\"][\"bus_switch\"])\n elem += len(act_dict[\"topology\"][\"assigned_bus\"])\n elem += len(act_dict[\"topology\"][\"disconnect_bus\"])\n elem += len(act_dict[\"redispatch\"][\"generators\"])\n\n if elem <= MAX_ELEM:\n return True\n return False\n \n def 
_init_training(self):\n self.epsilon = cfg.INITIAL_EPSILON\n self.frames2 = []\n self.epoch_rewards = []\n self.epoch_rewards_moving_avg = []\n self.losses = []\n self.epoch_alive = []\n self.per_buffer = PrioritizedReplayBuffer(cfg.PER_CAPACITY, cfg.PER_ALPHA)\n self.Qtarget = DoubleDuelingDQN_NN(self.action_size,\n self.observation_size,\n num_frames = self.num_frames)\n\n def _reset_state(self, current_obs):\n # Initial state\n self.obs = current_obs\n self.state = self.convert_obs(self.obs)\n self.done = False\n\n def _reset_frame_buffer(self):\n # Reset frame buffers\n self.frames = []\n if self.is_training:\n self.frames2 = []\n\n def _save_current_frame(self, state):\n self.frames.append(state.copy())\n if len(self.frames) > self.num_frames:\n self.frames.pop(0)\n\n def _save_next_frame(self, next_state):\n self.frames2.append(next_state.copy())\n if len(self.frames2) > self.num_frames:\n self.frames2.pop(0)\n\n def _adaptive_epsilon_decay(self, step):\n ada_div = cfg.DECAY_EPSILON / 10.0\n step_off = step + ada_div\n ada_eps = cfg.INITIAL_EPSILON * -math.log10((step_off + 1) / (cfg.DECAY_EPSILON + ada_div))\n ada_eps_up_clip = min(cfg.INITIAL_EPSILON, ada_eps)\n ada_eps_low_clip = max(cfg.FINAL_EPSILON, ada_eps_up_clip)\n return ada_eps_low_clip\n \n def _save_hyperparameters(self, logpath, env, steps):\n\n r_instance = env._reward_helper.template_reward\n hp = {\n \"lr\": cfg.LR,\n \"lr_decay_steps\": cfg.LR_DECAY_STEPS,\n \"lr_decay_rate\": cfg.LR_DECAY_RATE,\n \"batch_size\": cfg.BATCH_SIZE,\n \"stack_frames\": cfg.N_FRAMES,\n \"iter\": steps,\n \"e_start\": cfg.INITIAL_EPSILON,\n \"e_end\": cfg.FINAL_EPSILON,\n \"e_decay\": cfg.DECAY_EPSILON,\n \"discount\": cfg.DISCOUNT_FACTOR,\n \"per_alpha\": cfg.PER_ALPHA,\n \"per_beta\": cfg.PER_BETA,\n \"per_capacity\": cfg.PER_CAPACITY,\n \"update_freq\": cfg.UPDATE_FREQ,\n \"update_hard\": cfg.UPDATE_TARGET_HARD_FREQ,\n \"update_soft\": cfg.UPDATE_TARGET_SOFT_TAU,\n \"reward\": dict(r_instance)\n }\n hp_filename = \"{}-hypers.json\".format(self.name)\n hp_path = os.path.join(logpath, hp_filename)\n with open(hp_path, 'w') as fp:\n json.dump(hp, fp=fp, indent=2)\n\n ## Agent Interface\n def convert_obs(self, observation):\n li_vect= []\n for el in observation.attr_list_vect:\n v = observation._get_array_from_attr_name(el).astype(np.float32)\n v_fix = np.nan_to_num(v)\n v_norm = np.linalg.norm(v_fix)\n if v_norm > 1e6:\n v_res = (v_fix / v_norm) * 10.0\n else:\n v_res = v_fix\n li_vect.append(v_res)\n return np.concatenate(li_vect)\n\n def convert_act(self, action):\n return super().convert_act(action)\n\n ## Baseline Interface\n def reset(self, observation):\n self._reset_state(observation)\n self._reset_frame_buffer()\n\n def my_act(self, state, reward, done=False):\n # Register current state to stacking buffer\n self._save_current_frame(state)\n # We need at least num frames to predict\n if len(self.frames) < self.num_frames:\n return 0 # Do nothing\n # Infer with the last num_frames states\n a, _ = self.Qmain.predict_move(np.array(self.frames), np.array(self.all_opponent_actions[-self.num_frames:])) \n \n return a\n\n def act(self, obs, reward, done):\n self.obs = obs \n # Store opponent action\n self.store_opponent_action(obs)\n transformed_observation = self.convert_obs(obs)\n encoded_act = self.my_act(transformed_observation, reward, done)\n return self.convert_act(encoded_act)\n\n \n def load(self, path):\n self.Qmain.load_network(path)\n if self.is_training:\n self.Qmain.update_target_hard(self.Qtarget.model)\n\n def save(self, 
path):\n self.Qmain.save_network(path)\n\n # Store current opponents action\n # This function is used as an alterantive way to retrieve the opponent action with using the info variable returned by the step function\n # This function can be used when the agent is training or not\n def store_opponent_action(self, obs):\n # A Do_Nothing action\n opponent_action = self.action_space({})\n\n # Get all the powerline id that will require maintenance\n maintenance = obs.time_next_maintenance\n maintenance_powerline_id = [i for i in range(len(maintenance)) if maintenance[i] != -1]\n \n # Retrive all the cooldown_duration for disconnected powerlines that are equal to 47 timesteps\n cooldown_duration = obs.time_before_cooldown_line\n cooldown_powerline_id = [i for i in range(len(cooldown_duration)) if cooldown_duration[i] == 47]\n \n for pid in cooldown_powerline_id:\n # Check if it is disconnected due to a maintenacance or an attack\n if pid in maintenance_powerline_id and maintenance[pid] == 0:\n cooldown_powerline_id.remove(pid) \n else:\n powerline_attacked = pid\n opponent_action = self.action_space({\"change_line_status\": [int(powerline_attacked)]})\n self.count_non_do_nothing_opp_act += 1\n \n # Convert the opponent action to its vector representation\n opp_act_as_vect = (self.converter() + opponent_action).to_vect()\n self.opponent_action = opp_act_as_vect\n self.all_opponent_actions.append(self.opponent_action)\n\n # Store the opponent action using the info variable returned by the step() function which give information about the next observation\n # This function can only be used during the training\n def store_opponent_next_action(self, info):\n opponent_action = self.action_space()\n attack_duration = info[\"opponent_attack_duration\"]\n\n if attack_duration == 48:\n powerline_attacked = np.where(info[\"opponent_attack_line\"])[0]\n # Let the opponent action be a powerline disconnection action of the powerline attacked\n opponent_action = self.action_space({\"change_line_status\": [int(powerline_attacked)]})\n \n # Convert the opponent action to its vector representation\n opp_act_as_vect = (self.converter() + opponent_action).to_vect()\n self.opponent_next_action = opp_act_as_vect\n self.all_opponent_next_actions.append(self.opponent_action)\n\n\n ## Training Procedure\n def train(self, env,\n iterations,\n save_path,\n num_pre_training_steps=0,\n logdir = \"logs-train\"):\n # Make sure we can fill the experience buffer\n if num_pre_training_steps < self.batch_size * self.num_frames:\n num_pre_training_steps = self.batch_size * self.num_frames\n\n # Loop vars\n num_training_steps = iterations\n num_steps = num_pre_training_steps + num_training_steps\n self.epsilon = cfg.INITIAL_EPSILON\n alive_steps = 0\n total_reward = 0\n self.done = True\n step = 0 \n\n # Create file system related vars\n logpath = os.path.join(logdir, self.name)\n os.makedirs(save_path, exist_ok=True)\n modelpath = os.path.join(save_path, self.name + \".h5\")\n self.tf_writer = tf.summary.create_file_writer(logpath, name=self.name)\n self._save_hyperparameters(save_path, env, num_steps)\n\n # Training loop\n while step < num_steps:\n # Init first time or new episode\n if self.done:\n new_obs = env.reset() # This shouldn't raise\n self.reset(new_obs)\n if cfg.VERBOSE and step % 1000 == 0:\n print(\"Step [{}] -- Random [{}]\".format(step, self.epsilon))\n\n # Save current observation to stacking buffer\n self._save_current_frame(self.state)\n\n # Store opponent current action\n self.store_opponent_action(new_obs)\n\n # 
Choose an action\n if step <= num_pre_training_steps:\n a = self.Qmain.random_move()\n elif np.random.rand(1) < self.epsilon:\n a = self.Qmain.random_move()\n elif len(self.frames) < self.num_frames:\n a = 0 # Do nothing\n else:\n a, _ = self.Qmain.predict_move(np.array(self.frames), np.array(self.all_opponent_actions[-self.num_frames:]))\n\n # Convert it to a valid action\n act = self.convert_act(a)\n # Execute action\n new_obs, reward, self.done, info = env.step(act)\n new_state = self.convert_obs(new_obs)\n # if info[\"is_illegal\"] or info[\"is_ambiguous\"] or \\\n # info[\"is_dispatching_illegal\"] or info[\"is_illegal_reco\"]:\n # # if cfg.VERBOSE:\n # print (a, info)\n\n # Store opponent next action\n self.store_opponent_next_action(info)\n\n # Save new observation to stacking buffer\n self._save_next_frame(new_state)\n\n # Save to experience buffer\n if len(self.frames2) == self.num_frames:\n self.per_buffer.add(np.array(self.frames),\n a, np.array(self.all_opponent_actions[-4:]),\n reward,\n np.array(self.frames2), np.array(self.all_opponent_next_actions[-4:]),\n self.done)\n\n # Perform training when we have enough experience in buffer\n if step >= num_pre_training_steps:\n training_step = step - num_pre_training_steps\n # Decay chance of random action\n self.epsilon = self._adaptive_epsilon_decay(training_step)\n\n # Perform training at given frequency\n if step % cfg.UPDATE_FREQ == 0 and \\\n len(self.per_buffer) >= self.batch_size:\n # Perform training\n self._batch_train(training_step, step)\n\n if cfg.UPDATE_TARGET_SOFT_TAU > 0.0:\n tau = cfg.UPDATE_TARGET_SOFT_TAU\n # Update target network towards primary network\n self.Qmain.update_target_soft(self.Qtarget.model, tau)\n\n # Every UPDATE_TARGET_HARD_FREQ trainings, update target completely\n if cfg.UPDATE_TARGET_HARD_FREQ > 0 and \\\n step % (cfg.UPDATE_FREQ * cfg.UPDATE_TARGET_HARD_FREQ) == 0:\n self.Qmain.update_target_hard(self.Qtarget.model)\n\n total_reward += reward\n if self.done:\n self.epoch_rewards.append(total_reward)\n current_reward_moving_avg = sum(self.epoch_rewards)/len(self.epoch_rewards)\n self.epoch_rewards_moving_avg.append(current_reward_moving_avg)\n self.epoch_alive.append(alive_steps)\n if cfg.VERBOSE:\n print(\"Survived [{}] steps\".format(alive_steps))\n print(\"Total reward [{}]\".format(total_reward))\n alive_steps = 0\n total_reward = 0\n else:\n alive_steps += 1\n \n # Save the network every 1000 iterations\n if step > 0 and step % 1000 == 0:\n modelpath = os.path.join(save_path, self.name + str(step) +\".h5\")\n self.save(modelpath)\n\n\n # Iterate to next loop\n step += 1\n # Make new obs the current obs\n self.obs = new_obs\n self.state = new_state\n\n\n # Save model after all steps\n modelpath = os.path.join(save_path, self.name + str(step) +\".h5\")\n self.save(modelpath)\n\n print(\"Number of opponent action that are not do_nothing : {} \".format(self.count_non_do_nothing_opp_act))\n\n return self.epoch_rewards, self.epoch_rewards_moving_avg, self.losses\n\n\n\n def _batch_train(self, training_step, step):\n \"\"\"Trains network to fit given parameters\"\"\"\n\n # Sample from experience buffer\n sample_batch = self.per_buffer.sample(self.batch_size, cfg.PER_BETA)\n s_batch = sample_batch[0]\n a_batch = sample_batch[1]\n opp_a_batch = sample_batch[2]\n r_batch = sample_batch[3]\n s2_batch = sample_batch[4]\n opp_next_a_batch = sample_batch[5]\n d_batch = sample_batch[6]\n w_batch = sample_batch[7]\n idx_batch = sample_batch[8]\n\n Q = np.zeros((self.batch_size, self.action_size))\n \n \n 
input_s_size = self.observation_size * self.num_frames \n input_opp_size = self.action_vect_size * self.num_frames\n\n # Reshape frames to 1D\n input_s_t = np.reshape(s_batch, (self.batch_size, input_s_size))\n input_opp_t = np.reshape(opp_a_batch, (self.batch_size, input_opp_size))\n input_s_t_1 = np.reshape(s2_batch, (self.batch_size, input_s_size))\n input_opp_t_1 = np.reshape(opp_next_a_batch, (self.batch_size, input_opp_size))\n\n # Save the graph just the first time\n if training_step == 0:\n tf.summary.trace_on()\n\n # T Batch predict\n Q = self.Qmain.model.predict([input_s_t, input_opp_t], batch_size = self.batch_size)\n\n ## Log graph once and disable graph logging\n if training_step == 0:\n with self.tf_writer.as_default():\n tf.summary.trace_export(self.name + \"-graph\", step)\n\n # T+1 batch predict\n Q1 = self.Qmain.model.predict([input_s_t_1,input_opp_t_1], batch_size=self.batch_size)\n Q2 = self.Qtarget.model.predict([input_s_t_1,input_opp_t_1], batch_size=self.batch_size)\n\n # Compute batch Qtarget using Double DQN\n for i in range(self.batch_size):\n doubleQ = Q2[i, np.argmax(Q1[i])]\n Q[i, a_batch[i]] = r_batch[i]\n if d_batch[i] == False:\n Q[i, a_batch[i]] += cfg.DISCOUNT_FACTOR * doubleQ\n\n # Batch train\n loss = self.Qmain.train_on_batch([input_s_t, input_opp_t], Q, w_batch)\n self.losses.append(loss)\n\n # Update PER buffer\n priorities = self.Qmain.batch_sq_error\n # Can't be zero, no upper limit\n priorities = np.clip(priorities, a_min=1e-8, a_max=None)\n self.per_buffer.update_priorities(idx_batch, priorities)\n\n # Log some useful metrics every even updates\n if step % (cfg.UPDATE_FREQ * 2) == 0:\n with self.tf_writer.as_default():\n mean_reward = np.mean(self.epoch_rewards)\n mean_alive = np.mean(self.epoch_alive)\n if len(self.epoch_rewards) >= 100:\n mean_reward_100 = np.mean(self.epoch_rewards[-100:])\n mean_alive_100 = np.mean(self.epoch_alive[-100:])\n else:\n mean_reward_100 = mean_reward\n mean_alive_100 = mean_alive\n tf.summary.scalar(\"mean_reward\", mean_reward, step)\n tf.summary.scalar(\"mean_alive\", mean_alive, step)\n tf.summary.scalar(\"mean_reward_100\", mean_reward_100, step)\n tf.summary.scalar(\"mean_alive_100\", mean_alive_100, step)\n tf.summary.scalar(\"loss\", loss, step)\n tf.summary.scalar(\"lr\", self.Qmain.train_lr, step)\n if cfg.VERBOSE:\n print(\"loss =\", loss)\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.nan_to_num",
"numpy.reshape",
"numpy.zeros",
"tensorflow.summary.trace_on",
"tensorflow.summary.scalar",
"numpy.random.rand",
"numpy.mean",
"numpy.where",
"tensorflow.summary.trace_export",
"numpy.argmax",
"numpy.clip",
"tensorflow.summary.create_file_writer"
]
]
|
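The `_batch_train` routine in this entry follows the standard Double DQN recipe: the online network selects the argmax action at t+1 and the target network evaluates it. A minimal NumPy sketch of that target computation follows; the batch shapes, variable names, and values are made-up illustrations, not the agent's API.

```python
import numpy as np

def double_dqn_targets(q_online_next, q_target_next, rewards, dones, gamma=0.99):
    """y_i = r_i + gamma * Q_target(s', argmax_a Q_online(s', a)) for non-terminal steps."""
    best_actions = np.argmax(q_online_next, axis=1)                   # action selection: online net
    double_q = q_target_next[np.arange(len(rewards)), best_actions]   # action evaluation: target net
    return rewards + gamma * double_q * (1.0 - dones.astype(np.float64))

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    q_online = rng.normal(size=(4, 3))   # online-net Q-values for s_{t+1}
    q_target = rng.normal(size=(4, 3))   # target-net Q-values for s_{t+1}
    rewards = np.array([1.0, 0.0, 0.5, 1.0])
    dones = np.array([False, False, True, False])
    print(double_dqn_targets(q_online, q_target, rewards, dones))
```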
washizzle/darts | [
"bab599ec2232f5ced4aabed269fafb41306ceced"
]
| [
"cnn/architect.py"
]
| [
"import torch\r\nimport numpy as np\r\nimport torch.nn as nn\r\n\r\nfrom torch.autograd import Variable\r\nfrom model_search import Network\r\n\r\n\r\ndef _concat(xs):\r\n return torch.cat([x.view(-1) for x in xs])\r\n\r\n\r\nclass Architect(object):\r\n\r\n def __init__(self, model, args):\r\n self.network_momentum = args.momentum\r\n self.network_weight_decay = args.weight_decay\r\n self.model = model\r\n self.optimizer = torch.optim.Adam(self.model.arch_parameters(),\r\n lr=args.arch_learning_rate, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)\r\n\r\n def _compute_unrolled_model(self, input, target, eta, network_optimizer):\r\n loss = self.model._loss(input, target)\r\n theta = _concat(self.model.parameters()).data\r\n try:\r\n moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in self.model.parameters()).mul_(self.network_momentum)\r\n except:\r\n moment = torch.zeros_like(theta)\r\n dtheta = _concat(torch.autograd.grad(loss, self.model.parameters())).data + self.network_weight_decay*theta\r\n model_unrolled = self._construct_model_from_theta(theta.sub(eta, moment+dtheta))\r\n return model_unrolled\r\n\r\n def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled):\r\n self.optimizer.zero_grad()\r\n if unrolled:\r\n self._backward_step_unrolled(\r\n input_train, target_train, input_valid, target_valid, eta, network_optimizer)\r\n else:\r\n self._backward_step(input_valid, target_valid)\r\n\r\n grad_norm = nn.utils.clip_grad_norm(self.model.arch_parameters(), 10.)\r\n self.optimizer.step()\r\n return grad_norm\r\n\r\n def _backward_step(self, input_valid, target_valid):\r\n loss = self.model._loss(input_valid, target_valid)\r\n for v in self.model.arch_parameters():\r\n if v.grad is not None:\r\n v.grad.data.zero_()\r\n loss.backward()\r\n\r\n def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer):\r\n model_unrolled = self._compute_unrolled_model(input_train, target_train, eta, network_optimizer)\r\n loss = model_unrolled._loss(input_valid, target_valid)\r\n grads = torch.autograd.grad(loss, model_unrolled.arch_parameters(), allow_unused=True, retain_graph=True)#added allow_unused=True,\r\n\r\n theta = model_unrolled.parameters()\r\n dtheta = torch.autograd.grad(loss, model_unrolled.parameters(), allow_unused=True) #added allow_unused=True\r\n vector = [dt.add(self.network_weight_decay, t).data for dt, t in zip(dtheta, theta)]\r\n implicit_grads = self._hessian_vector_product(model_unrolled, vector, input_train, target_train)\r\n\r\n for g, ig in zip(grads, implicit_grads):\r\n g.data.sub_(eta, ig.data)\r\n\r\n for v, g in zip(self.model.arch_parameters(), grads):\r\n if v.grad is None:\r\n v.grad = Variable(g.data)\r\n else:\r\n v.grad.data.copy_(g.data)\r\n\r\n def _construct_model_from_theta(self, theta):\r\n model_clone = Network(self.model._C, self.model._num_classes, self.model._layers, self.model._criterion).cuda()\r\n\r\n for x, y in zip(model_clone.arch_parameters(), self.model.arch_parameters()):\r\n x.data.copy_(y.data)\r\n model_dict = self.model.state_dict()\r\n\r\n params, offset = {}, 0\r\n for k, v in self.model.named_parameters():\r\n v_length = np.prod(v.size())\r\n params[k] = theta[offset: offset+v_length].view(v.size())\r\n offset += v_length\r\n\r\n assert offset == len(theta)\r\n model_dict.update(params)\r\n model_clone.load_state_dict(model_dict)\r\n return model_clone.cuda()\r\n\r\n def _hessian_vector_product(self, model, vector, 
input, target, r=1e-2):\r\n R = r / _concat(vector).norm()\r\n for p, v in zip(model.parameters(), vector):\r\n p.data.add_(R, v)\r\n loss = model._loss(input, target)\r\n grads_p = torch.autograd.grad(loss, model.arch_parameters(), allow_unused=True)#added allow_unused=True,\r\n\r\n for p, v in zip(model.parameters(), vector):\r\n p.data.sub_(2*R, v)\r\n loss = model._loss(input, target)\r\n grads_n = torch.autograd.grad(loss, model.arch_parameters(), allow_unused=True)#added allow_unused=True,\r\n\r\n for p, v in zip(model.parameters(), vector):\r\n p.data.add_(R, v)\r\n\r\n return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]\r\n\r\n"
]
| [
[
"torch.zeros_like",
"torch.autograd.Variable"
]
]
|
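The `_hessian_vector_product` method above uses the symmetric finite-difference approximation from DARTS-style bilevel optimization: perturb the weights by ±eps·v and difference the architecture gradients. A small, self-contained PyTorch sketch of the same trick on a toy objective is below; `w`, `alpha`, and `loss_fn` are stand-ins, not the repository's classes.

```python
import torch

def hvp_finite_difference(loss_fn, w, alpha, vector, r=1e-2):
    """Approximate d/dalpha [ (grad_w L) . vector ] by central differences in w."""
    eps = r / torch.cat([v.reshape(-1) for v in vector]).norm()
    with torch.no_grad():
        for p, v in zip(w, vector):
            p.add_(eps * v)                              # w <- w + eps * v
    grads_p = torch.autograd.grad(loss_fn(w, alpha), alpha)
    with torch.no_grad():
        for p, v in zip(w, vector):
            p.sub_(2 * eps * v)                          # w <- w - eps * v
    grads_n = torch.autograd.grad(loss_fn(w, alpha), alpha)
    with torch.no_grad():
        for p, v in zip(w, vector):
            p.add_(eps * v)                              # restore the original w
    return [(gp - gn) / (2 * eps) for gp, gn in zip(grads_p, grads_n)]

if __name__ == "__main__":
    w = [torch.randn(3, requires_grad=True)]
    alpha = [torch.randn(3, requires_grad=True)]
    toy_loss = lambda w_, a_: ((w_[0] * a_[0]) ** 2).sum()
    print(hvp_finite_difference(toy_loss, w, alpha, [torch.randn(3)]))
```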
analogdada/object_detection_demo | [
"87115051fedc9a5559fe779c49c3b526232470ef"
]
| [
"xml_to_csv.py"
]
| [
"\"\"\"\nUsage:\n# Create train data:\npython xml_to_csv.py -i [PATH_TO_IMAGES_FOLDER]/train -o [PATH_TO_ANNOTATIONS_FOLDER]/train_labels.csv\n\n# Create test data:\npython xml_to_csv.py -i [PATH_TO_IMAGES_FOLDER]/test -o [PATH_TO_ANNOTATIONS_FOLDER]/test_labels.csv\n\"\"\"\n\nimport os\nimport glob\nimport pandas as pd\nimport argparse\nimport xml.etree.ElementTree as ET\n\n\ndef xml_to_csv(path):\n \"\"\"Iterates through all .xml files (generated by labelImg) in a given directory and combines them in a single Pandas datagrame.\n\n Parameters:\n ----------\n path : {str}\n The path containing the .xml files\n Returns\n -------\n Pandas DataFrame\n The produced dataframe\n \"\"\"\n classes_names = []\n xml_list = []\n for xml_file in glob.glob(path + \"/*.xml\"):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for member in root.findall(\"object\"):\n classes_names.append(member[0].text)\n value = (\n root.find(\"filename\").text,\n int(root.find(\"size\")[0].text),\n int(root.find(\"size\")[1].text),\n member[0].text,\n int(member[4][0].text),\n int(member[4][1].text),\n int(member[4][2].text),\n int(member[4][3].text),\n )\n xml_list.append(value)\n column_name = [\n \"filename\",\n \"width\",\n \"height\",\n \"class\",\n \"xmin\",\n \"ymin\",\n \"xmax\",\n \"ymax\",\n ]\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n classes_names = list(set(classes_names))\n classes_names.sort()\n return xml_df, classes_names\n\n\ndef main():\n # Initiate argument parser\n parser = argparse.ArgumentParser(\n description=\"Sample TensorFlow XML-to-CSV converter\"\n )\n parser.add_argument(\n \"-i\",\n \"--inputDir\",\n help=\"Path to the folder where the input .xml files are stored\",\n type=str,\n )\n parser.add_argument(\n \"-o\", \"--outputFile\", help=\"Name of output .csv file (including path)\", type=str\n )\n\n parser.add_argument(\n \"-l\",\n \"--labelMapDir\",\n help=\"Directory path to save label_map.pbtxt file is specified.\",\n type=str,\n default=\"\",\n )\n\n args = parser.parse_args()\n\n if args.inputDir is None:\n args.inputDir = os.getcwd()\n if args.outputFile is None:\n args.outputFile = args.inputDir + \"/labels.csv\"\n\n assert os.path.isdir(args.inputDir)\n os.makedirs(os.path.dirname(args.outputFile), exist_ok=True)\n xml_df, classes_names = xml_to_csv(args.inputDir)\n xml_df.to_csv(args.outputFile, index=None)\n print(\"Successfully converted xml to csv.\")\n if args.labelMapDir:\n os.makedirs(args.labelMapDir, exist_ok=True)\n label_map_path = os.path.join(args.labelMapDir, \"label_map.pbtxt\")\n print(\"Generate `{}`\".format(label_map_path))\n\n # Create the `label_map.pbtxt` file\n pbtxt_content = \"\"\n for i, class_name in enumerate(classes_names):\n pbtxt_content = (\n pbtxt_content\n + \"item {{\\n id: {0}\\n name: '{1}'\\n}}\\n\\n\".format(\n i + 1, class_name\n )\n )\n pbtxt_content = pbtxt_content.strip()\n with open(label_map_path, \"w\") as f:\n f.write(pbtxt_content)\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"pandas.DataFrame"
]
]
|
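The converter above reads bounding boxes by positional indexing (`member[4][0]`), which depends on labelImg's element order. A minimal sketch of the same parse using explicit tag paths with `xml.etree.ElementTree` is shown below; the embedded XML string is an invented example, not repository data.

```python
import xml.etree.ElementTree as ET

SAMPLE = """
<annotation>
  <filename>img_001.jpg</filename>
  <size><width>640</width><height>480</height><depth>3</depth></size>
  <object>
    <name>cat</name>
    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>220</ymax></bndbox>
  </object>
</annotation>
"""

def parse_annotation(xml_text):
    """Return one dict per annotated object, addressing elements by tag name."""
    root = ET.fromstring(xml_text)
    width = int(root.find("size/width").text)
    height = int(root.find("size/height").text)
    rows = []
    for obj in root.findall("object"):
        box = obj.find("bndbox")
        rows.append({
            "filename": root.find("filename").text,
            "width": width,
            "height": height,
            "class": obj.find("name").text,
            "xmin": int(box.find("xmin").text),
            "ymin": int(box.find("ymin").text),
            "xmax": int(box.find("xmax").text),
            "ymax": int(box.find("ymax").text),
        })
    return rows

if __name__ == "__main__":
    print(parse_annotation(SAMPLE))
```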
thealphadollar/ML-Assignments | [
"547c115d0d62c770aa673bf98fd56ff184bcfbd6"
]
| [
"Assignment_3/Assignment3/src/Task3.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 26 01:37:42 2020\n\n@author: praveshj\n\"\"\"\n\nimport numpy as np\nimport math\nimport random\n\nprint(\"Data Loading. May Take Time....\")\n\nX = np.genfromtxt('../data/tfidf.csv',delimiter = ' ')\n\n# print(X.shape)\n\n \n#Cosine Similarity and Distance will be same in each tast. There will be no difference\ndef cosine_similarity(X, Y):\n dotP = np.dot(X, Y);\n modX = math.sqrt(X.dot(X))\n modY= math.sqrt(Y.dot(Y))\n return dotP/(modX*modY)\n\n\ndef dist(X, Y):\n return math.exp(-1*cosine_similarity(X, Y));\n\n\n\n#The functions returns the index of centroid which is at the minimum distance from a the Vector X\ndef min_dist(centroids, X):\n dis_min = 1e10\n index = 0\n for i in range(0, 8):\n temp = dist(centroids[i], X)\n if dis_min > temp:\n dis_min = temp;\n index = i;\n return index;\n\nnum_doc = X.shape[0]\ndist_mat = np.zeros((X.shape[0],X.shape[0]))\n\n\n#Distance Matrix, so we don't have to calculate distance again and again, between known vectors\nfor i in range(0, num_doc):\n for j in range(0, num_doc):\n if i == j :\n dist_mat[i][j] = 1e10\n else:\n dist_mat[i][j] = dist(X[i], X[j])\n \n#clusterClass stores the cluster assigned to each document by kmeans\nclusterClass = np.zeros((num_doc, 1))\n#Centroids is the array of the 8 cluster centroids\ncentroids = np.zeros((8, X.shape[1]))\nfor i in range(0, 8):\n centroids[i]= X[np.random.random_integers(0, X.shape[0])]\n \n \n#For each iteration we will find the mean of the all the elements, assigned to that class,\n# the new centroid will assigned\n# based on it \nfor j in range(0, 1000):\n print('Iteration', j)\n for i in range(0, num_doc):\n clusterClass[i] = min_dist(centroids, X[i])\n \n for thisClass in range(0, 8):\n temp = np.zeros((1, X.shape[1]))\n count = 0\n for i in range(0, num_doc):\n if(clusterClass[i] == thisClass):\n temp = temp + X[i];\n count +=1\n centroids[thisClass] = temp/count\n \n#The final output file is saved in kmeans.txt in the format asked\nfile= open('../data/kmeans.txt', 'w')\nfor thisClass in range(0, 8):\n temp = np.zeros((1, X.shape[1]))\n count = 0\n for i in range(0, num_doc):\n if(clusterClass[i] == thisClass):\n if(count == 0):\n file.write(str(i));\n count = 1\n else: file.write(',' + str(i))\n file.write('\\n')\nfile.close()\n\n\n"
]
| [
[
"numpy.random.random_integers",
"numpy.genfromtxt",
"numpy.dot",
"numpy.zeros"
]
]
|
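The assignment step in the k-means script above loops over every document/centroid pair. Because its distance is a monotone function of cosine similarity (exp(-cos)), the same assignment can be done with one matrix product over row-normalised vectors. The sketch below uses random stand-in data rather than the tf-idf matrix from the script.

```python
import numpy as np

def assign_clusters(X, centroids):
    """For each row of X, return the index of the centroid with highest cosine similarity."""
    Xn = X / np.linalg.norm(X, axis=1, keepdims=True)
    Cn = centroids / np.linalg.norm(centroids, axis=1, keepdims=True)
    sims = Xn @ Cn.T                  # (n_docs, k) cosine similarities
    return np.argmax(sims, axis=1)    # max similarity == min exp(-similarity) distance

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = np.abs(rng.normal(size=(100, 50))) + 1e-9            # tf-idf-like non-negative rows
    centroids = X[rng.choice(len(X), size=8, replace=False)]  # random initial centroids
    print(np.bincount(assign_clusters(X, centroids), minlength=8))
```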
cpriebe/dldt | [
"8631dc583e506adcd06498095919b5dd42323e1e"
]
| [
"tools/accuracy_checker/accuracy_checker/postprocessor/resize_segmentation_mask.py"
]
| [
"\"\"\"\nCopyright (c) 2019 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom functools import singledispatch\nimport scipy.misc\nimport numpy as np\n\nfrom ..config import NumberField\nfrom ..utils import get_size_from_config\nfrom .postprocessor import PostprocessorWithSpecificTargets, PostprocessorWithTargetsConfigValidator\nfrom ..representation import SegmentationPrediction, SegmentationAnnotation\n\n\nclass ResizeMaskConfigValidator(PostprocessorWithTargetsConfigValidator):\n size = NumberField(floats=False, optional=True, min_value=1)\n dst_width = NumberField(floats=False, optional=True, min_value=1)\n dst_height = NumberField(floats=False, optional=True, min_value=1)\n\nclass ResizeSegmentationMask(PostprocessorWithSpecificTargets):\n __provider__ = 'resize_segmentation_mask'\n\n annotation_types = (SegmentationAnnotation, )\n prediction_types = (SegmentationPrediction, )\n _config_validator_type = ResizeMaskConfigValidator\n\n def configure(self):\n self.dst_height, self.dst_width = get_size_from_config(self.config, allow_none=True)\n\n def process_image(self, annotation, prediction):\n target_height = self.dst_height or self.image_size[0]\n target_width = self.dst_width or self.image_size[1]\n\n @singledispatch\n def resize_segmentation_mask(entry, height, width):\n return entry\n\n @resize_segmentation_mask.register(SegmentationPrediction)\n def _(entry, height, width):\n entry_mask = []\n for class_mask in entry.mask:\n resized_mask = scipy.misc.imresize(class_mask, (height, width), 'nearest')\n entry_mask.append(resized_mask)\n entry.mask = np.array(entry_mask)\n\n return entry\n\n @resize_segmentation_mask.register(SegmentationAnnotation)\n def _(entry, height, width):\n entry.mask = scipy.misc.imresize(entry.mask, (height, width), 'nearest')\n return entry\n\n for target in annotation:\n resize_segmentation_mask(target, target_height, target_width)\n\n for target in prediction:\n resize_segmentation_mask(target, target_height, target_width)\n\n return annotation, prediction\n"
]
| [
[
"numpy.array"
]
]
|
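`scipy.misc.imresize`, used in the postprocessor above, was removed in SciPy 1.3, so newer environments need an alternative. A minimal sketch of the same nearest-neighbour mask resize with Pillow follows (assuming Pillow is available); nearest-neighbour keeps class ids discrete instead of blending them. The toy mask is invented data.

```python
import numpy as np
from PIL import Image

def resize_mask_nearest(mask, height, width):
    """Resize an integer class-id mask to (height, width) without mixing labels."""
    img = Image.fromarray(mask.astype(np.uint8))
    resized = img.resize((width, height), resample=Image.NEAREST)  # PIL takes (W, H)
    return np.array(resized)

if __name__ == "__main__":
    mask = np.array([[0, 0, 1],
                     [2, 2, 1]], dtype=np.uint8)
    print(resize_mask_nearest(mask, 4, 6))
```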
VolkerH/nd2 | [
"3fb449d28c10b975cd6773be8aa5802b3cb976f6"
]
| [
"setup.py"
]
| [
"import os\nimport platform\nfrom pathlib import Path\n\nfrom Cython.Build import cythonize\nfrom numpy import get_include\nfrom setuptools import Extension, setup\n\nSYSTEM = platform.system()\nSDK = Path(\"src/sdk\") / SYSTEM\nLIB = SDK / \"lib\"\nINCLUDE = SDK / \"include\"\nLINK = \"shared\" if SYSTEM == \"Linux\" else \"static\"\n\n# set env CYTHON_TRACE=1 to enable coverage on .pyx files\nCYTHON_TRACE = bool(os.getenv(\"CYTHON_TRACE\", \"0\") not in (\"0\", \"False\"))\n\nsdk = Extension(\n name=\"nd2._sdk.latest\",\n sources=[\"src/nd2/_sdk/latest.pyx\"],\n libraries=[f\"nd2readsdk-{LINK}\"],\n library_dirs=[str(LIB)],\n runtime_library_dirs=[str(LIB)] if SYSTEM == \"Linux\" else [],\n include_dirs=[str(INCLUDE), get_include()],\n extra_objects=[str(x) for x in LIB.glob(\"*\") if not x.name.startswith(\".\")],\n define_macros=[(\"LX_STATIC_LINKING\", None), (\"CYTHON_TRACE\", int(CYTHON_TRACE))],\n # extra_link_args=[\n # \"-ltiff\",\n # \"-lz\",\n # \"-ljpeg\",\n # \"-llzma\",\n # \"-ljbig\",\n # \"-ltiffxx\",\n # # \"-lm\",\n # # \"-lstdc++fs\",\n # ],\n)\n\n\nsetup(\n use_scm_version={\"write_to\": \"src/nd2/_version.py\"},\n ext_modules=cythonize(\n [sdk],\n language_level=\"3\",\n compiler_directives={\n \"linetrace\": CYTHON_TRACE,\n \"c_string_type\": \"unicode\",\n \"c_string_encoding\": \"utf-8\",\n },\n ),\n)\n"
]
| [
[
"numpy.get_include"
]
]
|
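The build script above is an instance of a common pattern: a Cython extension linked against a vendored native library, with both the build-time and run-time search paths pointed at the bundled copy. A generic sketch of that pattern is below; `mylib`, `pkg._core`, and the paths are placeholders, not this project's names, and the files would have to exist for the build to run.

```python
from pathlib import Path

from Cython.Build import cythonize
from numpy import get_include
from setuptools import Extension, setup

SDK = Path("third_party/mylib")   # placeholder location of the vendored SDK

ext = Extension(
    name="pkg._core",
    sources=["src/pkg/_core.pyx"],
    libraries=["mylib"],                        # links -lmylib
    library_dirs=[str(SDK / "lib")],            # where the linker finds it at build time
    runtime_library_dirs=[str(SDK / "lib")],    # where the loader finds it at import time
    include_dirs=[str(SDK / "include"), get_include()],
)

setup(
    name="pkg",
    ext_modules=cythonize([ext], language_level="3"),
)
```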
openvinotoolkit/mmsegmentation | [
"9f50fc158be50594ea4aecf0a07ea652c91ec846"
]
| [
"mmseg/core/utils/checkpoint.py"
]
| [
"# Copyright (C) 2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n#\n\nimport re\n\nimport torch\nimport numpy as np\nfrom terminaltables import AsciiTable\nfrom mmcv.runner.checkpoint import _load_checkpoint\nfrom mmcv.runner.dist_utils import get_dist_info\n\n\ndef _is_cls_layer(name):\n return 'fc_angular' in name or 'fc_cls_out' in name\n\n\ndef _get_dataset_id(name):\n return int(name.split('cls_head.')[-1].split('.')[0])\n\n\ndef load_state_dict(module, in_state, class_maps=None, strict=False, logger=None, force_matching=False,\n show_converted=False, ignore_keys=None):\n rank, _ = get_dist_info()\n\n unexpected_keys = []\n converted_pairs = []\n shape_mismatch_pairs = []\n shape_casted_pairs = []\n\n own_state = module.state_dict()\n for name, in_param in in_state.items():\n if ignore_keys is not None:\n ignored = any(re.match(ignore_key, name) for ignore_key in ignore_keys)\n if ignored:\n continue\n\n if name not in own_state:\n unexpected_keys.append(name)\n continue\n\n out_param = own_state[name]\n if isinstance(out_param, torch.nn.Parameter):\n out_param = out_param.data\n if isinstance(in_param, torch.nn.Parameter):\n in_param = in_param.data\n\n src_shape = in_param.size()\n trg_shape = out_param.size()\n if src_shape != trg_shape:\n if np.prod(src_shape) == np.prod(trg_shape):\n out_param.copy_(in_param.view(trg_shape))\n shape_casted_pairs.append([name, list(out_param.size()), list(in_param.size())])\n continue\n\n is_valid = False\n if force_matching:\n is_valid = len(src_shape) == len(trg_shape)\n for i in range(len(src_shape)):\n is_valid &= src_shape[i] >= trg_shape[i]\n\n if is_valid:\n if not (name.endswith('.weight') or name.endswith('.bias')):\n continue\n\n if class_maps is not None and _is_cls_layer(name):\n dataset_id = 0\n if len(class_maps) > 1:\n dataset_id = _get_dataset_id(name)\n class_map = class_maps[dataset_id]\n\n if 'fc_angular' in name:\n for src_id, trg_id in class_map.items():\n out_param[:, src_id] = in_param[:, trg_id]\n else:\n for src_id, trg_id in class_map.items():\n out_param[src_id] = in_param[trg_id]\n else:\n ind = [slice(0, d) for d in list(trg_shape)]\n out_param.copy_(in_param[ind])\n\n shape_casted_pairs.append([name, list(out_param.size()), list(in_param.size())])\n else:\n shape_mismatch_pairs.append([name, list(out_param.size()), list(in_param.size())])\n else:\n out_param.copy_(in_param)\n if show_converted:\n converted_pairs.append([name, list(out_param.size())])\n\n missing_keys = list(set(own_state.keys()) - set(in_state.keys()))\n if ignore_keys is not None:\n filtered_missing_keys = []\n for missing_key in missing_keys:\n ignored = any(re.match(ignore_key, missing_key) for ignore_key in ignore_keys)\n if not ignored:\n filtered_missing_keys.append(missing_key)\n\n missing_keys = filtered_missing_keys\n\n err_msg = []\n if unexpected_keys:\n err_msg.append('unexpected key in source state_dict: {}\\n'.format(', '.join(unexpected_keys)))\n if missing_keys:\n err_msg.append('missing keys in source state_dict: {}\\n'.format(', '.join(missing_keys)))\n\n if shape_mismatch_pairs:\n casted_info = 'these keys have mismatched shape:\\n'\n header = ['key', 'expected shape', 'loaded shape']\n table_data = [header] + shape_mismatch_pairs\n table = AsciiTable(table_data)\n err_msg.append(casted_info + table.table)\n\n if len(err_msg) > 0 and rank == 0:\n err_msg.insert(0, 'The model and loaded state dict do not match exactly\\n')\n err_msg = '\\n'.join(err_msg)\n if strict:\n raise RuntimeError(err_msg)\n elif logger is not 
None:\n logger.warning(err_msg)\n\n ok_message = []\n if converted_pairs:\n converted_info = 'These keys have been matched correctly:\\n'\n header = ['key', 'shape']\n table_data = [header] + converted_pairs\n table = AsciiTable(table_data)\n ok_message.append(converted_info + table.table)\n\n if len(ok_message) > 0 and rank == 0:\n ok_message = '\\n'.join(ok_message)\n if logger is not None:\n logger.info(ok_message)\n\n warning_msg = []\n if shape_casted_pairs:\n casted_info = 'these keys have been shape casted:\\n'\n header = ['key', 'expected shape', 'loaded shape']\n table_data = [header] + shape_casted_pairs\n table = AsciiTable(table_data)\n warning_msg.append(casted_info + table.table)\n\n if len(warning_msg) > 0 and rank == 0:\n warning_msg.insert(0, 'The model and loaded state dict do not match exactly\\n')\n warning_msg = '\\n'.join(warning_msg)\n if logger is not None:\n logger.warning(warning_msg)\n\n\ndef load_checkpoint(model,\n filename,\n map_location='cpu',\n strict=False,\n logger=None,\n force_matching=False,\n show_converted=False,\n revise_keys=[(r'^module\\.', '')],\n ignore_keys=None):\n # load checkpoint\n checkpoint = _load_checkpoint(filename, map_location)\n if not isinstance(checkpoint, dict):\n raise RuntimeError(f'No state_dict found in checkpoint file {filename}')\n\n # get state_dict from checkpoint\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n\n # strip prefix of state_dict\n for p, r in revise_keys:\n state_dict = {re.sub(p, r, k): v for k, v in state_dict.items()}\n\n # extract model\n model = model.module if hasattr(model, 'module') else model\n\n # # load model classes\n # assert hasattr(model, 'CLASSES')\n # assert isinstance(model.CLASSES, dict)\n # model_all_classes = model.CLASSES\n #\n # # build class mapping between model.classes and checkpoint.classes\n # if 'meta' in checkpoint and 'CLASSES' in checkpoint['meta']:\n # checkpoint_all_classes = checkpoint['meta']['CLASSES']\n #\n # assert set(model_all_classes.keys()).issubset(checkpoint_all_classes.keys()),\\\n # f'The model set of datasets is not a subset of checkpoint datasets: ' \\\n # f'{model_all_classes.keys()} vs {checkpoint_all_classes.keys()}'\n #\n # class_maps = dict()\n # for dataset_id in model_all_classes.keys():\n # model_dataset_classes = model_all_classes[dataset_id]\n # checkpoint_dataset_classes = checkpoint_all_classes[dataset_id]\n # assert set(model_dataset_classes.values()).issubset(checkpoint_dataset_classes.values()), \\\n # f'The model set of classes is not a subset of checkpoint classes'\n #\n # checkpoint_inv_class_map = {v: k for k, v in checkpoint_dataset_classes.items()}\n # class_maps[dataset_id] = {k: checkpoint_inv_class_map[v] for k, v in model_dataset_classes.items()}\n # else:\n # class_maps = model_all_classes\n class_maps = None\n\n if ignore_keys is not None and not isinstance(ignore_keys, (tuple, list)):\n ignore_keys = [ignore_keys]\n\n # load weights\n load_state_dict(model, state_dict, class_maps,\n strict, logger, force_matching,\n show_converted, ignore_keys)\n\n return checkpoint\n"
]
| [
[
"numpy.prod"
]
]
|
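The loader above tolerates checkpoints whose parameter names and shapes do not match the model exactly. A much-reduced sketch of the core idea, copying only name-and-shape matches and reporting the rest, is shown below; it deliberately omits the class-map and shape-casting branches, and the toy models are assumptions.

```python
import torch.nn as nn

def load_matching(model, checkpoint_state):
    """Copy checkpoint tensors whose key and shape match the model; report the rest."""
    own = model.state_dict()
    loadable = {
        k: v for k, v in checkpoint_state.items()
        if k in own and v.shape == own[k].shape
    }
    missing = [k for k in own if k not in loadable]           # model keys left untouched
    skipped = [k for k in checkpoint_state if k not in loadable]  # checkpoint keys not used
    own.update(loadable)
    model.load_state_dict(own)
    return missing, skipped

if __name__ == "__main__":
    src = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
    dst = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 3))   # last layer differs in shape
    missing, skipped = load_matching(dst, src.state_dict())
    print("missing:", missing)
    print("skipped:", skipped)
```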
rachellea/medgenetics | [
"467e82c09dc6ab168a986721ac5be6c71f616fcc"
]
| [
"src/visualization1.py"
]
| [
"#visualization.py\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport sklearn.metrics\n\nimport matplotlib\nmatplotlib.use('agg') #so that it does not attempt to display via SSH\nimport matplotlib.pyplot as plt\nplt.ioff() #turn interactive plotting off\nimport matplotlib.lines as mlines\n\nfrom . import calibr\n\nclass MakeAllFigures(object):\n def __init__(self, gene_name, results_dir):\n self.gene_name = gene_name\n self.results_dir = results_dir\n possible_files = os.listdir(results_dir)\n self.chosen_file_LR = [y for y in [x for x in possible_files if 'LR' in x] if 'all_test_out.csv' in y][0]\n self.chosen_file_MLP = [y for y in [x for x in possible_files if 'MLP' in x] if 'all_test_out.csv' in y][0]\n print('Making figures based on LR file',self.chosen_file_LR,'and MLP file',self.chosen_file_MLP)\n \n #self.test_out has columns Consensus,Change,Position,Conservation,\n #SigNoise,Pred_Prob,Pred_Label,True_Label and index of arbitrary ints\n test_out_LR = pd.read_csv(os.path.join(results_dir, self.chosen_file_LR),header=0,index_col=0)\n self.true_labels_LR = test_out_LR.loc[:,'True_Label']\n self.pred_probs_LR = test_out_LR.loc[:,'Pred_Prob']\n test_out_MLP = pd.read_csv(os.path.join(results_dir, self.chosen_file_MLP),header=0,index_col=0)\n self.true_labels_MLP = test_out_MLP.loc[:,'True_Label']\n self.pred_probs_MLP = test_out_MLP.loc[:,'Pred_Prob']\n \n #Plot characteristics\n #COlors: https://matplotlib.org/tutorials/colors/colors.html\n self.LR_color = 'crimson'\n self.LR_linestyle = 'solid'\n self.MLP_color = 'royalblue'\n self.MLP_linestyle = 'solid'\n self.neutral_color = 'k'\n self.neutral_linestyle = 'dashed'\n self.lw = 2\n \n #Plot\n self.plot_precision_recall_curve()\n self.plot_roc_curve()\n self.plot_calibration_curve()\n \n def plot_precision_recall_curve(self):\n #http://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html\n average_precision_LR = sklearn.metrics.average_precision_score(self.true_labels_LR, self.pred_probs_LR)\n precision_LR, recall_LR, _ = sklearn.metrics.precision_recall_curve(self.true_labels_LR, self.pred_probs_LR)\n LR_line, = plt.step(recall_LR, precision_LR, color=self.LR_color, alpha=0.2, where='post',linewidth=self.lw,linestyle=self.LR_linestyle)\n plt.fill_between(recall_LR, precision_LR, step='post', alpha=0.2, color=self.LR_color)\n \n average_precision_MLP = sklearn.metrics.average_precision_score(self.true_labels_MLP, self.pred_probs_MLP)\n precision_MLP, recall_MLP, _ = sklearn.metrics.precision_recall_curve(self.true_labels_MLP, self.pred_probs_MLP)\n MLP_line, = plt.step(recall_MLP, precision_MLP, color=self.MLP_color, alpha=0.2, where='post',linewidth=self.lw,linestyle=self.MLP_linestyle)\n plt.fill_between(recall_MLP, precision_MLP, step='post', alpha=0.2, color=self.MLP_color)\n \n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.title('Precision-Recall Curves')\n \n plt.legend([LR_line, MLP_line], ['LR, AP=%0.2f' % average_precision_LR, 'MLP, AP=%0.2f' % average_precision_MLP], loc='lower right')\n plt.savefig(os.path.join(self.results_dir, self.gene_name+'_Best_Models_PR_Curves.pdf'))\n plt.close()\n \n def plot_roc_curve(self):\n #http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#sphx-glr-auto-examples-model-selection-plot-roc-py\n lw = 2\n fpr_LR, tpr_LR, _ = sklearn.metrics.roc_curve(self.true_labels_LR,self.pred_probs_LR,pos_label = 1)\n roc_auc_LR = sklearn.metrics.auc(fpr_LR, tpr_LR)\n 
LR_line, = plt.plot(fpr_LR, tpr_LR, color=self.LR_color, lw=self.lw, linestyle = self.LR_linestyle)\n \n fpr_MLP, tpr_MLP, _ = sklearn.metrics.roc_curve(self.true_labels_MLP,self.pred_probs_MLP,pos_label = 1)\n roc_auc_MLP = sklearn.metrics.auc(fpr_MLP, tpr_MLP)\n MLP_line, = plt.plot(fpr_MLP, tpr_MLP, color=self.MLP_color, lw=lw, label='ROC curve (area = %0.2f)' % roc_auc_MLP, linestyle = self.MLP_linestyle)\n \n plt.plot([0, 1], [0, 1], color=self.neutral_color, lw=self.lw, linestyle=self.neutral_linestyle) #diagonal line\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristics')\n plt.legend([LR_line, MLP_line], ['LR, AUROC=%0.2f' % roc_auc_LR, 'MLP, AUROC=%0.2f' % roc_auc_MLP], loc='lower right')\n plt.savefig(os.path.join(self.results_dir, self.gene_name+'_Best_Models_ROC_Curves.pdf'))\n plt.close()\n \n def plot_calibration_curve(self):\n #https://scikit-learn.org/stable/modules/generated/sklearn.calibration.calibration_curve.html\n #https://scikit-learn.org/stable/auto_examples/calibration/plot_compare_calibration.html#sphx-glr-auto-examples-calibration-plot-compare-calibration-py\n fig, ax = plt.subplots()\n plt.plot([0, 1], [0, 1], color=self.neutral_color, lw=self.lw, linestyle=self.neutral_linestyle) #diagonal line\n fraction_of_positives_LR, mean_predicted_prob_LR = calibr.calibration_curve_new(self.true_labels_LR,\n self.pred_probs_LR, n_bins=20, strategy='quantile')\n LR_line, = plt.plot(mean_predicted_prob_LR, fraction_of_positives_LR,\n color = self.LR_color, marker='o', markersize=3, linewidth=self.lw, linestyle = self.LR_linestyle)\n \n fraction_of_positives_MLP, mean_predicted_prob_MLP = calibr.calibration_curve_new(self.true_labels_MLP,\n self.pred_probs_MLP, n_bins=20, strategy='quantile')\n MLP_line, = plt.plot(mean_predicted_prob_MLP, fraction_of_positives_MLP,\n color = self.MLP_color, marker='o', markersize=3, linewidth=self.lw, linestyle = self.LR_linestyle)\n \n #Calculate the calibration slopes using a best fit line\n LR_slope, LR_intercept, _, _, _ = stats.linregress(mean_predicted_prob_LR, fraction_of_positives_LR)\n MLP_slope, MLP_intercept, _, _, _ = stats.linregress(mean_predicted_prob_MLP, fraction_of_positives_MLP)\n \n #Plot the calibration best-fit lines\n abline(LR_slope, LR_intercept, self.LR_color)\n abline(MLP_slope, MLP_intercept, self.MLP_color)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Mean Predicted Probability')\n plt.ylabel('Fraction of Positives')\n plt.title('Calibration Curves')\n plt.legend([LR_line, MLP_line], ['LR, Slope=%0.2f' % LR_slope, 'MLP, Slope=%0.2f' % MLP_slope], loc='lower right')\n plt.savefig(os.path.join(self.results_dir, self.gene_name+'_Best_Models_Calibration_Curves.pdf'))\n plt.close()\n\n#Plot a line based on a slope and intercept in matplotlib\ndef abline(slope, intercept, color):\n \"\"\"Plot a line from slope and intercept\"\"\"\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n plt.plot(x_vals, y_vals, color = color, linewidth = 1, linestyle = 'dotted')\n\n"
]
| [
[
"matplotlib.use",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.step",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"scipy.stats.linregress",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.gca"
]
]
|
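The plotting class above builds its ROC panels from `sklearn.metrics.roc_curve` and `auc`. A minimal, self-contained sketch of one such curve on invented labels and probabilities is below; the output file name is arbitrary.

```python
import matplotlib
matplotlib.use("agg")               # render off-screen, as the script above does
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics

true_labels = np.array([0, 0, 1, 1, 0, 1, 1, 0])
pred_probs = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7, 0.55, 0.3])

fpr, tpr, _ = sklearn.metrics.roc_curve(true_labels, pred_probs, pos_label=1)
auroc = sklearn.metrics.auc(fpr, tpr)

plt.plot(fpr, tpr, lw=2, label="AUROC = %0.2f" % auroc)
plt.plot([0, 1], [0, 1], "k--", lw=2)       # chance diagonal
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.legend(loc="lower right")
plt.savefig("roc_example.pdf")
plt.close()
```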
benoitblanc/kloppy | [
"5c3f94ff8806f9e23f8bad095a948a403a06a54c"
]
| [
"examples/playing_time.py"
]
| [
"import logging\nimport sys\nfrom collections import Counter\n\nfrom kloppy import metrica\nimport matplotlib.pyplot as plt\n\nfrom kloppy.domain import Ground\n\n\ndef main():\n \"\"\"\n This example shows how to determine playing time\n \"\"\"\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n )\n\n dataset = metrica.load_open_data(sample_rate=1.0 / 25)\n\n playing_seconds_per_player = Counter()\n for frame in dataset.frames:\n playing_seconds_per_player.update(\n [\n player.jersey_no\n for player in frame.players_coordinates.keys()\n if player.team.ground == Ground.HOME\n ]\n )\n\n x = range(len(playing_seconds_per_player))\n jersey_numbers, playing_seconds = zip(\n *sorted(playing_seconds_per_player.items())\n )\n playing_minutes = [seconds / 60 for seconds in playing_seconds]\n\n plt.bar(x, playing_minutes, align=\"center\", alpha=0.5)\n plt.xticks(x, jersey_numbers)\n plt.ylabel(\"Minutes\")\n plt.title(\"Playing time per player\")\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks"
]
]
|
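Because the tracking data in the example above is sampled at a fixed rate, counting the frames in which a player appears and dividing by the sample rate gives playing time. A tiny self-contained sketch of that counting step with made-up frame data follows; the 1 Hz rate mirrors the example's sampling.

```python
from collections import Counter

SAMPLE_RATE_HZ = 1.0                       # one tracked frame per second
frames = [                                 # jersey numbers visible in each sampled frame
    {7, 9, 10},
    {7, 9, 10},
    {7, 10},
    {7, 10, 11},
]

seen = Counter()
for players in frames:
    seen.update(players)                   # one count per frame a player appears in

for jersey, n_frames in sorted(seen.items()):
    print(f"#{jersey}: {n_frames / SAMPLE_RATE_HZ / 60:.2f} minutes")
```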
unpsjb-rtsg/case-2017 | [
"1048a8de90a99982d574f19be29c7fe13e28a267"
]
| [
"wcrt-test-mbed.py"
]
| [
"from __future__ import print_function\n\nimport os\nimport sys\nimport serial\nimport xml.etree.cElementTree as et\nimport ntpath\nimport re\nimport struct\nimport glob\nimport datetime as dt\nimport pandas as pd\nimport mbed_lstools\nimport subprocess\nimport json\nimport shutil\nfrom bunch import Bunch, unbunchify\nfrom pyOCD.board import MbedBoard\nfrom time import sleep\nfrom argparse import ArgumentParser\n\n\ndef test_rts_in_mbed(rts, ser, methods, task_metric=None):\n # send task count\n ser.write(struct.pack('>i', len(rts)))\n\n # send task parameters\n for task in rts:\n ser.write(struct.pack('>i', task[\"C\"]))\n ser.write(struct.pack('>i', task[\"T\"]))\n ser.write(struct.pack('>i', task[\"D\"]))\n\n result_str = []\n\n # retrieve the results\n for _ in methods:\n # read method, schedulable, usecs and cycles\n r_str = [ser.read(4) for _ in range(4)]\n if task_metric:\n if task_metric == \"detail\":\n for _ in rts:\n # read wcrt, ceils, loops\n r_str.extend([ser.read(4) for _ in range(3)])\n if task_metric == \"total\":\n r_str.append(ser.read(4)) # cc\n r_str.append(ser.read(4)) # loops\n result_str.append(r_str)\n\n magic = struct.unpack('>i', ser.read(4))[0]\n if magic != 0xABBA:\n print(\"Error: received wrong end code ({0})\".format(magic), file=sys.stderr)\n return False, None\n\n # store the result data -- one list per method\n result_list = []\n\n for r_str in result_str:\n result = [ struct.unpack('>i', r_str[0])[0], struct.unpack('>i', r_str[1])[0],\n struct.unpack('>i', r_str[2])[0], struct.unpack('>i', r_str[3])[0] ]\n\n # verify that the method id is valid\n if result[0] not in range(6):\n print(\"Error: invalid method id {0}\".format(result[0]), file=sys.stderr)\n return False, None\n \n # verify that a valid schedulable result was sent\n if result[1] not in [0,1]:\n print(\"Error: invalid schedulability result {0}\".format(result[1]), file=sys.stderr)\n return False, None\n\n if task_metric is not None:\n if task_metric == \"detail\":\n # wcrt, number of ceils/floors operations and amount of loops per task\n for r in r_str[4:]:\n result.append(struct.unpack('>i', r)[0])\n \n if task_metric == \"total\":\n # total ceil/floor operations and total for/while loops count\n result.append(struct.unpack('>i', r_str[4])[0])\n result.append(struct.unpack('>i', r_str[5])[0])\n\n result_list.append(result)\n\n # verify that all the methods have the same schedulability result\n sched_ref = result_list[0][1]\n for r in result_list:\n if r[1] != sched_ref:\n print(\"Error: {0}\".format([\"{0}:{1}\".format(x[0], x[1]) for x in result_list]), file=sys.stderr)\n return False, None\n\n return True, result_list\n\n\ndef test_rts(rts, ser, methods, task_metric=None):\n test_ok = False\n\n while not test_ok:\n try: \n test_ok, result_t = test_rts_in_mbed(rts, ser, methods, task_metric)\n except serial.SerialTimeoutException as e:\n print(\"{0}: {1}\".format(e.errno, e.strerror), file=sys.stderr)\n except UnicodeDecodeError as e:\n print(\"{0}: {1}\".format(e.errno, e.strerror), file=sys.stderr)\n except struct.error as e:\n print(\"struct.error: {0}\".format(e), file=sys.stderr) \n \n if not test_ok:\n # reset mbed board and wait half a second\n print(\"Reset\", file=sys.stderr)\n ser.reset_input_buffer()\n ser.reset_output_buffer()\n ser.sendBreak(0.5) \n sleep(0.5)\n \n return result_t\n\n\ndef test_file_hdfs(g, ser, testcfg):\n results = []\n for key in g[\"keys\"]:\n try:\n # retrieve the selected data from the store\n df = pd.read_hdf(g.file, key, where=g.where, mode='r')\n\n # 
group by rts\n            dfg = df.groupby(['tdist','trange','ntask','uf','rts_id'])\n\n            print(\"Ready to test {0} rts from {1}...\".format(len(dfg), key))\n\n            for k, v in dfg:\n                v.columns = [x.upper() for x in v.columns]\n                rts = v.to_dict(orient='records')\n                result_t = test_rts(rts, ser, testcfg.test.methods, testcfg.test.task_metric)\n\n                for r in result_t:\n                    r.extend([k[3], k[2], k[4]])\n\n                results.extend(result_t)\n        except KeyError as e:\n            print(\"{0}\".format(e), file=sys.stderr)\n            continue\n\n    return results\n\n\ndef create_df(test, results):\n    # list of column names for computing sum with df.sum(axis=1)\n    col_names_wcrt = []\n    col_names_cc = []\n    col_names_loops = []\n\n    # column names for the data frame (the csv has no headers)\n    col_names = [\"method_id\", \"sched\", \"usecs\", \"cycles\"]\n\n    # complete columns if the csv has detailed results per task\n    if test.task_metric == \"detail\":\n        for n in range(1, test.task_count + 1):\n            col_names.extend([\"wcrt_{0}\".format(n), \"cc_{0}\".format(n), \"loops_{0}\".format(n)])\n            col_names_wcrt.append(\"wcrt_{0}\".format(n))\n            col_names_cc.append(\"cc_{0}\".format(n))\n            col_names_loops.append(\"loops_{0}\".format(n))\n\n    # complete columns if the csv has only total values for cc and # of loops\n    if test.task_metric == \"total\":\n        col_names.extend([\"cc\", \"loops\"])\n\n    # index columns\n    col_names.extend([\"uf\", \"rts_size\", \"rts_id\"])\n\n    # create dataframe\n    df = pd.DataFrame(results, columns=col_names)\n\n    return df\n\n\ndef save_to_hdfs(args, board_info, df):\n    save = args.save\n    with pd.HDFStore(save.file, complevel=9, complib='blosc') as store:\n        if save.key in store.keys():\n            store.remove(save.key)\n\n        # save the results into the store\n        store.put(save.key, df, format='table', min_itemsize = {'values': 50})\n\n        # add additional metadata\n        metadata = {'datetime': str(dt.datetime.now()), 'config': Bunch.toDict(args), 'board_info': board_info}\n        store.get_storer(save.key).attrs.metadata = metadata\n\n    print(\"Results saved in {0} store as {1}.\".format(save.file, save.key))\n\n\ndef build_project(maincfg, testcfg):\n    make_path = os.path.join(maincfg.project.make_path, \"make\") if maincfg.project.has_key(\"make_path\") else \"make\"\n\n    methods_to_test = \":\".join([str.upper(\"TEST_{0}\".format(m)) for m in testcfg.test.methods])\n\n    method_ids = \":\".join([str.upper(\"{0}_ID={1}\".format(k,v)) for k,v in maincfg.test.supported_methods.items()])\n\n    make_clean = [make_path,\n                  \"--no-print-directory\",\n                  \"-C\", str(maincfg.project[testcfg.target.platform].path),\n                  \"clean\"]\n\n    make_call = [make_path,\n                 \"--no-print-directory\",\n                 \"-C\", str(maincfg.project[testcfg.target.platform].path),\n                 \"METHODS_TO_TEST={0}\".format(methods_to_test),\n                 \"METHODS_IDS={0}\".format(method_ids),\n                 \"PRINT_TASK_RESULTS={0}\".format(\"1\" if testcfg.test.task_metric else \"0\"),\n                 maincfg.test.supported_tests[testcfg.test.test_type]]\n    make_call.extend(testcfg.target.project.build_options)\n\n    if maincfg.project.has_key(\"toolchain_path\"):\n        make_call.append(\"GCC_BIN='{0}'\".format(maincfg.project.toolchain_path))\n\n    print(\"Clean project {0}.\".format(testcfg.target.platform), file=sys.stderr)\n    returncode = subprocess.call(make_clean, stdout=None, stderr=None)\n\n    print(\"Copy main_wcrt.cpp to {0} directory.\".format(maincfg.project[testcfg.target.platform].path))\n    try:\n        shutil.copy('main_wcrt.cpp', maincfg.project[testcfg.target.platform].path)\n    except (shutil.Error, IOError) as e:\n        print(e, file=sys.stderr)\n        exit(1)\n\n    print(\"Building project {0}.\".format(testcfg.target.platform), file=sys.stderr)\n    returncode = subprocess.call(make_call, stdout=None, stderr=None)\n\n    if returncode > 0:\n        exit(1)\n\n\ndef configure_board(maincfg, target):\n    # retrieve all connected mbed boards\n    mbed_ls = mbed_lstools.create()\n    connected_boards = mbed_ls.list_mbeds()\n\n    if not connected_boards:\n        print(\"Error: no mbed boards found.\", file=sys.stderr)\n        exit(1)\n\n    mbed_board_info = None\n\n    if target.board.auto:\n        # select the first mbed board found that matches the selected platform\n        for connected_board in connected_boards:\n            if connected_board['platform_name'].upper() == target.platform.upper():\n                mbed_board_info = connected_board\n                break\n\n        if not mbed_board_info:\n            print(\"Error: no {0} board found\".format(target.platform), file=sys.stderr)\n            exit(1)\n    else:\n        # use the specified target_id to select the board\n        for connected_board in connected_boards:\n            if target.board.target_id == connected_board['target_id']:\n                mbed_board_info = connected_board\n                break\n\n        if not mbed_board_info:\n            print(\"Error: no mbed board found with target_id {0}\".format(target.board.target_id), file=sys.stderr)\n            exit(1)\n\n    if target.platform.upper() != mbed_board_info['platform_name']:\n        print(\"Error: platform mismatch ({0}, {1})\".format(target.platform, mbed_board_info[\"platform_name\"]), file=sys.stderr)\n        exit(1)\n\n    # print board info\n    print(\"Using {0} board - serial port: {1}.\".format(mbed_board_info['platform_name'], mbed_board_info['serial_port']))\n\n    # connect to the mbed board\n    mbed_board = MbedBoard.chooseBoard(board_id=mbed_board_info[\"target_id_usb_id\"], init_board=True)\n\n    # flash the binary if specified\n    if target.board.flash:\n        binfile = os.path.join(maincfg.project[target.platform].path,\n                               maincfg.project[target.platform].bin_path,\n                               maincfg.project.bin_file)\n        try:\n            print(\"Flash binary {0}...\".format(binfile), file=sys.stderr)\n            mbed_board.target.resume()\n            mbed_board.target.halt()\n            mbed_board.flash.flashBinary(binfile)\n        except IOError as e:\n            print(\"{0}: {1}\".format(binfile, e.strerror), file=sys.stderr)\n            exit(1)\n\n    # reset target board\n    mbed_board.target.reset()\n    mbed_board.uninit()\n\n    return mbed_board_info\n\n\ndef get_args():\n    \"\"\" Command line arguments \"\"\"\n    parser = ArgumentParser(description=\"Evaluate the schedulability of a set of RTS, sending them to an \" +\n                            \"mbed board via serial port. The results are read back and saved in an HDF5 \" +\n                            \"store, or printed to stdout by default. The test configuration \" +\n                            \"is loaded from a JSON file.\")\n\n    parser.add_argument(\"testcfg\", type=str, metavar=\"file\", help=\"Test configuration.\")\n\n    hdfs_group = parser.add_argument_group('HDF5 store', 'Options for saving the results into a HDF5 store.')\n    hdfs_group.add_argument(\"--reuse-key\", help=\"Replace the DataFrame assigned under key in the store.\", default=False, action=\"store_true\")\n\n    return parser.parse_args()\n\n\ndef main():\n    # get command line arguments\n    args = get_args()\n\n    # get main configuration file\n    try:\n        with open('main-config.json') as f:\n            maincfg_dict = json.load(f)\n    except IOError as err:\n        print(\"main-config.json: {0}\".format(err.strerror), file=sys.stderr)\n        exit(1)\n\n    # get test configuration file\n    try:\n        with open(args.testcfg) as f:\n            test_config = json.load(f)\n    except IOError as err:\n        print(\"{0}: {1}\".format(args.testcfg, err.strerror), file=sys.stderr)\n        exit(1)\n\n    if \"test_metric\" not in test_config[\"test\"].keys():\n        test_config[\"test\"][\"test_metric\"] = False\n\n    # parse the configuration files into namespaces\n    testcfg = Bunch.fromDict(test_config)\n    maincfg = Bunch.fromDict(maincfg_dict)\n\n    # if using a hdfs store, check before running the tests if the specified\n    # store key already exists -- if so, print an error message.\n    if testcfg.has_key('save') and testcfg.save.has_key('hdfs') and not args.reuse_key:\n        try:\n            df = pd.read_hdf(testcfg.save.hdfs.file, testcfg.save.hdfs.key, stop=1)\n            print(\"Error: key {0} exists (use --reuse-key).\".format(testcfg.save.hdfs.key), file=sys.stderr)\n            exit(1)\n        except KeyError:\n            pass  # ok: the key does not exist yet\n\n    # build the target project\n    if testcfg.target.project.build:\n        build_project(maincfg, testcfg)\n\n    # select the mbed board and retrieve the serial port connection\n    mbed_board_info = configure_board(maincfg, testcfg.target)\n\n    # open the specified serial port\n    ser = serial.Serial(port=mbed_board_info['serial_port'], baudrate=testcfg.target.baudrate,\n                        timeout=0.5, write_timeout=0.5, xonxoff=True, dsrdtr=True)\n\n    # dataframe list\n    df_list = []\n\n    # evaluate the given number of rts on the file(s), sending them to the\n    # mbed board, and storing the results as a pandas dataframe.\n    for g in testcfg.test.data:\n        try:\n            df_list.append(create_df(testcfg.test, test_file_hdfs(g, ser, testcfg)))\n        except IOError as e:\n            print(e, file=sys.stderr)\n            sys.exit(1)\n\n    # generate a new dataframe\n    df = pd.concat(df_list)\n\n    # save the results or send them to stdout\n    if testcfg.has_key('save'):\n        save_to_hdfs(testcfg, mbed_board_info, df)\n    else:\n        df.to_csv(path_or_buf=sys.stdout, sep=':', index=False)\n\n\nif __name__ == '__main__':\n    main()\n"
]
| [
[
"pandas.HDFStore",
"pandas.DataFrame",
"pandas.read_hdf",
"pandas.concat"
]
]
|
master-coro/gantt-trampoline | [
"987304ffb73628896219c8b7b5b9e981960cafff"
]
| [
"lib/GanttPlot.py"
]
| [
"# Importing the matplotlb.pyplot\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass GanttPlot():\n def __init__(self, ylim, xlim, title=None):\n self.fig, self.gnt = plt.subplots(figsize=(12, 8))\n self.gnt.set_ylim(0, ylim+1)\n self.gnt.set_xlim(-1, xlim+1)\n self.ylim = ylim\n self.xlim = xlim\n self.tasksYticks = {}\n self.tasksColors = {}\n self.tasks = {}\n\n # Setting labels for x-axis and y-axis\n self.gnt.set_xlabel('Time(s)')\n self.gnt.set_ylabel('Tasks')\n\n # Setting graph attribute\n self.gnt.grid(True)\n\n if title:\n self.gnt.set_title(title)\n\n # Define available y position\n self.available_y = []\n index = 1\n while index < ylim:\n self.available_y.append((index, 2))\n index += 3\n\n # Initiate labels\n self.ylabels = [str(_) for _ in range(ylim)]\n self.gnt.set_yticks([_[0]+1 for _ in self.available_y])\n\n self.numberTasks = 0\n\n def plotArrow(self, task, x):\n if task.name in self.tasksYticks:\n y_index = self.tasksYticks[task.name]\n self.tasksYticks[task.name] = y_index\n self.ylabels[self.numberTasks] = task.name\n self.gnt.set_yticklabels(labels=self.ylabels)\n self.gnt.arrow(\n x, y_index[0]-0.2, 0, 2, color='red', width=0.2, head_width=0.6)\n else:\n y_index = self.available_y[self.numberTasks]\n if self.numberTasks >= int(self.ylim/3):\n print(\n 'Task was not added, gantt diagram full. Extend ylim to add more tasks.')\n else:\n self.tasksColors[task.name] = np.random.rand(3,)\n self.tasks[task.name] = task.name\n self.tasksYticks[task.name] = y_index\n self.ylabels[self.numberTasks] = task.name\n self.gnt.set_yticklabels(labels=self.ylabels)\n self.numberTasks += 1\n self.gnt.arrow(\n x, y_index[0]-0.2, 0, 2, color='red', width=0.2, head_width=0.6)\n \n def plotReverseArrow(self, task, x):\n if task.name in self.tasksYticks:\n y_index = self.tasksYticks[task.name]\n self.tasksYticks[task.name] = y_index\n self.ylabels[self.numberTasks] = task.name\n self.gnt.set_yticklabels(labels=self.ylabels)\n self.gnt.arrow(\n x, 2.2+y_index[0], 0, -2, color='red', width=0.2, head_width=0.6)\n else:\n y_index = self.available_y[self.numberTasks]\n if self.numberTasks >= int(self.ylim/3):\n print(\n 'Task was not added, gantt diagram full. 
Extend ylim to add more tasks.')\n else:\n self.tasksColors[task.name] = np.random.rand(3,)\n self.tasks[task.name] = task.name\n self.tasksYticks[task.name] = y_index\n self.ylabels[self.numberTasks] = task.name\n self.gnt.set_yticklabels(labels=self.ylabels)\n self.numberTasks += 1\n self.gnt.arrow(\n x, y_index[0]+2.2, 0, -2, color='red', width=0.2, head_width=0.6)\n \n def plotAutoTask(self, task, periods):\n # print(self.tasksYticks[task.name])\n if task.name in self.tasksYticks:\n self.gnt.broken_barh(\n periods, self.tasksYticks[task.name], facecolor=self.tasksColors[task.name])\n else:\n print(\"Warning : Tried to run a task that was not ready.\")\n\n def addTask(self, task):\n # print(self.tasksYticks[task.name])\n if task.name in self.tasksYticks:\n self.gnt.broken_barh(\n task.runningPeriods, self.tasksYticks[task.name], facecolor=self.tasksColors[task.name])\n # self.gnt.plot(task.terminationTime, self.tasksYticks[task.name][0], c='red', marker='o')\n # self.gnt.arrow(task.activationTime, self.tasksYticks[task.name][0]-0.2, 0, 2, color='red', width=0.8, head_width=0.6)\n else:\n print(\"Warning : Tried to run a task that was not ready.\")\n\n def activateTask(self, task):\n if task.name in self.tasksYticks:\n y_index = self.tasksYticks[task.name]\n self.tasks[task.name].ready = True\n self.tasksYticks[task.name] = y_index\n self.ylabels[self.numberTasks] = task.name\n self.gnt.set_yticklabels(labels=self.ylabels)\n self.gnt.arrow(\n task.activationTime, y_index[0]-0.2, 0, 2, color='red', width=0.2, head_width=0.6)\n\n else:\n y_index = self.available_y[self.numberTasks]\n\n if self.numberTasks == int(self.ylim/3):\n print(\n 'Task was not added, gantt diagram full. Extend ylim to add more tasks.')\n else:\n self.tasksColors[task.name] = np.random.rand(3,)\n self.tasks[task.name] = task\n self.tasks[task.name].ready = True\n self.tasksYticks[task.name] = y_index\n self.ylabels[self.numberTasks] = task.name\n self.gnt.set_yticklabels(labels=self.ylabels)\n self.numberTasks += 1\n self.gnt.arrow(\n task.activationTime, y_index[0]-0.2, 0, 2, color='red', width=0.2, head_width=0.6)\n\n def terminateTask(self, task):\n y_index = self.tasksYticks[task.name]\n\n if task.name not in self.tasksYticks:\n print(\"Can't terminate a task that was not registered.\")\n else:\n self.tasks[task.name].ready = False\n self.gnt.plot(task.terminationTime,\n y_index[0], c='red', marker='o')\n\n def show(self):\n plt.show()\n"
]
| [
[
"matplotlib.pyplot.show",
"numpy.random.rand",
"matplotlib.pyplot.subplots"
]
]
|
galaxyChen/dl4ir-webnav | [
"09a76395cb414ac2b56cceea06ddb169cefd4f70"
]
| [
"op_sentence.py"
]
| [
"'''\nCustom theano class to access page sentences.\n'''\nimport numpy as np\nimport theano\nfrom theano import gof\nfrom theano import tensor\nimport utils\nfrom nltk.tokenize import wordpunct_tokenize\nimport nltk\nimport time\nimport parameters as prm\n\nclass Sentence(theano.Op):\n __props__ = ()\n\n def __init__(self, wiki, vocab, n_consec):\n self.wiki = wiki\n self.vocab = vocab\n self.n_consec = n_consec # number of consecutive sections that are used to form a query\n nltk.download('punkt')\n self.tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\n def make_node(self, x, x2, x3, x4):\n # check that the theano version has support for __props__.\n # This next line looks like it has a typo,\n # but it's actually a way to detect the theano version\n # is sufficiently recent to support the use of __props__.\n assert hasattr(self, '_props'), \"Your version of theano is too old to support __props__.\"\n x = tensor.as_tensor_variable(x)\n x2 = tensor.as_tensor_variable(x2)\n x3 = tensor.as_tensor_variable(x3)\n x4 = tensor.as_tensor_variable(x4)\n return theano.Apply(self, [x, x2, x3, x4], [tensor.fvector().type(), tensor.imatrix().type()])\n\n\n def perform(self, node, inputs, output_storage):\n st = time.time()\n q = inputs[0]\n q_m = inputs[1]\n pages_id = inputs[2]\n div = inputs[3]\n\n R = np.zeros((len(pages_id)/div,), np.float32)\n \n if prm.reward_type == None:\n # speed up by not computing rewards and best answer in supervised mode.\n best_answers_ = -2*np.ones((len(pages_id)/div, prm.n_consec*prm.max_words_query), np.int32) #initialize with -2. -2 means stop word.\n else:\n best_answers = []\n max_words = 0\n\n for i in range(0, len(pages_id), div):\n q_bow = {}\n for j, ax in enumerate(q[i/div]):\n if q_m[i/div][j] > 0.:\n q_bow[ax] = 0\n set_q_bow = set(q_bow.keys())\n\n sents = []\n ref_id = []\n ref_range = []\n for j in range(div):\n page_id = pages_id[i+j]\n if int(page_id) != -1:\n text = self.wiki.get_article_text(page_id)\n sents_pre = self.tokenizer.tokenize(text.decode('ascii', 'ignore'))\n n_consec = min(len(sents_pre),self.n_consec)\n for sk in range(0,len(sents_pre)-n_consec+1):\n sent = ''\n for sj in range(n_consec):\n sent += ' ' + sents_pre[sk+sj]\n sents.append(sent.strip())\n ref_id.append(page_id)\n\n ref_range.append([j,len(sents)])\n\n if len(sents) > 0:\n\n s = np.zeros((len(sents)), np.float32)\n c = np.zeros((len(sents)), np.float32)\n sents_idx = []\n for j, sent in enumerate(sents):\n words = wordpunct_tokenize(sent.lower())\n sent_bow = {}\n for word in words:\n if word in self.vocab:\n sent_bow[self.vocab[word]] = 0\n sents_idx.append(words)\n c[j] = len(list(set(sent_bow.keys()) & set_q_bow)) # Count how many elements they have in common\n s[j] = len(sent_bow)\n\n match_rate = 2 * c / np.maximum(1., (len(set_q_bow) + s))\n idx = np.argmax(match_rate)\n if str(prm.reward_type).lower() == 'discrete':\n R[i/div] = float(match_rate[idx] == 1.) # make reward \\in {0,1}\n elif str(prm.reward_type).lower() == 'continuous':\n R[i/div] = match_rate[idx] # make reward \\in [0,1]\n else:\n raise ValueError('Not a valid value for reward_type parameter. Valid options are \"continuous\", \"discrete\", or None.')\n\n sent_idx = utils.text2idx(sents_idx[idx], self.vocab)\n best_answers.append(sent_idx)\n if len(sent_idx) > max_words:\n max_words = len(sent_idx) \n else:\n best_answers.append([-2]) #initialize with -2. -2 means stop word.\n\n best_answers_ = -2*np.ones((len(best_answers), max_words), np.int32) #initialize with -2. 
-2 means stop word.\n for i, best_answer in enumerate(best_answers):\n best_answers_[i, :len(best_answer)] = best_answer \n\n output_storage[0][0] = R\n output_storage[1][0] = best_answers_\n #print 'time Sentence op:', str(time.time() - st)\n\n def grad(self, inputs, output_grads):\n return [tensor.zeros_like(ii, dtype=theano.config.floatX) for ii in inputs]\n\n"
]
| [
[
"numpy.argmax"
]
]
|
CBORT-NCBIB/oct-cbort | [
"7f2bc525bb3f5b3bcf2e41622129c87ee710161a"
]
| [
"oct/view/colormap.py"
]
| [
"from oct import *\nimport colorcet as cc\nimport matplotlib\n\ncp, np, convolve, gpuAvailable, freeMemory, e = checkForCupy()\n\n\nclass Colormap:\n \"\"\"Generate an rgb frame from a greyscale using a chosen colormap\"\"\"\n\n def __init__(self, cmapLabel='hsv'):\n self.cmap = matplotlib.cm.get_cmap('hsv')\n self.cmapLabel = cmapLabel\n\n def apply(self, image, weight=None, mask=None, cmap=None):\n \"\"\" Apply a given specific colormap to input greyscale image \"\"\"\n\n if not (cmap is None):\n self.cmapLabel = cmap\n\n if self.cmapLabel == 'hsv':\n rgb = self.applyHsv(image, weight=weight, mask=mask)\n elif self.cmapLabel == 'C2':\n rgb = self.applyC2(image, weight=weight, mask=mask)\n elif self.cmapLabel == 'CET_C2':\n rgb = self.applyCET_C2(image, weight=weight, mask=mask)\n else:\n print('Not an accepted colormap mode')\n\n out = self.formatOut(rgb)\n return out\n\n def applyHsv(self, image, weight=None, mask=None):\n \"\"\"\n Return a Uint-8 rgb image with hsv format\n\n Notes:\n Works with GPU as does not go through matplotlib\n \"\"\"\n # For colorbar\n self.cmap = matplotlib.cm.get_cmap('hsv')\n h = cp.asarray(image)\n s = cp.ones_like(h)\n\n if not (weight is None):\n v = cp.asarray(weight)\n else:\n v = cp.ones_like(h)\n\n if not (mask is None):\n v = v * cp.asarray(mask)\n\n hsv = cp.concatenate((h[:, :, None], s[:, :, None], v[:, :, None]), axis=2)\n rgb = self.hsv2Rgb(hsv)\n\n return rgb\n\n def applyC2(self, image, weight=None, mask=None):\n \"\"\"\n Return a Uint-8 rgb image with CIECAM format from an array (below)\n Notes:\n Does not work with cupy arrays because it uses matplotlib\n \"\"\"\n\n image = cp.asnumpy(image)\n\n if not (weight is None):\n w = cp.asnumpy(weight)\n else:\n w = cp.ones_like(image)\n\n if not (mask is None):\n w = w * cp.asnumpy(mask)\n\n rgba = self.applyCIECAM02(image)\n rgb = rgba[:, :, 0:3] * w[:, :, None]\n\n return rgb\n\n def applyCET_C2(self, image, weight=None, mask=None):\n \"\"\"\n Return a Uint-8 rgb image with CET_C2 format from the colorcet library\n Notes:\n Does not work with cupy arrays because it uses matplotlib\n \"\"\"\n image = cp.asnumpy(image)\n\n if not (weight is None):\n w = cp.asnumpy(weight)\n else:\n w = cp.ones_like(image)\n\n if not (mask is None):\n w = w * cp.asnumpy(mask)\n\n self.cmap = cc.cm['CET_C2']\n rgba = self.cmap(image)\n rgb = rgba[:, :, 0:3] * w[:, :, None]\n\n return rgb\n\n def hsv2Rgb(self, hsv):\n \"\"\"\n Convert hsv values to rgb.\n\n Parameters\n ----------\n hsv : (..., 3) array-like\n All values assumed to be in range [0, 1]\n\n Returns\n -------\n (..., 3) ndarray\n Colors converted to RGB values in range [0, 1]\n \"\"\"\n in_shape = hsv.shape\n\n h = hsv[..., 0]\n s = hsv[..., 1]\n v = hsv[..., 2]\n\n r = cp.empty_like(h)\n g = cp.empty_like(h)\n b = cp.empty_like(h)\n\n i = (h * 6.0).astype(int)\n f = (h * 6.0) - i\n p = v * (1.0 - s)\n q = v * (1.0 - s * f)\n t = v * (1.0 - s * (1.0 - f))\n\n idx = i % 6 == 0\n r[idx] = v[idx]\n g[idx] = t[idx]\n b[idx] = p[idx]\n\n idx = i == 1\n r[idx] = q[idx]\n g[idx] = v[idx]\n b[idx] = p[idx]\n\n idx = i == 2\n r[idx] = p[idx]\n g[idx] = v[idx]\n b[idx] = t[idx]\n\n idx = i == 3\n r[idx] = p[idx]\n g[idx] = q[idx]\n b[idx] = v[idx]\n\n idx = i == 4\n r[idx] = t[idx]\n g[idx] = p[idx]\n b[idx] = v[idx]\n\n idx = i == 5\n r[idx] = v[idx]\n g[idx] = p[idx]\n b[idx] = q[idx]\n\n idx = s == 0\n r[idx] = v[idx]\n g[idx] = v[idx]\n b[idx] = v[idx]\n\n rgb = cp.stack([r, g, b], axis=-1)\n\n return rgb.reshape(in_shape)\n\n def applyCIECAM02(self, image):\n 
colors = np.array([[0.91510904, 0.55114749, 0.67037311],\n [0.91696411, 0.55081563, 0.66264366],\n [0.91870995, 0.55055664, 0.65485881],\n [0.92034498, 0.55037149, 0.64702356],\n [0.92186763, 0.55026107, 0.63914306],\n [0.92327636, 0.55022625, 0.63122259],\n [0.9245696, 0.55026781, 0.62326754],\n [0.92574582, 0.5503865, 0.6152834],\n [0.92680349, 0.55058299, 0.6072758],\n [0.92774112, 0.55085789, 0.59925045],\n [0.9285572, 0.55121174, 0.59121319],\n [0.92925027, 0.551645, 0.58316992],\n [0.92981889, 0.55215808, 0.57512667],\n [0.93026165, 0.55275127, 0.56708953],\n [0.93057716, 0.5534248, 0.55906469],\n [0.93076407, 0.55417883, 0.55105838],\n [0.93082107, 0.55501339, 0.54307696],\n [0.93074689, 0.55592845, 0.53512681],\n [0.9305403, 0.55692387, 0.52721438],\n [0.93020012, 0.55799943, 0.51934621],\n [0.92972523, 0.55915477, 0.51152885],\n [0.92911454, 0.56038948, 0.50376893],\n [0.92836703, 0.56170301, 0.49607312],\n [0.92748175, 0.56309471, 0.48844813],\n [0.9264578, 0.56456383, 0.48090073],\n [0.92529434, 0.56610951, 0.47343769],\n [0.92399062, 0.56773078, 0.46606586],\n [0.92254595, 0.56942656, 0.45879209],\n [0.92095971, 0.57119566, 0.4516233],\n [0.91923137, 0.5730368, 0.44456642],\n [0.91736048, 0.57494856, 0.4376284],\n [0.91534665, 0.57692945, 0.43081625],\n [0.91318962, 0.57897785, 0.42413698],\n [0.91088917, 0.58109205, 0.41759765],\n [0.90844521, 0.58327024, 0.41120533],\n [0.90585771, 0.58551053, 0.40496711],\n [0.90312676, 0.5878109, 0.3988901],\n [0.90025252, 0.59016928, 0.39298143],\n [0.89723527, 0.5925835, 0.38724821],\n [0.89407538, 0.59505131, 0.38169756],\n [0.89077331, 0.59757038, 0.37633658],\n [0.88732963, 0.60013832, 0.37117234],\n [0.88374501, 0.60275266, 0.36621186],\n [0.88002022, 0.6054109, 0.36146209],\n [0.87615612, 0.60811044, 0.35692989],\n [0.87215369, 0.61084868, 0.352622],\n [0.86801401, 0.61362295, 0.34854502],\n [0.86373824, 0.61643054, 0.34470535],\n [0.85932766, 0.61926872, 0.3411092],\n [0.85478365, 0.62213474, 0.3377625],\n [0.85010767, 0.6250258, 0.33467091],\n [0.84530131, 0.62793914, 0.3318397],\n [0.84036623, 0.63087193, 0.32927381],\n [0.8353042, 0.63382139, 0.32697771],\n [0.83011708, 0.63678472, 0.32495541],\n [0.82480682, 0.63975913, 0.32321038],\n [0.81937548, 0.64274185, 0.32174556],\n [0.81382519, 0.64573011, 0.32056327],\n [0.80815818, 0.6487212, 0.31966522],\n [0.80237677, 0.65171241, 0.31905244],\n [0.79648336, 0.65470106, 0.31872531],\n [0.79048044, 0.65768455, 0.31868352],\n [0.78437059, 0.66066026, 0.31892606],\n [0.77815645, 0.66362567, 0.31945124],\n [0.77184076, 0.66657827, 0.32025669],\n [0.76542634, 0.66951562, 0.3213394],\n [0.75891609, 0.67243534, 0.32269572],\n [0.75231298, 0.67533509, 0.32432138],\n [0.74562004, 0.6782126, 0.32621159],\n [0.73884042, 0.68106567, 0.32836102],\n [0.73197731, 0.68389214, 0.33076388],\n [0.72503398, 0.68668995, 0.33341395],\n [0.7180138, 0.68945708, 0.33630465],\n [0.71092018, 0.69219158, 0.33942908],\n [0.70375663, 0.69489159, 0.34278007],\n [0.69652673, 0.69755529, 0.34635023],\n [0.68923414, 0.70018097, 0.35013201],\n [0.6818826, 0.70276695, 0.35411772],\n [0.67447591, 0.70531165, 0.3582996],\n [0.667018, 0.70781354, 0.36266984],\n [0.65951284, 0.71027119, 0.36722061],\n [0.65196451, 0.71268322, 0.37194411],\n [0.64437719, 0.71504832, 0.37683259],\n [0.63675512, 0.71736525, 0.38187838],\n [0.62910269, 0.71963286, 0.38707389],\n [0.62142435, 0.72185004, 0.39241165],\n [0.61372469, 0.72401576, 0.39788432],\n [0.60600841, 0.72612907, 0.40348469],\n [0.59828032, 0.72818906, 0.40920573],\n 
[0.59054536, 0.73019489, 0.41504052],\n [0.58280863, 0.73214581, 0.42098233],\n [0.57507535, 0.7340411, 0.42702461],\n [0.5673509, 0.7358801, 0.43316094],\n [0.55964082, 0.73766224, 0.43938511],\n [0.55195081, 0.73938697, 0.44569104],\n [0.54428677, 0.74105381, 0.45207286],\n [0.53665478, 0.74266235, 0.45852483],\n [0.52906111, 0.74421221, 0.4650414],\n [0.52151225, 0.74570306, 0.47161718],\n [0.5140149, 0.74713464, 0.47824691],\n [0.506576, 0.74850672, 0.48492552],\n [0.49920271, 0.74981912, 0.49164808],\n [0.49190247, 0.75107171, 0.4984098],\n [0.48468293, 0.75226438, 0.50520604],\n [0.47755205, 0.7533971, 0.51203229],\n [0.47051802, 0.75446984, 0.5188842],\n [0.46358932, 0.75548263, 0.52575752],\n [0.45677469, 0.75643553, 0.53264815],\n [0.45008317, 0.75732863, 0.5395521],\n [0.44352403, 0.75816207, 0.54646551],\n [0.43710682, 0.758936, 0.55338462],\n [0.43084133, 0.7596506, 0.56030581],\n [0.42473758, 0.76030611, 0.56722555],\n [0.41880579, 0.76090275, 0.5741404],\n [0.41305637, 0.76144081, 0.58104704],\n [0.40749984, 0.76192057, 0.58794226],\n [0.40214685, 0.76234235, 0.59482292],\n [0.39700806, 0.7627065, 0.60168598],\n [0.39209414, 0.76301337, 0.6085285],\n [0.38741566, 0.76326334, 0.6153476],\n [0.38298304, 0.76345681, 0.62214052],\n [0.37880647, 0.7635942, 0.62890454],\n [0.37489579, 0.76367593, 0.63563704],\n [0.37126045, 0.76370246, 0.64233547],\n [0.36790936, 0.76367425, 0.64899736],\n [0.36485083, 0.76359176, 0.6556203],\n [0.36209245, 0.76345549, 0.66220193],\n [0.359641, 0.76326594, 0.66873999],\n [0.35750235, 0.76302361, 0.67523226],\n [0.35568141, 0.76272903, 0.68167659],\n [0.35418202, 0.76238272, 0.68807086],\n [0.3530069, 0.76198523, 0.69441305],\n [0.35215761, 0.7615371, 0.70070115],\n [0.35163454, 0.76103888, 0.70693324],\n [0.35143685, 0.76049114, 0.71310742],\n [0.35156253, 0.75989444, 0.71922184],\n [0.35200839, 0.75924936, 0.72527472],\n [0.3527701, 0.75855647, 0.73126429],\n [0.3538423, 0.75781637, 0.73718884],\n [0.3552186, 0.75702964, 0.7430467],\n [0.35689171, 0.75619688, 0.74883624],\n [0.35885353, 0.75531868, 0.75455584],\n [0.36109522, 0.75439565, 0.76020396],\n [0.36360734, 0.75342839, 0.76577905],\n [0.36637995, 0.75241752, 0.77127961],\n [0.3694027, 0.75136364, 0.77670417],\n [0.37266493, 0.75026738, 0.7820513],\n [0.37615579, 0.74912934, 0.78731957],\n [0.37986429, 0.74795017, 0.79250759],\n [0.38377944, 0.74673047, 0.797614],\n [0.38789026, 0.74547088, 0.80263746],\n [0.3921859, 0.74417203, 0.80757663],\n [0.39665568, 0.74283455, 0.81243022],\n [0.40128912, 0.74145908, 0.81719695],\n [0.406076, 0.74004626, 0.82187554],\n [0.41100641, 0.73859673, 0.82646476],\n [0.41607073, 0.73711114, 0.83096336],\n [0.4212597, 0.73559013, 0.83537014],\n [0.42656439, 0.73403435, 0.83968388],\n [0.43197625, 0.73244447, 0.8439034],\n [0.43748708, 0.73082114, 0.84802751],\n [0.44308905, 0.72916502, 0.85205505],\n [0.44877471, 0.72747678, 0.85598486],\n [0.45453694, 0.72575709, 0.85981579],\n [0.46036897, 0.72400662, 0.8635467],\n [0.4662644, 0.72222606, 0.86717646],\n [0.47221713, 0.72041608, 0.87070395],\n [0.47822138, 0.71857738, 0.87412804],\n [0.4842717, 0.71671065, 0.87744763],\n [0.4903629, 0.71481659, 0.88066162],\n [0.49649009, 0.71289591, 0.8837689],\n [0.50264864, 0.71094931, 0.88676838],\n [0.50883417, 0.70897752, 0.88965898],\n [0.51504253, 0.70698127, 0.89243961],\n [0.52126981, 0.70496128, 0.8951092],\n [0.52751231, 0.70291829, 0.89766666],\n [0.53376652, 0.70085306, 0.90011093],\n [0.54002912, 0.69876633, 0.90244095],\n [0.54629699, 0.69665888, 
0.90465565],\n [0.55256715, 0.69453147, 0.90675397],\n [0.55883679, 0.69238489, 0.90873487],\n [0.56510323, 0.69021993, 0.9105973],\n [0.57136396, 0.68803739, 0.91234022],\n [0.57761655, 0.68583808, 0.91396258],\n [0.58385872, 0.68362282, 0.91546336],\n [0.59008831, 0.68139246, 0.91684154],\n [0.59630323, 0.67914782, 0.9180961],\n [0.60250152, 0.67688977, 0.91922603],\n [0.60868128, 0.67461918, 0.92023033],\n [0.61484071, 0.67233692, 0.921108],\n [0.62097809, 0.67004388, 0.92185807],\n [0.62709176, 0.66774097, 0.92247957],\n [0.63318012, 0.66542911, 0.92297153],\n [0.63924166, 0.66310923, 0.92333301],\n [0.64527488, 0.66078227, 0.92356308],\n [0.65127837, 0.65844919, 0.92366082],\n [0.65725076, 0.65611096, 0.92362532],\n [0.66319071, 0.65376857, 0.92345572],\n [0.66909691, 0.65142302, 0.92315115],\n [0.67496813, 0.64907533, 0.92271076],\n [0.68080311, 0.64672651, 0.92213374],\n [0.68660068, 0.64437763, 0.92141929],\n [0.69235965, 0.64202973, 0.92056665],\n [0.69807888, 0.6396839, 0.91957507],\n [0.70375724, 0.63734122, 0.91844386],\n [0.70939361, 0.63500279, 0.91717232],\n [0.7149869, 0.63266974, 0.91575983],\n [0.72053602, 0.63034321, 0.91420578],\n [0.72603991, 0.62802433, 0.9125096],\n [0.7314975, 0.62571429, 0.91067077],\n [0.73690773, 0.62341425, 0.9086888],\n [0.74226956, 0.62112542, 0.90656328],\n [0.74758193, 0.61884899, 0.90429382],\n [0.75284381, 0.6165862, 0.90188009],\n [0.75805413, 0.61433829, 0.89932181],\n [0.76321187, 0.6121065, 0.89661877],\n [0.76831596, 0.6098921, 0.89377082],\n [0.77336536, 0.60769637, 0.89077786],\n [0.77835901, 0.6055206, 0.88763988],\n [0.78329583, 0.6033661, 0.88435693],\n [0.78817477, 0.60123418, 0.88092913],\n [0.79299473, 0.59912616, 0.87735668],\n [0.79775462, 0.59704339, 0.87363986],\n [0.80245335, 0.59498722, 0.86977904],\n [0.8070898, 0.592959, 0.86577468],\n [0.81166284, 0.5909601, 0.86162732],\n [0.81617134, 0.5889919, 0.8573376],\n [0.82061414, 0.58705579, 0.85290625],\n [0.82499007, 0.58515315, 0.84833413],\n [0.82929796, 0.58328538, 0.84362217],\n [0.83353661, 0.58145389, 0.83877142],\n [0.8377048, 0.57966009, 0.83378306],\n [0.8418013, 0.57790538, 0.82865836],\n [0.84582486, 0.57619119, 0.82339871],\n [0.84977422, 0.57451892, 0.81800565],\n [0.85364809, 0.57289, 0.8124808],\n [0.85744519, 0.57130585, 0.80682595],\n [0.86116418, 0.56976788, 0.80104298],\n [0.86480373, 0.56827749, 0.79513394],\n [0.86836249, 0.56683612, 0.789101],\n [0.87183909, 0.56544515, 0.78294645],\n [0.87523214, 0.56410599, 0.77667274],\n [0.87854024, 0.56282002, 0.77028247],\n [0.88176195, 0.56158863, 0.76377835],\n [0.88489584, 0.56041319, 0.75716326],\n [0.88794045, 0.55929505, 0.75044023],\n [0.89089432, 0.55823556, 0.74361241],\n [0.89375596, 0.55723605, 0.73668312],\n [0.89652387, 0.55629781, 0.72965583],\n [0.89919653, 0.55542215, 0.72253414],\n [0.90177242, 0.55461033, 0.71532181],\n [0.90425, 0.55386358, 0.70802274],\n [0.90662774, 0.55318313, 0.70064098],\n [0.90890408, 0.55257016, 0.69318073],\n [0.91107745, 0.55202582, 0.68564633],\n [0.91314629, 0.55155124, 0.67804225]])\n self.cmap = matplotlib.colors.ListedColormap(colors)\n rgba = self.cmap(image)\n return rgba\n\n def formatOut(self, image):\n \"\"\" Retutn Uint-8 numpy array\"\"\"\n return cp.asnumpy((image * 255).astype('uint8'))\n\n def showColorBar(self, offset=None, mode='circular'):\n \"\"\" Generates colorbars for the chosen colormap \"\"\"\n\n if not (offset is None):\n offset = offset\n else:\n offset = 0\n\n if mode == 'circular':\n azimuths = np.arange(0, 361, 1)\n zeniths = np.arange(40, 
70, 1)\n values = azimuths * np.ones((30, 361))\n fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))\n ax.pcolormesh(azimuths * np.pi / 180.0, zeniths, values, cmap=self.cmap, shading='auto')\n ax.set_yticks([])\n plt.show()\n else:\n azimuths = np.arange(0, 361, 1)\n zeniths = np.arange(40, 70, 1)\n values = azimuths * np.ones((30, 361))\n fig, ax = plt.subplots()\n ax.pcolormesh(azimuths * np.pi / 180.0, zeniths, values, cmap=self.cmap, shading='auto')\n ax.set_yticks([])\n plt.show()\n"
]
| [
[
"matplotlib.colors.ListedColormap",
"matplotlib.cm.get_cmap"
]
]
|
tsubauaaa/d2go | [
"9f746159ebf78ce79f644c405ca8695bc29d1075"
]
| [
"projects_oss/detr/test_op.py"
]
| [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------------------------------\n# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n# ------------------------------------------------------------------------------------------------\n\nimport io\nimport unittest\nfrom functools import wraps\n\nimport torch\nfrom detr.functions.ms_deform_attn_func import (\n ms_deform_attn_core_pytorch,\n MSDeformAttnFunction,\n)\nfrom torch.autograd import gradcheck\n\nUSE_CUDA = torch.cuda.device_count() > 0\n\n\nN, M, D = 1, 2, 2\nLq, L, P = 2, 2, 2\nif USE_CUDA:\n shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()\n level_start_index = torch.cat(\n (shapes.new_zeros((1,)), shapes.prod(1).cumsum(0)[:-1])\n )\n S = sum([(H * W).item() for H, W in shapes])\n\ntorch.manual_seed(3)\n\n\nclass Tester(unittest.TestCase):\n @unittest.skipIf(not USE_CUDA, \"CI does not have gpu\")\n @torch.no_grad()\n def test_forward_equal_with_pytorch_double(self):\n value = torch.rand(N, S, M, D).cuda() * 0.01\n sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()\n attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5\n attention_weights /= attention_weights.sum(-1, keepdim=True).sum(\n -2, keepdim=True\n )\n im2col_step = 2\n output_pytorch = (\n ms_deform_attn_core_pytorch(\n value.double(),\n shapes,\n sampling_locations.double(),\n attention_weights.double(),\n )\n .detach()\n .cpu()\n )\n output_cuda = (\n MSDeformAttnFunction.apply(\n value.double(),\n shapes,\n level_start_index,\n sampling_locations.double(),\n attention_weights.double(),\n im2col_step,\n )\n .detach()\n .cpu()\n )\n fwdok = torch.allclose(output_cuda, output_pytorch)\n max_abs_err = (output_cuda - output_pytorch).abs().max()\n max_rel_err = (\n (output_cuda - output_pytorch).abs() / output_pytorch.abs()\n ).max()\n\n print(\n f\"* {fwdok} test_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}\"\n )\n\n @unittest.skipIf(not USE_CUDA, \"CI does not have gpu\")\n @torch.no_grad()\n def test_forward_equal_with_pytorch_float(self):\n value = torch.rand(N, S, M, D).cuda() * 0.01\n sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()\n attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5\n attention_weights /= attention_weights.sum(-1, keepdim=True).sum(\n -2, keepdim=True\n )\n im2col_step = 2\n output_pytorch = (\n ms_deform_attn_core_pytorch(\n value, shapes, sampling_locations, attention_weights\n )\n .detach()\n .cpu()\n )\n output_cuda = (\n MSDeformAttnFunction.apply(\n value,\n shapes,\n level_start_index,\n sampling_locations,\n attention_weights,\n im2col_step,\n )\n .detach()\n .cpu()\n )\n fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3)\n max_abs_err = (output_cuda - output_pytorch).abs().max()\n max_rel_err = (\n (output_cuda - output_pytorch).abs() / output_pytorch.abs()\n ).max()\n\n print(\n f\"* {fwdok} test_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}\"\n )\n\n @unittest.skipIf(not USE_CUDA, \"CI does not have gpu\")\n def test_gradient_numerical(\n self, channels=4, 
grad_value=True, grad_sampling_loc=True, grad_attn_weight=True\n ):\n\n value = torch.rand(N, S, M, channels).cuda() * 0.01\n sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()\n attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5\n attention_weights /= attention_weights.sum(-1, keepdim=True).sum(\n -2, keepdim=True\n )\n im2col_step = 2\n func = MSDeformAttnFunction.apply\n\n value.requires_grad = grad_value\n sampling_locations.requires_grad = grad_sampling_loc\n attention_weights.requires_grad = grad_attn_weight\n\n gradok = gradcheck(\n func,\n (\n value.double(),\n shapes,\n level_start_index,\n sampling_locations.double(),\n attention_weights.double(),\n im2col_step,\n ),\n )\n\n print(f\"* {gradok} test_gradient_numerical(D={channels})\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
]
| [
[
"torch.rand",
"torch.no_grad",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.as_tensor",
"torch.allclose"
]
]
|
alexander-manley/MLServer | [
"42eac4715f208f028a8920c984ab296830d90f08"
]
| [
"runtimes/alibi-explain/tests/test_alibi_runtime_base.py"
]
| [
"import json\nfrom typing import Any, Dict\nfrom unittest.mock import patch\n\nimport numpy as np\nfrom alibi.api.interfaces import Explanation\nfrom numpy.testing import assert_array_equal\n\nfrom mlserver import ModelSettings, MLModel\nfrom mlserver.codecs import NumpyCodec\nfrom mlserver.types import (\n InferenceRequest,\n Parameters,\n RequestInput,\n MetadataTensor,\n InferenceResponse,\n)\nfrom mlserver_alibi_explain.common import (\n convert_from_bytes,\n remote_predict,\n AlibiExplainSettings,\n)\nfrom mlserver_alibi_explain.runtime import AlibiExplainRuntime, AlibiExplainRuntimeBase\n\n\"\"\"\nSmoke tests for runtimes\n\"\"\"\n\n\nasync def test_integrated_gradients__smoke(\n integrated_gradients_runtime: AlibiExplainRuntime,\n):\n # TODO: there is an inherit batch as first dimension\n data = np.random.randn(10, 28, 28, 1) * 255\n inference_request = InferenceRequest(\n parameters=Parameters(\n content_type=NumpyCodec.ContentType,\n explain_parameters={\n \"baselines\": None,\n },\n ),\n inputs=[\n RequestInput(\n name=\"predict\",\n shape=data.shape,\n data=data.tolist(),\n datatype=\"FP32\",\n )\n ],\n )\n response = await integrated_gradients_runtime.predict(inference_request)\n _ = convert_from_bytes(response.outputs[0], ty=str)\n\n\nasync def test_anchors__smoke(\n anchor_image_runtime_with_remote_predict_patch: AlibiExplainRuntime,\n):\n data = np.random.randn(28, 28, 1) * 255\n inference_request = InferenceRequest(\n parameters=Parameters(\n content_type=NumpyCodec.ContentType,\n explain_parameters={\n \"threshold\": 0.95,\n \"p_sample\": 0.5,\n \"tau\": 0.25,\n },\n ),\n inputs=[\n RequestInput(\n name=\"predict\",\n shape=data.shape,\n data=data.tolist(),\n datatype=\"FP32\",\n )\n ],\n )\n response = await anchor_image_runtime_with_remote_predict_patch.predict(\n inference_request\n )\n res = convert_from_bytes(response.outputs[0], ty=str)\n res_dict = json.dumps(res)\n assert \"meta\" in res_dict\n assert \"data\" in res_dict\n\n\ndef test_remote_predict__smoke(custom_runtime_tf, rest_client):\n with patch(\"mlserver_alibi_explain.common.requests\") as mock_requests:\n mock_requests.post = rest_client.post\n\n data = np.random.randn(10, 28, 28, 1) * 255\n inference_request = InferenceRequest(\n parameters=Parameters(content_type=NumpyCodec.ContentType),\n inputs=[\n RequestInput(\n name=\"predict\",\n shape=data.shape,\n data=data.tolist(),\n datatype=\"FP32\",\n )\n ],\n )\n\n endpoint = f\"v2/models/{custom_runtime_tf.settings.name}/infer\"\n\n res = remote_predict(inference_request, predictor_url=endpoint)\n assert isinstance(res, InferenceResponse)\n\n\nasync def test_alibi_runtime_wrapper(custom_runtime_tf: MLModel):\n \"\"\"\n Checks that the wrappers returns back the expected valued from the underlying rt\n \"\"\"\n\n class _MockInit(AlibiExplainRuntime):\n def __init__(self, settings: ModelSettings):\n self._rt = custom_runtime_tf\n\n data = np.random.randn(10, 28, 28, 1) * 255\n inference_request = InferenceRequest(\n parameters=Parameters(content_type=NumpyCodec.ContentType),\n inputs=[\n RequestInput(\n name=\"predict\",\n shape=data.shape,\n data=data.tolist(),\n datatype=\"FP32\",\n )\n ],\n )\n\n # settings is dummy and discarded\n wrapper = _MockInit(ModelSettings())\n\n assert wrapper.settings == custom_runtime_tf.settings\n assert wrapper.name == custom_runtime_tf.name\n assert wrapper.version == custom_runtime_tf.version\n assert wrapper.inputs == custom_runtime_tf.inputs\n assert wrapper.outputs == custom_runtime_tf.outputs\n assert wrapper.ready == 
custom_runtime_tf.ready\n\n assert await wrapper.metadata() == await custom_runtime_tf.metadata()\n assert await wrapper.predict(inference_request) == await custom_runtime_tf.predict(\n inference_request\n )\n\n # check setters\n dummy_shape_metadata = [\n MetadataTensor(\n name=\"dummy\",\n datatype=\"FP32\",\n shape=[1, 2],\n )\n ]\n wrapper.inputs = dummy_shape_metadata\n custom_runtime_tf.inputs = dummy_shape_metadata\n assert wrapper.inputs == custom_runtime_tf.inputs\n\n wrapper.outputs = dummy_shape_metadata\n custom_runtime_tf.outputs = dummy_shape_metadata\n assert wrapper.outputs == custom_runtime_tf.outputs\n\n wrapper_public_funcs = list(filter(lambda x: not x.startswith(\"_\"), dir(wrapper)))\n expected_public_funcs = list(\n filter(lambda x: not x.startswith(\"_\"), dir(custom_runtime_tf))\n )\n\n assert wrapper_public_funcs == expected_public_funcs\n\n\nasync def test_explain_parameters_pass_through():\n # test that the explain parameters are wired properly, if it runs ok then\n # the assertion is fine\n params = {\n \"threshold\": 0.95,\n }\n\n data_np = np.array([[1.0, 2.0]])\n\n class _DummyExplainer(AlibiExplainRuntimeBase):\n def _explain_impl(\n self, input_data: Any, explain_parameters: Dict\n ) -> Explanation:\n assert explain_parameters == params\n assert_array_equal(input_data, data_np)\n return Explanation(meta={}, data={})\n\n rt = _DummyExplainer(\n settings=ModelSettings(),\n explainer_settings=AlibiExplainSettings(\n infer_uri=\"dum\",\n explainer_type=\"dum\",\n init_parameters=None,\n ),\n )\n\n inference_request = InferenceRequest(\n parameters=Parameters(\n content_type=NumpyCodec.ContentType,\n explain_parameters=params, # this is what we pass through as explain params\n ),\n inputs=[\n RequestInput(\n name=\"predict\",\n shape=data_np.shape,\n data=data_np.tolist(),\n datatype=\"FP32\",\n )\n ],\n )\n\n res = await rt.predict(inference_request)\n assert isinstance(res, InferenceResponse)\n"
]
| [
[
"numpy.array",
"numpy.random.randn",
"numpy.testing.assert_array_equal"
]
]
|
ECNU-Text-Computing/Text-Mining | [
"5661bf6f9482183ecac14436bbd0a5e786d61467"
]
| [
"tutorials/bilstm_crf.py"
]
| [
"# Author: Robert Guthrie\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\n\ntorch.manual_seed(1)\n\n\ndef argmax(vec):\n # return the argmax as a python int\n _, idx = torch.max(vec, 1)\n return idx.item()\n\n\ndef prepare_sequence(seq, to_ix):\n idxs = [to_ix[w] for w in seq]\n return torch.tensor(idxs, dtype=torch.long)\n\n\n# Compute log sum exp in a numerically stable way for the forward algorithm\ndef log_sum_exp(vec):\n max_score = vec[0, argmax(vec)]\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\n\n\nclass BiLSTM_CRF(nn.Module):\n\n def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\n super(BiLSTM_CRF, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.vocab_size = vocab_size\n self.tag_to_ix = tag_to_ix\n self.tagset_size = len(tag_to_ix)\n\n self.word_embeds = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,\n num_layers=1, bidirectional=True)\n\n # Maps the output of the LSTM into tag space.\n self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n\n # Matrix of transition parameters. Entry i,j is the score of\n # transitioning *to* i *from* j.\n self.transitions = nn.Parameter(\n torch.randn(self.tagset_size, self.tagset_size))\n\n # These two statements enforce the constraint that we never transfer\n # to the start tag and we never transfer from the stop tag\n self.transitions.data[tag_to_ix[START_TAG], :] = -10000\n self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000\n\n self.hidden = self.init_hidden()\n\n def init_hidden(self):\n return (torch.randn(2, 1, self.hidden_dim // 2),\n torch.randn(2, 1, self.hidden_dim // 2))\n\n def _forward_alg(self, feats):\n # Do the forward algorithm to compute the partition function\n init_alphas = torch.full((1, self.tagset_size), -10000.)\n # START_TAG has all of the score.\n init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\n\n # Wrap in a variable so that we will get automatic backprop\n forward_var = init_alphas\n\n # Iterate through the sentence\n for feat in feats:\n alphas_t = [] # The forward tensors at this timestep\n for next_tag in range(self.tagset_size):\n # broadcast the emission score: it is the same regardless of\n # the previous tag\n emit_score = feat[next_tag].view(\n 1, -1).expand(1, self.tagset_size)\n # the ith entry of trans_score is the score of transitioning to\n # next_tag from i\n trans_score = self.transitions[next_tag].view(1, -1)\n # The ith entry of next_tag_var is the value for the\n # edge (i -> next_tag) before we do log-sum-exp\n next_tag_var = forward_var + trans_score + emit_score\n # The forward variable for this tag is log-sum-exp of all the\n # scores.\n alphas_t.append(log_sum_exp(next_tag_var).view(1))\n forward_var = torch.cat(alphas_t).view(1, -1)\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n alpha = log_sum_exp(terminal_var)\n return alpha\n\n def _get_lstm_features(self, sentence):\n self.hidden = self.init_hidden()\n embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)\n lstm_out, self.hidden = self.lstm(embeds, self.hidden)\n lstm_out = lstm_out.view(len(sentence), self.hidden_dim)\n lstm_feats = self.hidden2tag(lstm_out)\n return lstm_feats\n\n def _score_sentence(self, feats, tags):\n # Gives the score of a provided tag sequence\n score = torch.zeros(1)\n tags = 
torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])\n for i, feat in enumerate(feats):\n score = score + \\\n self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]\n score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]\n return score\n\n def _viterbi_decode(self, feats):\n backpointers = []\n\n # Initialize the viterbi variables in log space\n init_vvars = torch.full((1, self.tagset_size), -10000.)\n init_vvars[0][self.tag_to_ix[START_TAG]] = 0\n\n # forward_var at step i holds the viterbi variables for step i-1\n forward_var = init_vvars\n for feat in feats:\n bptrs_t = [] # holds the backpointers for this step\n viterbivars_t = [] # holds the viterbi variables for this step\n\n for next_tag in range(self.tagset_size):\n # next_tag_var[i] holds the viterbi variable for tag i at the\n # previous step, plus the score of transitioning\n # from tag i to next_tag.\n # We don't include the emission scores here because the max\n # does not depend on them (we add them in below)\n next_tag_var = forward_var + self.transitions[next_tag]\n best_tag_id = argmax(next_tag_var)\n bptrs_t.append(best_tag_id)\n viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))\n # Now add in the emission scores, and assign forward_var to the set\n # of viterbi variables we just computed\n forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)\n backpointers.append(bptrs_t)\n\n # Transition to STOP_TAG\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n best_tag_id = argmax(terminal_var)\n path_score = terminal_var[0][best_tag_id]\n\n # Follow the back pointers to decode the best path.\n best_path = [best_tag_id]\n for bptrs_t in reversed(backpointers):\n best_tag_id = bptrs_t[best_tag_id]\n best_path.append(best_tag_id)\n # Pop off the start tag (we dont want to return that to the caller)\n start = best_path.pop()\n assert start == self.tag_to_ix[START_TAG] # Sanity check\n best_path.reverse()\n return path_score, best_path\n\n def neg_log_likelihood(self, sentence, tags):\n feats = self._get_lstm_features(sentence)\n forward_score = self._forward_alg(feats)\n gold_score = self._score_sentence(feats, tags)\n return forward_score - gold_score\n\n def forward(self, sentence): # dont confuse this with _forward_alg above.\n # Get the emission scores from the BiLSTM\n lstm_feats = self._get_lstm_features(sentence)\n\n # Find the best path, given the features.\n score, tag_seq = self._viterbi_decode(lstm_feats)\n return score, tag_seq\n\nSTART_TAG = \"<START>\"\nSTOP_TAG = \"<STOP>\"\nEMBEDDING_DIM = 5\nHIDDEN_DIM = 4\n\n# Make up some training data\ntraining_data = [(\n \"the wall street journal reported today that apple corporation made money\".split(),\n \"B I I I O O O B I O O\".split()\n), (\n \"georgia tech is a university in georgia\".split(),\n \"B I O O O O B\".split()\n)]\n\nword_to_ix = {}\nfor sentence, tags in training_data:\n for word in sentence:\n if word not in word_to_ix:\n word_to_ix[word] = len(word_to_ix)\n\ntag_to_ix = {\"B\": 0, \"I\": 1, \"O\": 2, START_TAG: 3, STOP_TAG: 4}\n\nmodel = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)\noptimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)\n\n# Check predictions before training\nwith torch.no_grad():\n precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)\n precheck_tags = torch.tensor([tag_to_ix[t] for t in training_data[0][1]], dtype=torch.long)\n print(model(precheck_sent))\n\n# Make sure prepare_sequence from earlier in the 
LSTM section is loaded\nfor epoch in range(\n 300): # again, normally you would NOT do 300 epochs, it is toy data\n for sentence, tags in training_data:\n # Step 1. Remember that Pytorch accumulates gradients.\n # We need to clear them out before each instance\n model.zero_grad()\n\n # Step 2. Get our inputs ready for the network, that is,\n # turn them into Tensors of word indices.\n sentence_in = prepare_sequence(sentence, word_to_ix)\n targets = torch.tensor([tag_to_ix[t] for t in tags], dtype=torch.long)\n\n # Step 3. Run our forward pass.\n loss = model.neg_log_likelihood(sentence_in, targets)\n\n # Step 4. Compute the loss, gradients, and update the parameters by\n # calling optimizer.step()\n loss.backward()\n optimizer.step()\n\n# Check predictions after training\nwith torch.no_grad():\n precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)\n print(model(precheck_sent))\n# We got it!"
]
| [
[
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.LSTM",
"torch.max",
"torch.no_grad",
"torch.manual_seed",
"torch.randn",
"torch.full",
"torch.tensor",
"torch.exp",
"torch.nn.Embedding"
]
]
|
korur/Blog | [
"b34ddfc4a3390e6b600d0699d1e8a1413fba1ac1"
]
| [
"pandas_vs_datatable/code/filter.py"
]
| [
"import os\nimport re\nimport json\nimport time\nimport numpy as np\nimport pandas as pd\nfrom plotnine import *\n\n# Config\nPATH = os.getcwd()\npath_n = re.split(pattern=r\"/|\\\\\", string=PATH)[1:]\nif os.name == \"posix\":\n path_n = \"/\" + os.path.join(*path_n)\nelse:\n drive = PATH[0:3]\n path_n = drive + os.path.join(*path_n)\nRUNS = 100\n\n\ndef infer_column_cats(path: \"Path to working directoty.\") -> tuple:\n \"\"\"Helper function to identify dataset sizes based on file names.\"\"\"\n files = os.listdir(os.path.join(path, \"data\"))\n cats = set([re.match(pattern=\".*_(.*).csv$\", string=file).group(1) for file in files])\n cols = set([re.match(pattern=\".*_(.*)_.*.csv$\", string=file).group(1) for file in files])\n return cats, cols\n\n\ndef time_function(func: \"Function call to be evaluted as str.\") -> float:\n \"\"\"Helper function to time data access.\"\"\"\n start = time.time()\n eval(func)\n return time.time() - start\n\n\ndef create_stats(measures: \"List of function timings.\",\n col: \"Current Column.\", row: \"Current Row\",\n scenario: \"Current Scenario.\") -> dict:\n \"\"\"Helper function to create result dataset.\"\"\"\n return {\"scenario\": scenario,\n \"no_column\": col,\n \"data_length\": row,\n \"min\": np.min(measures),\n \"max\": np.max(measures),\n \"avg\": np.mean(measures),\n \"q50\": np.median(measures)}\n\n\nscenarios = json.load(open(os.path.join(path_n, \"output\", \"filter.JSON\")))\nnrows, ncols = infer_column_cats(path_n)\ntimings, results = [], []\n\nfor col in ncols:\n print(f\"-Column: {col}--\")\n for row in nrows:\n print(f\"--Row: {row}\")\n data = pd.read_csv(os.path.join(path_n, \"data\", f\"sim_data_{col}_{row}.csv\"))\n sel = [(target_col, scenarios[col][target_col][-1]) for target_col in scenarios[col]]\n print(f\"Scenario: {scenarios[col]} Selection: {sel}\")\n funcs = [f\"temp[temp['{sel[0][0]}'] >= {sel[0][1]}]\",\n f\"temp[temp['{sel[1][0]}'] >= {sel[1][1]}]\",\n f\"temp[temp['{sel[2][0]}'] == '{sel[2][1]}']\",\n f\"temp[temp['{sel[3][0]}'] == {sel[3][1]}]\",\n f\"\"\"temp[(temp['{sel[0][0]}'] >= {sel[0][1]}) &\n (temp['{sel[1][0]}'] >= {sel[1][1]})]\"\"\",\n f\"\"\"temp[(temp['{sel[0][0]}'] >= {sel[0][1]}) &\n (temp['{sel[1][0]}'] >= {sel[1][1]}) &\n (temp['{sel[2][0]}'] == '{sel[2][1]}')]\"\"\",\n f\"\"\"temp[(temp['{sel[0][0]}'] >= {sel[0][1]}) &\n (temp['{sel[1][0]}'] >= {sel[1][1]}) &\n (temp['{sel[2][0]}'] == '{sel[2][1]}') &\n (temp['{sel[3][0]}'] == {sel[3][1]})]\"\"\"]\n for i, fun in enumerate(funcs):\n print(i, fun)\n for j in range(RUNS):\n temp = data\n timings.append(time_function(func=fun))\n temp = None\n\n results.append(create_stats(col=col, row=row, measures=timings, scenario=i + 1))\n print(results[-1])\n timings = []\n\nresult_df = pd.DataFrame(results)\nresult_df[[\"data_length\", \"no_column\"]] = result_df[[\"data_length\", \"no_column\"]].apply(pd.to_numeric,\n axis=1,\n downcast=\"integer\")\nresult_df[[\"min\", \"max\", \"q50\", \"avg\"]] = round(result_df[[\"min\", \"max\", \"q50\", \"avg\"]] * 1000, 2)\nresult_df.sort_values([\"data_length\", \"no_column\"], inplace=True)\nresult_df.to_csv(os.path.join(path_n, \"output\", \"filter_results_pandas.csv\"), index=False)"
]
| [
[
"numpy.max",
"numpy.median",
"pandas.DataFrame",
"numpy.min",
"numpy.mean"
]
]
|
RentadroneCL/model-definition | [
"9dab1f1a808a1efc54d64144745277396c145ff7",
"9dab1f1a808a1efc54d64144745277396c145ff7"
]
| [
"ssd_keras-master/predict.py",
"GPS_Panel/Utils.py"
]
| [
"from keras import backend as K\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nfrom keras.optimizers import Adam\nfrom imageio import imread\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport json\nimport argparse\nimport os\nimport time\n\nfrom models.keras_ssd300 import ssd_300\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_layers.keras_layer_DecodeDetections import DecodeDetections\nfrom keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\nfrom keras_layers.keras_layer_L2Normalization import L2Normalization\n\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\nfrom data_generator.object_detection_2d_geometric_ops import Resize\nfrom data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n\ndef get_session():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.Session(config=config)\n\n\n\ndef makedirs(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\n\ndef _main(args=None):\n # parse arguments\n config_path = args.conf\n input_path = args.input_path\n output_path = args.output_path\n \n with open(config_path) as config_buffer:\n config = json.loads(config_buffer.read())\n\n makedirs(args.output_path)\n ###############################\n # Parse the annotations\n ###############################\n score_threshold = 0.5\n labels = config['model']['labels']\n categories = {}\n #categories = {\"Razor\": 1, \"Gun\": 2, \"Knife\": 3, \"Shuriken\": 4} #la categoría 0 es la background\n for i in range(len(labels)): categories[labels[i]] = i+1\n print('\\nTraining on: \\t' + str(categories) + '\\n')\n\n img_height = config['model']['input'] # Height of the model input images\n img_width = config['model']['input'] # Width of the model input images\n img_channels = 3 # Number of color channels of the model input images\n n_classes = len(labels) # Number of positive classes, e.g. 
20 for Pascal VOC, 80 for MS COCO\n classes = ['background'] + labels\n\n model_mode = 'training'\n # TODO: Set the path to the `.h5` file of the model to be loaded.\n model_path = config['train']['saved_weights_name']\n\n # We need to create an SSDLoss object in order to pass that to the model loader.\n ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\n K.clear_session() # Clear previous models from memory.\n\n model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n 'L2Normalization': L2Normalization,\n 'DecodeDetections': DecodeDetections,\n 'compute_loss': ssd_loss.compute_loss})\n\n\n \n\n image_paths = []\n\n if os.path.isdir(input_path):\n for inp_file in os.listdir(input_path):\n image_paths += [input_path + inp_file]\n else:\n image_paths += [input_path]\n\n image_paths = [inp_file for inp_file in image_paths if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])]\n times = []\n\n\n for img_path in image_paths:\n orig_images = [] # Store the images here.\n input_images = [] # Store resized versions of the images here.\n print(img_path)\n\n # preprocess image for network\n orig_images.append(imread(img_path))\n img = image.load_img(img_path, target_size=(img_height, img_width))\n img = image.img_to_array(img)\n input_images.append(img)\n input_images = np.array(input_images)\n # process image\n start = time.time()\n y_pred = model.predict(input_images)\n y_pred_decoded = decode_detections(y_pred,\n confidence_thresh=score_threshold,\n iou_threshold=score_threshold,\n top_k=200,\n normalize_coords=True,\n img_height=img_height,\n img_width=img_width)\n \n \n print(\"processing time: \", time.time() - start)\n times.append(time.time() - start)\n # correct for image scale\n\n # visualize detections\n # Set the colors for the bounding boxes\n colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()\n\n plt.figure(figsize=(20,12))\n plt.imshow(orig_images[0],cmap = 'gray')\n\n current_axis = plt.gca()\n #print(y_pred)\n for box in y_pred_decoded[0]:\n # Transform the predicted bounding boxes for the 300x300 image to the original image dimensions.\n\n xmin = box[2] * orig_images[0].shape[1] / img_width\n ymin = box[3] * orig_images[0].shape[0] / img_height\n xmax = box[4] * orig_images[0].shape[1] / img_width\n ymax = box[5] * orig_images[0].shape[0] / img_height\n\n color = colors[int(box[0])]\n label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))\n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n\n #plt.figure(figsize=(15, 15))\n #plt.axis('off')\n save_path = output_path + img_path.split('/')[-1]\n plt.savefig(save_path)\n plt.close()\n\n file = open(output_path + 'time.txt','w')\n\n file.write('Tiempo promedio:' + str(np.mean(times)))\n\n file.close()\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(description='train and evaluate ssd model on any dataset')\n argparser.add_argument('-c', '--conf', help='path to configuration file')\n argparser.add_argument('-i', '--input_path', help='folder input.', type=str)\n argparser.add_argument('-o', '--output_path', help='folder output.', default='ouput/', type=str)\n argparser.add_argument('--score_threshold', help='score threshold detection.', default=0.5, type=float)\n args = argparser.parse_args()\n _main(args)\n",
"import math\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nfrom scipy import signal\nimport georasters as gr\n\n\ndef order_points_rect(pts):\n\t# initialzie a list of coordinates that will be ordered\n\t# such that the first entry in the list is the top-left,\n\t# the second entry is the top-right,\n # the third is the bottom-right, and\n #the fourth is the bottom-left\n\trect = np.zeros((4, 2), dtype = \"float32\")\n\t# the top-left point will have the smallest sum, whereas\n\t# the bottom-right point will have the largest sum\n\ts = pts.sum(axis = 1)\n\trect[0] = pts[np.argmin(s)]\n\trect[2] = pts[np.argmax(s)]\n\t# now, compute the difference between the points, the\n\t# top-right point will have the smallest difference,\n\t# whereas the bottom-left will have the largest difference\n\tdiff = np.diff(pts, axis = 1)\n\trect[1] = pts[np.argmin(diff)]\n\trect[3] = pts[np.argmax(diff)]\n\t# return the ordered coordinates\n\treturn rect\n\n\ndef perspectiveTransform(Points):\n #Transform cuadrilater image segmentation to rectangle image\n # Return Matrix Transform\n Points = np.array(Points)\n Points_order = order_points_rect(Points)\n #dst = np.asarray([[0, 0], [0, 1], [1, 1], [1, 0]], dtype = \"float32\")\n\n (tl, tr, br, bl) = Points_order\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth , 0],\n [maxWidth , maxHeight ],\n [0, maxHeight ]], dtype = \"float32\")\n\n M = cv2.getPerspectiveTransform(Points_order, dst)\n return M, maxWidth, maxHeight\n\ndef subdivision_rect(factors, maxWidth, maxHeight, merge_percentaje = 0):\n ## From a rect (top-left, top-right, bottom-right, bottom-left) subidive in rectangle\n\n #factors = factors_number(n_divide)[-1] # First factor is smaller\n\n #if maxWidth > maxHeight:\n # split_Width = [maxWidth / factors[1] * i for i in range(factors[1] + 1)]\n # split_Height = [maxHeight / factors[0] * i for i in range(factors[0] + 1)]\n #else:\n # split_Width = [maxWidth / factors[0] * i for i in range(factors[0] + 1)]\n # split_Height = [maxHeight / factors[1] * i for i in range(factors[1] + 1)]\n merge_Width = maxWidth * merge_percentaje\n merge_Height = maxHeight * merge_percentaje\n split_Width = [maxWidth / factors[0] * i for i in range(factors[0] + 1)]\n split_Height = [maxHeight / factors[1] * i for i in range(factors[1] + 1)]\n\n sub_division = []\n for j in range(len(split_Height) - 1):\n for i in range(len(split_Width) - 1):\n\n sub_division.append([(max(split_Width[i] - merge_Width, 0) , max(split_Height[j] - merge_Height , 0)),\n (min(split_Width[i+1] + merge_Width , maxWidth - 1), max(split_Height[j] - merge_Height , 0)),\n (min(split_Width[i+1] + merge_Width , maxWidth - 1), min(split_Height[j+1] + merge_Height, maxHeight - 1)),\n (max(split_Width[i] - merge_Width, 0), min(split_Height[j+1] + merge_Height, maxHeight - 1))])\n\n return np.array(sub_division)\n\n\ndef skeleton(bin_image, n_important = 100):\n #From binary image (0,255) transform to skeleton edge\n\n kernel_size = 3\n edges = cv2.GaussianBlur((bin_image.copy()).astype(np.uint8),(kernel_size, kernel_size),0)\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(5, 5))\n height,width = edges.shape\n skel = np.zeros([height,width],dtype=np.uint8) #[height,width,3]\n temp_nonzero = np.count_nonzero(edges)\n\n while (np.count_nonzero(edges) != 0 ):\n eroded = cv2.erode(edges,kernel)\n #cv2.imshow(\"eroded\",eroded)\n temp = cv2.dilate(eroded,kernel)\n #cv2.imshow(\"dilate\",temp)\n temp = cv2.subtract(edges,temp)\n skel = cv2.bitwise_or(skel,temp)\n edges = eroded.copy()\n\n \"\"\"This function returns the count of labels in a mask image.\"\"\"\n label_im, nb_labels = ndimage.label(skel)#, structure= np.ones((2,2))) ## Label each connect region\n label_areas = np.bincount(label_im.ravel())[1:]\n keys_max_areas = np.array(sorted(range(len(label_areas)), key=lambda k: label_areas[k], reverse = True)) + 1\n keys_max_areas = keys_max_areas[:n_important]\n L = np.zeros(label_im.shape)\n for i in keys_max_areas:\n L[label_im == i] = i\n\n labels = np.unique(L)\n label_im = np.searchsorted(labels, L)\n\n return label_im>0\n\ndef angle_lines(skel_filter, n_important = 100, angle_resolution = 360, threshold = 100, min_line_length = 200, max_line_gap = 50, plot = False):\n #Measure the angle of lines in skel_filter. 
Obs the angle is positive in clockwise.\n\n rho = 1 # distance resolution in pixels of the Hough grid\n theta = np.pi / angle_resolution # angular resolution in radians of the Hough grid\n #threshold = 100 # minimum number of votes (intersections in Hough grid cell)\n #min_line_length = 200 # minimum number of pixels making up a line\n #max_line_gap = 50 # maximum gap in pixels between connectable line segments\n\n lines = cv2.HoughLines(np.uint8(skel_filter),rho, theta, threshold)\n lines_P = cv2.HoughLinesP(np.uint8(skel_filter),rho, theta, threshold, np.array([]) ,min_line_length, max_line_gap)\n\n if lines_P is None:\n print(\"linea no encontrada\")\n return 0\n\n theta_P = [np.pi/2 + np.arctan2(line[0][3] - line[0][1],line[0][2]-line[0][0]) for line in lines_P[:n_important]]\n\n theta = lines[0:n_important,0,1]\n\n h = np.histogram(np.array(theta), bins = angle_resolution, range=(-np.pi,np.pi))\n peaks = signal.find_peaks_cwt(h[0], widths= np.arange(2,4))\n h_P = np.histogram(np.array(theta_P), bins = angle_resolution, range=(-np.pi,np.pi))\n peaks_P = signal.find_peaks_cwt(h_P[0], widths= np.arange(2,4))\n\n #h= np.histogram(np.array(theta), bins = angle_resolution, range=(-np.pi,np.pi))\n #peaks = argrelextrema(h[0], np.greater)\n #h_P = np.histogram(np.array(theta_P), bins = angle_resolution, range=(-np.pi,np.pi))\n #peaks_P = argrelextrema(h_P[0], np.greater)\n\n mesh = np.array(np.meshgrid(h[1][peaks], h_P[1][peaks_P]))\n combinations = mesh.T.reshape(-1, 2)\n index_min = np.argmin([abs(a-b) for a,b in combinations])\n theta_prop = np.mean(combinations[index_min])\n\n if plot:\n print('Theta in HoughLines: ', h[1][peaks])\n print('Theta in HoughLinesP: ', h_P[1][peaks_P])\n print('combinations: ', combinations)\n print('Theta prop: ', theta_prop)\n\n\n Z1 = np.ones((skel_filter.shape))*255\n Z2 = np.ones((skel_filter.shape))*255\n\n for line in lines[0:n_important]:\n rho,theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n #print((x1,y1,x2,y2))\n cv2.line(Z1,(x1,y1),(x2,y2),(0,0,255),2)\n\n for line in lines_P[:n_important]:\n x1,y1,x2,y2 = line[0]\n cv2.line(Z2,(x1,y1),(x2,y2),(0,0,255),2)\n\n plt.figure(0)\n plt.figure(figsize=(16,8))\n\n plt.imshow(skel_filter)\n plt.title('Skel_filter')\n\n fig, axs = plt.subplots(1, 2, figsize=(16,8))\n axs[0].imshow(Z1)\n axs[0].title.set_text('Lines HoughLines')\n\n axs[1].imshow(Z2)\n axs[1].title.set_text('Lines HoughLinesP')\n\n fig, axs = plt.subplots(1, 2, figsize=(16,8))\n axs[0].hist(lines[0:n_important,0,1], bins = 45, range=[-np.pi,np.pi])\n axs[0].title.set_text('Lines HoughLines theta Histogram')\n\n\n axs[1].hist(theta_P, bins = 45, range=[-np.pi,np.pi])\n axs[1].title.set_text('Lines HoughLinesP theta Histogram')\n #print(lines.shape)\n #print(lines_P.shape)\n\n\n return theta_prop\n\ndef rgb2hsv(rgb):\n \"\"\" convert RGB to HSV color space\n\n :param rgb: np.ndarray\n :return: np.ndarray\n \"\"\"\n\n rgb = rgb.astype('float')\n maxv = np.amax(rgb, axis=2)\n maxc = np.argmax(rgb, axis=2)\n minv = np.amin(rgb, axis=2)\n minc = np.argmin(rgb, axis=2)\n\n hsv = np.zeros(rgb.shape, dtype='float')\n hsv[maxc == minc, 0] = np.zeros(hsv[maxc == minc, 0].shape)\n hsv[maxc == 0, 0] = (((rgb[..., 1] - rgb[..., 2]) * 60.0 / (maxv - minv + np.spacing(1))) % 360.0)[maxc == 0]\n hsv[maxc == 1, 0] = (((rgb[..., 2] - rgb[..., 0]) * 60.0 / (maxv - minv + np.spacing(1))) + 120.0)[maxc == 1]\n hsv[maxc == 2, 
0] = (((rgb[..., 0] - rgb[..., 1]) * 60.0 / (maxv - minv + np.spacing(1))) + 240.0)[maxc == 2]\n hsv[maxv == 0, 1] = np.zeros(hsv[maxv == 0, 1].shape)\n hsv[maxv != 0, 1] = (1 - minv / (maxv + np.spacing(1)))[maxv != 0]\n hsv[..., 2] = maxv\n\n return hsv\n\n\ndef doubleMADsfromMedian(y,thresh=3.5):\n # warning: this function does not check for NAs\n # nor does it address issues when\n # more than 50% of your data have identical values\n m = np.median(y)\n abs_dev = np.abs(y - m)\n left_mad = np.median(abs_dev[y <= m])\n right_mad = np.median(abs_dev[y >= m])\n y_mad = left_mad * np.ones(len(y))\n y_mad[y > m] = right_mad\n modified_z_score = 0.6745 * abs_dev / y_mad\n modified_z_score[y == m] = 0\n return modified_z_score > thresh\n\n\ndef watershed_marked(thresh, min_Area = 100, threshold_median_Area = 3):\n ## Thresh is the segmentation image use to watershed\n ##\n\n # Perform the distance transform algorithm\n dist = cv2.distanceTransform(thresh, cv2.DIST_L2, 3)\n # Normalize the distance image for range = {0.0, 1.0}\n # so we can visualize and threshold it\n cv2.normalize(dist, dist, 0, 1.0, cv2.NORM_MINMAX)\n # Threshold to obtain the peaks\n # This will be the markers for the foreground objects\n _, dist = cv2.threshold((dist*255).astype(np.uint8), 0, 255 , cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # Dilate a bit the dist image\n kernel1 = np.ones((3,3), dtype=np.uint8)\n dist = cv2.dilate(dist, kernel1 , iterations = 1)\n dist = cv2.erode(dist, kernel1 , iterations = 1)\n\n\n #dist[0: 10,-10:] = 255\n dist[-10:,-10:] = 255\n\n dist_8u = dist.astype('uint8')\n\n # Find total markers\n contours, _ = cv2.findContours(dist_8u, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # Create the marker image for the watershed algorithm\n markers = np.zeros(dist.shape, dtype=np.int32)\n # Draw the foreground markers\n for i in range(len(contours)):\n cv2.drawContours(markers, contours, i, (i+1), -1)\n\n\n markers = cv2.watershed(cv2.cvtColor(thresh,cv2.COLOR_GRAY2RGB), markers)\n markers[markers == 1] = 0\n markers[markers == -1] = 0\n\n Areas = []\n for i in range(1,np.max(markers) + 1):\n if np.sum(markers == i) < min_Area:\n markers[markers == i] = 0\n else:\n Areas.append([i, np.sum(markers == i)])\n\n Areas = np.array(Areas)\n L_Areas = doubleMADsfromMedian(Areas[:,1], threshold_median_Area)\n for i,Logic in zip(Areas[:,0], L_Areas) :\n if Logic:\n markers[markers == i] = 0\n\n return Areas[L_Areas,:], dist_8u,markers\n\ndef pixel2gps(points, geot):\n # transform pixel to gps coordinate\n return np.vstack(gr.map_pixel_inv(points[:,1], points[:,0],geot[1],geot[-1], geot[0],geot[3])).T\n\n\n\ndef gps2pixel(points_coord, geot):\n # transform gps coordinate to pixel\n return np.flip(np.vstack(gr.map_pixel(points_coord[:,0], points_coord[:,1],geot[1],geot[-1], geot[0],geot[3])).T,1)\n"
]
| [
[
"numpy.array",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"numpy.mean",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow"
],
[
"numpy.argmin",
"numpy.median",
"numpy.mean",
"numpy.cos",
"numpy.max",
"numpy.count_nonzero",
"numpy.uint8",
"numpy.sin",
"matplotlib.pyplot.subplots",
"numpy.argmax",
"numpy.arange",
"numpy.sqrt",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.diff",
"matplotlib.pyplot.figure",
"numpy.amax",
"numpy.arctan2",
"numpy.amin",
"numpy.searchsorted",
"numpy.spacing",
"scipy.ndimage.label",
"numpy.sum",
"numpy.ones",
"numpy.abs",
"numpy.meshgrid",
"numpy.unique",
"matplotlib.pyplot.imshow"
]
]
|
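The geometry utilities in the row above order the four corners of a quadrilateral by coordinate sums and differences (top-left has the smallest x+y, bottom-right the largest; top-right the smallest y-x, bottom-left the largest) and then build a perspective transform to a birds-eye rectangle. A minimal sketch of that rectification, assuming made-up corner coordinates and a blank placeholder image instead of a real raster:

import cv2
import numpy as np

def order_points_rect(pts):
    # top-left = smallest x+y, bottom-right = largest x+y,
    # top-right = smallest y-x, bottom-left = largest y-x
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    d = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(d)]
    rect[3] = pts[np.argmax(d)]
    return rect

pts = np.array([[10, 80], [90, 75], [95, 5], [5, 10]], dtype="float32")  # made-up corners
rect = order_points_rect(pts)
tl, tr, br, bl = rect
max_w = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
max_h = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))
dst = np.array([[0, 0], [max_w, 0], [max_w, max_h], [0, max_h]], dtype="float32")

M = cv2.getPerspectiveTransform(rect, dst)
image = np.zeros((120, 120, 3), dtype=np.uint8)          # placeholder image
warped = cv2.warpPerspective(image, M, (max_w, max_h))   # rectified top-down crop
print(warped.shape)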
TinyVolt/normalizing-flows | [
"d4510383cdaaf0f5a289d97f9bf3d0ba47df4cac"
]
| [
"multi_dim_shared_params/data.py"
]
| [
"import numpy as np\nimport pickle\nfrom torch.utils.data import Dataset, DataLoader\n\nFILENAME = 'shapes.pkl'\nwith open(FILENAME, 'rb') as f:\n data = pickle.load(f)\ntraining_data, testing_data = data['train'], data['test']\n# training_data.shape = (10479, 20, 20, 1)\ntraining_data = (training_data > 127.5).astype(np.uint8)\n# training_data.shape = (4491, 20, 20, 1)\ntesting_data = (testing_data > 127.5).astype(np.uint8)\n\n\nclass ShapesDataset(Dataset):\n def __init__(self, array):\n self.array = array.astype(np.float32) / 2.0\n self.array = np.transpose(self.array, (0,3,1,2))\n\n def __len__(self):\n return len(self.array)\n\n def __getitem__(self, index):\n return self.array[index]\n\ntrain_loader = DataLoader(ShapesDataset(training_data), shuffle=True, batch_size=128)\ntest_loader = DataLoader(ShapesDataset(testing_data), shuffle=True, batch_size=128)\n"
]
| [
[
"numpy.transpose"
]
]
|
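A short usage sketch of the dataset wrapper in the row above, with a random binary array standing in for shapes.pkl (the pickle itself is not reproduced here); it shows the NHWC-to-NCHW transpose and the division by 2.0 that maps binarized pixels to 0.0/0.5:

import numpy as np
from torch.utils.data import DataLoader, Dataset

class ShapesDataset(Dataset):
    def __init__(self, array):
        # array: (N, H, W, 1) uint8 in {0, 1} after thresholding at 127.5
        self.array = array.astype(np.float32) / 2.0           # pixel values become 0.0 or 0.5
        self.array = np.transpose(self.array, (0, 3, 1, 2))   # NHWC -> NCHW for PyTorch

    def __len__(self):
        return len(self.array)

    def __getitem__(self, index):
        return self.array[index]

fake = (np.random.rand(32, 20, 20, 1) * 255 > 127.5).astype(np.uint8)  # stand-in for shapes.pkl
loader = DataLoader(ShapesDataset(fake), batch_size=8, shuffle=True)
print(next(iter(loader)).shape)   # torch.Size([8, 1, 20, 20])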
kingformatty/E2E-NLG-Project | [
"74185afa0830592d905668eee1e477395ee19cd6"
]
| [
"components/trainer/__init__.py"
]
| [
"# Based on: http://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html\n\nimport logging\nimport os\nimport time\n\nimport numpy as np\nimport torch\nfrom torch import optim\n\nfrom components.constants import PAD_ID\nfrom components.evaluator.e2e_evaluator import BaseEvaluator\nfrom components.evaluator.evaluation import eval_output\nfrom components.utils.serialization import save_model, save_scores\nfrom components.utils.serialization import save_predictions_txt\nfrom components.utils.timing import create_progress_bar, asMinutes\nfrom components.utils.visualize import torch_summarize\n\nlogger = logging.getLogger('experiment')\n\n\nclass BaseTrainer(object):\n def __init__(self, config):\n self.config = config\n self.init_params()\n\n def init_params(self):\n self.n_epochs = self.config[\"n_epochs\"]\n self.batch_size = self.config[\"batch_size\"]\n self.lr = self.config['learning_rate']\n self.model_dir = self.config[\"model_dir\"]\n self.evaluate_prediction = self.config[\"evaluate_prediction\"]\n self.save_model = self.config[\"save_model_each_epoch\"]\n if self.config[\"optimizer\"] == \"Warmup\":\n self.warmup_step = self.config[\"warmup_step\"]\n self.factor = self.config[\"factor\"]\n\n self.use_cuda = torch.cuda.is_available()\n\n self.train_losses = []\n self.dev_losses = []\n\n if self.evaluate_prediction:\n self.nist_scores = []\n self.bleu_scores = []\n self.cider_scores = []\n self.rouge_scores = []\n self.meteor_scores = []\n\n def run_external_eval(self, ref_fn, pred_fn):\n\n \"\"\"\n Run external evaluation script (provided by the E2E NLG org)\n\n :param ref_fn: reference filename\n :param pred_fn: prediction filename\n :return:\n \"\"\"\n\n bleu, nist, meteor, rouge, cider = eval_output(ref_fn, pred_fn)\n self.bleu_scores.append(bleu)\n self.nist_scores.append(nist)\n self.cider_scores.append(cider)\n self.rouge_scores.append(rouge)\n self.meteor_scores.append(meteor)\n avg = (bleu + nist + cider + rouge + meteor) / 5\n score_msg = 'BLEU=%0.5f NIST=%0.5f CIDEr=%0.5f ROUGE=%0.5f METEOR=%0.5f AVERAGE = %0.5f' \\\n % (bleu, nist, cider, rouge, meteor, avg)\n\n logger.info(score_msg)\n\n def record_loss(self, train_loss, dev_loss):\n self.train_losses.append(train_loss)\n self.dev_losses.append(dev_loss)\n\n logger.info('tloss=%0.5f dloss=%0.5f' % (train_loss, dev_loss))\n\n def training_start(self, model, data):\n\n training_start_time = time.time()\n logger.info(\"Start training\")\n\n # Print a model summary to make sure everything is ok with it\n model_summary = torch_summarize(model)\n logger.debug(model_summary)\n\n num_param = 0\n for name, param in model.named_parameters():\n num_param += param.numel()\n print(\"Number of parameters: {}\".format(num_param))\n\n evaluator = BaseEvaluator(self.config)\n logger.debug(\"Preparing training data\")\n\n train_batches = data.prepare_training_data(data.train, self.batch_size)\n dev_batches = data.prepare_training_data(data.dev, self.batch_size)\n\n id2word = data.vocab.id2tok\n dev_lexicalizations = data.lexicalizations['dev']\n dev_multi_ref_fn = '%s.multi-ref' % data.fnames['dev']\n\n self.set_optimizer(model, self.config['optimizer'])\n self.set_train_criterion(len(id2word), PAD_ID, model.nos_option)\n\n # Moving the model to GPU, if available\n if self.use_cuda:\n model = model.cuda()\n\n for epoch_idx in range(1, self.n_epochs + 1):\n\n epoch_start = time.time()\n pred_fn = os.path.join(self.model_dir, 'predictions.epoch%d' % epoch_idx)\n\n model.train()\n train_loss = self.train_epoch(epoch_idx, model, 
train_batches)\n \n model.eval()\n dev_loss = self.compute_val_loss(model, dev_batches)\n predicted_ids, attention_weights = evaluator.evaluate_model(model, data.dev[0], data.dev[1])\n predicted_tokens = evaluator.lexicalize_predictions(predicted_ids,\n dev_lexicalizations,\n id2word)\n\n save_predictions_txt(predicted_tokens, pred_fn)\n self.record_loss(train_loss, dev_loss)\n\n if self.evaluate_prediction:\n self.run_external_eval(dev_multi_ref_fn, pred_fn)\n\n if self.save_model:\n save_model(model, os.path.join(self.model_dir, 'weights.epoch%d' % epoch_idx))\n\n logger.info('Epoch %d/%d: time=%s' % (epoch_idx, self.n_epochs, asMinutes(time.time() - epoch_start)))\n\n self.plot_lcurve()\n\n if self.evaluate_prediction:\n score_fname = os.path.join(self.model_dir, 'scores.csv')\n scores = self.get_scores_to_save()\n save_scores(scores, self.score_file_header, score_fname)\n self.plot_training_results()\n\n logger.info('End training time=%s' % (asMinutes(time.time() - training_start_time)))\n\n def compute_val_loss(self, model, dev_batches):\n\n total_loss = 0\n running_losses = []\n num_dev_batches = len(dev_batches)\n bar = create_progress_bar('dev_loss')\n\n for batch_idx in bar(range(num_dev_batches)):\n loss_var = self.train_step(model, dev_batches[batch_idx])\n loss_data = loss_var.item()\n\n # Record loss\n running_losses = ([loss_data] + running_losses)[:20]\n bar.dynamic_messages['dev_loss'] = np.mean(running_losses)\n\n total_loss += loss_data\n\n total_loss_avg = total_loss / num_dev_batches\n return total_loss_avg\n\n def train_epoch(self, epoch_idx, model, train_batches):\n\n #np.random.shuffle(train_batches) # shuffling data\n running_losses = []\n epoch_losses = []\n\n num_train_batches = len(train_batches)\n bar = create_progress_bar('train_loss')\n\n for pair_idx in bar(range(num_train_batches)):\n self.optimizer.zero_grad()\n loss_var = self.train_step(model, train_batches[pair_idx])\n loss_data = loss_var.item()\n loss_var.backward() # compute gradients\n self.optimizer.step() # update weights\n\n running_losses = ([loss_data] + running_losses)[:20]\n bar.dynamic_messages['train_loss'] = np.mean(running_losses)\n epoch_losses.append(loss_data)\n\n epoch_loss_avg = np.mean(epoch_losses)\n\n return epoch_loss_avg\n\n def get_scores_to_save(self):\n scores = list(zip(self.bleu_scores,\n self.nist_scores,\n self.cider_scores,\n self.rouge_scores,\n self.meteor_scores,\n self.train_losses,\n self.dev_losses))\n\n return scores\n\n def train_step(self, *args, **kwargs):\n raise NotImplementedError()\n\n def calc_loss(self, *args, **kwargs):\n raise NotImplementedError()\n\n def set_train_criterion(self, *args, **kwargs):\n raise NotImplementedError()\n\n def set_optimizer(self, model, opt_name):\n\n logger.debug(\"Setting %s as optimizer\" % opt_name)\n\n if opt_name == \"SGD\":\n self.optimizer = optim.SGD(params=model.parameters(), lr=self.lr)\n\n elif opt_name == \"Adam\":\n self.optimizer = optim.Adam(params=model.parameters(), lr=self.lr)\n\n elif opt_name == 'RMSprop':\n self.optimizer = optim.RMSprop(params=model.parameters(), lr=self.lr)\n\n elif opt_name == \"Warmup\":\n self.optimizer = NoamOpt(model.model_size, self.factor, self.warmup_step,\n optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n \n else:\n raise NotImplementedError()\n\n def plot_training_results(self, *args, **kwargs):\n raise NotImplementedError()\n\n def plot_lcurve(self, *args, **kwargs):\n raise NotImplementedError()\n\n def get_plot_names(self):\n raise 
NotImplementedError()\n\n @property\n def score_file_header(self):\n HEADER = ['bleu', 'nist', 'cider', 'rouge', 'meteor', 'train_loss', 'dev_loss']\n return HEADER\n\n\nclass NoamOpt:\n \"Optim wrapper that implements rate.\"\n def __init__(self, model_size, factor, warmup, optimizer):\n self.optimizer = optimizer\n self._step = 0\n self.warmup = warmup\n self.factor = factor\n self.model_size = model_size\n self._rate = 0\n \n def step(self):\n \"Update parameters and rate\"\n self._step += 1\n rate = self.rate()\n for p in self.optimizer.param_groups:\n p['lr'] = rate\n self._rate = rate\n self.optimizer.step()\n \n def rate(self, step = None):\n \"Implement `lrate` above\"\n if step is None:\n step = self._step\n return self.factor * (self.model_size ** (-0.5) *\n min(step ** (-0.5), step * self.warmup ** (-1.5)))\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n"
]
| [
[
"torch.cuda.is_available",
"numpy.mean"
]
]
|
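The NoamOpt wrapper in the trainer above scales the learning rate as factor * model_size^-0.5 * min(step^-0.5, step * warmup^-1.5), i.e. a linear warmup followed by inverse-square-root decay. A minimal worked sketch of that rate schedule (the model_size, factor and warmup values are illustrative, not taken from the config above):

def noam_rate(step, model_size=512, factor=1.0, warmup=4000):
    # lr = factor * model_size^-0.5 * min(step^-0.5, step * warmup^-1.5)
    return factor * (model_size ** -0.5) * min(step ** -0.5, step * warmup ** -1.5)

for step in (1, 1000, 4000, 40000):
    print(step, noam_rate(step))
# The rate rises linearly while step < warmup and decays as 1/sqrt(step) afterwards,
# peaking at step == warmup.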
ksang/moderu | [
"c7c8c24e86c9d549dd85f4f16b456cbf33f30cb2"
]
| [
"lenet5/train.py"
]
| [
"import argparse\nimport random\nimport warnings\nimport time\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim as optim\nimport torch.multiprocessing as mp\nfrom torchvision import datasets, transforms\nfrom model import LeNet5\n\nparser = argparse.ArgumentParser(description='LeNet-5 MNIST Training')\n\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--data', metavar='DIR', default='data',\n help='path to dataset')\nparser.add_argument('--epochs', default=30, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=64, type=int,\n metavar='N',\n help='mini-batch size (default: 64), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.01, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=100, type=int,\n metavar='N', help='print frequency')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n default=False,\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. 
This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 10 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef save_checkpoint(state, filename='lenet5.tar', cpdir='checkpoints'):\n if not os.path.exists(cpdir):\n os.makedirs(cpdir)\n filename = os.path.join(cpdir, filename)\n print(\"=> saving checkpoint to: {}\".format(filename))\n torch.save(state, filename)\n\ndef train(train_loader, model, criterion, optimizer, epoch, device, args):\n # switch to train mode\n start_time = time.time()\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n # compute output\n output = model(data)\n loss = criterion(output, target)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch_idx % args.print_freq == 0:\n end_time = time.time()\n if batch_idx == 0:\n thoughput = args.batch_size / (end_time - start_time)\n else:\n thoughput = args.batch_size * args.print_freq / (end_time - start_time)\n print('Train Epoch: {} [{:>5d}/{} ({:>3.0f}%)] Throughput: {:>8.1f}/sec Loss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader),\n thoughput,\n loss.item()))\n start_time = time.time()\n\n if not args.multiprocessing_distributed or \\\n (args.multiprocessing_distributed and args.rank == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n })\n\n\ndef validate(val_loader, model, criterion, device, args):\n # switch to evaluate mode\n model.eval()\n val_loss = 0\n correct = 0\n batch_num = 0\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(val_loader):\n data, target = data.to(device), target.to(device)\n # compute output\n output = model(data)\n val_loss += criterion(output, target)\n # get the index of the max log-probability\n pred = output.argmax(dim=1, keepdim=True)\n # Count samples that prediction equals to target\n correct += pred.eq(target.view_as(pred)).sum().item()\n batch_num += 1\n\n print('\\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n val_loss / batch_num,\n correct, len(val_loader.dataset),\n 100. 
* correct / len(val_loader.dataset)))\n\n return correct / len(val_loader.dataset)\n\n\ndef worker(gpu, ngpus_per_node, args):\n args.gpu = gpu\n device = torch.device(\"cpu\")\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n device = torch.device(\"cuda:\"+str(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n\n model = LeNet5().to(device)\n\n if args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n else:\n model = torch.nn.DataParallel(model)\n\n train_dataset = datasets.MNIST(args.data, train=True, download=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data, train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().to(device)\n\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, device, args)\n\n # evaluate on validation set\n acc = validate(val_loader, model, criterion, device, args)\n\ndef main():\n args = 
parser.parse_args()\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n prepare_dataset = datasets.MNIST(args.data, download=True)\n\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n if ngpus_per_node == 0:\n warnings.warn('No GPU found on this node, not launching any worker.')\n mp.spawn(worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n if use_cuda and not args.gpu:\n args.gpu = 0\n worker(args.gpu, ngpus_per_node, args)\n\nif __name__ == '__main__':\n mp.set_start_method('forkserver', force=True)\n main()\n"
]
| [
[
"torch.device",
"torch.distributed.init_process_group",
"torch.save",
"torch.no_grad",
"torch.multiprocessing.set_start_method",
"torch.multiprocessing.spawn",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
]
]
|
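adjust_learning_rate in the training script above applies a step decay: the initial learning rate is multiplied by 0.1 for every completed block of 10 epochs. A tiny worked sketch of the same rule (base_lr matches the script's default of 0.01; the epoch values are arbitrary):

base_lr = 0.01
for epoch in (0, 5, 9, 10, 19, 20, 29):
    lr = base_lr * (0.1 ** (epoch // 10))
    print(epoch, round(lr, 6))   # 0.01 for epochs 0-9, 0.001 for 10-19, 0.0001 for 20-29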
tlubitz/rba | [
"073b591ff6047ee8df00288ecfe45094e2b7d195"
]
| [
"simulator/static/python/rbatools/rba/prerba/manual_annotation.py"
]
| [
"\"\"\"Interface to manual annotation.\"\"\"\n\n# python 2/3 compatibility\nfrom __future__ import division, print_function, absolute_import\n\nimport os.path\nfrom collections import namedtuple\nimport pandas\n\nfrom .curation_data import CurationData\nfrom .uniprot_data import Cofactor\n\nMetabolite = namedtuple('Metabolite', 'name sbml_id concentration')\n\n\nclass CuratedData(object):\n def __init__(self, filename, columns):\n self._raw_data = CurationData(filename, columns)\n self.data = {}\n self._warning = ''\n\n def update_file(self):\n if self._raw_data.update_file() and self._warning:\n print(self._warning)\n\n\nclass CuratedSubunits(CuratedData):\n def __init__(self, input_dir):\n filename = os.path.join(input_dir, 'subunits.tsv')\n super(CuratedSubunits, self).__init__(\n filename,\n ['ENTRY', 'STOICHIOMETRY', 'GENE NAMES',\n 'PROTEIN NAME', 'UNIPROT NOTE']\n )\n if self._raw_data.has_missing_information('STOICHIOMETRY'):\n raise UserWarning(filename + ': please fill in the'\n ' STOICHIOMETRY column.')\n self.data = {r[0]: float(r[1]) for r in self._raw_data.rows()}\n self._warning = (\n 'WARNING: ambiguous subunit notes have been added to '\n 'file {}. Execution will continue with default values.'\n .format(filename)\n )\n\n def append(self, uniprot_line, value):\n \"\"\"Add ambiguous stoichiometry.\"\"\"\n self.data[uniprot_line.name] = value\n self._raw_data.add_row(\n (uniprot_line.name, value) +\n tuple(uniprot_line[['Gene names', 'Protein names',\n 'Subunit structure [CC]']])\n )\n\n\nclass CuratedLocations(CuratedData):\n def __init__(self, input_dir):\n filename = os.path.join(input_dir, 'locations.tsv')\n super(CuratedLocations, self).__init__(\n filename, ['ENTRY', 'GENE NAME', 'PROTEIN NAME', 'LOCATION']\n )\n self.data = {r[0]: r[3] for r in self._raw_data.rows()}\n if self._raw_data.has_missing_information('LOCATION'):\n raise UserWarning(filename + ': please fill in the'\n ' LOCATION column.')\n self._warning = (\n 'WARNING: ambiguous uniprot locations have been added to '\n 'file {}. Execution will continue with default values.'\n .format(filename)\n )\n\n def append(self, uniprot_line, value):\n \"\"\"Add ambiguous location.\"\"\"\n self.data[uniprot_line.name] = value\n self._raw_data.add_row(\n (uniprot_line.name,) +\n tuple(uniprot_line[['Gene names', 'Protein names']]) +\n (value,)\n )\n\n\nclass CuratedCofactors(CuratedData):\n def __init__(self, input_dir):\n filename = os.path.join(input_dir, 'cofactors.tsv')\n super(CuratedCofactors, self).__init__(\n filename,\n ['ENTRY', 'CHEBI', 'NAME',\n 'STOICHIOMETRY', 'UNIPROT ANNOTATION']\n )\n self.data = {}\n for row in self._raw_data.rows():\n self._add_to_data(row[0], Cofactor(*row[1:]))\n self._warning = (\n 'WARNING: ambiguous uniprot cofactor notes have been added to '\n 'file {}. 
Execution will continue with default values.'\n .format(filename, CurationData.missing_tag)\n )\n\n def append(self, entry, cofactors):\n \"\"\"Add ambiguous cofactors.\"\"\"\n for c in cofactors:\n self._add_to_data(entry, c)\n self._raw_data.add_rows([(entry,) + c for c in cofactors])\n\n def _add_to_data(self, entry, cofactor):\n \"\"\"Add cofactor to data only if it has valid information.\"\"\"\n # do not include cofactors with missing chebi or stoichiometry\n # as well as cofactors with 0 stoichiometry\n sto = cofactor.stoichiometry\n cofactor_list = self.data.setdefault(entry, [])\n if (pandas.notnull(cofactor.chebi)\n and pandas.notnull(sto)\n and float(sto) > 0):\n cofactor_list.append(cofactor)\n\n\nclass CuratedLocationMap(CuratedData):\n def __init__(self, input_dir):\n filename = os.path.join(input_dir, 'location_map.tsv')\n super(CuratedLocationMap, self).__init__(\n filename, ['UNIPROT NAME', 'USER ID']\n )\n if self._raw_data.has_missing_information('USER ID'):\n raise UserWarning(filename + ': please fill in the'\n ' USER ID column.')\n self.data = {r[0]: r[1] for r in self._raw_data.rows()}\n # add mandatory compartments (if they are missing)\n self.data.setdefault('Secreted', 'Secreted')\n self._warning = (\n 'WARNING: uniprot locations with no user-defined '\n 'counterpart have been added to {}.'\n .format(filename)\n )\n\n def append(self, location, default_value):\n \"\"\"Add location without user counterpart.\"\"\"\n self.data[location] = default_value\n self._raw_data.add_row((location, default_value))\n\n\nclass CuratedUnknownProteins(CuratedData):\n def __init__(self, input_dir):\n filename = os.path.join(input_dir, 'unknown_proteins.tsv')\n super(CuratedUnknownProteins, self).__init__(\n filename, ['SBML ID', 'UNIPROT GENE']\n )\n if self._raw_data.has_missing_information('UNIPROT GENE'):\n raise UserWarning(filename + ': please fill in the'\n ' UNIPROT GENE column.')\n self.data = {r[0]: r[1] for r in self._raw_data.rows()}\n self._warning = (\n 'WARNING: SBML genes not referenced in uniprot have been added to '\n 'file {}. Execution will continue with default values.'\n .format(filename)\n )\n\n def append(self, gene_id, default_value):\n self.data[gene_id] = default_value\n self._raw_data.add_row((gene_id, default_value))\n\n\nclass CuratedMetabolites(CuratedData):\n def __init__(self, input_dir, known_species):\n filename = os.path.join(input_dir, 'metabolites.tsv')\n super(CuratedMetabolites, self).__init__(\n filename, ['ID', 'NAME', 'SBML ID', 'CONCENTRATION']\n )\n self.data = {}\n invalid_ids = []\n for id_, name, sbml_id, conc in self._raw_data.rows():\n if pandas.isnull(sbml_id):\n sbml_id = None\n elif sbml_id not in known_species:\n invalid_ids.append(id_)\n sbml_id = None\n if pandas.isnull(conc) or conc == '':\n conc = 0\n self.data[id_] = Metabolite(name, sbml_id, float(conc))\n if invalid_ids:\n print(\n '{}: {} do not map to valid SBML metabolite ids. 
'\n 'These metabolites will be removed from processes and '\n 'production targets.'\n .format(filename, ', '.join(invalid_ids))\n )\n self._warning = (\n 'WARNING: unidentified key metabolites have been added to file {}.'\n ' These metabolites will be removed from processes and '\n 'production targets.'\n .format(filename)\n )\n\n def append(self, id_, name, sbml_id, concentration):\n \"\"\"Add unrecognized metabolite.\"\"\"\n self.data[id_] = Metabolite(name, sbml_id, float(concentration))\n self._raw_data.add_row((id_, name, sbml_id, concentration))\n\n\nclass CuratedMacrocomponents(CuratedData):\n def __init__(self, input_dir, known_species):\n filename = os.path.join(input_dir, 'macrocomponents.tsv')\n super(CuratedMacrocomponents, self).__init__(\n filename, ['TARGET_METABOLITE', 'TARGET_FLUX']\n )\n self.data = {}\n invalid_ids = []\n for met, flux in self._raw_data.rows():\n if met in known_species:\n self.data[met] = float(flux)\n else:\n invalid_ids.append(met)\n if invalid_ids:\n print(\n '{}: {} are not valid SBML metabolite ids. '\n 'These entries will be removed from production targets.'\n .format(filename, ', '.join(invalid_ids))\n )\n"
]
| [
[
"pandas.isnull",
"pandas.notnull"
]
]
|
parva-jain/DataScientist | [
"7223798adabcd6438151813bbea9b8a107e103f1"
]
| [
"datascientist/feature_selection/test/test_pearson.py"
]
| [
"#Reading the test file having the data of footballers and target being their\n#overall skill score ( 1-100 ).\nimport pandas as pd\nimport numpy as np\n\nplayer_df = pd.read_csv(\"datascientist/feature_selection/test/CSV/data.csv\")\n\n#Taking only those columns which have numerical or categorical values since \n#feature selection with Pearson Correlation can be performed on numerical data.\nnumcols = ['Overall', 'Crossing','Finishing', 'ShortPassing', 'Dribbling',\n 'LongPassing', 'BallControl', 'Acceleration','SprintSpeed',\n 'Agility', 'Stamina','Volleys','FKAccuracy','Reactions','Balance',\n 'ShotPower','Strength','LongShots','Aggression','Interceptions']\ncatcols = ['Preferred Foot','Position','Body Type','Nationality','Weak Foot']\nplayer_df = player_df[numcols+catcols]\n\n#encoding categorical values with one-hot encoding.\ntraindf = pd.concat([player_df[numcols], pd.get_dummies(player_df[catcols])],axis=1)\nfeatures = traindf.columns\n\n#dropping rows with Nan values\ntraindf = traindf.dropna()\ntraindf = pd.DataFrame(traindf,columns=features)\n\n#Separating features(X) and target(y).\ny = traindf['Overall']\nX = traindf.copy()\nX = X.drop(['Overall'],axis = 1)\n\n#Generating the expected results for assert statements\nfrom scipy import stats\ncolumns = X.columns.tolist()\ncoeff = []\nfor column in columns:\n coeff.append(round(stats.pearsonr(X[column],y)[0],6))\ncoeff = [0 if np.isnan(i) else i for i in coeff]\n\n#for corr_score method with different parameter values\na1 = pd.DataFrame(columns = ['feature','cor_score'])\na1['feature'] = columns\na1['cor_score'] = coeff\na2 = a1.sort_values(by = ['cor_score'],ascending = False)\na3 = a1.reset_index(drop = True)\na4 = a2.reset_index(drop = True)\n\n#for top_corr_featurenames method with different parameter values/\nb1 = X.iloc[:,np.argsort(np.abs(coeff))[-(1):]].columns.tolist()\nb2 = X.iloc[:,np.argsort(np.abs(coeff))[-(15):]].columns.tolist()\nb3 = b2[::-1]\nb4 = X.iloc[:,np.argsort(np.abs(coeff))[-(30):]].columns.tolist()[::-1]\n\n#for top_corr_features method with different parameter values.\nC1 = X[b1]\nC2 = X[b2]\nC3 = X[b3]\nC4 = X[b4]\n\nfrom datascientist.feature_selection.filter_based_selection import PearsonCorrelation\nCol_sel = PearsonCorrelation(X, y)\n\n#using corr_score method with different parameter values.\nscore1 = Col_sel.corr_score()\nassert score1.equals(a1)\n\nscore2 = Col_sel.corr_score(sort = True)\nassert score2.equals(a2)\n\nscore3 = Col_sel.corr_score(reset_index = True)\nassert score3.equals(a3)\n\nscore4 = Col_sel.corr_score(sort = True,reset_index = True)\nassert score4.equals(a4)\n\n#using top_corr_featurenames method with different parameter values.\ntopfeatname1 = Col_sel.top_corr_featurenames()\nassert topfeatname1 == b1\n\ntopfeatname2 = Col_sel.top_corr_featurenames(feat_num = 15)\nassert topfeatname2 == b2\n\ntopfeatname3 = Col_sel.top_corr_featurenames(feat_num = 15,ascending = False)\nassert topfeatname3 == b3\n\ntopfeatname4 = Col_sel.top_corr_featurenames(feat_num = 30,ascending = False)\nassert topfeatname4 == b4\n\n#using top_corr_features method with different parameter values.\nX_mod1 = Col_sel.top_corr_features()\nassert X_mod1.equals(C1)\n\nX_mod2 = Col_sel.top_corr_features(feat_num = 15)\nassert X_mod2.equals(C2)\n\nX_mod3 = Col_sel.top_corr_features(feat_num = 15,ascending = False)\nassert X_mod3.equals(C3)\n\nX_mod4 = Col_sel.top_corr_features(feat_num = 30,ascending = False)\nassert X_mod4.equals(C4)"
]
| [
[
"numpy.isnan",
"pandas.DataFrame",
"scipy.stats.pearsonr",
"numpy.abs",
"pandas.read_csv",
"pandas.get_dummies"
]
]
|
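The test above ranks features by the absolute Pearson correlation between each column and the target, replacing NaN coefficients (from constant columns) with 0 and taking the top-k indices via argsort. A minimal sketch of that ranking on synthetic data (the three columns are made up: one strong positive, one weaker negative, one pure noise):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
y = rng.normal(size=200)
X = np.column_stack([
    y + rng.normal(scale=0.1, size=200),    # strongly (positively) correlated
    -y + rng.normal(scale=1.0, size=200),   # weaker, negative correlation
    rng.normal(size=200),                   # pure noise
])

coeff = np.array([stats.pearsonr(X[:, j], y)[0] for j in range(X.shape[1])])
coeff = np.nan_to_num(coeff)                # constant columns yield NaN -> treat as 0
top2 = np.argsort(np.abs(coeff))[-2:]       # indices of the two strongest |r| features
print(coeff.round(3), top2)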
ashishpatel26/pywick | [
"1afffd1c21c2b188836d3599e802146182757bb5"
]
| [
"pywick/models/segmentation/testnets/psp_saeed.py"
]
| [
"# Source: https://github.com/saeedizadi/binseg_pytoch/blob/master/models/pspnet.py\n\nimport torch\nimport torch.nn.init as init\nimport torch.nn as nn\nfrom torchvision import models\nimport torch.nn.functional as F\nimport numpy as np\n\nimport math\n\ndef initialize_weights(method='kaiming', *models):\n for model in models:\n for module in model.modules():\n\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.Linear):\n if method == 'kaiming':\n init.kaiming_normal_(module.weight.data, np.sqrt(2.0))\n elif method == 'xavier':\n init.xavier_normal(module.weight.data, np.sqrt(2.0))\n elif method == 'orthogonal':\n init.orthogonal(module.weight.data, np.sqrt(2.0))\n elif method == 'normal':\n init.normal(module.weight.data,mean=0, std=0.02)\n if module.bias is not None:\n init.constant(module.bias.data,0)\n\nclass PyramidPoolingModule(nn.Module):\n def __init__(self, in_size, in_channels, out_channels, setting):\n super(PyramidPoolingModule, self).__init__()\n\n self.features = []\n\n for s in setting:\n pool_size = int(math.ceil(float(in_size[0])/s)),int(math.ceil(float(in_size[1])/s))\n self.features.append(nn.Sequential(nn.AvgPool2d(kernel_size=pool_size,stride=pool_size, ceil_mode=True),\n nn.Conv2d(in_channels, out_channels,kernel_size=1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.UpsamplingBilinear2d(size=in_size)))\n\n self.features = nn.ModuleList(modules=self.features)\n\n def forward(self,x):\n out = []\n out.append(x)\n\n for m in self.features:\n out.append(m(x))\n\n out = torch.cat(out, 1)\n\n return out\n\n\nclass PSPNet(nn.Module):\n def __init__(self, num_classes, pretrained=True, **kwargs):\n super(PSPNet, self).__init__()\n\n feats = list(models.resnet101(pretrained=pretrained).modules())\n resent = models.resnet101(pretrained=pretrained)\n\n self.layer0 = nn.Sequential(resent.conv1, resent.bn1, resent.relu, resent.maxpool)\n self.layer1 = resent.layer1\n self.layer2 = resent.layer2\n self.layer3 = resent.layer3\n self.layer4 = resent.layer4\n\n\n for n, m in self.layer3.named_modules():\n if 'conv2' in n:\n m.dilation = (2,2)\n m.padding = (2,2)\n m.stride = (1,1)\n if 'downsample.0' in n:\n m.stride = (1,1)\n\n for n, m in self.layer4.named_modules():\n if 'conv2' in n:\n m.dilation = (4,4)\n m.padding = (4,4)\n m.stride = (1,1)\n if 'downsample.0' in n:\n m.stride = (1,1)\n\n\n #NOte that the size of input image is assumed to be 240hx320w\n self.ppm = PyramidPoolingModule(in_size=(30,40), in_channels=2048, out_channels=512, setting=(1,2,3,6))\n\n #4x512 = 4096\n self.final = nn.Sequential(nn.Conv2d(4096, 512, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Conv2d(512, num_classes, kernel_size=1))\n\n self.activation = nn.Sigmoid()\n initialize_weights(self.ppm, self.final)\n\n def forward(self,x):\n\n input_size = x.size()\n x = self.layer0(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.ppm(x)\n x = self.final(x)\n\n upsample = F.interpolate(x, input_size[2:], mode='bilinear')\n\n return upsample\n # return self.activation(upsample)\n\n\n\n"
]
| [
[
"torch.cat",
"torch.nn.init.constant",
"torch.nn.ModuleList",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.UpsamplingBilinear2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"numpy.sqrt",
"torch.nn.init.normal"
]
]
|
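The PyramidPoolingModule above average-pools the 2048-channel backbone feature map to bin sizes (1, 2, 3, 6), projects each to 512 channels with a 1x1 convolution, upsamples back to the input resolution and concatenates everything with the input, giving 2048 + 4*512 = 4096 channels for the final head. A shape-check sketch of that channel math, using adaptive pooling and untrained 1x1 convolutions purely for illustration:

import torch
import torch.nn as nn
import torch.nn.functional as F

def pyramid_pool(x, bins=(1, 2, 3, 6), out_channels=512):
    # Pool to each bin size, project with a 1x1 conv, upsample back and concatenate.
    n, c, h, w = x.shape
    outs = [x]
    for b in bins:
        pooled = F.adaptive_avg_pool2d(x, b)
        proj = nn.Conv2d(c, out_channels, kernel_size=1, bias=False)(pooled)  # untrained, shape check only
        outs.append(F.interpolate(proj, size=(h, w), mode='bilinear', align_corners=False))
    return torch.cat(outs, dim=1)

feat = torch.randn(1, 2048, 30, 40)   # layer4 output size assumed for a 240x320 input, as noted above
print(pyramid_pool(feat).shape)       # torch.Size([1, 4096, 30, 40]) = 2048 + 4*512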
georgios-vassos1/fastsgd | [
"5ccb2fc2f564056e5bd1ccf0993cf6bbd6425305"
]
| [
"fastsgd/sgd.py"
]
| [
"import os, time\nfrom functools import partial\nfrom scipy.optimize import brentq\nimport pandas as pd\nimport numpy as np\nfrom .learning_rate.__init__ import *\n\n\nclass SGD:\n def __init__(self, n: int, p: int, timer: time, **kwargs):\n self._name = kwargs.get(\"method\", None)\n self._n_params = p\n self._reltol = kwargs.get(\"reltol\", 1e-5) # relative tolerance for convergence\n self._n_passes = kwargs.get(\"npasses\", 10) # number of passes over data\n self._size = kwargs.get(\"size\", 10) # number of estimates to be recorded (log-uniformly)\n self._estimates = np.zeros((self._n_params, self._size))\n self._last_estimate = np.zeros(self._n_params)\n self._times = np.zeros(self._size)\n self._timer = timer\n self._t = 0\n self._n_recorded = 0 # number of coefs that have been recorded\n self._pos = np.zeros(self._size) # the iteration of recorded coefs\n self._pass = kwargs.get(\"pass\", True) # force running for n_passes on data\n self._check = kwargs.get(\"check\", False)\n if self._check: self._truth = kwargs.get(\"truth\", None)\n\n ## Select the iterations to store estimates\n n_iters = n * self._n_passes\n self._pos = (10.0 ** (np.arange(self._size) * np.log10(float(n_iters)) / (self._size-1))).astype(int)\n if self._pos[-1] != n_iters: self._pos[-1] = n_iters\n\n ## Set learning rate\n self._lr_choice = kwargs.get(\"lr\", \"one-dim\") # type of learning rate: 'one-dim', 'one-dim-eigen', 'd-dim', 'adagrad', 'rmsprop'\n controls = kwargs.get(\"lr_controls\", {\"scale\": 1.0, \"alpha\": 1.0, \"gamma\": 0.6, \"c\": 0.5})\n if self._lr_choice == \"one-dim\":\n self._lr_obj = OnedimLR(controls[\"scale\"], controls[\"gamma\"], controls[\"alpha\"], controls[\"c\"])\n elif self._lr_choice == \"one-dim-eigen\":\n self._lr_obj = OnedimEigLR(self._n_params)\n elif self._lr_choice == \"d-dim\":\n self._lr_obj = ddimLR(self._n_params, 1.0, 0.0, 1.0, 1.0, controls[\"eps\"])\n elif self._lr_choice == \"adagrad\":\n self._lr_obj = ddimLR(self._n_params, controls[\"eta\"], 1.0, 1.0, 0.5, controls[\"eps\"])\n elif self._lr_choice == \"rmsprop\":\n self._lr_obj = ddimLR(self._n_params, controls[\"eta\"], controls[\"gamma\"], 1.0 - controls[\"gamma\"], 0.5, controls[\"eps\"])\n\n\n def get_value_of(self, attribute: str):\n try: return self.__dict__[\"_\" + attribute]\n except KeyError as e: print(attribute + \" is not an attribute of the caller.\")\n\n def convergence(self, theta_new: np.ndarray, theta_old: np.ndarray) -> bool:\n if self._check:\n qe = np.mean((theta_new - self._truth) ** 2)\n # print(qe)\n if qe < self._reltol: return True\n else:\n qe = np.mean(np.abs(theta_new - theta_old)) / np.mean(np.abs(theta_old))\n if qe < self._reltol: return True\n return False\n\n def sync_members(self, theta_new: np.ndarray):\n self._last_estimate = theta_new\n self._t += 1\n if self._t == self._pos[self._n_recorded]:\n self._estimates[:, self._n_recorded] = theta_new\n ## TODO record elapsed time\n self._n_recorded += 1\n while (self._n_recorded < self._size) and (self._pos[self._n_recorded-1] == self._pos[self._n_recorded]):\n self._estimates[:, self._n_recorded] = theta_new\n ## TODO record elapsed time\n self._n_recorded += 1\n\n def early_stop(self):\n pass\n\n def _learning_rate(self, t: int, grad_t: np.ndarray) -> LRvalue:\n return self._lr_obj(t, grad_t)\n\n\n"
]
| [
[
"numpy.abs",
"numpy.mean",
"numpy.arange",
"numpy.zeros"
]
]
|
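The SGD class above stores intermediate estimates at log-uniformly spaced iterations, computed as 10 ** (arange(size) * log10(n_iters) / (size - 1)) with the last slot pinned to n_iters. A worked sketch of those checkpoint positions (n_iters and size are illustrative; for 1000 iterations and 10 slots the positions come out to roughly 1, 2, 4, 10, 21, 46, 100, 215, 464, 1000):

import numpy as np

n_iters, size = 1000, 10   # e.g. n * n_passes total iterations, 10 recorded estimates
pos = (10.0 ** (np.arange(size) * np.log10(float(n_iters)) / (size - 1))).astype(int)
if pos[-1] != n_iters:
    pos[-1] = n_iters
print(pos)   # iterations at which estimates are recorded, log-uniform from 1 to n_iters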
Ritvik19/Text-Data-Augmentation | [
"30e7ba85da514c328e951519ca86a07863ba69ca"
]
| [
"text_data_augmentation/synonym_replacement.py"
]
| [
"import random\nimport re\n\nimport nltk\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom tqdm.auto import tqdm\n\n\nclass SynonymReplacement:\n \"\"\"Synonym Replacement Augmentation creates Augmented Samples by randomly\n replacing some words with their synonyms based on the word net data base.\n\n Args:\n alpha (float, optional): Control parameter, frequency of operation increases\n with increase in the vvalue of alpha. Defaults to 0.01.\n n_aug (int, optional): Number of augmentations to be created for one sentence.\n Defaults to 4.\n use_tfidf (bool, optional): Whether to use TFIDF weights to sample a word in the\n sentence. Defaults to True.\n seed (int, optional): Random State for reproducibility. Defaults to None.\n show_progress (bool, optional): Set True to display progress bar.\n Defaults to True.\n \"\"\"\n\n def __init__(\n self, alpha=0.1, n_aug=4, use_tfidf=True, seed=None, show_progress=True\n ):\n self.alpha = alpha\n self.n_aug = n_aug\n self.use_tfidf = use_tfidf\n self.seed = seed\n self.disable_progress = not show_progress\n self.stopwords = nltk.corpus.stopwords.words(\"english\")\n\n def __get_synonym(self, word):\n synonyms = set()\n for syn in nltk.corpus.wordnet.synsets(word):\n for l in syn.lemmas():\n synonyms.add(l.name())\n synonyms = [word] if len(synonyms) == 0 else list(synonyms)\n return random.choice(synonyms)\n\n def __replace_word(self, words):\n if self.use_tfidf:\n sentence = \" \".join(words)\n v = self.tvec.transform([sentence])\n v = np.ravel(v.todense())\n c = np.max(v)\n z = np.sum(c - v) / np.mean(v)\n weights = np.where(\n v != 0, np.minimum((0.7 * (c - v) / z), np.ones_like(v)), 0\n )\n indices = np.arange(len(v))\n word_2_replace = self.tvec.get_feature_names()[\n random.choices(indices, weights=weights)[0]\n ]\n syn = self.__get_synonym(word_2_replace)\n return re.sub(word_2_replace, syn, sentence, 1, re.IGNORECASE)\n else:\n r_idx = random.randint(0, len(words) - 1)\n while words[r_idx] in self.stopwords:\n r_idx = random.randint(0, len(words) - 1)\n syn = self.__get_synonym(words[r_idx])\n words[r_idx] = syn\n return \" \".join(words)\n\n def __replace_sent(self, sentence):\n words = nltk.word_tokenize(sentence)\n for _ in range(int(self.alpha * len(words))):\n aug_sent = self.__replace_word(words)\n return aug_sent\n\n def __call__(self, x):\n random.seed(self.seed)\n if self.use_tfidf:\n self.tvec = TfidfVectorizer().fit(x)\n augmented = []\n for sentence in tqdm(x, disable=self.disable_progress):\n for _ in range(self.n_aug):\n augmented.append(self.__replace_sent(sentence))\n return list(x) + augmented\n"
]
| [
[
"numpy.max",
"numpy.ones_like",
"numpy.sum",
"numpy.mean",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
]
|
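SynonymReplacement above swaps randomly chosen words for WordNet synonyms, optionally biasing the choice of word by TF-IDF weights. A minimal sketch of the non-TF-IDF path, assuming the NLTK WordNet corpus has been downloaded; the sentence is made up and no stopword filtering is applied:

import random
import nltk
# Assumes a one-time corpus download, e.g. nltk.download('wordnet')

def get_synonym(word):
    # All lemma names across the word's WordNet synsets; fall back to the word itself.
    synonyms = {lemma.name() for syn in nltk.corpus.wordnet.synsets(word) for lemma in syn.lemmas()}
    return random.choice(sorted(synonyms)) if synonyms else word

sentence = "the quick brown fox jumps over the lazy dog"
words = sentence.split()
idx = random.randrange(len(words))        # uniform choice, no TF-IDF weighting here
words[idx] = get_synonym(words[idx])
print(" ".join(words))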
zongdaoming/CMT | [
"fc3773bb6c6b1ab091688addfffca3fb1e382ae4"
]
| [
"lib/models/HyperDensenet.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys,os\nsys.path.append('/home/zongdaoming/cv/multi-organ/multi-organ-ijcai/')\nfrom lib.models.BaseModelClass import BaseModel\nfrom torchsummary import summary\n\n\"\"\"\nCodes are borrowed and modified from this repo: https://github.com/josedolz/HyperDenseNet_pytorch \n\"\"\"\ndef conv(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d,\n BN=False, ws=False, activ=nn.LeakyReLU(0.2), gainWS=2):\n convlayer = layer(nin, nout, kernel_size, stride=stride, padding=padding, bias=bias)\n layers = []\n # if ws:\n # layers.append(WScaleLayer(convlayer, gain=gainWS))\n if BN:\n layers.append(nn.BatchNorm2d(nout))\n if activ is not None:\n if activ == nn.PReLU:\n # to avoid sharing the same parameter, activ must be set to nn.PReLU (without '()')\n layers.append(activ(num_parameters=1))\n else:\n # if activ == nn.PReLU(), the parameter will be shared for the whole network !\n layers.append(activ)\n layers.insert(ws, convlayer)\n return nn.Sequential(*layers)\n\nclass ResidualConv(nn.Module):\n def __init__(self, nin, nout, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):\n super(ResidualConv, self).__init__()\n\n convs = [conv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ),\n conv(nout, nout, bias=bias, BN=BN, ws=ws, activ=None)]\n self.convs = nn.Sequential(*convs)\n\n res = []\n if nin != nout:\n res.append(conv(nin, nout, kernel_size=1, padding=0, bias=False, BN=BN, ws=ws, activ=None))\n self.res = nn.Sequential(*res)\n\n activation = []\n if activ is not None:\n if activ == nn.PReLU:\n # to avoid sharing the same parameter, activ must be set to nn.PReLU (without '()')\n activation.append(activ(num_parameters=1))\n else:\n # if activ == nn.PReLU(), the parameter will be shared for the whole network !\n activation.append(activ)\n self.activation = nn.Sequential(*activation)\n\n def forward(self, input):\n out = self.convs(input)\n return self.activation(out + self.res(input))\n\n\ndef upSampleConv_Res(nin, nout, upscale=2, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):\n return nn.Sequential(\n nn.Upsample(scale_factor=upscale),\n ResidualConv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ)\n )\n\n\ndef conv_block(in_dim, out_dim, act_fn, kernel_size=3, stride=1, padding=1, dilation=1):\n model = nn.Sequential(\n nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation),\n nn.BatchNorm2d(out_dim),\n act_fn,\n )\n return model\n\n\ndef conv_block_1(in_dim, out_dim):\n model = nn.Sequential(\n nn.Conv2d(in_dim, out_dim, kernel_size=1),\n nn.BatchNorm2d(out_dim),\n nn.PReLU(),\n )\n return model\n\n\ndef conv_block_Asym(in_dim, out_dim, kernelSize):\n model = nn.Sequential(\n nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([2, 0])),\n nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, 2])),\n nn.BatchNorm2d(out_dim),\n nn.PReLU(),\n )\n return model\n\n\ndef conv_block_Asym_Inception(in_dim, out_dim, kernel_size, padding, dilation=1):\n model = nn.Sequential(\n nn.Conv2d(in_dim, out_dim, kernel_size=[kernel_size, 1], padding=tuple([padding * dilation, 0]),\n dilation=(dilation, 1)),\n nn.BatchNorm2d(out_dim),\n nn.ReLU(),\n nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernel_size], padding=tuple([0, padding * dilation]),\n dilation=(dilation, 1)),\n nn.BatchNorm2d(out_dim),\n nn.ReLU(),\n )\n return model\n\n\ndef conv_block_Asym_Inception_WithIncreasedFeatMaps(in_dim, mid_dim, out_dim, 
kernel_size, padding, dilation=1):\n model = nn.Sequential(\n nn.Conv2d(in_dim, mid_dim, kernel_size=[kernel_size, 1], padding=tuple([padding * dilation, 0]),\n dilation=(dilation, 1)),\n nn.BatchNorm2d(mid_dim),\n nn.ReLU(),\n nn.Conv2d(mid_dim, out_dim, kernel_size=[1, kernel_size], padding=tuple([0, padding * dilation]),\n dilation=(dilation, 1)),\n nn.BatchNorm2d(out_dim),\n nn.ReLU(),\n )\n return model\n\ndef conv_block_Asym_ERFNet(in_dim, out_dim, kernelSize, padding, drop, dilation):\n model = nn.Sequential(\n nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([padding, 0]), bias=True),\n nn.ReLU(),\n nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, padding]), bias=True),\n nn.BatchNorm2d(out_dim, eps=1e-03),\n nn.ReLU(),\n nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([padding * dilation, 0]), bias=True,\n dilation=(dilation, 1)),\n nn.ReLU(),\n nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, padding * dilation]), bias=True,\n dilation=(1, dilation)),\n nn.BatchNorm2d(out_dim, eps=1e-03),\n nn.Dropout2d(drop),\n )\n return model\n\n\ndef conv_block_3_3(in_dim, out_dim):\n model = nn.Sequential(\n nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_dim),\n nn.PReLU(),\n )\n return model\n\n\n# TODO: Change order of block: BN + Activation + Conv\ndef conv_decod_block(in_dim, out_dim, act_fn):\n model = nn.Sequential(\n nn.ConvTranspose2d(in_dim, out_dim, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.BatchNorm2d(out_dim),\n act_fn,\n )\n return model\n\n\ndef dilation_conv_block(in_dim, out_dim, act_fn, stride_val, dil_val):\n model = nn.Sequential(\n nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride_val, padding=1, dilation=dil_val),\n nn.BatchNorm2d(out_dim),\n act_fn,\n )\n return model\n\n\ndef maxpool():\n pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n return pool\n\n\ndef avrgpool05():\n pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)\n return pool\n\n\ndef avrgpool025():\n pool = nn.AvgPool2d(kernel_size=2, stride=4, padding=0)\n return pool\n\n\ndef avrgpool0125():\n pool = nn.AvgPool2d(kernel_size=2, stride=8, padding=0)\n return pool\n\n\ndef maxpool():\n pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n return pool\n\n\ndef maxpool_1_4():\n pool = nn.MaxPool2d(kernel_size=2, stride=4, padding=0)\n return pool\n\n\ndef maxpool_1_8():\n pool = nn.MaxPool2d(kernel_size=2, stride=8, padding=0)\n return pool\n\n\ndef maxpool_1_16():\n pool = nn.MaxPool2d(kernel_size=2, stride=16, padding=0)\n return pool\n\n\ndef maxpool_1_32():\n pool = nn.MaxPool2d(kernel_size=2, stride=32, padding=0)\n\n\ndef conv_block_3(in_dim, out_dim, act_fn):\n model = nn.Sequential(\n conv_block(in_dim, out_dim, act_fn),\n conv_block(out_dim, out_dim, act_fn),\n nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(out_dim),\n )\n return model\n\n\ndef classificationNet(D_in):\n H = 400\n D_out = 1\n model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, int(H / 4)),\n torch.nn.ReLU(),\n torch.nn.Linear(int(H / 4), D_out)\n )\n\n return model\n\n\n# from layers import *\n\ndef croppCenter(tensorToCrop, finalShape):\n org_shape = tensorToCrop.shape\n diff = org_shape[2] - finalShape[2]\n croppBorders = int(diff / 2)\n return tensorToCrop[:,\n :,\n croppBorders:org_shape[2] - croppBorders,\n croppBorders:org_shape[3] - croppBorders,\n croppBorders:org_shape[4] - 
croppBorders]\n\n\ndef convBlock(nin, nout, kernel_size=3, batchNorm=False, layer=nn.Conv3d, bias=True, dropout_rate=0.0, dilation=1):\n if batchNorm == False:\n return nn.Sequential(\n nn.PReLU(),\n nn.Dropout(p=dropout_rate),\n layer(nin, nout, kernel_size=kernel_size, bias=bias, dilation=dilation)\n )\n else:\n return nn.Sequential(\n nn.BatchNorm3d(nin),\n nn.PReLU(),\n nn.Dropout(p=dropout_rate),\n layer(nin, nout, kernel_size=kernel_size, bias=bias, dilation=dilation)\n )\n\n\ndef convBatch(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d, dilation=1):\n return nn.Sequential(\n layer(nin, nout, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias, dilation=dilation),\n nn.BatchNorm2d(nout),\n # nn.LeakyReLU(0.2)\n nn.PReLU()\n )\n\n\nclass HyperDenseNet_2Mod(BaseModel):\n def __init__(self, in_channels=2, classes=4):\n super(HyperDenseNet_2Mod, self).__init__()\n self.num_classes = classes\n assert in_channels == 2, \"input channels must be two for this architecture\"\n\n # Path-Top\n self.conv1_Top = convBlock(1, 25)\n self.conv2_Top = convBlock(50, 25, batchNorm=True)\n self.conv3_Top = convBlock(100, 25, batchNorm=True)\n self.conv4_Top = convBlock(150, 50, batchNorm=True)\n self.conv5_Top = convBlock(250, 50, batchNorm=True)\n self.conv6_Top = convBlock(350, 50, batchNorm=True)\n self.conv7_Top = convBlock(450, 75, batchNorm=True)\n self.conv8_Top = convBlock(600, 75, batchNorm=True)\n self.conv9_Top = convBlock(750, 75, batchNorm=True)\n\n # Path-Bottom\n self.conv1_Bottom = convBlock(1, 25)\n self.conv2_Bottom = convBlock(50, 25, batchNorm=True)\n self.conv3_Bottom = convBlock(100, 25, batchNorm=True)\n self.conv4_Bottom = convBlock(150, 50, batchNorm=True)\n self.conv5_Bottom = convBlock(250, 50, batchNorm=True)\n self.conv6_Bottom = convBlock(350, 50, batchNorm=True)\n self.conv7_Bottom = convBlock(450, 75, batchNorm=True)\n self.conv8_Bottom = convBlock(600, 75, batchNorm=True)\n self.conv9_Bottom = convBlock(750, 75, batchNorm=True)\n\n self.fully_1 = nn.Conv3d(1800, 400, kernel_size=1)\n self.fully_2 = nn.Conv3d(400, 200, kernel_size=1)\n self.fully_3 = nn.Conv3d(200, 150, kernel_size=1)\n self.final = nn.Conv3d(150, classes, kernel_size=1)\n\n def forward(self, input):\n # ----- First layer ------ #\n # get 2 of the channels as 5D tensors\n # pdb.set_trace()\n print(\"input shape \", input.shape)\n y1t = self.conv1_Top(input[:, 0:1, :, :, :])\n y1b = self.conv1_Bottom(input[:, 1:2, :, :, :])\n\n # ----- Second layer ------ #\n # concatenate\n y2t_i = torch.cat((y1t, y1b), dim=1)\n y2b_i = torch.cat((y1b, y1t), dim=1)\n\n y2t_o = self.conv2_Top(y2t_i)\n y2b_o = self.conv2_Bottom(y2b_i)\n\n # ----- Third layer ------ #\n y2t_i_cropped = croppCenter(y2t_i, y2t_o.shape)\n y2b_i_cropped = croppCenter(y2b_i, y2t_o.shape)\n\n # concatenate\n y3t_i = torch.cat((y2t_i_cropped, y2t_o, y2b_o), dim=1)\n y3b_i = torch.cat((y2b_i_cropped, y2b_o, y2t_o), dim=1)\n\n y3t_o = self.conv3_Top(y3t_i)\n y3b_o = self.conv3_Bottom(y3b_i)\n\n # ------ Fourth layer ----- #\n y3t_i_cropped = croppCenter(y3t_i, y3t_o.shape)\n y3b_i_cropped = croppCenter(y3b_i, y3t_o.shape)\n\n # concatenate\n y4t_i = torch.cat((y3t_i_cropped, y3t_o, y3b_o), dim=1)\n y4b_i = torch.cat((y3b_i_cropped, y3b_o, y3t_o), dim=1)\n\n y4t_o = self.conv4_Top(y4t_i)\n y4b_o = self.conv4_Bottom(y4b_i)\n\n # ------ Fifth layer ----- #\n y4t_i_cropped = croppCenter(y4t_i, y4t_o.shape)\n y4b_i_cropped = croppCenter(y4b_i, y4t_o.shape)\n\n # concatenate\n y5t_i = torch.cat((y4t_i_cropped, 
y4t_o, y4b_o), dim=1)\n y5b_i = torch.cat((y4b_i_cropped, y4b_o, y4t_o), dim=1)\n\n y5t_o = self.conv5_Top(y5t_i)\n y5b_o = self.conv5_Bottom(y5b_i)\n\n # ------ Sixth layer ----- #\n y5t_i_cropped = croppCenter(y5t_i, y5t_o.shape)\n y5b_i_cropped = croppCenter(y5b_i, y5t_o.shape)\n\n # concatenate\n y6t_i = torch.cat((y5t_i_cropped, y5t_o, y5b_o), dim=1)\n y6b_i = torch.cat((y5b_i_cropped, y5b_o, y5t_o), dim=1)\n\n y6t_o = self.conv6_Top(y6t_i)\n y6b_o = self.conv6_Bottom(y6b_i)\n\n # ------ Seventh layer ----- #\n y6t_i_cropped = croppCenter(y6t_i, y6t_o.shape)\n y6b_i_cropped = croppCenter(y6b_i, y6t_o.shape)\n\n # concatenate\n y7t_i = torch.cat((y6t_i_cropped, y6t_o, y6b_o), dim=1)\n y7b_i = torch.cat((y6b_i_cropped, y6b_o, y6t_o), dim=1)\n\n y7t_o = self.conv7_Top(y7t_i)\n y7b_o = self.conv7_Bottom(y7b_i)\n\n # ------ Eight layer ----- #\n y7t_i_cropped = croppCenter(y7t_i, y7t_o.shape)\n y7b_i_cropped = croppCenter(y7b_i, y7t_o.shape)\n\n # concatenate\n y8t_i = torch.cat((y7t_i_cropped, y7t_o, y7b_o), dim=1)\n y8b_i = torch.cat((y7b_i_cropped, y7b_o, y7t_o), dim=1)\n\n y8t_o = self.conv8_Top(y8t_i)\n y8b_o = self.conv8_Bottom(y8b_i)\n\n # ------ Ninth layer ----- #\n y8t_i_cropped = croppCenter(y8t_i, y8t_o.shape)\n y8b_i_cropped = croppCenter(y8b_i, y8t_o.shape)\n\n # concatenate\n y9t_i = torch.cat((y8t_i_cropped, y8t_o, y8b_o), dim=1)\n y9b_i = torch.cat((y8b_i_cropped, y8b_o, y8t_o), dim=1)\n\n y9t_o = self.conv9_Top(y9t_i)\n y9b_o = self.conv9_Bottom(y9b_i)\n\n ##### Fully connected layers\n y9t_i_cropped = croppCenter(y9t_i, y9t_o.shape)\n y9b_i_cropped = croppCenter(y9b_i, y9t_o.shape)\n\n outputPath_top = torch.cat((y9t_i_cropped, y9t_o, y9b_o), dim=1)\n outputPath_bottom = torch.cat((y9b_i_cropped, y9b_o, y9t_o), dim=1)\n\n inputFully = torch.cat((outputPath_top, outputPath_bottom), dim=1)\n\n y = self.fully_1(inputFully)\n y = self.fully_2(y)\n y = self.fully_3(y)\n\n return self.final(y)\n\n def test(self, device='cpu'):\n input_tensor = torch.rand(1, 2, 22, 22, 22)\n ideal_out = torch.rand(1, self.num_classes, 22, 22, 22)\n out = self.forward(input_tensor)\n # assert ideal_out.shape == out.shape\n summary(self.to(torch.device(device)), (2, 22, 22, 22),device=device)\n print(\"HyperDenseNet_2Mod test is complete\", out.shape)\n\n\nclass HyperDenseNet(BaseModel):\n def __init__(self, in_channels=3, classes=4):\n super(HyperDenseNet, self).__init__()\n assert in_channels == 3, \"HyperDensenet supports 3 in_channels. 
For 2 in_channels use HyperDenseNet_2Mod \"\n self.num_classes = classes\n\n # Path-Top\n self.conv1_Top = convBlock(1, 25)\n self.conv2_Top = convBlock(75, 25, batchNorm=True)\n self.conv3_Top = convBlock(150, 25, batchNorm=True)\n self.conv4_Top = convBlock(225, 50, batchNorm=True)\n self.conv5_Top = convBlock(375, 50, batchNorm=True)\n self.conv6_Top = convBlock(525, 50, batchNorm=True)\n self.conv7_Top = convBlock(675, 75, batchNorm=True)\n self.conv8_Top = convBlock(900, 75, batchNorm=True)\n self.conv9_Top = convBlock(1125, 75, batchNorm=True)\n\n # Path-Middle\n self.conv1_Middle = convBlock(1, 25)\n self.conv2_Middle = convBlock(75, 25, batchNorm=True)\n self.conv3_Middle = convBlock(150, 25, batchNorm=True)\n self.conv4_Middle = convBlock(225, 50, batchNorm=True)\n self.conv5_Middle = convBlock(375, 50, batchNorm=True)\n self.conv6_Middle = convBlock(525, 50, batchNorm=True)\n self.conv7_Middle = convBlock(675, 75, batchNorm=True)\n self.conv8_Middle = convBlock(900, 75, batchNorm=True)\n self.conv9_Middle = convBlock(1125, 75, batchNorm=True)\n\n # Path-Bottom\n self.conv1_Bottom = convBlock(1, 25)\n self.conv2_Bottom = convBlock(75, 25, batchNorm=True)\n self.conv3_Bottom = convBlock(150, 25, batchNorm=True)\n self.conv4_Bottom = convBlock(225, 50, batchNorm=True)\n self.conv5_Bottom = convBlock(375, 50, batchNorm=True)\n self.conv6_Bottom = convBlock(525, 50, batchNorm=True)\n self.conv7_Bottom = convBlock(675, 75, batchNorm=True)\n self.conv8_Bottom = convBlock(900, 75, batchNorm=True)\n self.conv9_Bottom = convBlock(1125, 75, batchNorm=True)\n\n self.fully_1 = nn.Conv3d(4050, 400, kernel_size=1)\n self.fully_2 = nn.Conv3d(400, 200, kernel_size=1)\n self.fully_3 = nn.Conv3d(200, 150, kernel_size=1)\n self.final = nn.Conv3d(150, classes, kernel_size=1)\n\n def forward(self, input):\n # ----- First layer ------ #\n # get the 3 channels as 5D tensors\n y1t = self.conv1_Top(input[:, 0:1, :, :, :])\n y1m = self.conv1_Middle(input[:, 1:2, :, :, :])\n y1b = self.conv1_Bottom(input[:, 2:3, :, :, :])\n\n # ----- Second layer ------ #\n # concatenate\n y2t_i = torch.cat((y1t, y1m, y1b), dim=1)\n y2m_i = torch.cat((y1m, y1t, y1b), dim=1)\n y2b_i = torch.cat((y1b, y1t, y1m), dim=1)\n\n y2t_o = self.conv2_Top(y2t_i)\n y2m_o = self.conv2_Middle(y2m_i)\n y2b_o = self.conv2_Bottom(y2b_i)\n\n # ----- Third layer ------ #\n y2t_i_cropped = croppCenter(y2t_i, y2t_o.shape)\n y2m_i_cropped = croppCenter(y2m_i, y2t_o.shape)\n y2b_i_cropped = croppCenter(y2b_i, y2t_o.shape)\n\n # concatenate\n y3t_i = torch.cat((y2t_i_cropped, y2t_o, y2m_o, y2b_o), dim=1)\n y3m_i = torch.cat((y2m_i_cropped, y2m_o, y2t_o, y2b_o), dim=1)\n y3b_i = torch.cat((y2b_i_cropped, y2b_o, y2t_o, y2m_o), dim=1)\n\n y3t_o = self.conv3_Top(y3t_i)\n y3m_o = self.conv3_Middle(y3m_i)\n y3b_o = self.conv3_Bottom(y3b_i)\n\n # ------ Fourth layer ----- #\n y3t_i_cropped = croppCenter(y3t_i, y3t_o.shape)\n y3m_i_cropped = croppCenter(y3m_i, y3t_o.shape)\n y3b_i_cropped = croppCenter(y3b_i, y3t_o.shape)\n\n # concatenate\n y4t_i = torch.cat((y3t_i_cropped, y3t_o, y3m_o, y3b_o), dim=1)\n y4m_i = torch.cat((y3m_i_cropped, y3m_o, y3t_o, y3b_o), dim=1)\n y4b_i = torch.cat((y3b_i_cropped, y3b_o, y3t_o, y3m_o), dim=1)\n\n y4t_o = self.conv4_Top(y4t_i)\n y4m_o = self.conv4_Middle(y4m_i)\n y4b_o = self.conv4_Bottom(y4b_i)\n\n # ------ Fifth layer ----- #\n y4t_i_cropped = croppCenter(y4t_i, y4t_o.shape)\n y4m_i_cropped = croppCenter(y4m_i, y4t_o.shape)\n y4b_i_cropped = croppCenter(y4b_i, y4t_o.shape)\n\n # concatenate\n y5t_i = 
torch.cat((y4t_i_cropped, y4t_o, y4m_o, y4b_o), dim=1)\n y5m_i = torch.cat((y4m_i_cropped, y4m_o, y4t_o, y4b_o), dim=1)\n y5b_i = torch.cat((y4b_i_cropped, y4b_o, y4t_o, y4m_o), dim=1)\n\n y5t_o = self.conv5_Top(y5t_i)\n y5m_o = self.conv5_Middle(y5m_i)\n y5b_o = self.conv5_Bottom(y5b_i)\n\n # ------ Sixth layer ----- #\n y5t_i_cropped = croppCenter(y5t_i, y5t_o.shape)\n y5m_i_cropped = croppCenter(y5m_i, y5t_o.shape)\n y5b_i_cropped = croppCenter(y5b_i, y5t_o.shape)\n\n # concatenate\n y6t_i = torch.cat((y5t_i_cropped, y5t_o, y5m_o, y5b_o), dim=1)\n y6m_i = torch.cat((y5m_i_cropped, y5m_o, y5t_o, y5b_o), dim=1)\n y6b_i = torch.cat((y5b_i_cropped, y5b_o, y5t_o, y5m_o), dim=1)\n\n y6t_o = self.conv6_Top(y6t_i)\n y6m_o = self.conv6_Middle(y6m_i)\n y6b_o = self.conv6_Bottom(y6b_i)\n\n # ------ Seventh layer ----- #\n y6t_i_cropped = croppCenter(y6t_i, y6t_o.shape)\n y6m_i_cropped = croppCenter(y6m_i, y6t_o.shape)\n y6b_i_cropped = croppCenter(y6b_i, y6t_o.shape)\n\n # concatenate\n y7t_i = torch.cat((y6t_i_cropped, y6t_o, y6m_o, y6b_o), dim=1)\n y7m_i = torch.cat((y6m_i_cropped, y6m_o, y6t_o, y6b_o), dim=1)\n y7b_i = torch.cat((y6b_i_cropped, y6b_o, y6t_o, y6m_o), dim=1)\n\n y7t_o = self.conv7_Top(y7t_i)\n y7m_o = self.conv7_Middle(y7m_i)\n y7b_o = self.conv7_Bottom(y7b_i)\n\n # ------ Eight layer ----- #\n y7t_i_cropped = croppCenter(y7t_i, y7t_o.shape)\n y7m_i_cropped = croppCenter(y7m_i, y7t_o.shape)\n y7b_i_cropped = croppCenter(y7b_i, y7t_o.shape)\n\n # concatenate\n y8t_i = torch.cat((y7t_i_cropped, y7t_o, y7m_o, y7b_o), dim=1)\n y8m_i = torch.cat((y7m_i_cropped, y7m_o, y7t_o, y7b_o), dim=1)\n y8b_i = torch.cat((y7b_i_cropped, y7b_o, y7t_o, y7m_o), dim=1)\n\n y8t_o = self.conv8_Top(y8t_i)\n y8m_o = self.conv8_Middle(y8m_i)\n y8b_o = self.conv8_Bottom(y8b_i)\n\n # ------ Ninth layer ----- #\n y8t_i_cropped = croppCenter(y8t_i, y8t_o.shape)\n y8m_i_cropped = croppCenter(y8m_i, y8t_o.shape)\n y8b_i_cropped = croppCenter(y8b_i, y8t_o.shape)\n\n # concatenate\n y9t_i = torch.cat((y8t_i_cropped, y8t_o, y8m_o, y8b_o), dim=1)\n y9m_i = torch.cat((y8m_i_cropped, y8m_o, y8t_o, y8b_o), dim=1)\n y9b_i = torch.cat((y8b_i_cropped, y8b_o, y8t_o, y8m_o), dim=1)\n\n y9t_o = self.conv9_Top(y9t_i)\n y9m_o = self.conv9_Middle(y9m_i)\n y9b_o = self.conv9_Bottom(y9b_i)\n\n ##### Fully connected layers\n y9t_i_cropped = croppCenter(y9t_i, y9t_o.shape)\n y9m_i_cropped = croppCenter(y9m_i, y9t_o.shape)\n y9b_i_cropped = croppCenter(y9b_i, y9t_o.shape)\n\n outputPath_top = torch.cat((y9t_i_cropped, y9t_o, y9m_o, y9b_o), dim=1)\n outputPath_middle = torch.cat((y9m_i_cropped, y9m_o, y9t_o, y9b_o), dim=1)\n outputPath_bottom = torch.cat((y9b_i_cropped, y9b_o, y9t_o, y9m_o), dim=1)\n\n inputFully = torch.cat((outputPath_top, outputPath_middle, outputPath_bottom), dim=1)\n\n y = self.fully_1(inputFully)\n y = self.fully_2(y)\n y = self.fully_3(y)\n\n return self.final(y)\n\n def test(self, device='cpu'):\n input_tensor = torch.rand(1, 3, 20, 20, 20)\n ideal_out = torch.rand(1, self.num_classes, 20, 20, 20)\n out = self.forward(input_tensor)\n print(out.shape)\n # assert ideal_out.shape == out.shape\n device = torch.device(device) \n summary(self.to(device), (3, 16, 16, 16), device = 'cpu')\n print(\"HyperDenseNet test is complete!!!\", out.shape)\n\nif __name__ == \"__main__\":\n model = HyperDenseNet_2Mod(2,4)\n model.test()"
]
| [
[
"torch.nn.Linear",
"torch.rand",
"torch.cat",
"torch.device",
"torch.nn.Dropout",
"torch.nn.BatchNorm3d",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.Upsample",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.PReLU",
"torch.nn.Conv3d",
"torch.nn.Dropout2d"
]
]
|
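The HyperDenseNet file in the row above relies on `croppCenter` to shrink each skip tensor to the spatial size produced by the padding-free 3D convolutions before concatenation. Below is a minimal, self-contained sketch of that shape bookkeeping; the helper is re-implemented here only so the snippet runs without the repo's `lib.models` package, and the tensor sizes are illustrative, not taken from the paper.

```python
import torch
import torch.nn as nn

def cropp_center(tensor, final_shape):
    # Same idea as croppCenter above: trim equal borders on the three spatial
    # dims so the skip tensor matches the conv output's spatial shape.
    diff = tensor.shape[2] - final_shape[2]
    b = diff // 2
    return tensor[:, :,
                  b:tensor.shape[2] - b,
                  b:tensor.shape[3] - b,
                  b:tensor.shape[4] - b]

x = torch.rand(1, 50, 22, 22, 22)        # concatenated top+bottom features
conv = nn.Conv3d(50, 25, kernel_size=3)  # no padding -> spatial dims shrink by 2
y = conv(x)
print(y.shape)                            # torch.Size([1, 25, 20, 20, 20])
print(cropp_center(x, y.shape).shape)     # torch.Size([1, 50, 20, 20, 20])
```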
Neronjust2017/Bayesian-neural-networks | [
"9d7f781f5c2dfa8fadf26300b4b5b64366c939cd"
]
| [
"Experiments/BostonHousing/mc_dropout_hetero.py"
]
| [
"import json\n\nfrom sklearn.linear_model import LinearRegression,Lasso,Ridge\nfrom sklearn.datasets import load_boston\nimport os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = curPath\nfor i in range(2):\n rootPath = os.path.split(rootPath)[0]\nsys.path.append(rootPath)\n\nimport numpy as np\nimport torch\nimport time\nimport math\nfrom pandas import Series,DataFrame\nimport argparse\nfrom src.utils import mkdir\nfrom src.MC_dropout.model import *\nfrom Experiments.BostonHousing.utils import *\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\n\nif __name__ == '__main__':\n # Load data\n X, Y = load_data()\n inputs = 13\n outputs = 1\n\n # Hyper-parameters\n # pdrops = [0.005, 0.01, 0.05, 0.1]\n # taus = [0.1, 0.15, 0.2]\n # lengthscales = [1e-2, 1e-1, 1, 10]\n # lrs = [1e-3, 1e-4]\n # momentums = [0.9]\n # Ts = [1000]\n pdrops = [0.2, 0.1]\n taus = [0.1, 0.15]\n lengthscales = [1e-1, 1]\n lrs = [1e-3]\n momentums = [0.9]\n Ts = [1000]\n\n NTrainPoints = 364\n batch_size = 128\n nb_epochs = 40\n log_interval = 1\n n_splits = 15\n \n # Paths\n base_dir = './results_hetero/mc_dropout_results'\n\n # Grid search\n results = {}\n for pdrop in pdrops :\n for tau in taus:\n for lengthscale in lengthscales:\n for T in Ts:\n for lr in lrs:\n for momentum in momentums:\n\n Hps = 'Pdrop_' + str(pdrop) + '_Tau_' + str(tau) + '_Lengthscale_' + str(lengthscale) \\\n + '_Lr_' + str(lr) + '_Momentum_' + str(momentum) + '_T_' + str(T)\n print('Grid search step:' + Hps )\n\n results_dir = base_dir + '/' + Hps\n results_file = results_dir + '_results.txt'\n mkdir(results_dir)\n\n rmses = []\n picps = []\n mpiws = []\n\n for split in range(int(n_splits)):\n\n results_dir_split = results_dir + '/split_' + str(split)\n mkdir(results_dir_split)\n\n # get splited data\\dataset\\dataloder\n X_train, y_train, X_val, y_val, X_test, y_test, y_stds = get_data_splited(split, X, Y)\n trainset, valset, testset = get_dataset(X_train, y_train, X_val, y_val, X_test, y_test)\n\n use_cuda = torch.cuda.is_available()\n\n trainloader, valloader, testloader = get_dataloader(trainset, valset, testset, use_cuda,\n batch_size)\n\n results_val = base_dir + '/results_val_split_' + str(split) + '.txt'\n results_test = base_dir + '/results_test_split_' + str(split) + '.txt'\n\n # net dims\n N = X_train.shape[0]\n reg = lengthscale ** 2 * (1 - pdrop) / (2. 
* N * tau)\n\n cprint('c', '\\nNetwork:')\n net = MC_drop_net_BH_hetero(lr=lr, input_dim=inputs, output_dim=outputs, cuda=use_cuda,\n batch_size=batch_size, weight_decay=reg,n_hid=50, momentum=momentum, pdrop=pdrop)\n\n # ---- train\n epoch = 0\n cprint('c', '\\nTrain:')\n\n print(' init cost variables:')\n\n pred_cost_train = np.zeros(nb_epochs)\n rmse_train = np.zeros(nb_epochs)\n\n cost_dev = np.zeros(nb_epochs)\n rmse_dev = np.zeros(nb_epochs)\n best_rmse = np.inf\n\n nb_its_dev = 1\n\n tic0 = time.time()\n for i in range(epoch, nb_epochs):\n\n net.set_mode_train(True)\n tic = time.time()\n nb_samples = 0\n\n for x, y in trainloader:\n cost_pred = net.fit(x, y)\n pred_cost_train[i] += cost_pred\n rmse_train[i] += cost_pred\n nb_samples += len(x)\n\n pred_cost_train[i] /= nb_samples\n rmse_train[i] = (rmse_train[i] / nb_samples)**0.5\n\n toc = time.time()\n net.epoch = i\n # ---- print\n print(\"it %d/%d, Jtr_pred = %f, rmse = %f\" % (\n i, nb_epochs, pred_cost_train[i], rmse_train[i]), end=\"\")\n cprint('r', ' time: %f seconds\\n' % (toc - tic))\n\n # ---- dev\n if i % nb_its_dev == 0:\n net.set_mode_train(False)\n nb_samples = 0\n\n for j, (x, y) in enumerate(valloader):\n cost, mse, _, _, _ = net.eval(x, y, samples=T)\n cost_dev[i] += cost\n rmse_dev[i] += mse\n nb_samples += len(x)\n\n cost_dev[i] /= nb_samples\n rmse_dev[i] = (rmse_dev[i] / nb_samples)**0.5\n\n cprint('g', ' Jdev = %f, rmse = %f\\n' % (cost_dev[i], rmse_dev[i]))\n\n if rmse_dev[i] < best_rmse:\n best_rmse = rmse_dev[i]\n cprint('b', 'best val rmse')\n net.save(results_dir_split + '/theta_best_val.dat')\n\n toc0 = time.time()\n runtime_per_it = (toc0 - tic0) / float(nb_epochs)\n cprint('r', ' average time: %f seconds\\n' % runtime_per_it)\n ## ---------------------------------------------------------------------------------------------------------------------\n # results\n net.load(results_dir_split + '/theta_best_val.dat')\n cprint('c', '\\nRESULTS:')\n nb_parameters = net.get_nb_parameters()\n\n net.set_mode_train(False)\n nb_samples = 0\n cost_test = 0\n rmse_test = 0\n\n means = np.zeros((X_test.shape[0], outputs))\n stds = np.zeros((X_test.shape[0], outputs))\n noises = np.zeros((X_test.shape[0], outputs))\n\n # ---- test\n start = 0\n for j, (x, y) in enumerate(testloader):\n end = len(x) + start\n cost, mse, mean, std, noise = net.eval(x, y, samples=T)\n if use_cuda:\n mean = mean.cpu()\n std = std.cpu()\n noise = std.cpu()\n means[start:end, :] = mean\n stds[start:end, :] = std\n noises[start:end, :] = noise\n start = end\n\n cost_test += cost\n rmse_test += mse\n nb_samples += len(x)\n\n # compute PICP MPIW\n total_unc_1 = (noises ** 2 + stds ** 2) ** 0.5\n total_unc_2 = (noises ** 2 + (2 * stds) ** 2) ** 0.5\n total_unc_3 = (noises ** 2 + (3 * stds) ** 2) ** 0.5\n\n y_L = means - total_unc_2\n y_U = means + total_unc_2\n u = np.maximum(0, np.sign(y_U - y_test))\n l = np.maximum(0, np.sign(y_test - y_L))\n PICP = np.mean(np.multiply(u, l))\n MPIW = np.mean(y_U - y_L)\n\n cost_test /= nb_samples\n rmse_test = (rmse_test / nb_samples)**0.5\n\n cost_test = cost_test.cpu().data.numpy()\n rmse_test = rmse_test.cpu().data.numpy()\n\n rmses.append(rmse_test*y_stds)\n picps.append(PICP)\n mpiws.append(MPIW)\n\n best_cost_dev = np.min(cost_dev)\n best_cost_train = np.min(pred_cost_train)\n rmse_dev_min = rmse_dev[::nb_its_dev].min()\n\n print(' cost_test: %f ' % (cost_test))\n print(' rmse_test: %f' % (rmse_test))\n\n print(' cost_dev: %f (cost_train %f)' % (best_cost_dev, best_cost_train))\n print(' rmse_dev: %f' 
% (rmse_dev_min))\n print(' nb_parameters: %d (%s)' % (nb_parameters, humansize(nb_parameters)))\n print(' time_per_it: %fs\\n' % (runtime_per_it))\n\n ## Save results for plots\n np.save(results_dir_split + '/pred_cost_train.npy', pred_cost_train)\n np.save(results_dir_split + '/cost_dev.npy', cost_dev)\n np.save(results_dir_split + '/rmse_train.npy', rmse_train)\n np.save(results_dir_split + '/rmse_dev.npy', rmse_dev)\n np.save(results_dir_split + '/means.npy', means)\n np.save(results_dir_split + '/stds.npy', stds)\n\n # Storing validation results\n store_results(results_val, [Hps + ' :: ', 'rmse %f ' % (rmse_dev_min * y_stds) + '\\n'])\n\n # Storing testing results\n store_results(results_test, [Hps + ' :: ', 'rmse %f PICP %f MPIW %f' % (rmse_test * y_stds, PICP, MPIW) + '\\n'])\n\n # storing testing results for this split\n store_results(results_file,\n ['rmse %f PICP %f MPIW %f' % (rmse_test * y_stds, PICP, MPIW) + '\\n'])\n\n ## ---------------------------------------------------------------------------------------------------------------------\n ## plot figures\n plot_pred_cost(pred_cost_train, nb_epochs, nb_its_dev, cost_dev, results_dir_split)\n plot_rmse(nb_epochs, nb_its_dev, rmse_train, rmse_dev, results_dir_split)\n plot_uncertainty_noise(means, noises, [total_unc_1, total_unc_2, total_unc_3], y_test, results_dir_split)\n\n rmses = np.array(rmses)\n picps = np.array(picps)\n mpiws = np.array(mpiws)\n\n store_results(results_file,['Overall: \\n rmses %f +- %f (stddev) +- %f (std error) PICP %f MPIW %f\\n' % (\n np.mean(rmses), np.std(rmses), np.std(rmses) / math.sqrt(n_splits),\n np.mean(picps), np.mean(mpiws))])\n\n s = 'Pdrop: ' + str(pdrop) + ' Tau: ' + str(tau) + \\\n ' Lengthscale: ' + str(lengthscale) + ' Lr: ' + str(lr) + ' Momentum: ' + str(momentum) + ' T: ' + str(T)\n\n results[s] = [np.mean(rmses), np.std(rmses), np.std(rmses)/math.sqrt(n_splits), np.mean(picps), np.mean(mpiws)]\n\n # sort all the results\n store_all_results(results, base_dir)\n"
]
| [
[
"numpy.array",
"numpy.zeros",
"numpy.min",
"numpy.mean",
"numpy.save",
"numpy.multiply",
"numpy.std",
"numpy.sign",
"torch.cuda.is_available"
]
]
|
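The evaluation block in the script above summarises each test split with PICP and MPIW computed from the predictive mean, the MC-dropout standard deviation, and the learned heteroscedastic noise. A small self-contained sketch of that computation on synthetic arrays (every value here is made up for illustration):

```python
import numpy as np

rng = np.random.default_rng(0)
means = rng.normal(size=(100, 1))                      # predictive means
stds = np.full((100, 1), 0.3)                          # epistemic std from MC samples
noises = np.full((100, 1), 0.2)                        # aleatoric (learned) noise
y_test = means + rng.normal(scale=0.3, size=(100, 1))  # fake targets

total_unc = np.sqrt(noises ** 2 + (2 * stds) ** 2)     # 2-sigma band, as in the script
y_L, y_U = means - total_unc, means + total_unc
u = np.maximum(0, np.sign(y_U - y_test))               # 1 if below the upper bound
l = np.maximum(0, np.sign(y_test - y_L))               # 1 if above the lower bound
PICP = np.mean(np.multiply(u, l))                      # prediction-interval coverage
MPIW = np.mean(y_U - y_L)                              # mean prediction-interval width
print(PICP, MPIW)
```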
lorenzocorneo/surrounded-by-the-clouds | [
"536375943f8a6c23b31d6528403624d586ce1270"
]
| [
"src/plot-figure-7.py"
]
| [
"from collections import defaultdict\n\nimport matplotlib.pyplot as plt\n\nfrom boxes import generate_legend_handles, group_boxplot\nfrom commons import WIDTH_IN\n\nwith open(\"data/hops.csv\") as f:\n ret = defaultdict(list)\n for l in f.readlines()[1:]:\n split = l.rstrip(\"\\n\").split(\",\")\n # is datacenter?\n if split[-1] == \"1\":\n continue\n # Hops, ASes\n ret[split[0]].append((int(split[5]), int(split[6])))\n\ngrp = [\n (continent, [(\"Hops\", [x[0] for x in xs]), (\"ASes\", [x[1] for x in xs])])\n for continent, xs in ret.items()\n]\n\n\nfig, ax = plt.subplots(figsize=(WIDTH_IN, 1.2))\nax, positions, props = group_boxplot(grp, ax, showfliers=False)\nax.set_yticks(range(0, 26, 5))\nax.set_ylabel(\"Path length\")\nax.legend(\n handles=generate_legend_handles(props),\n handlelength=1,\n labelspacing=0.06,\n columnspacing=0.5,\n handletextpad=0.3,\n ncol=6,\n fontsize=\"small\",\n loc=\"upper right\",\n fancybox=False,\n edgecolor=\"k\",\n)\n\nplt.grid(axis=\"y\")\nplt.subplots_adjust(top=0.99, bottom=0.17, left=0.14, right=0.99)\nplt.savefig(\"figures/figure-7.pdf\")\n"
]
| [
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplots"
]
]
|
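The plotting script above builds its box-plot groups by reading `data/hops.csv`, skipping datacenter rows, and collecting `(Hops, ASes)` pairs per key. A toy sketch of that grouping with an inline stand-in for the CSV; the column layout is only inferred from the indices the script uses, so treat it as an assumption:

```python
from collections import defaultdict
import io

# Hypothetical stand-in for data/hops.csv with the columns the script indexes:
# key (split[0]), ..., hops (split[5]), ases (split[6]), is_datacenter (split[-1]).
sample = io.StringIO(
    "region,a,b,c,d,hops,ases,dc\n"
    "EU,_,_,_,_,12,4,0\n"
    "EU,_,_,_,_,9,3,1\n"   # datacenter row -> skipped
    "NA,_,_,_,_,15,5,0\n"
)

ret = defaultdict(list)
for l in sample.readlines()[1:]:
    split = l.rstrip("\n").split(",")
    if split[-1] == "1":
        continue
    ret[split[0]].append((int(split[5]), int(split[6])))

grp = [
    (key, [("Hops", [x[0] for x in xs]), ("ASes", [x[1] for x in xs])])
    for key, xs in ret.items()
]
print(grp)
```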
tomuram/mb_aligner_adisuissa | [
"325a97711c5e34e9d55584a2cf6e1a93943a0c66"
]
| [
"mb_aligner/dal/mfov.py"
]
| [
"import numpy as np\nimport os\nimport json\nfrom .tile import Tile\n\nclass Mfov(object):\n \"\"\"\n Represents a single Multibeam-Field-of-View (61 tiles) in the system\n \"\"\"\n\n def __init__(self, tiles=[], **kwargs):\n self._tiles = tiles\n\n # Initialize default values\n self._mfov_idx = None\n self._layer = None\n self._bbox = None\n #print('Creating mfov')\n\n # initialize values using kwargs\n if len(kwargs) > 0:\n if \"bbox\" in kwargs:\n self._bbox = kwargs[\"bbox\"] # [start_x, end_x, start_y, end_y] (after applying the transformations)\n if \"layer\" in kwargs:\n self._layer = kwargs[\"layer\"]\n if \"mfov_idx\" in kwargs:\n self._mfov_idx = kwargs[\"mfov\"]\n elif len(self._tiles) > 0:\n self._mfov_idx = self._tiles[0].mfov_index\n #print('Here', self._mfov_idx)\n self._layer = self._tiles[0].layer\n self._compute_bbox_from_tiles()\n #print('There', self._mfov_idx)\n\n\n @classmethod\n def create_from_tilespec(cls, tilespec):\n \"\"\"\n Creates an mfov from a given tilespec\n \"\"\"\n tiles = [Tile.create_from_tilespec(tile_ts) for tile_ts in tilespec]\n return Mfov(tiles)\n\n\n def _compute_bbox_from_tiles(self):\n \"\"\"\n Computes an mfov bounding box of all the tiles in the mfov\n \"\"\"\n if len(self._tiles) > 0:\n bboxes = [tile.bbox for tile in self._tiles]\n bboxes = [bbox for bbox in bboxes if bbox is not None] # Filter the tiles that don't have a bounding box\n if len(bboxes) > 0:\n bboxes = np.array(bboxes)\n self._bbox = [min(bboxes[:, 0]), max(bboxes[:, 1]), min(bboxes[:, 2]), max(bboxes[:, 3])]\n else:\n self._bbox = None\n\n\n @property\n def bbox(self):\n \"\"\"\n Returns the bounding box of the tile in the following format [from_x, to_x, from_y, to_y]\n \"\"\"\n return self._bbox\n\n @property\n def layer(self):\n \"\"\"\n Returns the section layer number\n \"\"\"\n return self._layer\n\n @property\n def mfov_index(self):\n \"\"\"\n Returns the mfov index the tile is from in the section\n \"\"\"\n return self._mfov_idx\n\n @property\n def tilespec(self):\n \"\"\"\n Returns a tilespec representation of the mfov\n \"\"\"\n return [tile.tilespec for tile in self._tiles]\n\n def save_as_json(self, out_fname):\n \"\"\"\n Saves the mfov as a tilespec (used for debugging).\n \"\"\"\n with open(out_fname, 'w') as out_f:\n json.dump(self.tilespec, out_f, sort_keys=True, indent=4)\n\n def tiles(self):\n '''\n A generator that iterates over all the tiles in the mfov\n '''\n for tile in self._tiles:\n yield tile\n\n def get_tile(self, tile_idx):\n '''\n Returns the tile with the given tile_idx\n '''\n for t in self.tiles():\n if t.tile_index == tile_idx:\n return t\n return None\n\n def remove_tile(self, tile_index):\n '''\n Removes a single tile from the mfov.\n '''\n to_remove_idx = None\n for t_idx, t in enumerate(self.tiles()):\n if t.tile_index == tile_index:\n to_remove_idx = t_idx\n break\n\n self._tiles.pop(to_remove_idx)\n\n"
]
| [
[
"numpy.array"
]
]
|
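`Mfov._compute_bbox_from_tiles` above reduces the per-tile bounding boxes to one enclosing box. A tiny sketch of that reduction on made-up tile boxes, using the same `[from_x, to_x, from_y, to_y]` convention:

```python
import numpy as np

# Hypothetical per-tile bounding boxes in [from_x, to_x, from_y, to_y] order.
bboxes = np.array([
    [0.0, 100.0, 0.0, 80.0],
    [90.0, 190.0, 10.0, 95.0],
    [45.0, 150.0, 70.0, 160.0],
])
# Enclosing box: min of the starts, max of the ends, per axis.
mfov_bbox = [min(bboxes[:, 0]), max(bboxes[:, 1]), min(bboxes[:, 2]), max(bboxes[:, 3])]
print(mfov_bbox)   # [0.0, 190.0, 0.0, 160.0]
```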
top-on/hackathon-kit | [
"a7c7493d9f9c86c894c7aa9b0fba8a8388bb95d4"
]
| [
"h2o-samples/classification.py"
]
| [
"\nimport pandas as pd\nimport h2o\nfrom h2o.estimators import H2OXGBoostEstimator\nfrom h2o.automl import H2OAutoML\nfrom matplotlib import pyplot\n\n# start h2o\nh2o.init() # h2o.shutdown()\n\n# load data\ndf = pd.read_csv('h2o-samples/data/iris.csv')\nhf = h2o.H2OFrame(df)\n\n# partition data\ntrain, test = hf.split_frame(ratios=[.8])\n\n# run automl\naml = H2OAutoML(max_runtime_secs=30)\naml.train(y='class',\n training_frame=train,\n validation_frame=test)\n\n# examine best models\naml.leaderboard\naml.leader\n"
]
| [
[
"pandas.read_csv"
]
]
|
angelolab/toffy | [
"4d6c50fe0dfbf1568ee3f9db2182a04dc9ac85c6"
]
| [
"toffy/streak_detection.py"
]
| [
"from matplotlib.pyplot import connect\nimport numpy as np\nimport os\nfrom typing import Union, Tuple\nfrom pathlib import Path\nfrom skimage import (\n filters,\n exposure,\n restoration,\n measure,\n draw,\n util,\n io,\n)\nfrom dataclasses import dataclass\nimport pandas as pd\nfrom functools import partial\nimport xarray as xr\n\n\n@dataclass\nclass StreakData:\n \"\"\"Contains data for correcting the streaks consisting of binary masks, dataframes with\n location and size properties, a directory for saving, and the shape / channel for mask\n generation.\n Args:\n shape (tuple): The shape of the image / fov.\n fov (str): The name of the fov being processed.\n streak_channel (str): The specific channel name used to create the masks.\n corrected_dir (Path): The directory used to save the corrected tiffs and data in.\n streak_mask (np.ndarray): The first binary mask indicating candidate streaks.\n streak_df (pd.DataFrame): A dataframe, containing the location, area, and eccentricity\n of each streak.\n filtered_streak_mask (np.ndarray): A binary mask with out the false streaks.\n filtered_streak_df (pd.DataFrame): A subset of the `streak_df` containing location, area\n and eccentricity values of the filtered streaks.\n boxed_streaks (np.ndarray): An optional binary mask containing an outline for each\n filtered streaks.\n corrected_streak_mask (np.ndarray): An optional binary mask containing the lines used for\n correcting the streaks.\n \"\"\"\n\n shape: Tuple[int, int] = None\n fov: str = None\n streak_channel: str = None\n corrected_dir: Path = None\n streak_mask: np.ndarray = None\n streak_df: pd.DataFrame = None\n filtered_streak_mask: np.ndarray = None\n filtered_streak_df: pd.DataFrame = None\n boxed_streaks: np.ndarray = None\n corrected_streak_mask: np.ndarray = None\n\n\ndef _get_save_dir(data_dir: Path, name: str, ext: str) -> Path:\n \"\"\"A helper function which generates the path where the masks and DataFrames\n are saved to.\n\n Args:\n data_dir (Path): The directory to save the binary masks and DataFrames.\n name (str): The field of the DataClass to be saved.\n ext (str): The file extension, either `csv` or `tiff`.\n\n Returns:\n Path: Returns the path where the file is saved to.\n \"\"\"\n return Path(data_dir, name + f\".{ext}\")\n\n\ndef _save_streak_data(streak_data: StreakData, name: str):\n \"\"\"Helper function for saving tiff binary masks and dataframes.\n\n Args:\n streak_data (StreakData): An instance of the StreakData Dataclass, holds all necessary\n data for streak correction.\n name (str): The field of the DataClass to be saved.\n \"\"\"\n data_dir = Path(streak_data.corrected_dir, f\"streak_data_{streak_data.streak_channel}\")\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n data = getattr(streak_data, name)\n st = partial(_get_save_dir, data_dir, name)\n\n if type(data) is np.ndarray:\n io.imsave(st(\"tiff\"), data, check_contrast=False)\n elif type(data) is pd.DataFrame:\n data.to_csv(st(\"csv\"), index=True)\n\n\ndef _save_streak_masks(streak_data: StreakData):\n \"\"\"Saves the data in StreakData as a tiff file if it's a Numpy array, and a csv if it is a\n Pandas DataFrame. 
Useful for visualization and debugging.\n\n Args:\n streak_data (StreakData): An instance of the StreakData Dataclass, holds all necessary\n data for streak correction.\n \"\"\"\n fields = [\n \"streak_mask\",\n \"streak_df\",\n \"filtered_streak_mask\",\n \"filtered_streak_df\",\n \"boxed_streaks\",\n \"corrected_streak_mask\",\n ]\n for field in fields:\n _save_streak_data(streak_data, name=field)\n\n\ndef _make_binary_mask(\n input_image: np.ndarray,\n gaussian_sigma: float = 5.00,\n gamma: float = 4.0,\n gamma_gain: float = 1.00,\n log_gain: float = 1.00,\n pmin: int = 2,\n pmax: int = 98,\n threshold: float = 0.35,\n wavelet: str = \"db2\",\n mode: str = \"soft\",\n rescale_sigma: bool = True\n) -> np.ndarray:\n \"\"\"Performs a series of denoiseing, filtering, and exposure adjustments to create a binary\n mask for the given input image.\n\n Args:\n input_image (np.ndarray): The image to perform the streak masking on.\n gaussian_sigma (float, optional): Parameter for `skimage.filters.gaussian`. Defaults to\n 5.00.\n gamma (float, optional): Parameter for `skimage.exposure.adjust_gamma`. Defaults to 3.80.\n gamma_gain (float, optional): Parameter for `skimage.exposure.adjust_gamma`. Defaults to\n 0.10.\n log_gain (float, optional): Parameter for `skimage.exposure.adjust_log`. Defaults to 1.00.\n pmin (int, optional): Lower bound for the `np.percentile` threshold, used for rescaling\n the intensity. Defaults to 2.\n pmax (int, optional): Upper bound for the `np.percentile` threshold, used for rescaling\n the intensity. Defaults to 98.\n threshold (float, optional): The lower bound for pixel values used to create a binary mask.\n Defaults to 0.35.\n wavelet (str): The type of wavelet to perform and can be any of the options\n `pywt.wavelist` outputs. Defaults to \"db2\".\n mode (str): An optional argument to choose the type of denoising performed. Its noted that\n choosing soft thresholding given additive noise finds the best approximation of the\n original image. Defaults to \"soft\".\n rescale_sigma (bool): If False, no rescaling of the user-provided `sigma` will be\n performed. The default of `True` rescales `sigma` appropriately if the image is rescaled\n internally. Defaults to \"True\".\n\n\n Returns:\n np.ndarray: The binary mask containing all of the candidate strokes.\n \"\"\"\n input_image = restoration.denoise_wavelet(\n input_image, wavelet=wavelet, mode=mode, rescale_sigma=rescale_sigma\n )\n # Rescale the intensity using percentile ranges\n pmin_v, pmax_v = np.percentile(input_image, (pmin, pmax))\n input_image = exposure.rescale_intensity(input_image, in_range=(pmin_v, pmax_v))\n\n # Laplace filter to get the streaks\n input_image = filters.laplace(input_image, ksize=3)\n input_image = exposure.rescale_intensity(input_image, out_range=(0, 1))\n\n # Smoothing\n input_image = filters.gaussian(input_image, sigma=(0, gaussian_sigma)) # (y, x)\n\n # Exposure Adjustments\n input_image = exposure.adjust_gamma(input_image, gamma=gamma, gain=gamma_gain)\n input_image = exposure.adjust_log(input_image, gain=log_gain, inv=True)\n input_image = exposure.rescale_intensity(input_image, out_range=(0, 1))\n\n # apply threshold\n binary_mask = input_image > threshold\n\n return binary_mask\n\n\ndef _make_mask_dataframe(streak_data: StreakData, min_length: int = 70) -> None:\n \"\"\"Converts the binary mask created by `_make_binary_mask` into a dataframe for\n processing. 
The streaks are labeled, pixel information (min_row, min_col, max_row, max_col)\n is evaluated and streak lengths / areas are calculated. In addition the `min_length` argument\n allows the user to filter out streaks shorter than it.\n\n Args:\n streak_data (StreakData): An instance of the StreakData Dataclass, holds all necessary\n data for streak correction.\n min_length (int): The lower threshold for filtering streaks in pixels. Defaults to 70.\n \"\"\"\n # Label all the candidate streaks\n labeled_streaks = measure.label(streak_data.streak_mask, connectivity=2, return_num=False)\n\n # if streaks detected, filter dataframe\n if len(np.unique(labeled_streaks)) > 1:\n # Gather properties of all the candidate streaks using regionprops.\n region_properties = measure.regionprops_table(\n label_image=labeled_streaks,\n cache=True,\n properties=[\n \"label\",\n \"bbox\",\n \"eccentricity\",\n \"area\",\n ],\n )\n\n # Convert dictionary of region properties to DataFrame.\n streak_data.streak_df = pd.DataFrame(region_properties)\n\n # Rename the bounding box columns.\n streak_data.streak_df.rename(\n {\n \"bbox-0\": \"min_row\",\n \"bbox-1\": \"min_col\",\n \"bbox-2\": \"max_row\",\n \"bbox-3\": \"max_col\",\n \"area\": \"length\",\n },\n axis=\"columns\",\n inplace=True,\n )\n # Give the index column a name.\n streak_data.streak_df.index.names = [\"index\"]\n\n # Filter out eccentricities that are less than 0.99999 (only keep straight lines)\n # Filter out small areas (small lines)\n eccentricity_value = 0.9999999\n streak_data.filtered_streak_df = streak_data.streak_df.query(\n \"eccentricity > @eccentricity_value and length > @min_length\"\n )\n else:\n # otherwise, make a blank df\n blank_df = pd.DataFrame({\"min_row\": [], \"min_col\": [], \"max_row\": [], \"max_col\": []})\n streak_data.filtered_streak_df = blank_df\n\n\ndef _make_filtered_mask(streak_data: StreakData) -> None:\n \"\"\"Visualization Utility. Uses the filtered streak dataframe to create a binary mask, where\n 1 indicates the pixels that will get corrected. This mask can be later saved and used for\n visualization purposes.\n\n Args:\n streak_data (StreakData): An instance of the StreakData Dataclass, holds all necessary\n data for streak correction.\n \"\"\"\n streak_data.filtered_streak_mask = np.zeros(shape=streak_data.shape, dtype=np.uint8)\n for region in streak_data.filtered_streak_df.itertuples():\n streak_data.filtered_streak_mask[\n region.min_row: region.max_row, region.min_col: region.max_col\n ] = 1\n\n\ndef _make_box_outline(streak_data: StreakData) -> None:\n \"\"\"Visualization Utility. Creates a box outline for each binary streak using the filtered\n streak dataframe. Outlines the streaks that will get corrected. This mask can be later saved\n and used for visualization purposes.\n\n Args:\n streak_data (StreakData): An instance of the StreakData Dataclass, holds all necessary\n data for streak correction.\n \"\"\"\n padded_image = np.pad(\n np.zeros(shape=streak_data.shape, dtype=np.uint8), pad_width=(1, 1), mode=\"edge\"\n )\n for region in streak_data.filtered_streak_df.itertuples():\n y, x = draw.rectangle_perimeter(\n start=(region.min_row + 1, region.min_col + 1),\n end=(region.max_row, region.max_col),\n clip=True,\n shape=streak_data.shape,\n )\n padded_image[y, x] = 1\n streak_data.boxed_streaks = util.crop(padded_image, crop_width=(1, 1))\n\n\ndef _make_correction_mask(streak_data: StreakData) -> None:\n \"\"\"Visualization Utility. 
Creates the correction mask for each binary streak using the\n filtered streak DataFrame. Marks pixels which will be used for the correction method.\n This mask can be later saved and used for visualization purposes.\n\n Args:\n streak_data (StreakData): An instance of the StreakData Dataclass, holds all necessary\n data for streak correction.\n \"\"\"\n padded_image = np.pad(\n np.zeros(shape=streak_data.shape, dtype=np.uint8), pad_width=(1, 1), mode=\"edge\"\n )\n\n for region in streak_data.filtered_streak_df.itertuples():\n padded_image[region.min_row, region.min_col + 1: region.max_col + 1] = np.ones(\n shape=(region.max_col - region.min_col)\n )\n padded_image[region.max_row + 1, region.min_col + 1: region.max_col + 1] = np.ones(\n shape=(region.max_col - region.min_col)\n )\n\n streak_data.corrected_streak_mask = util.crop(padded_image, crop_width=(1, 1))\n\n\ndef _correct_streaks(streak_data: StreakData, input_image: np.ndarray) -> np.ndarray:\n \"\"\"Corrects the streaks for the input image. Uses masks in the streak_data Dataclass.\n Performs the correction by averaging the pixels above and below the streak.\n\n Args:\n streak_data (StreakData): An instance of the StreakData Dataclass, holds all necessary\n data for streak correction.\n input_image (np.ndarray): The channel which is being corrected.\n\n Returns:\n np.ndarray: The corrected image.\n \"\"\"\n # Pad the image for edge cases.\n padded_image = np.pad(input_image.copy(), pad_width=(1, 1), mode=\"edge\")\n corrected_image = padded_image.copy()\n # Correct each streak\n for region in streak_data.filtered_streak_df.itertuples():\n corrected_image[region.max_row, region.min_col: region.max_col] = _correct_mean_alg(\n padded_image,\n region.min_row,\n region.max_row,\n region.min_col,\n region.max_col,\n )\n # Crop and return the 'unpadded' image.\n return util.crop(corrected_image, crop_width=(1, 1), copy=True)\n\n\ndef _correct_mean_alg(\n input_image: np.ndarray, min_row: int, max_row: int, min_col: int, max_col: int\n) -> np.ndarray:\n \"\"\"Performs streak-wise correction by: setting the value of each pixel in the streak to the\n mean of pixel above and below it.\n\n Args:\n input_image (np.ndarray): The channel to be corrected.\n min_row (int): The minimum row index of the streak. The y location where the streak\n starts.\n max_row (int): The maximum row index of the streak. The y location where the streak ends.\n min_col (int): The minimum column index of the streak. The x location where the streak\n starts.\n max_col (int): The maximum column index of the streak. 
The x location where the streak\n ends.\n\n Returns:\n np.ndarray: Returns the corrected streak.\n \"\"\"\n streak_corrected: np.ndarray = np.mean(\n [\n # Row above\n input_image[min_row, min_col + 1: max_col + 1],\n # Row below\n input_image[max_row + 1, min_col + 1: max_col + 1],\n ],\n axis=0,\n dtype=input_image.dtype,\n )\n\n return streak_corrected\n\n\ndef save_corrected_channels(\n streak_data: StreakData,\n corrected_channels: xr.DataArray,\n data_dir: Path,\n save_streak_data=False,\n) -> None:\n \"\"\"Saves the corrected images in a subdirectory of `fov_dir`.\n\n Args:\n streak_data (StreakData): An instance of the StreakData Dataclass, holds all necessary\n data for streak correction.\n corrected_channels (xr.DataArray): The DataArray continaing the set of corrected channels.\n data_dir (Path): A directory containing the fov and all it's channels for correction.\n save_streak_data (bool): Saves the binary masks and dataframes contained in StreakData.\n\n \"\"\"\n # Create the directory to store the corrected tiffs\n streak_data.corrected_dir = Path(data_dir, streak_data.fov + \"-corrected\")\n if not os.path.exists(streak_data.corrected_dir):\n os.makedirs(streak_data.corrected_dir)\n\n # Save the corrected tiffs\n for channel in corrected_channels.channels.values:\n img: np.ndarray = corrected_channels.loc[:, :, channel].values\n fp = Path(streak_data.corrected_dir, channel + \".tiff\")\n io.imsave(fp, img, check_contrast=False)\n\n # Save streak masks\n if save_streak_data:\n _save_streak_masks(streak_data=streak_data)\n\n\ndef streak_correction(\n fov_data: xr.DataArray,\n streak_channel: str = \"Noodle\",\n visualization_masks: bool = False,\n) -> Tuple[xr.DataArray, StreakData]:\n \"\"\"Takes an DataArray representation of a fov and a user specified image for streak detection.\n Once all the streaks have been detected on that image, they are corrected via an averaging\n method. The function can also returns a DataClass containing various binary masks and\n dataframes which were used for filtering and correction when `visualization_masks` is True.\n\n Args:\n fov_data (xr.DataArray): The data structure containing all of the channels to be processed\n and corrected.\n streak_channel (str, optional): The name of the channel used (without the file extension)\n for identifying the streaks. Defaults to \"Noodle\".\n visualization_masks (bool, optional): If `True`, adds binary masks for visualization to\n the StreakData Dataclass which gets returned. 
Defaults to \"False\".\n Returns:\n Tuple[xr.DataArray, StreakData]: A tuple of the DataArray housing the corrected images,\n and the streak data containing masks and dataframes for analysis and visualization.\n \"\"\"\n\n # Initialize the streak DataClass\n streak_data = StreakData()\n streak_data.streak_channel = streak_channel\n streak_data.fov = fov_data.fovs.values[0]\n\n fov_data = fov_data[0, ...]\n\n # Get the correct channel for mask generation.\n with fov_data.loc[:, :, streak_channel] as channel_image:\n streak_data.shape = channel_image.shape\n # Create and filter the binary masks\n streak_data.streak_mask = _make_binary_mask(input_image=channel_image)\n _make_mask_dataframe(streak_data=streak_data)\n\n # Get the file names.\n channel_fn = fov_data.channels.values.tolist()\n\n # Initialize the corrected image fov dimensions.\n fov_dim_size: int = len(channel_fn)\n row_size, col_size = streak_data.shape\n cor_img_data = np.zeros(shape=(row_size, col_size, fov_dim_size), dtype=fov_data.dtype)\n\n # Correct streaks and add them to the np.array\n for idx, channel in enumerate(fov_data.channels.values):\n input_channel = fov_data.loc[:, :, channel]\n cor_img_data[:, :, idx] = _correct_streaks(\n streak_data=streak_data, input_image=input_channel\n )\n\n # Create xarray from np.array\n corrected_channels = xr.DataArray(\n data=cor_img_data,\n coords=[range(row_size), range(col_size), fov_data.channels.values],\n dims=[\"rows\", \"cols\", \"channels\"],\n )\n\n # Add mask information / visualization masks\n if visualization_masks:\n _make_box_outline(streak_data=streak_data)\n _make_correction_mask(streak_data=streak_data)\n _make_filtered_mask(streak_data=streak_data)\n\n return (corrected_channels, streak_data)\n"
]
| [
[
"numpy.zeros",
"numpy.percentile",
"pandas.DataFrame",
"numpy.ones",
"numpy.mean",
"numpy.unique"
]
]
|
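`_correct_mean_alg` above repairs a one-pixel-high streak by averaging the row directly above and the row directly below it. A simplified sketch of just that averaging step on a toy array (the caller's edge padding and column offsets are omitted here, so this is the idea rather than a verbatim re-run of the function):

```python
import numpy as np

image = np.array([
    [12., 14., 16.],   # row above the streak
    [99., 99., 99.],   # the streak itself
    [20., 22., 24.],   # row below the streak
])
# Replace each streak pixel with the mean of the pixels above and below it.
image[1, :] = np.mean([image[0, :], image[2, :]], axis=0, dtype=image.dtype)
print(image[1])        # [16. 18. 20.]
```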
mansum6/AdelaiDet | [
"7fa8252aa5e785175606a79b08b364f8ac11efa7"
]
| [
"demo/maskImages.py"
]
| [
"from detectron2.evaluation import COCOEvaluator, inference_on_dataset\nfrom detectron2.data import build_detection_test_loader\nfrom detectron2.data.detection_utils import read_image\nfrom detectron2.engine.defaults import DefaultPredictor\nfrom detectron2.data import MetadataCatalog\nfrom adet.config import get_cfg\nfrom detectron2.utils.visualizer import ColorMode, Visualizer\n\nfrom matplotlib.image import imread\nimport scipy.misc\nfrom PIL import Image \nimport numpy as np\nimport argparse\nimport os\nimport tqdm\nimport torch\nimport cv2\nfrom shutil import copyfile\n\nimport multiprocessing as mp\n\n\ndef setup_cfg(args):\n # load config from file and command-line arguments\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # Set score_threshold for builtin models\n cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold\n cfg.MODEL.FCOS.INFERENCE_TH_TEST = args.confidence_threshold\n cfg.MODEL.MEInst.INFERENCE_TH_TEST = args.confidence_threshold\n cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold\n cfg.freeze()\n return cfg\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Detectron2 Demo\")\n parser.add_argument(\n \"--config-file\",\n default=\"configs/quick_schedules/e2e_mask_rcnn_R_50_FPN_inference_acc_test.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--webcam\", action=\"store_true\", help=\"Take inputs from webcam.\")\n parser.add_argument(\"--video-input\", help=\"Path to video file.\")\n parser.add_argument(\"--input\", nargs=\"+\", help=\"A list of space separated input images\")\n\n parser.add_argument(\n \"--output\",\n help=\"A file or directory to save output visualizations. 
\"\n \"If not given, will show output in an OpenCV window.\",\n )\n\n parser.add_argument(\n \"--confidence-threshold\",\n type=float,\n default=0.3,\n help=\"Minimum score for instance predictions to be shown\",\n )\n parser.add_argument(\n \"--opts\",\n help=\"Modify config options using the command-line 'KEY VALUE' pairs\",\n default=[],\n nargs=argparse.REMAINDER,\n )\n return parser\ndef getSubImage(rect, src):\n # Get center, size, and angle from rect\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n width = int(rect[1][0])\n height = int(rect[1][1])\n\n src_pts = box.astype(\"float32\")\n dst_pts = np.array([[0, height-1],\n [0, 0],\n [width-1, 0],\n [width-1, height-1]], dtype=\"float32\")\n M = cv2.getPerspectiveTransform(src_pts, dst_pts)\n return cv2.warpPerspective(src, M, (width, height))\n \ndef cropper(org_image_path, mask_array):\n num_instances = mask_array.shape[0]\n mask_array = np.moveaxis(mask_array, 0, -1)\n mask_array_instance = []\n img = imread(str(org_image_path))\n output = np.zeros_like(img)\n for i in range(num_instances):\n mask_array_instance.append(mask_array[:, :, i:(i+1)])\n #output = np.where(mask_array_instance[i] == False, 0, (np.where(mask_array_instance[i] == True, 255, img)))\n output=np.where(mask_array_instance[i] == True, 255,output)\n #print(output[:,:,0].shape)\n \n #print(img.shape)\n #im=Image.fromarray(np.where((output == 255, 0,img)))\n #im = Image.fromarray(output[:,:,0].astype(np.uint8))\n imgo = cv2.imread(org_image_path)\n masko=output.astype(np.uint8)\n mask_out=cv2.subtract(masko,imgo)\n mask_out=cv2.subtract(masko,mask_out)\n gray = cv2.cvtColor(masko, cv2.COLOR_BGR2GRAY)\n # find contours / rectangle\n contours = cv2.findContours(gray,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]\n c = max(contours, key = cv2.contourArea)\n rect = cv2.minAreaRect(c)\n\n # crop\n img_cropped = getSubImage(rect, mask_out)\n #cv2.imwrite('color_img.jpg', img_cropped)\n #new_image=cv2.bitwise_and(imgo, imgo, mask = masko)\n '''\n cv2.imwrite('color_img.jpg', mask_out)\n \n if im.mode != 'RGBA':\n im = im.convert('RGBA')\n img = Image.open(org_image_path)\n imcom = Image.composite(img, im, im)\n imcom.save(\"./output/masks/1.png\")\n rgb_im = imcom.convert('RGB') '''\n \n return img_cropped\n\n \nif __name__ == \"__main__\":\n #mp.set_start_method(\"spawn\", force=True)\n args = get_parser().parse_args()\n\n\n cfg = setup_cfg(args)\n\n #demo = VisualizationDemo(cfg)\n\n if args.input:\n if os.path.isdir(args.input[0]):\n args.input = [os.path.join(args.input[0], fname) for fname in os.listdir(args.input[0])]\n elif len(args.input) == 1:\n args.input = glob.glob(os.path.expanduser(args.input[0]))\n assert args.input, \"The input path(s) was not found\"\n for path in tqdm.tqdm(args.input, disable=not args.output):\n # use PIL, to be consistent with evaluation\n file_name = os.path.basename(path)\n location = os.path.dirname(path)\n try:\n print(path)\n im = read_image(path, format=\"BGR\")\n #******* \n \n # Inference with a keypoint detection model\n predictor = DefaultPredictor(cfg)\n\n outputs = predictor(im)\n preds = outputs[\"instances\"].pred_classes.to(\"cpu\").tolist()\n # this will get the names of our classes in dataset..\n labels_ = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).thing_classes\n\n # Wanting to only extract chair and person\n retain_ = []\n # retain_.append(labels_.index(\"chair\"))\n retain_.append(labels_.index(\"person\"))\n \n\n # retaining only retain_ from preds\n my_masks = [x for x in preds if x in retain_]\n my_masks = 
torch.tensor(my_masks)\n \n \n outputs[\"instances\"].pred_classes = my_masks \n\n #print(outputs[\"instances\"].pred_masks.to(\"cpu\").numpy())\n #v = Visualizer(im[:,:,::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)\n #out = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n #cv2_imshow(out.get_image()[:, :, ::-1])\n\n\n\n\n try:\n mask_array = outputs[\"instances\"].pred_masks.to(\"cpu\").numpy()\n #print(outputs[\"instances\"].pred_keypoints.to(\"cpu\").numpy().shape)\n #print(mask_array.shape)\n\n #print(mask_array)\n #cv2.imwrite('mask.png', mask_array)\n #cropper('1.png', mask_array)\n if args.output:\n if os.path.isdir(args.output):\n assert os.path.isdir(args.output), args.output\n out_filename = os.path.join(args.output, os.path.basename(path))\n else:\n assert len(args.input) == 1, \"Please specify a directory with args.output\"\n out_filename = args.output\n im=cropper(path, mask_array)\n print(\"Saving\")\n cv2.imwrite(out_filename, im)\n #im.save(out_filename)\n else:\n cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])\n except AttributeError:\n print(\"No mask in this image\")\n copyfile(path, args.output+'/noMask/'+file_name)\n except AssertionError:\n print(\"No person in this file\")\n copyfile(path, args.output+'/noPerson/'+file_name)\n \n"
]
| [
[
"numpy.zeros_like",
"numpy.array",
"numpy.where",
"torch.tensor",
"numpy.moveaxis",
"numpy.int0"
]
]
|
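`getSubImage` in the demo above crops the minimum-area rotated rectangle around the detected mask via a perspective warp. A self-contained sketch of the same idea on a synthetic mask; it assumes OpenCV 4.x, where `cv2.findContours` returns `(contours, hierarchy)`, matching the `[0]` indexing used in the file, and the canvas/rectangle values are arbitrary.

```python
import cv2
import numpy as np

# Synthetic stand-in for the predicted mask: a filled rotated rectangle.
canvas = np.zeros((200, 200, 3), dtype=np.uint8)
rot_box = cv2.boxPoints(((100, 100), (120, 40), 30)).astype(np.int32)
cv2.fillPoly(canvas, [rot_box], (255, 255, 255))

gray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
contours = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
rect = cv2.minAreaRect(max(contours, key=cv2.contourArea))

# Same perspective-crop recipe as getSubImage in the row above.
box = cv2.boxPoints(rect).astype("float32")
width, height = int(rect[1][0]), int(rect[1][1])
dst = np.array([[0, height - 1], [0, 0], [width - 1, 0], [width - 1, height - 1]],
               dtype="float32")
M = cv2.getPerspectiveTransform(box, dst)
cropped = cv2.warpPerspective(canvas, M, (width, height))
print(cropped.shape)   # the rotated patch extracted upright (axes may swap with the angle)
```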
popura/blinky | [
"d9a81c0c0b8789ce9f0d234d1c768dd24f88d257"
]
| [
"python/blinkytools/io.py"
]
| [
"# Copyright 2020 Robin Scheibler\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis file defines the routine to read and write files\ncreated with BlinkyViewer\n\n\nThe underlying format is [MessagePack](https://github.com/msgpack/msgpack-python)\nwhich is suitable to efficiently save binary data.\n\n\nThe encoder/decoder were modelled after the\n[msgpack-numpy package](https://github.com/lebedov/msgpack-numpy).\n\"\"\"\nimport msgpack\nimport numpy as np\nfrom datetime import datetime\nfrom .version import __version__\nfrom .utils import pixel_to_str\n\n\ndef encoder(obj, chain=None):\n \"\"\" Custom encoder to store numpy.ndarray in MessagePack format \"\"\"\n\n if isinstance(obj, np.ndarray):\n\n # Make sure this is not a structured array type\n assert obj.dtype.kind != \"V\", \"Unsupported non-numeric type\"\n\n return {\n \"__nd__\": True, # indicate this is a numpy ndarray\n \"shape\": obj.shape,\n \"dtype\": obj.dtype.str,\n \"data\": obj.tobytes(),\n }\n else:\n return obj if chain is None else chain(obj)\n\n\ndef decoder(obj, chain=None):\n \"\"\" Custom decoder to recover numpy.ndarray saved in MessagePack format \"\"\"\n\n try:\n if \"__nd__\" in obj:\n return np.frombuffer(obj[\"data\"], dtype=np.dtype(obj[\"dtype\"])).reshape(\n obj[\"shape\"]\n )\n else:\n return obj if chain is None else chain(obj)\n\n except KeyError:\n print(\"error!\")\n # This is just a security in case an unrelated document\n # contains \"__nd__\" as a key\n return obj if chain is None else chain(obj)\n\n\nclass BlinkyFile(object):\n def __init__(self, locations, data, fps, version=None, creation=None, **metadata):\n self.locations = locations\n self.data = data\n\n assert len(self.locations) == self.data.shape[1], (\n \"Error when creating the Blinky file object. 
\"\n \"The number of locations should correspond to the \"\n \"number of signals recorded.\"\n )\n\n self.fps = fps\n self.version = version if version is not None else __version__\n self.creation = (\n creation\n if creation is not None\n else datetime.now().astimezone().isoformat()\n )\n self.metadata = metadata\n\n def dump(self, filename):\n \"\"\" Saves the object as a MessagePack file \"\"\"\n with open(filename, \"wb\") as f:\n msgpack.pack(self.__dict__, f, default=encoder, use_bin_type=True)\n\n @classmethod\n def load(cls, filename):\n \"\"\" Load a BlinkyFile object from MessagePack format \"\"\"\n with open(filename, \"rb\") as f:\n content = msgpack.unpack(f, object_hook=decoder, raw=False)\n return cls(**content)\n\n\ndef file_preview():\n \"\"\"\n Preview a Blinky file\n \"\"\"\n import matplotlib.pyplot as plt\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Preview a Blinky file\")\n parser.add_argument(\"filename\", type=str, help=\"File name\")\n args = parser.parse_args()\n\n bfile = BlinkyFile.load(args.filename)\n data = bfile.data.astype(np.float)\n\n if len(data.shape) == 5:\n ## This is a color file, we will average the colors\n data = np.mean(data, axis=-1)\n\n # Now, for the purpose of preview, we average the boxes\n data = np.mean(data, axis=(-2, -1))\n\n # Make the time axis\n time = np.arange(data.shape[0]) / bfile.fps\n\n # Make the plot\n fig, ax = plt.subplots(1, 1)\n ax.plot(time, data)\n ax.set_xlabel(\"Time [s]\")\n ax.set_ylabel(\"Pixel value\")\n ax.set_title(f\"File creation {bfile.creation}\")\n ax.legend([pixel_to_str(p) for p in bfile.locations])\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n file_preview()\n"
]
| [
[
"matplotlib.pyplot.subplots",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.dtype"
]
]
|
deepkrishna/bullet12 | [
"7356421848b89e37ac91f09f2e3197c9dccf1ba4"
]
| [
"examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedPendulumBulletEnv_v0_2017may.py"
]
| [
"#add parent dir to find package. Only needed for source code build, pip install doesn't need it.\nimport os, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0,parentdir)\n\nimport gym\nimport numpy as np\nimport pybullet as p\nimport pybullet_envs\nimport time\n\ndef relu(x):\n return np.maximum(x, 0)\n\nclass SmallReactivePolicy:\n \"Simple multi-layer perceptron policy, no internal state\"\n def __init__(self, observation_space, action_space):\n assert weights_dense1_w.shape == (observation_space.shape[0], 64)\n assert weights_dense2_w.shape == (64, 32)\n assert weights_final_w.shape == (32, action_space.shape[0])\n\n def act(self, ob):\n x = ob\n x = relu(np.dot(x, weights_dense1_w) + weights_dense1_b)\n x = relu(np.dot(x, weights_dense2_w) + weights_dense2_b)\n x = np.dot(x, weights_final_w) + weights_final_b\n return x\n\ndef main():\n print(\"create env\")\n env = gym.make(\"InvertedPendulumBulletEnv-v0\")\n env.render(mode=\"human\")\n pi = SmallReactivePolicy(env.observation_space, env.action_space)\n\n while 1:\n frame = 0\n score = 0\n restart_delay = 0\n obs = env.reset()\n print(\"frame\")\n while 1:\n time.sleep(1./60.)\n a = pi.act(obs)\n obs, r, done, _ = env.step(a)\n score += r\n frame += 1\n still_open = env.render(\"human\")\n if still_open==False:\n return\n if not done: continue\n if restart_delay==0:\n print(\"score=%0.2f in %i frames\" % (score, frame))\n restart_delay = 60*2 # 2 sec at 60 fps\n else:\n restart_delay -= 1\n if restart_delay==0: break\n\nweights_dense1_w = np.array([\n[ +0.8621, +0.3260, +0.0986, -0.1225, +0.2038, -0.8051, -0.7498, +0.1905, -0.3418, +0.5002, -0.1093, +0.0285, +0.3480, -0.1596, -0.1781, +0.3643, -0.4283, -0.3715, -0.1571, +0.3531, +0.0934, -0.2215, -0.3085, +0.9581, +0.2485, -0.6232, -0.3175, +0.9771, +0.3651, -0.8850, -0.4212, -0.0301, +0.0432, +0.3390, +0.7537, +0.1649, -0.0128, -0.1374, +0.3793, +1.0430, +0.8043, -0.9001, +0.4334, -0.1243, +1.2373, +0.1890, +0.3333, -0.0520, +0.1654, +0.2521, -0.0168, +0.8439, -0.6960, +0.1884, +0.0991, +0.5242, -0.6837, +0.6844, -0.2593, -0.3298, +0.2212, +0.0281, +0.2608, +0.6527],\n[ -0.9350, -0.2122, +0.0162, +0.5306, -0.2914, -0.8573, +0.2552, +0.7069, +0.7862, -0.0315, -1.0844, +0.2707, +0.5102, -1.1359, +0.3066, +0.0357, +0.1833, -0.1946, +0.0948, +0.6685, -0.6101, +0.4774, -0.3017, +0.3823, -0.2835, -0.6760, +1.2963, +0.4466, -0.7132, -0.9109, -0.0589, -0.8726, +0.6972, -0.2256, -0.0286, +0.4646, -0.5113, -0.1692, +0.7638, +0.2274, -0.5734, +0.7430, +0.9680, +0.7809, -0.2457, -0.4952, +0.0197, -0.6428, +0.2367, -0.5887, -0.5167, +0.2299, -0.5853, -0.4101, +0.9042, +0.0913, +0.5774, +0.2756, +0.2436, -0.6068, -0.2232, -0.1415, -0.5094, -0.1012],\n[ +0.0983, -0.3266, +0.2611, +0.0664, +0.6222, +0.0773, -0.2516, -0.4416, -0.3770, +0.0535, +0.3391, -0.7475, +0.5874, -0.0405, -0.2058, -0.5957, +0.2659, -0.8477, -0.5814, -0.0494, -0.1678, +0.2650, -0.4039, +0.1414, -0.6635, +0.0447, +0.2932, +0.1167, +0.1195, +0.0669, -0.4223, +0.1196, +0.0553, -0.7123, -0.4011, +0.3557, -0.4503, +0.7047, -0.4471, +0.0807, +0.3926, -0.1427, +0.4355, -0.3678, +0.3453, +0.1597, -0.3076, +0.4689, +0.3128, -0.7050, +0.6505, +0.3427, +0.1981, +0.1190, +0.2554, +0.8283, +0.1647, -0.4257, +0.1481, +0.4361, -0.5497, -0.6114, -0.0138, +0.0932],\n[ +0.1866, +0.6408, -1.8201, +1.0946, +0.7742, -0.7651, +0.1082, +0.6842, +0.3794, +0.3547, -0.8172, -0.0921, -0.6736, +1.0251, -0.9618, -0.6869, +1.8465, 
+0.2425, +0.7910, +1.0009, -0.8031, +1.6697, -0.8962, +0.1873, +0.4960, -0.6812, +0.6860, +1.1932, -0.7019, +0.4028, +0.4841, +0.6497, +1.6490, -0.5464, +0.7060, +1.8087, -0.6118, -0.7955, -0.3797, -1.2048, +1.2356, -0.6141, +1.2502, +0.5641, -0.1019, -1.7516, -0.1134, -0.6719, +1.5014, -0.2718, -0.5933, +0.1714, -1.3590, -0.3656, +1.0083, -0.8511, -0.5597, -0.4446, -1.7158, -0.0851, +0.3089, +0.0967, -1.0121, +0.3048],\n[ +0.3329, +0.5382, +0.1585, +0.8205, +0.5510, +0.2796, -0.7120, +0.3434, +0.2931, +1.2275, +0.4191, -0.6828, -0.5091, +0.8408, -0.3101, -0.5183, +0.2651, +0.2073, -0.1383, +0.6539, -0.2167, +0.7798, -0.5690, +0.3750, +0.4358, +0.6537, -0.2202, -0.0563, +0.6605, +0.4599, -0.5327, +0.6610, +0.8387, +0.1887, -0.2593, +0.7904, -0.3567, +0.4121, +0.4378, -0.2935, +0.1291, +0.0021, -0.3416, -0.5920, -0.2895, -0.4610, +0.7380, -0.6322, +0.6738, -0.1378, -0.3304, -0.2894, -0.3582, -0.8311, +0.2660, -0.2079, -0.1765, -0.6825, -0.1754, -0.4455, +0.7202, -0.8177, -0.9900, -0.7425]\n])\n\nweights_dense1_b = np.array([ +0.0009, -0.2179, -0.0299, +0.2069, -0.0099, +0.0907, +0.0271, +0.1957, +0.0185, +0.1671, -0.0699, -0.0332, -0.0244, +0.0022, +0.1877, -0.0801, +0.0235, -0.0097, -0.0088, +0.1961, -0.1055, +0.0605, -0.0913, +0.0884, -0.0638, +0.0229, -0.1101, +0.1966, -0.0042, +0.0221, -0.0966, +0.1554, +0.1623, -0.0454, +0.1068, +0.0114, +0.0544, +0.0201, +0.0257, +0.0637, +0.0761, +0.2120, +0.0225, +0.2023, -0.0931, +0.0585, -0.2253, -0.0302, +0.0682, +0.0000, -0.1215, -0.1105, +0.1376, +0.0583, -0.1706, -0.0262, -0.0897, +0.0728, +0.0787, +0.0912, -0.0964, -0.0959, -0.0195, +0.0232])\n\nweights_dense2_w = np.array([\n[ +0.0089, +0.2241, -0.0391, +0.1459, -0.0854, -0.0878, +0.2829, -0.1620, -0.1694, -0.5211, +0.0155, -0.1298, -0.0629, +0.1074, +0.0150, -0.3583, +0.0427, +0.1813, +0.2140, -0.4230, +0.1577, +0.1223, -0.0096, +0.0183, -0.1038, -0.5612, -0.0614, -0.0820, -0.0057, -0.2471, +0.0355, -0.1525],\n[ +0.1555, -0.2934, +0.2690, -0.3218, +0.0101, -0.1188, -0.1798, -0.1405, +0.2701, -0.0972, +0.2338, -0.0122, -0.2254, -0.3225, -0.0268, -0.0829, +0.4085, +0.0691, -0.1448, -0.0429, -0.2750, -0.2479, +0.0396, +0.0427, +0.0205, -0.1462, -0.1481, +0.1365, -0.0903, +0.0094, +0.3665, +0.1163],\n[ +0.0119, -0.3100, +0.1201, -0.2257, +0.1246, -0.1335, -0.3369, -0.0408, +0.3145, +0.2030, +0.1506, +0.0899, -0.1192, -0.2429, +0.0356, +0.0634, -0.0706, +0.1119, -0.0402, +0.1011, +0.1281, +0.4318, -0.4644, +0.0039, +0.0932, -0.0521, -0.1528, +0.1946, -0.0921, -0.0646, -0.0241, +0.1598],\n[ -0.1007, +0.3939, -0.2066, +0.0752, -0.1709, -0.0286, +0.0196, +0.1853, -0.3619, -0.0449, +0.0334, -0.2673, +0.0640, +0.3055, -0.1184, -0.4550, +0.0951, -0.2168, +0.1502, -0.4816, +0.1392, -0.3708, -0.0849, -0.4331, -0.0800, -0.0967, +0.1334, -0.3169, -0.0004, -0.3002, -0.0841, -0.1763],\n[ -0.0492, +0.0308, +0.0824, +0.0568, -0.0038, +0.3196, +0.5089, +0.0391, -0.1373, -0.1579, +0.0219, -0.2990, -0.0113, -0.2136, -0.0240, +0.1241, -0.1723, -0.0064, -0.0213, -0.2213, -0.0996, -0.0333, -0.4110, -0.2074, +0.0427, +0.0323, -0.0920, -0.1846, -0.1037, -0.0381, -0.0763, +0.0875],\n[ +0.0965, -0.1536, -0.0270, -0.0834, +0.0270, +0.0908, -0.0257, -0.1284, +0.1994, +0.2317, +0.0193, +0.0493, -0.0723, -0.2748, +0.0248, -0.0021, -0.0483, +0.0610, -0.0056, -0.0575, +0.0930, +0.0749, -0.2599, +0.0223, +0.0050, -0.0569, -0.6755, +0.2190, +0.0009, +0.1493, -0.1822, +0.0763],\n[ -0.0435, +0.3829, -0.2358, +0.3554, -0.1800, +0.0008, -0.0282, -0.0139, -0.2745, -0.2293, -0.4456, +0.1709, +0.0687, -0.0696, -0.0877, -0.0978, 
-0.0620, -0.4380, +0.2052, -0.1479, +0.0971, -0.0031, +0.0783, -0.0749, -0.2695, -0.0151, -0.0066, +0.0592, -0.0088, -0.0507, -0.0167, -0.2891],\n[ -0.1797, -0.1446, -0.0609, -0.2840, +0.1933, +0.0366, -0.3077, -0.0018, -0.1564, +0.0283, +0.1447, +0.2110, -0.0047, -0.2123, +0.0041, +0.0171, +0.2826, +0.1549, -0.1211, +0.1360, +0.1473, +0.1541, -0.1583, +0.0955, -0.1047, +0.0530, +0.0667, +0.1454, -0.0860, +0.0602, +0.1970, +0.0716],\n[ +0.0119, +0.1858, -0.1746, +0.0911, -0.0948, -0.0898, -0.0680, -0.2266, -0.1098, +0.0161, +0.0265, +0.1100, -0.3467, -0.0128, -0.2249, +0.0344, +0.1421, -0.1222, -0.0196, -0.1188, +0.0428, -0.2318, +0.0998, +0.1017, +0.0298, -0.1391, +0.1229, +0.1193, +0.0565, +0.1296, +0.0939, -0.0234],\n[ +0.1817, +0.2432, -0.2712, +0.0668, -0.1836, +0.0232, -0.0793, +0.0161, -0.1585, -0.3731, -0.0243, -0.1066, +0.0928, -0.0499, -0.0692, -0.3354, +0.0754, +0.0468, -0.2522, -0.7501, +0.0235, -0.5134, -0.3031, -0.1907, -0.2166, -0.1713, -0.0422, +0.0831, +0.0664, -0.0462, +0.1627, -0.4927],\n[ -0.0342, +0.2310, +0.2736, -0.0703, +0.1941, -0.0428, -0.0868, -0.2146, +0.1371, +0.0117, +0.0218, +0.0133, -0.0416, +0.1012, +0.1689, +0.3113, +0.0199, +0.1176, +0.0256, +0.0907, +0.0622, +0.3312, -0.0225, -0.0187, +0.2089, +0.1381, -0.2949, +0.1525, -0.0514, -0.1416, -0.0381, -0.0133],\n[ -0.0885, +0.3841, -0.3811, +0.1388, -0.1801, -0.0434, +0.1371, -0.0393, +0.2549, -0.4207, -0.2308, +0.0187, -0.0975, +0.2137, -0.0840, -0.0491, +0.0424, +0.0060, +0.1007, +0.0315, +0.3005, +0.0501, +0.0516, -0.0521, -0.0100, +0.0984, +0.3092, +0.0031, -0.0380, +0.2344, +0.0808, -0.0694],\n[ -0.0631, +0.0290, +0.1733, -0.0555, +0.1311, -0.0812, +0.1056, -0.1663, -0.1272, +0.2717, +0.0247, +0.0730, -0.3714, +0.0042, -0.0490, +0.0222, -0.0429, -0.1618, +0.1476, +0.1699, -0.1660, +0.1571, -0.0225, +0.1582, +0.1622, -0.0721, -0.1198, +0.1388, -0.1661, +0.0103, -0.1386, +0.0883],\n[ +0.0306, +0.1041, -0.2540, +0.0423, +0.1098, -0.0204, +0.1478, +0.1917, +0.1102, +0.0045, -0.0263, +0.0818, -0.0245, -0.0047, -0.2407, -0.6658, +0.0834, +0.0400, +0.1785, -0.5141, +0.3379, -0.5638, -0.0012, -0.2549, -0.4172, -0.2134, -0.3793, -0.0736, -0.3442, +0.1044, -0.0489, -0.2967],\n[ -0.0446, -0.1153, -0.0839, +0.0948, +0.3570, -0.0520, -0.1016, -0.0265, +0.4342, +0.2325, +0.1763, -0.2663, -0.0676, -0.0759, +0.0654, +0.2983, +0.1185, -0.0233, -0.5232, +0.1075, -0.3284, +0.2703, +0.2164, +0.0092, +0.2988, +0.1956, +0.0582, +0.3342, +0.0949, -0.1936, -0.0465, +0.4223],\n[ +0.0737, -0.0069, -0.1301, +0.3047, -0.2603, +0.0369, -0.2049, +0.0378, -0.1846, -0.3474, -0.1353, +0.0965, +0.0956, -0.0692, -0.0440, -0.1767, -0.1616, -0.2183, +0.1853, -0.0618, +0.1210, -0.2178, +0.1066, -0.3849, -0.2628, +0.1444, +0.2814, -0.2963, +0.0673, +0.0983, +0.0442, +0.0020],\n[ -0.0978, +0.2645, -0.3750, +0.2824, -0.3906, -0.0070, +0.1920, +0.0911, -0.0510, -0.1050, -0.2411, -0.2135, +0.0784, +0.3348, -0.0396, -0.4209, -0.0686, -0.2212, +0.3039, -0.4649, -0.0692, -0.5387, +0.0479, -0.4205, -0.2557, -0.1031, +0.1378, -0.3875, -0.1900, -0.0253, +0.1212, -0.4374],\n[ -0.1067, +0.1545, +0.2016, -0.0620, -0.1419, -0.0661, -0.1224, -0.0560, +0.1045, -0.2062, -0.2551, +0.2440, -0.1116, +0.1544, -0.2324, +0.0999, -0.1832, -0.1226, -0.1774, +0.0629, -0.1170, -0.1375, +0.0839, +0.2029, +0.0551, +0.0359, +0.0967, +0.2290, -0.0312, -0.1228, +0.2831, +0.1785],\n[ -0.1420, +0.1163, +0.0488, -0.0011, -0.1311, -0.1558, -0.0766, -0.0088, +0.1877, -0.1547, +0.1304, +0.0347, +0.1132, +0.2750, -0.0574, +0.0080, -0.2256, -0.0951, +0.1987, +0.2256, +0.0270, 
-0.0155, +0.0636, +0.0372, +0.2483, -0.1469, -0.2010, -0.0994, -0.1731, +0.0224, +0.0085, -0.1891],\n[ +0.1037, +0.0015, -0.1525, -0.0444, -0.3130, -0.0318, +0.2370, -0.1492, -0.4707, -0.0023, +0.0884, +0.1722, -0.0421, +0.0858, -0.1036, -0.5701, +0.1249, -0.2643, -0.0203, -0.1380, +0.0973, -0.2060, +0.1806, +0.3054, -0.6548, -0.3282, -0.2969, -0.3984, -0.0448, -0.1802, +0.3282, -0.1891],\n[ -0.1116, +0.3646, -0.0542, +0.3672, -0.4207, +0.2700, +0.3827, -0.0599, -0.3415, -0.2832, -0.0345, +0.1987, +0.0669, +0.1301, -0.3806, -0.2981, -0.1917, -0.2028, +0.1687, -0.2010, +0.3607, -0.0199, +0.2971, +0.0390, +0.0895, -0.3088, +0.0169, -0.1333, +0.0738, +0.2161, -0.1207, -0.3352],\n[ -0.0134, +0.3853, -0.2106, +0.1996, -0.2277, -0.0971, +0.0917, -0.2901, -0.2493, +0.0295, -0.1438, -0.1902, -0.0074, +0.2240, -0.0277, -0.4374, +0.0749, -0.1779, +0.2687, -0.4093, -0.0042, -0.5023, -0.1169, -0.3157, +0.0061, +0.0270, +0.0204, -0.4626, -0.1717, -0.2126, +0.1335, -0.5028],\n[ -0.0813, +0.1958, -0.4203, +0.3027, -0.3896, -0.1201, -0.0383, -0.1807, -0.4834, -0.3672, -0.3664, +0.2401, -0.0114, -0.0852, -0.2220, -0.1953, +0.0773, -0.0048, +0.1560, -0.1524, +0.0772, -0.2740, +0.1346, -0.3171, -0.0648, +0.1633, +0.2050, -0.1560, +0.0270, +0.3009, -0.2798, -0.0756],\n[ -0.1754, +0.1428, +0.2527, -0.2624, -0.1126, -0.0014, +0.1030, -0.2716, -0.2678, -0.0268, +0.0982, -0.0385, -0.0628, -0.0768, -0.2531, +0.2935, -0.0661, +0.0778, -0.1184, +0.0070, -0.1331, -0.1174, -0.1338, -0.1601, -0.0357, -0.1964, -0.0550, -0.1151, +0.2369, +0.1578, -0.0826, -0.1985],\n[ -0.1724, -0.0328, +0.0090, -0.0564, +0.0876, -0.0607, +0.0060, -0.2330, +0.1137, -0.0771, -0.0774, +0.0727, -0.2037, +0.1521, +0.0666, +0.0258, -0.2189, -0.1417, +0.0276, -0.0387, -0.0747, -0.0214, -0.0793, -0.0520, +0.0918, -0.1276, -0.0877, +0.0309, -0.0630, -0.0149, -0.0197, -0.1755],\n[ +0.1471, -0.1542, +0.1202, -0.2846, +0.1209, -0.0383, -0.2689, -0.0442, -0.1086, +0.3428, +0.0120, +0.0473, +0.0320, -0.2629, -0.0904, -0.3732, -0.2179, +0.2540, -0.1725, -0.4163, -0.0333, +0.0934, -0.3123, -0.1123, -0.2196, +0.1580, -0.6386, +0.0650, -0.0473, +0.0521, +0.0061, -0.2745],\n[ +0.0064, -0.1054, -0.2054, -0.1706, +0.1626, +0.0895, +0.0571, -0.2639, +0.0269, +0.1943, +0.0687, -0.1510, -0.1987, +0.0784, -0.1774, -0.0242, +0.0519, -0.3330, +0.0364, +0.1868, -0.3204, +0.1106, +0.0456, -0.1627, -0.2792, +0.0017, +0.2943, +0.0481, -0.1523, -0.1656, -0.0222, -0.0239],\n[ +0.0853, +0.2513, -0.1716, +0.0164, -0.1375, -0.0870, +0.2430, +0.2161, -0.4489, -0.3427, +0.0341, -0.0022, -0.1488, +0.2685, -0.2290, -0.2439, +0.1216, -0.1475, -0.0332, -0.1282, -0.1603, -0.1076, -0.1279, -0.1439, -0.2784, -0.4271, +0.1286, -0.1134, -0.1994, -0.1031, -0.0210, -0.2327],\n[ +0.1303, -0.0463, +0.1797, -0.0939, +0.2427, -0.0791, -0.0735, -0.2248, +0.1545, -0.1325, -0.1812, -0.0896, +0.0695, +0.0225, -0.1880, +0.1619, -0.0468, +0.0904, +0.1570, -0.0206, +0.1266, -0.0148, +0.0305, +0.2494, +0.1687, -0.0774, -0.2693, +0.0449, +0.0040, -0.1319, +0.1513, -0.0410],\n[ +0.0545, +0.0586, -0.0087, -0.1021, -0.1756, -0.0722, +0.0678, +0.0310, -0.1490, -0.2823, +0.1335, -0.0038, +0.0660, +0.0696, -0.2747, -0.3360, +0.1061, +0.3080, +0.1201, -0.3870, +0.2960, -0.4409, -0.0295, +0.0854, -0.0908, +0.1224, -0.4637, -0.4016, +0.0420, +0.0505, +0.0364, -0.2983],\n[ -0.1218, +0.2787, -0.1838, -0.0315, -0.1590, -0.2840, +0.2845, +0.0601, -0.1741, -0.2363, -0.3620, -0.1355, +0.0943, +0.1343, -0.0346, -0.1135, +0.0327, -0.2982, +0.1805, -0.1483, +0.1698, -0.1056, -0.0257, +0.0580, -0.1921, +0.0863, 
+0.1439, -0.1360, +0.0468, +0.2411, -0.1872, +0.0329],\n[ +0.0068, +0.1272, +0.0108, +0.0178, +0.2308, +0.0207, -0.0050, +0.0127, +0.1008, -0.2972, -0.2233, -0.1369, +0.0797, +0.0023, -0.0782, -0.4778, +0.1916, +0.1325, +0.0110, -0.2083, +0.2786, -0.2724, -0.1214, +0.0510, -0.1068, -0.1982, -0.4602, -0.1082, -0.1563, -0.0689, -0.0913, +0.0983],\n[ +0.1631, +0.1356, -0.1882, +0.2125, -0.4817, -0.1368, +0.1216, -0.1032, -0.4494, -0.2093, -0.0110, +0.0402, -0.0097, +0.1575, -0.2447, -0.8683, +0.1860, -0.4305, +0.1405, -0.3244, +0.1927, -0.5331, +0.0910, -0.1750, -0.2639, -0.3461, -0.0655, -0.4643, -0.0272, +0.0600, +0.1538, -0.3951],\n[ +0.0750, +0.0031, -0.1113, +0.0419, -0.0726, +0.1712, +0.1273, -0.0844, +0.0187, -0.1579, +0.0365, +0.1953, +0.0259, +0.1069, +0.1584, +0.0159, +0.1700, -0.0276, +0.0061, -0.1753, -0.0827, -0.0493, +0.0756, -0.1169, +0.0177, -0.2200, -0.0495, -0.0934, +0.1999, -0.0962, -0.0035, +0.1083],\n[ -0.0754, -0.1933, +0.1219, -0.3622, -0.2560, +0.0829, -0.3323, +0.0923, -0.1712, +0.0494, +0.1063, +0.3118, +0.0088, -0.3756, -0.0977, +0.0160, -0.0817, +0.1595, -0.3452, +0.2652, +0.2646, +0.2833, -0.3530, +0.0805, -0.1736, +0.0675, -0.1320, -0.3568, +0.1824, -0.0068, +0.0391, -0.3348],\n[ +0.0661, +0.1602, -0.0509, +0.0562, -0.1738, +0.0114, -0.0268, -0.0354, -0.2069, -0.0250, -0.1061, -0.1695, -0.0719, +0.2797, -0.2477, -0.2539, +0.1287, -0.2037, +0.2556, -0.1008, +0.1943, -0.1660, +0.2728, -0.2338, -0.0806, -0.2346, +0.0449, -0.4673, -0.0362, -0.1172, +0.1695, -0.2252],\n[ +0.0348, -0.2188, +0.0041, -0.1818, +0.3175, -0.0947, -0.2779, -0.0764, +0.2407, -0.1541, +0.2586, -0.1852, -0.1379, -0.3336, +0.1402, -0.0446, +0.0584, +0.0994, -0.3633, +0.0636, -0.0156, -0.0767, -0.2649, +0.0149, +0.2484, +0.2916, +0.1928, -0.0036, +0.0696, -0.0935, +0.2752, +0.0187],\n[ -0.2666, +0.0507, -0.1783, +0.2308, +0.3974, -0.0719, +0.0276, -0.0048, +0.1177, +0.0816, -0.2346, -0.2762, +0.1167, +0.0719, -0.1303, -0.0892, +0.0177, +0.0072, +0.0965, +0.2305, +0.0988, +0.1532, -0.1653, +0.0692, -0.0419, -0.1874, -0.0896, +0.0014, +0.0375, -0.0905, -0.3757, +0.3573],\n[ +0.4116, -0.2717, +0.2356, -0.1943, +0.0575, +0.0379, -0.0606, -0.0819, +0.1179, +0.2377, -0.1506, +0.1710, +0.0912, -0.2922, +0.0898, +0.1814, +0.1221, +0.1917, -0.3906, +0.1684, +0.1638, +0.2434, -0.1656, +0.1352, +0.0744, -0.0942, -0.2128, +0.0767, +0.0628, +0.1426, +0.3458, -0.0437],\n[ -0.1387, -0.5340, +0.2895, -0.5476, +0.5888, +0.1435, -0.4898, +0.0061, +0.6167, +0.1024, +0.1127, +0.2197, +0.0206, -0.4723, +0.1195, +0.6172, +0.0276, +0.3961, -0.5498, +0.4008, -0.2163, +0.3337, -0.2608, +0.1666, +0.3415, +0.0077, -0.1649, +0.2619, -0.1937, -0.1043, +0.1770, +0.4285],\n[ -0.0167, +0.0725, +0.1501, +0.0806, -0.0904, -0.2287, +0.1906, -0.0706, -0.0861, -0.1585, -0.1175, -0.0603, -0.0193, +0.4876, -0.1954, -0.0463, -0.1083, +0.1297, -0.0301, +0.0312, +0.0755, +0.0648, -0.4867, -0.0645, +0.0074, +0.0624, -0.1972, -0.1996, -0.1207, -0.1015, +0.0720, +0.0260],\n[ +0.0007, -0.1637, +0.1202, -0.1045, +0.2969, +0.1975, -0.1374, +0.1684, +0.0790, +0.2108, -0.0220, +0.0773, +0.0046, +0.0205, -0.1746, +0.3445, +0.0773, +0.0005, +0.0251, +0.3337, -0.3365, +0.3956, -0.2011, +0.2489, +0.1875, +0.0282, -0.4611, +0.2249, +0.0182, -0.1252, -0.1899, +0.1563],\n[ -0.0142, +0.0174, +0.1562, +0.0763, +0.1314, -0.0686, +0.3657, -0.0132, -0.0737, +0.0247, +0.0431, -0.2967, +0.0002, +0.2221, +0.1011, +0.1039, -0.0503, -0.3926, +0.1014, -0.1349, -0.1005, +0.1254, +0.0250, -0.1482, -0.2554, +0.1027, +0.1661, -0.1071, -0.0521, -0.0568, +0.1508, 
+0.0668],\n[ -0.1106, +0.1260, -0.3472, +0.2769, +0.0344, -0.0668, +0.2888, +0.1583, -0.2782, -0.1161, -0.2939, +0.1309, -0.0010, +0.4387, +0.1623, -0.2627, -0.1011, -0.3530, +0.0604, -0.2499, +0.2736, -0.2715, +0.2004, -0.5407, -0.4915, -0.1778, +0.1274, -0.1071, -0.0170, -0.1190, -0.1540, -0.0364],\n[ -0.1767, +0.2753, +0.2479, -0.0753, -0.2057, -0.2379, -0.0411, -0.0945, -0.1757, +0.1752, +0.1322, +0.0548, -0.0980, +0.1753, -0.0510, +0.2050, -0.0246, +0.5660, -0.2124, +0.1708, -0.1779, +0.2125, -0.0143, +0.1992, +0.1330, -0.2561, -0.1304, +0.2212, -0.2898, +0.0983, -0.1803, +0.1087],\n[ -0.0503, -0.3082, +0.1056, -0.1658, +0.3225, +0.0727, -0.4463, -0.0153, +0.0195, +0.0962, -0.0483, -0.0484, +0.2464, -0.5537, +0.1422, +0.1233, -0.1036, +0.0864, -0.2107, +0.1319, +0.2002, +0.3051, -0.2054, +0.3069, +0.2754, +0.1618, -0.0593, -0.0373, +0.2155, -0.1157, -0.1199, +0.2342],\n[ -0.1789, -0.1216, +0.0442, -0.1111, +0.1411, -0.0572, -0.4238, +0.0134, -0.1511, +0.0625, -0.0139, -0.2257, -0.1143, -0.2315, +0.3597, -0.1227, +0.0240, +0.2061, -0.0474, +0.0561, -0.2806, -0.0939, -0.0608, +0.1852, -0.0210, -0.3526, +0.0992, -0.3513, -0.0787, +0.1074, -0.0475, -0.1759],\n[ -0.0510, +0.0215, +0.1585, -0.0757, +0.0357, -0.0553, -0.1151, -0.1353, +0.1000, -0.2570, -0.0664, -0.1762, +0.0430, -0.0365, +0.0198, +0.1154, -0.5763, +0.0393, -0.0443, +0.0504, +0.0482, +0.1528, +0.1955, -0.0493, +0.2712, -0.0688, -0.1406, +0.1479, +0.0204, +0.0838, -0.2282, +0.2307],\n[ -0.1682, -0.0467, -0.0758, +0.3832, -0.1471, +0.0612, +0.3901, +0.1065, +0.2009, -0.3104, -0.2998, -0.3175, -0.0722, +0.1549, -0.2472, -0.1729, +0.0841, -0.1691, +0.1407, -0.1969, -0.0491, +0.0103, +0.1179, -0.1327, -0.1275, +0.0368, +0.0953, -0.1660, -0.0245, -0.3851, +0.1340, -0.1417],\n[ +0.0114, -0.0822, -0.2575, -0.0169, +0.1292, +0.0791, -0.0803, +0.0061, -0.0445, -0.2228, +0.0215, +0.1863, +0.2645, -0.0295, +0.0756, -0.2138, -0.1607, +0.0527, +0.0592, -0.1770, -0.0982, -0.1096, +0.0925, -0.0325, +0.0047, +0.1512, +0.0663, -0.1348, +0.0084, -0.1352, +0.0189, +0.1428],\n[ +0.0052, +0.1124, +0.1083, +0.1163, +0.0787, +0.0839, +0.0839, +0.0506, +0.0537, +0.1066, +0.1034, -0.1299, -0.1434, +0.0188, +0.1823, +0.1403, -0.4525, +0.0949, -0.0981, +0.0722, -0.1085, -0.2382, +0.1028, +0.0664, +0.1976, +0.1073, -0.2736, +0.2433, -0.3520, -0.0386, -0.2319, -0.0724],\n[ -0.3279, -0.1491, -0.1409, +0.2056, -0.1464, +0.0543, +0.1842, +0.1104, -0.2819, +0.0769, -0.1159, +0.0228, -0.0988, +0.0026, -0.1204, -0.0780, -0.2018, +0.1755, +0.1574, +0.0222, +0.1662, -0.2193, -0.0718, +0.0010, -0.0123, -0.0120, +0.2587, +0.0358, -0.1435, +0.0017, -0.2620, +0.0965],\n[ -0.1144, -0.1048, +0.2211, -0.0726, -0.1721, -0.2475, -0.3226, +0.0120, +0.0908, +0.0375, -0.0974, +0.0490, -0.1180, -0.3155, -0.2565, -0.0092, -0.4400, +0.2027, -0.1459, +0.1043, +0.0771, +0.0825, -0.1541, -0.0713, -0.0437, -0.0249, -0.1757, -0.1115, +0.0457, +0.1141, -0.2567, +0.0405],\n[ +0.0587, +0.1083, +0.0729, +0.2131, -0.1586, +0.2208, -0.1576, -0.0811, -0.0467, +0.2201, -0.1082, -0.2077, +0.0030, -0.1222, +0.2023, +0.1155, -0.1616, +0.0105, +0.1167, -0.1257, +0.4859, +0.1337, -0.0169, -0.0163, +0.2076, +0.0367, -0.0050, -0.2590, -0.0800, -0.2192, +0.0938, +0.1126],\n[ -0.3834, -0.0180, -0.2714, +0.0303, +0.0784, -0.1242, +0.1105, +0.0237, -0.0085, +0.2615, +0.0189, -0.3734, +0.0088, +0.1211, -0.0838, +0.0067, +0.1956, +0.1577, +0.2132, -0.0044, -0.2748, +0.1417, +0.0201, +0.1002, +0.0311, -0.0052, -0.1695, -0.0750, +0.2200, -0.2848, +0.0438, -0.0442],\n[ -0.1496, +0.1258, +0.1903, -0.0337, 
-0.1470, -0.0530, +0.0519, -0.0037, +0.0342, +0.0404, -0.0950, -0.0840, +0.1083, -0.0488, +0.0427, +0.1454, +0.0851, -0.0203, -0.2354, +0.1562, +0.1899, +0.3145, +0.0013, +0.1608, +0.0126, +0.2080, -0.1409, -0.0746, +0.0580, -0.1045, -0.1753, +0.1225],\n[ -0.0349, +0.1354, -0.1052, -0.1189, +0.0288, -0.0257, +0.0813, -0.1559, +0.1267, +0.0664, +0.2004, +0.1232, +0.2557, -0.1729, -0.0666, +0.1644, +0.1043, -0.2672, +0.0537, +0.0566, -0.1738, +0.0036, +0.1406, -0.0574, -0.0556, +0.3336, -0.0328, -0.1624, +0.0132, -0.0627, -0.1523, +0.0552],\n[ -0.3105, +0.2681, -0.5462, +0.2785, -0.2453, -0.2965, +0.1436, +0.0786, -0.3242, -0.3518, +0.1025, +0.2219, -0.1324, +0.1681, +0.0701, -0.0938, +0.1574, -0.5157, +0.3574, -0.1100, +0.2647, -0.1698, +0.2684, -0.3876, -0.6240, -0.1013, +0.2920, -0.3569, -0.0008, +0.0974, +0.1444, -0.3349],\n[ +0.0848, -0.1191, +0.2283, +0.0922, +0.2880, -0.1747, -0.4457, +0.1013, +0.2494, +0.1487, +0.1013, -0.0403, -0.0236, -0.1965, -0.0655, +0.0818, +0.0493, -0.0605, -0.1889, +0.1772, -0.2826, +0.2783, -0.1653, +0.3505, +0.4192, -0.1048, -0.1459, +0.0779, -0.0154, -0.1573, -0.1254, -0.1118],\n[ -0.1817, +0.0719, +0.1352, +0.3208, +0.2142, -0.1149, +0.0020, +0.1617, +0.1055, +0.0395, -0.1802, -0.0631, -0.3172, +0.1971, +0.0197, +0.1271, -0.2375, -0.1849, -0.0134, +0.1223, +0.2566, +0.0311, -0.2746, +0.0278, +0.1233, +0.0167, -0.0363, +0.2146, -0.0466, +0.0732, -0.1490, +0.1040],\n[ +0.1008, -0.1501, +0.0264, -0.4661, -0.0553, +0.0431, -0.3076, -0.0461, +0.1393, -0.1225, +0.2811, -0.0363, -0.0403, -0.3370, -0.0865, -0.1179, +0.1106, +0.2035, -0.2432, -0.0859, +0.0600, -0.0890, -0.0749, +0.0483, +0.0615, -0.0239, -0.4674, +0.0199, +0.0669, +0.1410, +0.1846, +0.2626],\n[ +0.0663, +0.1486, -0.3928, +0.3257, -0.0316, +0.1377, +0.0418, +0.1921, -0.1616, -0.2265, -0.0917, +0.1582, -0.0537, +0.0295, -0.2264, -0.1921, -0.0225, +0.0928, +0.0747, -0.5268, -0.0068, -0.3328, +0.0437, -0.2361, -0.1408, -0.1234, +0.2216, -0.1372, -0.0499, +0.1940, +0.0098, -0.2953],\n[ +0.0290, -0.1583, -0.0172, -0.1748, -0.0042, -0.0725, -0.2227, -0.1366, -0.1771, +0.1987, +0.3142, +0.1889, +0.0195, -0.5461, +0.0921, +0.1407, -0.1656, +0.1985, +0.0113, +0.2613, +0.2925, +0.1166, -0.1286, +0.1031, -0.2228, -0.0605, -0.2151, +0.2477, +0.1602, -0.0109, +0.0207, +0.1257],\n[ +0.0630, -0.1688, +0.1662, -0.2327, +0.2832, +0.1350, -0.1658, +0.0504, -0.0502, +0.1736, +0.1002, -0.0051, -0.0311, -0.0628, +0.0039, +0.5085, -0.2191, +0.5105, -0.0927, +0.2833, -0.2828, +0.1078, +0.0406, -0.0392, -0.2372, +0.1508, +0.0556, +0.0313, +0.1296, +0.1315, -0.1143, +0.1632]\n])\n\nweights_dense2_b = np.array([ -0.0655, +0.0020, +0.0358, -0.0192, +0.0570, +0.0000, +0.0711, -0.0145, +0.0294, +0.0139, -0.0215, -0.0952, +0.0000, +0.0526, -0.0585, +0.0633, -0.0332, +0.0030, +0.0107, +0.0830, +0.0140, +0.0888, -0.1115, -0.0722, +0.0240, +0.0476, -0.0807, -0.0421, +0.0000, -0.0557, -0.0403, +0.0034])\n\nweights_final_w = np.array([\n[ -0.0230],\n[ +0.0730],\n[ -0.2093],\n[ +0.0463],\n[ -0.1983],\n[ -0.0031],\n[ +0.2101],\n[ -0.0066],\n[ -0.1481],\n[ -0.1615],\n[ -0.1766],\n[ +0.1332],\n[ -0.0012],\n[ +0.2332],\n[ -0.0380],\n[ -0.3066],\n[ -0.1738],\n[ -0.2982],\n[ +0.0285],\n[ -0.1548],\n[ +0.2539],\n[ -0.2544],\n[ +0.2006],\n[ -0.4121],\n[ -0.2084],\n[ -0.0381],\n[ +0.2733],\n[ -0.3076],\n[ +0.0013],\n[ +0.0957],\n[ -0.1298],\n[ -0.1112]\n])\n\nweights_final_b = np.array([ +0.0352])\n\nif __name__==\"__main__\":\n main()\n"
]
| [
[
"numpy.array",
"numpy.dot",
"numpy.maximum"
]
]
|
LakeAndCat/CluOReg | [
"ba50cb056061b3833050d32e532e08152bdc8de2"
]
| [
"models/densenet_cluster.py"
]
| [
"# -*- encoding: utf-8 -*-\n'''\n@File : densenet2.py \n@Contact : [email protected]\n\n@Modify Time @Author @Version @Desciption\n------------ ----------- -------- -----------\n2020/10/7 20:15 guzhouweihu 1.0 None\n'''\n\n\n\"\"\"dense net in pytorch\n[1] Gao Huang, Zhuang Liu, Laurens van der Maaten, Kilian Q. Weinberger.\n Densely Connected Convolutional Networks\n https://arxiv.org/abs/1608.06993v5\n\"\"\"\n\n\n# \"\"\"Bottleneck layers. Although each layer only produces k\n# output feature-maps, it typically has many more inputs. It\n# has been noted in [37, 11] that a 1×1 convolution can be in-\n# troduced as bottleneck layer before each 3×3 convolution\n# to reduce the number of input feature-maps, and thus to\n# improve computational efficiency.\"\"\"\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\n\nclass Bottleneck(nn.Module):\n def __init__(self, in_channels, growth_rate):\n super().__init__()\n # \"\"\"In our experiments, we let each 1×1 convolution\n # produce 4k feature-maps.\"\"\"\n inner_channel = 4 * growth_rate\n\n # \"\"\"We find this design especially effective for DenseNet and\n # we refer to our network with such a bottleneck layer, i.e.,\n # to the BN-ReLU-Conv(1×1)-BN-ReLU-Conv(3×3) version of H ` ,\n # as DenseNet-B.\"\"\"\n self.bottle_neck = nn.Sequential(\n nn.BatchNorm2d(in_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels, inner_channel, kernel_size=1, bias=False),\n nn.BatchNorm2d(inner_channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(\n inner_channel,\n growth_rate,\n kernel_size=3,\n padding=1,\n bias=False)\n )\n\n def forward(self, x):\n return torch.cat([x, self.bottle_neck(x)], 1)\n\n# \"\"\"We refer to layers between blocks as transition\n# layers, which do convolution and pooling.\"\"\"\n\n\nclass Transition(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n # \"\"\"The transition layers used in our experiments\n # consist of a batch normalization layer and an 1×1\n # convolutional layer followed by a 2×2 average pooling\n # layer\"\"\".\n self.down_sample = nn.Sequential(\n nn.BatchNorm2d(in_channels),\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\n nn.AvgPool2d(2, stride=2)\n )\n\n def forward(self, x):\n return self.down_sample(x)\n\n\nclass ClusterLayer(nn.Module):\n\n def __init__(self, n_cluster, expansion, cluster_m):\n super(ClusterLayer, self).__init__()\n self.center = Parameter(torch.Tensor(n_cluster, expansion))\n self.m = cluster_m\n\n def forward(self, x):\n mu = 1.0 / (torch.sum(torch.abs(x.unsqueeze(1) - self.center) ** (2.0 / (self.m - 1.0)), dim=2))\n mu = mu / torch.sum(mu, dim=1, keepdim=True)\n return mu\n \n\n# DesneNet-BC\n# B stands for bottleneck layer(BN-RELU-CONV(1x1)-BN-RELU-CONV(3x3))\n# C stands for compression factor(0<=theta<=1)\n\n\nclass DenseNet(nn.Module):\n def __init__(self, block, nblocks, n_cluster, cluster_m, growth_rate=12, num_classes=100,\n reduction=0.5):\n super(DenseNet, self).__init__()\n self.growth_rate = growth_rate\n\n # \"\"\"Before entering the first dense block, a convolution\n # with 16 (or twice the growth rate for DenseNet-BC)\n # output channels is performed on the input images.\"\"\"\n inner_channels = 2 * growth_rate\n\n # For convolutional layers with kernel size 3×3, each\n # side of the inputs is zero-padded by one pixel to keep\n # the feature-map size fixed.\n self.conv1 = nn.Conv2d(\n 3,\n inner_channels,\n kernel_size=3,\n padding=1,\n bias=False)\n\n self.features = nn.Sequential()\n\n for index in 
range(len(nblocks) - 1):\n self.features.add_module(\n \"dense_block_layer_{}\".format(index),\n self._make_dense_layers(\n block,\n inner_channels,\n nblocks[index]))\n inner_channels += growth_rate * nblocks[index]\n\n # \"\"\"If a dense block contains m feature-maps, we let the\n # following transition layer generate θm output feature-\n # maps, where 0 < θ ≤ 1 is referred to as the compression\n # fac-tor.\n # int() will automatic floor the value\n out_channels = int(reduction * inner_channels)\n self.features.add_module(\n \"transition_layer_{}\".format(index), Transition(\n inner_channels, out_channels))\n inner_channels = out_channels\n\n self.features.add_module(\"dense_block{}\".format(len(\n nblocks) - 1), self._make_dense_layers(block, inner_channels, nblocks[len(nblocks) - 1]))\n inner_channels += growth_rate * nblocks[len(nblocks) - 1]\n self.features.add_module('bn', nn.BatchNorm2d(inner_channels))\n self.features.add_module('relu', nn.ReLU(inplace=True))\n\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n\n self.linear = nn.Linear(inner_channels, num_classes)\n\n \n self.cluster_layer = ClusterLayer(n_cluster, inner_channels, cluster_m)\n self.cTok_fc = nn.Linear(n_cluster, num_classes, bias=False)\n\n self.cTok_bn = nn.BatchNorm1d(num_classes)\n self.cTok_relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n output = self.conv1(x)\n output = self.features(output)\n output = self.avgpool(output)\n output = output.view(output.size()[0], -1)\n # output = self.linear(output)\n\n mu = self.cluster_layer(output)\n\n cTok_output = self.cTok_fc(mu)\n cTok_output = self.cTok_bn(cTok_output)\n cTok_output = self.cTok_relu(cTok_output)\n\n return self.linear(output), output, cTok_output\n\n def _make_dense_layers(self, block, in_channels, nblocks):\n dense_block = nn.Sequential()\n for index in range(nblocks):\n dense_block.add_module(\n \"bottle_neck_layer_{}\".format(index), block(\n in_channels, self.growth_rate))\n in_channels += self.growth_rate\n return dense_block\n\n\ndef densenet121(n_cluster, cluster_m, num_classes):\n return DenseNet(Bottleneck, [6, 12, 24, 16],\n growth_rate=32, n_cluster=n_cluster, cluster_m=cluster_m, num_classes=num_classes)\n\n\ndef densenet169(n_cluster, cluster_m, num_classes):\n return DenseNet(Bottleneck, [6, 12, 32, 32],\n growth_rate=32, n_cluster=n_cluster, cluster_m=cluster_m, num_classes=num_classes)\n\n\ndef densenet201(num_classes):\n return DenseNet(Bottleneck, [6, 12, 48, 32],\n growth_rate=32, num_classes=num_classes)\n\n\ndef densenet161(num_classes):\n return DenseNet(Bottleneck, [6, 12, 36, 24],\n growth_rate=48, num_classes=num_classes)\n\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.AdaptiveAvgPool2d",
"torch.Tensor",
"torch.sum"
]
]
|
lynphoenix/ignite | [
"7b1e83f3fe7c36d12e01a09a01bf7efe7fd136a1"
]
| [
"tests/ignite/handlers/test_checkpoint.py"
]
| [
"import os\nimport warnings\nfrom collections import OrderedDict\nfrom unittest.mock import MagicMock\n\nimport pytest\nimport torch\nimport torch.nn as nn\n\nimport ignite.distributed as idist\nfrom ignite.engine import Engine, Events, State\nfrom ignite.handlers import Checkpoint, DiskSaver, EarlyStopping, ModelCheckpoint, global_step_from_engine\nfrom ignite.handlers.checkpoint import BaseSaveHandler\n\n_PREFIX = \"PREFIX\"\n\n\nclass DummyModel(nn.Module):\n def __init__(self):\n super(DummyModel, self).__init__()\n self.net = nn.Linear(1, 1)\n\n def forward(self, x):\n return self.net(x)\n\n\nclass DummyPretrainedModel(nn.Module):\n def __init__(self):\n super(DummyPretrainedModel, self).__init__()\n self.features = nn.Linear(4, 2, bias=False)\n self.fc = nn.Linear(2, 1)\n\n def forward(self, x):\n x = self.features(x)\n x = self.fc(x)\n return x\n\n\ndef test_checkpoint_wrong_input():\n\n with pytest.raises(TypeError, match=r\"Argument `to_save` should be a dictionary\"):\n Checkpoint(12, lambda x: x, \"prefix\")\n\n with pytest.raises(TypeError, match=r\"Argument `to_save` should be a dictionary\"):\n Checkpoint([12], lambda x: x, \"prefix\")\n\n with pytest.raises(ValueError, match=r\"No objects to checkpoint.\"):\n Checkpoint({}, lambda x: x, \"prefix\")\n\n model = DummyModel()\n to_save = {\"model\": model}\n\n with pytest.raises(TypeError, match=r\"Argument `save_handler` should be callable\"):\n Checkpoint(to_save, 12, \"prefix\")\n\n with pytest.raises(\n ValueError, match=r\"If `score_name` is provided, then `score_function` should be also provided.\"\n ):\n Checkpoint(to_save, lambda x: x, score_name=\"acc\")\n\n with pytest.raises(TypeError, match=r\"global_step_transform should be a function.\"):\n Checkpoint(to_save, lambda x: x, score_function=lambda e: 123, score_name=\"acc\", global_step_transform=123)\n\n with pytest.warns(UserWarning, match=r\"Argument archived is deprecated\"):\n Checkpoint(to_save, lambda x: x, score_function=lambda e: 123, score_name=\"acc\", archived=True)\n\n with pytest.raises(ValueError, match=r\"Cannot have key 'checkpointer' if `include_self` is True\"):\n Checkpoint({\"checkpointer\": model}, lambda x: x, include_self=True)\n\n\ndef test_checkpoint_score_function_wrong_output():\n model = DummyModel()\n to_save = {\"model\": model}\n\n checkpointer = Checkpoint(to_save, lambda x: x, score_function=lambda e: {\"1\": 1}, score_name=\"acc\")\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n with pytest.raises(ValueError, match=r\"Output of score_function should be a number\"):\n checkpointer(trainer)\n\n\ndef test_checkpoint_default():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler)\n assert checkpointer.last_checkpoint is None\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": None, \"priority\": 0}\n save_handler.assert_called_with(obj, \"{}_0.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n checkpointer(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 1234\n save_handler.assert_called_with(obj, \"{}_1234.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_0.pt\".format(name))\n assert checkpointer.last_checkpoint == 
\"{}_1234.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n\n\ndef test_checkpoint_include_self_state_dict():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler, include_self=True)\n assert checkpointer.last_checkpoint is None\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n fname = \"{}_0.pt\".format(name)\n obj[\"checkpointer\"] = OrderedDict([(\"saved\", [(0, fname)])])\n\n metadata = {\"basename\": name, \"score_name\": None, \"priority\": 0}\n save_handler.assert_called_with(obj, fname, metadata)\n\n # Swap object, state should be maintained\n checkpointer2 = Checkpoint(to_save, save_handler=save_handler, include_self=True)\n checkpointer2.load_state_dict(checkpointer.state_dict())\n assert checkpointer2.last_checkpoint == fname\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n checkpointer2(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 1234\n\n # This delete only happens if state was restored correctly.\n save_handler.remove.assert_called_with(\"{}_0.pt\".format(name))\n\n fname = \"{}_1234.pt\".format(name)\n obj[\"checkpointer\"] = OrderedDict([(\"saved\", [(1234, fname)])])\n\n save_handler.assert_called_with(obj, fname, metadata)\n assert save_handler.remove.call_count == 1\n assert checkpointer2.last_checkpoint == fname\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n\n\ndef test_checkpoint_with_dp():\n\n model = DummyModel()\n dp_model = nn.DataParallel(model)\n to_save = {\"model\": dp_model}\n\n save_handler = MagicMock(spec=BaseSaveHandler)\n checkpointer = Checkpoint(to_save, save_handler=save_handler)\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n metadata = {\"basename\": \"model\", \"score_name\": None, \"priority\": 0}\n save_handler.assert_called_with(model.state_dict(), \"model_0.pt\", metadata)\n\n\ndef test_checkpoint_with_global_step_transform():\n def _test(filename_prefix, to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(\n to_save,\n save_handler=save_handler,\n filename_prefix=filename_prefix,\n global_step_transform=lambda e, _: e.state.epoch,\n )\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=2, iteration=1)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n if len(filename_prefix) > 0:\n filename_prefix += \"_\"\n\n metadata = {\"basename\": \"{}{}\".format(filename_prefix, name), \"score_name\": None, \"priority\": 2}\n save_handler.assert_called_with(obj, \"{}{}_2.pt\".format(filename_prefix, name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n checkpointer(trainer)\n assert 
save_handler.call_count == 2\n metadata[\"priority\"] = 12\n save_handler.assert_called_with(obj, \"{}{}_12.pt\".format(filename_prefix, name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}{}_2.pt\".format(filename_prefix, name))\n assert checkpointer.last_checkpoint == \"{}{}_12.pt\".format(filename_prefix, name)\n\n for prefix in [\"\", \"dummytask\"]:\n model = DummyModel()\n to_save = {\"model\": model}\n _test(prefix, to_save, model.state_dict(), \"model\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n _test(prefix, to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n\n\ndef test_checkpoint_with_score_function():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler, score_function=lambda e: e.state.score)\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=1, iteration=1, score=0.77)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": None, \"priority\": 0.77}\n save_handler.assert_called_with(obj, \"{}_0.7700.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n trainer.state.score = 0.78\n\n checkpointer(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 0.78\n save_handler.assert_called_with(obj, \"{}_0.7800.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_0.7700.pt\".format(name))\n assert checkpointer.last_checkpoint == \"{}_0.7800.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n\n\ndef test_checkpoint_with_score_name_and_function():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(\n to_save, save_handler=save_handler, score_name=\"loss\", score_function=lambda e: e.state.score\n )\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=1, iteration=1, score=-0.77)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": \"loss\", \"priority\": -0.77}\n save_handler.assert_called_with(obj, \"{}_loss=-0.7700.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n trainer.state.score = -0.76\n\n checkpointer(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = -0.76\n save_handler.assert_called_with(obj, \"{}_loss=-0.7600.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_loss=-0.7700.pt\".format(name))\n assert checkpointer.last_checkpoint == \"{}_loss=-0.7600.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, 
\"checkpoint\")\n\n\ndef test_checkpoint_with_int_score():\n def _test(to_save, obj, name, score_name=None):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(\n to_save, save_handler=save_handler, score_name=score_name, score_function=lambda e: e.state.epoch\n )\n\n if score_name is None:\n score_name = \"\"\n else:\n score_name += \"=\"\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=1, iteration=1)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": score_name[:-1] if len(score_name) > 0 else None, \"priority\": 1}\n save_handler.assert_called_with(obj, \"{}_{}1.pt\".format(name, score_name), metadata)\n\n trainer.state.epoch = 12\n trainer.state.iteration = 1234\n\n checkpointer(trainer)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 12\n save_handler.assert_called_with(obj, \"{}_{}12.pt\".format(name, score_name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_{}1.pt\".format(name, score_name))\n assert checkpointer.last_checkpoint == \"{}_{}12.pt\".format(name, score_name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n _test(to_save, model.state_dict(), \"model\", \"epoch\")\n\n model = DummyModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n to_save = {\"model\": model, \"optimizer\": optimizer}\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\")\n _test(to_save, {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}, \"checkpoint\", \"epoch\")\n\n\ndef test_checkpoint_with_score_function_and_trainer_epoch():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n trainer = Engine(lambda e, b: None)\n evaluator = Engine(lambda e, b: None)\n trainer.state = State(epoch=11, iteration=1)\n\n checkpointer = Checkpoint(\n to_save,\n save_handler=save_handler,\n global_step_transform=lambda _1, _2: trainer.state.epoch,\n score_function=lambda e: e.state.metrics[\"val_acc\"],\n )\n\n evaluator.state = State(epoch=1, iteration=1000, metrics={\"val_acc\": 0.77})\n checkpointer(evaluator)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": None, \"priority\": 0.77}\n save_handler.assert_called_with(obj, \"{}_11_0.7700.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n evaluator.state.metrics[\"val_acc\"] = 0.78\n\n checkpointer(evaluator)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 0.78\n save_handler.assert_called_with(obj, \"{}_12_0.7800.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_11_0.7700.pt\".format(name))\n assert checkpointer.last_checkpoint == \"{}_12_0.7800.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n\ndef test_checkpoint_with_score_name_and_function_and_trainer_epoch():\n def _test(to_save, obj, name):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n trainer = Engine(lambda e, b: None)\n evaluator = Engine(lambda e, b: None)\n trainer.state = State(epoch=11, iteration=1)\n\n checkpointer = Checkpoint(\n to_save,\n save_handler=save_handler,\n global_step_transform=lambda _1, _2: trainer.state.epoch,\n score_name=\"val_acc\",\n score_function=lambda e: e.state.metrics[\"val_acc\"],\n )\n\n evaluator.state 
= State(epoch=1, iteration=1000, metrics={\"val_acc\": 0.77})\n\n checkpointer(evaluator)\n assert save_handler.call_count == 1\n\n metadata = {\"basename\": name, \"score_name\": \"val_acc\", \"priority\": 0.77}\n save_handler.assert_called_with(obj, \"{}_11_val_acc=0.7700.pt\".format(name), metadata)\n\n trainer.state.epoch = 12\n evaluator.state.metrics[\"val_acc\"] = 0.78\n\n checkpointer(evaluator)\n assert save_handler.call_count == 2\n metadata[\"priority\"] = 0.78\n save_handler.assert_called_with(obj, \"{}_12_val_acc=0.7800.pt\".format(name), metadata)\n assert save_handler.remove.call_count == 1\n save_handler.remove.assert_called_with(\"{}_11_val_acc=0.7700.pt\".format(name))\n assert checkpointer.last_checkpoint == \"{}_12_val_acc=0.7800.pt\".format(name)\n\n model = DummyModel()\n to_save = {\"model\": model}\n _test(to_save, model.state_dict(), \"model\")\n\n\ndef test_checkpoint_last_checkpoint():\n save_handler = MagicMock(spec=BaseSaveHandler)\n to_save = {\"model\": DummyModel()}\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)\n\n trainer = Engine(lambda e, b: None)\n\n for i in range(10):\n trainer.state = State(epoch=1, iteration=i)\n checkpointer(trainer)\n\n assert save_handler.call_count == 10\n assert checkpointer.last_checkpoint == \"{}_9.pt\".format(\"model\")\n\n\ndef test_checkpoint_last_checkpoint_on_score():\n save_handler = MagicMock(spec=BaseSaveHandler)\n to_save = {\"model\": DummyModel()}\n\n checkpointer = Checkpoint(\n to_save,\n save_handler=save_handler,\n n_saved=None,\n score_name=\"val_acc\",\n score_function=lambda e: e.state.metrics[\"val_acc\"],\n )\n\n trainer = Engine(lambda e, b: None)\n\n val_acc = 0.0\n for i in range(10):\n val_acc = i * 0.1\n trainer.state = State(epoch=1, iteration=i, metrics={\"val_acc\": val_acc})\n checkpointer(trainer)\n\n assert save_handler.call_count == 10\n assert checkpointer.last_checkpoint == \"{}_val_acc=0.9000.pt\".format(\"model\")\n\n\ndef test_checkpoint_save_handler_callable():\n def save_handler(c, f):\n assert f == \"model_12.pt\"\n\n to_save = {\"model\": DummyModel()}\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler,)\n\n trainer = Engine(lambda e, b: None)\n\n trainer.state = State(epoch=1, iteration=12)\n checkpointer(trainer)\n\n\ndef test_model_checkpoint_args_validation(dirname):\n existing = os.path.join(dirname, \"existing_dir\")\n nonempty = os.path.join(dirname, \"nonempty\")\n\n os.makedirs(existing)\n os.makedirs(nonempty)\n\n with open(os.path.join(nonempty, \"{}_name_0.pt\".format(_PREFIX)), \"w\"):\n pass\n\n with pytest.raises(ValueError, match=r\"with extension '.pt' are already present \"):\n ModelCheckpoint(nonempty, _PREFIX)\n\n with pytest.raises(ValueError, match=r\"Argument save_interval is deprecated and should be None\"):\n ModelCheckpoint(existing, _PREFIX, save_interval=42)\n\n with pytest.raises(ValueError, match=r\"Directory path '\\S+' is not found\"):\n ModelCheckpoint(os.path.join(dirname, \"non_existing_dir\"), _PREFIX, create_dir=False)\n\n with pytest.raises(ValueError, match=r\"Argument save_as_state_dict is deprecated and should be True\"):\n ModelCheckpoint(existing, _PREFIX, create_dir=False, save_as_state_dict=False)\n\n with pytest.raises(ValueError, match=r\"If `score_name` is provided, then `score_function` \"):\n ModelCheckpoint(existing, _PREFIX, create_dir=False, score_name=\"test\")\n\n with pytest.raises(TypeError, match=r\"global_step_transform should be a function\"):\n ModelCheckpoint(existing, _PREFIX, 
create_dir=False, global_step_transform=1234)\n\n with pytest.warns(UserWarning, match=r\"Argument archived is deprecated\"):\n ModelCheckpoint(existing, _PREFIX, create_dir=False, archived=True)\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)\n assert h.last_checkpoint is None\n with pytest.raises(RuntimeError, match=r\"No objects to checkpoint found.\"):\n h(None, [])\n\n\ndef test_model_checkpoint_simple_recovery(dirname):\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=1)\n\n model = DummyModel()\n to_save = {\"model\": model}\n h(engine, to_save)\n\n fname = h.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n assert loaded_objects == model.state_dict()\n\n\ndef test_model_checkpoint_simple_recovery_from_existing_non_empty(dirname):\n def _test(ext, require_empty):\n previous_fname = os.path.join(dirname, \"{}_{}_{}{}\".format(_PREFIX, \"obj\", 1, ext))\n with open(previous_fname, \"w\") as f:\n f.write(\"test\")\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=True, require_empty=require_empty)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=1)\n\n model = DummyModel()\n to_save = {\"model\": model}\n h(engine, to_save)\n\n fname = h.last_checkpoint\n ext = \".pt\"\n assert isinstance(fname, str)\n assert os.path.join(dirname, \"{}_{}_{}{}\".format(_PREFIX, \"model\", 1, ext)) == fname\n assert os.path.exists(fname)\n assert os.path.exists(previous_fname)\n loaded_objects = torch.load(fname)\n assert loaded_objects == model.state_dict()\n os.remove(fname)\n\n _test(\".txt\", require_empty=True)\n _test(\".pt\", require_empty=False)\n\n\ndef test_disk_saver_atomic(dirname):\n\n model = DummyModel()\n to_save_serializable = {\"model\": model}\n to_save_non_serializable = {\"model\": lambda x: x}\n\n def _test_existance(atomic, _to_save, expected):\n\n saver = DiskSaver(dirname, atomic=atomic, create_dir=False, require_empty=False)\n fname = \"test.pt\"\n try:\n with warnings.catch_warnings():\n # Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type\n # DummyModel. 
It won't be checked for correctness upon loading.\n warnings.simplefilter(\"ignore\", category=UserWarning)\n saver(_to_save, fname)\n except Exception:\n pass\n fp = os.path.join(saver.dirname, fname)\n assert os.path.exists(fp) == expected\n if expected:\n saver.remove(fname)\n\n _test_existance(atomic=False, _to_save=to_save_serializable, expected=True)\n _test_existance(atomic=False, _to_save=to_save_non_serializable, expected=True)\n\n _test_existance(atomic=True, _to_save=to_save_serializable, expected=True)\n _test_existance(atomic=True, _to_save=to_save_non_serializable, expected=False)\n\n\ndef test_last_k(dirname):\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n h(engine, to_save)\n\n for i in range(1, 9):\n engine.state.iteration = i\n h(engine, to_save)\n\n expected = [\"{}_{}_{}.pt\".format(_PREFIX, \"model\", i) for i in [7, 8]]\n\n assert sorted(os.listdir(dirname)) == expected, \"{} vs {}\".format(sorted(os.listdir(dirname)), expected)\n\n\ndef test_disabled_n_saved(dirname):\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=None)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n\n num_iters = 100\n for i in range(num_iters):\n engine.state.iteration = i\n h(engine, to_save)\n\n saved_files = sorted(os.listdir(dirname))\n assert len(saved_files) == num_iters, \"{}\".format(saved_files)\n\n expected = sorted([\"{}_{}_{}.pt\".format(_PREFIX, \"model\", i) for i in range(num_iters)])\n assert saved_files == expected, \"{} vs {}\".format(saved_files, expected)\n\n\ndef test_best_k(dirname):\n scores = iter([1.2, -2.0, 3.1, -4.0])\n\n def score_function(_):\n return next(scores)\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)\n\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n for _ in range(4):\n h(engine, to_save)\n\n expected = [\"{}_{}_{:.4f}.pt\".format(_PREFIX, \"model\", i) for i in [1.2, 3.1]]\n\n assert sorted(os.listdir(dirname)) == expected\n\n\ndef test_best_k_with_suffix(dirname):\n scores = [0.3456789, 0.1234, 0.4567, 0.134567]\n scores_iter = iter(scores)\n\n def score_function(engine):\n return next(scores_iter)\n\n h = ModelCheckpoint(\n dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function, score_name=\"val_loss\"\n )\n\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n for _ in range(4):\n engine.state.epoch += 1\n h(engine, to_save)\n\n expected = [\"{}_{}_val_loss={:.4}.pt\".format(_PREFIX, \"model\", scores[e - 1]) for e in [1, 3]]\n\n assert sorted(os.listdir(dirname)) == expected\n\n\ndef test_removes_each_score_at_most_once(dirname):\n scores = [0, 1, 1, 2, 3]\n scores_iter = iter(scores)\n\n def score_function(_):\n return next(scores_iter)\n\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)\n\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n model = DummyModel()\n to_save = {\"model\": model}\n for _ in range(len(scores)):\n h(engine, to_save)\n\n # If a score was removed multiple times, the code above would have raise a\n # FileNotFoundError. 
So this just tests the absence of such a failure\n # without futher assertions.\n\n\ndef test_with_engine(dirname):\n def update_fn(_1, _2):\n pass\n\n name = \"model\"\n engine = Engine(update_fn)\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)\n\n model = DummyModel()\n to_save = {\"model\": model}\n engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)\n engine.run([0], max_epochs=4)\n\n expected = [\"{}_{}_{}.pt\".format(_PREFIX, name, i) for i in [3, 4]]\n\n assert sorted(os.listdir(dirname)) == expected\n\n\ndef test_with_state_dict(dirname):\n def update_fn(_1, _2):\n pass\n\n engine = Engine(update_fn)\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n\n model = DummyModel()\n to_save = {\"model\": model}\n engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)\n engine.run([0], max_epochs=4)\n\n saved_model = os.path.join(dirname, os.listdir(dirname)[0])\n load_model = torch.load(saved_model)\n\n assert not isinstance(load_model, DummyModel)\n assert isinstance(load_model, dict)\n\n model_state_dict = model.state_dict()\n loaded_model_state_dict = load_model\n for key in model_state_dict.keys():\n assert key in loaded_model_state_dict\n\n model_value = model_state_dict[key]\n loaded_model_value = loaded_model_state_dict[key]\n\n assert model_value.numpy() == loaded_model_value.numpy()\n\n\ndef test_valid_state_dict_save(dirname):\n model = DummyModel()\n h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=0)\n\n to_save = {\"name\": 42}\n with pytest.raises(TypeError, match=r\"should have `state_dict` method\"):\n h(engine, to_save)\n to_save = {\"name\": model}\n try:\n h(engine, to_save)\n except ValueError:\n pytest.fail(\"Unexpected ValueError\")\n\n\ndef _test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname, on_zero_rank=False):\n\n torch.manual_seed(23)\n\n model = DummyModel().to(device)\n\n optim = torch.optim.SGD(model.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)\n\n def update_fn(engine, batch):\n x = torch.rand((4, 1)).to(device)\n optim.zero_grad()\n y = model(x)\n loss = y.pow(2.0).sum()\n loss.backward()\n if idist.has_xla_support:\n import torch_xla.core.xla_model as xm\n\n xm.optimizer_step(optim, barrier=True)\n else:\n optim.step()\n lr_scheduler.step()\n\n engine = Engine(update_fn)\n\n if (not on_zero_rank) or (on_zero_rank and idist.get_rank() == 0):\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=True, n_saved=1)\n\n engine.add_event_handler(\n Events.EPOCH_COMPLETED, handler, {\"model\": model, \"optimizer\": optim, \"lr_scheduler\": lr_scheduler}\n )\n\n engine.run([0], max_epochs=4)\n\n idist.barrier()\n\n saved_objects = sorted(os.listdir(dirname))\n # saved object is ['PREFIX_checkpoint_3.pt', ]\n saved_checkpoint = os.path.join(dirname, saved_objects[0])\n\n if idist.has_xla_support:\n device = \"cpu\"\n\n loaded_obj = torch.load(saved_checkpoint, map_location=device)\n for f in [\"model\", \"optimizer\", \"lr_scheduler\"]:\n assert f in loaded_obj\n loaded_model_state_dict = loaded_obj[\"model\"]\n loaded_optimizer_state_dict = loaded_obj[\"optimizer\"]\n loaded_lr_scheduler_state_dict = loaded_obj[\"lr_scheduler\"]\n\n assert isinstance(loaded_model_state_dict, dict)\n assert isinstance(loaded_optimizer_state_dict, dict)\n assert isinstance(loaded_lr_scheduler_state_dict, dict)\n\n # Specifically move 
device to CPU first\n model_state_dict = model.cpu().state_dict()\n for key in model_state_dict.keys():\n assert key in loaded_model_state_dict\n model_value = model_state_dict[key]\n loaded_model_value = loaded_model_state_dict[key]\n assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()\n\n optim_state_dict = optim.state_dict()\n for key in optim_state_dict.keys():\n assert key in loaded_optimizer_state_dict\n optim_value = optim_state_dict[key]\n loaded_optim_value = loaded_optimizer_state_dict[key]\n if idist.get_rank() == 0:\n assert optim_value == loaded_optim_value\n\n lr_scheduler_state_dict = lr_scheduler.state_dict()\n for key in lr_scheduler_state_dict.keys():\n assert key in loaded_lr_scheduler_state_dict\n lr_scheduler_value = lr_scheduler_state_dict[key]\n loaded_lr_scheduler_value = loaded_lr_scheduler_state_dict[key]\n assert lr_scheduler_value == loaded_lr_scheduler_value\n\n\ndef test_save_model_optimizer_lr_scheduler_with_state_dict(dirname):\n _test_save_model_optimizer_lr_scheduler_with_state_dict(\"cpu\", dirname)\n\n\ndef _test_save_model_optimizer_lr_scheduler_with_validation(device, dirname, on_zero_rank=False):\n torch.manual_seed(23)\n\n def _build_objects(acc_list):\n\n model = DummyModel().to(device)\n optim = torch.optim.SGD(model.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)\n\n def update_fn(engine, batch):\n x = torch.rand((4, 1)).to(device)\n optim.zero_grad()\n y = model(x)\n loss = y.pow(2.0).sum()\n loss.backward()\n if idist.has_xla_support:\n import torch_xla.core.xla_model as xm\n\n xm.optimizer_step(optim, barrier=True)\n else:\n optim.step()\n lr_scheduler.step()\n\n trainer = Engine(update_fn)\n\n evaluator = Engine(lambda e, b: None)\n acc_iter = iter(acc_list)\n\n @evaluator.on(Events.EPOCH_COMPLETED)\n def setup_result():\n evaluator.state.metrics[\"accuracy\"] = next(acc_iter)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def run_eval():\n evaluator.run([0, 1, 2])\n\n def score_function(engine):\n return engine.state.metrics[\"accuracy\"]\n\n save_handler = DiskSaver(dirname, create_dir=True, require_empty=False)\n early_stop = EarlyStopping(score_function=score_function, patience=2, trainer=trainer)\n evaluator.add_event_handler(Events.COMPLETED, early_stop)\n\n checkpointer = Checkpoint(\n {\n \"trainer\": trainer,\n \"model\": model,\n \"optim\": optim,\n \"lr_scheduler\": lr_scheduler,\n \"early_stop\": early_stop,\n },\n save_handler,\n include_self=True,\n global_step_transform=global_step_from_engine(trainer),\n )\n evaluator.add_event_handler(Events.COMPLETED, checkpointer)\n\n return trainer, evaluator, model, optim, lr_scheduler, early_stop, checkpointer\n\n trainer, evaluator, model, optim, scheduler, early, checkpointer = _build_objects([0.2, 0.3, 0.2])\n trainer.run([0], max_epochs=3)\n\n saved_objects = sorted(os.listdir(dirname))\n saved_checkpoint = os.path.join(dirname, saved_objects[0])\n\n loaded_obj = torch.load(saved_checkpoint, map_location=device)\n for f in [\"trainer\", \"model\", \"optim\", \"lr_scheduler\", \"early_stop\", \"checkpointer\"]:\n assert f in loaded_obj\n\n trainer2, evaluator2, model2, optim2, scheduler2, early2, checkpointer2 = _build_objects([0.1, 0.1, 0.1])\n Checkpoint.load_objects(\n {\n \"trainer\": trainer2,\n \"model\": model2,\n \"optim\": optim2,\n \"lr_scheduler\": scheduler2,\n \"early_stop\": early2,\n \"checkpointer\": checkpointer2,\n },\n loaded_obj,\n )\n assert checkpointer2.last_checkpoint == checkpointer.last_checkpoint\n\n 
model_state_dict = model.cpu().state_dict()\n loaded_model_state_dict = model2.cpu().state_dict()\n for key in model_state_dict.keys():\n assert key in loaded_model_state_dict\n model_value = model_state_dict[key]\n loaded_model_value = loaded_model_state_dict[key]\n assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()\n\n optim_state_dict = optim.state_dict()\n loaded_optimizer_state_dict = optim2.state_dict()\n # \"params\" contains tensor IDs, which are different\n del optim_state_dict[\"param_groups\"][0][\"params\"]\n del loaded_optimizer_state_dict[\"param_groups\"][0][\"params\"]\n for key in optim_state_dict.keys():\n assert key in loaded_optimizer_state_dict\n optim_value = optim_state_dict[key]\n loaded_optim_value = loaded_optimizer_state_dict[key]\n if idist.get_rank() == 0:\n assert optim_value == loaded_optim_value\n\n def _check_state_dict(original, loaded):\n original_state_dict = original.state_dict()\n loaded_state_dict = loaded.state_dict()\n for key in original_state_dict.keys():\n assert key in loaded_state_dict\n original_value = original_state_dict[key]\n loaded_value = loaded_state_dict[key]\n assert original_value == loaded_value\n\n _check_state_dict(trainer, trainer2)\n _check_state_dict(scheduler, scheduler2)\n _check_state_dict(early, early2)\n _check_state_dict(checkpointer, checkpointer2)\n\n trainer2.run([0], max_epochs=6)\n\n # early stopping should have triggered\n assert trainer2.state.epoch == 4\n\n # If Checkpoint's state was restored correctly, it should continue to respect n_saved\n # and delete old checkpoints, and have the correct last_checkpoint.\n assert os.listdir(dirname) == [\"checkpoint_4.pt\"]\n assert checkpointer2.last_checkpoint == \"checkpoint_4.pt\"\n\n\ndef test_save_model_optimizer_lr_scheduler_with_validation(dirname):\n _test_save_model_optimizer_lr_scheduler_with_validation(\"cpu\", dirname)\n\n\ndef test_checkpoint_load_objects():\n\n with pytest.raises(TypeError, match=r\"Argument checkpoint should be a dictionary\"):\n Checkpoint.load_objects({}, [])\n\n with pytest.raises(TypeError, match=r\"should have `load_state_dict` method\"):\n Checkpoint.load_objects({\"a\": None}, {\"a\": None})\n\n model = DummyModel()\n to_load = {\"model\": model, \"another_model\": model}\n\n with pytest.raises(ValueError, match=r\"from `to_load` is not found in the checkpoint\"):\n Checkpoint.load_objects(to_load, {})\n\n model = DummyModel()\n to_load = {\"model\": model}\n model2 = DummyModel()\n\n chkpt = {\"model\": model2.state_dict()}\n Checkpoint.load_objects(to_load, chkpt)\n assert model.state_dict() == model2.state_dict()\n\n\ndef test_checkpoint_load_objects_from_saved_file(dirname):\n def _get_single_obj_to_save():\n model = DummyModel()\n to_save = {\"model\": model}\n return to_save\n\n def _get_multiple_objs_to_save():\n model = DummyModel()\n optim = torch.optim.SGD(model.parameters(), lr=0.001)\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)\n to_save = {\"model\": model, \"optimizer\": optim, \"lr_scheduler\": lr_scheduler}\n return to_save\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n # case: multiple objects\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n to_save = _get_multiple_objs_to_save()\n handler(trainer, to_save)\n fname = handler.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n 
Checkpoint.load_objects(to_save, loaded_objects)\n os.remove(fname)\n\n # case: saved multiple objects, loaded single object\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n to_save = _get_multiple_objs_to_save()\n handler(trainer, to_save)\n fname = handler.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n to_load = {\"model\": to_save[\"model\"]}\n Checkpoint.load_objects(to_load, loaded_objects)\n os.remove(fname)\n\n # case: single object\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n to_save = _get_single_obj_to_save()\n handler(trainer, to_save)\n fname = handler.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n Checkpoint.load_objects(to_save, loaded_objects)\n\n\ndef test_load_checkpoint_with_different_num_classes(dirname):\n model = DummyPretrainedModel()\n to_save_single_object = {\"model\": model}\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)\n handler(trainer, to_save_single_object)\n\n fname = handler.last_checkpoint\n loaded_checkpoint = torch.load(fname)\n\n to_load_single_object = {\"pretrained_features\": model.features}\n\n with pytest.raises(RuntimeError):\n Checkpoint.load_objects(to_load_single_object, loaded_checkpoint)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n Checkpoint.load_objects(to_load_single_object, loaded_checkpoint, strict=False, blah=\"blah\")\n\n loaded_weights = to_load_single_object[\"pretrained_features\"].state_dict()[\"weight\"]\n\n assert torch.all(model.state_dict()[\"features.weight\"].eq(loaded_weights))\n\n\ndef test_disksaver_wrong_input(dirname):\n\n with pytest.raises(ValueError, match=r\"Directory path '\\S+' is not found\"):\n DiskSaver(\"/tmp/non-existing-folder\", create_dir=False)\n\n def _test(ext):\n previous_fname = os.path.join(dirname, \"{}_{}_{}{}\".format(_PREFIX, \"obj\", 1, ext))\n with open(previous_fname, \"w\") as f:\n f.write(\"test\")\n\n with pytest.raises(ValueError, match=r\"with extension '.pt' are already present\"):\n DiskSaver(dirname, require_empty=True)\n\n _test(\".pt\")\n\n\ndef _test_checkpoint_with_ddp(device):\n torch.manual_seed(0)\n\n model = DummyModel().to(device)\n device_ids = (\n None if \"cpu\" in device.type else [device,]\n )\n ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)\n to_save = {\"model\": ddp_model}\n\n save_handler = MagicMock(spec=BaseSaveHandler)\n checkpointer = Checkpoint(to_save, save_handler=save_handler)\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n checkpointer(trainer)\n assert save_handler.call_count == 1\n metadata = {\"basename\": \"model\", \"score_name\": None, \"priority\": 0}\n save_handler.assert_called_with(model.state_dict(), \"model_0.pt\", metadata)\n\n\ndef _test_checkpoint_load_objects_ddp(device):\n model = DummyModel().to(device)\n device_ids = (\n None if \"cpu\" in device.type else [device,]\n )\n ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)\n opt = torch.optim.SGD(ddp_model.parameters(), lr=0.01)\n\n # single object:\n to_load = {\"model\": ddp_model}\n checkpoint = ddp_model.module.state_dict()\n 
Checkpoint.load_objects(to_load, checkpoint)\n\n # multiple objects:\n to_load = {\"model\": ddp_model, \"opt\": opt}\n checkpoint = {\"model\": ddp_model.module.state_dict(), \"opt\": opt.state_dict()}\n Checkpoint.load_objects(to_load, checkpoint)\n\n\[email protected]\[email protected](not idist.has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_distrib_cpu(distributed_context_single_node_gloo, get_rank_zero_dirname):\n device = torch.device(\"cpu\")\n dirname = get_rank_zero_dirname()\n _test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, \"1\"))\n _test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, \"2\"), on_zero_rank=True)\n _test_checkpoint_with_ddp(device)\n _test_checkpoint_load_objects_ddp(device)\n\n\[email protected]\[email protected](not idist.has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_distrib_gpu(distributed_context_single_node_nccl, get_rank_zero_dirname):\n device = idist.device()\n dirname = get_rank_zero_dirname()\n _test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, \"1\"))\n _test_save_model_optimizer_lr_scheduler_with_state_dict(\"cpu\", os.path.join(dirname, \"2\"), on_zero_rank=True)\n _test_checkpoint_with_ddp(device=device)\n _test_checkpoint_load_objects_ddp(device=device)\n\n\ndef _test_tpu_saves_to_cpu(device, dirname):\n torch.manual_seed(0)\n\n h = ModelCheckpoint(dirname, _PREFIX)\n engine = Engine(lambda e, b: None)\n engine.state = State(epoch=0, iteration=1)\n\n model = DummyModel().to(device)\n to_save = {\"model\": model}\n\n h(engine, to_save)\n\n idist.barrier()\n\n fname = h.last_checkpoint\n assert isinstance(fname, str)\n assert os.path.join(dirname, _PREFIX) in fname\n assert os.path.exists(fname)\n loaded_objects = torch.load(fname)\n assert loaded_objects == model.cpu().state_dict()\n\n\[email protected]\[email protected](\"NUM_TPU_WORKERS\" in os.environ, reason=\"Skip if NUM_TPU_WORKERS is in env vars\")\[email protected](not idist.has_xla_support, reason=\"Not on TPU device\")\ndef test_distrib_single_device_xla(dirname):\n assert \"xla\" in idist.device().type\n _test_tpu_saves_to_cpu(idist.device(), os.path.join(dirname, \"1\"))\n _test_save_model_optimizer_lr_scheduler_with_state_dict(idist.device(), os.path.join(dirname, \"2\"))\n\n\ndef _test_tpu_saves_to_cpu_nprocs(index, dirname):\n device = idist.device()\n _test_tpu_saves_to_cpu(device, os.path.join(dirname, \"1\"))\n _test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, \"2\"))\n\n import time\n\n # hack to have all proc properly sync:\n time.sleep(1)\n\n\[email protected]\[email protected](\"NUM_TPU_WORKERS\" not in os.environ, reason=\"Skip if NUM_TPU_WORKERS is in env vars\")\[email protected](not idist.has_xla_support, reason=\"Not on TPU device\")\ndef test_distrib_single_device_xla_nprocs(xmp_executor, dirname):\n n = int(os.environ[\"NUM_TPU_WORKERS\"])\n xmp_executor(_test_tpu_saves_to_cpu_nprocs, args=(dirname,), nprocs=n)\n\n\ndef test_checkpoint_filename_pattern():\n def _test(\n to_save,\n filename_prefix=\"\",\n score_function=None,\n score_name=None,\n global_step_transform=None,\n filename_pattern=None,\n ):\n save_handler = MagicMock(spec=BaseSaveHandler)\n\n checkpointer = Checkpoint(\n to_save,\n save_handler=save_handler,\n filename_prefix=filename_prefix,\n score_function=score_function,\n 
score_name=score_name,\n global_step_transform=global_step_transform,\n filename_pattern=filename_pattern,\n )\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=12, iteration=203, score=0.9999)\n\n checkpointer(trainer)\n return checkpointer.last_checkpoint\n\n model = DummyModel()\n to_save = {\"model\": model}\n\n assert _test(to_save) == \"model_203.pt\"\n assert _test(to_save, \"best\") == \"best_model_203.pt\"\n assert _test(to_save, score_function=lambda e: e.state.score) == \"model_0.9999.pt\"\n\n res = _test(to_save, score_function=lambda e: e.state.score, global_step_transform=lambda e, _: e.state.epoch)\n assert res == \"model_12_0.9999.pt\"\n\n assert _test(to_save, score_function=lambda e: e.state.score, score_name=\"acc\") == \"model_acc=0.9999.pt\"\n\n res = _test(\n to_save,\n score_function=lambda e: e.state.score,\n score_name=\"acc\",\n global_step_transform=lambda e, _: e.state.epoch,\n )\n assert res == \"model_12_acc=0.9999.pt\"\n\n assert _test(to_save, \"best\", score_function=lambda e: e.state.score) == \"best_model_0.9999.pt\"\n\n res = _test(\n to_save, \"best\", score_function=lambda e: e.state.score, global_step_transform=lambda e, _: e.state.epoch\n )\n assert res == \"best_model_12_0.9999.pt\"\n\n res = _test(to_save, \"best\", score_function=lambda e: e.state.score, score_name=\"acc\")\n assert res == \"best_model_acc=0.9999.pt\"\n\n res = _test(\n to_save,\n \"best\",\n score_function=lambda e: e.state.score,\n score_name=\"acc\",\n global_step_transform=lambda e, _: e.state.epoch,\n )\n assert res == \"best_model_12_acc=0.9999.pt\"\n\n pattern = \"chk-{name}--{global_step}.{ext}\"\n assert _test(to_save, to_save, filename_pattern=pattern) == \"chk-model--203.pt\"\n pattern = \"chk-{filename_prefix}--{name}--{global_step}.{ext}\"\n assert _test(to_save, \"best\", filename_pattern=pattern) == \"chk-best--model--203.pt\"\n pattern = \"chk-{name}--{score}.{ext}\"\n assert _test(to_save, score_function=lambda e: e.state.score, filename_pattern=pattern) == \"chk-model--0.9999.pt\"\n pattern = \"{global_step}-{name}-{score}.chk.{ext}\"\n res = _test(\n to_save,\n score_function=lambda e: e.state.score,\n global_step_transform=lambda e, _: e.state.epoch,\n filename_pattern=pattern,\n )\n assert res == \"12-model-0.9999.chk.pt\"\n\n pattern = \"chk-{name}--{score_name}--{score}.{ext}\"\n res = _test(to_save, score_function=lambda e: e.state.score, score_name=\"acc\", filename_pattern=pattern)\n assert res == \"chk-model--acc--0.9999.pt\"\n\n pattern = \"chk-{name}-{global_step}-{score_name}-{score}.{ext}\"\n res = _test(\n to_save,\n score_function=lambda e: e.state.score,\n score_name=\"acc\",\n global_step_transform=lambda e, _: e.state.epoch,\n filename_pattern=pattern,\n )\n assert res == \"chk-model-12-acc-0.9999.pt\"\n\n pattern = \"{filename_prefix}-{name}-{score}.chk\"\n res = _test(to_save, \"best\", score_function=lambda e: e.state.score, filename_pattern=pattern)\n assert res == \"best-model-0.9999.chk\"\n\n pattern = \"resnet-{filename_prefix}-{name}-{global_step}-{score}.chk\"\n res = _test(\n to_save,\n \"best\",\n score_function=lambda e: e.state.score,\n global_step_transform=lambda e, _: e.state.epoch,\n filename_pattern=pattern,\n )\n assert res == \"resnet-best-model-12-0.9999.chk\"\n\n pattern = \"{filename_prefix}-{name}-{score_name}-{score}.chk\"\n res = _test(to_save, \"best\", score_function=lambda e: e.state.score, score_name=\"acc\", filename_pattern=pattern)\n assert res == \"best-model-acc-0.9999.chk\"\n\n pattern = 
\"{global_step}-{filename_prefix}-{name}-{score_name}-{score}\"\n res = _test(\n to_save,\n \"best\",\n score_function=lambda e: e.state.score,\n score_name=\"acc\",\n global_step_transform=lambda e, _: e.state.epoch,\n filename_pattern=pattern,\n )\n assert res == \"12-best-model-acc-0.9999\"\n\n pattern = \"SAVE:{name}-{score_name}-{score}.pth\"\n res = _test(\n to_save,\n \"best\",\n score_function=lambda e: e.state.score,\n score_name=\"acc\",\n global_step_transform=lambda e, _: e.state.epoch,\n filename_pattern=pattern,\n )\n\n assert res == \"SAVE:model-acc-0.9999.pth\"\n\n pattern = \"{global_step}-chk-{filename_prefix}-{name}-{score_name}-{score}.{ext}\"\n assert _test(to_save, filename_pattern=pattern) == \"203-chk--model-None-None.pt\"\n\n with pytest.raises(KeyError, match=r\"random_key\"):\n pattern = \"SAVE:{random_key}.{ext}\"\n _test(to_save, filename_pattern=pattern)\n\n\ndef test_setup_filename_pattern():\n # default filename pattern\n assert Checkpoint.setup_filename_pattern() == \"{filename_prefix}_{name}_{global_step}_{score_name}={score}.{ext}\"\n\n assert Checkpoint.setup_filename_pattern(False) == \"{name}_{global_step}_{score_name}={score}.{ext}\"\n assert Checkpoint.setup_filename_pattern(False, False, False) == \"{name}_{global_step}.{ext}\"\n assert Checkpoint.setup_filename_pattern(False, True, False) == \"{name}_{global_step}_{score}.{ext}\"\n assert Checkpoint.setup_filename_pattern(False, True, False, False) == \"{name}_{score}.{ext}\"\n assert Checkpoint.setup_filename_pattern(False, True, True, False) == \"{name}_{score_name}={score}.{ext}\"\n\n with pytest.raises(ValueError, match=r\"At least one of with_score and with_global_step should be True.\"):\n Checkpoint.setup_filename_pattern(False, False, False, False)\n\n with pytest.raises(ValueError, match=r\"If with_score_name is True, with_score should be also True\"):\n Checkpoint.setup_filename_pattern(True, False, True, True)\n\n\ndef _setup_checkpoint():\n save_handler = MagicMock(spec=BaseSaveHandler)\n model = DummyModel()\n to_save = {\"model\": model}\n\n checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)\n assert checkpointer.last_checkpoint is None\n\n trainer = Engine(lambda e, b: None)\n trainer.state = State(epoch=0, iteration=0)\n\n checkpointer(trainer)\n trainer.state.iteration = 10\n checkpointer(trainer)\n trainer.state.iteration = 20\n checkpointer(trainer)\n assert save_handler.call_count == 3\n return checkpointer\n\n\ndef test_checkpoint_state_dict():\n checkpointer = _setup_checkpoint()\n sd = checkpointer.state_dict()\n assert \"saved\" in sd\n assert isinstance(sd[\"saved\"], list) and len(sd[\"saved\"]) == len(checkpointer._saved)\n\n for saved_item, true_item in zip(sd[\"saved\"], checkpointer._saved):\n assert saved_item[0] == true_item.priority\n assert saved_item[1] == true_item.filename\n\n\ndef test_checkpoint_load_state_dict():\n true_checkpointer = _setup_checkpoint()\n\n save_handler = MagicMock(spec=BaseSaveHandler)\n model = DummyModel()\n to_save = {\"model\": model}\n checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)\n\n sd = {\"saved\": [(0, \"model_0.pt\"), (10, \"model_10.pt\"), (20, \"model_20.pt\")]}\n checkpointer.load_state_dict(sd)\n assert checkpointer._saved == true_checkpointer._saved\n"
]
| [
[
"torch.nn.Linear",
"torch.device",
"torch.rand",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.load",
"torch.nn.DataParallel"
]
]
|
falkben/jwql | [
"4f035a0f48d875cad6cc832431f1d8fda67e520e"
]
| [
"jwql/utils/preview_image.py"
]
| [
"#! /usr/bin/env python\n\n\"\"\"\nCreate a preview image from a fits file containing an observation.\n\nThis module creates and saves a \"preview image\" from a fits file that\ncontains a JWST observation. Data from the user-supplied ``extension``\nof the file are read in, along with the ``PIXELDQ`` extension if\npresent. For each integration in the exposure, the first group is\nsubtracted from the final group in order to create a difference image.\nThe lower and upper limits to be displayed are defined as the\n``clip_percent`` and ``(1. - clip_percent)`` percentile signals.\n``matplotlib`` is then used to display a linear- or log-stretched\nversion of the image, with accompanying colorbar. The image is then\nsaved.\n\nAuthors:\n--------\n\n - Bryan Hilbert\n\nUse:\n----\n\n This module can be imported as such:\n\n ::\n\n from jwql.preview_image.preview_image import PreviewImage\n im = PreviewImage(my_file, \"SCI\")\n im.clip_percent = 0.01\n im.scaling = 'log'\n im.output_format = 'jpg'\n im.make_image()\n\"\"\"\n\nimport logging\nimport os\nimport socket\n\nfrom astropy.io import fits\nimport numpy as np\n\nfrom jwql.utils import permissions\n\n# Use the 'Agg' backend to avoid invoking $DISPLAY\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\n\n# Only import jwst if not running from readthedocs\nif 'build' and 'project' not in socket.gethostname():\n from jwst.datamodels import dqflags\n\n\nclass PreviewImage():\n \"\"\"An object for generating and saving preview images, used by\n ``generate_preview_images``.\n\n Attributes\n ----------\n clip_percent : float\n The amount to sigma clip the input data by when scaling the\n preview image. Default is 0.01.\n cmap : str\n The colormap used by ``matplotlib`` in the preview image.\n Default value is ``viridis``.\n data : obj\n The data used to generate the preview image.\n dq : obj\n The DQ data used to generate the preview image.\n file : str\n The filename to generate the preview image from.\n output_format : str\n The format to which the preview image is saved. Options are\n ``jpg`` and ``thumb``\n preview_output_directory : str or None\n The output directory to which the preview image is saved.\n scaling : str\n The scaling used in the preview image. Default is ``log``.\n thumbnail_output_directory : str or None\n The output directory to which the thumbnail is saved.\n\n Methods\n -------\n difference_image(data)\n Create a difference image from the data\n find_limits(data, pixmap, clipperc)\n Find the min and max signal levels after clipping by\n ``clipperc``\n get_data(filename, ext)\n Read in data from the given ``filename`` and ``ext``\n make_figure(image, integration_number, min_value, max_value, scale, maxsize, thumbnail)\n Create the ``matplotlib`` figure\n make_image(max_img_size)\n Main function\n save_image(fname, thumbnail)\n Save the figure\n \"\"\"\n\n def __init__(self, filename, extension):\n \"\"\"Initialize the class.\n\n Parameters\n ----------\n filename : str\n Name of fits file containing data\n extension : str\n Extension name to be read in\n \"\"\"\n self.clip_percent = 0.01\n self.cmap = 'viridis'\n self.file = filename\n self.output_format = 'jpg'\n self.preview_output_directory = None\n self.scaling = 'log'\n self.thumbnail_output_directory = None\n\n # Read in file\n self.data, self.dq = self.get_data(self.file, extension)\n\n def difference_image(self, data):\n \"\"\"\n Create a difference image from the data. 
Use last group minus\n first group in order to maximize signal to noise. With 4D\n input, make a separate difference image for each integration.\n\n Parameters\n ----------\n data : obj\n 4D ``numpy`` ``ndarray`` array of floats\n\n Returns\n -------\n result : obj\n 3D ``numpy`` ``ndarray`` containing the difference image(s)\n from the input exposure\n \"\"\"\n return data[:, -1, :, :] - data[:, 0, :, :]\n\n def find_limits(self, data, pixmap, clipperc):\n \"\"\"\n Find the minimum and maximum signal levels after clipping the\n top and bottom ``clipperc`` of the pixels.\n\n Parameters\n ----------\n data : obj\n 2D numpy ndarray of floats\n pixmap : obj\n 2D numpy ndarray boolean array of science pixel locations\n (``True`` for science pixels, ``False`` for non-science\n pixels)\n clipperc : float\n Fraction of top and bottom signal levels to clip (e.g. 0.01\n means to clip brightest and dimmest 1% of pixels)\n\n Returns\n -------\n results : tuple\n Tuple of floats, minimum and maximum signal levels\n \"\"\"\n nelem = np.sum(pixmap)\n numclip = np.int(clipperc * nelem)\n sorted = np.sort(data[pixmap], axis=None)\n minval = sorted[numclip]\n maxval = sorted[-numclip - 1]\n return (minval, maxval)\n\n def get_data(self, filename, ext):\n \"\"\"\n Read in the data from the given file and extension. Also find\n how many rows/cols of reference pixels are present.\n\n Parameters\n ----------\n filename : str\n Name of fits file containing data\n ext : str\n Extension name to be read in\n\n Returns\n -------\n data : obj\n Science data from file. A 2-, 3-, or 4D numpy ndarray\n dq : obj\n 2D ``ndarray`` boolean map of reference pixels. Science\n pixels flagged as ``True`` and non-science pixels are\n ``False``\n \"\"\"\n if os.path.isfile(filename):\n extnames = []\n with fits.open(filename) as hdulist:\n for exten in hdulist:\n try:\n extnames.append(exten.header['EXTNAME'])\n except:\n pass\n if ext in extnames:\n dimensions = len(hdulist[ext].data.shape)\n if dimensions == 4:\n data = hdulist[ext].data[:, [0, -1], :, :].astype(np.float)\n else:\n data = hdulist[ext].data.astype(np.float)\n else:\n raise ValueError('WARNING: no {} extension in {}!'.format(ext, filename))\n\n if 'PIXELDQ' in extnames:\n dq = hdulist['PIXELDQ'].data\n dq = (dq & dqflags.pixel['NON_SCIENCE'] == 0)\n else:\n yd, xd = data.shape[-2:]\n dq = np.ones((yd, xd), dtype=\"bool\")\n\n # Collect information on aperture location within the\n # full detector. 
This is needed for mosaicking NIRCam\n # detectors later.\n try:\n self.xstart = hdulist[0].header['SUBSTRT1']\n self.ystart = hdulist[0].header['SUBSTRT2']\n self.xlen = hdulist[0].header['SUBSIZE1']\n self.ylen = hdulist[0].header['SUBSIZE2']\n except KeyError:\n logging.warning('SUBSTR and SUBSIZE header keywords not found')\n\n else:\n raise FileNotFoundError('WARNING: {} does not exist!'.format(filename))\n\n return data, dq\n\n def make_figure(self, image, integration_number, min_value, max_value,\n scale, maxsize=8, thumbnail=False):\n \"\"\"\n Create the matplotlib figure of the image\n\n Parameters\n ----------\n image : obj\n 2D ``numpy`` ``ndarray`` of floats\n\n integration_number : int\n Integration number within exposure\n\n min_value : float\n Minimum value for display\n\n max_value : float\n Maximum value for display\n\n scale : str\n Image scaling (``log``, ``linear``)\n\n maxsize : int\n Size of the longest dimension of the output figure (inches)\n\n thumbnail : bool\n True to create a thumbnail image, False to create the full\n preview image\n\n Returns\n -------\n result : obj\n Matplotlib Figure object\n \"\"\"\n\n # Check the input scaling\n if scale not in ['linear', 'log']:\n raise ValueError('WARNING: scaling option {} not supported.'.format(scale))\n\n # Set the figure size\n yd, xd = image.shape\n ratio = yd / xd\n if xd >= yd:\n xsize = maxsize\n ysize = maxsize * ratio\n else:\n ysize = maxsize\n xsize = maxsize / ratio\n\n if scale == 'log':\n\n # Shift data so everything is positive\n shiftdata = image - min_value + 1\n shiftmin = 1\n shiftmax = max_value - min_value + 1\n\n # If making a thumbnail, make a figure with no axes\n if thumbnail:\n fig = plt.imshow(shiftdata,\n norm=colors.LogNorm(vmin=shiftmin,\n vmax=shiftmax),\n cmap=self.cmap)\n # Invert y axis\n plt.gca().invert_yaxis()\n\n plt.axis('off')\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n\n # If preview image, add axes and colorbars\n else:\n fig, ax = plt.subplots(figsize=(xsize, ysize))\n cax = ax.imshow(shiftdata,\n norm=colors.LogNorm(vmin=shiftmin,\n vmax=shiftmax),\n cmap=self.cmap)\n # Invert y axis\n plt.gca().invert_yaxis()\n\n # Add colorbar, with original data values\n tickvals = np.logspace(np.log10(shiftmin), np.log10(shiftmax), 5)\n tlabelflt = tickvals + min_value - 1\n\n # Adjust the number of digits after the decimal point\n # in the colorbar labels based on the signal range\n delta = tlabelflt[-1] - tlabelflt[0]\n if delta >= 100:\n dig = 0\n elif ((delta < 100) & (delta >= 10)):\n dig = 1\n elif ((delta < 10) & (delta >= 1)):\n dig = 2\n elif delta < 1:\n dig = 3\n format_string = \"%.{}f\".format(dig)\n tlabelstr = [format_string % number for number in tlabelflt]\n cbar = fig.colorbar(cax, ticks=tickvals)\n cbar.ax.set_yticklabels(tlabelstr)\n cbar.ax.tick_params(labelsize=maxsize * 5. / 4)\n ax.set_xlabel('Pixels', fontsize=maxsize * 5. / 4)\n ax.set_ylabel('Pixels', fontsize=maxsize * 5. / 4)\n ax.tick_params(labelsize=maxsize)\n plt.rcParams.update({'axes.titlesize': 'small'})\n plt.rcParams.update({'font.size': maxsize * 5. / 4})\n plt.rcParams.update({'axes.labelsize': maxsize * 5. / 4})\n plt.rcParams.update({'ytick.labelsize': maxsize * 5. / 4})\n plt.rcParams.update({'xtick.labelsize': maxsize * 5. 
/ 4})\n\n elif scale == 'linear':\n fig, ax = plt.subplots(figsize=(xsize, ysize))\n cax = ax.imshow(image, clim=(min_value, max_value), cmap=self.cmap)\n\n if not thumbnail:\n cbar = fig.colorbar(cax)\n ax.set_xlabel('Pixels')\n ax.set_ylabel('Pixels')\n\n # If preview image, set a title\n if not thumbnail:\n filename = os.path.split(self.file)[-1]\n ax.set_title(filename + ' Int: {}'.format(np.int(integration_number)))\n\n def make_image(self, max_img_size=8):\n \"\"\"The main function of the ``PreviewImage`` class.\"\"\"\n\n shape = self.data.shape\n\n if len(shape) == 4:\n # Create difference image(s)\n diff_img = self.difference_image(self.data)\n elif len(shape) < 4:\n diff_img = self.data\n\n # If there are multiple integrations in the file,\n # work on one integration at a time from here onwards\n ndim = len(diff_img.shape)\n if ndim == 2:\n diff_img = np.expand_dims(diff_img, axis=0)\n nint, ny, nx = diff_img.shape\n\n for i in range(nint):\n frame = diff_img[i, :, :]\n\n # Find signal limits for the display\n minval, maxval = self.find_limits(frame, self.dq,\n self.clip_percent)\n\n # Create preview image matplotlib object\n indir, infile = os.path.split(self.file)\n suffix = '_integ{}.{}'.format(i, self.output_format)\n if self.preview_output_directory is None:\n outdir = indir\n else:\n outdir = self.preview_output_directory\n outfile = os.path.join(outdir, infile.split('.')[0] + suffix)\n self.make_figure(frame, i, minval, maxval, self.scaling.lower(),\n maxsize=max_img_size, thumbnail=False)\n self.save_image(outfile, thumbnail=False)\n plt.close()\n\n # Create thumbnail image matplotlib object\n if self.thumbnail_output_directory is None:\n outdir = indir\n else:\n outdir = self.thumbnail_output_directory\n outfile = os.path.join(outdir, infile.split('.')[0] + suffix)\n self.make_figure(frame, i, minval, maxval, self.scaling.lower(),\n maxsize=max_img_size, thumbnail=True)\n self.save_image(outfile, thumbnail=True)\n plt.close()\n\n def save_image(self, fname, thumbnail=False):\n \"\"\"\n Save an image in the requested output format and sets the\n appropriate permissions\n\n Parameters\n ----------\n image : obj\n A ``matplotlib`` figure object\n\n fname : str\n Output filename\n\n thumbnail : bool\n True if saving a thumbnail image, false for the full\n preview image.\n \"\"\"\n\n plt.savefig(fname, bbox_inches='tight', pad_inches=0)\n permissions.set_permissions(fname)\n\n # If the image is a thumbnail, rename to '.thumb'\n if thumbnail:\n thumb_fname = fname.replace('.jpg', '.thumb')\n os.rename(fname, thumb_fname)\n logging.info('Saved image to {}'.format(thumb_fname))\n else:\n logging.info('Saved image to {}'.format(fname))\n"
]
| [
[
"matplotlib.use",
"matplotlib.pyplot.rcParams.update",
"numpy.int",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.gca",
"numpy.sum",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.sort",
"matplotlib.pyplot.axis",
"numpy.log10",
"numpy.expand_dims"
]
]
|
uber-research/metropolis-hastings-gans | [
"41857c9d51da07e87f1de0c7443acdecb8c2c59f"
]
| [
"mhgan/contrib/dcgan/dcgan_loader.py"
]
| [
"# Modifications Copyright (c) 2018 Uber Technologies, Inc.\nfrom __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nfrom dcgan import gan_trainer\n\nSEED_MAX = 2**32 - 1\n\n\ndef get_opts(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw | fake') # noqa: E402\n parser.add_argument('--dataroot', required=True, help='path to dataset')\n parser.add_argument('--workers', type=int, default=2, help='number of data loading workers') # noqa: E402\n parser.add_argument('--batchSize', type=int, default=64, help='input batch size') # noqa: E402\n parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network') # noqa: E402\n parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector') # noqa: E402\n parser.add_argument('--ngf', type=int, default=64)\n parser.add_argument('--ndf', type=int, default=64)\n parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for') # noqa: E402\n parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002') # noqa: E402\n parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5') # noqa: E402\n parser.add_argument('--cuda', action='store_true', help='enables cuda')\n parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use') # noqa: E402\n parser.add_argument('--netG', default='', help='path to netG (to continue training)') # noqa: E402\n parser.add_argument('--netD', default='', help='path to netD (to continue training)') # noqa: E402\n parser.add_argument('--outf', required=True, help='folder to output images and model checkpoints') # noqa: E402\n parser.add_argument('--manualSeed', required=True, type=int, help='manual seed') # noqa: E402\n\n opt = parser.parse_args(args=args)\n\n try:\n os.makedirs(opt.outf)\n except OSError:\n pass\n\n return opt\n\n\ndef get_data_loader(dataset, dataroot, workers, image_size, batch_size):\n if dataset in ['imagenet', 'folder', 'lfw']:\n # folder dataset\n dataset = dset.ImageFolder(root=dataroot,\n transform=transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5)),\n ]))\n elif dataset == 'lsun':\n dataset = dset.LSUN(root=dataroot, classes=['bedroom_train'],\n transform=transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5)),\n ]))\n elif dataset == 'cifar10':\n dataset = dset.CIFAR10(root=dataroot, download=True,\n transform=transforms.Compose([\n transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5)),\n ]))\n elif dataset == 'mnist':\n dataset = dset.MNIST(root=dataroot, train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5)),\n ]))\n elif dataset == 'fake':\n dataset = dset.FakeData(image_size=(3, image_size, image_size),\n transform=transforms.ToTensor())\n else:\n assert False\n assert 
dataset\n\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=True,\n num_workers=int(workers))\n return data_loader\n\n\ndef main():\n opt = get_opts()\n\n # Set all random seeds: avoid correlated streams ==> must use diff seeds.\n # Note: we are not setting np seed since this appears not to use numpy,\n # but that could create reprod issues if there is latent np.random use.\n random.seed(opt.manualSeed)\n torch.manual_seed(random.randint(0, SEED_MAX))\n if torch.cuda.is_available():\n torch.cuda.manual_seed(random.randint(0, SEED_MAX))\n\n # This is faster but worse for reprod.\n cudnn.benchmark = True\n\n data_loader = get_data_loader(opt.dataset, opt.dataroot, opt.workers,\n opt.imageSize, opt.batchSize)\n\n device = torch.device('cuda:0' if opt.cuda else 'cpu')\n T = gan_trainer(device=device, data_loader=data_loader,\n batch_size=opt.batchSize, nz=opt.nz, ngf=opt.ngf,\n ndf=opt.ndf, lr=opt.lr, beta1=opt.beta1, ngpu=opt.ngpu,\n netG_file=opt.netG, netD_file=opt.netD, outf=opt.outf)\n\n for net_G, net_D in T:\n print('epoch done.')\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"torch.device",
"torch.cuda.is_available"
]
]
|
andikarachman/Face-Recognition-App | [
"f3bb46e96c27e0f72e7b1123bf4b15a43c21f673"
]
| [
"webcam_face_recognition.py"
]
| [
"import face_recognition\nimport cv2\nimport numpy as np\nimport glob\nimport os\nimport logging\nimport datetime\n\n\nIMAGES_PATH = './images'\nCROPPED_IMAGES_PATH = './cropped_images'\nCAMERA_DEVICE_ID = 0\nMAX_DISTANCE = 0.6\n\n\ndef get_face_embeddings_from_image(image, convert_to_rgb=False):\n \"\"\"\n Take a raw image and run both the face detection and face embedding model on it\n \"\"\"\n \n # Convert from BGR to RGB if needed\n if convert_to_rgb:\n image = image[:, :, ::-1]\n \n # Run the face detection model to find face locations\n face_locations = face_recognition.face_locations(image)\n \n # Run the embedding model to get face embeddings for the supplied locations\n face_embeddings = face_recognition.face_encodings(image, face_locations)\n \n return face_locations, face_embeddings\n\n\ndef setup_database():\n \"\"\"\n Load reference images and create a database of their face encodings\n \"\"\"\n database = {}\n \n for filename in glob.glob(os.path.join(IMAGES_PATH, '*.png')):\n # Load image\n image_rgb = face_recognition.load_image_file(filename)\n \n # Use the name in the filename as the identity key\n identity = os.path.splitext(os.path.basename(filename))[0]\n \n # Get the face encoding and link it to the identity\n locations, encodings = get_face_embeddings_from_image(image_rgb)\n database[identity] = encodings[0]\n \n return database\n\n\ndef paint_detected_face_on_image(frame, location, name=None):\n \"\"\"\n Paint a rectangle around the face and write the name\n \"\"\"\n # unpack the coordinates from the location tuple\n top, right, bottom, left = location\n \n if name is None:\n name = 'Unknown'\n color = (0, 0, 255) # red for unrecognized face\n else:\n color = (0, 128, 0) # dark green for recognized face\n \n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n \n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom + 15), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom + 12), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255), 1)\n\n\ndef click_event(event, x, y, flags, params):\n \"\"\"\n Crop an image based on the clicked detected face\n \"\"\"\n\n # event is triggered with a mouse click\n if event == cv2.EVENT_LBUTTONUP:\n for location in face_locations:\n\n # unpack the coordinates from the location tuple\n top, right, bottom, left = location\n if (top < y < bottom) and (left < x < right):\n frame_copy = np.copy(frame)\n roi = frame_copy[top:bottom, left:right]\n\n # give a unique name for the cropped image\n currentDT = datetime.datetime.now()\n cropped_name = os.path.join(CROPPED_IMAGES_PATH, loc_name_dict[location] + '_' + str(currentDT) + '.png')\n\n # save the cropped image\n cv2.imwrite(cropped_name, roi)\n\n # show the cropped image\n crop = cv2.imread(cropped_name)\n cv2.imshow('cropped image', crop)\n\n # re-run the run_face_recognition function\n run_face_recognition(database)\n\n\ndef run_face_recognition(database):\n \"\"\"\n Start the face recognition via webcam\n \"\"\"\n \n global face_locations\n global frame\n global name\n global loc_name_dict\n\n # Open a connection to the camera\n video_capture = cv2.VideoCapture(CAMERA_DEVICE_ID)\n \n # The face_recognition library uses keys and values of your database separately\n known_face_encodings = list(database.values())\n known_face_names = list(database.keys())\n\n \n while video_capture.isOpened():\n \n # Capture frame-by-frame\n ret, frame = video_capture.read()\n frame = cv2.resize(frame, None, fx=0.5, 
fy=0.5)\n \n if not ret:\n logging.error(\"Could not read frame from camera. Stopping video capture.\")\n break\n \n # Run detection and embedding models\n face_locations, face_encodings = get_face_embeddings_from_image(frame, convert_to_rgb=True)\n\n # Build a dictionary to pair the location with the name of the detected face \n loc_name_dict = dict()\n \n # Loop through each face in this frame of video and see if there's a match\n for location, face_encoding in zip(face_locations, face_encodings):\n \n # get the distances from this encoding to those of all reference images\n distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n \n # select the closest match (smallest distance) if it's below the threshold value\n if np.any(distances <= MAX_DISTANCE):\n best_match_idx = np.argmin(distances)\n name = known_face_names[best_match_idx]\n else:\n name = None\n \n # Pair the location with the name of the detected face inside a dictionary\n loc_name_dict[location] = name\n \n # show recognition info on the image\n paint_detected_face_on_image(frame, location, name)\n \n # Display the resulting image\n cv2.imshow('frame', frame)\n\n # Crop image if triggered by a mouse click\n cv2.setMouseCallback('frame', click_event)\n \n # Hit 'q' on the keyboard to stop the loop\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n # When everything done, release the capture\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndatabase = setup_database()\nrun_face_recognition(database)"
]
| [
[
"numpy.any",
"numpy.copy",
"numpy.argmin"
]
]
|
yoyonel/twint | [
"bdfd267ff12e8064c37821a814312b23a2e5068c"
]
| [
"twint/storage/panda.py"
]
| [
"from time import strftime, localtime\nimport pandas as pd\nimport warnings\nfrom .elasticsearch import hour\n\nTweets_df = None\nFollow_df = None\nUser_df = None\n\n_object_blocks = {\n \"tweet\": [],\n \"user\": [],\n \"following\": [],\n \"followers\": []\n}\n\nweekdays = {\n \"Monday\": 1,\n \"Tuesday\": 2,\n \"Wednesday\": 3,\n \"Thursday\": 4,\n \"Friday\": 5,\n \"Saturday\": 6,\n \"Sunday\": 7,\n }\n\n_type = \"\"\n\ndef _concat(df, type):\n if df is None:\n df = pd.DataFrame(_object_blocks[type])\n else:\n _df = pd.DataFrame(_object_blocks[type])\n df = pd.concat([df, _df], sort=True)\n return df\n\ndef _autoget(type):\n global Tweets_df\n global Follow_df\n global User_df\n\n if type == \"tweet\":\n Tweets_df = _concat(Tweets_df, type)\n if type == \"followers\" or type == \"following\":\n Follow_df = _concat(Follow_df, type)\n if type == \"user\":\n User_df = _concat(User_df, type)\n\n\ndef update(object, config):\n global _type\n\n #try:\n # _type = ((object.__class__.__name__ == \"tweet\")*\"tweet\" +\n # (object.__class__.__name__ == \"user\")*\"user\")\n #except AttributeError:\n # _type = config.Following*\"following\" + config.Followers*\"followers\"\n if object.__class__.__name__ == \"tweet\":\n _type = \"tweet\"\n elif object.__class__.__name__ == \"user\":\n _type = \"user\"\n elif object.__class__.__name__ == \"dict\":\n _type = config.Following*\"following\" + config.Followers*\"followers\"\n\n if _type == \"tweet\":\n Tweet = object\n day = weekdays[strftime(\"%A\", localtime(Tweet.datetime))]\n dt = f\"{object.datestamp} {object.timestamp}\"\n _data = {\n \"id\": str(Tweet.id),\n \"conversation_id\": Tweet.conversation_id,\n \"created_at\": Tweet.datetime,\n \"date\": dt,\n \"timezone\": Tweet.timezone,\n \"place\": Tweet.place,\n \"tweet\": Tweet.tweet,\n \"hashtags\": Tweet.hashtags,\n \"cashtags\": Tweet.cashtags,\n \"user_id\": Tweet.user_id,\n \"user_id_str\": Tweet.user_id_str,\n \"username\": Tweet.username,\n \"name\": Tweet.name,\n \"day\": day,\n \"hour\": hour(Tweet.datetime),\n \"link\": Tweet.link,\n \"retweet\": Tweet.retweet,\n \"nlikes\": int(Tweet.likes_count),\n \"nreplies\": int(Tweet.replies_count),\n \"nretweets\": int(Tweet.retweets_count),\n \"quote_url\": Tweet.quote_url,\n \"search\": str(config.Search),\n \"near\": Tweet.near,\n \"geo\": Tweet.geo,\n \"source\": Tweet.source,\n \"user_rt_id\": Tweet.user_rt_id,\n \"user_rt\": Tweet.user_rt,\n \"retweet_id\": Tweet.retweet_id,\n \"reply_to\": Tweet.reply_to,\n \"retweet_date\": Tweet.retweet_date\n }\n _object_blocks[_type].append(_data)\n elif _type == \"user\":\n user = object\n _data = {\n \"id\": user.id,\n \"name\": user.name,\n \"username\": user.username,\n \"bio\": user.bio,\n \"url\": user.url,\n \"join_datetime\": user.join_date + \" \" + user.join_time,\n \"join_date\": user.join_date,\n \"join_time\": user.join_time,\n \"tweets\": user.tweets,\n \"location\": user.location,\n \"following\": user.following,\n \"followers\": user.followers,\n \"likes\": user.likes,\n \"media\": user.media_count,\n \"private\": user.is_private,\n \"verified\": user.is_verified,\n \"avatar\": user.avatar,\n \"background_image\": user.background_image,\n }\n _object_blocks[_type].append(_data)\n elif _type == \"followers\" or _type == \"following\":\n _data = {\n config.Following*\"following\" + config.Followers*\"followers\" :\n {config.Username: object[_type]}\n }\n _object_blocks[_type] = _data\n else:\n print(\"Wrong type of object passed!\")\n\n\ndef clean():\n global Tweets_df\n global 
Follow_df\n global User_df\n _object_blocks[\"tweet\"].clear()\n _object_blocks[\"following\"].clear()\n _object_blocks[\"followers\"].clear()\n _object_blocks[\"user\"].clear()\n Tweets_df = None\n Follow_df = None\n User_df = None\n\ndef save(_filename, _dataframe, **options):\n if options.get(\"dataname\"):\n _dataname = options.get(\"dataname\")\n else:\n _dataname = \"twint\"\n\n if not options.get(\"type\"):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n _store = pd.HDFStore(_filename + \".h5\")\n _store[_dataname] = _dataframe\n _store.close()\n elif options.get(\"type\") == \"Pickle\":\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n _dataframe.to_pickle(_filename + \".pkl\")\n else:\n print(\"\"\"Please specify: filename, DataFrame, DataFrame name and type\n (HDF5, default, or Pickle)\"\"\")\n\ndef read(_filename, **options):\n if not options.get(\"dataname\"):\n _dataname = \"twint\"\n else:\n _dataname = options.get(\"dataname\")\n\n if not options.get(\"type\"):\n _store = pd.HDFStore(_filename + \".h5\")\n _df = _store[_dataname]\n return _df\n elif options.get(\"type\") == \"Pickle\":\n _df = pd.read_pickle(_filename + \".pkl\")\n return _df\n else:\n print(\"\"\"Please specify: DataFrame, DataFrame name (twint as default),\n filename and type (HDF5, default, or Pickle\"\"\")\n"
]
| [
[
"pandas.HDFStore",
"pandas.DataFrame",
"pandas.read_pickle",
"pandas.concat"
]
]
|
timgates42/statsmodels | [
"ab8ff09e3eb8c385214bd1575aa47b81bf53d584"
]
| [
"statsmodels/datasets/utils.py"
]
| [
"from statsmodels.compat.python import lrange\n\nfrom io import StringIO\nimport shutil\nfrom os import environ, makedirs\nfrom os.path import expanduser, exists, dirname, abspath, join\nfrom urllib.error import HTTPError, URLError\nfrom urllib.request import urlopen\nfrom urllib.parse import urljoin\n\nimport numpy as np\nfrom pandas import read_stata, read_csv, DataFrame, Series, Index\n\n\ndef webuse(data, baseurl='https://www.stata-press.com/data/r11/', as_df=True):\n \"\"\"\n Download and return an example dataset from Stata.\n\n Parameters\n ----------\n data : str\n Name of dataset to fetch.\n baseurl : str\n The base URL to the stata datasets.\n as_df : bool\n Deprecated. Always returns a DataFrame\n\n Returns\n -------\n dta : DataFrame\n A DataFrame containing the Stata dataset.\n\n Examples\n --------\n >>> dta = webuse('auto')\n\n Notes\n -----\n Make sure baseurl has trailing forward slash. Does not do any\n error checking in response URLs.\n \"\"\"\n url = urljoin(baseurl, data+'.dta')\n return read_stata(url)\n\n\nclass Dataset(dict):\n def __init__(self, **kw):\n # define some default attributes, so pylint can find them\n self.endog = None\n self.exog = None\n self.data = None\n self.names = None\n\n dict.__init__(self, kw)\n self.__dict__ = self\n # Some datasets have string variables. If you want a raw_data\n # attribute you must create this in the dataset's load function.\n try: # some datasets have string variables\n self.raw_data = self.data.astype(float)\n except:\n pass\n\n def __repr__(self):\n return str(self.__class__)\n\n\ndef process_pandas(data, endog_idx=0, exog_idx=None, index_idx=None):\n names = data.columns\n\n if isinstance(endog_idx, int):\n endog_name = names[endog_idx]\n endog = data[endog_name].copy()\n if exog_idx is None:\n exog = data.drop([endog_name], axis=1)\n else:\n exog = data[names[exog_idx]].copy()\n else:\n endog = data.loc[:, endog_idx].copy()\n endog_name = list(endog.columns)\n if exog_idx is None:\n exog = data.drop(endog_name, axis=1)\n elif isinstance(exog_idx, int):\n exog = data[names[exog_idx]].copy()\n else:\n exog = data[names[exog_idx]].copy()\n\n if index_idx is not None: # NOTE: will have to be improved for dates\n index = Index(data.iloc[:, index_idx])\n endog.index = index\n exog.index = index.copy()\n data = data.set_index(names[index_idx])\n\n exog_name = list(exog.columns)\n dataset = Dataset(data=data, names=list(names), endog=endog,\n exog=exog, endog_name=endog_name, exog_name=exog_name)\n return dataset\n\n\ndef _maybe_reset_index(data):\n \"\"\"\n All the Rdatasets have the integer row.labels from R if there is no\n real index. Strip this for a zero-based index\n \"\"\"\n if data.index.equals(Index(lrange(1, len(data) + 1))):\n data = data.reset_index(drop=True)\n return data\n\n\ndef _get_cache(cache):\n if cache is False:\n # do not do any caching or load from cache\n cache = None\n elif cache is True: # use default dir for cache\n cache = get_data_home(None)\n else:\n cache = get_data_home(cache)\n return cache\n\n\ndef _cache_it(data, cache_path):\n import zlib\n open(cache_path, \"wb\").write(zlib.compress(data))\n\n\ndef _open_cache(cache_path):\n import zlib\n data = zlib.decompress(open(cache_path, 'rb').read())\n # return as bytes object encoded in utf-8 for cross-compat of cached\n return data\n\n\ndef _urlopen_cached(url, cache):\n \"\"\"\n Tries to load data from cache location otherwise downloads it. 
If it\n downloads the data and cache is not None then it will put the downloaded\n data in the cache path.\n \"\"\"\n from_cache = False\n if cache is not None:\n file_name = url.split(\"://\")[-1].replace('/', ',')\n file_name = file_name.split('.')\n if len(file_name) > 1:\n file_name[-2] += '-v2'\n else:\n file_name[0] += '-v2'\n file_name = '.'.join(file_name) + \".zip\"\n cache_path = join(cache, file_name)\n try:\n data = _open_cache(cache_path)\n from_cache = True\n except:\n pass\n\n # not using the cache or did not find it in cache\n if not from_cache:\n data = urlopen(url, timeout=3).read()\n if cache is not None: # then put it in the cache\n _cache_it(data, cache_path)\n return data, from_cache\n\n\ndef _get_data(base_url, dataname, cache, extension=\"csv\"):\n url = base_url + (dataname + \".%s\") % extension\n try:\n data, from_cache = _urlopen_cached(url, cache)\n except HTTPError as err:\n if '404' in str(err):\n raise ValueError(\"Dataset %s was not found.\" % dataname)\n else:\n raise err\n\n data = data.decode('utf-8', 'strict')\n return StringIO(data), from_cache\n\n\ndef _get_dataset_meta(dataname, package, cache):\n # get the index, you'll probably want this cached because you have\n # to download info about all the data to get info about any of the data...\n index_url = (\"https://raw.githubusercontent.com/vincentarelbundock/\"\n \"Rdatasets/master/datasets.csv\")\n data, _ = _urlopen_cached(index_url, cache)\n data = data.decode('utf-8', 'strict')\n index = read_csv(StringIO(data))\n idx = np.logical_and(index.Item == dataname, index.Package == package)\n dataset_meta = index.loc[idx]\n return dataset_meta[\"Title\"].iloc[0]\n\n\ndef get_rdataset(dataname, package=\"datasets\", cache=False):\n \"\"\"download and return R dataset\n\n Parameters\n ----------\n dataname : str\n The name of the dataset you want to download\n package : str\n The package in which the dataset is found. The default is the core\n 'datasets' package.\n cache : bool or str\n If True, will download this data into the STATSMODELS_DATA folder.\n The default location is a folder called statsmodels_data in the\n user home folder. Otherwise, you can specify a path to a folder to\n use for caching the data. If False, the data will not be cached.\n\n Returns\n -------\n dataset : Dataset instance\n A `statsmodels.data.utils.Dataset` instance. This objects has\n attributes:\n\n * data - A pandas DataFrame containing the data\n * title - The dataset title\n * package - The package from which the data came\n * from_cache - Whether not cached data was retrieved\n * __doc__ - The verbatim R documentation.\n\n\n Notes\n -----\n If the R dataset has an integer index. This is reset to be zero-based.\n Otherwise the index is preserved. The caching facilities are dumb. That\n is, no download dates, e-tags, or otherwise identifying information\n is checked to see if the data should be downloaded again or not. 
If the\n dataset is in the cache, it's used.\n \"\"\"\n # NOTE: use raw github bc html site might not be most up to date\n data_base_url = (\"https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/\"\n \"master/csv/\"+package+\"/\")\n docs_base_url = (\"https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/\"\n \"master/doc/\"+package+\"/rst/\")\n cache = _get_cache(cache)\n data, from_cache = _get_data(data_base_url, dataname, cache)\n data = read_csv(data, index_col=0)\n data = _maybe_reset_index(data)\n\n title = _get_dataset_meta(dataname, package, cache)\n doc, _ = _get_data(docs_base_url, dataname, cache, \"rst\")\n\n return Dataset(data=data, __doc__=doc.read(), package=package, title=title,\n from_cache=from_cache)\n\n# The below function were taken from sklearn\n\n\ndef get_data_home(data_home=None):\n \"\"\"Return the path of the statsmodels data dir.\n\n This folder is used by some large dataset loaders to avoid\n downloading the data several times.\n\n By default the data dir is set to a folder named 'statsmodels_data'\n in the user home folder.\n\n Alternatively, it can be set by the 'STATSMODELS_DATA' environment\n variable or programatically by giving an explicit folder path. The\n '~' symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n \"\"\"\n if data_home is None:\n data_home = environ.get('STATSMODELS_DATA',\n join('~', 'statsmodels_data'))\n data_home = expanduser(data_home)\n if not exists(data_home):\n makedirs(data_home)\n return data_home\n\n\ndef clear_data_home(data_home=None):\n \"\"\"Delete all the content of the data home cache.\"\"\"\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)\n\n\ndef check_internet(url=None):\n \"\"\"Check if internet is available\"\"\"\n url = \"https://github.com\" if url is None else url\n try:\n urlopen(url)\n except URLError as err:\n return False\n return True\n\n\ndef strip_column_names(df):\n \"\"\"\n Remove leading and trailing single quotes\n\n Parameters\n ----------\n df : DataFrame\n DataFrame to process\n\n Returns\n -------\n df : DataFrame\n DataFrame with stripped column names\n\n Notes\n -----\n In-place modification\n \"\"\"\n columns = []\n for c in df:\n if c.startswith('\\'') and c.endswith('\\''):\n c = c[1:-1]\n elif c.startswith('\\''):\n c = c[1:]\n elif c.endswith('\\''):\n c = c[:-1]\n columns.append(c)\n df.columns = columns\n return df\n\n\ndef load_csv(base_file, csv_name, sep=',', convert_float=False):\n \"\"\"Standard simple csv loader\"\"\"\n filepath = dirname(abspath(base_file))\n filename = join(filepath,csv_name)\n engine = 'python' if sep != ',' else 'c'\n float_precision = {}\n if engine == 'c':\n float_precision = {'float_precision': 'high'}\n data = read_csv(filename, sep=sep, engine=engine, **float_precision)\n if convert_float:\n data = data.astype(float)\n return data\n\n\ndef as_numpy_dataset(ds, as_pandas=True, retain_index=False):\n \"\"\"Convert a pandas dataset to a NumPy dataset\"\"\"\n if as_pandas:\n return ds\n ds.data = ds.data.to_records(index=retain_index)\n for d in dir(ds):\n if d.startswith('_'):\n continue\n attr = getattr(ds, d)\n if isinstance(attr, (Series, DataFrame)):\n setattr(ds, d, np.asarray(attr))\n\n return ds\n"
]
| [
[
"pandas.Index",
"numpy.asarray",
"pandas.read_stata",
"numpy.logical_and",
"pandas.read_csv"
]
]
|
FernandoSBorges/netpyne | [
"e1a7adb56b94aa78f8461397319eb4e9754c2d75"
]
| [
"examples/sonata_300_cells/init.py"
]
| [
"\"\"\"\ninit.py\n\nInitial script to import, simulate and plot raster of SONATA example 300_cells\n\"\"\"\n\nfrom netpyne import sim\nfrom netpyne.conversion import sonataImport\nimport h5py\nimport json\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nrootFolder = '/u/salvadord/Documents/ISB/Models/sonata/examples/300_cells/'\noutFolder = '/u/salvadord/Documents/ISB/Models/netpyne_repo/examples/sonata_300_cells/'\nsonataConfigFile = rootFolder+'config.json'\n\n# Options\nimportSonataModel = 1\nsaveForGUI = 0\nsaveJsonPsection = 0\nsaveJsonConns = 0\nrunPlot = 1\ncompareRaster = 1\ncompareTraces = 0\nsaveSpikesToBMTK = 0\nplotSpikesUsingBMTK = 0\n\n# Improt SONATA model and instantiate in netpyne\nif importSonataModel:\n sonataImporter = sonataImport.SONATAImporter()\n sonataImporter.importNet(sonataConfigFile, replaceAxon=True, setdLNseg=True)\n\n\n# code to save network to json so can read from NetPyNE-UI\nif saveForGUI:\n sim.cfg.saveJson = True\n #for k,v in sim.net.params.popParams.items():\n # v['numCells'] = 20\n sim.cfg.saveDataInclude = ['netParams', 'net'] # 'simConfig', \n newCells = [c for c in sim.net.cells if c.gid not in sim.net.pops['external_virtual_100'].cellGids] # if c.gid == 1\n sim.net.cells = newCells\n del sim.net.pops['external_virtual_100']\n # remove axon\n for k in sim.net.params.cellParams:\n try:\n del sim.net.params.cellParams[k]['secs']['axon_0']\n del sim.net.params.cellParams[k]['secs']['axon_1']\n for c in sim.net.cells:\n del c.secs['axon_0']\n del c.secs['axon_1']\n except:\n pass\n # remove conns\n for c in sim.net.cells:\n c.conns = []\n # save\n sim.saveData(filename='sonata_300cells')\n\n\n# save json with psection\nif saveJsonPsection:\n import json\n data = {}\n remove = ['cell', 'regions','species', 'point_processes', 'hoc_internal_name', 'name']#, 'morphology']\n removeMorph = ['parent', 'trueparent']\n for icell, c in enumerate(sim.net.cells):\n try:\n data[icell] = {}\n for isec, sec in enumerate(c.secs.values()):\n name = str(sec['hObj'].name()).split('.')[-1]\n data[icell][name] = sec['hObj'].psection()\n for x in remove:\n del data[icell][name][x]\n for key in removeMorph:\n if key in data[icell][name]['morphology']:\n del data[icell][name]['morphology'][key]\n #data[icell][name]['morphology'][key] = str(data[icell][name]['morphology'][key])\n except:\n print('Error processing %d'%(icell))\n\n with open('300cells_secs_netpyne.json', 'w') as f:\n json.dump(data, f)\n\n\n# save json with psection\nif saveJsonConns:\n import json\n data = {}\n data_wrong = []\n \n from neuron import h\n conns = list(h.List('NetCon'))\n \n for conn in conns:\n try:\n preGid = conn.precell().gid\n postGid = conn.postcell().gid\n sec_loc = str(conn.postseg()).split('>.')[1]\n sec = sec_loc.split('(')[0]\n loc = sec_loc.split('(')[1][:-1]\n weight = conn.weight[0]\n delay = conn.delay\n synTau1 = conn.syn().tau1\n synTau2 = conn.syn().tau2\n\n data['%s_%s_%s_%s' % (str(preGid), str(postGid), sec, str(loc))] = [weight, delay, synTau1, synTau2]\n except:\n data_wrong.append([str(conn.precell()), str(conn.postseg())])\n\n with open('300cells_conns_netpyne.json', 'w') as f:\n json.dump(data, f)\n\n\n# run simulation and plot raster+traces\nif runPlot:\n sim.cfg.recordTraces = {'V_soma':{'sec':'soma_0','loc':0.5,'var':'v'}}\n sim.cfg.recordCells = range(9)\n sim.cfg.analysis['plotTraces'] = {} # needed for 9 cell example\n sim.cfg.cache_efficient = True\n \n sim.setupRecording()\n sim.simulate()\n includePops = [p for p in sim.net.pops if p not in 
['external_virtual_100']]\n fig = sim.analysis.plotRaster(include=includePops, spikeHist='subplot', spikeHistBin=10, figSize=(14, 8), dpi=300, saveFig='model_output_raster_axonv2_dl_300cells.png', marker='.', markerSize=3)\n data = {'spkt': list(sim.simData.spkt), 'spkid': list(sim.simData.spkid)}\n with open('300cells_spikes_netpyne.json', 'w') as f:\n json.dump(data, f)\n #fig = sim.analysis.plotTraces(figSize=(10,14), oneFigPer='trace', include=range(10), saveFig='model_output_traces_axonv2_dl_300cells.png')\n\n\n# Compare with SONATA data\nif compareRaster:\n # store netpyne spikes\n with open('300cells_spikes_netpyne.json', 'r') as f:\n d=json.load(f)\n netpyneSpkt = np.array(d['spkt'])\n netpyneSpkid = np.array(d['spkid'])\n\n # load spiks from bmtk HDF5\n dataFile=rootFolder+'output/spikes.h5'\n h5data = h5py.File(dataFile, 'r')\n bmtkSpkt = np.array(h5data['spikes']['timestamps']) \n bmtkSpkid = np.array(h5data['spikes']['gids']) \n\n # plot both spike times overlayed\n recordStep = sim.cfg.recordStep\n timeRange = [0, sim.cfg.duration]\n fontsiz=8\n ylim = [0,299]\n figSize = (10,6)\n fig = plt.figure(figsize=figSize) # Open a new figure\n\n plt.ylabel('Gid', fontsize=fontsiz)\n plt.scatter(netpyneSpkt, netpyneSpkid, s=1.5, color='red', label='NetPyNE')\n plt.scatter(bmtkSpkt, bmtkSpkid, s=0.5, color='green', label='BioNet') # linestyle=':'\n plt.xlabel('Time (ms)', fontsize=fontsiz)\n plt.xlim(timeRange)\n plt.legend(loc='upper right', bbox_to_anchor=(1.25, 1.0))\n plt.ylim(ylim)\n plt.ion()\n plt.tight_layout()\n plt.savefig(outFolder+'comparison_raster.png', dpi=300)\n plt.show()\n\nif compareTraces:\n # store netpyne traces\n netpyneTraces = []\n netpyneTracesList = []\n for c in sim.cfg.recordCells:\n netpyneTraces.append(np.array(sim.simData['V_soma']['cell_' + str(c)]))\n netpyneTracesList.append(list(sim.simData['V_soma']['cell_' + str(c)]))\n\n with open(outFolder+'netpyne_traces_300cells.json', 'w') as f:\n json.dump(netpyneTracesList, f)\n\n # load traces from bmtk HDF5\n dataFile=rootFolder+'output/membrane_potential.h5'\n h5data = h5py.File(dataFile, 'r')\n bmtkTraces = np.array(h5data['data']) # shape (30000, 9)\n\n # plot both traces overlayed\n recordStep = sim.cfg.recordStep\n timeRange = [0, sim.cfg.duration]\n fontsiz=8\n ylim = [-100, 40]\n figSize = (10,10)\n fig = plt.figure(figsize=figSize) # Open a new figure\n\n for gid in sim.cfg.recordCells:\n netpyneTrace = netpyneTraces[gid][int(timeRange[0] / recordStep):int(timeRange[1] / recordStep)]\n bmtkTrace = bmtkTraces[:,gid][int(timeRange[0]/recordStep):int(timeRange[1]/recordStep)]\n t = np.arange(timeRange[0], timeRange[1]+recordStep, recordStep)\n plt.subplot(len(sim.cfg.recordCells), 1, gid+1)\n plt.ylabel('V (mV)', fontsize=fontsiz)\n plt.plot(t[:len(netpyneTrace)], netpyneTrace, linewidth=1.5, color='red', label='Gid %d'%(int(gid))+', NetPyNE')\n plt.plot(t[:len(bmtkTrace)], bmtkTrace, linewidth=1.0, color='green', label='Gid %d'%(int(gid))+', BioNet') # linestyle=':'\n plt.xlabel('Time (ms)', fontsize=fontsiz)\n plt.xlim(timeRange)\n plt.ylim(ylim)\n plt.grid(True)\n plt.legend(loc='upper right', bbox_to_anchor=(1.25, 1.0))\n plt.ion()\n plt.tight_layout()\n plt.savefig(outFolder+'comparison_traces_270-280.png')\n plt.show()\n\n# save netpyne spikes to bmtk format\nif saveSpikesToBMTK:\n # load netpyne spikes\n with open('300cells_spikes_netpyne.json', 'r') as f:\n d=json.load(f)\n netpyneSpkt = list(d['spkt'])\n netpyneSpkid = list(d['spkid'])\n\n event_file = 'netpyne_spikes.h5'\n print('Resaving 
netpyne spike data to %s'%event_file)\n import tables # pytables for HDF5 support\n h5file=tables.open_file(event_file,mode='w')\n spike_grp = h5file.create_group(\"/\", 'spikes')\n gids = netpyneSpkid\n spiketimes = netpyneSpkt\n # for nml_q in events:\n # nml_pop, nml_index = _get_nml_pop_id(nml_q)\n # (sonata_node, sonata_node_id) = sr.nml_ids_vs_gids[nml_pop][nml_index]\n # for t in events[nml_q]:\n # gids.append(sonata_node_id)\n # spiketimes.append(t*1000.0)\n\n h5file.create_array(spike_grp, 'gids', gids)\n h5file.create_array(spike_grp, 'timestamps', spiketimes)\n\n h5file.close()\n\n\nif plotSpikesUsingBMTK:\n from bmtk.analyzer.spike_trains import raster_plot\n \n raster_plot(rootFolder + '/network/internal_nodes.h5', rootFolder + '/network/internal_node_types.csv', rootFolder + '/output/spikes.h5', group_key='node_type_id', title='Simulator: NEURON via BMTK', save_as=None, show=0)\n ax = plt.gcf().get_axes()[0]\n ax.get_legend().remove()\n plt.savefig('bmtk_300_cells_raster.png', dpi=300)\n \n raster_plot(rootFolder + '/network/internal_nodes.h5', rootFolder + '/network/internal_node_types.csv', 'netpyne_spikes.h5', group_key='node_type_id', title='Simulator: NEURON via NetPyNE', save_as=None, show=0)\n ax = plt.gcf().get_axes()[0]\n ax.get_legend().remove()\n plt.savefig('netpyne_300_cells_raster.png', dpi=300)"
]
| [
[
"numpy.array",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.scatter"
]
]
|
swischuk/operator_inference | [
"c495afb79d7d96f20d3ca882725238aad0c049d2"
]
| [
"src/rom_operator_inference/pre/_basis.py"
]
| [
"# pre/_basis.py\n\"\"\"Tools for basis computation and reduced-dimension selection.\"\"\"\n\n__all__ = [\n \"pod_basis\",\n \"svdval_decay\",\n \"cumulative_energy\",\n \"residual_energy\",\n \"projection_error\",\n ]\n\nimport numpy as np\nimport scipy.linalg as la\nimport scipy.sparse.linalg as spla\nimport sklearn.utils.extmath as sklmath\nimport matplotlib.pyplot as plt\n\n\n# Basis computation ===========================================================\ndef pod_basis(states, r=None, mode=\"dense\", return_W=False, **options):\n \"\"\"Compute the POD basis of rank r corresponding to the states.\n\n Parameters\n ----------\n states : (n, k) ndarray\n Matrix of k snapshots. Each column is a single snapshot of dimension n.\n r : int or None\n Number of POD basis vectors and singular values to compute.\n If None (default), compute the full SVD.\n mode : str\n Strategy to use for computing the truncated SVD of the states. Options:\n * \"dense\" (default): Use scipy.linalg.svd() to compute the SVD.\n May be inefficient for very large matrices.\n * \"sparse\": Use scipy.sparse.linalg.svds() to compute the SVD.\n This uses ARPACK for the eigensolver. Inefficient for non-sparse\n matrices; requires separate computations for full SVD.\n * \"randomized\": Compute an approximate SVD with a randomized approach\n using sklearn.utils.extmath.randomized_svd(). This gives faster\n results at the cost of some accuracy.\n return_W : bool\n If True, also return the first r *right* singular vectors.\n options\n Additional parameters for the SVD solver, which depends on `mode`:\n * \"dense\": scipy.linalg.svd()\n * \"sparse\": scipy.sparse.linalg.svds()\n * \"randomized\": sklearn.utils.extmath.randomized_svd()\n\n Returns\n -------\n basis : (n, r) ndarray\n First r POD basis vectors (left singular vectors).\n Each column is a single basis vector of dimension n.\n svdvals : (n,), (k,), or (r,) ndarray\n Singular values in descending order. 
Always returns as many as are\n calculated: r for mode=\"randomize\" or \"sparse\", min(n, k) for \"dense\".\n W : (k, r) ndarray\n First r **right** singular vectors, as columns.\n **Only returned if return_W=True.**\n \"\"\"\n # Validate the rank.\n rmax = min(states.shape)\n if r is None:\n r = rmax\n if r > rmax or r < 1:\n raise ValueError(f\"invalid POD rank r = {r} (need 1 ≤ r ≤ {rmax})\")\n\n if mode == \"dense\" or mode == \"simple\":\n V, svdvals, Wt = la.svd(states, full_matrices=False, **options)\n W = Wt.T\n\n elif mode == \"sparse\" or mode == \"arpack\":\n get_smallest = False\n if r == rmax:\n r -= 1\n get_smallest = True\n\n # Compute all but the last svd vectors / values (maximum allowed).\n V, svdvals, Wt = spla.svds(states, r, which=\"LM\",\n return_singular_vectors=True, **options)\n V = V[:, ::-1]\n svdvals = svdvals[::-1]\n W = Wt[::-1, :].T\n\n # Get the smallest vector / value separately.\n if get_smallest:\n V1, smallest, W1 = spla.svds(states, 1, which=\"SM\",\n return_singular_vectors='u',\n **options)\n print(f\"W1.shape: {W1.shape}\")\n V = np.concatenate((V, V1), axis=1)\n svdvals = np.concatenate((svdvals, smallest))\n W = np.concatenate((W, W1.T), axis=1)\n r += 1\n\n elif mode == \"randomized\":\n if \"random_state\" not in options:\n options[\"random_state\"] = None\n V, svdvals, Wt = sklmath.randomized_svd(states, r, **options)\n W = Wt.T\n\n else:\n raise NotImplementedError(f\"invalid mode '{mode}'\")\n\n if return_W:\n return V[:, :r], svdvals, W[:, :r]\n return V[:, :r], svdvals\n\n\n# Reduced dimension selection =================================================\ndef svdval_decay(singular_values, tol=1e-8, normalize=True,\n plot=True, ax=None):\n \"\"\"Count the number of normalized singular values that are greater than\n the specified tolerance.\n\n Parameters\n ----------\n singular_values : (n,) ndarray\n Singular values of a snapshot set, e.g., scipy.linalg.svdvals(states).\n tol : float or list(float)\n Cutoff value(s) for the singular values.\n normalize : bool\n If True, normalize so that the maximum singular value is 1.\n plot : bool\n If True, plot the singular values and the cutoff value(s) against the\n singular value index.\n ax : plt.Axes or None\n Matplotlib Axes to plot the results on if plot = True.\n If not given, a new single-axes figure is created.\n\n Returns\n -------\n ranks : int or list(int)\n The number of singular values greater than the cutoff value(s).\n \"\"\"\n # Calculate the number of singular values above the cutoff value(s).\n one_tol = np.isscalar(tol)\n if one_tol:\n tol = [tol]\n singular_values = np.sort(singular_values)[::-1]\n if normalize:\n singular_values /= singular_values[0]\n ranks = [np.count_nonzero(singular_values > epsilon) for epsilon in tol]\n\n if plot:\n # Visualize singular values and cutoff value(s).\n if ax is None:\n ax = plt.figure().add_subplot(111)\n j = np.arange(1, singular_values.size + 1)\n ax.semilogy(j, singular_values, 'C0*', ms=10, mew=0, zorder=3)\n ax.set_xlim((0, j.size))\n ylim = ax.get_ylim()\n for epsilon, r in zip(tol, ranks):\n ax.axhline(epsilon, color=\"black\", linewidth=.5, alpha=.75)\n ax.axvline(r, color=\"black\", linewidth=.5, alpha=.75)\n ax.set_ylim(ylim)\n ax.set_xlabel(r\"Singular value index $j$\")\n ax.set_ylabel(r\"Singular value $\\sigma_j$\")\n\n return ranks[0] if one_tol else ranks\n\n\ndef cumulative_energy(singular_values, thresh=.9999, plot=True, ax=None):\n \"\"\"Compute the number of singular values needed to surpass a given\n energy threshold. 
The energy of j singular values is defined by\n\n energy_j = sum(singular_values[:j]**2) / sum(singular_values**2).\n\n Parameters\n ----------\n singular_values : (n,) ndarray\n Singular values of a snapshot set, e.g., scipy.linalg.svdvals(states).\n thresh : float or list(float)\n Energy capture threshold(s). Default is 99.99%.\n plot : bool\n If True, plot the singular values and the cumulative energy against\n the singular value index (linear scale).\n ax : plt.Axes or None\n Matplotlib Axes to plot the results on if plot = True.\n If not given, a new single-axes figure is created.\n\n Returns\n -------\n ranks : int or list(int)\n The number of singular values required to capture more than each\n energy capture threshold.\n \"\"\"\n # Calculate the cumulative energy.\n svdvals2 = np.sort(singular_values)[::-1]**2\n cum_energy = np.cumsum(svdvals2) / np.sum(svdvals2)\n\n # Determine the points at which the cumulative energy passes the threshold.\n one_thresh = np.isscalar(thresh)\n if one_thresh:\n thresh = [thresh]\n ranks = [int(np.searchsorted(cum_energy, xi)) + 1 for xi in thresh]\n\n if plot:\n # Visualize cumulative energy and threshold value(s).\n if ax is None:\n ax = plt.figure().add_subplot(111)\n j = np.arange(1, singular_values.size + 1)\n ax.plot(j, cum_energy, 'C2.-', ms=10, lw=1, zorder=3)\n ax.set_xlim(0, j.size)\n for xi, r in zip(thresh, ranks):\n ax.axhline(xi, color=\"black\", linewidth=.5, alpha=.5)\n ax.axvline(r, color=\"black\", linewidth=.5, alpha=.5)\n ax.set_xlabel(r\"Singular value index\")\n ax.set_ylabel(r\"Cumulative energy\")\n\n return ranks[0] if one_thresh else ranks\n\n\ndef residual_energy(singular_values, tol=1e-6, plot=True, ax=None):\n \"\"\"Compute the number of singular values needed such that the residual\n energy drops beneath the given tolerance. The residual energy of j\n singular values is defined by\n\n residual_j = 1 - sum(singular_values[:j]**2) / sum(singular_values**2).\n\n Parameters\n ----------\n singular_values : (n,) ndarray\n Singular values of a snapshot set, e.g., scipy.linalg.svdvals(states).\n tol : float or list(float)\n Energy residual tolerance(s). 
Default is 10^-6.\n plot : bool\n If True, plot the singular values and the residual energy against\n the singular value index (log scale).\n ax : plt.Axes or None\n Matplotlib Axes to plot the results on if plot = True.\n If not given, a new single-axes figure is created.\n\n Returns\n -------\n ranks : int or list(int)\n Number of singular values required to for the residual energy to drop\n beneath each tolerance.\n \"\"\"\n # Calculate the cumulative energy.\n svdvals2 = np.sort(singular_values)[::-1]**2\n res_energy = 1 - (np.cumsum(svdvals2) / np.sum(svdvals2))\n\n # Determine the points when the residual energy dips under the tolerance.\n one_tol = np.isscalar(tol)\n if one_tol:\n tol = [tol]\n ranks = [np.count_nonzero(res_energy > epsilon) + 1 for epsilon in tol]\n\n if plot:\n # Visualize residual energy and tolerance value(s).\n if ax is None:\n ax = plt.figure().add_subplot(111)\n j = np.arange(1, singular_values.size + 1)\n ax.semilogy(j, res_energy, 'C1.-', ms=10, lw=1, zorder=3)\n ax.set_xlim(0, j.size)\n for epsilon, r in zip(tol, ranks):\n ax.axhline(epsilon, color=\"black\", linewidth=.5, alpha=.5)\n ax.axvline(r, color=\"black\", linewidth=.5, alpha=.5)\n ax.set_xlabel(r\"Singular value index\")\n ax.set_ylabel(r\"Residual energy\")\n\n return ranks[0] if one_tol else ranks\n\n\ndef projection_error(states, basis):\n \"\"\"Calculate the absolute and relative projection errors induced by\n projecting states to a low dimensional basis, i.e.,\n\n absolute_error = ||Q - Vr Vr^T Q||_F,\n relative_error = ||Q - Vr Vr^T Q||_F / ||Q||_F\n\n where Q = states and Vr = basis. Note that Vr Vr^T is the orthogonal\n projector onto subspace of R^n defined by the basis.\n\n Parameters\n ----------\n states : (n, k) or (k,) ndarray\n Matrix of k snapshots where each column is a single snapshot, or a\n single 1D snapshot. If 2D, use the Frobenius norm; if 1D, the l2 norm.\n Vr : (n, r) ndarray\n Low-dimensional basis of rank r. Each column is one basis vector.\n\n Returns\n -------\n absolute_error : float\n Absolute projection error ||Q - Vr Vr^T Q||_F.\n relative_error : float\n Relative projection error ||Q - Vr Vr^T Q||_F / ||Q||_F.\n \"\"\"\n norm_of_states = la.norm(states)\n absolute_error = la.norm(states - basis @ (basis.T @ states))\n return absolute_error, absolute_error / norm_of_states\n"
]
| [
[
"numpy.concatenate",
"scipy.sparse.linalg.svds",
"numpy.count_nonzero",
"numpy.sum",
"scipy.linalg.svd",
"matplotlib.pyplot.figure",
"numpy.isscalar",
"numpy.sort",
"numpy.arange",
"numpy.cumsum",
"sklearn.utils.extmath.randomized_svd",
"numpy.searchsorted",
"scipy.linalg.norm"
]
]
|
dawedawe/traipor | [
"abfb027dec6837a5a912d6470e8e4bb0eca0c815"
]
| [
"app/training/parameterfitting.py"
]
| [
"from scipy import optimize\nimport lmfit\nimport numpy as np\nimport cma\n\ntry:\n from .fitnessfatigue import performance_over_time as \\\n ff_performance_over_time\n from .fitnessfatigue import performance_over_time2 as \\\n ff_performance_over_time2\n from .perpot import performance_over_time as pp_performance_over_time\n from .perpot import performance_over_time2 as pp_performance_over_time2\n from .perpot import calc_pp_load_scale_factor, calc_pp_perf_scale_factor\n from .fitting_util import filter_model_perf_values_2_load_days\n from .fitting_util import filter_model_perfs_2_real_perfs\n from .fitting_util import csv_value_dict_from_path\n from .fitting_util import plan_perfs_from_dic, calc_rmse, calc_residuals\nexcept SystemError:\n from fitnessfatigue import performance_over_time as ff_performance_over_time\n from fitnessfatigue import performance_over_time2 as \\\n ff_performance_over_time2\n from perpot import performance_over_time as pp_performance_over_time\n from perpot import performance_over_time2 as pp_performance_over_time2\n from perpot import calc_pp_load_scale_factor, calc_pp_perf_scale_factor\n # from fitting_util import choose_initial_p\n from fitting_util import filter_model_perf_values_2_load_days\n from fitting_util import filter_model_perfs_2_real_perfs\n # from plots import plot_model_and_metrics\n # from plots import plot_model_and_metrics2\n # from plots import pp_plot_model_and_metrics2\n from fitting_util import csv_value_dict_from_path\n from fitting_util import plan_perfs_from_dic, calc_rmse, calc_residuals\n\n\ndef objective_f(x, *args):\n '''the objective function to minimize with the optimization methods. returns\n the calculated error'''\n plan = args[0]\n real_perfs = args[1]\n calc_error = args[2]\n unpack_parms = args[3]\n model_func = args[4]\n model_parms = unpack_parms(x)\n model_perfs = model_func(plan, model_parms)\n model_perfs = filter_model_perfs_2_real_perfs(model_perfs, real_perfs)\n real_perfs = list(filter(lambda x: x > 0.0, real_perfs))\n assert(len(model_perfs) == len(real_perfs))\n return calc_error(np.array(real_perfs), np.array(model_perfs))\n\n\ndef unpack_ff_parms_list(x):\n return x\n\n\ndef unpack_pp_parms_list(x):\n # optimize delays and init_p aka perfpot\n return [0.0, 0.0, x[0], x[1], x[2], x[3]]\n\n\ndef unpack_ff_lmfit_parms(x):\n parvals = x.valuesdict()\n return [parvals['initial_p'], parvals['k_1'], parvals['tau_1'],\n parvals['k_2'], parvals['tau_2']]\n\n\ndef unpack_pp_lmfit_parms(x):\n # optimize delays and init_p aka perfpot\n parvals = x.valuesdict()\n return [0.0, 0.0, parvals['perfpot'], parvals['straindelay'],\n parvals['responsedelay'], parvals['overflowdelay']]\n\n\ndef ff_minimize_fitting(plan, real_perf_values, method):\n '''generic interface for optimization.minimize for Fitness Fatigue fitting.\n returns the OptimizeResult object.'''\n x0 = np.array([real_perf_values[0], 1.0, 30.0, 1.0, 15.0]) # initial guess\n args = (plan,\n real_perf_values,\n calc_rmse,\n unpack_ff_parms_list,\n ff_performance_over_time2)\n bounds = [(0, max(real_perf_values)), # initial_p\n (0.01, 5), # k_1\n (1, 70), # tau_1\n (0.01, 5), # k_2\n (1, 70)] # tau_2\n # options = {'maxiter': 50, 'disp': True}\n options = {'disp': False}\n\n def iter_callback(xk):\n print(\"current parameter vector: {}\".format(xk))\n\n return optimize.minimize(objective_f,\n x0,\n args,\n method,\n bounds=bounds,\n options=options,\n callback=None)\n # callback=iter_callback)\n\n\ndef pp_minimize_fitting(plan, real_perf_values, method):\n '''generic 
interface for optimization.minimize for PerPot fitting.\n returns the OptimizeResult object and the scale factors.'''\n # x0 = np.array([4.0, 2.0, 0.001]) # initial guess\n x0 = np.array([0.5, 4.0, 2.0, 15]) # initial guess including perfpot\n load_scale_factor = calc_pp_load_scale_factor(plan)\n perf_scale_factor = calc_pp_perf_scale_factor(real_perf_values)\n scaled_plan = list(map(lambda l: load_scale_factor * l, plan))\n scaled_perfs = list(map(lambda p: perf_scale_factor * p, real_perf_values))\n args = (scaled_plan,\n scaled_perfs,\n calc_rmse,\n unpack_pp_parms_list,\n pp_performance_over_time2)\n bounds = [(0.0, 1.0), # perfpot\n (0.001, 30.0), # DS Delay of Strain Rate\n (0.001, 30.0), # DR Delay of Response Rate\n (0.001, 30.0)] # DSO Delay of Strain Overflow Rate\n options = {}\n # options['maxiter'] = 50\n options['disp'] = False\n\n def iter_callback(xk):\n print(\"current parameter vector: {}\".format(xk))\n\n optres = optimize.minimize(objective_f,\n x0,\n args,\n method,\n bounds=bounds,\n options=options,\n callback=iter_callback)\n return optres, load_scale_factor, perf_scale_factor\n\n\ndef ff_cmaes_fitting(plan, real_perf_values):\n x0 = np.array([real_perf_values[0], 1.0, 30.0, 1.0, 15.0]) # initial guess\n args = (plan,\n real_perf_values,\n calc_rmse,\n unpack_ff_parms_list,\n ff_performance_over_time2)\n opts = cma.CMAOptions()\n bounds = [[0.0, 0.01, 1.0, 0.01, 1.0],\n [real_perf_values[0] * 2, 5.0, 70.0, 5.0, 70.0]]\n opts.set('bounds', bounds)\n opts.set('tolfun', 1e-5)\n # opts.set('verb_disp', False)\n # opts.set('verbose', -9)\n # opts.set('maxiter', 800)\n res = cma.fmin(objective_f, x0, 0.5, args=args, options=opts)\n return res\n\n\ndef pp_cmaes_fitting(plan, real_perf_values):\n # x0 = np.array([4.0, 2.0, 15]) # initial guess of delays\n x0 = np.array([0.5, 4.0, 2.0, 15]) # initial guess including perfpot\n load_scale_factor = calc_pp_load_scale_factor(plan)\n perf_scale_factor = calc_pp_perf_scale_factor(real_perf_values)\n scaled_plan = list(map(lambda l: load_scale_factor * l, plan))\n scaled_perfs = list(map(lambda p: perf_scale_factor * p, real_perf_values))\n args = (scaled_plan,\n scaled_perfs,\n calc_rmse,\n unpack_pp_parms_list,\n pp_performance_over_time2)\n opts = cma.CMAOptions()\n # only optimize delays\n '''bounds = [[0.001, 0.001, 0.001],\n [30.0, 30.0, 30.0]]'''\n # optimize delays and init_p aka perfpot\n bounds = [[0.0, 0.001, 0.001, 0.001],\n [1.0, 30.0, 30.0, 30.0]]\n opts.set('bounds', bounds)\n # opts.set('verb_disp', False)\n # opts.set('verbose', -9)\n opts.set('maxiter', 800)\n res = cma.fmin(objective_f, x0, 0.5, args=args, options=opts)\n print('res[0] = {}'.format(res[0]))\n print('res[1] = {}'.format(res[1]))\n return res, load_scale_factor, perf_scale_factor\n\n\ndef ff_lmfit_fitting(plan, real_perf_values, method='leastsq'):\n '''least squares or differential evolution fitting with bounds'''\n real_perf_values = np.array(real_perf_values)\n args = (plan,\n real_perf_values,\n calc_residuals,\n unpack_ff_lmfit_parms,\n ff_performance_over_time2)\n params = lmfit.Parameters()\n params.add(name='initial_p',\n value=real_perf_values[0],\n min=0,\n max=max(real_perf_values))\n params.add(name='k_1', value=1.0, min=0.01, max=5.0)\n params.add(name='tau_1', value=30.0, min=1.00, max=70.0)\n params.add(name='k_2', value=1.0, min=0.01, max=5.0)\n params.add(name='tau_2', value=15.0, min=1.00, max=70.0)\n lmfit.minimize(objective_f, params, method=method, args=args)\n model_perfs = ff_performance_over_time(plan,\n 
params['initial_p'],\n params['k_1'],\n params['tau_1'],\n params['k_2'],\n params['tau_2'])\n model_perfs = filter_model_perfs_2_real_perfs(model_perfs, real_perf_values)\n real_perf_values = list(filter(lambda x: x > 0.0, real_perf_values))\n assert(len(model_perfs) == len(real_perf_values))\n rmse = calc_rmse(real_perf_values, model_perfs)\n return (params['initial_p'].value,\n params['k_1'].value,\n params['tau_1'].value,\n params['k_2'].value,\n params['tau_2'].value), rmse\n\n\ndef pp_lmfit_fitting(plan, real_perf_values, method='leastsq'):\n '''least squares or differential evolution fitting with bounds'''\n real_perf_values = np.array(real_perf_values)\n load_scale_factor = calc_pp_load_scale_factor(plan)\n perf_scale_factor = calc_pp_perf_scale_factor(real_perf_values)\n scaled_plan = list(map(lambda l: load_scale_factor * l, plan))\n scaled_perfs = list(map(lambda p: perf_scale_factor * p, real_perf_values))\n args = (scaled_plan,\n np.array(scaled_perfs),\n calc_residuals,\n unpack_pp_lmfit_parms,\n pp_performance_over_time2)\n params = lmfit.Parameters()\n params.add(name='perfpot', value=0.5, min=0, max=1)\n params.add(name='straindelay', value=4.0, min=0.001, max=30)\n params.add(name='responsedelay', value=2.0, min=0.001, max=30)\n params.add(name='overflowdelay', value=15, min=0.001, max=30)\n lmfit.minimize(objective_f, params, method=method, args=args)\n model_perfs = pp_performance_over_time(scaled_plan,\n 0.0,\n 0.0,\n params['perfpot'],\n params['straindelay'],\n params['responsedelay'],\n params['overflowdelay'])\n model_perfs = filter_model_perfs_2_real_perfs(model_perfs, real_perf_values)\n scaled_perfs = list(filter(lambda x: x > 0.0, scaled_perfs))\n assert(len(model_perfs) == len(scaled_perfs))\n rmse = calc_rmse(scaled_perfs, model_perfs)\n return (((params['perfpot'].value,\n params['straindelay'].value,\n params['responsedelay'].value,\n params['overflowdelay'].value), rmse),\n load_scale_factor,\n perf_scale_factor)\n"
]
| [
[
"numpy.array",
"scipy.optimize.minimize"
]
]
|
bibliolytic/moabb | [
"46799bfd7957b1da3e7a0534286c1973af9c95d9"
]
| [
"moabb/paradigms/base.py"
]
| [
"from abc import ABCMeta, abstractproperty, abstractmethod\nimport numpy as np\nimport pandas as pd\n\n\nclass BaseParadigm(metaclass=ABCMeta):\n \"\"\"Base Paradigm.\n \"\"\"\n\n def __init__(self):\n pass\n\n @abstractproperty\n def scoring(self):\n '''Property that defines scoring metric (e.g. ROC-AUC or accuracy\n or f-score), given as a sklearn-compatible string or a compatible\n sklearn scorer.\n\n '''\n pass\n\n @abstractproperty\n def datasets(self):\n '''Property that define the list of compatible datasets\n\n '''\n pass\n\n @abstractmethod\n def is_valid(self, dataset):\n \"\"\"Verify the dataset is compatible with the paradigm.\n\n This method is called to verify dataset is compatible with the\n paradigm.\n\n This method should raise an error if the dataset is not compatible\n with the paradigm. This is for example the case if the\n dataset is an ERP dataset for motor imagery paradigm, or if the\n dataset does not contain any of the required events.\n\n Parameters\n ----------\n dataset : dataset instance\n The dataset to verify.\n \"\"\"\n\n @abstractmethod\n def process_raw(self, raw, dataset):\n \"\"\"\n Process one raw data file.\n\n This function is apply the preprocessing and eventual epoching on the\n individual run, and return the data, labels and a dataframe with\n metadata.\n\n metadata is a dataframe with as many row as the length of the data\n and labels.\n\n Parameters\n ----------\n\n raw: mne.Raw instance\n the raw EEG data.\n\n dataset : dataset instance\n The dataset corresponding to the raw file. mainly use to access\n dataset specific information.\n\n returns\n -------\n X : np.ndarray\n the data that will be used as features for the model\n\n labels: np.ndarray\n the labels for training / evaluating the model\n\n metadata: pd.DataFrame\n A dataframe containing the metadata\n\n \"\"\"\n pass\n\n def get_data(self, dataset, subjects=None):\n \"\"\"\n Return the data for a list of subject.\n\n return the data, labels and a dataframe with metadata. the dataframe\n will contain at least the following columns\n\n - subject : the subject indice\n - session : the session indice\n - run : the run indice\n\n parameters\n ----------\n dataset:\n A dataset instance.\n subjects: List of int\n List of subject number\n\n returns\n -------\n X : np.ndarray\n the data that will be used as features for the model\n labels: np.ndarray\n the labels for training / evaluating the model\n metadata: pd.DataFrame\n A dataframe containing the metadata.\n \"\"\"\n\n if not self.is_valid(dataset):\n message = \"Dataset {} is not valid for paradigm\".format(\n dataset.code)\n raise AssertionError(message)\n\n data = dataset.get_data(subjects)\n\n X = []\n labels = []\n metadata = []\n for subject, sessions in data.items():\n for session, runs in sessions.items():\n for run, raw in runs.items():\n proc = self.process_raw(raw, dataset)\n\n if proc is None:\n # this mean the run did not contain any selected event\n # go to next\n continue\n\n x, lbs, met = proc\n met['subject'] = subject\n met['session'] = session\n met['run'] = run\n metadata.append(met)\n\n # grow X and labels in a memory efficient way. can be slow\n if len(X) > 0:\n X = np.append(X, x, axis=0)\n labels = np.append(labels, lbs, axis=0)\n else:\n X = x\n labels = lbs\n\n metadata = pd.concat(metadata, ignore_index=True)\n return X, labels, metadata\n"
]
| [
[
"numpy.append",
"pandas.concat"
]
]
|
TheFebrin/thesis-normals-estimation | [
"43c2b9f902b93ec8eace610bb386d190a58eb4e3"
]
| [
"utils/make_plots.py"
]
| [
"import numpy as np\nfrom PIL import Image\nimport numba\nimport matplotlib.pyplot as plt\n# import albumentations as A\n# import cv2\nimport pptk\n\nfrom util_functions import (\n get_normals_from_depth,\n depth_image_to_pointcloud,\n normalize,\n get_normals_from_depth_avg,\n)\n\n\ndef plot_images():\n fig, axes = plt.subplots(2, 3, figsize=(15, 7))\n fig.tight_layout()\n\n img1 = Image.open('../images/dataset2/SBXCameraSensor_Top_PolyBag_00000009.png')\n img2 = Image.open('../images/production/color_production_1.png')\n\n depth1 = Image.open('../images/dataset2/depth_SBXCameraSensor_Top_PolyBag_00000009.png')\n depth2 = Image.open('../images/production/depth_production_1.png')\n\n depth1 = np.array(depth1, dtype=np.float32)\n depth2 = np.array(depth2, dtype=np.float32)\n\n normals1 = get_normals_from_depth(depth1)\n normals21 = get_normals_from_depth(depth2)\n\n\n normals2 = get_normals_from_depth_avg(depth2, k=2)\n normals3 = get_normals_from_depth_avg(depth2, k=3)\n normals4 = get_normals_from_depth_avg(depth2, k=4)\n normals5 = get_normals_from_depth_avg(depth2, k=5)\n normals6 = get_normals_from_depth_avg(depth2, k=6)\n\n\n # pointcloud1 = depth_image_to_pointcloud(depth1)\n #\n # fx, fy, cx, cy = 1338.7076416015625, 1338.7076416015625, 960.0, 540.0\n # pointcloud2 = depth_image_to_pointcloud(\n # depth2, fx=fx, fy=fy, cx=cx, cy=cy\n # )\n\n # normals1 = pptk.estimate_normals(\n # normalize(pointcloud1.reshape((-1, 3))),\n # k=9, r=0.3, output_eigenvalues=False,\n # output_all_eigenvectors=False, output_neighborhood_sizes=False,\n # verbose=True\n # ).reshape(*pointcloud1.shape)\n #\n # normals2 = pptk.estimate_normals(\n # normalize(pointcloud2.reshape((-1, 3))),\n # k=9, r=0.2, output_eigenvalues=False,\n # output_all_eigenvectors=False, output_neighborhood_sizes=False,\n # verbose=True\n # ).reshape(*pointcloud2.shape)\n\n axes[0][0].imshow(normals21)\n axes[0][0].set_title('k = 1')\n\n axes[0][1].imshow(normals2)\n axes[0][1].set_title('k = 2')\n\n axes[0][2].imshow(normals3)\n axes[0][2].set_title('k = 3')\n\n axes[1][0].imshow(normals4)\n axes[1][0].set_title('k = 4')\n\n axes[1][1].imshow(normals5)\n axes[1][1].set_title('k = 5')\n\n axes[1][2].imshow(normals6)\n axes[1][2].set_title('k = 6')\n\n plt.show()\n\n\ndef plot_transforms():\n\ttransform1 = A.Compose([\n\t\t#A.HorizontalFlip(p=1),\n\t\t#A.VerticalFlip(p=1),\n\t\t#A.RandomCrop(width=1500, height=900),\n\t\t#A.Resize(width=1280, height=720)\n\t\tA.GaussianBlur(\n\t\t blur_limit=(11, 15), sigma_limit=10, always_apply=True, p=1\n\t\t),\n\t])\n\n\ttransform = A.Compose([\n\t\tA.ColorJitter(\n\t\t brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2, always_apply=False, p=0.5\n\t\t),\n\t\tA.RandomBrightnessContrast(\n\t\t brightness_limit=0.2, contrast_limit=0.2, brightness_by_max=True, always_apply=False, p=0.5\n\t\t),\n\t\tA.HueSaturationValue(\n\t\t hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, always_apply=False, p=0.5\n\t\t),\n\t\tA.GaussianBlur(\n\t\t blur_limit=(3, 5), sigma_limit=0, always_apply=False, p=0.5\n\t\t),\n\t])\n\n\t# Read an image with OpenCV and convert it to the RGB colorspace\n\tcolor_img = cv2.imread('../images/dataset2/SBXCameraSensor_Top_PolyBag_00000009.png')\n\tcolor_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2RGB)\n\n\ttransformed = transform1(image=color_img)\n\ttransformed_color_img = transformed[\"image\"]\n\n\tfig, axes = plt.subplots(1, 2, figsize=(10, 5))\n\tfig.tight_layout()\n\n\n\taxes[0].imshow(color_img)\n\taxes[0].set_title('Original 
image')\n\taxes[1].imshow(transformed_color_img)\n\taxes[1].set_title('Transformed image after GaussianBlur')\n\n\tplt.show()\n\n\ndef main():\n\tplot_images()\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.subplots"
]
]
|
tran-khoa/fairseq | [
"558366b3c6970a5dd85ad1909581d43e41fdce9f"
]
| [
"fairseq/checkpoint_utils.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport ast\nimport collections\nimport contextlib\nimport logging\nimport numpy as np\nimport os\nimport re\nimport time\nimport traceback\nfrom collections import OrderedDict\nfrom typing import Any, Dict, Optional, Union\n\nimport torch\nfrom fairseq.data import data_utils\nfrom fairseq.dataclass.configs import CheckpointConfig\nfrom fairseq.dataclass.utils import (\n convert_namespace_to_omegaconf,\n overwrite_args_by_name,\n)\nfrom fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP\nfrom fairseq.file_io import PathManager\nfrom fairseq.models import FairseqDecoder, FairseqEncoder\nfrom omegaconf import DictConfig, open_dict, OmegaConf\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):\n from fairseq import meters\n\n # only one worker should attempt to create the required dir\n if trainer.data_parallel_rank == 0:\n os.makedirs(cfg.save_dir, exist_ok=True)\n\n prev_best = getattr(save_checkpoint, \"best\", val_loss)\n if val_loss is not None:\n best_function = max if cfg.maximize_best_checkpoint_metric else min\n save_checkpoint.best = best_function(val_loss, prev_best)\n\n if cfg.no_save:\n return\n\n trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state\n\n if not trainer.should_save_checkpoint_on_current_rank:\n if trainer.always_call_state_dict_during_save_checkpoint:\n trainer.state_dict()\n return\n\n write_timer = meters.StopwatchMeter()\n write_timer.start()\n\n epoch = epoch_itr.epoch\n end_of_epoch = epoch_itr.end_of_epoch()\n updates = trainer.get_num_updates()\n\n logger.info(f\"Preparing to save checkpoint for epoch {epoch} @ {updates} updates\")\n\n def is_better(a, b):\n return a >= b if cfg.maximize_best_checkpoint_metric else a <= b\n\n suffix = trainer.checkpoint_suffix\n checkpoint_conds = collections.OrderedDict()\n checkpoint_conds[\"checkpoint{}{}.pt\".format(epoch, suffix)] = (\n end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0\n )\n checkpoint_conds[\"checkpoint_{}_{}{}.pt\".format(epoch, updates, suffix)] = (\n not end_of_epoch\n and cfg.save_interval_updates > 0\n and updates % cfg.save_interval_updates == 0\n )\n checkpoint_conds[\"checkpoint_best{}.pt\".format(suffix)] = val_loss is not None and (\n not hasattr(save_checkpoint, \"best\")\n or is_better(val_loss, save_checkpoint.best)\n )\n if val_loss is not None and cfg.keep_best_checkpoints > 0:\n worst_best = getattr(save_checkpoint, \"best\", None)\n chkpts = checkpoint_paths(\n cfg.save_dir,\n pattern=r\"checkpoint\\.best_{}_(\\d+\\.?\\d*){}\\.pt\".format(\n cfg.best_checkpoint_metric, suffix\n ),\n )\n if len(chkpts) > 0:\n p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0]\n worst_best = float(p.rsplit(\"_\")[-1].replace(\"{}.pt\".format(suffix), \"\"))\n # add random digits to resolve ties\n with data_utils.numpy_seed(epoch, updates, val_loss):\n rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints)\n\n checkpoint_conds[\n \"checkpoint.best_{}_{:.3f}{}{}.pt\".format(\n cfg.best_checkpoint_metric, val_loss, rand_sfx, suffix\n )\n ] = worst_best is None or is_better(val_loss, worst_best)\n checkpoint_conds[\n \"checkpoint_last{}.pt\".format(suffix)\n ] = not cfg.no_last_checkpoints\n\n extra_state = {\"train_iterator\": epoch_itr.state_dict(), 
\"val_loss\": val_loss}\n if hasattr(save_checkpoint, \"best\"):\n extra_state.update({\"best\": save_checkpoint.best})\n\n checkpoints = [\n os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond\n ]\n if len(checkpoints) > 0:\n trainer.save_checkpoint(checkpoints[0], extra_state)\n for cp in checkpoints[1:]:\n if cfg.write_checkpoints_asynchronously:\n # TODO[ioPath]: Need to implement a delayed asynchronous\n # file copying/moving feature.\n logger.warning(\n f\"ioPath is not copying {checkpoints[0]} to {cp} \"\n \"since async write mode is on.\"\n )\n else:\n assert PathManager.copy(\n checkpoints[0], cp, overwrite=True\n ), f\"Failed to copy {checkpoints[0]} to {cp}\"\n\n write_timer.stop()\n logger.info(\n \"Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)\".format(\n checkpoints[0], epoch, updates, val_loss, write_timer.sum\n )\n )\n\n if not end_of_epoch and cfg.keep_interval_updates > 0:\n # remove old checkpoints; checkpoints are sorted in descending order\n if cfg.keep_interval_updates_pattern == -1:\n checkpoints = checkpoint_paths(\n cfg.save_dir, pattern=r\"checkpoint_\\d+_(\\d+){}\\.pt\".format(suffix)\n )\n else:\n checkpoints = checkpoint_paths(\n cfg.save_dir,\n pattern=r\"checkpoint_\\d+_(\\d+){}\\.pt\".format(suffix),\n keep_match=True,\n )\n checkpoints = [\n x[0]\n for x in checkpoints\n if x[1] % cfg.keep_interval_updates_pattern != 0\n ]\n\n for old_chk in checkpoints[cfg.keep_interval_updates :]:\n if os.path.lexists(old_chk):\n os.remove(old_chk)\n elif PathManager.exists(old_chk):\n PathManager.rm(old_chk)\n\n if cfg.keep_last_epochs > 0:\n # remove old epoch checkpoints; checkpoints are sorted in descending order\n checkpoints = checkpoint_paths(\n cfg.save_dir, pattern=r\"checkpoint(\\d+){}\\.pt\".format(suffix)\n )\n for old_chk in checkpoints[cfg.keep_last_epochs :]:\n if os.path.lexists(old_chk):\n os.remove(old_chk)\n elif PathManager.exists(old_chk):\n PathManager.rm(old_chk)\n\n if cfg.keep_best_checkpoints > 0:\n # only keep the best N checkpoints according to validation metric\n checkpoints = checkpoint_paths(\n cfg.save_dir,\n pattern=r\"checkpoint\\.best_{}_(\\d+\\.?\\d*){}\\.pt\".format(\n cfg.best_checkpoint_metric, suffix\n ),\n )\n if not cfg.maximize_best_checkpoint_metric:\n checkpoints = checkpoints[::-1]\n for old_chk in checkpoints[cfg.keep_best_checkpoints :]:\n if os.path.lexists(old_chk):\n os.remove(old_chk)\n elif PathManager.exists(old_chk):\n PathManager.rm(old_chk)\n\n\ndef load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):\n \"\"\"\n Load a checkpoint and restore the training iterator.\n\n *passthrough_args* will be passed through to\n ``trainer.get_train_iterator``.\n \"\"\"\n\n reset_optimizer = cfg.reset_optimizer\n reset_lr_scheduler = cfg.reset_lr_scheduler\n optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)\n reset_meters = cfg.reset_meters\n reset_dataloader = cfg.reset_dataloader\n\n if cfg.finetune_from_model is not None and (\n reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader\n ):\n raise ValueError(\n \"--finetune-from-model can not be set together with either --reset-optimizer\"\n \" or reset_lr_scheduler or reset_meters or reset_dataloader\"\n )\n\n first_launch = True\n suffix = trainer.checkpoint_suffix\n if (\n cfg.restore_file == \"checkpoint_last.pt\"\n ): # default value of restore_file is 'checkpoint_last.pt'\n checkpoint_path = os.path.join(\n cfg.save_dir, \"checkpoint_last{}.pt\".format(suffix)\n )\n 
first_launch = not PathManager.exists(checkpoint_path)\n if cfg.finetune_from_model is not None and first_launch:\n # if there is no last checkpoint to restore, start the finetune from pretrained model\n # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.\n if PathManager.exists(cfg.finetune_from_model):\n checkpoint_path = cfg.finetune_from_model\n reset_optimizer = True\n reset_lr_scheduler = True\n reset_meters = True\n reset_dataloader = True\n logger.info(\n f\"loading pretrained model from {checkpoint_path}: \"\n \"optimizer, lr scheduler, meters, dataloader will be reset\"\n )\n else:\n raise ValueError(\n f\"--funetune-from-model {cfg.finetune_from_model} does not exist\"\n )\n elif suffix is not None:\n checkpoint_path = cfg.restore_file.replace(\".pt\", suffix + \".pt\")\n else:\n checkpoint_path = cfg.restore_file\n\n if cfg.restore_file != \"checkpoint_last.pt\" and cfg.finetune_from_model:\n raise ValueError(\n \"--finetune-from-model and --restore-file (non-default value) \"\n \"can not be specified together: \" + str(cfg)\n )\n\n extra_state = trainer.load_checkpoint(\n checkpoint_path,\n reset_optimizer,\n reset_lr_scheduler,\n optimizer_overrides,\n reset_meters=reset_meters,\n reset_subepochs=(cfg.finetune_from_model is not None and first_launch)\n )\n\n if (\n extra_state is not None\n and \"best\" in extra_state\n and not reset_optimizer\n and not reset_meters\n ):\n save_checkpoint.best = extra_state[\"best\"]\n\n if extra_state is not None and not reset_dataloader:\n # restore iterator from checkpoint\n itr_state = extra_state[\"train_iterator\"]\n epoch_itr = trainer.get_train_iterator(\n epoch=itr_state[\"epoch\"], load_dataset=True, **passthrough_args\n )\n epoch_itr.load_state_dict(itr_state)\n else:\n epoch_itr = trainer.get_train_iterator(\n epoch=1, load_dataset=True, **passthrough_args\n )\n\n trainer.lr_step(epoch_itr.epoch)\n\n return extra_state, epoch_itr\n\n\ndef load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):\n \"\"\"Loads a checkpoint to CPU (with upgrading for backward compatibility).\n\n If doing single-GPU training or if the checkpoint is only being loaded by at\n most one process on each node (current default behavior is for only rank 0\n to read the checkpoint from disk), load_on_all_ranks should be False to\n avoid errors from torch.distributed not having been initialized or\n torch.distributed.barrier() hanging.\n\n If all processes on each node may be loading the checkpoint\n simultaneously, load_on_all_ranks should be set to True to avoid I/O\n conflicts.\n\n There's currently no support for > 1 but < all processes loading the\n checkpoint on each node.\n \"\"\"\n local_path = PathManager.get_local_path(path)\n # The locally cached file returned by get_local_path() may be stale for\n # remote files that are periodically updated/overwritten (ex:\n # checkpoint_last.pt) - so we remove the local copy, sync across processes\n # (if needed), and then download a fresh copy.\n if local_path != path and PathManager.path_requires_pathmanager(path):\n try:\n os.remove(local_path)\n except FileNotFoundError:\n # With potentially multiple processes removing the same file, the\n # file being missing is benign (missing_ok isn't available until\n # Python 3.8).\n pass\n if load_on_all_ranks:\n torch.distributed.barrier()\n local_path = PathManager.get_local_path(path)\n\n with open(local_path, \"rb\") as f:\n state = torch.load(f, map_location=torch.device(\"cpu\"))\n\n if \"args\" in state and 
state[\"args\"] is not None and arg_overrides is not None:\n args = state[\"args\"]\n for arg_name, arg_val in arg_overrides.items():\n setattr(args, arg_name, arg_val)\n\n if \"cfg\" in state and state[\"cfg\"] is not None:\n\n # hack to be able to set Namespace in dict config. this should be removed when we update to newer\n # omegaconf version that supports object flags, or when we migrate all existing models\n from omegaconf import _utils\n\n old_primitive = _utils.is_primitive_type\n _utils.is_primitive_type = lambda _: True\n\n state[\"cfg\"] = OmegaConf.create(state[\"cfg\"])\n\n _utils.is_primitive_type = old_primitive\n OmegaConf.set_struct(state[\"cfg\"], True)\n\n if arg_overrides is not None:\n overwrite_args_by_name(state[\"cfg\"], arg_overrides)\n\n state = _upgrade_state_dict(state)\n return state\n\n\ndef load_model_ensemble(\n filenames,\n arg_overrides: Optional[Dict[str, Any]] = None,\n task=None,\n strict=True,\n suffix=\"\",\n num_shards=1,\n state=None,\n):\n \"\"\"Loads an ensemble of models.\n\n Args:\n filenames (List[str]): checkpoint files to load\n arg_overrides (Dict[str,Any], optional): override model args that\n were used during model training\n task (fairseq.tasks.FairseqTask, optional): task to use for loading\n \"\"\"\n assert not (\n strict and num_shards > 1\n ), \"Cannot load state dict with strict=True and checkpoint shards > 1\"\n ensemble, args, _task = load_model_ensemble_and_task(\n filenames,\n arg_overrides,\n task,\n strict,\n suffix,\n num_shards,\n state,\n )\n return ensemble, args\n\n\ndef get_maybe_sharded_checkpoint_filename(\n filename: str, suffix: str, shard_idx: int, num_shards: int\n) -> str:\n orig_filename = filename\n filename = filename.replace(\".pt\", suffix + \".pt\")\n fsdp_filename = filename[:-3] + f\"-shard{shard_idx}.pt\"\n model_parallel_filename = orig_filename[:-3] + f\"_part{shard_idx}.pt\"\n if PathManager.exists(fsdp_filename):\n return fsdp_filename\n elif num_shards > 1:\n return model_parallel_filename\n else:\n return filename\n\n\ndef load_model_ensemble_and_task(\n filenames,\n arg_overrides: Optional[Dict[str, Any]] = None,\n task=None,\n strict=True,\n suffix=\"\",\n num_shards=1,\n state=None,\n):\n assert state is None or len(filenames) == 1\n\n from fairseq import tasks\n\n assert not (\n strict and num_shards > 1\n ), \"Cannot load state dict with strict=True and checkpoint shards > 1\"\n ensemble = []\n cfg = None\n for filename in filenames:\n orig_filename = filename\n model_shard_state = {\"shard_weights\": [], \"shard_metadata\": []}\n assert num_shards > 0\n st = time.time()\n for shard_idx in range(num_shards):\n filename = get_maybe_sharded_checkpoint_filename(\n orig_filename, suffix, shard_idx, num_shards\n )\n\n if not PathManager.exists(filename):\n raise IOError(\"Model file not found: {}\".format(filename))\n if state is None:\n state = load_checkpoint_to_cpu(filename, arg_overrides)\n if \"args\" in state and state[\"args\"] is not None:\n cfg = convert_namespace_to_omegaconf(state[\"args\"])\n elif \"cfg\" in state and state[\"cfg\"] is not None:\n cfg = state[\"cfg\"]\n else:\n raise RuntimeError(\n f\"Neither args nor cfg exist in state keys = {state.keys()}\"\n )\n\n if task is None:\n task = tasks.setup_task(cfg.task)\n\n if \"task_state\" in state:\n task.load_state_dict(state[\"task_state\"])\n\n if \"fsdp_metadata\" in state and num_shards > 1:\n model_shard_state[\"shard_weights\"].append(state[\"model\"])\n model_shard_state[\"shard_metadata\"].append(state[\"fsdp_metadata\"])\n # 
check FSDP import before the code goes too far\n if not has_FSDP:\n raise ImportError(\n \"Cannot find FullyShardedDataParallel. \"\n \"Please install fairscale with: pip install fairscale\"\n )\n if shard_idx == num_shards - 1:\n consolidated_model_state = FSDP.consolidate_shard_weights(\n shard_weights=model_shard_state[\"shard_weights\"],\n shard_metadata=model_shard_state[\"shard_metadata\"],\n )\n model = task.build_model(cfg.model)\n if (\n \"optimizer_history\" in state\n and len(state[\"optimizer_history\"]) > 0\n and \"num_updates\" in state[\"optimizer_history\"][-1]\n ):\n model.set_num_updates(\n state[\"optimizer_history\"][-1][\"num_updates\"]\n )\n model.load_state_dict(\n consolidated_model_state, strict=strict, model_cfg=cfg.model\n )\n else:\n # model parallel checkpoint or unsharded checkpoint\n model = task.build_model(cfg.model)\n if (\n \"optimizer_history\" in state\n and len(state[\"optimizer_history\"]) > 0\n and \"num_updates\" in state[\"optimizer_history\"][-1]\n ):\n model.set_num_updates(state[\"optimizer_history\"][-1][\"num_updates\"])\n model.load_state_dict(\n state[\"model\"], strict=strict, model_cfg=cfg.model\n )\n\n # reset state so it gets loaded for the next model in ensemble\n state = None\n if shard_idx % 10 == 0 and shard_idx > 0:\n elapsed = time.time() - st\n logger.info(\n f\"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx+1):.2f}s/shard\"\n )\n\n # build model for ensemble\n ensemble.append(model)\n return ensemble, cfg, task\n\n\ndef checkpoint_paths(path, pattern=r\"checkpoint(\\d+)\\.pt\", keep_match=False):\n \"\"\"Retrieves all checkpoints found in `path` directory.\n\n Checkpoints are identified by matching filename to the specified pattern. If\n the pattern contains groups, the result will be sorted by the first group in\n descending order.\n \"\"\"\n pt_regexp = re.compile(pattern)\n files = PathManager.ls(path)\n\n entries = []\n for i, f in enumerate(files):\n m = pt_regexp.fullmatch(f)\n if m is not None:\n idx = float(m.group(1)) if len(m.groups()) > 0 else i\n entries.append((idx, m.group(0)))\n if keep_match:\n return [(os.path.join(path, x[1]), x[0]) for x in sorted(entries, reverse=True)]\n else:\n return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]\n\n\ndef torch_persistent_save(obj, filename, async_write: bool = False):\n if async_write:\n with PathManager.opena(filename, \"wb\") as f:\n _torch_persistent_save(obj, f)\n else:\n if PathManager.supports_rename(filename):\n # do atomic save\n with PathManager.open(filename + \".tmp\", \"wb\") as f:\n _torch_persistent_save(obj, f)\n PathManager.rename(filename + \".tmp\", filename)\n else:\n # fallback to non-atomic save\n with PathManager.open(filename, \"wb\") as f:\n _torch_persistent_save(obj, f)\n\n\ndef _torch_persistent_save(obj, f):\n if isinstance(f, str):\n with PathManager.open(f, \"wb\") as h:\n torch_persistent_save(obj, h)\n return\n for i in range(3):\n try:\n return torch.save(obj, f)\n except Exception:\n if i == 2:\n logger.error(traceback.format_exc())\n raise\n\n\ndef _upgrade_state_dict(state):\n \"\"\"Helper for upgrading old model checkpoints.\"\"\"\n\n # add optimizer_history\n if \"optimizer_history\" not in state:\n state[\"optimizer_history\"] = [\n {\"criterion_name\": \"CrossEntropyCriterion\", \"best_loss\": state[\"best_loss\"]}\n ]\n state[\"last_optimizer_state\"] = state[\"optimizer\"]\n del state[\"optimizer\"]\n del state[\"best_loss\"]\n # move extra_state into sub-dictionary\n if \"epoch\" in state and 
\"extra_state\" not in state:\n state[\"extra_state\"] = {\n \"epoch\": state[\"epoch\"],\n \"batch_offset\": state[\"batch_offset\"],\n \"val_loss\": state[\"val_loss\"],\n }\n del state[\"epoch\"]\n del state[\"batch_offset\"]\n del state[\"val_loss\"]\n # reduce optimizer history's memory usage (only keep the last state)\n if \"optimizer\" in state[\"optimizer_history\"][-1]:\n state[\"last_optimizer_state\"] = state[\"optimizer_history\"][-1][\"optimizer\"]\n for optim_hist in state[\"optimizer_history\"]:\n del optim_hist[\"optimizer\"]\n # record the optimizer class name\n if \"optimizer_name\" not in state[\"optimizer_history\"][-1]:\n state[\"optimizer_history\"][-1][\"optimizer_name\"] = \"FairseqNAG\"\n # move best_loss into lr_scheduler_state\n if \"lr_scheduler_state\" not in state[\"optimizer_history\"][-1]:\n state[\"optimizer_history\"][-1][\"lr_scheduler_state\"] = {\n \"best\": state[\"optimizer_history\"][-1][\"best_loss\"]\n }\n del state[\"optimizer_history\"][-1][\"best_loss\"]\n # keep track of number of updates\n if \"num_updates\" not in state[\"optimizer_history\"][-1]:\n state[\"optimizer_history\"][-1][\"num_updates\"] = 0\n # use stateful training data iterator\n if \"train_iterator\" not in state[\"extra_state\"]:\n state[\"extra_state\"][\"train_iterator\"] = {\n \"epoch\": state[\"extra_state\"][\"epoch\"],\n \"iterations_in_epoch\": state[\"extra_state\"].get(\"batch_offset\", 0),\n }\n\n # backward compatibility, cfg updates\n if \"args\" in state and state[\"args\"] is not None:\n # old model checkpoints may not have separate source/target positions\n if hasattr(state[\"args\"], \"max_positions\") and not hasattr(\n state[\"args\"], \"max_source_positions\"\n ):\n state[\"args\"].max_source_positions = state[\"args\"].max_positions\n state[\"args\"].max_target_positions = state[\"args\"].max_positions\n # default to translation task\n if not hasattr(state[\"args\"], \"task\"):\n state[\"args\"].task = \"translation\"\n # --raw-text and --lazy-load are deprecated\n if getattr(state[\"args\"], \"raw_text\", False):\n state[\"args\"].dataset_impl = \"raw\"\n elif getattr(state[\"args\"], \"lazy_load\", False):\n state[\"args\"].dataset_impl = \"lazy\"\n # epochs start at 1\n if state[\"extra_state\"][\"train_iterator\"] is not None:\n state[\"extra_state\"][\"train_iterator\"][\"epoch\"] = max(\n state[\"extra_state\"][\"train_iterator\"].get(\"epoch\", 1), 1\n )\n # --remove-bpe ==> --postprocess\n if hasattr(state[\"args\"], \"remove_bpe\"):\n state[\"args\"].post_process = state[\"args\"].remove_bpe\n # --min-lr ==> --stop-min-lr\n if hasattr(state[\"args\"], \"min_lr\"):\n state[\"args\"].stop_min_lr = state[\"args\"].min_lr\n del state[\"args\"].min_lr\n # binary_cross_entropy / kd_binary_cross_entropy => wav2vec criterion\n if hasattr(state[\"args\"], \"criterion\") and state[\"args\"].criterion in [\n \"binary_cross_entropy\",\n \"kd_binary_cross_entropy\",\n ]:\n state[\"args\"].criterion = \"wav2vec\"\n # remove log_keys if it's None (criteria will supply a default value of [])\n if hasattr(state[\"args\"], \"log_keys\") and state[\"args\"].log_keys is None:\n delattr(state[\"args\"], \"log_keys\")\n # speech_pretraining => audio pretraining\n if (\n hasattr(state[\"args\"], \"task\")\n and state[\"args\"].task == \"speech_pretraining\"\n ):\n state[\"args\"].task = \"audio_pretraining\"\n # audio_cpc => wav2vec\n if hasattr(state[\"args\"], \"arch\") and state[\"args\"].arch == \"audio_cpc\":\n state[\"args\"].arch = \"wav2vec\"\n # convert legacy 
float learning rate to List[float]\n if hasattr(state[\"args\"], \"lr\") and isinstance(state[\"args\"].lr, float):\n state[\"args\"].lr = [state[\"args\"].lr]\n # convert task data arg to a string instead of List[string]\n if (\n hasattr(state[\"args\"], \"data\")\n and isinstance(state[\"args\"].data, list)\n and len(state[\"args\"].data) > 0\n ):\n state[\"args\"].data = state[\"args\"].data[0]\n\n state[\"cfg\"] = convert_namespace_to_omegaconf(state[\"args\"])\n\n if \"cfg\" in state and state[\"cfg\"] is not None:\n cfg = state[\"cfg\"]\n with open_dict(cfg):\n # any upgrades for Hydra-based configs\n if (\n \"task\" in cfg\n and \"eval_wer_config\" in cfg.task\n and isinstance(cfg.task.eval_wer_config.print_alignment, bool)\n ):\n cfg.task.eval_wer_config.print_alignment = \"hard\"\n if \"generation\" in cfg and isinstance(cfg.generation.print_alignment, bool):\n cfg.generation.print_alignment = (\n \"hard\" if cfg.generation.print_alignment else None\n )\n if (\n \"model\" in cfg\n and \"w2v_args\" in cfg.model\n and cfg.model.w2v_args is not None\n and (\n hasattr(cfg.model.w2v_args, \"task\") or \"task\" in cfg.model.w2v_args\n )\n and hasattr(cfg.model.w2v_args.task, \"eval_wer_config\")\n and cfg.model.w2v_args.task.eval_wer_config is not None\n and isinstance(\n cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool\n )\n ):\n cfg.model.w2v_args.task.eval_wer_config.print_alignment = \"hard\"\n\n return state\n\n\ndef prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):\n \"\"\"Prune the given state_dict if desired for LayerDrop\n (https://arxiv.org/abs/1909.11556).\n\n Training with LayerDrop allows models to be robust to pruning at inference\n time. This function prunes state_dict to allow smaller models to be loaded\n from a larger model and re-maps the existing state_dict for this to occur.\n\n It's called by functions that load models from checkpoints and does not\n need to be called directly.\n \"\"\"\n arch = None\n if model_cfg is not None:\n arch = (\n model_cfg._name\n if isinstance(model_cfg, DictConfig)\n else getattr(model_cfg, \"arch\", None)\n )\n\n if not model_cfg or arch is None or arch == \"ptt_transformer\":\n # args should not be none, but don't crash if it is.\n return state_dict\n\n encoder_layers_to_keep = getattr(model_cfg, \"encoder_layers_to_keep\", None)\n decoder_layers_to_keep = getattr(model_cfg, \"decoder_layers_to_keep\", None)\n\n if not encoder_layers_to_keep and not decoder_layers_to_keep:\n return state_dict\n\n # apply pruning\n logger.info(\n \"Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop\"\n )\n\n def create_pruning_pass(layers_to_keep, layer_name):\n keep_layers = sorted(\n int(layer_string) for layer_string in layers_to_keep.split(\",\")\n )\n mapping_dict = {}\n for i in range(len(keep_layers)):\n mapping_dict[str(keep_layers[i])] = str(i)\n\n regex = re.compile(r\"^{layer}.*\\.layers\\.(\\d+)\".format(layer=layer_name))\n return {\"substitution_regex\": regex, \"mapping_dict\": mapping_dict}\n\n pruning_passes = []\n if encoder_layers_to_keep:\n pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, \"encoder\"))\n if decoder_layers_to_keep:\n pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, \"decoder\"))\n\n new_state_dict = {}\n for layer_name in state_dict.keys():\n match = re.search(r\"\\.layers\\.(\\d+)\\.\", layer_name)\n # if layer has no number in it, it is a supporting layer, such as an\n # embedding\n if not match:\n 
new_state_dict[layer_name] = state_dict[layer_name]\n continue\n\n # otherwise, layer should be pruned.\n original_layer_number = match.group(1)\n # figure out which mapping dict to replace from\n for pruning_pass in pruning_passes:\n if original_layer_number in pruning_pass[\"mapping_dict\"] and pruning_pass[\n \"substitution_regex\"\n ].search(layer_name):\n new_layer_number = pruning_pass[\"mapping_dict\"][original_layer_number]\n substitution_match = pruning_pass[\"substitution_regex\"].search(\n layer_name\n )\n new_state_key = (\n layer_name[: substitution_match.start(1)]\n + new_layer_number\n + layer_name[substitution_match.end(1) :]\n )\n new_state_dict[new_state_key] = state_dict[layer_name]\n\n # Since layers are now pruned, *_layers_to_keep are no longer needed.\n # This is more of \"It would make it work fix\" rather than a proper fix.\n if isinstance(model_cfg, DictConfig):\n context = open_dict(model_cfg)\n else:\n context = contextlib.ExitStack()\n with context:\n if hasattr(model_cfg, \"encoder_layers_to_keep\"):\n model_cfg.encoder_layers_to_keep = None\n if hasattr(model_cfg, \"decoder_layers_to_keep\"):\n model_cfg.decoder_layers_to_keep = None\n\n return new_state_dict\n\n\ndef load_pretrained_component_from_model(\n component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str\n):\n \"\"\"\n Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the\n provided `component` object. If state_dict fails to load, there may be a\n mismatch in the architecture of the corresponding `component` found in the\n `checkpoint` file.\n \"\"\"\n if not PathManager.exists(checkpoint):\n raise IOError(\"Model file not found: {}\".format(checkpoint))\n state = load_checkpoint_to_cpu(checkpoint)\n if isinstance(component, FairseqEncoder):\n component_type = \"encoder\"\n elif isinstance(component, FairseqDecoder):\n component_type = \"decoder\"\n else:\n raise ValueError(\n \"component to load must be either a FairseqEncoder or \"\n \"FairseqDecoder. Loading other component types are not supported.\"\n )\n component_state_dict = OrderedDict()\n for key in state[\"model\"].keys():\n if key.startswith(component_type):\n # encoder.input_layers.0.0.weight --> input_layers.0.0.weight\n component_subkey = key[len(component_type) + 1 :]\n component_state_dict[component_subkey] = state[\"model\"][key]\n component.load_state_dict(component_state_dict, strict=True)\n return component\n\n\ndef verify_checkpoint_directory(save_dir: str) -> None:\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n temp_file_path = os.path.join(save_dir, \"dummy\")\n try:\n with open(temp_file_path, \"w\"):\n pass\n except OSError as e:\n logger.warning(\n \"Unable to access checkpoint save directory: {}\".format(save_dir)\n )\n raise e\n else:\n os.remove(temp_file_path)\n\n\ndef load_ema_from_checkpoint(fpath):\n \"\"\"Loads exponential moving averaged (EMA) checkpoint from input and\n returns a model with ema weights.\n\n Args:\n fpath: A string path of checkpoint to load from.\n\n Returns:\n A dict of string keys mapping to various values. 
The 'model' key\n from the returned dict should correspond to an OrderedDict mapping\n string parameter names to torch Tensors.\n \"\"\"\n params_dict = collections.OrderedDict()\n new_state = None\n\n with PathManager.open(fpath, \"rb\") as f:\n new_state = torch.load(\n f,\n map_location=(\n lambda s, _: torch.serialization.default_restore_location(s, \"cpu\")\n ),\n )\n\n # EMA model is stored in a separate \"extra state\"\n model_params = new_state[\"extra_state\"][\"ema\"]\n\n for key in list(model_params.keys()):\n p = model_params[key]\n if isinstance(p, torch.HalfTensor):\n p = p.float()\n if key not in params_dict:\n params_dict[key] = p.clone()\n # NOTE: clone() is needed in case of p is a shared parameter\n else:\n raise ValueError(\"Key {} is repeated in EMA model params.\".format(key))\n\n if len(params_dict) == 0:\n raise ValueError(\n f\"Input checkpoint path '{fpath}' does not contain \"\n \"ema model weights, is this model trained with EMA?\"\n )\n\n new_state[\"model\"] = params_dict\n return new_state\n"
]
| [
[
"torch.device",
"torch.save",
"numpy.random.randint",
"torch.distributed.barrier",
"torch.serialization.default_restore_location"
]
]
|
dprelogo/tools21cm | [
"ba6aa185ced0cd73263e5750df02d6a54a545a98"
]
| [
"example/example_BSD.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tools21cm as t2c\n\n### Setting the simulation environment\nt2c.set_sim_constants(244)\n\n### Reading files\nxfrac_filename = '/disk/dawn-1/garrelt/Reionization/C2Ray_WMAP7/244Mpc/244Mpc_f2_0_250/results/xfrac3d_6.450.bin'\n\nxfrac = t2c.read_c2ray_files(xfrac_filename, file_type='xfrac') # Ionization fraction file\nneut = 1 - xfrac \t\t\t\t\t\t# Neutral fraction file\n\n### Redshift from filename.....It can be manually given also\nz = float(xfrac_filename.split('_')[-1].split('.b')[0])\n\n### Smoothing neutral field to SKA resolution\nsmt_neut = t2c.smooth_coeval(neut, z)\n\n### Generating the binary fields\nxth = 0.5 # The fixed threshold to identify the regions of interest\nbin_neut = neut > xth \nbin_smt = smt_neut > xth\n\n### Looking at the slices\nfig, axes = plt.subplots(nrows=2, ncols=2)\nfig.subplots_adjust(left=0.07, bottom=0.06, right=0.90, top=0.96, wspace=0.01, hspace=0.15)\nim00 = axes[0,0].imshow(neut[:,:,125], vmin=0, vmax=1)\nim01 = axes[0,1].imshow(smt_neut[:,:,125], vmin=0, vmax=1)\nim10 = axes[1,0].imshow(bin_neut[:,:,125], vmin=0, vmax=1)\nim11 = axes[1,1].imshow(bin_smt[:,:,125], vmin=0, vmax=1)\n\ncax = fig.add_axes([0.91, 0.08, 0.02, 0.88])\ncbar = fig.colorbar(im00, cax=cax)\ncbar.set_label('x$_\\mathrm{HI}$')\n\nplt.show()\n\n\n#################### Size statistics\n\n### MFP\nrs, dn, r_p = t2c.mfp(bin_neut, boxsize=t2c.conv.LB)\nrs_smt, dn_smt, r_p_smt = t2c.mfp(bin_smt, boxsize=t2c.conv.LB)\n\n### FOF\nmp, sz = t2c.fof(bin_neut) # This gives a list of sizes\nmp_smt, sz_smt = t2c.fof(bin_smt) # We have to convert them into the probability distribution\n\nvolume_resolution = t2c.conv.LB**3/neut.shape[0]**3\nvs, vdn, dm = t2c.plot_fof_sizes(sz*volume_resolution, bins=30)\nvs_smt, vdn_smt, dm_smt = t2c.plot_fof_sizes(sz_smt*volume_resolution, bins=30)\n\n\n### Ploting the BSDs\nfig, axes = plt.subplots(nrows=2, ncols=1)\nfig.set_size_inches(6.0, 12.0,forward=True)\nfig.subplots_adjust(left=0.14, bottom=0.06, right=0.96, top=0.96, wspace=0.01, hspace=0.25)\n\naxes[0].set_title('MFP-BSD')\naxes[0].semilogx(rs, dn, c='b', label='SimRes')\naxes[0].semilogx(rs_smt, dn_smt, '--', c='b', label='Smooth')\naxes[0].set_xlim(1.25,110)\naxes[0].set_xlabel('R (Mpc)')\naxes[0].set_ylabel('R$\\\\frac{\\mathbf{dp}}{\\mathbf{dR}}$')\naxes[0].legend(loc=2)\naxes[1].set_title('FOF-BSD')\naxes[1].loglog(vs, vdn, c='r', label='SimRes', linestyle='steps')\naxes[1].loglog(vs_smt, vdn_smt, c='r', label='Smooth', linestyle='steps--')\naxes[1].set_ylim(max(dm,dm_smt),1)\naxes[1].set_xlim(volume_resolution,3e6)\naxes[1].set_xlabel('V (Mpc$^3$)')\naxes[1].set_ylabel('V$^2\\\\frac{\\mathbf{dp}}{\\mathbf{dV}}$ (Mpc)')\naxes[1].legend(loc=2)\nplt.show()\n\n\n"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
]
|
ZhuoyiZou/python-challenge | [
"c4a3e3a0ec54f46fbf98808cfd8b8ab4fe581e74"
]
| [
"PyBank/main.py"
]
| [
"# Import module \nimport csv\nimport os\nimport numpy as np\n\n\n# Select the csv file through directory \npybank_data = os.path.join(\".\", \"PyBank_Resources_budget_data.csv\") \n\n\ncolumn_1 = []\ncolumn_2 = []\ntotal_amount = 0\n# Read the csv file \nwith open (pybank_data, newline = \"\") as csvfile:\n pybank_data_reader = csv.reader(csvfile, delimiter = \",\")\n for row in pybank_data_reader:\n column_1.append(row[0])\n column_2.append(row[1])\n\n \n# Calculate the number of months.\ntotal_month = len(column_1) - 1\n\n\n# Calculate the total net amount profit/loss\ntotal_net_amount = 0\nfor i in column_2[1:]:\n total_net_amount = total_net_amount + int(i)\n\n \n# Calculate the average changes \nconverted_column_2 = [int(i) for i in column_2[1:]]\nchanges = []\nfor i in range(len(converted_column_2)):\n changes.append(converted_column_2[i] - converted_column_2[i-1])\naverage_changes = round(np.mean(changes[1:]),2)\n\n\n# Find the greatest increase and greatesr decrease in profit\ngreatest_increase = max(changes)\ngreatest_decrease = min(changes)\nfor i in range(len(changes)):\n if changes[i] == greatest_increase:\n increase_month = i + 1\n elif changes[i] == greatest_decrease:\n decrease_month = i + 1\ngreatest_increase_month = column_1[increase_month]\ngreatest_decrease_month = column_1[decrease_month]\n\n\n# Print the results\nprint(\"Financial Analysis\")\nprint(\"-------------------------\")\nprint(f\"Total Months: {total_month}\")\nprint(f\"Total: ${total_net_amount}\")\nprint(f\"Average Change: ${average_changes}\")\nprint(f\"Greatest Increase in Profits: {greatest_increase_month} (${greatest_increase})\")\nprint(f\"Greatest Decrease in Profits: {greatest_decrease_month} (${greatest_decrease})\")\n "
]
| [
[
"numpy.mean"
]
]
|
muratcancicek/Deep_RL_For_Head_Pose_Est | [
"03fe9fadc19f0d6dab3a42d9bcf5ec4a12a2c253"
]
| [
"DeepRL_For_HPE/TF_RNN/data_processing.py"
]
| [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport pandas as pd\n\n\ndef x_sin(x):\n return x * np.sin(x)\n\n\ndef sin_cos(x):\n return pd.DataFrame(dict(a=np.sin(x), b=np.cos(x)), index=x)\n\n\ndef rnn_data(data, time_steps, labels=False):\n \"\"\"\n creates new data frame based on previous observation\n * example:\n l = [1, 2, 3, 4, 5]\n time_steps = 2\n -> labels == False [[1, 2], [2, 3], [3, 4]]\n -> labels == True [3, 4, 5]\n \"\"\"\n rnn_df = []\n for i in range(len(data) - time_steps):\n if labels:\n try:\n rnn_df.append(data.iloc[i + time_steps].as_matrix())\n except AttributeError:\n rnn_df.append(data.iloc[i + time_steps])\n else:\n data_ = data.iloc[i: i + time_steps].as_matrix()\n rnn_df.append(data_ if len(data_.shape) > 1 else [[i] for i in data_])\n\n return np.array(rnn_df, dtype=np.float32)\n\n\ndef split_data(data, val_size=0.1, test_size=0.1):\n \"\"\"\n splits data to training, validation and testing parts\n \"\"\"\n ntest = int(round(len(data) * (1 - test_size)))\n nval = int(round(len(data.iloc[:ntest]) * (1 - val_size)))\n\n df_train, df_val, df_test = data.iloc[:nval], data.iloc[nval:ntest], data.iloc[ntest:]\n\n return df_train, df_val, df_test\n\n\ndef prepare_data(data, time_steps, labels=False, val_size=0.1, test_size=0.1):\n \"\"\"\n Given the number of `time_steps` and some data,\n prepares training, validation and test data for an lstm cell.\n \"\"\"\n df_train, df_val, df_test = split_data(data, val_size, test_size)\n return (rnn_data(df_train, time_steps, labels=labels),\n rnn_data(df_val, time_steps, labels=labels),\n rnn_data(df_test, time_steps, labels=labels))\n\n\ndef load_csvdata(rawdata, time_steps, seperate=False):\n data = rawdata\n if not isinstance(data, pd.DataFrame):\n data = pd.DataFrame(data)\n\n train_x, val_x, test_x = prepare_data(data['a'] if seperate else data, time_steps)\n train_y, val_y, test_y = prepare_data(data['b'] if seperate else data, time_steps, labels=True)\n return dict(train=train_x, val=val_x, test=test_x), dict(train=train_y, val=val_y, test=test_y)\n\n\ndef generate_data(fct, x, time_steps, seperate=False):\n \"\"\"generates data with based on a function fct\"\"\"\n data = fct(x)\n if not isinstance(data, pd.DataFrame):\n data = pd.DataFrame(data)\n train_x, val_x, test_x = prepare_data(data['a'] if seperate else data, time_steps)\n train_y, val_y, test_y = prepare_data(data['b'] if seperate else data, time_steps, labels=True)\n return dict(train=train_x, val=val_x, test=test_x), dict(train=train_y, val=val_y, test=test_y)\n"
]
| [
[
"pandas.DataFrame",
"numpy.array",
"numpy.sin",
"numpy.cos"
]
]
|