repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
DwangoMediaVillage/marltas_core | [
"91a5caf75c2350a31d47d1b0408c817644a0d41e"
] | [
"bin/async_rnn_train.py"
] | [
"\"\"\"Asynchronized (distributed) rnn tranining.\"\"\"\nimport os # noqa isort:skip\nos.environ['OMP_NUM_THREADS'] = '1' # noqa isort:skip\n\nimport argparse\nimport logging\nimport pprint\nimport time\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom dqn.actor_manager import ActorManagerClient, run_actor_manager_server\nfrom dqn.actor_runner import ActorRunner\nfrom dqn.async_train import AsyncTrainerConfig, async_train\nfrom dqn.evaluator import EvaluatorClient, EvaluatorServerRunner\nfrom dqn.param_distributor import (ParamDistributorClient,\n run_param_distributor_server)\nfrom dqn.policy import PolicyParam\nfrom dqn.rnn.config import RNNConfigBase\nfrom dqn.rnn.datum import Batch\nfrom dqn.rnn.evaluator import run_evaluator_server\nfrom dqn.rnn.learner import Learner\nfrom dqn.rnn.replay_buffer import ReplayBufferServer\nfrom dqn.rnn.run_actor import run_actor\nfrom dqn.subprocess_manager import SubprocessManager\nfrom dqn.utils import init_log_dir, init_random_seed\n\n\n@dataclass\nclass Config(RNNConfigBase):\n trainer: AsyncTrainerConfig = AsyncTrainerConfig()\n\n\ndef init_actor_runner(config: Config) -> ActorRunner:\n \"\"\"Initialize actor runner.\n\n Args:\n config: Configuration of training.\n \"\"\"\n policy_param = PolicyParam(epsilon=np.ones(config.actor.vector_env_size),\n gamma=np.ones(config.actor.vector_env_size) * config.gamma)\n actor_runner = ActorRunner(n_processes=config.n_actor_process,\n run_actor_func=partial(run_actor, init_policy_param=policy_param, config=config))\n return actor_runner\n\n\ndef main_run_actor(config: Config, logger: logging.Logger = logging.getLogger(__name__)) -> None:\n \"\"\"Run actor forever.\n\n Args:\n config: Training configuration.\n logger: Logger object.\n \"\"\"\n actor_runner = init_actor_runner(config)\n logger.info(\"Actor runner initialized.\")\n\n try:\n actor_runner.start()\n logger.info(\"Actor runner start.\")\n while True:\n assert actor_runner.workers_alive, f\"Actor runner's worker died.\"\n time.sleep(1)\n finally:\n logger.info(f\"Finalize actor runner\")\n actor_runner.finalize()\n\n\ndef main(log_dir: Path, enable_actor: bool, config: Config,\n logger: logging.Logger = logging.getLogger(__name__)) -> None:\n \"\"\"Run all the components.\n\n Args:\n log_dir: Directory to put log data.\n config: Training configuration.\n logger: Logger object.\n \"\"\"\n # show configuration\n logger.info(pprint.pformat(asdict(config)))\n\n # init config\n if not enable_actor:\n logger.warning('enable_actor is false. 
You should run actor in other process')\n config.n_actor_process = 0 # disable actor\n\n # NOTE: All child processes should be forked before init gRPC channel (https://github.com/grpc/grpc/issues/13873)\n subprocess_manager = SubprocessManager()\n\n # init actor manager\n subprocess_manager.append_worker(\n partial(run_actor_manager_server,\n url=config.actor_manager_url,\n gamma=config.gamma,\n config=config.trainer.actor_manager))\n\n # init param distributor\n subprocess_manager.append_worker(partial(run_param_distributor_server, url=config.param_distributor_url))\n\n # init evaluator\n evaluator_runner = EvaluatorServerRunner(run_evaluator_server_func=partial(run_evaluator_server, config=config))\n\n # may init actor\n actor_runner = init_actor_runner(config=config)\n\n # init replay buffer\n replay_buffer_server = ReplayBufferServer(config=config)\n\n # init learner\n learner = Learner(config=config)\n\n try:\n\n def check_subprocess_func():\n \"\"\"Helper function to check child processes.\"\"\"\n assert subprocess_manager.workers_alive, 'Subprocess manager worker has been dead'\n assert evaluator_runner.workers_alive, 'Evaluator runner worker has been dead'\n assert actor_runner.workers_alive, 'Actor runner worker has been dead'\n\n check_subprocess_func()\n\n # init grpc clients of trainer\n evaluator_runner.start()\n actor_runner.start()\n\n evaluator_client = EvaluatorClient(url=config.evaluator_url)\n param_distributor_client = ParamDistributorClient(url=config.param_distributor_url)\n actor_manager_client = ActorManagerClient(url=config.actor_manager_url)\n\n # run train\n async_train(log_dir=log_dir,\n check_subprocess_func=check_subprocess_func,\n actor_manager_client=actor_manager_client,\n evaluator_client=evaluator_client,\n param_distributor_client=param_distributor_client,\n replay_buffer_server=replay_buffer_server,\n learner=learner,\n batch_from_sample=Batch.from_buffer_sample,\n config=config.trainer)\n finally:\n replay_buffer_server.finalize()\n subprocess_manager.finalize()\n evaluator_runner.finalize()\n actor_runner.finalize()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Asynchronized RNN-DQN training.\")\n parser.add_argument('log_dir', type=Path, help=\"Directory to put log and snapshots\")\n parser.add_argument('--log_level',\n type=str,\n choices=('debug', 'info', 'error', 'critical'),\n default='info',\n help=\"Logging level\")\n parser.add_argument('--disable_actor', action='store_true', help=\"Disable actor module or not.\")\n parser.add_argument('--run_only_actor', action='store_true', help=\"Running only actor module or not.\")\n parser.add_argument('--config', type=Path, help=\"Path of DQN configuration YAML file.\")\n parser.add_argument('--seed', type=int, default=1, help=\"Random seed value.\")\n args = parser.parse_args()\n\n # init configuration\n config = Config.load_from_yaml(args.config) if args.config else Config()\n\n # init log_dir\n log_handlers = [logging.StreamHandler()]\n if not args.run_only_actor:\n args.log_dir.mkdir(exist_ok=False, parents=False)\n init_log_dir(args.log_dir, config)\n log_handlers.append(logging.FileHandler(args.log_dir / 'main.log'))\n\n # init logger\n logging.basicConfig(level=getattr(logging, args.log_level.upper()),\n format='[%(asctime)s %(name)s %(levelname)s] %(message)s',\n datefmt='%Y/%m/%d %I:%M:%S',\n handlers=log_handlers)\n\n # init random seed\n init_random_seed(args.seed)\n\n # start training or exploration\n if args.run_only_actor:\n assert not args.disable_actor, 
'run_actor should be specified without disable_actor.'\n main_run_actor(config)\n else:\n main(args.log_dir, not args.disable_actor, config)\n"
] | [
[
"numpy.ones"
]
] |
elucherini/t-recs | [
"ab50f81d5413700775783c5b3eafd9ef4289d9d5"
] | [
"trecs/components/users.py"
] | [
"\"\"\"\nSuite of classes related to users of the system, including predicted user-item\nscores, predicted user profiles, actual user profiles, and a Users class (which\nencapsulates some of these concepts).\n\"\"\"\nimport numpy as np\nimport scipy.sparse as sp\n\nimport trecs.matrix_ops as mo\nfrom trecs.random import Generator\nfrom trecs.utils import check_consistency\nfrom trecs.base import Component, BaseComponent\n\n\nclass PredictedScores(Component): # pylint: disable=too-many-ancestors\n \"\"\"\n User scores about items generated by the model. This class is a container\n compatible with Numpy operations and it does not make assumptions on the\n size of the representation.\n \"\"\"\n\n def __init__(self, predicted_scores=None, verbose=False):\n self.name = \"predicted_user_scores\"\n Component.__init__(\n self, current_state=predicted_scores, size=None, verbose=verbose, seed=None\n )\n\n def filter_by_index(self, item_indices):\n \"\"\"\n Return a subset of the predicted scores, filtered by the indices\n of valid items.\n\n Parameters\n -------------\n\n item_indices: :obj:`numpy.ndarray` or :obj:`scipy.sparse.spmatrix`\n A matrix with :math:`|U|` rows that specifies the indices of items\n requested for each user.\n\n \"\"\"\n if item_indices.shape[0] != self.current_state.shape[0]:\n error_msg = \"Number of users does not match between score matrix and item index matrix\"\n raise ValueError(error_msg)\n # generates row matrix like the following:\n # [0, 0, 0, ..., 0]\n # [1, 1, 1, ..., 1]\n # [ ... ]\n # [n, n, n, ..., n]\n num_users = item_indices.shape[0]\n row = np.repeat(np.arange(num_users), item_indices.shape[1]).reshape((num_users, -1))\n # for now, we have to keep the score matrix a dense array because scipy\n # sparse has no equivalent of argsort\n # TODO: look into potential solutions using things like Numba to maintain\n # speed?\n # https://stackoverflow.com/questions/31790819/scipy-sparse-csr-matrix-how-to-get-top-ten-values-and-indices\n return mo.to_dense(self.current_state)[row, item_indices]\n\n def append_new_scores(self, new_scores):\n \"\"\"\n Appends a set of scores for new items to the current set of scores.\n\n Parameters\n -------------\n\n new_scores: :obj:`numpy.ndarray` or :obj:`scipy.sparse.spmatrix`\n Matrix of new scores with dimension :math:`|U|\\\\times|I_{new}|`,\n where :math:`I_{new}` indicates the number of new items whose scores\n are being to be appended.\n \"\"\"\n self.current_state = mo.hstack([self.current_state, new_scores])\n\n\nclass PredictedUserProfiles(Component): # pylint: disable=too-many-ancestors\n \"\"\"\n User profiles as predicted by the model. This class is a container\n compatible with Numpy operations and it does not make assumptions on the\n size of the representation.\n \"\"\"\n\n def __init__(self, user_profiles=None, size=None, verbose=False, seed=None):\n self.name = \"predicted_users\"\n Component.__init__(self, current_state=user_profiles, size=size, verbose=verbose, seed=seed)\n\n @property\n def num_users(self):\n \"\"\"\n Shortcut getter method for the number of users.\n \"\"\"\n return self.current_state.shape[0]\n\n @property\n def num_attrs(self):\n \"\"\"\n Shortcut getter method for the number of attributes in each user profile.\n \"\"\"\n return self.current_state.shape[1]\n\n\nclass ActualUserProfiles(Component): # pylint: disable=too-many-ancestors\n \"\"\"\n True user profiles, unknown to the model. 
This class is a container\n compatible with Numpy operations and it does not make assumptions on the\n size of the representation.\n \"\"\"\n\n def __init__(self, user_profiles=None, size=None, verbose=False, seed=None):\n self.name = \"actual_user_profiles\"\n Component.__init__(self, current_state=user_profiles, size=size, verbose=verbose, seed=seed)\n\n\nclass ActualUserScores(Component): # pylint: disable=too-many-ancestors\n \"\"\"\n Matrix of true user-item scores, unknown to the model.\n \"\"\"\n\n def __init__(self, user_profiles=None, size=None, verbose=False, seed=None):\n self.name = \"actual_user_scores\"\n if user_profiles is not None:\n num_users, num_items = user_profiles.shape\n self.user_rows = np.repeat(np.arange(num_users), num_items).reshape((num_users, -1))\n else:\n self.user_rows = None\n Component.__init__(self, current_state=user_profiles, size=size, verbose=verbose, seed=seed)\n\n def get_item_scores(self, items_shown):\n \"\"\"\n Return the user scores for the items shown, in the order specified by the\n list of items shown to each user.\n \"\"\"\n if self.user_rows is None or self.user_rows.shape != self.current_state.shape:\n num_users, num_items = self.current_state.shape\n self.user_rows = np.repeat(np.arange(num_users), num_items).reshape((num_users, -1))\n num_items = items_shown.shape[1]\n return self.current_state[self.user_rows[:, :num_items], items_shown]\n\n def set_item_scores_to_value(self, item_indices, value):\n \"\"\"\n Set scores for the specified user-item indices to the determined\n value.\n\n Parameters\n -----------\n item_indices: :obj:`numpy.ndarray` or :obj:`scipy.sparse.spmatrix`\n A matrix with :math:`|U|` rows that specifies the indices of items\n requested for each user.\n\n value: float\n Single value with which to replace scores.\n \"\"\"\n if self.user_rows is None or self.user_rows.shape != self.current_state.shape:\n num_users, num_items = self.current_state.shape\n self.user_rows = np.repeat(np.arange(num_users), num_items).reshape((num_users, -1))\n num_items = item_indices.shape[1]\n self.current_state[self.user_rows[:, :num_items], item_indices] = value\n\n def append_new_scores(self, new_scores):\n \"\"\"\n Appends a set of scores for new items to the current set of scores.\n\n Parameters\n -------------\n\n new_scores: :obj:`numpy.ndarray` or :obj:`scipy.sparse.spmatrix`\n Matrix of new scores with dimension :math:`|U|\\\\times|I_{new}|`,\n where :math:`I_{new}` indicates the number of new items whose scores\n are being to be appended.\n \"\"\"\n self.current_state = mo.hstack([self.current_state, new_scores])\n # update user rows matrix\n num_users, num_items = self.current_state.shape\n self.user_rows = np.repeat(np.arange(num_users), num_items).reshape((num_users, -1))\n\n @property\n def num_users(self):\n \"\"\"\n Shortcut getter method for the number of users.\n \"\"\"\n # rows = users, cols = items\n return self.current_state.shape[0]\n\n @property\n def num_items(self):\n \"\"\"\n Shortcut getter method for the number of items.\n \"\"\"\n # rows = users, cols = items\n return self.current_state.shape[1]\n\n\nclass Users(BaseComponent): # pylint: disable=too-many-ancestors\n \"\"\"\n Class representing users in the system.\n\n This class contains the real user preferences, which are unknown to the\n system, and the behavior of users when interacting with items.\n\n In general, users are represented with single *array_like* objects that\n contain all the users' preferences and characteristics. 
For example, real\n user preferences can be represented by a Numpy ndarray of size\n `(number_of_users, number_of_items)` where element `[u,i]` is the score\n assigned by user u to item i.\n\n Models determine the size constraints of objects representing users.\n Requirements vary across models and, unless specified, this class does not\n make assumptions on the real user components.\n\n This class inherits from :class:`~base.base_components.BaseComponent`.\n\n Parameters\n ------------\n\n actual_user_profiles: array_like, optional\n Representation of the real user profiles.\n\n actual_user_scores: array_like, optional\n Representation of the real scores that users assign to items.\n\n interact_with_items: callable, optional\n Function that specifies the behavior of users when interacting with\n items. If None, users follow the behavior specified in\n :meth:`get_user_feedback()`.\n\n num_users: int, optional\n The number of users in the system.\n\n size: tuple, optional\n Size of the user representation. It expects a tuple. If None,\n it is chosen randomly.\n\n drift: float, default 0\n If greater than 0, user profiles will update dynamically as they\n interact with items, \"drifting\" towards the item attribute vectors\n they interact with. ``drift`` is a parameter between 0 and 1 that\n controls the degree of rotational drift. If ``t=1``, then the user\n profile vector takes on the exact same direction as the attribute\n vector of the item they just interacted with. If 0, user profiles\n are generated once at initialization and never change.\n\n attention_exp: float, default 0\n If this parameter is non-zero, then the order of the items\n in the recommendation set affects the user's choice, in that\n the item chosen will be a function of its index in the recommendation\n set and the underlying user-item score. (See Chaney et al. 2018\n for a description of this mechanism.) Concretely, the item chosen will\n be according to\n :math:`i_u(t)=\\\\mathrm{argmax}_i( \\\\mathrm{rank}_{u,t}(i)^{\\\\alpha}\n \\\\cdot S_{u,i}(t) )`, where :math:`\\\\alpha` is the attention exponent\n and :math:`S_{u,i}(t)` is the underlying user-item score.\n\n score_fn: callable\n Function that is used to calculate each user's scores for each\n candidate item. The score function should take as input\n ``user_profiles`` and ``item_attributes``.\n\n verbose: bool, default False\n If ``True``, enables verbose mode. Disabled by default.\n\n seed: int, optional\n Seed for random generator.\n\n Attributes\n ------------\n\n Attributes from BaseComponent\n Inherited by :class:`~trecs.components.base_components.BaseComponent`\n\n actual_user_profiles: :obj:`numpy.ndarray`\n A matrix representing the *real* user profiles.\n\n actual_user_scores: :obj:`numpy.ndarray`\n A :math:`|U|\\\\times|I|` matrix representing the *true* scores assigned by\n each user to each item, where :math:`|U|` is the number of users and\n :math:`|I|` is the number of items in the system. The element of this matrix\n indexed by :math:`(u,i)` is the score assigned by user :math:`u` to item\n :math:`i`.\n\n interact_with_items: callable\n A function that defines user behaviors when interacting with items.\n If None, users follow the behavior in :meth:`get_user_feedback()`.\n\n user_vector: :obj:`numpy.ndarray`\n A :math:`|U|` array of user indices.\n\n score_fn: callable\n Function that is used to calculate each user's scores for each\n candidate item. 
The score function should take as input\n user_profiles and item_attributes.\n\n repeat_interactions: bool (optional, default: True)\n If ``True``, then users will interact with items regardless of whether\n they have already interacted with them before. If ``False``, users\n will not perform repeat interactions.\n\n Raises\n --------\n\n TypeError\n If parameters are of the wrong type.\n\n ValueError\n If both ``actual_user_profiles`` and ``size`` are None.\n \"\"\"\n\n def __init__(\n self,\n actual_user_profiles=None,\n actual_user_scores=None,\n interact_with_items=None,\n size=None,\n num_users=None,\n drift=0,\n score_fn=mo.inner_product,\n verbose=False,\n seed=None,\n attention_exp=0.0,\n repeat_interactions=True,\n ): # pylint: disable=too-many-arguments\n self.rng = Generator(seed=seed)\n # general input checks\n if actual_user_profiles is not None:\n if not isinstance(actual_user_profiles, (list, np.ndarray, sp.spmatrix)):\n raise TypeError(\n \"actual_user_profiles must be a list, numpy.ndarray, or scipy sparse matrix\"\n )\n if interact_with_items is not None and not callable(interact_with_items):\n raise TypeError(\"interact_with_items must be callable\")\n if actual_user_profiles is None and size is None:\n raise ValueError(\"actual_user_profiles and size can't both be None\")\n if actual_user_profiles is None and not isinstance(size, tuple):\n raise TypeError(\"size must be a tuple, is %s\" % type(size))\n if actual_user_scores is not None:\n if not isinstance(actual_user_scores, (list, np.ndarray, sp.spmatrix)):\n raise TypeError(\n \"actual_user_profiles must be a list, numpy.ndarray, or scipy sparse matrix\"\n )\n actual_user_scores = ActualUserScores(actual_user_scores)\n if actual_user_profiles is None and size is not None:\n row_zeros = np.zeros(size[1]) # one row vector of zeroes\n while actual_user_profiles is None or mo.contains_row(actual_user_profiles, row_zeros):\n # generate matrix until no row is the zero vector\n actual_user_profiles = self.rng.normal(size=size)\n\n # check_consistency also returns num_items and num_attributes, which are not needed\n num_users = check_consistency(\n users=actual_user_profiles, user_item_scores=actual_user_scores, num_users=num_users\n )[0]\n self.actual_user_profiles = ActualUserProfiles(actual_user_profiles)\n self.interact_with_items = interact_with_items\n self.drift = drift\n self.attention_exp = attention_exp\n if not callable(score_fn):\n raise TypeError(\"Custom score function must be a callable method\")\n self.score_fn = score_fn # function that dictates how scores will be generated\n self.actual_user_scores = actual_user_scores\n self.user_vector = np.arange(num_users, dtype=int)\n self.repeat_interactions = repeat_interactions\n if not repeat_interactions:\n self.user_interactions = np.array([], dtype=int).reshape((num_users, 0))\n self.name = \"actual_user_scores\"\n BaseComponent.__init__(self, verbose=verbose, init_value=self.actual_user_profiles.value)\n\n def set_score_function(self, score_fn):\n \"\"\"\n Users \"score\" items before \"deciding\" which item to interact with.\n This function makes it possible to set an arbitrary function as the\n score function.\n\n Parameters\n ------------\n\n score_fn: callable\n Function that is used to calculate each user's scores for each\n candidate item. Note that this function can be the same function\n used by the recommender system to generate its predictions for\n user-item scores. 
The score function should take as input\n ``user_profiles`` and ``item_attributes``.\n\n Raises\n --------\n\n TypeError\n If ``score_fn`` is not callable.\n \"\"\"\n if not callable(score_fn):\n raise TypeError(\"score function must be callable\")\n self.score_fn = score_fn\n\n def compute_user_scores(self, item_attributes):\n \"\"\"\n Computes and stores the actual scores that users assign to items\n compatible with the system. Note that we expect the ``self.score_fn``\n attribute to be set to some callable function which takes item\n attributes and user profiles.\n\n Parameters\n ------------\n\n item_attributes: :obj:`array_like`\n A matrix representation of item attributes.\n \"\"\"\n if not callable(self.score_fn):\n raise TypeError(\"score function must be callable\")\n actual_scores = self.score_fn(\n user_profiles=self.actual_user_profiles.value, item_attributes=item_attributes\n )\n if self.actual_user_scores is None:\n self.actual_user_scores = ActualUserScores(actual_scores)\n else:\n self.actual_user_scores.value = actual_scores\n\n self.actual_user_scores.store_state()\n\n def score_new_items(self, new_items):\n \"\"\"\n Computes and stores the actual scores that users assign to any new\n items that enter the system. Note that we expect the ``self.score_fn``\n attribute to be set to some callable function which takes item\n attributes and user profiles.\n\n Parameters\n ------------\n\n new_items: :obj:`array_like`\n A matrix representation of item attributes. Should be of dimension\n :math:`|A|\\\\times|I|`, where :math:`|I|` is the\n number of items and :math:`|A|` is the number of attributes.\n \"\"\"\n new_scores = self.score_fn(\n user_profiles=self.actual_user_profiles.value, item_attributes=new_items\n )\n self.actual_user_scores.append_new_scores(new_scores)\n self.actual_user_scores.store_state()\n\n def get_actual_user_scores(self, user=None):\n \"\"\"\n Returns an array of actual user scores.\n\n Parameters\n -----------\n\n user: int or :obj:`numpy.ndarray` or list (optional, default: None)\n Specifies the user index (or indices) for which to return the\n actual user scores. If ``None``, the function returns the whole\n matrix.\n\n Returns\n --------\n scores:\n An array of actual user scores for each item.\n \"\"\"\n if user is None:\n return self.actual_user_scores\n else:\n return self.actual_user_scores[user, :]\n\n def get_user_feedback(self, items_shown):\n \"\"\"\n Generates user interactions at a given timestep, generally called by a\n model.\n\n Parameters\n ------------\n\n args, kwargs:\n Parameters needed by the model's train function.\n\n items_shown: :obj:`numpy.ndarray`\n A :math:`|U|\\\\times\\\\text{num_items_per_iter}` matrix with\n recommendations and new items.\n\n Returns\n ---------\n :obj:`numpy.ndarray`\n Array of interactions s.t. element :math:`\\\\text{interactions}_u(t)` represents the\n index of the item selected by user `u` at time `t`. 
Shape: :math:`|U|\\\\times 1`\n\n Raises\n -------\n\n ValueError\n If :attr:`interact_with_items` is ``None`` and there is not ``item``\n parameter.\n \"\"\"\n # use custom item interaction function, if provided\n if self.interact_with_items is not None:\n return self.interact_with_items(self, items_shown)\n if not self.repeat_interactions:\n # scores must be set back later to non-infinite values\n prev_interacted_scores = self.actual_user_scores.get_item_scores(self.user_interactions)\n # \"remove\" items that have been interacted with by setting scores to negative infinity\n self.actual_user_scores.set_item_scores_to_value(self.user_interactions, float(\"-inf\"))\n rec_item_scores = self.actual_user_scores.get_item_scores(items_shown)\n rec_item_scores = self.attention_transform(rec_item_scores)\n sorted_user_preferences = mo.argmax(rec_item_scores, axis=1)\n interactions = items_shown[self.user_vector, sorted_user_preferences]\n # logging information if requested\n if self.is_verbose():\n self.log(f\"User scores for given items are:\\n{str(rec_item_scores)}\")\n self.log(f\"Users interact with the following items respectively:\\n{str(interactions)}\")\n # record interactions if needed to ensure users don't repeat interactions\n if not self.repeat_interactions:\n # set scores back to the original scores\n self.actual_user_scores.set_item_scores_to_value(\n self.user_interactions, prev_interacted_scores\n )\n interactions_col = interactions.reshape((-1, 1))\n # append interactions as column of user interactions\n self.user_interactions = np.hstack([self.user_interactions, interactions_col])\n return interactions\n\n def attention_transform(self, recommended_item_scores):\n \"\"\"\n Transforms a matrix of user-item scores based on a user attention mechanism; for example,\n because user attention is limited, items at the top of the recommendation list\n may have a higher effective score than items at the end of the recommendation list.\n\n Parameters\n ------------\n recommended_item_scores: :obj:`numpy.ndarray`\n A :math:`|U|\\\\times\\\\text{num_items_per_iter}` matrix with\n pre-attention user-item scores.\n\n Returns\n ---------\n recommended_item_scores: :obj:`numpy.ndarray`\n A :math:`|U|\\\\times\\\\text{num_items_per_iter}` matrix with\n transformed user-item scores that take into account\n the positions in the recommendation list.\n \"\"\"\n if self.attention_exp != 0:\n num_items = recommended_item_scores.shape[1]\n idxs = np.arange(num_items) + 1\n multiplier = np.power(idxs, self.attention_exp)\n # multiply each row by the attention coefficient\n return recommended_item_scores * multiplier\n else:\n return recommended_item_scores\n\n def update_profiles(self, item_attributes):\n \"\"\"In the case of dynamic user profiles, we update the user's actual\n profiles with new values as each user profile \"drifts\" towards\n items that they consume.\n\n Parameters\n -----------\n\n interactions: :obj:`numpy.ndarray` or list\n A matrix where row ``i`` corresponds to the attribute vector\n that user ``i`` interacted with.\n \"\"\"\n # we make no assumptions about whether the user profiles or item\n # attributes vectors are normalized\n self.actual_user_profiles.value = mo.slerp(\n self.actual_user_profiles, item_attributes, perc=self.drift\n )\n\n def store_state(self):\n \"\"\"Store the actual user scores in the state history\"\"\"\n self.state_history.append(np.copy(self.actual_user_scores.value))\n\n\nclass DNUsers(Users):\n \"\"\"\n Subclass of :class:`~components.users.Users` in 
which user agents perform\n choices in accordance with the Divisive Normalization model of choice\n from `Webb et al., 2020`_.\n\n .. _Webb et al., 2020: https://pubsonline.informs.org/doi/pdf/10.1287/mnsc.2019.3536\n\n Parameters\n -----------\n sigma: float\n Parameter for the DN model (see docstring). Default value is fitted\n parameter from Webb et al. (2020).\n\n omega: float\n Parameter for the DN model (see docstring). Default value is fitted\n parameter from Webb et al. (2020).\n\n beta: float\n Parameter for the DN model (see docstring). Default value is fitted\n parameter from Webb et al. (2020).\n \"\"\"\n\n def __init__(\n self,\n actual_user_profiles=None,\n actual_user_scores=None,\n interact_with_items=None,\n size=None,\n num_users=None,\n drift=0,\n score_fn=mo.inner_product,\n sigma=0.0,\n omega=0.2376,\n beta=0.9739,\n verbose=False,\n seed=None,\n ): # pylint: disable=too-many-arguments\n Users.__init__(\n self,\n actual_user_profiles,\n actual_user_scores,\n interact_with_items,\n size,\n num_users,\n drift,\n score_fn,\n verbose,\n seed,\n )\n self.sigma = sigma\n self.omega = omega\n self.beta = beta\n\n def get_user_feedback(self, items_shown):\n \"\"\"\n Generates user interactions at a given timestep, generally called by a\n model.\n\n Parameters\n ------------\n\n args, kwargs:\n Parameters needed by the model's train function.\n\n items_shown: :obj:`numpy.ndarray`\n A :math:`|U|\\\\times\\\\text{num_items_per_iter}` matrix with\n recommendations and new items.\n\n\n Returns\n ---------\n :obj:`numpy.ndarray`\n Array of interactions s.t. element :math:`\\\\text{interactions}_u(t)` represents the\n index of the item selected by user `u` at time `t`. Shape: :math:`|U|\\\\times 1`\n\n Raises\n -------\n\n ValueError\n If :attr:`interact_with_items` is None and there is not `item`\n parameter.\n \"\"\"\n if self.interact_with_items is not None:\n return self.interact_with_items(self, items_shown)\n\n reshaped_user_vector = self.user_vector.reshape((items_shown.shape[0], 1))\n interaction_scores = self.actual_user_scores[reshaped_user_vector, items_shown]\n\n self.log(\"User scores for given items are:\\n\" + str(interaction_scores))\n item_utilities = mo.to_dense(self.calc_dn_utilities(interaction_scores))\n sorted_user_preferences = item_utilities.argsort()[:, -1]\n interactions = items_shown[self.user_vector, sorted_user_preferences]\n self.log(\"Users interact with the following items respectively:\\n\" + str(interactions))\n\n return interactions\n\n def normalize_values(self, user_item_scores):\n \"\"\"\n Calculating the expression for :math:`z(\\\\textbf{v})` in the equation\n :math:`z(\\\\textbf{v})+\\\\mathbf{\\\\eta}`.\n\n Parameters\n -----------\n\n user_item_scores: :obj:`array_like`\n The element at index :math:`i,j` should represent user :math:`i`'s\n context-independent value for item :math:`j`.\n Dimension: :math:`|U|\\\\times|I|`\n\n Returns\n --------\n\n normed_values: :obj:`numpy.ndarray`\n The transformed utility values (i.e., :math:`z(\\\\textbf{v})`).\n \"\"\"\n summed_norms = np.linalg.norm(user_item_scores, ord=self.beta, axis=1)\n denom = self.sigma + np.multiply(self.omega, summed_norms)\n return np.divide(user_item_scores.T, denom) # now |I| x |U|\n\n def calc_dn_utilities(self, user_item_scores):\n \"\"\"\n Scores items according to divisive normalization. Note that the parameters\n / matrix operations we perform here are directly taken from\n https://github.com/UofT-Neuroecon-1/Normalization. 
For more information,\n see Webb, R., Glimcher, P. W., & Louie, K. (2020). The Normalization of\n Consumer Valuations: Context-Dependent Preferences from Neurobiological\n Constraints. Management Science.\n\n Note that the generalized DN model takes the following functional form:\n :math:`z_i(\\\\textbf{v})=\\\\frac{v_i}{\\\\sigma+\\\\omega(\\\\sum_n v_n^{\\\\beta})^\n {\\\\frac{1}{\\\\beta}}}`, where :math:`\\\\sigma, \\\\omega, \\\\beta` are all\n parameters that specify the exact choice model. After the original values\n :math:`\\\\textbf{v}` are transformed this way, the choice is determined by\n choosing the maximum value over :math:`z(\\\\textbf{v})+\\\\mathbf{\\\\eta}`,\n which in our case is generated by a multivariate normal distribution.\n\n Parameters\n -----------\n user_item_scores: :obj:`array_like`\n The element at index :math:`i,j` should represent user :math:`i`'s\n context-independent value for item :math:`j`.\n Dimension: :math:`|U|\\\\times|I|`\n\n Returns\n --------\n utility: :obj:`numpy.ndarray`\n Normalized & randomly perturbed utilities for different each\n pair of users and items in the recommendation set.\n \"\"\"\n normed_values = self.normalize_values(user_item_scores)\n num_choices, num_users = normed_values.shape\n eps = self.sample_from_error_dist(num_choices, num_users)\n utility = normed_values + eps\n # transform so |U| x |I|\n return utility.T\n\n def sample_from_error_dist(self, num_choices, num_users):\n \"\"\"\n The second stage of generating the divisive normalization utilities\n :math:`\\\\text{interactions}_{u(t)}` is adding the error term\n :math:`\\\\eta`. In this implementation, we sample from\n a specific multivariate normal distribution used by Webb et al.\n (see https://github.com/UofT-Neuroecon-1/Normalization).\n\n Parameters\n -----------\n\n num_choices: int\n Number of items every user is choosing between.\n\n num_users: int\n Number of users in the system.\n\n Returns\n --------\n\n eps: :obj:`numpy.ndarray`\n Randomly sampled errors from the error distribution. Should have\n shape :math:`|I|\\\\times|U|`.\n \"\"\"\n mean = np.zeros(num_choices)\n # in accordance with the DN model from Webb et al.,\n # the following covariance matrix has the structure\n # [ 1 0.5 ... 0.5 0.5 ]\n # [ 0.5 1 ... 0.5 0.5 ]\n # [ 0.5 0.5 ... 1 0.5 ]\n # [ 0.5 0.5 ... 0.5 1 ]\n cov = np.ones((num_choices, num_choices)) * 0.5\n cov[np.arange(num_choices), np.arange(num_choices)] = 1\n # generate |I| x |U| multivariate normal\n eps = self.rng.multivariate_normal(mean, cov, size=num_users).T\n return eps\n"
] | [
[
"numpy.divide",
"numpy.array",
"numpy.linalg.norm",
"numpy.zeros",
"numpy.copy",
"numpy.ones",
"numpy.multiply",
"numpy.arange",
"numpy.power",
"numpy.hstack"
]
] |
talonchandler/polharmonic | [
"2aa3ca984e11050f901579b8eaa45a3a61d07957"
] | [
"notes/2018-05-30-polarized-dispim-svd/calculations/test-multi.py"
] | [
"from polharmonic import det, ill, micro, multi\nimport numpy as np\n\n#n_px=2**4 + 1\nn_px=2**4 + 1\nfolder='multi-out/'\n\nmm = multi.MultiMicroscope(sigma_ax=0.33)\n# mm.micros[0].plot(mm.micros[0].H, filename=folder+'H0.pdf', n_px=n_px, plot_m=[-2, -1, 0, 1, 2])\nmm.calc_SVD(n_px=n_px)\nmm.plot_SVS_3D(filename=folder+'SVS3Dx.pdf')\nmm.plot_SVS_3D(filename=folder+'SVS3Dy.pdf', marks=np.array([[0,0,0], [0, 0.5 ,0], [0,1,0], [0,1.5,0]]))\nmm.plot_SVS_3D(filename=folder+'SVS3Dz.pdf', marks=np.array([[0,0,0], [0, 0, 0.5], [0,0,1], [0,0,1.5]]))\n\n\n"
] | [
[
"numpy.array"
]
] |
mikeireland/pymfe | [
"ce78392215bb40467a0d4efd453c2a6d062c12f5"
] | [
"rhea_subaru_superK.py"
] | [
"\"\"\"A script to fit tramlines etc for RHEA@Subaru data.\n\nLong wavelengths are down and right. 15 lines visible.\n\nlines = np.loadtxt('argon.txt')\norder = 1e7/31.6*2*np.sin(np.radians(64.0))/argon\nplt.plot(1375 - (order - np.round(order))/order*1.8e5)\nplt.plot(1375 - (order - np.round(order)+1)/order*1.8e5)\nplt.plot(1375 - (order - np.round(order)-1)/order*1.8e5)\n\nSuper-bright Neon line may be 7032.\n\n15000 counts in 20s\n\"\"\"\n\nfrom __future__ import division, print_function\nimport pymfe\ntry:\n import pyfits\nexcept:\n import astropy.io.fits as pyfits\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport opticstools as ot\nimport pdb\nimport scipy.optimize as op\nimport scipy.interpolate as interp\nimport time\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nimport PyAstronomy.pyasl as pyasl\nfrom astropy import constants as const\nimport matplotlib.cm as cm\nimport pickle\nimport astropy.modeling as amod\nplt.ion()\n\nsavefile=\"Focus00.pkl\"\ndir = \"/Users/mireland/data/rhea_subaru/160221/Focus00/\"\n\nsavefile=\"Focus30.pkl\"\ndir = \"/Users/mireland/data/rhea_subaru/160221/Focus30/\"\n\nsavefile=\"Focus60.pkl\"\ndir = \"/Users/mireland/data/rhea_subaru/160221/Focus60/\"\n\nsavefile=\"1603.pkl\"\ndir = \"/Users/mireland/data/rhea_subaru/160317/dither_final/\"\n\nsavefile=\"1603_initial.pkl\"\ndir = \"/Users/mireland/data/rhea_subaru/160317/dither_initial/\"\n\nstar_files = glob.glob(dir + \"*.fits\")\n\nnstars = len(star_files)\nlenslet_ims = np.empty( (nstars,3,3) )\nxpos = np.empty( (nstars) )\nypos = np.empty( (nstars) )\n\nrhea2_format = pymfe.rhea.Format(spect='subaru',mode='slit')\nrhea2_extract = pymfe.Extractor(rhea2_format, transpose_data=True)\nxx, wave, blaze = rhea2_format.spectral_format()\n\nfluxes = []\nfor i in range(nstars):\n star_data = pyfits.getdata(star_files[i])\n star_data -= np.median(star_data[0:500,:])\n\n hh = pyfits.getheader(star_files[i])\n xpos[i] = hh['ZABERX']\n ypos[i] = hh['ZABERY'] \n\n flux,var = rhea2_extract.one_d_extract(data=star_data.T, rnoise=20.0)\n fluxes.append(flux)\n\n lenslet_ims[i,:,:] = np.median(np.median(flux[12:20,:,:],axis=0),axis=0)[1:].reshape(3,3)\n lenslet_ims[i,1,:] = lenslet_ims[i,1,::-1]\n \n plt.imshow(lenslet_ims[i,:,:],interpolation='nearest', cmap=cm.gray)\n\npickle.dump((lenslet_ims,xpos,ypos), open(savefile, 'wb'))\nplt.clf()\nplt.scatter(xpos,ypos,s=100,c=np.sum(np.sum(lenslet_ims,2),1),cmap=cm.gist_heat)\n"
] | [
[
"matplotlib.pyplot.ion",
"numpy.empty",
"numpy.median",
"numpy.sum",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.imshow"
]
] |
XiangyuYang-Opt/scipy | [
"465da5496a8dda099646e9d5947f24dfc0ec44e9"
] | [
"scipy/stats/_axis_nan_policy.py"
] | [
"# Many scipy.stats functions support `axis` and `nan_policy` parameters.\n# When the two are combined, it can be tricky to get all the behavior just\n# right. This file contains utility functions useful for scipy.stats functions\n# that support `axis` and `nan_policy`, including a decorator that\n# automatically adds `axis` and `nan_policy` arguments to a function.\n\nimport numpy as np\nimport scipy.stats\nimport scipy.stats._stats_py\nfrom functools import wraps\nfrom scipy._lib._docscrape import FunctionDoc, Parameter\nimport inspect\n\n\ndef _broadcast_arrays(arrays, axis=None):\n \"\"\"\n Broadcast shapes of arrays, ignoring incompatibility of specified axes\n \"\"\"\n new_shapes = _broadcast_array_shapes(arrays, axis=axis)\n if axis is None:\n new_shapes = [new_shapes]*len(arrays)\n return [np.broadcast_to(array, new_shape)\n for array, new_shape in zip(arrays, new_shapes)]\n\n\ndef _broadcast_array_shapes(arrays, axis=None):\n \"\"\"\n Broadcast shapes of arrays, ignoring incompatibility of specified axes\n \"\"\"\n shapes = [np.asarray(arr).shape for arr in arrays]\n return _broadcast_shapes(shapes, axis)\n\n\ndef _broadcast_shapes(shapes, axis=None):\n \"\"\"\n Broadcast shapes, ignoring incompatibility of specified axes\n \"\"\"\n if not shapes:\n return shapes\n\n # input validation\n if axis is not None:\n axis = np.atleast_1d(axis)\n axis_int = axis.astype(int)\n if not np.array_equal(axis_int, axis):\n raise ValueError('`axis` must be an integer, a '\n 'tuple of integers, or `None`.')\n axis = axis_int\n\n # First, ensure all shapes have same number of dimensions by prepending 1s.\n n_dims = max([len(shape) for shape in shapes])\n new_shapes = np.ones((len(shapes), n_dims), dtype=int)\n for row, shape in zip(new_shapes, shapes):\n row[len(row)-len(shape):] = shape # can't use negative indices (-0:)\n\n # Remove the shape elements of the axes to be ignored, but remember them.\n if axis is not None:\n axis[axis < 0] = n_dims + axis[axis < 0]\n axis = np.sort(axis)\n if axis[-1] >= n_dims or axis[0] < 0:\n message = (f\"`axis` is out of bounds \"\n f\"for array of dimension {n_dims}\")\n raise ValueError(message)\n\n if len(np.unique(axis)) != len(axis):\n raise ValueError(\"`axis` must contain only distinct elements\")\n\n removed_shapes = new_shapes[:, axis]\n new_shapes = np.delete(new_shapes, axis, axis=1)\n\n # If arrays are broadcastable, shape elements that are 1 may be replaced\n # with a corresponding non-1 shape element. 
Assuming arrays are\n # broadcastable, that final shape element can be found with:\n new_shape = np.max(new_shapes, axis=0)\n # except in case of an empty array:\n new_shape *= new_shapes.all(axis=0)\n\n # Among all arrays, there can only be one unique non-1 shape element.\n # Therefore, if any non-1 shape element does not match what we found\n # above, the arrays must not be broadcastable after all.\n if np.any(~((new_shapes == 1) | (new_shapes == new_shape))):\n raise ValueError(\"Array shapes are incompatible for broadcasting.\")\n\n if axis is not None:\n # Add back the shape elements that were ignored\n new_axis = axis - np.arange(len(axis))\n new_shapes = [tuple(np.insert(new_shape, new_axis, removed_shape))\n for removed_shape in removed_shapes]\n return new_shapes\n else:\n return tuple(new_shape)\n\n\ndef _broadcast_array_shapes_remove_axis(arrays, axis=None):\n \"\"\"\n Broadcast shapes of arrays, dropping specified axes\n\n Given a sequence of arrays `arrays` and an integer or tuple `axis`, find\n the shape of the broadcast result after consuming/dropping `axis`.\n In other words, return output shape of a typical hypothesis test on\n `arrays` vectorized along `axis`.\n\n Examples\n --------\n >>> a = np.zeros((5, 2, 1))\n >>> b = np.zeros((9, 3))\n >>> _broadcast_array_shapes((a, b), 1)\n (5, 3)\n \"\"\"\n # Note that here, `axis=None` means do not consume/drop any axes - _not_\n # ravel arrays before broadcasting.\n shapes = [arr.shape for arr in arrays]\n return _broadcast_shapes_remove_axis(shapes, axis)\n\n\ndef _broadcast_shapes_remove_axis(shapes, axis=None):\n \"\"\"\n Broadcast shapes, dropping specified axes\n\n Same as _broadcast_array_shapes, but given a sequence\n of array shapes `shapes` instead of the arrays themselves.\n \"\"\"\n shapes = _broadcast_shapes(shapes, axis)\n shape = shapes[0]\n if axis is not None:\n shape = np.delete(shape, axis)\n return tuple(shape)\n\n\ndef _broadcast_concatenate(arrays, axis):\n \"\"\"Concatenate arrays along an axis with broadcasting.\"\"\"\n arrays = _broadcast_arrays(arrays, axis)\n res = np.concatenate(arrays, axis=axis)\n return res\n\n\n# TODO: add support for `axis` tuples\ndef _remove_nans(samples, paired):\n \"Remove nans from paired or unpaired 1D samples\"\n # potential optimization: don't copy arrays that don't contain nans\n if not paired:\n return [sample[~np.isnan(sample)] for sample in samples]\n\n # for paired samples, we need to remove the whole pair when any part\n # has a nan\n nans = np.isnan(samples[0])\n for sample in samples[1:]:\n nans = nans | np.isnan(sample)\n not_nans = ~nans\n return [sample[not_nans] for sample in samples]\n\n\ndef _remove_sentinel(samples, paired, sentinel):\n \"Remove sentinel values from paired or unpaired 1D samples\"\n # could consolidate with `_remove_nans`, but it's not quite as simple as\n # passing `sentinel=np.nan` because `(np.nan == np.nan) is False`\n\n # potential optimization: don't copy arrays that don't contain sentinel\n if not paired:\n return [sample[sample != sentinel] for sample in samples]\n\n # for paired samples, we need to remove the whole pair when any part\n # has a nan\n sentinels = (samples[0] == sentinel)\n for sample in samples[1:]:\n sentinels = sentinels | (sample == sentinel)\n not_sentinels = ~sentinels\n return [sample[not_sentinels] for sample in samples]\n\n\ndef _masked_arrays_2_sentinel_arrays(samples):\n # masked arrays in `samples` are converted to regular arrays, and values\n # corresponding with masked elements are replaced with a 
sentinel value\n\n # return without modifying arrays if none have a mask\n has_mask = False\n for sample in samples:\n mask = getattr(sample, 'mask', False)\n has_mask = has_mask or np.any(mask)\n if not has_mask:\n return samples, None # None means there is no sentinel value\n\n # Choose a sentinel value. We can't use `np.nan`, because sentinel (masked)\n # values are always omitted, but there are different nan policies.\n for i in range(len(samples)):\n # Things get more complicated if the arrays are of different types.\n # We could have different sentinel values for each array, but\n # the purpose of this code is convenience, not efficiency.\n samples[i] = samples[i].astype(np.float64, copy=False)\n\n max_possible, eps = np.finfo(np.float64).max, np.finfo(np.float64).eps\n\n sentinel = max_possible\n while sentinel > 0:\n for sample in samples:\n if np.any(sample == sentinel):\n sentinel *= (1 - 2*eps) # choose a new sentinel value\n break\n else: # when sentinel value is OK, break the while loop\n break\n\n # replace masked elements with sentinel value\n out_samples = []\n for sample in samples:\n mask = getattr(sample, 'mask', False)\n if np.any(mask):\n mask = np.broadcast_to(mask, sample.shape)\n sample = sample.data.copy() # don't modify original array\n sample[mask] = sentinel\n out_samples.append(sample)\n\n return out_samples, sentinel\n\n\ndef _check_empty_inputs(samples, axis):\n \"\"\"\n Check for empty sample; return appropriate output for a vectorized hypotest\n \"\"\"\n # if none of the samples are empty, we need to perform the test\n if not any((sample.size == 0 for sample in samples)):\n return None\n # otherwise, the statistic and p-value will be either empty arrays or\n # arrays with NaNs. Produce the appropriate array and return it.\n output_shape = _broadcast_array_shapes_remove_axis(samples, axis)\n output = np.ones(output_shape) * np.nan\n return output\n\n\n# Standard docstring / signature entries for `axis` and `nan_policy`\n_name = 'axis'\n_type = \"int or None, default: 0\"\n_desc = (\n \"\"\"If an int, the axis of the input along which to compute the statistic.\nThe statistic of each axis-slice (e.g. row) of the input will appear in a\ncorresponding element of the output.\nIf ``None``, the input will be raveled before computing the statistic.\"\"\"\n .split('\\n'))\n_axis_parameter_doc = Parameter(_name, _type, _desc)\n_axis_parameter = inspect.Parameter(_name,\n inspect.Parameter.KEYWORD_ONLY,\n default=0)\n\n_name = 'nan_policy'\n_type = \"{'propagate', 'omit', 'raise'}\"\n_desc = (\n \"\"\"Defines how to handle input NaNs.\n\n- ``propagate``: if a NaN is present in the axis slice (e.g. 
row) along\n which the statistic is computed, the corresponding entry of the output\n will be NaN.\n- ``omit``: NaNs will be omitted when performing the calculation.\n If insufficient data remains in the axis slice along which the\n statistic is computed, the corresponding entry of the output will be\n NaN.\n- ``raise``: if a NaN is present, a ``ValueError`` will be raised.\"\"\"\n .split('\\n'))\n_nan_policy_parameter_doc = Parameter(_name, _type, _desc)\n_nan_policy_parameter = inspect.Parameter(_name,\n inspect.Parameter.KEYWORD_ONLY,\n default='propagate')\n\n\ndef _axis_nan_policy_factory(result_object, default_axis=0,\n n_samples=1, paired=False,\n result_unpacker=None, too_small=0,\n n_outputs=2, kwd_samples=[]):\n \"\"\"Factory for a wrapper that adds axis/nan_policy params to a function.\n\n Parameters\n ----------\n result_object : callable\n Callable that returns an object of the type returned by the function\n being wrapped (e.g. the namedtuple or dataclass returned by a\n statistical test) provided the separate components (e.g. statistic,\n pvalue).\n default_axis : int, default: 0\n The default value of the axis argument. Standard is 0 except when\n backwards compatibility demands otherwise (e.g. `None`).\n n_samples : int or callable, default: 1\n The number of data samples accepted by the function\n (e.g. `mannwhitneyu`), a callable that accepts a dictionary of\n parameters passed into the function and returns the number of data\n samples (e.g. `wilcoxon`), or `None` to indicate an arbitrary number\n of samples (e.g. `kruskal`).\n paired : {False, True}\n Whether the function being wrapped treats the samples as paired (i.e.\n corresponding elements of each sample should be considered as different\n components of the same sample.)\n result_unpacker : callable, optional\n Function that unpacks the results of the function being wrapped into\n a tuple. This is essentially the inverse of `result_object`. Default\n is `None`, which is appropriate for statistical tests that return a\n statistic, pvalue tuple (rather than, e.g., a non-iterable datalass).\n too_small : int, default: 0\n The largest unnacceptably small sample for the function being wrapped.\n For example, some functions require samples of size two or more or they\n raise an error. This argument prevents the error from being raised when\n input is not 1D and instead places a NaN in the corresponding element\n of the result.\n n_outputs : int, default: 2\n The number of outputs produced by the function given 1d sample(s). For\n example, hypothesis tests that return a namedtuple or result object\n with attributes ``statistic`` and ``pvalue`` use the default\n ``n_outputs=2``; summary statistics with scalar output use\n ``n_outputs=1``.\n kwd_samples : sequence, default: []\n The names of keyword parameters that should be treated as samples. For\n example, `gmean` accepts as its first argument a sample `a` but\n also `weights` as a fourth, optional keyword argument. 
In this case, we\n use `n_samples=1` and kwd_samples=['weights'].\n \"\"\"\n\n if result_unpacker is None:\n def result_unpacker(res):\n return res[..., 0], res[..., 1]\n\n def is_too_small(samples):\n for sample in samples:\n if len(sample) <= too_small:\n return True\n return False\n\n def axis_nan_policy_decorator(hypotest_fun_in):\n @wraps(hypotest_fun_in)\n def axis_nan_policy_wrapper(*args, _no_deco=False, **kwds):\n\n if _no_deco: # for testing, decorator does nothing\n return hypotest_fun_in(*args, **kwds)\n\n # We need to be flexible about whether position or keyword\n # arguments are used, but we need to make sure users don't pass\n # both for the same parameter. To complicate matters, some\n # functions accept samples with *args, and some functions already\n # accept `axis` and `nan_policy` as positional arguments.\n # The strategy is to make sure that there is no duplication\n # between `args` and `kwds`, combine the two into `kwds`, then\n # the samples, `nan_policy`, and `axis` from `kwds`, as they are\n # dealt with separately.\n\n # Check for intersection between positional and keyword args\n params = list(inspect.signature(hypotest_fun_in).parameters)\n if n_samples is None:\n # Give unique names to each positional sample argument\n # Note that *args can't be provided as a keyword argument\n params = [f\"arg{i}\" for i in range(len(args))] + params[1:]\n\n d_args = dict(zip(params, args))\n intersection = set(d_args) & set(kwds)\n if intersection:\n message = (f\"{hypotest_fun_in.__name__}() got multiple values \"\n f\"for argument '{list(intersection)[0]}'\")\n raise TypeError(message)\n\n # Consolidate other positional and keyword args into `kwds`\n kwds.update(d_args)\n\n # rename avoids UnboundLocalError\n if callable(n_samples):\n # Future refactoring idea: no need for callable n_samples.\n # Just replace `n_samples` and `kwd_samples` with a single\n # list of the names of all samples, and treat all of them\n # as `kwd_samples` are treated below.\n n_samp = n_samples(kwds)\n else:\n n_samp = n_samples or len(args)\n\n # If necessary, rearrange function signature: accept other samples\n # as positional args right after the first n_samp args\n kwd_samp = [name for name in kwd_samples\n if kwds.get(name, None) is not None]\n n_kwd_samp = len(kwd_samp)\n if not kwd_samp:\n hypotest_fun_out = hypotest_fun_in\n else:\n def hypotest_fun_out(*samples, **kwds):\n new_kwds = dict(zip(kwd_samp, samples[n_samp:]))\n kwds.update(new_kwds)\n return hypotest_fun_in(*samples[:n_samp], **kwds)\n\n # Extract the things we need here\n samples = [np.atleast_1d(kwds.pop(param))\n for param in (params[:n_samp] + kwd_samp)]\n vectorized = True if 'axis' in params else False\n axis = kwds.pop('axis', default_axis)\n nan_policy = kwds.pop('nan_policy', 'propagate')\n del args # avoid the possibility of passing both `args` and `kwds`\n\n # convert masked arrays to regular arrays with sentinel values\n samples, sentinel = _masked_arrays_2_sentinel_arrays(samples)\n\n # standardize to always work along last axis\n if axis is None:\n samples = [sample.ravel() for sample in samples]\n else:\n samples = _broadcast_arrays(samples, axis=axis)\n axis = np.atleast_1d(axis)\n n_axes = len(axis)\n # move all axes in `axis` to the end to be raveled\n samples = [np.moveaxis(sample, axis, range(-len(axis), 0))\n for sample in samples]\n shapes = [sample.shape for sample in samples]\n # New shape is unchanged for all axes _not_ in `axis`\n # At the end, we append the product of the shapes of the axes\n # in 
`axis`. Appending -1 doesn't work for zero-size arrays!\n new_shapes = [shape[:-n_axes] + (np.prod(shape[-n_axes:]),)\n for shape in shapes]\n samples = [sample.reshape(new_shape)\n for sample, new_shape in zip(samples, new_shapes)]\n axis = -1 # work over the last axis\n\n # if axis is not needed, just handle nan_policy and return\n ndims = np.array([sample.ndim for sample in samples])\n if np.all(ndims <= 1):\n # Addresses nan_policy == \"raise\"\n contains_nans = []\n for sample in samples:\n contains_nan, _ = (\n scipy.stats._stats_py._contains_nan(sample, nan_policy))\n contains_nans.append(contains_nan)\n\n # Addresses nan_policy == \"propagate\"\n # Consider adding option to let function propagate nans, but\n # currently the hypothesis tests this is applied to do not\n # propagate nans in a sensible way\n if any(contains_nans) and nan_policy == 'propagate':\n res = np.full(n_outputs, np.nan)\n return result_object(*res)\n\n # Addresses nan_policy == \"omit\"\n if any(contains_nans) and nan_policy == 'omit':\n # consider passing in contains_nans\n samples = _remove_nans(samples, paired)\n\n # ideally, this is what the behavior would be:\n # if is_too_small(samples):\n # return result_object(np.nan, np.nan)\n # but some existing functions raise exceptions, and changing\n # behavior of those would break backward compatibility.\n\n if sentinel:\n samples = _remove_sentinel(samples, paired, sentinel)\n return hypotest_fun_out(*samples, **kwds)\n\n # check for empty input\n # ideally, move this to the top, but some existing functions raise\n # exceptions for empty input, so overriding it would break\n # backward compatibility.\n empty_output = _check_empty_inputs(samples, axis)\n if empty_output is not None:\n return result_object(*([empty_output.copy()\n for i in range(n_outputs)]))\n\n # otherwise, concatenate all samples along axis, remembering where\n # each separate sample begins\n lengths = np.array([sample.shape[axis] for sample in samples])\n split_indices = np.cumsum(lengths)\n x = _broadcast_concatenate(samples, axis)\n\n # Addresses nan_policy == \"raise\"\n contains_nan, _ = (\n scipy.stats._stats_py._contains_nan(x, nan_policy))\n\n if vectorized and not contains_nan and not sentinel:\n return hypotest_fun_out(*samples, axis=axis, **kwds)\n\n # Addresses nan_policy == \"omit\"\n if contains_nan and nan_policy == 'omit':\n def hypotest_fun(x):\n samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]\n samples = _remove_nans(samples, paired)\n if sentinel:\n samples = _remove_sentinel(samples, paired, sentinel)\n if is_too_small(samples):\n res = np.full(n_outputs, np.nan)\n return result_object(*res)\n return hypotest_fun_out(*samples, **kwds)\n\n # Addresses nan_policy == \"propagate\"\n elif contains_nan and nan_policy == 'propagate':\n def hypotest_fun(x):\n if np.isnan(x).any():\n res = np.full(n_outputs, np.nan)\n return result_object(*res)\n samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]\n if sentinel:\n samples = _remove_sentinel(samples, paired, sentinel)\n if is_too_small(samples):\n res = np.full(n_outputs, np.nan)\n return result_object(*res)\n return hypotest_fun_out(*samples, **kwds)\n\n else:\n def hypotest_fun(x):\n samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]\n if sentinel:\n samples = _remove_sentinel(samples, paired, sentinel)\n if is_too_small(samples):\n res = np.full(n_outputs, np.nan)\n return result_object(*res)\n return hypotest_fun_out(*samples, **kwds)\n\n x = np.moveaxis(x, axis, -1)\n res = np.apply_along_axis(hypotest_fun, 
axis=-1, arr=x)\n return result_object(*result_unpacker(res))\n\n doc = FunctionDoc(axis_nan_policy_wrapper)\n parameter_names = [param.name for param in doc['Parameters']]\n if 'axis' in parameter_names:\n doc['Parameters'][parameter_names.index('axis')] = (\n _axis_parameter_doc)\n else:\n doc['Parameters'].append(_axis_parameter_doc)\n if 'nan_policy' in parameter_names:\n doc['Parameters'][parameter_names.index('nan_policy')] = (\n _nan_policy_parameter_doc)\n else:\n doc['Parameters'].append(_nan_policy_parameter_doc)\n doc = str(doc).split(\"\\n\", 1)[1] # remove signature\n axis_nan_policy_wrapper.__doc__ = str(doc)\n\n sig = inspect.signature(axis_nan_policy_wrapper)\n parameters = sig.parameters\n parameter_list = list(parameters.values())\n if 'axis' not in parameters:\n parameter_list.append(_axis_parameter)\n if 'nan_policy' not in parameters:\n parameter_list.append(_nan_policy_parameter)\n sig = sig.replace(parameters=parameter_list)\n axis_nan_policy_wrapper.__signature__ = sig\n\n return axis_nan_policy_wrapper\n return axis_nan_policy_decorator\n"
] | [
[
"numpy.array_equal",
"numpy.finfo",
"numpy.apply_along_axis",
"numpy.sort",
"numpy.cumsum",
"numpy.broadcast_to",
"numpy.max",
"numpy.concatenate",
"numpy.full",
"numpy.prod",
"scipy._lib._docscrape.Parameter",
"numpy.delete",
"numpy.array",
"scipy._lib._docscrape.FunctionDoc",
"numpy.insert",
"numpy.isnan",
"numpy.asarray",
"numpy.ones",
"numpy.split",
"numpy.any",
"numpy.atleast_1d",
"numpy.all",
"numpy.moveaxis",
"numpy.unique"
]
] |
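The SciPy wrapper listed in the row above concatenates all samples along the working axis, remembers the split indices, and then applies a 1-D statistic slice by slice with np.apply_along_axis. A minimal NumPy-only sketch of that pattern follows; it is not the SciPy decorator itself, and apply_paired_stat / stat_1d are made-up names.

import numpy as np

def apply_paired_stat(stat_1d, samples, axis=-1):
    """Apply a multi-sample 1-D statistic slice-by-slice along `axis`."""
    lengths = np.array([s.shape[axis] for s in samples])
    split_indices = np.cumsum(lengths)[:-1]          # where each sample begins
    x = np.concatenate([np.moveaxis(s, axis, -1) for s in samples], axis=-1)

    def fun(row):
        return stat_1d(*np.split(row, split_indices))

    return np.apply_along_axis(fun, -1, x)

rng = np.random.default_rng(0)
a, b = rng.normal(size=(3, 10)), rng.normal(size=(3, 12))
print(apply_paired_stat(lambda u, v: u.mean() - v.mean(), [a, b]).shape)  # (3,)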
tdinesh/occamsam | [
"a0b27ca8d8f8b32d5d46f9b7861ec34f3d060b49"
] | [
"occamsam/optim.py"
] | [
"import cvxpy as cp\nimport cvxpy.atoms\nfrom cvxpy.atoms import norm, mixed_norm, sum_squares\nfrom cvxpy.atoms.affine.vec import vec\n\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse\n\nimport equivalence\nimport utilities\nfrom factorgraph import GaussianFactorGraph\n\nimport itertools\n\n\ndef _sanitized_noise_array(sigma):\n \"\"\"\n Replaces zero-noise estimates with 1 to maintain a achieve neutral weight within weighted least-squares\n\n :param sigma: list of noise estimates for each measurement\n :return sigma_: copy of sigma with 0 entries replaced by 1\n \"\"\"\n\n sigma_ = sigma.copy()\n zero_mask = np.isclose(sigma_, 0)\n if np.any(zero_mask):\n sigma_[zero_mask] = 1\n return sigma_\n\n\nclass LeastSquares(object):\n\n def __init__(self, graph, solver=None, verbosity=False):\n \"\"\"\n Ordinary Least-Squares optimizer for the odometric and distance measurements contained in a GaussianFactorGraph\n\n graph instance is modified using the solution found by optimize() with each call to update()\n\n :param graph: GaussianFactorGraph instance\n :param solver: One of the supported CvxPy solvers, e.g. 'GUROBI' (default1), 'MOSEK' (default2), 'ECOS' (default3)\n :param verbosity: Prints solver output to console if True\n \"\"\"\n\n assert isinstance(graph, GaussianFactorGraph), \"Expected type GaussainFactorGraph for graph, got %s\" % type(graph)\n self.graph = graph\n\n self.M = [] # estimated landmark positions\n self.P = [] # estimated robot positions\n self.res_d = [] # distance measurement residuals for a given solution\n self.res_t = [] # translation measurement residuals for a given solution\n\n self._verbosity = verbosity # solver output printed to console when True\n\n if 'GUROBI' in cp.installed_solvers():\n self._solver = 'GUROBI'\n elif 'MOSEK' in cp.installed_solvers():\n self._solver = 'MOSEK'\n else:\n self._solver = 'ECOS'\n\n if solver is not None:\n self._solver = solver\n\n def optimize(self):\n\n num_points = len(self.graph.free_points)\n point_dim = self.graph.point_dim\n num_landmarks = len(self.graph.landmarks)\n landmark_dim = self.graph.landmark_dim\n\n Am, Ap, d, _ = self.graph.observation_system()\n Bp, t, _ = self.graph.odometry_system()\n\n if (num_points != 0) and (num_landmarks != 0):\n\n M = cp.Variable((landmark_dim, num_landmarks))\n P = cp.Variable((point_dim, num_points))\n objective = cp.Minimize(sum_squares(Am * vec(M) + Ap * vec(P) - d) + sum_squares(Bp * vec(P) - t))\n\n problem = cp.Problem(objective)\n problem.solve(verbose=self._verbosity, solver=self._solver)\n\n self.M = M.value\n self.P = P.value\n\n m = self.M.ravel(order='F')\n p = self.P.ravel(order='F')\n\n self.res_d = Am.dot(m) + Ap.dot(p) - d\n self.res_t = Bp.dot(p) - t\n\n elif (num_points != 0) and (num_landmarks == 0):\n\n P = cp.Variable((point_dim, num_points))\n objective = cp.Minimize(sum_squares(Bp * vec(P) - t))\n\n problem = cp.Problem(objective)\n problem.solve(verbose=self._verbosity, solver=self._solver)\n\n self.P = P.value\n\n p = self.P.ravel(order='F')\n\n self.res_t = Bp.dot(p) - t\n\n else:\n return\n\n\n def update(self):\n\n for i, m in enumerate(self.graph.landmarks):\n if m.position is None:\n m.position = self.M[:, i].copy()\n else:\n m.position[:] = self.M[:, i].copy()\n\n for i, p in enumerate(self.graph.free_points):\n if p.position is None:\n p.position = self.P[:, i].copy()\n else:\n p.position[:] = self.P[:, i].copy()\n\n\nclass WeightedLeastSquares(object):\n\n def __init__(self, graph, solver=None, verbosity=False):\n \"\"\"\n Weighted 
Least-Squares optimizer for the odometric and distance measurements contained in a GaussianFactorGraph\n\n Weights for each measurement in the regression are defined as the inverse of the standard deviation for each.\n If and when 0, the corresponding standard deviation is assumed to be 1.\n\n graph instance is modified using the solution found by optimize() with each call to update()\n\n :param graph: GaussianFactorGraph instance\n :param solver: One of the supported CvxPy solvers, e.g. 'GUROBI' (default1), 'MOSEK' (default2), 'ECOS' (default3)\n :param verbosity: Prints solver output to console if True\n \"\"\"\n\n assert isinstance(graph, GaussianFactorGraph), \"Expected type GaussainFactorGraph for graph, got %s\" % type(graph)\n self.graph = graph\n\n self.M = None # estimated landmark positions\n self.P = None # estimated robot positions\n self.res_d = None # distance measurement residuals for a given solution\n self.res_t = None # translation measurement residuals for a given solution\n\n self._verbosity = verbosity # solver output printed to console when True\n\n if 'GUROBI' in cp.installed_solvers():\n self._solver = 'GUROBI'\n elif 'MOSEK' in cp.installed_solvers():\n self._solver = 'MOSEK'\n else:\n self._solver = 'ECOS'\n\n if solver is not None:\n self._solver = solver\n\n def optimize(self):\n\n num_points = len(self.graph.free_points)\n point_dim = self.graph.point_dim\n num_landmarks = len(self.graph.landmarks)\n landmark_dim = self.graph.landmark_dim\n\n Am, Ap, d, sigma_d = self.graph.observation_system()\n Bp, t, sigma_t = self.graph.odometry_system()\n\n S_d, S_t = sp.sparse.diags(1 / _sanitized_noise_array(sigma_d)), sp.sparse.diags(1 / _sanitized_noise_array(sigma_t))\n\n if (num_points != 0) and (num_landmarks != 0):\n\n M = cp.Variable((landmark_dim, num_landmarks))\n P = cp.Variable((point_dim, num_points))\n objective = cp.Minimize(\n sum_squares(S_d * ((Am * vec(M)) + (Ap * vec(P)) - d)) + sum_squares(S_t * ((Bp * vec(P)) - t)))\n problem = cp.Problem(objective)\n problem.solve(verbose=self._verbosity, solver=self._solver)\n\n self.M = M.value\n self.P = P.value\n\n m = self.M.ravel(order='F')\n p = self.P.ravel(order='F')\n\n self.res_d = Am.dot(m) + Ap.dot(p) - d\n self.res_t = Bp.dot(p) - t\n\n elif (num_points != 0) and (num_landmarks == 0):\n\n P = cp.Variable((point_dim, num_points))\n objective = cp.Minimize(\n sum_squares(sum_squares(S_t * ((Bp * vec(P)) - t))))\n problem = cp.Problem(objective)\n problem.solve(verbose=self._verbosity, solver=self._solver)\n\n self.P = P.value\n\n p = self.P.ravel(order='F')\n\n self.res_t = Bp.dot(p) - t\n\n else:\n return\n\n def update(self):\n\n for i, m in enumerate(self.graph.landmarks):\n if m.position is None:\n m.position = self.M[:, i].copy()\n else:\n m.position[:] = self.M[:, i].copy()\n\n for i, p in enumerate(self.graph.free_points):\n if p.position is None:\n p.position = self.P[:, i].copy()\n else:\n p.position[:] = self.P[:, i].copy()\n\n\nclass Occam(object):\n\n def __init__(self, graph, assoc_range=1, solver=None, verbosity=False):\n \"\"\"\n Occam Smoothing-And-Mapping optimizer for the odometric and distance factors contained in a GaussianFactorGraph\n\n Corresponding paper explaining the procedure can be found here:\n\n Landmark associations are uncovered automatically and stored in equivalence_pairs between calls to optimize()\n\n graph instance is modified using the solution found by optimize() with each call to update()\n\n :param graph: GaussianFactorGraph instance\n :param assoc_range: Standard 
deviation (distance) between pairs of observations to the same landmark\n :param solver: One of the supported CvxPy solvers, e.g. 'GUROBI' (default1), 'MOSEK' (default2), 'ECOS' (default3)\n :param verbosity: Prints solver output to console if True\n \"\"\"\n\n assert isinstance(graph, GaussianFactorGraph), \"Expected type GaussainFactorGraph for graph, got %s\" % type(graph)\n self.graph = graph\n\n self.M = None # estimated landmark positions\n self.P = None # estimated robot positions\n self.res_d = None # distance measurement residuals for a given solution\n self.res_t = None # translation measurement residuals for a given solution\n\n self.equivalence_pairs = [] # equivalent LandmarkVariable pairs\n\n self._sigma = assoc_range\n\n self._verbosity = verbosity # solver output printed to console when True\n\n if 'GUROBI' in cp.installed_solvers():\n self._solver = 'GUROBI'\n elif 'MOSEK' in cp.installed_solvers():\n self._solver = 'MOSEK'\n else:\n self._solver = 'ECOS'\n\n if solver is not None:\n self._solver = solver\n\n self._pre_optimizer = WeightedLeastSquares(graph, solver=solver, verbosity=verbosity)\n\n def optimize(self):\n\n self._pre_optimizer.optimize()\n self._pre_optimizer.update()\n\n points = self.graph.free_points\n landmarks = self.graph.landmarks\n\n num_points = len(points)\n point_dim = self.graph.point_dim\n num_landmarks = len(landmarks)\n landmark_dim = self.graph.landmark_dim\n\n transforms = [equivalence.SumMass(self.graph.correspondence_map.set_map()),\n equivalence.ExpDistance(self._sigma),\n equivalence.Facing()]\n E, W = equivalence.equivalence_matrix(landmarks, transforms=transforms)\n if E.shape[0] == 0:\n self.M = self._pre_optimizer.M\n self.P = self._pre_optimizer.P\n self.res_d = self._pre_optimizer.res_d\n self.res_t = self._pre_optimizer.res_t\n self.equivalence_pairs = []\n return\n\n Am, Ap, d, sigma_d = self.graph.observation_system()\n Bp, t, sigma_t = self.graph.odometry_system()\n\n S_d, S_t = sp.sparse.diags(1 / _sanitized_noise_array(sigma_d)), sp.sparse.diags(1 / _sanitized_noise_array(sigma_t))\n\n M = cp.Variable((landmark_dim, num_landmarks))\n P = cp.Variable((point_dim, num_points))\n\n M.value = self._pre_optimizer.M\n P.value = self._pre_optimizer.P\n\n objective = cp.Minimize(mixed_norm(W * E * M.T))\n constraints = [norm((Am * vec(M)) + (Ap * vec(P)) - d) <= 2 * np.linalg.norm(sigma_d + 1e-6),\n norm((Bp * vec(P)) - t) <= 2 * np.linalg.norm(sigma_t + 1e-6)]\n problem = cp.Problem(objective, constraints)\n problem.solve(verbose=self._verbosity, solver=self._solver, warm_start=True)\n\n if problem.solution.status == 'infeasible':\n self.M = self._pre_optimizer.M\n self.P = self._pre_optimizer.P\n self.res_d = self._pre_optimizer.res_d\n self.res_t = self._pre_optimizer.res_t\n self.equivalence_pairs = []\n return\n\n E_ = E[np.abs(np.linalg.norm(E * M.value.T, axis=1)) < 0.001, :]\n objective = cp.Minimize(\n sum_squares(S_d * ((Am * vec(M)) + (Ap * vec(P)) - d)) + sum_squares(S_t * ((Bp * vec(P)) - t)))\n constraints = [E_ * M.T == 0] if E_.shape[0] > 0 else []\n problem = cp.Problem(objective, constraints)\n problem.solve(verbose=self._verbosity, solver=self._solver, warm_start=True)\n\n self.M = M.value\n self.P = P.value\n\n m = self.M.ravel(order='F')\n p = self.P.ravel(order='F')\n\n self.res_d = Am.dot(m) + Ap.dot(p) - d\n self.res_t = Bp.dot(p) - t\n\n self.equivalence_pairs = [(landmarks[i], landmarks[j]) for (i, j) in E_.tolil().rows]\n\n def update(self, merge=True):\n\n for i, m in enumerate(self.graph.landmarks):\n if 
m.position is None:\n m.position = self.M[:, i].copy()\n else:\n m.position[:] = self.M[:, i].copy()\n\n for i, p in enumerate(self.graph.free_points):\n if p.position is None:\n p.position = self.P[:, i].copy()\n else:\n p.position[:] = self.P[:, i].copy()\n\n if merge:\n self.graph.merge_landmarks(self.equivalence_pairs)\n\n\nclass EM(object):\n\n def __init__(self, graph, assoc_range=1, solver=None, verbosity=False):\n\n assert isinstance(graph, GaussianFactorGraph), \"Expected type GaussainFactorGraph for graph, got %s\" % type(graph)\n self.graph = graph\n\n self.M = None # estimated landmark positions\n self.P = None # estimated robot positions\n self.res_d = None # distance measurement residuals for a given solution\n self.res_t = None # translation measurement residuals for a given solution\n\n self._sigma = assoc_range\n\n self._verbosity = verbosity # solver output printed to console when True\n\n if 'GUROBI' in cp.installed_solvers():\n self._solver = 'GUROBI'\n elif 'MOSEK' in cp.installed_solvers():\n self._solver = 'MOSEK'\n else:\n self._solver = 'ECOS'\n\n if solver is not None:\n self._solver = solver\n\n pre_optimizer = WeightedLeastSquares(graph, solver=solver, verbosity=verbosity)\n pre_optimizer.optimize()\n pre_optimizer.update()\n\n self.M = pre_optimizer.M\n self.P = pre_optimizer.P\n\n self.iter_counter = 0\n\n def optimize(self):\n\n self.iter_counter = 0\n\n Am, Ap, d, sigma_d = self.graph.observation_system()\n sigma_d = _sanitized_noise_array(sigma_d)\n\n W = np.Inf\n W_ = -np.Inf\n while np.linalg.norm(W - W_) > 1e-3:\n\n # print(np.linalg.norm(W - W_))\n\n W_ = W\n\n W = self._e_step(Am, Ap, d, sigma_d)\n self._m_step(W, Am, Ap, d, sigma_d)\n\n self.iter_counter += 1\n\n def update(self):\n\n for i, m in enumerate(self.graph.landmarks):\n if m.position is None:\n m.position = self.M[:, i].copy()\n else:\n m.position[:] = self.M[:, i].copy()\n\n for i, p in enumerate(self.graph.free_points):\n if p.position is None:\n p.position = self.P[:, i].copy()\n else:\n p.position[:] = self.P[:, i].copy()\n\n def _e_step(self, Am, Ap, d, sigma_d):\n\n m = self.M.ravel(order='F')\n p = self.P.ravel(order='F')\n\n block_rows = self.graph.landmark_dim\n Am = sp.sparse.bsr_matrix(Am, blocksize=[block_rows, self.graph.landmark_dim])\n Ap = sp.sparse.bsr_matrix(Ap, blocksize=[block_rows, self.graph.point_dim])\n b = -Ap.dot(p) + d\n\n points = self.graph.free_points\n landmarks = self.graph.landmarks\n\n W = []\n k = 0\n for t, xt in enumerate(points):\n\n # measurements at timestep t\n ks_t = []\n while k < len(Ap.indices) and t == Ap.indices[k]:\n ks_t.append(k)\n k = k + 1\n\n if len(ks_t) == 0:\n continue\n\n # space of data associations\n landmarks_t = [landmarks[Am.indices[kt]] for kt in ks_t]\n Dt = self._association_list(landmarks, landmarks_t)\n\n # probability of each data association\n Am_copy = Am.copy()\n p_z_xld = np.zeros(len(Dt))\n for di, dt in enumerate(Dt):\n p_z_xld[di] = self._model_probability(Am_copy, b, sigma_d, m, ks_t, dt)\n p_z_xl = np.sum(p_z_xld)\n\n # for each measurement\n Wt = np.zeros((len(ks_t), len(landmarks)))\n for ki in range(len(ks_t)):\n\n # for each landmark\n for j in range(len(landmarks)):\n\n # subset of data associations\n for di, dt in enumerate(Dt):\n if dt[ki] == j:\n Wt[ki, j] += p_z_xld[di] / p_z_xl\n\n W.append(Wt)\n\n return np.concatenate(W, axis=0)\n\n def _m_step(self, W, Am, Ap, d, sigma_d):\n\n num_points = len(self.graph.free_points)\n point_dim = self.graph.point_dim\n num_landmarks = len(self.graph.landmarks)\n 
landmark_dim = self.graph.landmark_dim\n\n Bp, t, sigma_t = self.graph.odometry_system()\n S_t = sp.sparse.diags(1 / _sanitized_noise_array(sigma_t))\n\n sigma_d = np.tile(_sanitized_noise_array(sigma_d), num_landmarks)\n S_d = sp.sparse.diags(1 / sigma_d)\n W = sp.sparse.diags(W.flatten('F'))\n\n Am = [np.zeros((Am.shape[0], Am.shape[1])) for _ in range(num_landmarks)]\n for j in range(num_landmarks):\n Am[j][:, j] = 1\n Am = sp.sparse.csr_matrix(np.concatenate(Am, axis=0))\n Ap = sp.sparse.vstack([Ap for _ in range(num_landmarks)])\n\n d = np.tile(d, num_landmarks)\n\n M = cp.Variable((landmark_dim, num_landmarks))\n P = cp.Variable((point_dim, num_points))\n objective = cp.Minimize(\n sum_squares(W * S_d * ((Am * vec(M)) + (Ap * vec(P)) - d)) + sum_squares(S_t * ((Bp * vec(P)) - t)))\n problem = cp.Problem(objective)\n problem.solve(verbose=self._verbosity, solver=self._solver)\n\n self.M = M.value\n self.P = P.value\n\n m = self.M.ravel(order='F')\n p = self.P.ravel(order='F')\n\n self.res_d = Am.dot(m) + Ap.dot(p) - d\n self.res_t = Bp.dot(p) - t\n\n @staticmethod\n def _model_probability(Am, b, sigma_d, m, ks_t, dt):\n\n Am.indices[Am.indptr[ks_t[0]]:Am.indptr[ks_t[-1]+1]] = dt\n\n r = (Am.dot(m) - b)\n rk = r[ks_t[0]*Am.blocksize[0]:(ks_t[-1]+1)*Am.blocksize[0]]\n sigma_dk = sigma_d[ks_t[0]*Am.blocksize[0]:(ks_t[-1]+1)*Am.blocksize[0]]\n\n S_dk = np.diag(1 / sigma_dk**2)\n p = np.exp(-0.5 * np.dot(np.dot(rk, S_dk), rk))\n\n return p\n\n\n @staticmethod\n def _association_list(landmarks, landmarks_t):\n\n num_landmarks = len(landmarks)\n num_measurements = len(landmarks_t)\n\n D = []\n for Dt in itertools.product(*([range(num_landmarks)]*num_measurements)):\n violation = False\n for k, j in enumerate(Dt):\n if landmarks_t[k].class_label != landmarks[j].class_label:\n violation = True\n break\n\n if not violation:\n D.append(np.array(Dt))\n\n return D\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"scipy.sparse.bsr_matrix",
"numpy.isclose",
"scipy.sparse.diags",
"numpy.linalg.norm",
"numpy.zeros",
"numpy.dot",
"numpy.sum",
"numpy.tile",
"numpy.any",
"numpy.diag"
]
] |
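The optimizers in the row above weight residuals by the inverse of each measurement's noise, with _sanitized_noise_array replacing zero sigmas by 1 so they contribute a neutral weight. A NumPy-only sketch of that weighting idea; this is an illustration, not the occamsam API, and sanitized_weights is a made-up name.

import numpy as np

def sanitized_weights(sigma):
    sigma = np.asarray(sigma, dtype=float).copy()
    sigma[np.isclose(sigma, 0)] = 1.0     # zero noise estimates become weight 1
    return 1.0 / sigma

A = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
b = np.array([1.0, 3.1, 2.0])
w = sanitized_weights([0.1, 0.0, 0.2])    # the 0.0 entry is treated as sigma = 1
x, *_ = np.linalg.lstsq(w[:, None] * A, w * b, rcond=None)
print(x)                                  # weighted least-squares estimate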
MrD1360/random_projects | [
"7f18dd9a0c720c46837eeb59cf6fdd8afb137aa5"
] | [
"NLP/chatbot_with_attention/chatbot_models_class.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\n\nimport tensorflow_addons as tfa\n\n#vocab_len=23\n#batch_size=16\n#buck_t1=8 #max len input (threshold of the first bucket)\n\n\n# In[2]:\n\n\nclass encoder_model(tf.keras.Model):\n def __init__(self,lat_dim=256,vocab_len=14):#,**kwargs):\n super().__init__()#(**kwargs)\n #encoder\n self.encoder_embedding_layer= tf.keras.layers.Embedding(input_dim=(vocab_len+3),\n output_dim=lat_dim,\n #mask_zero=True,\n name='encoder_embedding')\n self.encoder= tf.keras.layers.LSTM(lat_dim*2,return_sequences=True, return_state=True,name=\"encoder\")#,dropout=0.4)\n \n\n\n# In[3]:\n\n\nclass decoder_model(tf.keras.Model):\n def __init__(self,lat_dim=256,vocab_len=14,batch_size=16,buck_t1=8):#,**kwargs):\n super().__init__()#(**kwargs)\n #decoder\n self.decoder_embedding_layer= tf.keras.layers.Embedding(input_dim=(vocab_len+3),\n output_dim=(lat_dim),\n #mask_zero=True,\n #embeddings_initializer=tf.initializers.RandomNormal(0., 0.1),\n name='decoder_embedding')\n \n #define LuongAttention\n self.attention_mechanism= tfa.seq2seq.LuongAttention(units=(lat_dim*2),\n memory=None,\n memory_sequence_length=batch_size*[buck_t1]\n )\n \n self.decoder= tf.keras.layers.LSTMCell(lat_dim*2, name='decoder_cell')#,dropout=0.4)\n self.attention_decoder= tfa.seq2seq.AttentionWrapper(self.decoder,\n self.attention_mechanism,\n attention_layer_size=(lat_dim*2)\n ) \n \n \n #dense output layer\n self.dense_layer=tf.keras.layers.Dense(vocab_len+3, \n #activation='softmax',\n name='dense_layer')\n \n #sampler for training \n training_sampler= tfa.seq2seq.sampler.TrainingSampler() #ScheduledEmbeddingTrainingSampler \n \n #define training decoder\n self.training_decoder= tfa.seq2seq.BasicDecoder(cell=self.attention_decoder,\n sampler=training_sampler,\n output_layer=self.dense_layer\n )\n \n\n \n def get_initial_states(self,batch_size,enc_state_h,enc_state_c):\n init_states=self.attention_decoder.get_initial_state(dtype=tf.float32,\n batch_size=batch_size).clone(cell_state=[enc_state_h,enc_state_c])\n return init_states\n\n"
] | [
[
"tensorflow.keras.layers.LSTMCell",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Embedding"
]
] |
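encoder_model in the chatbot file above builds an Embedding and an LSTM in __init__ but defines no call(), so the layers are presumably wired up elsewhere in the project. A hedged sketch of how such an encoder is typically called; this is hypothetical and not taken from the repository.

import tensorflow as tf

class Encoder(tf.keras.Model):
    def __init__(self, lat_dim=256, vocab_len=14):
        super().__init__()
        self.embedding = tf.keras.layers.Embedding(vocab_len + 3, lat_dim)
        self.lstm = tf.keras.layers.LSTM(lat_dim * 2, return_sequences=True,
                                         return_state=True)

    def call(self, tokens):
        x = self.embedding(tokens)                 # (batch, time, lat_dim)
        outputs, state_h, state_c = self.lstm(x)   # outputs would feed attention
        return outputs, state_h, state_c

enc = Encoder()
out, h, c = enc(tf.zeros((16, 8), dtype=tf.int32))
print(out.shape, h.shape, c.shape)                 # (16, 8, 512) (16, 512) (16, 512)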
hitfee01/rtm3d | [
"9e872c1bf857234d17c8863be6006722d4aab283"
] | [
"models/nets/resnet.py"
] | [
"\"\"\"\n# ---------------------------------------------------------------------------------\n# -*- coding: utf-8 -*-\n-----------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Xingyi Zhou\n# Refer from: https://github.com/xingyizhou/CenterNet\n\n# Modifier: Nguyen Mau Dung (2020.08.09)\n# ------------------------------------------------------------------------------\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nfrom collections import OrderedDict\nfrom models.nets.ShapeSpec import ShapeSpec\nBN_MOMENTUM = 0.1\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass PoseResNet(nn.Module):\n\n def __init__(self, configs, block, layers):\n self.inplanes = 64\n self.deconv_with_bias = False\n self.kfns = configs.MODEL.KFNs\n\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n 
self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n channels = [64, 128, 256, 512]\n self.layer1 = self._make_layer(block, channels[0], layers[0]) # 4\n self.layer2 = self._make_layer(block, channels[1], layers[1], stride=2) # 8\n self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2) # 16\n self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2) # 32\n self._kfpn_spec = OrderedDict()\n for layer in self.kfns:\n i = int(layer[-1])\n self._kfpn_spec[layer] = ShapeSpec(channels=channels[i-1], stride=2 ** (i + 1))\n\n @property\n def kfpn_spec(self):\n return self._kfpn_spec\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n in_channels=self.inplanes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n y = []\n for layer in self.kfns:\n x = getattr(self, layer)(x)\n y.append(x)\n\n return y\n\n def init_weights(self, num_layers, pretrained=True):\n if pretrained:\n # pretrained_state_dict = torch.load(pretrained)\n url = model_urls['resnet{}'.format(num_layers)]\n pretrained_state_dict = model_zoo.load_url(url)\n print('=> loading pretrained model {}'.format(url))\n self.load_state_dict(pretrained_state_dict, strict=False)\n else:\n print('=> imagenet pretrained model dose not exist')\n print('=> please download it first')\n raise ValueError('imagenet pretrained model does not exist')\n\n\nresnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])}\n\n\ndef get_pose_net(num_layers, configs):\n block_class, layers = resnet_spec[int(num_layers)]\n\n model = PoseResNet(configs, block_class, layers)\n # model.init_weights(num_layers, pretrained=True)\n return model\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.utils.model_zoo.load_url",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
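A hedged usage sketch for get_pose_net from the file above: the forward pass returns one feature map per entry of configs.MODEL.KFNs, and kfpn_spec records the channel count and stride of each. The config object is mocked with SimpleNamespace here; the real project supplies its own config class.

import types
import torch
from models.nets.resnet import get_pose_net   # the module listed above

configs = types.SimpleNamespace(
    MODEL=types.SimpleNamespace(KFNs=['layer2', 'layer3', 'layer4']))
model = get_pose_net(18, configs)              # BasicBlock, layers [2, 2, 2, 2]
feats = model(torch.randn(1, 3, 224, 224))
for name, spec in model.kfpn_spec.items():
    print(name, spec.channels, spec.stride)    # layer2 128 8 ... layer4 512 32
print([tuple(f.shape) for f in feats])         # (1, 128, 28, 28) ... (1, 512, 7, 7)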
ymzayek/asteroidsCNN | [
"b10d1a7b713ef878a0459340d4060cfdad9575a7"
] | [
"utils/CNN_utils.py"
] | [
"import csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom matplotlib.ticker import FormatStrFormatter\nfrom tensorflow.keras import backend as K\n\n\ndef load_data_from_images(image_path, datasplit):\n \"\"\"Load images from directory\n\n Parameters:\n image_path (str): Path to images\n datasplit (str): Choose 'train', 'valid', or 'test'\n\n Returns:\n DataFrame: table with path, label, and dataset description for each image\n numpy array: image matrices\n numpy array: corresponding labels\n\n \"\"\"\n image_path = image_path\n data = {\n \"Path\": [\n glob.glob(f\"{image_path}/{datasplit}/asteroids/\" + '*'), \n glob.glob(f\"{image_path}/{datasplit}/other/\" + '*')\n ],\n \"Label\": [1,0],\n \"Set\": datasplit\n }\n df = pd.DataFrame(data).explode('Path')\n df = df.sample(frac=1, random_state=35) #shuffle\n x = []\n y = []\n for i, file in enumerate(df['Path']):\n im = Image.open(file)\n im = np.asarray(im)\n x.append(im)\n y.append(df['Label'].iloc[i])\n \n return df, np.array(x, dtype=int), np.array(y, dtype=float)\n\n\ndef crop_center(im, new_w, new_h):\n \"\"\"\n Crop center of image\n \"\"\"\n width, height = im.size # Get dimensions\n\n left = (width - new_w)/2\n top = (height - new_h)/2\n right = (width + new_w)/2\n bottom = (height + new_h)/2\n\n # Crop the center of the image\n im = im.crop((left, top, right, bottom))\n \n return im\n \n\ndef f1_metric(y_true, y_pred):\n \"\"\"\n F1 metric that combines precision and recall\n \"\"\"\n\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n recall = true_positives / (possible_positives + K.epsilon())\n f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())\n\n return f1_val\n\n\ndef analyze_5unit_errors(predictionsLabel, Y_testLabel): #,image_size=20\n \"\"\"\n Compares predictions of labels and coordinates to ground truth\n \"\"\"\n tp = 0\n tn = 0\n fp = 0\n fn = 0\n for i in range(len(predictionsLabel)):\n trueLabel = Y_testLabel[i]\n predictedLabel = round(predictionsLabel[i])\n if trueLabel == 1 and predictedLabel == 1:\n tp += 1\n elif trueLabel == 0 and predictedLabel == 0:\n tn += 1\n elif trueLabel == 0 and predictedLabel == 1:\n fp += 1\n elif trueLabel == 1 and predictedLabel == 0:\n fn += 1\n else:\n pass\n accuracy = (tp + tn) / len(predictionsLabel) * 100\n precision = tp / (tp + fp) * 100\n recall = tp / (tp + fn) * 100\n\n print(\"\\nClassification accuracy, precision, recall:\", f\"{accuracy:.2f}\", f\"{precision:.2f}\", f\"{recall:.2f}\")\n print(\"TP, TN, FP, FN:\", tp, tn, fp, fn)\n\n return f\"\\nClassification accuracy, precision, recall: {accuracy:.2f} {precision:.2f} {recall:.2f}\\n TP, TN, FP, FN: {tp} {tn} {fp} {fn}\"\n\n\ndef convert_pixels_to_arcsec_per_h(pixels):\n \"\"\"\n Converts streak length in pixels to arcsec/h.\n Values are specific to ESA Euclid mission.\n \"\"\"\n\n coefficient = 565/3600*10\n arcsecPerH = pixels/coefficient\n\n return arcsecPerH\n\n\ndef plot_results_heatmap(dataframe2D, binsMag, title, fig_name = 'Plot_2D_histogram_CNN.pdf', savepdf=True):\n \"\"\"\n Plots the recall (completeness of predictions as a heatmap).\n Saves the heatmap to a pdf.\n \"\"\"\n cmap = \"PRGn\"\n xticklabels = np.append(dataframe2D.columns.values.round(0).astype(int), 80)\n yticklabels = np.append(dataframe2D.index.values.round(2), 
26.0)\n fig4, ax4 = plt.subplots(figsize=(10, 6), dpi=100)\n sns.heatmap(dataframe2D,\n ax=ax4,\n cmap=cmap,\n annot=True,\n fmt='3.0f',\n cbar_kws={'label': 'Completeness [%]'},\n xticklabels=xticklabels,\n yticklabels=yticklabels,\n vmin=0,\n vmax=100,\n linewidths=0.5)\n ax4.set_ylim(0, binsMag)\n ax4.set_title(title)\n ax4.set_xlabel(r'Sky motion [$\\rm arcsec\\,h^{-1}$]')\n ax4.set_ylabel('Magnitude')\n ax4.yaxis.set_major_formatter(FormatStrFormatter('%5.2f'))\n yticks = ax4.get_yticks() - 0.5\n ax4.set_yticks(yticks)\n xticks = ax4.get_xticks() - 0.5\n ax4.set_xticks(xticks)\n ax4.set_xticklabels(ax4.get_xticklabels(), rotation=0) # , horizontalalignment='right')\n ax4.set_yticklabels(ax4.get_yticklabels(), rotation=0)\n #PRGn\n #PuBuGn\n fig4.tight_layout()\n if savepdf:\n fig4.savefig(fig_name, dpi=300, format=\"pdf\")\n return\n"
] | [
[
"numpy.array",
"numpy.asarray",
"pandas.DataFrame",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.subplots",
"tensorflow.keras.backend.epsilon",
"tensorflow.keras.backend.clip"
]
] |
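load_data_from_images in the utils file above calls glob.glob and Image.open, but the import block shown never brings in glob or PIL.Image, so the function would raise NameError as listed. The minimal fix is two extra imports at the top of the file:

import glob               # used by load_data_from_images
from PIL import Image     # used by load_data_from_images (Image.open)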
avdul-q101/numba | [
"199798e2c849b5e63eeef36972566fda7b84625c"
] | [
"numba/cuda/tests/cudadrv/test_managed_alloc.py"
] | [
"import numpy as np\nfrom ctypes import byref, c_size_t\nfrom numba.cuda.cudadrv.driver import device_memset, driver\nfrom numba import cuda\nfrom numba.cuda.testing import unittest, ContextResettingTestCase\nfrom numba.cuda.testing import skip_on_cudasim\nfrom numba.tests.support import linux_only\n\n\n@skip_on_cudasim('CUDA Driver API unsupported in the simulator')\n@linux_only\nclass TestManagedAlloc(ContextResettingTestCase):\n\n def get_total_gpu_memory(self):\n # We use a driver function to directly get the total GPU memory because\n # an EMM plugin may report something different (or not implement\n # get_memory_info at all).\n free = c_size_t()\n total = c_size_t()\n driver.cuMemGetInfo(byref(free), byref(total))\n return total.value\n\n def skip_if_cc_major_lt(self, min_required, reason):\n \"\"\"\n Skip the current test if the compute capability of the device is\n less than `min_required`.\n \"\"\"\n ctx = cuda.current_context()\n cc_major = ctx.device.compute_capability[0]\n if cc_major < min_required:\n self.skipTest(reason)\n\n # CUDA Unified Memory comes in two flavors. For GPUs in the Kepler and\n # Maxwell generations, managed memory allocations work as opaque,\n # contiguous segments that can either be on the device or the host. For\n # GPUs in the Pascal or later generations, managed memory operates on a\n # per-page basis, so we can have arrays larger than GPU memory, where only\n # part of them is resident on the device at one time. To ensure that this\n # test works correctly on all supported GPUs, we'll select the size of our\n # memory such that we only oversubscribe the GPU memory if we're on a\n # Pascal or newer GPU (compute capability at least 6.0).\n\n def test_managed_alloc_driver_undersubscribe(self):\n msg = \"Managed memory unsupported prior to CC 3.0\"\n self.skip_if_cc_major_lt(3, msg)\n self._test_managed_alloc_driver(0.5)\n\n # This test is skipped by default because it is easy to hang the machine\n # for a very long time or get OOM killed if the GPU memory size is >50% of\n # the system memory size. 
Even if the system does have more than 2x the RAM\n # of the GPU, this test runs for a very long time (in comparison to the\n # rest of the tests in the suite).\n #\n # However, it is left in here for manual testing as required.\n\n @unittest.skip\n def test_managed_alloc_driver_oversubscribe(self):\n msg = \"Oversubscription of managed memory unsupported prior to CC 6.0\"\n self.skip_if_cc_major_lt(6, msg)\n self._test_managed_alloc_driver(2.0)\n\n def test_managed_alloc_driver_host_attach(self):\n msg = \"Host attached managed memory is not accessible prior to CC 6.0\"\n self.skip_if_cc_major_lt(6, msg)\n # Only test with a small array (0.01 * memory size) to keep the test\n # quick.\n self._test_managed_alloc_driver(0.01, attach_global=False)\n\n def _test_managed_alloc_driver(self, memory_factor, attach_global=True):\n # Verify that we can allocate and operate on managed\n # memory through the CUDA driver interface.\n\n total_mem_size = self.get_total_gpu_memory()\n n_bytes = int(memory_factor * total_mem_size)\n\n ctx = cuda.current_context()\n mem = ctx.memallocmanaged(n_bytes, attach_global=attach_global)\n\n dtype = np.dtype(np.uint8)\n n_elems = n_bytes // dtype.itemsize\n ary = np.ndarray(shape=n_elems, dtype=dtype, buffer=mem)\n\n magic = 0xab\n device_memset(mem, magic, n_bytes)\n ctx.synchronize()\n\n # Note that this assertion operates on the CPU, so this\n # test effectively drives both the CPU and the GPU on\n # managed memory.\n\n self.assertTrue(np.all(ary == magic))\n\n def _test_managed_array(self, attach_global=True):\n # Check the managed_array interface on both host and device.\n\n ary = cuda.managed_array(100, dtype=np.double)\n ary.fill(123.456)\n self.assertTrue(all(ary == 123.456))\n\n @cuda.jit('void(double[:])')\n def kernel(x):\n i = cuda.grid(1)\n if i < x.shape[0]:\n x[i] = 1.0\n\n kernel[10, 10](ary)\n cuda.current_context().synchronize()\n\n self.assertTrue(all(ary == 1.0))\n\n def test_managed_array_attach_global(self):\n self._test_managed_array()\n\n def test_managed_array_attach_host(self):\n self._test_managed_array()\n msg = \"Host attached managed memory is not accessible prior to CC 6.0\"\n self.skip_if_cc_major_lt(6, msg)\n self._test_managed_array(attach_global=False)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.all",
"numpy.ndarray",
"numpy.dtype"
]
] |
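The managed-allocation test above views a raw CUDA managed-memory allocation as a NumPy array through the buffer= argument of np.ndarray. The same pattern works with any buffer-protocol object, so the idea can be tried CPU-only; this sketch is not part of the test suite.

import numpy as np

buf = bytearray(16)                      # stands in for ctx.memallocmanaged(...)
ary = np.ndarray(shape=16, dtype=np.uint8, buffer=buf)
ary[:] = 0xAB                            # writes go straight into the buffer
print(buf[:4], np.all(ary == 0xAB))      # bytearray(b'\xab\xab\xab\xab') True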
wx-b/ravens | [
"249c316301d84fda10e0ce4ca99519d9a37a5059"
] | [
"ravens/tasks/manipulating_rope.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Ravens Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Cable task.\"\"\"\n\nimport os\n\nimport numpy as np\nfrom ravens.tasks import primitives\nfrom ravens.tasks.task import Task\nfrom ravens.utils import utils\n\nimport pybullet as p\n\n\nclass ManipulatingRope(Task):\n \"\"\"Cable task.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.max_steps = 20\n self.pos_eps = 0.02\n\n if self.continuous:\n self.primitive = primitives.PickPlaceContinuous(speed=0.001)\n else:\n self.primitive = primitives.PickPlace(height=0.02, speed=0.001)\n\n def _continuous_oracle(self, env, **kwargs):\n kwargs['height'] = 0.02\n return super()._continuous_oracle(env, **kwargs)\n\n def reset(self, env):\n super().reset(env)\n\n n_parts = 20\n radius = 0.005\n length = 2 * radius * n_parts * np.sqrt(2)\n\n # Add 3-sided square.\n square_size = (length, length, 0)\n square_pose = self.get_random_pose(env, square_size)\n square_template = 'square/square-template.urdf'\n replace = {'DIM': (length,), 'HALF': (length / 2 - 0.005,)}\n urdf = self.fill_template(square_template, replace)\n env.add_object(urdf, square_pose, 'fixed')\n os.remove(urdf)\n\n # Get corner points of square.\n corner0 = (length / 2, length / 2, 0.001)\n corner1 = (-length / 2, length / 2, 0.001)\n corner0 = utils.apply(square_pose, corner0)\n corner1 = utils.apply(square_pose, corner1)\n\n # Add cable (series of articulated small blocks).\n increment = (np.float32(corner1) - np.float32(corner0)) / n_parts\n position, _ = self.get_random_pose(env, (0.1, 0.1, 0.1))\n position = np.float32(position)\n part_shape = p.createCollisionShape(p.GEOM_BOX, halfExtents=[radius] * 3)\n part_visual = p.createVisualShape(p.GEOM_SPHERE, radius=radius * 1.5)\n parent_id = -1\n targets = []\n objects = []\n for i in range(n_parts):\n position[2] += np.linalg.norm(increment)\n part_id = p.createMultiBody(0.1, part_shape, part_visual,\n basePosition=position)\n if parent_id > -1:\n constraint_id = p.createConstraint(\n parentBodyUniqueId=parent_id,\n parentLinkIndex=-1,\n childBodyUniqueId=part_id,\n childLinkIndex=-1,\n jointType=p.JOINT_POINT2POINT,\n jointAxis=(0, 0, 0),\n parentFramePosition=(0, 0, np.linalg.norm(increment)),\n childFramePosition=(0, 0, 0))\n p.changeConstraint(constraint_id, maxForce=100)\n if (i > 0) and (i < n_parts - 1):\n color = utils.COLORS['red'] + [1]\n p.changeVisualShape(part_id, -1, rgbaColor=color)\n env.obj_ids['rigid'].append(part_id)\n parent_id = part_id\n target_xyz = np.float32(corner0) + i * increment + increment / 2\n objects.append((part_id, (0, None)))\n targets.append((target_xyz, (0, 0, 0, 1)))\n\n matches = np.clip(np.eye(n_parts) + np.eye(n_parts)[::-1], 0, 1)\n\n self.goals.append((objects, matches, targets,\n False, False, 'pose', None, 1))\n\n for i in range(480):\n p.stepSimulation()\n"
] | [
[
"numpy.float32",
"numpy.linalg.norm",
"numpy.sqrt",
"numpy.eye"
]
] |
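The matches matrix built in the rope task above, np.clip(np.eye(n_parts) + np.eye(n_parts)[::-1], 0, 1), lets each bead be matched either to its own target or to the mirror-image target, since the rope can be laid down in either direction. A quick check of that construction:

import numpy as np

n_parts = 4
matches = np.clip(np.eye(n_parts) + np.eye(n_parts)[::-1], 0, 1)
print(matches)
# [[1. 0. 0. 1.]
#  [0. 1. 1. 0.]
#  [0. 1. 1. 0.]
#  [1. 0. 0. 1.]]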
timvink/flee | [
"c7901e2a0de70f18c3267236e127ab1c26c8f5e5"
] | [
"flee/postprocessing/analysis.py"
] | [
"import numpy as np\n\n\n# Primitive error function for single values.\n\ndef rel_error(val, correct_val):\n if correct_val < 0.00001:\n return 0.0\n return np.abs(float(val) / float(correct_val) - 1)\n\n\ndef abs_error(val, correct_val):\n return np.abs(float(val) - float(correct_val))\n\n\n# Primitive error function for arrays.\n\ndef abs_diffs(forecast_vals, correct_vals):\n return np.abs(forecast_vals - correct_vals)\n\n\ndef mean_abs_diffs(forecast_vals, correct_vals):\n return np.mean(np.abs(forecast_vals - correct_vals))\n\n\ndef calculate_ln_accuracy_ratio(forecast_vals, actual_vals):\n \"\"\"\n Calculate the log of the accuracy ratio (forecast / actual)\n Return -1 if there is a 0 in the actual values\n \"\"\"\n return np.mean(np.abs(np.log(forecast_vals / actual_vals)))\n\n\ndef calculate_MASE(forecast_vals, actual_vals, naieve_vals, start_of_forecast_period=30):\n \"\"\"\n Calculate the Mean Absolute Scaled Error.\n \"\"\"\n if len(actual_vals) != len(naieve_vals):\n print(\"Error in calculate_MASE: len(actual_vals) != len(naieve_vals)\", len(\n actual_vals), len(naieve_vals))\n\n if len(actual_vals) != len(forecast_vals):\n print(\"Error in calculate_MASE: len(actual_vals) != len(forecast_vals)\", len(\n actual_vals), len(forecast_vals))\n\n offset = start_of_forecast_period + 1\n\n mean_naieve_error = np.sum((np.abs(\n actual_vals[offset:] - naieve_vals[offset:]))) / float(len(actual_vals[offset:]))\n #mean_forecast_error = np.mean((np.abs(actual_vals[start_of_forecast_period:] - forecast_vals[start_of_forecast_period:])) / float(len(actual_vals[start_of_forecast_period:])))\n mean_forecast_error = np.sum(\n (np.abs(actual_vals - forecast_vals))) / float(len(actual_vals))\n\n return mean_forecast_error / mean_naieve_error\n"
] | [
[
"numpy.log",
"numpy.abs"
]
] |
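A small numeric check of calculate_MASE above (note the file spells the naive forecast "naieve"): the score is the mean absolute error of the forecast over the whole series divided by the mean absolute error of the naive forecast after start_of_forecast_period. Reproducing the arithmetic directly:

import numpy as np

actual   = np.arange(40, dtype=float)
naive    = actual + 2.0        # naive model off by 2 everywhere
forecast = actual + 1.0        # forecast off by 1 everywhere

offset = 30 + 1                # start_of_forecast_period + 1, as in the function
mean_naive_error    = np.mean(np.abs(actual[offset:] - naive[offset:]))   # 2.0
mean_forecast_error = np.mean(np.abs(actual - forecast))                  # 1.0
print(mean_forecast_error / mean_naive_error)   # 0.5, the value the function returns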
swing-research/trumpets | [
"df8dc2b1e902393453e552c3a9b8f0cd3e10d741"
] | [
"logdetJ.py"
] | [
"import tensorflow as tf\nimport numpy as np\n\ndef wrapper_logdet(x, f):\n\n # @tf.function\n def power_iteration(f, n):\n v = tf.random.normal(x.shape, dtype=tf.float32)\n v /= tf.linalg.norm(v, axis=-1, keepdims=True)\n\n for _ in range(n):\n with tf.autodiff.ForwardAccumulator(primals=x, tangents=v) as acc:\n y = f(x)\n\n u1 = acc.jvp(y) # Jv\n\n with tf.GradientTape() as tape:\n tape.watch(x)\n y = f(x)\n\n u1 = tape.gradient(y, x, output_gradients=u1) # J^T v\n\n # current estimate of eigval\n eigval = tf.reduce_sum(v*u1, axis=-1)\n\n # calculate the norm\n u1_norm = tf.linalg.norm(u1, axis=-1, keepdims=True)\n\n # re normalize the vector\n v = u1 / u1_norm\n\n\n return tf.reduce_max(eigval)\n\n\n # @tf.function\n def logdet_1(x, f, n, beta):\n logdet_val = tf.zeros(tf.shape(x)[0], dtype=tf.float32)\n\n v = tf.random.normal(x.shape, dtype=tf.float32)\n v1 = tf.identity(v)\n\n for k in range(1,n+1):\n with tf.autodiff.ForwardAccumulator(primals=x, tangents=v1) as acc:\n y = f(x)\n\n u1 = acc.jvp(y)\n\n with tf.GradientTape(persistent=False) as tape:\n tape.watch(x)\n y = f(x)\n\n u2 = tape.gradient(y, x, output_gradients=u1)\n v1 = v1 - beta*u2\n\n logdet_val -= tf.reduce_sum(v1*v, axis=-1)/tf.cast(k, tf.float32)\n\n return logdet_val\n\n def logdet(x, f, n, nv=10, beta=1):\n logdet_val = 0\n\n y = f(x)\n d = tf.math.minimum(y.shape[1], x.shape[1])\n\n for _ in range(nv):\n logdet_val += logdet_1(x, f, n, beta)\n logdet_val /= nv\n\n return logdet_val - tf.cast(d, tf.float32)*np.log(beta)\n\n def get_logdet():\n val = power_iteration(f, 10)\n beta = 0.95/val.numpy()\n print('beta is')\n print(beta)\n\n nevals = 50\n ld = 0\n for _ in range(nevals):\n n = 10\n ld += logdet(x, f, np.int32(n), beta=np.float32(beta), nv=10)\n\n ld /= nevals\n \n return ld\n\n\n return get_logdet()\n\n\ndef unit_test():\n DIM = 10\n x = tf.Variable(tf.ones((45, DIM), dtype=tf.float32), trainable=True)*2\n\n\n def f(x, reverse=False):\n return tf.concat((x[:,:DIM//2]**2, x**2), axis=1)/2.0\n\n ld = wrapper_logdet(x, f)\n\n print('true_value is %f'%(np.log(2)*25))\n\n print(ld)\n print('mean and std deviation are:')\n print(np.mean(ld.numpy()))\n print(np.std(ld.numpy()))\n\n return ld\n\n\nif __name__ == '__main__':\n ld = unit_test()\n\n"
] | [
[
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.GradientTape",
"numpy.log",
"tensorflow.random.normal",
"tensorflow.ones",
"tensorflow.math.minimum",
"tensorflow.linalg.norm",
"tensorflow.reduce_max",
"tensorflow.autodiff.ForwardAccumulator",
"numpy.float32",
"tensorflow.reduce_sum",
"numpy.int32",
"tensorflow.identity",
"tensorflow.cast"
]
] |
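The wrapper_logdet routine above appears to combine a Hutchinson-style trace estimate with the truncated series for tr(log(beta * J^T J)), using log det(A) = -sum_k tr((I - beta*A)^k)/k - d*log(beta), which converges when beta scales the spectrum of A into (0, 2); hence the 0.95 / lambda_max factor obtained by power iteration. A dense NumPy check of that identity on a small SPD matrix, offered as a sketch under that reading and not part of the repository:

import numpy as np

rng = np.random.default_rng(0)
Q, _ = np.linalg.qr(rng.normal(size=(4, 4)))
A = Q @ np.diag([0.5, 1.0, 2.0, 4.0]) @ Q.T      # SPD stand-in for J^T J
beta = 0.95 / np.linalg.eigvalsh(A).max()        # mirrors the power-iteration scaling
M = np.eye(4) - beta * A
series = -sum(np.trace(np.linalg.matrix_power(M, k)) / k for k in range(1, 200))
print(series - 4 * np.log(beta))                 # ~ 1.3863 = log(0.5 * 1 * 2 * 4)
print(np.linalg.slogdet(A)[1])                   # reference value, 1.3863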
zhaoyueyi/TaichiRenderer | [
"56f3bf5a59cd5cce3a8939e689620d84ccf7b0e4"
] | [
"hacker.py"
] | [
"# from taichi-three(tina) source code\n\nimport taichi as ti\n\n# hasattr(ti, '_tinahacked') or setattr(ti, '_tinahacked', 1) or setattr(ti,\n# 'static', lambda x, *xs: [x] + list(xs) if xs else x) or setattr(\n# ti.Matrix, 'element_wise_writeback_binary', (lambda f: lambda x, y, z:\n# (y.__name__ != 'assign' or not setattr(y, '__name__', '_assign'))\n# and f(x, y, z))(ti.Matrix.element_wise_writeback_binary)) or setattr(\n# ti.Matrix, 'is_global', (lambda f: lambda x: len(x) and f(x))(\n# ti.Matrix.is_global)) or setattr(ti.lang.common_ops.TaichiOperations, '__pos__',\n# lambda x: x) or setattr(ti, 'pi', __import__('math').pi) or setattr(ti,\n# 'tau', __import__('math').tau) or setattr(ti, 'materialize_callback',\n# (lambda f: lambda x: [(x() if ti.get_runtime().materialized else f(x)),\n# x][1])(ti.materialize_callback)) or setattr(ti, 'expr_init', (lambda f:\n# lambda x: x if isinstance(x, dict) else f(x))(ti.expr_init)) or setattr(\n# ti, 'expr_init_func', (lambda f: lambda x: x if isinstance(x, dict)\n# else f(x))(ti.expr_init_func)) or print('[Tina] Taichi properties hacked')\n\n\n@eval('lambda x: x()')\ndef _():\n class GUI(ti.GUI):\n def __init__(self, name='Tina', res=512, **kwargs):\n if isinstance(res, ti.Matrix):\n res = res.entries\n if isinstance(res, list):\n res = tuple(res)\n super().__init__(name=name, res=res, **kwargs)\n self._post_show_cbs = []\n\n def post_show(self, cb):\n self._post_show_cbs.append(cb)\n return cb\n\n def rects(self, topleft, bottomright, radius=1, color=0xffffff):\n import numpy as np\n topright = np.stack([topleft[:, 0], bottomright[:, 1]], axis=1)\n bottomleft = np.stack([bottomright[:, 0], topleft[:, 1]], axis=1)\n self.lines(topleft, topright, radius, color)\n self.lines(topright, bottomright, radius, color)\n self.lines(bottomright, bottomleft, radius, color)\n self.lines(bottomleft, topleft, radius, color)\n\n def show(self, *args, **kwargs):\n super().show(*args, **kwargs)\n for cb in self._post_show_cbs:\n cb(self)\n\n ti.GUI = GUI\n\n\n@eval('lambda x: x()')\ndef _():\n if hasattr(ti, 'smart'):\n return\n\n ti.smart = lambda x: x\n\n import copy, ast\n from taichi.lang.transformer import ASTTransformerBase, ASTTransformerPreprocess\n\n old_get_decorator = ASTTransformerBase.get_decorator\n\n @staticmethod\n def get_decorator(node):\n if not (isinstance(node, ast.Call)\n and isinstance(node.func, ast.Attribute) and isinstance(\n node.func.value, ast.Name) and node.func.value.id == 'ti'\n and node.func.attr in ['smart']):\n return old_get_decorator(node)\n return node.func.attr\n\n ASTTransformerBase.get_decorator = get_decorator\n\n old_visit_struct_for = ASTTransformerPreprocess.visit_struct_for\n\n def visit_struct_for(self, node, is_grouped):\n if not is_grouped:\n decorator = self.get_decorator(node.iter)\n if decorator == 'smart': # so smart!\n self.current_control_scope().append('smart')\n self.generic_visit(node, ['body'])\n t = self.parse_stmt('if 1: pass; del a')\n t.body[0] = node\n target = copy.deepcopy(node.target)\n target.ctx = ast.Del()\n if isinstance(target, ast.Tuple):\n for tar in target.elts:\n tar.ctx = ast.Del()\n t.body[-1].targets = [target]\n return t\n\n return old_visit_struct_for(self, node, is_grouped)\n\n ASTTransformerPreprocess.visit_struct_for = visit_struct_for\n\n\n__all__ = []\n"
] | [
[
"numpy.stack"
]
] |
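The rects() helper added to ti.GUI above derives the two remaining corners of each rectangle by stacking mixed x/y columns and then draws the four edges with lines(); the exact corner naming does not change which edges get drawn. A quick NumPy illustration of the corner construction:

import numpy as np

topleft = np.array([[0.1, 0.9], [0.2, 0.8]])
bottomright = np.array([[0.3, 0.7], [0.4, 0.6]])
topright = np.stack([topleft[:, 0], bottomright[:, 1]], axis=1)
bottomleft = np.stack([bottomright[:, 0], topleft[:, 1]], axis=1)
print(topright)     # [[0.1 0.7] [0.2 0.6]]
print(bottomleft)   # [[0.3 0.9] [0.4 0.8]]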
ikramulkayes/Python_season2 | [
"d057460d07c5d2d218ecd52e08c1d355add44df2"
] | [
"practice64.py"
] | [
"import numpy as np\nx = np.array([6,8,12,14,18])\ny = np.array([7,9,13,15,19])\n\ndef gradiant_descent(x,y):\n m = c = 0\n loop = 10\n alpha = 0.3\n n = len(x)\n for i in range(loop):\n yp = m*x + c\n costfunc = (1/n)*sum(val*val for val in (y-yp))\n dm = -(2/n)*sum(x*(y-yp))\n dn = -(2/n)*sum(y-yp)\n m = m - alpha*dm\n c = c - alpha*dn\n print(f\"m:{m} c:{c} y:{yp} cost: {costfunc}\")\n\ngradiant_descent(x,y)"
] | [
[
"numpy.array"
]
] |
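The snippet above is plain gradient descent for y = m*x + c with the usual MSE gradients dm = -(2/n)*sum(x*(y - yp)) and dc = -(2/n)*sum(y - yp). With alpha = 0.3 and inputs as large as 18 the updates overshoot and diverge; a hedged reworking with a smaller step converges toward the exact fit y = x + 1:

import numpy as np

x = np.array([6, 8, 12, 14, 18], dtype=float)
y = x + 1.0                       # same data: y is exactly x + 1
m = c = 0.0
alpha, n = 0.005, len(x)          # step small enough for these x magnitudes
for _ in range(20000):
    yp = m * x + c
    m -= alpha * (-(2 / n) * np.sum(x * (y - yp)))
    c -= alpha * (-(2 / n) * np.sum(y - yp))
print(m, c)                       # close to 1.0 and 1.0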
bingqingchen/oatomobile | [
"5576458a1036b7c969a45753443b00b5fbc347cf"
] | [
"oatomobile/baselines/rulebased/autopilot/agent.py"
] | [
"# Copyright 2020 The OATomobile Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Defines a simple PID-controller-based autopilot.\"\"\"\n\nimport math\nimport random\nimport numpy as np\n\nfrom typing import Any\nfrom typing import Optional\nfrom typing import Tuple\n\nfrom absl import logging\nimport pdb\nimport carla\nimport oatomobile\nfrom oatomobile.simulators.carla import defaults\nfrom oatomobile.utils import carla as cutil\n\ntry:\n from agents.navigation.local_planner import \\\n LocalPlanner # pylint: disable=import-error\n from agents.tools.misc import \\\n compute_magnitude_angle # pylint: disable=import-error\n from agents.tools.misc import \\\n is_within_distance_ahead # pylint: disable=import-error\nexcept ImportError:\n raise ImportError(\"Missing CARLA installation, \"\n \"make sure the environment variable CARLA_ROOT is provided \"\n \"and that the PythonAPI is `easy_install`ed\")\n\n\nclass AutopilotAgent(oatomobile.Agent):\n \"\"\"An autopilot agent, based on the official implementation of\n `carla.PythonAPI.agents.navigation.basic_agent.BasicAgent`\"\"\"\n\n def __init__(self,\n environment: oatomobile.envs.CARLAEnv,\n *,\n proximity_tlight_threshold: float = 10.0,\n proximity_vehicle_threshold: float = 10.0,\n noise: float = 0.1) -> None:\n \"\"\"Constructs an autopilot agent.\n\n Args:\n environment: The navigation environment to spawn the agent.\n proximity_tlight_threshold: The threshold (in metres) to\n the traffic light.\n proximity_vehicle_threshold: The threshold (in metres) to\n the front vehicle.\n noise: The percentage of random actions.\n \"\"\"\n super(AutopilotAgent, self).__init__(environment=environment)\n\n # References to the CARLA objects.\n self._vehicle = self._environment.simulator.hero\n self._world = self._vehicle.get_world()\n self._map = self._world.get_map()\n\n # Agent hyperparametres.\n self._proximity_tlight_threshold = proximity_tlight_threshold\n self._proximity_vehicle_threshold = proximity_vehicle_threshold\n self._hop_resolution = 2.0\n self._path_seperation_hop = 2\n self._path_seperation_threshold = 0.5\n self._target_speed = defaults.TARGET_SPEED\n self._noise = noise\n\n # The internal state of the agent.\n self._last_traffic_light = None\n\n # Local planner, including the PID controllers.\n dt = self._vehicle.get_world().get_settings().fixed_delta_seconds\n # lateral_control_dict = defaults.LATERAL_PID_CONTROLLER_CONFIG.copy()\n # lateral_control_dict.update({\"dt\": dt})\n # TODO(filangel): tune the parameters for FPS != 20\n self._local_planner = LocalPlanner(\n self._vehicle,\n opt_dict=dict(\n target_speed=self._target_speed,\n dt=dt,\n ),\n )\n\n # Set agent's dsestination.\n if hasattr(self._environment.unwrapped.simulator, \"destination\"):\n self._set_destination(\n self._environment.unwrapped.simulator.destination.location)\n\n def act(\n self,\n observation: oatomobile.Observations,\n ) -> 
oatomobile.Action:\n \"\"\"Takes in an observation, samples from agent's policy, returns an\n action.\"\"\"\n # Remove unused arguments.\n del observation\n\n # Random action branch.\n if random.random() < self._noise:\n return carla.VehicleControl( # pylint: disable=no-member\n **{\n k: float(v)\n for (k, v) in self._environment.action_space.sample().items()\n })\n # Normal autopilot action.\n else:\n return self._run_step()\n\n def _run_step(\n self,\n debug: bool = False,\n ) -> carla.VehicleControl: # pylint: disable=no-member\n \"\"\"Executes one step of navigation.\"\"\"\n\n # is there an obstacle in front of us?\n hazard_detected = False\n\n # retrieve relevant elements for safe navigation, i.e.: traffic lights\n # and other vehicles\n actor_list = self._world.get_actors()\n vehicle_list = actor_list.filter(\"*vehicle*\")\n lights_list = actor_list.filter(\"*traffic_light*\")\n\n # check possible obstacles\n vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)\n if vehicle_state:\n if debug:\n logging.debug('!!! VEHICLE BLOCKING AHEAD [{}])'.format(vehicle.id))\n\n hazard_detected = True\n\n # Do not check for the state of the traffic lights\n # light_state, traffic_light = self._is_light_red(lights_list)\n # if light_state:\n # if debug:\n # logging.debug('=== RED LIGHT AHEAD [{}])'.format(traffic_light.id))\n\n # hazard_detected = True\n\n if hazard_detected:\n control = carla.VehicleControl() # pylint: disable=no-member\n control.steer = 0.0\n control.throttle = 0.0\n control.brake = 1.0\n control.hand_brake = False\n else:\n # standard local planner behavior\n control = self._local_planner.run_step(debug=debug)\n # print(control)\n return control\n\n def _set_destination(\n self,\n destination: carla.Location, # pylint: disable=no-member\n ) -> None:\n \"\"\"Generates the global plan for the agent.\n\n Args:\n destination: The location of the new destination.\n \"\"\"\n # Set vehicle's current location as start for the plan.\n origin = self._vehicle.get_location()\n start_waypoint = self._map.get_waypoint(origin).transform.location\n end_waypoint = self._map.get_waypoint(destination).transform.location\n # Calculate the plan.\n waypoints, roadoptions, _ = cutil.global_plan(\n world=self._world,\n origin=start_waypoint,\n destination=end_waypoint,\n )\n # Mutate the local planner's global plan.\n self._local_planner.set_global_plan(list(zip(waypoints, roadoptions)))\n \n ############### DEBUG ####################\n # Converts goals to `NumPy` arrays.\n self._goal = np.asarray([\n cutil.carla_xyz_to_ndarray(waypoint.transform.location)\n for waypoint in waypoints #goals_world\n ])\n #print(\"Number of global waypoints = \", len(self._goal))\n np.save('waypoints_agent.npy', self._goal)\n ###########################################\n \n def _is_vehicle_hazard(\n self,\n vehicle_list,\n ) -> Tuple[bool, Optional[carla.Vehicle]]: # pylint: disable=no-member\n \"\"\"It detects if a vehicle in the scene can be dangerous for the ego\n vehicle's current plan.\n\n Args:\n vehicle_list: List of potential vehicles (obstancles) to check.\n\n Returns:\n vehicle_ahead: If True a vehicle ahead blocking us and False otherwise.\n vehicle: The blocker vehicle itself.\n \"\"\"\n\n ego_vehicle_location = self._vehicle.get_location()\n ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)\n\n for target_vehicle in vehicle_list:\n # do not account for the ego vehicle.\n if target_vehicle.id == self._vehicle.id:\n continue\n\n # if the object is not in our lane it's not an 
obstacle.\n target_vehicle_waypoint = self._map.get_waypoint(\n target_vehicle.get_location())\n if target_vehicle_waypoint.road_id != ego_vehicle_waypoint.road_id or \\\n target_vehicle_waypoint.lane_id != ego_vehicle_waypoint.lane_id:\n continue\n\n loc = target_vehicle.get_location()\n if is_within_distance_ahead(\n loc,\n ego_vehicle_location,\n self._vehicle.get_transform().rotation.yaw,\n self._proximity_vehicle_threshold,\n ):\n return (True, target_vehicle)\n\n return (False, None)\n\n def _is_light_red(\n self,\n lights_list,\n ) -> Tuple[bool, Any]: # pylint: disable=no-member\n \"\"\"It detects if the light in the scene is red.\n\n Args:\n lights_list: The list containing TrafficLight objects.\n\n Returns:\n light_ahead: If True a traffic light ahead is read and False otherwise.\n traffic_light: The traffic light object ahead itself.\n \"\"\"\n if self._map.name == 'Town01' or self._map.name == 'Town02':\n return self._is_light_red_europe_style(lights_list)\n else:\n return self._is_light_red_us_style(lights_list)\n\n def _is_light_red_europe_style(self, lights_list):\n \"\"\"This method is specialized to check European style traffic lights.\"\"\"\n ego_vehicle_location = self._vehicle.get_location()\n ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)\n\n for traffic_light in lights_list:\n object_waypoint = self._map.get_waypoint(traffic_light.get_location())\n if object_waypoint.road_id != ego_vehicle_waypoint.road_id or \\\n object_waypoint.lane_id != ego_vehicle_waypoint.lane_id:\n continue\n\n loc = traffic_light.get_location()\n if is_within_distance_ahead(\n loc,\n ego_vehicle_location,\n self._vehicle.get_transform().rotation.yaw,\n self._proximity_tlight_threshold,\n ):\n if traffic_light.state == carla.TrafficLightState.Red: # pylint: disable=no-member\n return (True, traffic_light)\n\n return (False, None)\n\n def _is_light_red_us_style(self, lights_list, debug=False):\n \"\"\"This method is specialized to check US style traffic lights.\"\"\"\n ego_vehicle_location = self._vehicle.get_location()\n ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)\n\n if ego_vehicle_waypoint.is_junction:\n # It is too late. Do not block the intersection! 
Keep going!\n return (False, None)\n\n if self._local_planner.target_waypoint is not None:\n if self._local_planner.target_waypoint.is_junction:\n min_angle = 180.0\n sel_magnitude = 0.0\n sel_traffic_light = None\n for traffic_light in lights_list:\n loc = traffic_light.get_location()\n magnitude, angle = compute_magnitude_angle(\n loc, ego_vehicle_location,\n self._vehicle.get_transform().rotation.yaw)\n if magnitude < 60.0 and angle < min(25.0, min_angle):\n sel_magnitude = magnitude\n sel_traffic_light = traffic_light\n min_angle = angle\n\n if sel_traffic_light is not None:\n if debug:\n logging.debug('=== Magnitude = {} | Angle = {} | ID = {}'.format(\n sel_magnitude, min_angle, sel_traffic_light.id))\n\n if self._last_traffic_light is None:\n self._last_traffic_light = sel_traffic_light\n\n if self._last_traffic_light.state == carla.TrafficLightState.Red: # pylint: disable=no-member\n return (True, self._last_traffic_light)\n else:\n self._last_traffic_light = None\n\n return (False, None)\n\n def _get_trafficlight_trigger_location(\n self,\n traffic_light,\n ) -> carla.Location: # pylint: disable=no-member\n \"\"\"Calculates the yaw of the waypoint that represents the trigger volume of\n the traffic light.\"\"\"\n\n def rotate_point(point, radians):\n \"\"\"Rotates a given point by a given angle.\"\"\"\n rotated_x = math.cos(radians) * point.x - math.sin(radians) * point.y\n rotated_y = math.sin(radians) * point.x - math.cos(radians) * point.y\n\n return carla.Vector3D(rotated_x, rotated_y, point.z) # pylint: disable=no-member\n\n base_transform = traffic_light.get_transform()\n base_rot = base_transform.rotation.yaw\n area_loc = base_transform.transform(traffic_light.trigger_volume.location)\n area_ext = traffic_light.trigger_volume.extent\n\n point = rotate_point(\n carla.Vector3D(0, 0, area_ext.z), # pylint: disable=no-member\n math.radians(base_rot),\n )\n point_location = area_loc + carla.Location(x=point.x, y=point.y) # pylint: disable=no-member\n\n return carla.Location(point_location.x, point_location.y, point_location.z) # pylint: disable=no-member\n"
] | [
[
"numpy.save"
]
] |
benman1/ep-stan | [
"c1bc6edf0ed2775266fba7d4fa65e8f0009f9b17"
] | [
"experiment/models/m3b.py"
] | [
"\"\"\"A simulated experiment model used by the sckript fit.py\n\nModel name: m3b\nDefinition:\n group index j = 1 ... J\n input index d = 1 ... D\n explanatory variable x = [x_1 ... x_D]\n response variable y\n local parameter alpha = [alpha_1 ... alpha_J]\n local parameter beta = [[beta_11 ... beta_1D] ... [beta_J1 ... beta_JD]]\n shared parameter sigma_a\n shared parameter sigma_b = [sigma_b_1 ... sigma_b_D]\n y ~ bernoulli_logit(alpha_j + beta_j*' * x)\n alpha ~ N(0, sigma_a)\n beta_*d ~ N(0, sigma_b_d), for all d\n sigma_a ~ log-N(0, sigma_aH)\n sigma_b_d ~ log-N(0, sigma_bH), for all d\n phi = [log(sigma_a), log(sigma_b)]\n\n\"\"\"\n\n# Licensed under the 3-clause BSD license.\n# http://opensource.org/licenses/BSD-3-Clause\n#\n# Copyright (C) 2014 Tuomas Sivula\n# All rights reserved.\n\n\nimport numpy as np\nfrom scipy.linalg import cholesky\nfrom .common import data, calc_input_param_classification, rand_corr_vine\n\n\n# ------------------------------------------------------------------------------\n# >>>>>>>>>>>>> Configurations start >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# ------------------------------------------------------------------------------\n\n# ====== Model parameters ======================================================\n# If SIGMA_A is None, it is sampled from log-N(0,SIGMA_AH)\nSIGMA_A = 1\nSIGMA_AH = None\nSIGMA_BH = 1\n\n# ====== Prior =================================================================\n# Prior for log(sigma_a)\nM0_A = 0\nV0_A = 1.5**2\n# Prior for log(sigma_b)\nM0_B = 0\nV0_B = 1.5**2\n\n# ====== Regulation ============================================================\n# Min for abs(sum(beta))\nB_ABS_MIN_SUM = 1e-4\n\n# ------------------------------------------------------------------------------\n# <<<<<<<<<<<<< Configurations end <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ------------------------------------------------------------------------------\n\n\nclass model(object):\n \"\"\"Model definition.\n\n Parameters\n ----------\n J : int\n Number of groups\n\n D : int\n Number of inputs\n\n npg : {int, seq of ints}\n Number of observations per group (constant or [min, max])\n\n \"\"\"\n\n def __init__(self, J, D, npg):\n self.J = J\n self.D = D\n self.npg = npg\n self.dphi = D+1\n\n def simulate_data(self, Sigma_x=None, rng=None):\n \"\"\"Simulate data from the model.\n\n Returns models.common.data instance\n\n Parameters\n ----------\n Sigma_x : {None, 'rand', ndarray}\n The covariance structure of the explanatory variable. This is\n scaled to regulate the uncertainty. If not provided or None,\n identity matrix is used. 
Providing string 'rand' uses method\n common.rand_corr_vine to randomise one.\n\n \"\"\"\n # Localise params\n J = self.J\n D = self.D\n npg = self.npg\n\n # set randomisation\n if not isinstance(rng, np.random.RandomState):\n rng = np.random.RandomState(rng)\n # Draw random seed for input covariance for consistency in randomness\n # even if not needed\n seed_input_cov = rng.randint(2**31-1)\n\n # Randomise input covariance structure if needed\n if Sigma_x == 'rand':\n Sigma_x = rand_corr_vine(D, seed=seed_input_cov)\n\n # Parameters\n # Number of observations for each group\n if hasattr(npg, '__getitem__') and len(npg) == 2:\n Nj = rng.randint(npg[0],npg[1]+1, size=J)\n else:\n Nj = npg*np.ones(J, dtype=np.int64)\n # Total number of observations\n N = np.sum(Nj)\n # Observation index limits for J groups\n j_lim = np.concatenate(([0], np.cumsum(Nj)))\n # Group indices for each sample\n j_ind = np.empty(N, dtype=np.int64)\n for j in range(J):\n j_ind[j_lim[j]:j_lim[j+1]] = j\n\n # Assign parameters\n if SIGMA_A is None:\n sigma_a = np.exp(rng.randn()*SIGMA_AH)\n else:\n sigma_a = SIGMA_A\n sigma_b = np.exp(rng.randn(D)*SIGMA_BH)\n alpha_j = rng.randn(J)*sigma_a\n beta_j = rng.randn(J,D)*sigma_b\n\n # Regulate beta\n for j in range(J):\n beta_sum = np.sum(beta_j[j])\n while np.abs(beta_sum) < B_ABS_MIN_SUM:\n # Replace one random element in beta\n index = rng.randint(D)\n beta_sum -= beta_j[j,index]\n beta_j[j,index] = rng.randn()*sigma_b[index]\n beta_sum += beta_j[j,index]\n\n phi_true = np.append(np.log(sigma_a), np.log(sigma_b))\n\n # Determine suitable mu_x and sigma_x\n mu_x_j, sigma_x_j = calc_input_param_classification(\n alpha_j, beta_j, Sigma_x\n )\n\n # Simulate data\n # Different mu_x and sigma_x for every group\n X = np.empty((N,D))\n if Sigma_x is None:\n for j in range(J):\n X[j_lim[j]:j_lim[j+1],:] = \\\n mu_x_j[j] + rng.randn(Nj[j],D)*sigma_x_j[j]\n else:\n cho_x = cholesky(Sigma_x)\n for j in range(J):\n X[j_lim[j]:j_lim[j+1],:] = \\\n mu_x_j[j] + rng.randn(Nj[j],D).dot(sigma_x_j[j]*cho_x)\n y = np.empty(N)\n for n in range(N):\n y[n] = alpha_j[j_ind[n]] + X[n].dot(beta_j[j_ind[n]])\n y = 1/(1+np.exp(-y))\n y_true = (0.5 < y).astype(int)\n y = (rng.rand(N) < y).astype(int)\n\n return data(\n X, y, {'mu_x':mu_x_j, 'sigma_x':sigma_x_j, 'Sigma_x':Sigma_x},\n y_true, Nj, j_lim, j_ind, {'phi':phi_true, 'alpha':alpha_j,\n 'beta':beta_j}\n )\n\n def get_prior(self):\n \"\"\"Get prior for the model.\n\n Returns: S, m, Q, r\n\n \"\"\"\n D = self.D\n # Moment parameters of the prior (transposed in order to get\n # F-contiguous)\n S0 = np.diag(np.append(V0_A, np.ones(D)*V0_B)).T\n m0 = np.append(M0_A, np.ones(D)*M0_B)\n # Natural parameters of the prior\n Q0 = np.diag(np.append(1./V0_A, np.ones(D)/V0_B)).T\n r0 = np.append(M0_A/V0_A, np.ones(D)*(M0_B/V0_B))\n return S0, m0, Q0, r0\n\n def get_param_definitions(self):\n \"\"\"Return the definition of the inferred parameters.\n\n Returns\n -------\n names : seq of str\n Names of the parameters\n\n shapes : seq of tuples\n Shapes of the parameters\n\n hiers : seq of int\n The indexes of the hierarchical dimension of the parameter or None\n if it does not have one.\n\n \"\"\"\n names = ('alpha', 'beta')\n shapes = ((self.J,), (self.J,self.D))\n hiers = (0, 0)\n return names, shapes, hiers\n"
] | [
[
"numpy.empty",
"numpy.log",
"numpy.random.RandomState",
"numpy.sum",
"numpy.ones",
"numpy.exp",
"numpy.abs",
"scipy.linalg.cholesky",
"numpy.cumsum"
]
] |
Sangameshkodge/IMAC-TCASI | [
"3b3a4cf2fd79822d4258658b7b4289eb6f7d7113"
] | [
"Application levels/MNIST/train.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 24 16:30:25 2019\n@author: skodge\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nfrom LeNet import lenet\nimport numpy\n#import sys\ncuda = True\ntrain_batch_size = 32\ntest_batch_size = 128\n\nbest_loss = float(\"inf\")\nbest_epoch = -1\nbest_correct=0\ndataset_path = './MNIST'\n\ncuda = cuda and torch.cuda.is_available()\ntrainset = datasets.MNIST(root=dataset_path, train=True, download=True)\ntrain_mean = (((trainset.data.float()).mean()/255).view(1,)).numpy() # [0.1307]\ntrain_std = (((trainset.data.float()).std()/255).view(1,)).numpy() # [0.3081]\n\ntransform_train = transforms.Compose([\n transforms.RandomCrop(28, padding=4),\n transforms.ToTensor(),\n transforms.Normalize(train_mean, train_std),\n])\n#sys.exit()\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(train_mean, train_std),\n])\nkwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}\ntrain_loader = torch.utils.data.DataLoader(datasets.MNIST(\n root=dataset_path, train=True, download=True,\n transform=transform_train),\n batch_size=train_batch_size, shuffle=True, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST(root=dataset_path, train=False, download=True,\n transform=transform_test),\n batch_size=test_batch_size, shuffle=False, **kwargs)\n \nmodel = lenet(input_size=1,bit_W=4,bit_A=4,sigma=0.6)\nif cuda:\n model.cuda()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.001)\nscheduler = optim.lr_scheduler.MultiStepLR(\n optimizer, milestones=[20,50,80], gamma=0.1)\n\ndef train(epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n if cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % 200 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data.item()))\n \ndef test(epoch, best_loss, best_epoch, best_correct, do_quantise,do_add_var,mode,update=False):\n model.eval()\n test_loss = 0\n correct = 0\n for batch_idx, (data, target) in enumerate(test_loader):\n if cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n #output = model.inference(data, do_quantise= do_quantise, do_add_var= do_add_var)\n output = model(data, training = False)\n # sum up batch loss\n test_loss += criterion(output, target).data.item()\n # get the index of the max log-probability\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n if (batch_idx % 30 == 0 and do_add_var==True):\n print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(\n test_loss, correct, batch_idx*test_batch_size+test_batch_size, 100. * correct /\n (batch_idx*test_batch_size+test_batch_size)))\n \n\n test_loss /= len(test_loader.dataset)\n print(\n 'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n\\n'.format(\n test_loss, correct, best_correct, 100. 
* correct /\n len(test_loader.dataset)))\n \n if (best_correct<correct):\n best_epoch = epoch\n best_loss = test_loss\n best_correct=correct\n if (update):\n torch.save(model, \"lenet_parameter_noinf.pt\")\n \n return best_loss, best_epoch, best_correct,correct\n\nfor epoch in range(100):\n scheduler.step()\n train(epoch)\n best_loss, best_epoch, best_correct,_ = test(epoch, best_loss, best_epoch, best_correct, do_quantise=False,do_add_var=False,mode=False,update=True)\n \n\n\n"
] | [
[
"torch.autograd.Variable",
"torch.save",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss"
]
] |
OpenGVLab/gv-benchmark | [
"ffe1f7460b857c88276d52a1537cbc673002f1e4"
] | [
"gvbenchmark/cls/datasets/retinopathy.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport os\n\nimport numpy as np\nfrom mmcls.core.evaluation.eval_metrics import calculate_confusion_matrix\nfrom mmcls.datasets.base_dataset import BaseDataset\nfrom mmcls.datasets.builder import DATASETS\nfrom mmcls.models.losses import accuracy\n\n\ndef has_file_allowed_extension(filename, extensions):\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in extensions)\n\n\ndef find_folders(root):\n \"\"\"Find classes by folders under a root.\n\n Args:\n root (string): root directory of folders\n\n Returns:\n folder_to_idx (dict): the map from folder name to class idx\n \"\"\"\n folders = [\n d for d in os.listdir(root) if os.path.isdir(os.path.join(root, d))\n ]\n folders.sort()\n folder_to_idx = {folders[i]: i for i in range(len(folders))}\n return folder_to_idx\n\n\ndef get_samples(root, folder_to_idx, extensions):\n \"\"\"Make dataset by walking all images under a root.\n\n Args:\n root (string): root directory of folders\n folder_to_idx (dict): the map from class name to class idx\n extensions (tuple): allowed extensions\n\n Returns:\n samples (list): a list of tuple where each element is (image, label)\n \"\"\"\n samples = []\n root = os.path.expanduser(root)\n for folder_name in sorted(list(folder_to_idx.keys())):\n _dir = os.path.join(root, folder_name)\n for _, _, fns in sorted(os.walk(_dir)):\n for fn in sorted(fns):\n if has_file_allowed_extension(fn, extensions):\n path = os.path.join(folder_name, fn)\n item = (path, folder_to_idx[folder_name])\n samples.append(item)\n return samples\n\n\[email protected]_module()\nclass RETINOPATHY(BaseDataset):\n\n IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif')\n\n def load_annotations(self):\n if self.ann_file is None:\n folder_to_idx = find_folders(self.data_prefix)\n samples = get_samples(self.data_prefix,\n folder_to_idx,\n extensions=self.IMG_EXTENSIONS)\n if len(samples) == 0:\n raise (RuntimeError('Found 0 files in subfolders of: '\n f'{self.data_prefix}. '\n 'Supported extensions are: '\n f'{\",\".join(self.IMG_EXTENSIONS)}'))\n\n self.folder_to_idx = folder_to_idx\n elif isinstance(self.ann_file, str):\n with open(self.ann_file) as f:\n samples = [x.strip().rsplit(' ', 1) for x in f.readlines()]\n else:\n raise TypeError('ann_file must be a str or None')\n self.samples = samples\n\n data_infos = []\n for filename, gt_label in self.samples:\n info = {'img_prefix': self.data_prefix}\n info['img_info'] = {'filename': filename}\n info['gt_label'] = np.array(gt_label, dtype=np.int64)\n data_infos.append(info)\n return data_infos\n\n def evaluate(\n self,\n results,\n # gt_labels,\n metric='accuracy',\n metric_options=None,\n logger=None):\n \"\"\"Evaluate the dataset.\n\n Args:\n results (list): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n Default value is `accuracy`.\n metric_options (dict, optional): Options for calculating metrics.\n Allowed keys are 'topk', 'thrs' and 'average_mode'.\n Defaults to None.\n logger (logging.Logger | str, optional): Logger used for printing\n related information during evaluation. 
Defaults to None.\n Returns:\n dict: evaluation results\n \"\"\"\n if metric_options is None:\n metric_options = {'topk': (1, 5)}\n if isinstance(metric, str):\n metrics = [metric]\n else:\n metrics = metric\n allowed_metrics = ['accuracy', 'per_class_acc']\n eval_results = {}\n results = np.vstack(results)\n gt_labels = self.get_gt_labels()\n num_imgs = len(results)\n assert len(gt_labels) == num_imgs, 'dataset testing results should '\\\n 'be of the same length as gt_labels.'\n\n invalid_metrics = set(metrics) - set(allowed_metrics)\n if len(invalid_metrics) != 0:\n raise ValueError(f'metric {invalid_metrics} is not supported.')\n\n topk = metric_options.get('topk', (1, 5))\n thrs = metric_options.get('thrs')\n\n if 'accuracy' in metrics:\n if thrs is not None:\n acc = accuracy(results, gt_labels, topk=topk, thrs=thrs)\n else:\n acc = accuracy(results, gt_labels, topk=topk)\n if isinstance(topk, tuple):\n eval_results_ = {\n f'accuracy_top-{k}': a\n for k, a in zip(topk, acc)\n }\n else:\n eval_results_ = {'accuracy': acc}\n if isinstance(thrs, tuple):\n for key, values in eval_results_.items():\n eval_results.update({\n f'{key}_thr_{thr:.2f}': value.item()\n for thr, value in zip(thrs, values)\n })\n else:\n eval_results.update(\n {k: v.item()\n for k, v in eval_results_.items()})\n\n #####\n if 'per_class_acc' in metrics:\n confusion_matrix = calculate_confusion_matrix(results, gt_labels)\n per_class_acc = (confusion_matrix.diag() /\n confusion_matrix.sum(dim=1)).mean()\n eval_results['per_class_acc'] = per_class_acc\n\n return eval_results\n"
] | [
[
"numpy.array",
"numpy.vstack"
]
] |
secimTools/GalaxyTools | [
"a79d9f6d733c9a8c1e52b5aa8f4b7d0da79cbf6d"
] | [
"src/scripts/mahalanobis_distance.py"
] | [
"#!/usr/bin/env python\n####################################################################\n# AUTHOR: Miguel Ibarra ([email protected])\n#\n# DESCRIPTION: Pairwise and to mean standarized euclidean comparison\n####################################################################\n\nimport os\nimport logging\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nfrom numpy.linalg import svd\nimport matplotlib\nmatplotlib.use(\"Agg\")\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom sklearn.neighbors import DistanceMetric\nfrom secimtools.dataManager import logger as sl\nfrom secimtools.dataManager.interface import wideToDesign\nfrom secimtools.visualManager import module_box as box\nfrom secimtools.visualManager import module_lines as lines\nfrom secimtools.visualManager import module_scatter as scatter\nfrom secimtools.visualManager.manager_color import colorHandler\nfrom secimtools.visualManager.manager_figure import figureHandler\n\n\ndef getOptions():\n \"\"\" Function to pull in arguments \"\"\"\n description = \"\"\"\"\"\"\n\n parser = argparse.ArgumentParser(\n description=description, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n # Standard Input\n standard = parser.add_argument_group(description=\"Required Input\")\n standard.add_argument(\n \"-i\",\n \"--input\",\n dest=\"input\",\n action=\"store\",\n required=True,\n help=\"Dataset in Wide format\",\n )\n standard.add_argument(\n \"-d\",\n \"--design\", dest=\"design\",\n action=\"store\",\n required=True,\n help=\"Design file\",\n )\n standard.add_argument(\n \"-id\",\n \"--ID\",\n dest=\"uniqID\",\n action=\"store\",\n required=True,\n help=\"Name of the column with uniqueID.\",\n )\n standard.add_argument(\n \"-g\",\n \"--group\",\n dest=\"group\",\n default=False,\n action=\"store\",\n required=False,\n help=\"Treatment group\",\n )\n standard.add_argument(\n \"-o\", \"--order\", dest=\"order\", action=\"store\", default=False, help=\"Run Order\"\n )\n standard.add_argument(\n \"-l\",\n \"--levels\",\n dest=\"levels\",\n action=\"store\",\n default=False,\n help=\"Additional notes.\",\n )\n # Tool output\n output = parser.add_argument_group(description=\"Output Files\")\n output.add_argument(\n \"-f\",\n \"--figure\",\n dest=\"figure\",\n action=\"store\",\n required=True,\n help=\"PDF Output of standardized\" \"Euclidean distance plot\",\n )\n output.add_argument(\n \"-m\",\n \"--distanceToMean\",\n dest=\"toMean\",\n action=\"store\",\n required=True,\n help=\"TSV Output of Mahalanobis \" \"distances from samples to the mean.\",\n )\n output.add_argument(\n \"-pw\",\n \"--distancePairwise\",\n dest=\"pairwise\",\n action=\"store\",\n required=True,\n help=\"TSV Output of sample-pairwise\" \"mahalanobis distances.\",\n )\n # Tool Input\n tool = parser.add_argument_group(description=\"Optional Input\")\n tool.add_argument(\n \"-p\",\n \"--per\",\n dest=\"p\",\n action=\"store\",\n required=False,\n default=0.95,\n type=float,\n help=\"The threshold\" \"for standard distributions. 
The default is 0.95.\",\n )\n tool.add_argument(\n \"-pen\",\n \"--penalty\",\n dest=\"penalty\",\n action=\"store\",\n required=False,\n default=0.5,\n type=float,\n help=\"Value\" \" of lambda for the penalty.\",\n )\n tool.add_argument(\n \"-lg\",\n \"--log\",\n dest=\"log\",\n action=\"store\",\n required=False,\n default=True,\n help=\"Log file\",\n )\n # Plot options\n plot = parser.add_argument_group(title=\"Plot options\")\n plot.add_argument(\n \"-pal\",\n \"--palette\",\n dest=\"palette\",\n action=\"store\",\n required=False,\n default=\"tableau\",\n help=\"Name of the palette to use.\",\n )\n plot.add_argument(\n \"-col\",\n \"--color\",\n dest=\"color\",\n action=\"store\",\n required=False,\n default=\"Tableau_20\",\n help=\"Name of a valid color scheme\" \" on the selected palette\",\n )\n args = parser.parse_args()\n\n # Standardize paths\n args.figure = os.path.abspath(args.figure)\n args.toMean = os.path.abspath(args.toMean)\n args.pairwise = os.path.abspath(args.pairwise)\n\n # if args.levels then split otherwise args.level = emptylist\n if args.levels:\n args.levels = args.levels.split(\",\")\n\n return args\n\n\ndef calculatePenalizedSigma(data, penalty=0.5):\n # Getting n and p of data.\n n, p = data.shape\n\n # Calculate the mean of every row\n data[\"mean\"] = data.mean(axis=1)\n\n # Standardize data (_std stands for standardized)\n data_std = data.apply(lambda x: x - x[\"mean\"], axis=1)\n\n # Dropping mean column from both data and data_standardized\n data.drop(\"mean\", axis=1, inplace=True)\n data_std.drop(\"mean\", axis=1, inplace=True)\n\n # Calculate singular value decomposition\n U, s, V = svd(data_std)\n\n # Calculate ds based on ss (d = s**2)\n d = s ** 2\n\n # Based on ds calculate the penalty. penalty must be expressed as a\n # proportion (from 0 to 1) to use it on the np.percentile it will be\n # multiplied by 100\n penalty = np.percentile(d, q=penalty * 100.0)\n\n # After the calculation of the penalty extend d vector to size n\n d = np.append(d, np.zeros(shape=(n - len(d))))\n\n # Calculate penalty and inverse for d (1/(d+penalty))\n d = [1 / (val + penalty) for val in d]\n\n # Creating empty matrix of size n x p and the fiiling the diagonal with\n # penalized s values\n # S = np.zeros((n,p))\n # S[:len(s),:len(s)] = np.diag(s)\n D = np.diag(d)\n\n # Compute the stimate of the penalized sigma\n penalized_sigma = np.dot(U, np.dot(D, U.T))\n\n # Multiply everything by p-1\n penalized_sigma = (p - 1) * penalized_sigma\n\n # Returning penalized sigma\n return penalized_sigma\n\n\ndef calculateDistances(data, V_VI):\n \"\"\"\n Calculates euclidean or mahalanobis distances. 
Returns an array of\n distances to the Mean and an a matrix of pairwise distances.\n\n :Arguments:\n :type wide: pandas.DataFrame\n :param wide: A wide formatted data frame with samples as columns and\n compounds as rows.\n\n :Returns:\n :return distanceToMean: pd.DataFrames with distances to the mean.\n :rtype: pd.DataFrames\n\n :return distancePairwise: pd.DataFrames with pairwisde distances between\n samples.\n :rtype: pd.DataFrames\n \"\"\"\n # Calculating mean\n mean = pd.DataFrame(data.mean(axis=1))\n\n # Getting metric\n dist = DistanceMetric.get_metric(\"mahalanobis\", VI=V_VI)\n\n # Calculate distance from all samples to the mean\n distanceToMean = dist.pairwise(data.values.T, mean.T)\n distanceToMean = pd.DataFrame(\n distanceToMean, columns=[\"distance_to_mean\"], index=data.columns\n )\n distanceToMean.name = data.name\n\n # Calculate pairwise distances among samples\n distancePairwise = dist.pairwise(data.values.T)\n distancePairwise = pd.DataFrame(\n distancePairwise, columns=data.columns, index=data.columns\n )\n distancePairwise.name = data.name\n\n # Converts to NaN the diagonal\n for index, row in distancePairwise.iterrows():\n distancePairwise.loc[index, index] = np.nan\n\n return (distanceToMean, distancePairwise)\n\n\ndef calculateCutoffs(data, p):\n \"\"\"\n Calculate the Standardized Euclidean Distance and return an array of\n distances to the Mean and a matrix of pairwise distances.\n\n :Arguments:\n :type wide: pandas.DataFrame\n :param wide: A wide formatted data frame with samples as columns and\n compounds as rows.\n\n :type p: float.\n :param p: percentile of cutoff.\n\n :Returns:\n :rtype cutoff1: pandas.dataFrame\n :return cutoff1: Cutoff values for mean, beta, chi-sqr and normal.\n\n :rtype cutoff2: pandas.dataFrame\n :return cutoff2: Cutoff values for pairwise, beta, chi-sqr and normal.\n \"\"\"\n\n # Stablish iterations, and numer of colums ps and number of rows nf\n ps = len(data.columns) # p number of samples\n nf = len(data.index) # s number of features\n iters = 20000\n\n # Calculates betaP\n betaP = np.percentile(\n pd.DataFrame(\n stats.beta.rvs(0.5, 0.5 * (ps - 2), size=iters * nf).reshape(iters, nf)\n ).sum(axis=1),\n p * 100.0,\n )\n\n # casting to float so it behaves well\n ps = float(ps)\n nf = float(nf)\n\n # Calculates cutoffs beta,norm & chisq for data to mean\n betaCut1 = np.sqrt((ps - 1) ** 2 / ps * betaP)\n normCut1 = np.sqrt(\n stats.norm.ppf(\n p,\n (ps - 1) / ps * nf,\n np.sqrt(2 * nf * (ps - 2) * (ps - 1) ** 2 / ps ** 2 / (ps + 1)),\n )\n )\n chisqCut1 = np.sqrt((ps - 1) / ps * stats.chi2.ppf(p, nf))\n\n # Calculates cutoffs beta,n norm & chisq for pairwise\n betaCut2 = np.sqrt((ps - 1) * 2 * betaP)\n normCut2 = np.sqrt(stats.norm.ppf(p, 2 * nf, np.sqrt(8 * nf * (ps - 2) / (ps + 1))))\n chisqCut2 = np.sqrt(2 * stats.chi2.ppf(p, nf))\n\n # Create data fram for ecah set of cut offs\n cutoff1 = pd.DataFrame(\n [[betaCut1, normCut1, chisqCut1], [\"Beta(Exact)\", \"Normal\", \"Chi-sq\"]],\n index=[\"cut\", \"name\"],\n columns=[\"Beta(Exact)\", \"Normal\", \"Chi-sq\"],\n )\n cutoff2 = pd.DataFrame(\n [[betaCut2, normCut2, chisqCut2], [\"Beta(Exact)\", \"Normal\", \"Chi-sq\"]],\n index=[\"cut\", \"name\"],\n columns=[\"Beta(Exact)\", \"Normal\", \"Chi-sq\"],\n )\n\n # Create Palette\n cutPalette.getColors(cutoff1.T, [\"name\"])\n\n # Returning colors\n return (cutoff1, cutoff2)\n\n\ndef plotCutoffs(cut_S, ax, p):\n \"\"\"\n Plot the cutoff lines to each plot\n\n :Arguments:\n :type cut_S: pandas.Series\n :param cut_S: contains a cutoff 
value, name and color\n\n :type ax: matplotlib.axes._subplots.AxesSubplot\n :param ax: Gets an ax project.\n\n :type p: float\n :param p: percentile of cutoff\n \"\"\"\n lines.drawCutoffHoriz(\n ax=ax,\n y=float(cut_S.values[0]),\n cl=cutPalette.ugColors[cut_S.name],\n lb=\"{0} {1}% Threshold: {2}\".format(\n cut_S.name, round(p * 100, 3), round(float(cut_S.values[0]), 1)\n ),\n ls=\"--\",\n lw=2,\n )\n\n\ndef plotDistances(df_distance, palette, plotType, disType, cutoff, p, pdf):\n # Geting number of samples in dataframe (ns stands for number of samples)\n ns = len(df_distance.index)\n\n # Calculates the widht for the figure base on the number of samples\n figWidth = max(ns / 2, 16)\n\n # Keeping the order on the colors\n df_distance[\"colors\"] = palette.design[\"colors\"]\n\n # Create figure object with a single axis\n figure = figureHandler(proj=\"2d\", figsize=(figWidth, 8))\n\n # Getting type of distance file\n if \"distance_to_mean\" in df_distance.columns:\n dataType = \"to the mean\"\n else:\n dataType = \"pairwise\"\n\n # Getting ty of distance header\n if disType == \"Mahalanobis\":\n distType1 = \"Penalized\"\n distType2 = disType\n else:\n distType1 = \"Standardized\"\n distType2 = disType\n\n # Adds Figure title, x axis limits and set the xticks\n figure.formatAxis(\n figTitle=\"{0} for {1} {2} Distance for {3} {4}\".format(\n plotType, distType1, distType2, df_distance.name, dataType\n ),\n yTitle=\"{0} {1} Distance\".format(distType1, distType2),\n xTitle=\"Index\",\n ylim=\"ignore\",\n xlim=(-0.5, -0.5 + ns),\n xticks=df_distance.index,\n )\n\n # If distance to mean\n if dataType == \"to the mean\":\n # Plot scatterplot quickplot\n scatter.scatter2D(\n ax=figure.ax[0],\n colorList=df_distance[\"colors\"],\n x=range(len(df_distance.index)),\n y=df_distance[\"distance_to_mean\"],\n )\n # if pairwise\n else:\n if plotType == \"Scatterplot\":\n # Plot scatterplot\n for index in df_distance.index:\n scatter.scatter2D(\n ax=figure.ax[0],\n colorList=df_distance[\"colors\"][index],\n x=range(len(df_distance.index)),\n y=df_distance[index],\n )\n\n elif plotType == \"Box-plots\":\n # Plot Box plot\n box.boxDF(ax=figure.ax[0], colors=df_distance[\"colors\"], dat=df_distance)\n\n # Shrink figure\n figure.shrink()\n\n # Plot legend\n figure.makeLegend(figure.ax[0], palette.ugColors, palette.combName)\n\n # Add a cutoof line\n cutoff.apply(lambda x: plotCutoffs(x, ax=figure.ax[0], p=p), axis=0)\n\n # Add figure to PDF and close the figure afterwards\n figure.addToPdf(pdf)\n\n # Drop \"color\" column to no mess the results\n df_distance.drop(\"colors\", axis=1, inplace=True)\n\n\ndef main(args):\n \"\"\"\n Main function\n \"\"\"\n if args.levels and args.group:\n levels = [args.group] + args.levels\n elif args.group and not args.levels:\n levels = [args.group]\n else:\n levels = []\n logger.info(u\"Color selection groups: {0}\".format(\",\".join(levels)))\n\n dat = wideToDesign(\n args.input,\n args.design,\n args.uniqID,\n group=args.group,\n anno=args.levels,\n logger=logger,\n runOrder=args.order,\n )\n\n # Removing groups with just one sample and then clean from missing data.\n dat.removeSingle()\n dat.dropMissing()\n\n # Select colors for data by adding an additional column for colors\n dataPalette.getColors(design=dat.design, groups=levels)\n\n if args.group:\n disGroups = [\n (group.index, level)\n for level, group in dataPalette.design.groupby(dataPalette.combName)\n ]\n else:\n disGroups = [(dat.design.index, \"samples\")]\n\n pairwise_disCuts = list()\n toMean_disCuts = 
list()\n for indexes, name in disGroups:\n # If less than 3 elements in the group skip to the next\n if len(indexes) < 3:\n logger.error(\n \"Group {0} with fewer than 3 elements is excluded from the analysis\".format(name)\n )\n continue\n\n # Subsetting wide\n currentFrame = pd.DataFrame(dat.wide[indexes].copy())\n currentFrame.name = name\n\n # Calculate Penalized Sigma\n penalizedSigma = calculatePenalizedSigma(\n data=currentFrame, penalty=args.penalty\n )\n\n # Calculate Distances (dis stands for distance)\n disToMean, disPairwise = calculateDistances(\n data=currentFrame, V_VI=penalizedSigma\n )\n\n # Calculate cutoffs\n cutoff1, cutoff2 = calculateCutoffs(currentFrame, args.p)\n\n # Appending results\n pairwise_disCuts.append([disPairwise, cutoff2])\n toMean_disCuts.append([disToMean, cutoff1])\n\n if args.group:\n # Splitting results to mean and pairwise\n pairwise_dis = [distance for distance, cutoff in pairwise_disCuts]\n toMean_dis = [distance for distance, cutoff in toMean_disCuts]\n\n # Merging to get distance for all pairwise\n pairwise_dis_all = pd.DataFrame()\n for dis in pairwise_dis:\n pairwise_dis_all = pd.DataFrame.merge(\n pairwise_dis_all,\n dis,\n left_index=True,\n right_index=True,\n how=\"outer\",\n sort=False,\n )\n pairwise_dis_all.name = \"samples\"\n\n # Merging to get distance for all to mean\n toMean_dis_all = pd.DataFrame(columns=[\"distance_to_mean\",\"group\"])\n for dis in toMean_dis:\n dis[\"group\"] = dis.name\n toMean_dis_all = toMean_dis_all.append(dis)\n toMean_dis_all.sort_values(by=\"group\", inplace=True)\n toMean_dis_all.drop(\"group\", axis=1, inplace=True)\n toMean_dis_all.name = \"samples\"\n\n # Get cuttoffs for distances\n cutoff1, cutoff2 = calculateCutoffs(dat.wide, args.p)\n\n # Append toMean_dis_all and pairwise_dis_all to toMean_dis_cuts and\n # pairwise_dis_cuts respectively.\n toMean_disCuts.append([toMean_dis_all, cutoff1])\n pairwise_disCuts.append([pairwise_dis_all, cutoff2])\n\n # Iterate over each pair of (distance,cutoff) for toMean and pairwise to\n # plot distances.\n with PdfPages((args.figure)) as pdf:\n # Iterating over toMean,pairwise distances in parallel\n for toMean, pairwise in zip(toMean_disCuts, pairwise_disCuts):\n # Making plots\n plotDistances(\n df_distance=toMean[0],\n palette=dataPalette,\n p=args.p,\n plotType=\"Scatterplot\",\n disType=\"Mahalanobis\",\n cutoff=toMean[1],\n pdf=pdf,\n )\n plotDistances(\n df_distance=pairwise[0],\n palette=dataPalette,\n p=args.p,\n plotType=\"Scatterplot\",\n disType=\"Mahalanobis\",\n cutoff=pairwise[1],\n pdf=pdf,\n )\n plotDistances(\n df_distance=pairwise[0],\n palette=dataPalette,\n p=args.p,\n plotType=\"Box-plots\",\n disType=\"Mahalanobis\",\n cutoff=pairwise[1],\n pdf=pdf,\n )\n\n # Since its a list of dataframes and we are only interested in the last one\n # we are using [-1] to access it and [0] to getit out of the list.\n # Outputting distances to mean and pairwise\n toMean_disCuts[-1][0].to_csv(args.toMean, index_label=\"sampleID\", sep=\"\\t\")\n pairwise_disCuts[-1][0].to_csv(args.pairwise, index_label=\"sampleID\", sep=\"\\t\")\n\n # Ending script\n logger.info(\"Script complete.\")\n\n\nif __name__ == \"__main__\":\n args = getOptions()\n logger = logging.getLogger()\n sl.setLogger(logger)\n logger.info(\n u\"\"\"Importing data with following parameters:\n \\tWide: {0}\n \\tDesign: {1}\n \\tUnique ID: {2}\n \\tGroup: {3}\n \\tRun Order: {4}\n \"\"\".format(\n args.input, args.design, args.uniqID, args.group, args.order\n )\n )\n dataPalette = 
colorHandler(pal=args.palette, col=args.color)\n cutPalette = colorHandler(pal=\"tableau\", col=\"TrafficLight_9\")\n logger.info(\n u\"Using {0} color scheme from {1} palette\".format(args.color, args.palette)\n )\n main(args)\n"
] | [
[
"matplotlib.use",
"numpy.dot",
"numpy.percentile",
"pandas.DataFrame",
"matplotlib.backends.backend_pdf.PdfPages",
"pandas.DataFrame.merge",
"scipy.stats.beta.rvs",
"numpy.diag",
"numpy.linalg.svd",
"numpy.sqrt",
"scipy.stats.chi2.ppf",
"sklearn.neighbors.DistanceMetric.get_metric"
]
] |
zky362550824/MAUIL | [
"cd791a6872d4c797f519a762f530007c41ad3eb2"
] | [
"code/main_dblp.py"
] | [
"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport json,pickle,time,os\r\nfrom align import align_cca\r\nfrom utils import dataset,get_sim,hit_precision\r\nfrom multiprocessing import Pool\r\nfrom functools import partial\r\n\r\ndef psearch(n_train,emb,K,reg,seed):\r\n test = datasets.get('test',n=500,seed=seed)\r\n train = datasets.get('train',n=n_train,seed=seed)\r\n\r\n traindata = []\r\n for k,v in train:\r\n traindata.append([emb[k],emb[v]])\r\n traindata = np.array(traindata)\r\n\r\n testdata = []\r\n for k,v in test:\r\n testdata.append([emb[k],emb[v]])\r\n testdata = np.array(testdata)\r\n \r\n zx,zy=align_cca(traindata,testdata,K=K,reg=reg)\r\n \r\n sim_matrix = get_sim(zx,zy,top_k=10)\r\n score=[]\r\n for top_k in [1,3,5,10]:\r\n score_ = hit_precision(sim_matrix,top_k=top_k)\r\n score.append(score_)\r\n return score\r\n\r\nanchors = dict(json.load(open('../data/dblp/anchors.txt','r')))\r\nprint(time.ctime(),'\\t # of Anchors:',len(anchors))\r\ng1,g2 = pickle.load(open('../data/dblp/networks','rb'))\r\nprint(time.ctime(),'\\t Size of two networks:',len(g1),len(g2)) \r\ndatasets = dataset(anchors)\r\npool=Pool(min(16,os.cpu_count()-2))\r\n\r\nif __name__ == '__main__':\r\n result=[]\r\n for seed in range(3):\r\n d = 100\r\n fname = '../emb/emb_dblp_seed_{}_dim_{}'.format(seed,d)\r\n emb_c,emb_w,emb_t,emb_s = pickle.load(open(fname,'rb'))\r\n\r\n emb_attr = np.concatenate((emb_c,emb_w,emb_t),axis=-1)\r\n emb_all = np.concatenate((emb_c,emb_w,emb_t,emb_s),axis=-1)\r\n for model in [2]:\r\n n_train = 200\r\n emb = [emb_attr,emb_s,emb_all][model]\r\n model_name = ['MAUIL-a','MAUIL-s','MAUIL'][model]\r\n dim = emb.shape[-1]\r\n for K in [[0],[0],[80]][model]:\r\n for reg in [100,1000]:\r\n score=[]\r\n seed_ = list(range(10))\r\n score_10 = pool.map(partial(psearch,n_train,emb,K,reg),seed_)\r\n score_10 = np.array(score_10)\r\n assert score_10.shape==(10,4)\r\n score = np.mean(score_10,axis=0)\r\n \r\n record = [seed,d,model_name,n_train,K,reg]+score.tolist()\r\n result.append(record)\r\n print(record)\r\n\r\n json.dump(result,open('result_MAUIL_dblp.txt','w'))"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.mean"
]
] |
aachenmax/vmaf | [
"e65143f36ac9324a1242614bdd6256861d4f46f6"
] | [
"python/test/local_explainer_test.py"
] | [
"import os\nimport unittest\n\nimport numpy as np\n\nfrom vmaf.config import VmafConfig\nfrom vmaf.core.asset import Asset\nfrom vmaf.core.local_explainer import LocalExplainer\nfrom vmaf.core.quality_runner_extra import VmafQualityRunnerWithLocalExplainer\nfrom vmaf.core.noref_feature_extractor import MomentNorefFeatureExtractor\nfrom vmaf.core.raw_extractor import DisYUVRawVideoExtractor\nfrom vmaf.core.result_store import FileSystemResultStore\nfrom vmaf.core.train_test_model import SklearnRandomForestTrainTestModel, \\\n MomentRandomForestTrainTestModel\nfrom vmaf.routine import read_dataset\nfrom vmaf.tools.misc import import_python_file\n\n__copyright__ = \"Copyright 2016-2020, Netflix, Inc.\"\n__license__ = \"BSD+Patent\"\n\n\nclass LocalExplainerTest(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n if hasattr(self, 'runner'):\n self.runner.remove_results()\n pass\n\n def test_explain_train_test_model(self):\n\n model_class = SklearnRandomForestTrainTestModel\n\n train_dataset_path = VmafConfig.test_resource_path('test_image_dataset_diffdim.py')\n train_dataset = import_python_file(train_dataset_path)\n train_assets = read_dataset(train_dataset)\n\n fextractor = MomentNorefFeatureExtractor(\n train_assets,\n None,\n fifo_mode=True,\n delete_workdir=True,\n result_store=None,\n optional_dict=None,\n optional_dict2=None,\n )\n fextractor.run(parallelize=True)\n self.features = fextractor.results\n\n xys = model_class.get_xys_from_results(self.features[:7])\n model = model_class({'norm_type': 'normalize',\n 'n_estimators': 10,\n 'random_state': 0}, None)\n model.train(xys)\n\n np.random.seed(0)\n\n xs = model_class.get_xs_from_results(self.features[7:])\n explainer = LocalExplainer(neighbor_samples=1000)\n exps = explainer.explain(model, xs)\n\n self.assertAlmostEqual(exps['feature_weights'][0, 0], -0.12416, places=4)\n self.assertAlmostEqual(exps['feature_weights'][1, 0], 0.00076, places=4)\n self.assertAlmostEqual(exps['feature_weights'][0, 1], -0.20931, places=4)\n self.assertAlmostEqual(exps['feature_weights'][1, 1], -0.01245, places=4)\n self.assertAlmostEqual(exps['feature_weights'][0, 2], 0.02322, places=4)\n self.assertAlmostEqual(exps['feature_weights'][1, 2], 0.03673, places=4)\n\n self.assertAlmostEqual(exps['features'][0, 0], 107.73501, places=4)\n self.assertAlmostEqual(exps['features'][1, 0], 35.81638, places=4)\n self.assertAlmostEqual(exps['features'][0, 1], 13691.23881, places=4)\n self.assertAlmostEqual(exps['features'][1, 1], 1611.56764, places=4)\n self.assertAlmostEqual(exps['features'][0, 2], 2084.40542, places=4)\n self.assertAlmostEqual(exps['features'][1, 2], 328.75389, places=4)\n\n self.assertAlmostEqual(exps['features_normalized'][0, 0], -0.65527, places=4)\n self.assertAlmostEqual(exps['features_normalized'][1, 0], -3.74922, places=4)\n self.assertAlmostEqual(exps['features_normalized'][0, 1], -0.68872, places=4)\n self.assertAlmostEqual(exps['features_normalized'][1, 1], -2.79586, places=4)\n self.assertAlmostEqual(exps['features_normalized'][0, 2], 0.08524, places=4)\n self.assertAlmostEqual(exps['features_normalized'][1, 2], -1.32625, places=4)\n\n self.assertEqual(exps['feature_names'],\n ['Moment_noref_feature_1st_score',\n 'Moment_noref_feature_2nd_score',\n 'Moment_noref_feature_var_score']\n )\n\n def test_explain_vmaf_results(self):\n ref_path = VmafConfig.test_resource_path(\"yuv\", \"src01_hrc00_576x324.yuv\")\n dis_path = VmafConfig.test_resource_path(\"yuv\", \"src01_hrc01_576x324.yuv\")\n asset = 
Asset(dataset=\"test\", content_id=0, asset_id=0,\n workdir_root=VmafConfig.workdir_path(),\n ref_path=ref_path,\n dis_path=dis_path,\n asset_dict={'width': 576, 'height': 324})\n\n asset_original = Asset(dataset=\"test\", content_id=0, asset_id=1,\n workdir_root=VmafConfig.workdir_path(),\n ref_path=ref_path,\n dis_path=ref_path,\n asset_dict={'width': 576, 'height': 324})\n\n self.runner = VmafQualityRunnerWithLocalExplainer(\n [asset, asset_original],\n None, fifo_mode=True,\n delete_workdir=True,\n result_store=None,\n optional_dict2={'explainer': LocalExplainer(neighbor_samples=100)}\n )\n\n np.random.seed(0)\n\n self.runner.run()\n results = self.runner.results\n\n self.assertAlmostEqual(results[0]['VMAF_score'], 76.699271272486044, places=4)\n self.assertAlmostEqual(results[1]['VMAF_score'], 99.946416604585025, places=4)\n\n expected_feature_names = ['VMAF_feature_adm2_score',\n 'VMAF_feature_motion2_score',\n 'VMAF_feature_vif_scale0_score',\n 'VMAF_feature_vif_scale1_score',\n 'VMAF_feature_vif_scale2_score',\n 'VMAF_feature_vif_scale3_score']\n\n weights = np.mean(results[0]['VMAF_scores_exps']['feature_weights'], axis=0)\n self.assertAlmostEqual(weights[0], 0.66021689480916868, places=4)\n self.assertAlmostEqual(weights[1], 0.14691682562211777, places=4)\n self.assertAlmostEqual(weights[2], -0.023682744847036086, places=4)\n self.assertAlmostEqual(weights[3], -0.029779341850172818, places=4)\n self.assertAlmostEqual(weights[4], 0.19149485210137338, places=4)\n self.assertAlmostEqual(weights[5], 0.31890978778344126, places=4)\n\n self.assertEqual(results[0]['VMAF_scores_exps']['feature_names'],\n expected_feature_names)\n\n weights = np.mean(results[1]['VMAF_scores_exps']['feature_weights'], axis=0)\n self.assertAlmostEqual(weights[0], 0.69597961598838509, places=4)\n self.assertAlmostEqual(weights[1], 0.18256016705513464, places=4)\n self.assertAlmostEqual(weights[2], 0.0090048099912423147, places=4)\n self.assertAlmostEqual(weights[3], 0.028671810808880094, places=4)\n self.assertAlmostEqual(weights[4], 0.21935602577417926, places=4)\n self.assertAlmostEqual(weights[5], 0.34190431429767715, places=4)\n\n self.assertEqual(results[1]['VMAF_scores_exps']['feature_names'],\n expected_feature_names)\n\n # self.runner.show_local_explanations(results, indexs=[2, 3])\n # import matplotlib.pyplot as plt\n # DisplayConfig.show()\n\n\nclass LocalExplainerMomentRandomForestTest(unittest.TestCase):\n\n def setUp(self):\n train_dataset_path = VmafConfig.test_resource_path(\"test_image_dataset_diffdim.py\")\n train_dataset = import_python_file(train_dataset_path)\n train_assets = read_dataset(train_dataset)\n\n self.h5py_filepath = VmafConfig.workdir_path('test.hdf5')\n self.h5py_file = DisYUVRawVideoExtractor.open_h5py_file(self.h5py_filepath)\n optional_dict2 = {'h5py_file': self.h5py_file}\n\n fextractor = DisYUVRawVideoExtractor(\n train_assets,\n None,\n fifo_mode=True,\n delete_workdir=True,\n result_store=None,\n optional_dict=None,\n optional_dict2=optional_dict2,\n )\n fextractor.run(parallelize=False) # CAN ONLY USE SERIAL MODE FOR DisYRawVideoExtractor\n self.features = fextractor.results\n\n def tearDown(self):\n if hasattr(self, 'h5py_file'):\n DisYUVRawVideoExtractor.close_h5py_file(self.h5py_file)\n if os.path.exists(self.h5py_filepath):\n os.remove(self.h5py_filepath)\n\n def test_explain_train_test_model(self):\n\n model_class = MomentRandomForestTrainTestModel\n\n xys = model_class.get_xys_from_results(self.features[:7])\n del xys['dis_u']\n del xys['dis_v']\n\n model = 
model_class({'norm_type': 'normalize',\n 'n_estimators': 10,\n 'random_state': 0})\n model.train(xys)\n\n np.random.seed(0)\n\n xs = model_class.get_xs_from_results(self.features[7:])\n del xs['dis_u']\n del xs['dis_v']\n\n explainer = LocalExplainer(neighbor_samples=1000)\n exps = explainer.explain(model, xs)\n\n self.assertAlmostEqual(exps['feature_weights'][0, 0], -0.12416, places=4)\n self.assertAlmostEqual(exps['feature_weights'][1, 0], 0.00076, places=4)\n self.assertAlmostEqual(exps['feature_weights'][0, 1], -0.20931, places=4)\n self.assertAlmostEqual(exps['feature_weights'][1, 1], -0.01245, places=4)\n self.assertAlmostEqual(exps['feature_weights'][0, 2], 0.02322, places=4)\n self.assertAlmostEqual(exps['feature_weights'][1, 2], 0.03673, places=4)\n\n self.assertAlmostEqual(exps['features'][0, 0], 107.73501, places=4)\n self.assertAlmostEqual(exps['features'][1, 0], 35.81638, places=4)\n self.assertAlmostEqual(exps['features'][0, 1], 13691.23881, places=4)\n self.assertAlmostEqual(exps['features'][1, 1], 1611.56764, places=4)\n self.assertAlmostEqual(exps['features'][0, 2], 2084.40542, places=4)\n self.assertAlmostEqual(exps['features'][1, 2], 328.75389, places=4)\n\n self.assertAlmostEqual(exps['features_normalized'][0, 0], -0.65527, places=4)\n self.assertAlmostEqual(exps['features_normalized'][1, 0], -3.74922, places=4)\n self.assertAlmostEqual(exps['features_normalized'][0, 1], -0.68872, places=4)\n self.assertAlmostEqual(exps['features_normalized'][1, 1], -2.79586, places=4)\n self.assertAlmostEqual(exps['features_normalized'][0, 2], 0.08524, places=4)\n self.assertAlmostEqual(exps['features_normalized'][1, 2], -1.32625, places=4)\n\n self.assertEqual(exps['feature_names'], ['dis_y'])\n # TODO: fix feature name to 'Moment_noref_feature_1st_score', ...\n\n\nclass QualityRunnerTest(unittest.TestCase):\n\n def tearDown(self):\n if hasattr(self, 'runner'):\n self.runner.remove_results()\n pass\n\n def setUp(self):\n self.result_store = FileSystemResultStore()\n\n def test_run_vmaf_runner_local_explainer_with_bootstrap_model(self):\n ref_path = VmafConfig.test_resource_path(\"yuv\", \"src01_hrc00_576x324.yuv\")\n dis_path = VmafConfig.test_resource_path(\"yuv\", \"src01_hrc01_576x324.yuv\")\n asset = Asset(dataset=\"test\", content_id=0, asset_id=0,\n workdir_root=VmafConfig.workdir_path(),\n ref_path=ref_path,\n dis_path=dis_path,\n asset_dict={'width': 576, 'height': 324})\n\n asset_original = Asset(dataset=\"test\", content_id=0, asset_id=1,\n workdir_root=VmafConfig.workdir_path(),\n ref_path=ref_path,\n dis_path=ref_path,\n asset_dict={'width': 576, 'height': 324})\n\n self.runner = VmafQualityRunnerWithLocalExplainer(\n [asset, asset_original],\n None, fifo_mode=True,\n delete_workdir=True,\n result_store=None,\n optional_dict={\n 'model_filepath': VmafConfig.test_resource_path('model', 'vmafplus_v0.5.2boot_test.pkl'),\n },\n )\n self.runner.run()\n\n results = self.runner.results\n\n self.assertAlmostEqual(results[0]['VMAF_score'], 75.44304862545658, places=4)\n self.assertAlmostEqual(results[1]['VMAF_score'], 99.95804893252175, places=4)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n"
] | [
[
"numpy.random.seed",
"numpy.mean"
]
] |
KanHatakeyama/anneal_project2 | [
"e9b5e776f4ac1d202b952c9b6fb4ffaee833441d"
] | [
"lib/MoleculeRegressor.py"
] | [
"import numpy as np\nfrom sklearn.base import BaseEstimator, RegressorMixin\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import SGDRegressor\nfrom Fingerprint import Fingerprint\nfrom rdkit.Avalon.pyAvalonTools import GetAvalonFP\n\nFP = Fingerprint()\n\n\nclass MoleculeRegressor(BaseEstimator, RegressorMixin):\n\n \"\"\"\n wrapper class of rdkit and sklearn. it can input smiles string as X, converting to fingerprint\n\n Attributes\n ----------\n model : object\n sklearn regressor\n FP: object\n class to calculate fingerprint\n auto_scaling: bool\n if true, scale y automatically\n \"\"\"\n\n def __init__(self,\n model=SGDRegressor(),\n FP=FP,\n auto_scaling=True\n ):\n self.model = model\n self.FP = FP\n self.auto_scaling = auto_scaling\n self.scaler = StandardScaler()\n\n def _calc_fp(self, X):\n fp, _ = self.FP.calc_fingerprint(X)\n return np.array(fp)\n\n def fit(self, X, y):\n # scale y\n if self.auto_scaling:\n y = self.scaler.fit_transform(y.reshape(-1, 1))\n\n # calc fingerprint\n self.model.fit(self._calc_fp(X), y)\n\n self.coef_ = self.model.coef_\n return self\n\n def predict(self, X):\n fp = self._calc_fp(X)\n pred_y = self._predict(fp)\n return pred_y\n\n def _predict(self, fp):\n pred_y = self.model.predict(fp)\n if self.auto_scaling:\n return self.scaler.inverse_transform(pred_y)\n else:\n return pred_y\n"
] | [
[
"numpy.array",
"sklearn.linear_model.SGDRegressor",
"sklearn.preprocessing.StandardScaler"
]
] |
esceptico/toxic | [
"b337df1790d4fd3a253522a38b91888705770822"
] | [
"src/toxic/modelling/encoder.py"
] | [
"from typing import Sequence, Tuple\n\nimport torch\nfrom torch import nn\n\nfrom src.toxic.modelling.modules import Conv1dMaxPooling\n\n\nclass WideCNNEncoder(nn.Module):\n \"\"\"Convolutional sentence encoder\n\n References:\n Convolutional Neural Networks for Sentence Classification\n https://arxiv.org/abs/1408.5882\n \"\"\"\n def __init__(\n self,\n token_embedding_size: int = 32,\n vocab_size: int = 256,\n filters: Sequence[Tuple[int, int]] = ((1, 4), (2, 8), (3, 16)),\n dropout: float = 0.2,\n projection_size: int = 256\n ):\n \"\"\"Constructor\n\n Args:\n token_embedding_size (int): Size of token embedding.\n Defaults to `32`.\n vocab_size (int): Number of token dictionary. Defaults to `256`.\n filters (Sequence[Tuple[int, int]]): Sequence of\n [kernel_size, out_channels] tuples.\n dropout (float): Dropout value.\n projection_size (int): Output layer size. Defaults to `256`\n \"\"\"\n super().__init__()\n self.projection_size = projection_size\n self.token_embedding = nn.Embedding(\n vocab_size, token_embedding_size, padding_idx=0\n )\n self.convolutions = nn.ModuleList([\n Conv1dMaxPooling(\n in_channels=token_embedding_size,\n out_channels=out_size,\n kernel_size=width\n ) for width, out_size in filters\n ])\n projection_input_size = sum(out_size for _, out_size in filters)\n self.dropout = nn.Dropout(dropout)\n self.projection = nn.Linear(projection_input_size, projection_size)\n\n def forward(self, inputs):\n token_embedding = self.token_embedding(inputs).transpose(1, 2)\n conv = [conv(token_embedding) for conv in self.convolutions]\n conv = torch.cat(conv, dim=-1)\n conv = self.dropout(conv)\n return self.projection(conv)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Embedding"
]
] |
timudk/uwEnergyHacks | [
"ddb2966fe98b8b51f9bc584ef497a1b24264de67"
] | [
"make_datasets/data_visualization.py"
] | [
"import numpy as np \nimport matplotlib.pyplot as plt\nimport pickle\nimport math\nimport matplotlib.animation as animation\n\ndef compute_nx_ny(k):\n\tn_x = k%4\n\tn_y = int((k-n_x)/4)\n\n\treturn n_x, n_y\n\ndef read_outside_temp(filename):\n\toutside_temp = np.loadtxt(filename)\n\n\treturn outside_temp\n\ndef read_data(filename):\n\twith open(filename, 'rb') as f:\n\t\treturn pickle.load(f)\n\ndef compute_24_hours_matrix(outside_temp, data, frames_per_min):\n\ttime_range = 24*frames_per_min\n\n\ttemperatures = np.ones((time_range, 5, 6))\n\n\tcurrent_time = 0.0\n\tfor i in range(time_range):\n\t\tcurrent_time = 24*(i/time_range)\n\t\ttemperatures[i].fill(outside_temp[math.floor(current_time)])\n\t\tfor j in range(16): \n\t\t\twanted_avg = compute_average_in_office(current_time, data[j][0])\n\t\t\tif np.isnan(wanted_avg):\n\t\t\t\twanted_avg = 17\n\t\t\tnum_x, num_y = compute_nx_ny(j)\n\t\t\tnum_y += 1\n\t\t\tnum_x += 1\n\n\t\t\tif i==0:\n\t\t\t\ttemperatures[i, num_x, num_y] = wanted_avg\n\n\t\t\telse:\n\t\t\t\ttemperatures[i, num_x, num_y] = (wanted_avg + temperatures[i-1, num_x, num_y])/2\n\n\treturn temperatures\n\t\t\ndef compute_average_in_office(time, data):\n\twho_in_office = []\n\n\tfor i in range(4):\n\t\tif time >= data[i][0] and data[i][1] >= time:\n\t\t\twho_in_office.append(i)\n\n\tn_people_in_office = len(who_in_office)\n\tif n_people_in_office == 0:\n\t\treturn np.nan\n\n\tsum_temp = 0.0\n\tfor j in who_in_office:\n\t\tsum_temp += get_temp(time, data[j][2])\n\n\treturn sum_temp/n_people_in_office\n\ndef get_temp(time, data):\n\ttemp = data[0][0]\n\tfor i in range(10):\n\t\tif time > data[i][1]:\n\t\t\ttemp = data[i][0]\n\treturn temp\n\n\ndef main():\n\toutside_temp = read_outside_temp('day_toronto')\n\tdata = read_data('section_data')\n\t\n\tN_FRAMES_PER_MINUTE = 12\n\n\ttemp = compute_24_hours_matrix(outside_temp, data, N_FRAMES_PER_MINUTE)\n\n\tfor i in range(24*N_FRAMES_PER_MINUTE):\n\t\tfig = plt.figure()\n\t\tplt.imshow(temp[i], cmap=plt.cm.RdBu_r, interpolation='nearest')\n\t\tplt.plot([0.5, 0.5], [0.5, 4.5], 'k')\n\t\tplt.plot([4.5, 4.5], [0.5, 4.5], 'k')\n\t\tfor j in range(5):\n\t\t\tplt.plot([0.5, 4.5], [j+0.5, j+0.5], 'k')\n\n\t\tplt.axis('off')\n\t\tplt.colorbar()\n\t\tplt.clim(0.0, 28.0)\n\t\t# ims.append([im])\n\t\t# plt.show()\n\t\tif(i > 99):\n\t\t\tplt.savefig('picfolder/00' + str(i) + '.jpeg')\n\t\telif(i>9):\n\t\t\tplt.savefig('picfolder/000' + str(i) + '.jpeg')\n\t\telse:\n\t\t\tplt.savefig('picfolder/0000' + str(i) + '.jpeg')\n\n\n\nif __name__ == '__main__':\n\tmain()"
] | [
[
"matplotlib.pyplot.colorbar",
"numpy.isnan",
"matplotlib.pyplot.clim",
"matplotlib.pyplot.plot",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
] |
seblars/AdventOfCode2020 | [
"dc41181caa50fe03645aa36d70fe0ebd76cd6e25"
] | [
"day17.py"
] | [
"import numpy as np\r\nimport fileinput\r\n\r\nd = np.array([[1 if j == '#' else 0 for j in list(i)]\r\n for i in \"\".join(fileinput.input()).split('\\n')])\r\n\r\ndef sumNN(d, idx):\r\n return np.sum(d[tuple([slice(i-1,i+2) for i in idx])])\r\n \r\ndef cubeConv(d, n_cycles=6):\r\n\r\n for _ in range(n_cycles):\r\n d = np.pad(d,1)\r\n d_new = d.copy()\r\n mg = [np.arange(1,i-1) for i in d.shape]\r\n ind = np.array([i.ravel() for i in np.meshgrid(*mg)]).T\r\n\r\n for idx in ind:\r\n idx = tuple(idx)\r\n nn = sumNN(d,idx)\r\n if (d[idx] == 1) & (nn < 3) | (nn > 4):\r\n d_new[idx] = 0\r\n elif (d[idx] == 0) & (nn == 3):\r\n d_new[idx] = 1\r\n\r\n d = d_new.copy()\r\n \r\n print(d.sum())\r\n\r\n# part 1\r\nd1 = np.pad(d.reshape(1,*d.shape),1)\r\ncubeConv(d1)\r\n\r\n# part 2\r\nd2 = np.pad(d.reshape(*([1]*2),*d.shape), 1)\r\ncubeConv(d2)\r\n"
] | [
[
"numpy.meshgrid",
"numpy.pad",
"numpy.arange"
]
] |
ghaggart/nPYc-Toolbox | [
"d0160b476581fbd695f3f5f0303048466ed95864"
] | [
"nPYc/batchAndROCorrection/_batchAndROCorrection.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport types\nimport numpy\nimport scipy\nimport warnings\nfrom scipy.signal import savgol_filter\nimport statsmodels.api as sm\nlowess = sm.nonparametric.lowess\nimport logging\nfrom scipy.signal import savgol_filter\nimport time\nimport sys\nimport copy\nfrom datetime import datetime, timedelta\nfrom ..objects._msDataset import MSDataset\nfrom ..enumerations import AssayRole, SampleType\n\n\ndef correctMSdataset(data, window=11, method='LOWESS', align='median', parallelise=True, excludeFailures=True, correctionSampleType=SampleType.StudyPool):\n\t\"\"\"\n\tConduct run-order correction and batch alignment on the :py:class:`~nPYc.objects.MSDataset` instance *data*, returning a new instance with corrected intensity values.\n\n\tSample are seperated into batches acording to the *'Correction Batch'* column in *data.sampleMetadata*.\n\n\t:param data: MSDataset object with measurements to be corrected\n\t:type data: MSDataset\n\t:param int window: When calculating trends, consider this many reference samples, centred on the current position\n\t:param str method: Correction method, one of 'LOWESS' (default), 'SavitzkyGolay' or None for no correction\n\t:param str align: Average calculation of batch and feature intensity for correction, one of 'median' (default) or 'mean'\n\t:param bool parallelise: If ``True``, use multiple cores\n\t:param bool excludeFailures: If ``True``, remove features where a correct fit could not be calculated from the dataset\n\t:param enum correctionSampleType: Which SampleType to use for the correction, default SampleType.StudyPool\n\t:return: Duplicate of *data*, with run-order correction applied\n\t:rtype: MSDataset\n\t\"\"\"\n\timport copy\n\n\t# Check inputs\n\tif not isinstance(data, MSDataset):\n\t\traise TypeError(\"data must be a MSDataset instance\")\n\tif not isinstance(window, int) & (window>0):\n\t\traise TypeError('window must be a positive integer')\n\tif method is not None:\n\t\tif not isinstance(method, str) & (method in {'LOWESS', 'SavitzkyGolay'}):\n\t\t\traise ValueError('method must be == LOWESS or SavitzkyGolay')\n\tif not isinstance(align, str) & (align in {'mean', 'median'}):\n\t\traise ValueError('align must be == mean or median')\n\tif not isinstance(parallelise, bool):\n\t\traise TypeError(\"parallelise must be a boolean\")\n\tif not isinstance(excludeFailures, bool):\n\t\traise TypeError(\"excludeFailures must be a boolean\")\n\tif not isinstance(correctionSampleType,SampleType):\n\t\traise TypeError(\"correctionType must be a SampleType\")\n\n\twith warnings.catch_warnings():\n\t\twarnings.simplefilter('ignore', category=RuntimeWarning)\n\n\t\tcorrectedP = _batchCorrectionHead(data.intensityData,\n\t\t\t\t\t\t\t\t\t data.sampleMetadata['Run Order'].values,\n\t\t\t\t\t\t\t\t\t (data.sampleMetadata['SampleType'].values == correctionSampleType) & (data.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference),\n\t\t\t\t\t\t\t\t\t data.sampleMetadata['Correction Batch'].values,\n\t\t\t\t\t\t\t\t\t window=window,\n\t\t\t\t\t\t\t\t\t method=method,\n\t\t\t\t\t\t\t\t\t align=align,\n\t\t\t\t\t\t\t\t\t parallelise=parallelise)\n\n\tcorrectedData = copy.deepcopy(data)\n\tcorrectedData.intensityData = correctedP[0]\n\tcorrectedData.fit = correctedP[1]\n\tcorrectedData.Attributes['Log'].append([datetime.now(),'Batch and run order correction applied'])\n\n\treturn correctedData\n\n\ndef _batchCorrectionHead(data, runOrder, referenceSamples, batchList, window=11, method='LOWESS', align='median', 
parallelise=True, savePlots=False):\n\t\"\"\"\n\tConduct run-order correction and batch alignment.\n\n\t:param data: Raw *n* × *m* numpy array of measurements to be corrected\n\t:type data: numpy.array\n\t:param runOrder: *n* item list of order of analysis\n\t:type runOrder: numpy.series\n\t:param referenceSamples: *n* element boolean array indicating reference samples to base the correction on\n\t:type referenceSamples: numpy.series\n\t:param batchList: *n* item list of correction batch, defines sample groupings into discrete batches for correction\n\t:type batchList: numpy.series\n\t:param int window: When calculating trends, use a consider this many reference samples, centred on the current position\n\t:param str method: Correction method, one of 'LOWESS' (default), 'SavitzkyGolay' or None for no correction\n\t:param str align: Average calculation of batch and feature intensity for correction, one of 'median' (default) or 'mean'\n\t\"\"\"\n\t# Validate inputs\n\tif not isinstance(data, numpy.ndarray):\n\t\traise TypeError('data must be a numpy array')\n\tif not isinstance(runOrder, numpy.ndarray):\n\t\traise TypeError('runOrder must be a numpy array')\n\tif not isinstance(referenceSamples, numpy.ndarray):\n\t\traise TypeError('referenceSamples must be a numpy array')\n\tif not isinstance(batchList, numpy.ndarray):\n\t\traise TypeError('batchList must be a numpy array')\n\tif not isinstance(window, int) & (window>0):\n\t\traise TypeError('window must be a positive integer')\n\tif method is not None:\n\t\tif not isinstance(method, str) & (method in {'LOWESS', 'SavitzkyGolay'}):\n\t\t\traise ValueError('method must be == LOWESS or SavitzkyGolay')\t\n\tif not isinstance(align, str) & (align in {'mean', 'median'}):\n\t\t\traise ValueError('align must be == mean or median')\n\tif not isinstance(parallelise, bool):\n\t\traise TypeError('parallelise must be True or False')\n\tif not isinstance(savePlots, bool):\n\t\traise TypeError('savePlots must be True or False')\n\n\t# Store paramaters in a dict to avoid arg lists going out of control\n\tparameters = dict()\n\tparameters['window'] = window\n\tparameters['method'] = method\n\tparameters['align'] = align\n\n\tif parallelise:\n\t\t# Set up multiprocessing enviroment\n\t\timport multiprocessing\n\t\t\n\t\t# Generate an index and set up pool\n\t\t# Use one less workers than CPU cores\n\t\tif multiprocessing.cpu_count()-1 <= 0:\n\t\t\tcores = 1\n\t\telse: \n\t\t\tcores = multiprocessing.cpu_count()-1\n\n\t\tpool = multiprocessing.Pool(processes=cores)\n\n\t\tinstances = range(0, cores)\n\n\t\t# Break features into no cores chunks\n\t\tfeatureIndex = _chunkMatrix(range(0, data.shape[1]), cores)\n\n\t\t# run _batchCorection\n\t\t##\n\t\t# At present pickle args and returns and reassemble after - possiblly share memory in the future.\n\t\t##\n\t\tresults2 = [pool.apply_async(_batchCorrection, args=(data, runOrder, referenceSamples, batchList, featureIndex, parameters, w)) for w in instances]\n\n\t\tresults2 = [p.get(None) for p in results2]\n\n\t\tresults = list()\n\t\t# Unpack results\n\t\tfor instanceOutput in results2:\n\t\t\tfor item in instanceOutput:\n\t\t\t\tresults.append(item)\n\n\t\t# Shut down the pool\n\t\tpool.close()\n\n\n\telse:\n\t\t# Just run it\n\t\t# Iterate over features in one batch and correct them\n\t\tresults = _batchCorrection(data, \n\t\t\t\t\t\t\t\t runOrder,\n\t\t\t\t\t\t\t\t referenceSamples,\n\t\t\t\t\t\t\t\t batchList,\n\t\t\t\t\t\t\t\t range(0, data.shape[1]), # All features\n\t\t\t\t\t\t\t\t 
parameters,\n\t\t\t\t\t\t\t\t 0)\n\n\tcorrectedData = numpy.empty_like(data)\n\tfits = numpy.empty_like(data)\n\n\t# Extract return values from tuple\n\tfor (w, feature, fit) in results:\n\t\tcorrectedData[:, w] = feature\n\t\tfits[:, w] = fit\n\n\treturn (correctedData, fits)\n\n\ndef _batchCorrection(data, runOrder, QCsamples, batchList, featureIndex, parameters, w):\n\t\"\"\"\n\tBreak the dataset into batches to be corrected together.\n\t\"\"\"\n\n\t# Check if we have a list of lists, or just one list:\n\tif isinstance(featureIndex[0], range):\n\t\tfeatureList = featureIndex[w]\n\telse:\n\t\tfeatureList = range(0, len(featureIndex))\n\n\t# add results to this list:\n\tresults = list()\n\t\n\t# Loop over all elements in featureList\n\tfor i in featureList:\n\n\t\t# Create a matrix to be used with `nonlocal` to store fits\n\t\ttry:\n\t\t\tfeature = copy.deepcopy(data[:,i])\n\t\texcept IndexError:\n\t\t\tfeature = copy.deepcopy(data)\n\t\tfit = numpy.empty_like(feature)\n\t\tfit.fill(numpy.nan)\n\t\t\t\n\t\t# Identify number of unique batches\n\t\tbatches = list(set(batchList))\n\n\t\t# Get overall average intensity\n\t\tif parameters['align'] == 'mean':\n\t\t\tfeatureAverage = numpy.mean(feature[QCsamples])\n\t\telif parameters['align'] == 'median':\n\t\t\tfeatureAverage = numpy.median(feature[QCsamples])\n\t\telse:\n\t\t\treturn numpy.zeros_like(data)\n\t\t\t\t\n\t\t# Iterate over batches.\n\t\tfor batch in batches:\n\t\t\t# Skip the NaN batch\n\t\t\tif not numpy.isnan(batch):\n\n\t\t\t\tbatchMask = numpy.squeeze(numpy.asarray(batchList == batch, 'bool'))\n\n\t\t\t\tif parameters['method'] == None:\n\t\t\t\t\t# Skip RO correction if method is none\n\t\t\t\t\tpass\n\t\t\t\telse:\n\n\t\t\t\t\t(feature[batchMask], fit[batchMask]) = runOrderCompensation(feature[batchMask],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trunOrder[batchMask],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tQCsamples[batchMask],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tparameters)\n\n\t\t\t\t# Correct batch average to overall feature average\n\t\t\t\tif parameters['align'] == 'mean':\n\t\t\t\t\tbatchMean = numpy.mean(feature[batchMask & QCsamples])\n\t\t\t\telif parameters['align'] == 'median':\n\t\t\t\t\tbatchMean = numpy.median(feature[batchMask & QCsamples])\n\t\t\t\telse:\n\t\t\t\t\tbatchMean = numpy.nan_like(feature[batchMask])\n\n\t\t\t\tfeature[batchMask] = numpy.divide(feature[batchMask], batchMean)\n\t\t\t\tfeature[batchMask] = numpy.multiply(feature[batchMask], featureAverage)\n\t\t\t\t\n#\t\t\t\t# If negative data mark for exclusion (occurs when too many QCsamples have intensity==0)\n#\t\t\t\tif sum(feature[batchMask]<0) != 0: # CJS 240816\n#\t\t\t\t\texclude = exclude + '; negativeData=' + str(sum(feature[batchMask]<0))\n\n#\t\tresults.append((i, feature, fit, exclude)) # CJS 240816\n\t\tresults.append((i, feature, fit))\n\n\treturn results\n\n\ndef runOrderCompensation(data, runOrder, referenceSamples, parameters):\n\t\"\"\"\n\tModel and remove longitudinal effects.\n\t\"\"\"\n\n\t# Break the QCs out of the dataset\n\tQCdata = data[referenceSamples]\n\tQCrunorder = runOrder[referenceSamples]\n\n\t# Select model\n\t# Optimisation of window would happen here.\n\twindow = parameters['window']\n\tif parameters['method'] == 'LOWESS':\n\t\t(data, fit) = doLOESScorrection(QCdata, \n\t\t\t\t\t\t\t\t\t\tQCrunorder, \n\t\t\t\t\t\t\t\t\t\tdata, \n\t\t\t\t\t\t\t\t\t\trunOrder, \n\t\t\t\t\t\t\t\t\t\twindow=window)\n\telif parameters['method'] == 'SavitzkyGolay':\n\t\t(data, fit) = doSavitzkyGolayCorrection(QCdata, 
\n\t\t\t\t\t\t\t\t\t\t\t\tQCrunorder, \n\t\t\t\t\t\t\t\t\t\t\t\tdata, \n\t\t\t\t\t\t\t\t\t\t\t\trunOrder, \n\t\t\t\t\t\t\t\t\t\t\t\twindow=window)\n\n\t# Potentially exclude features with poor fits that retuned NaN &c here.\n\t\n\treturn (data, fit)\n\n\ndef doLOESScorrection(QCdata, QCrunorder, data, runorder, window=11):\n\t\"\"\"\n\tFit a LOWESS regression to the data.\n\t\"\"\"\n\t# Convert window number of samples to fraction of the dataset:\n\tnoSamples = QCrunorder.shape\n\n\tif noSamples == 0:\n\n\t\tfit = numpy.zeros(shape=runorder.shape)\n\t\tcorrected = data\n\n\telse:\n\t\tfrac = window / float(numpy.squeeze(noSamples))\n\t\tfrac = min([1, frac])\n\t\t# actually do the work\n\t\tz = lowess(QCdata, QCrunorder, frac=frac)\n\n\t\t# Divide by fit, then rescale to batch median\n\t\tfit = numpy.interp(runorder, z[:,0], z[:,1])\n\t\n\t\t# Fit can go negative if too many adjacent QC samples == 0; set any negative fit values to zero\n\t\tfit[fit < 0] = 0\n\n\t\tcorrected = numpy.divide(data, fit)\n\t\tcorrected = numpy.multiply(corrected, numpy.median(QCdata))\n\n\treturn (corrected, fit)\n\n\ndef doSavitzkyGolayCorrection(QCdata, QCrunorder, data, runorder, window=11, polyOrder=3):\n\t\"\"\"\n\tFit a Savitzky-Golay curve to the data.\n\t\"\"\"\n\t# Sort the array\n\tsortedRO = numpy.argsort(QCrunorder)\n\tsortedRO2 = QCrunorder[sortedRO]\n\tQCdataSorted = QCdata[sortedRO]\n\n\t# actually do the work\n\tz = savgol_filter(QCdataSorted, window, polyOrder)\n\n\tfit = numpy.interp(runorder, sortedRO2, z)\n\n\tcorrected = numpy.divide(data, fit)\n\tcorrected = numpy.multiply(corrected, numpy.median(QCdata))\n\n\treturn (corrected, fit)\n\n\ndef optimiseCorrection(feature, optimise):\n\t\"\"\"\n\tOptimise the window function my mimising the output of `optimise(data)`\n\t\"\"\"\n\tpass\n\n\n##\n# Adapted from http://stackoverflow.com/questions/2130016/splitting-a-list-of-arbitrary-size-into-only-roughly-n-equal-parts\n## \ndef _chunkMatrix(seq, num):\n\tavg = round(len(seq) / float(num))\n\tout = []\n\tlast = 0.0\n\n\tfor i in range(0, num-1):\n\t\tout.append(seq[int(last):int(last + avg)])\n\t\tlast += avg\n\tout.append(seq[int(last):max(seq)+1])\n\n\treturn out\n"
] | [
[
"numpy.divide",
"numpy.zeros_like",
"numpy.isnan",
"scipy.signal.savgol_filter",
"numpy.asarray",
"numpy.zeros",
"numpy.median",
"numpy.interp",
"numpy.mean",
"numpy.multiply",
"numpy.argsort",
"numpy.nan_like",
"numpy.squeeze",
"numpy.empty_like"
]
] |
Konrad337/MINST_nn_and_misc | [
"9db2295991eed5d91214ddd4912349c31d50286b"
] | [
"src/classic_nn.py"
] | [
"import numpy as np\nfrom mnist_file_tools import get_bytes, get_input_layer, get_label\nimport print_network\nfrom graphics import GraphWin\nimport matplotlib.pyplot as plt\nimport math\n\n\ndraw_iter = 1000\n\n\ndef softmax(x):\n exps = np.exp(x - x.max())\n return exps / np.sum(exps)\n# Copy paste softmax\n\n\ndef nonlin(x, deriv=False):\n if deriv is True:\n return (x)*(1 - (x))\n x = np.clip(x, -500, 500)\n return 1/(1+np.exp(-x))\n# sigmoid function\n\n\ndef update_line(hl, new_data):\n hl.set_xdata(np.append(hl.get_xdata(), new_data[0]))\n hl.set_ydata(np.append(hl.get_ydata(), new_data[1]))\n# Update graph\n\n\nclass simpleNN:\n\n def __init__(self):\n print('\\n')\n\n def nn(self, print_net=True, draw_cost_plot=True,\n draw_guess_plot=True, draw_synapses_plot=False, draw_synapses=False,\n layer_size=50, number_of_layers=3, learning_rate=0.00001):\n\n set = '../data/train-images'\n labels = '../data/train-labels'\n np.random.seed(1)\n train_set = open(set, 'rb')\n label_set = open(labels, 'rb')\n\n m_n = get_bytes(train_set)\n if m_n != 2051:\n raise Exception('Wrong magic number ' + str(m_n))\n m_n = get_bytes(label_set)\n if m_n != 2049:\n raise Exception('Wrong magic number ' + str(m_n))\n # Checking magic numbers\n\n set_size = get_bytes(train_set)\n get_bytes(label_set)\n rows = get_bytes(train_set)\n columns = get_bytes(train_set)\n # loading data\n\n synapses = 2*np.random.random((number_of_layers-1,\n layer_size + 1,\n layer_size)) - 1\n output_synapses = 2*np.random.random((layer_size + 1, 10)) - 1\n input_synapses = 2*np.random.random((rows * columns + 1, layer_size)) - 1\n layers = np.zeros((number_of_layers, layer_size + 1), dtype=np.float128)\n min_output_synapses = math.inf\n max_output_synapses = -math.inf\n # init\n\n if print_net:\n net_win = GraphWin('Neural Net', 1600, 1000, autoflush=False)\n if draw_cost_plot:\n if draw_guess_plot:\n plt.subplot(211)\n cost_plot, = plt.plot([], [], 'b-')\n plt.xlabel('iter')\n plt.ylabel('cost')\n plt.axis([0, set_size, 0, 10])\n if draw_guess_plot:\n if draw_cost_plot:\n plt.subplot(212)\n guess_plot, = plt.plot([], [], 'b-')\n plt.xlabel('iter')\n plt.ylabel('guesses percentage')\n plt.axis([0, set_size, 0, 100])\n if draw_synapses_plot:\n plt.figure()\n synapses_plot = []\n for i in range(output_synapses.size):\n synapses_plot.append(plt.plot([], [], '--')[0])\n plt.xlabel('iter')\n plt.ylabel('synapses_val')\n # Setting up plots\n\n guessed = 0\n cost_arr = np.zeros(draw_iter) + 10\n\n ###############################################################################\n\n for iter in range(set_size):\n\n input_layer = np.append(np.resize(\n get_input_layer(train_set, rows, columns),\n (rows*columns)) / 255, [1])\n # Loading data with normalization (data points in range 0-255)\n\n layers[0, :-1] = nonlin(np.dot(input_layer,\n input_synapses))\n layers[0, -1] = 1\n # Using RELU for first layer\n\n for i in range(1, number_of_layers):\n layers[i, :-1] = nonlin(np.dot(layers[i-1, :],\n synapses[i-1, :]))\n layers[i, -1] = 1\n # Forward prop - calculating layers with sigmoid fun\n\n output_layer = softmax(np.dot(layers[number_of_layers-1],\n output_synapses))\n\n output_correct = np.zeros(10, dtype=float)\n correct_number = get_label(label_set)\n\n output_correct[correct_number] = 1\n # Getting correct output for last layer\n\n if np.argmax(output_layer) == correct_number:\n guessed += 1\n # Calculating how many good guesses would we get\n\n # output_delta = -1 * output_correct * 1/output_layer \\\n # + (1 - output_correct) * 1/(1 - 
output_layer)\n # dE / dOout\n output_delta = output_layer - output_correct\n\n cost_arr[iter % draw_iter] = np.sum((output_delta)**2)\n # Cost for statistics\n\n output_w_influence = np.dot(layers[number_of_layers-1][:, None],\n (output_delta)[None, :])\n # Calculating delta and w_influence for last,\n # where w_influence is influence of weights on delta\n\n w_influence = np.zeros((number_of_layers, layer_size + 1, layer_size),\n dtype=np.float128)\n delta = np.zeros((number_of_layers, layer_size + 1), dtype=np.float128)\n delta[number_of_layers-1] = output_delta \\\n .dot(output_synapses.T)\n\n for i in range(number_of_layers-2, 0, -1):\n w_influence[i] = np.dot(layers[i][:, None], (delta[i+1, :-1])[None, :])\n delta[i] = delta[i+1, :-1].dot(synapses[i].T)\n if number_of_layers > 1:\n w_influence[0] = np.dot(layers[0][:, None], (delta[1, :-1])[None, :])\n delta[0] = delta[1, :-1].dot(synapses[0].T)\n input_w_influence = np.dot(input_layer[:, None], (delta[0, :-1])[None, :])\n # Calculating deltas and w_influences for others\n\n output_synapses -= output_w_influence * learning_rate\n input_synapses -= input_w_influence * learning_rate\n\n for i in range(0, number_of_layers - 1):\n synapses[i] -= w_influence[i] * learning_rate\n # Changing syanpses\n\n ###################\n\n if iter % draw_iter == 0:\n cost = np.sum(cost_arr) / draw_iter\n print(\"Iteration {0}\\tAverage cost: {1:0.1f}, Guessed correct: {2}\"\n .format(iter, cost, guessed))\n if print_net:\n print_network.print_net(layers, (synapses),\n output_layer, (output_synapses),\n net_win, np.argmax(output_correct),\n cost, draw_synapses)\n if False:\n print_network.print_vizualized_net(input_layer, (input_synapses), layers, (synapses),\n output_layer, (output_synapses),\n net_win, np.argmax(output_correct),\n cost, columns, rows)\n if draw_cost_plot:\n update_line(cost_plot, [iter, cost])\n if draw_guess_plot:\n update_line(guess_plot, [iter, guessed/draw_iter*100])\n if draw_synapses_plot:\n min_output_synapses = min(np.amin(output_synapses),\n min_output_synapses)\n max_output_synapses = max(np.amax(output_synapses),\n max_output_synapses)\n plt.axis([0, set_size, min_output_synapses, max_output_synapses])\n for i in range(output_synapses.size):\n update_line(synapses_plot[i],\n [iter,\n np.resize(output_synapses,\n output_synapses.size)[i]])\n if draw_cost_plot or draw_guess_plot or draw_synapses_plot:\n plt.draw()\n plt.pause(0.001)\n guessed = 0\n\n self.layers = layers\n self.input_synapses = input_synapses\n self.synapses = synapses\n self.output_synapses = output_synapses\n self.number_of_layers = number_of_layers\n\n ###############################################################################\n #\n #\n #\n #\n #\n #\n\n\n def check_neural(self):\n\n layers = self.layers\n input_synapses = self.input_synapses\n synapses = self.synapses\n output_synapses = self.output_synapses\n number_of_layers = self.number_of_layers\n\n test_data = '../data/images'\n test_labels = '../data/labels'\n\n train_set = open(test_data, 'rb')\n label_set = open(test_labels, 'rb')\n\n m_n = get_bytes(train_set)\n if m_n != 2051:\n raise Exception('Wrong magic number ' + str(m_n))\n m_n = get_bytes(label_set)\n if m_n != 2049:\n raise Exception('Wrong magic number ' + str(m_n))\n\n set_size = get_bytes(train_set)\n get_bytes(label_set)\n rows = get_bytes(train_set)\n columns = get_bytes(train_set)\n\n guessed = 0\n for iter in range(set_size):\n input_layer = np.append(np.resize(\n get_input_layer(train_set, rows, columns),\n (rows*columns)), 
[1]) / 255\n # Loading data with normalization (data points in range 0-255)\n\n layers[0, :-1] = nonlin(np.dot(input_layer,\n input_synapses))\n layers[0, -1] = 1\n # Using RELU for first layer\n\n for i in range(1, number_of_layers):\n layers[i, :-1] = nonlin(np.dot(layers[i-1, :],\n synapses[i-1, :]))\n layers[i, -1] = 1\n # Forward prop - calculating layers with sigmoid fun\n\n output_layer = softmax(np.dot(layers[number_of_layers-1],\n output_synapses))\n\n output_correct = np.zeros(10, dtype=float)\n correct_number = get_label(label_set)\n\n output_correct[correct_number] = 1\n # Getting correct output for last layer\n\n if np.argmax(output_layer) == correct_number:\n guessed += 1\n # Calculating how many good guesses would we get\n\n if iter % 1000 == 0 and iter > 0:\n print(\"Iteration {0}\\t, Guessed correct: {1}/{2}\"\n .format(iter, guessed, set_size))\n print(\"Guessed with percentage \" + str(guessed/set_size*100) + \"%\\n\")\n"
] | [
[
"numpy.dot",
"numpy.exp",
"numpy.resize",
"matplotlib.pyplot.draw",
"numpy.random.random",
"numpy.argmax",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.amax",
"numpy.amin",
"numpy.clip",
"numpy.random.seed",
"numpy.sum",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ylabel"
]
] |
avisherwood/DovPandaDev | [
"fd8f30b6dd243bdc1b3a4f3dbeab2b9f52c4f553"
] | [
"dovpanda/core.py"
] | [
"import os\r\n\r\nimport numpy as np\r\nfrom dateutil import parser\r\n\r\nfrom dovpanda import base, config\r\nfrom dovpanda.base import Ledger\r\n\r\nledger = Ledger()\r\n\r\n\r\[email protected]_hint('DataFrame.iterrows')\r\ndef iterrows_is_bad(arguments):\r\n ledger.tell(\"iterrows is not recommended, and in the majority of cases will have better alternatives\")\r\n\r\n\r\[email protected]_hint('DataFrame.groupby')\r\ndef time_grouping(arguments):\r\n by = base.setify(arguments.get('by'))\r\n time_cols = set(config.TIME_COLUMNS).intersection(by)\r\n num_cols = len(time_cols)\r\n cols = ', '.join([str(col) for col in time_cols])\r\n if num_cols <= 0:\r\n return\r\n elif num_cols == 1:\r\n first_line = f\"a column\"\r\n else:\r\n first_line = f\"columns\"\r\n\r\n ledger.tell(f\"Seems like you are grouping by {first_line} named <strong>{cols}</strong>.<br>\"\r\n f\"consider setting the time column as \"\r\n f\"index and then use df.resample('time abbrevations'), for example:<br>\"\r\n f\"<code>df.set_index('date').resample('h')</code>\")\r\n\r\n\r\[email protected]_hint('concat', hook_type='post')\r\ndef duplicate_index_after_concat(res, arguments):\r\n if res.index.nunique() != len(res.index):\r\n ledger.tell('After concatenation you have duplicated indexes values - pay attention')\r\n if res.columns.nunique() != len(res.columns):\r\n ledger.tell('After concatenation you have duplicated column names - pay attention')\r\n\r\n\r\[email protected]_hint('concat')\r\ndef concat_single_column(arguments):\r\n objs = arguments.get('objs')\r\n axis = arguments.get('axis')\r\n cols = {df.shape[1] for df in objs}\r\n if axis == 1 and 1 in cols:\r\n ledger.tell(\r\n 'One of the dataframes you are concatenating is with a single column, '\r\n 'consider using `df.assign()` or `df.insert()`')\r\n\r\n\r\[email protected]_hint('concat')\r\ndef wrong_concat_axis(arguments):\r\n objs = arguments.get('objs')\r\n axis = arguments.get('axis')\r\n rows = {df.shape[0] for df in objs}\r\n cols = {df.shape[1] for df in objs}\r\n col_names = set.union(*[set(df.columns) for df in objs])\r\n same_cols = (len(cols) == 1) and (len(col_names) == list(cols)[0])\r\n same_rows = (len(rows) == 1)\r\n axis_translation = {0: 'vertically', 1: 'horizontally'}\r\n if same_cols and not same_rows:\r\n if axis == 1:\r\n ledger.tell(\"All dataframes have the same columns, which hints for concat on axis 0.\"\r\n \"You specified <code>axis=1</code> which may result in an unwanted behaviour\")\r\n elif same_rows and not same_cols:\r\n if axis == 0:\r\n ledger.tell(\"All dataframes have same number of rows, which hints for concat on axis 1.\"\r\n \"You specified <code>axis=0</code> which may result in an unwanted behaviour\")\r\n\r\n elif same_rows and same_rows:\r\n ledger.tell(\"All dataframes have the same columns and same number of rows. \"\r\n f\"Pay attention, your axis is {axis} which concatenates {axis_translation[axis]}\")\r\n\r\n\r\[email protected]_hint('DataFrame.__eq__')\r\ndef df_check_equality(arguments):\r\n print(arguments)\r\n if isinstance(arguments.get('self'), type(arguments.get('other'))):\r\n ledger.tell(f'Calling df1 == df2 compares the objects element-wise. '\r\n 'If you need a boolean condition, try df1.equals(df2)')\r\n\r\n\r\[email protected]_hint('Series.__eq__')\r\ndef series_check_equality(arguments):\r\n if isinstance(arguments.get('self'), type(arguments.get('other'))):\r\n ledger.tell(f'Calling series1 == series2 compares the objects element-wise. 
'\r\n 'If you need a boolean condition, try series1.equals(series2)')\r\n\r\n\r\[email protected]_hint('read_csv', 'post')\r\ndef csv_index(res, arguments):\r\n filename = arguments.get('filepath_or_buffer')\r\n if type(filename) is str:\r\n filename = \"'\" + filename + \"'\"\r\n else:\r\n filename = 'file'\r\n if 'Unnamed: 0' in res.columns:\r\n if arguments.get('index_col') is None:\r\n ledger.tell('Your left most column is unnamed. This suggets it might be the index column, try: '\r\n f'<code>pd.read_csv({filename}, index_col=0)</code>')\r\n\r\n\r\[email protected]_hint('read_csv', 'pre')\r\ndef check_csv_size(arguments):\r\n filename = arguments.get('filepath_or_buffer')\r\n if not os.path.exists(filename):\r\n return\r\n if os.path.getsize(filename) > config.MAX_CSV_SIZE:\r\n ledger.tell('File size is very large and may take time to load. '\r\n 'If you would like to avoid format issues before the complete file loads, '\r\n f'try: <code>pd.read_csv({filename}, nrows=5)</code> to check schema is as expected.')\r\n\r\n\r\[email protected]_hint(config.READ_METHODS, 'post')\r\ndef suggest_category_dtype(res, arguments):\r\n rows = res.shape[0]\r\n threshold = int(rows / config.CATEGORY_SHARE_THRESHOLD) + 1\r\n col_uniques = res.select_dtypes('object').nunique()\r\n if col_uniques.empty:\r\n return\r\n else:\r\n obj_type = (col_uniques\r\n .loc[lambda x: x <= threshold]\r\n .to_dict())\r\n for col, uniques in obj_type.items():\r\n if uniques == 2:\r\n dtype = 'boolean'\r\n arbitrary = res.loc[:, col].at[0]\r\n code = f\"df['{col}'] = (df['{col}'] == '{arbitrary}')\"\r\n else:\r\n dtype = 'categorical'\r\n code = f\"df['{col}'] = df['{col}'].astype('category')\"\r\n message = (f\"Dataframe has {rows} rows. Column <code>{col}</code> has only {uniques} values \"\r\n f\"which suggests it's a {dtype} feature.<br>\"\r\n f\"After df is created, Consider converting it to {dtype} by using \"\r\n f\"<code>{code}</code>\")\r\n ledger.tell(message)\r\n\r\n\r\ndef is_date_time_format(arr):\r\n \"\"\"\r\n Check if a given array is a in a datetime format\r\n Parameters\r\n ----------\r\n arr\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n try:\r\n list(map(parser.parse, arr))\r\n except (ValueError, TypeError):\r\n return False\r\n return True\r\n\r\n\r\ndef tell_time_dtype(col_name, arr):\r\n if not np.issubdtype(arr.dtype, np.datetime64):\r\n # Tthe content is in a datetime format but not in datetime type\r\n ledger.tell(f\"columns '{col_name}' looks like a datetime but the type is '{arr.dtype}' \"\r\n f\"Consider using<br>\"\r\n f\"<code>df['{col_name}'] = pd.to_datetime(df.{col_name})</code>\")\r\n\r\n\r\[email protected]_hint('DataFrame.insert')\r\ndef data_in_date_format_insert(arguments):\r\n column_name = arguments.get('column')\r\n value = arguments.get('value')\r\n value_array = np.asarray(value)\r\n if is_date_time_format(value_array):\r\n tell_time_dtype(column_name, value_array)\r\n\r\n\r\[email protected]_hint(config.READ_METHODS, 'post')\r\ndef data_in_date_format_read(res, arguments):\r\n for col in res.columns:\r\n value_array = np.asarray(res[col])\r\n if is_date_time_format(value_array):\r\n tell_time_dtype(col, value_array)\r\n\r\n\r\[email protected]_hint(config.GET_ITEM, 'post')\r\ndef suggest_at_iat(res, arguments):\r\n self = arguments.get('self')\r\n shp = res.shape\r\n if res.ndim < 1: # Sometimes specific slicing will return value\r\n return\r\n i = 'i' if isinstance(self, type(res.iloc)) else '' # Help the user with at and iat\r\n if all(dim == 1 for dim in shp):\r\n obj = 
config.ndim_to_obj.get(res.ndim, 'object')\r\n ledger.tell(f\"The shape of the returned {obj} from slicing is {shp} \"\r\n f\"Which suggests you are interested in the value and not \"\r\n f\"in a new {obj}. Try instead: <br>\"\r\n f\"<code>{obj}.{i}at[row, col]</code>\")\r\n\r\n\r\[email protected]_hint(['DataFrame.append', 'concat'], stop_nudge=4)\r\ndef dont_append_with_loop(arguments):\r\n if ledger.similar >= 4:\r\n ledger.tell('dont append or concat dfs iteratively. '\r\n 'it is a better practice to first create a list of dfs. '\r\n 'and then <code>pd.concat(list_of_dfs)</code> in one go')\r\n"
] | [
[
"numpy.issubdtype",
"numpy.asarray"
]
] |
nkorojoseph/Project-Trust | [
"3064c7672a2e82da6879909048c8f639eab23ae4"
] | [
"Dataset/paper-20210313T183737Z-001/paper/projectTrust.py"
] | [
"\n# coding: utf-8\n\n# In[11]:\n\nimport nltk, string\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport glob\nimport csv\nimport os\nimport ntpath\nimport networkx as nx\nimport pandas as pd\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nimport collections\nfrom spotlight import annotate\nfrom functools import partial\nfrom itertools import islice\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport numpy\n\n#A function for Slicing a dictionary\n'''n is the slice rate \niterable -> the dictionary to be sliced. \nconvert the dictionary to a list and use islice function to slice it\n'''\ndef dicslice(n, iterable):\n return list(islice(iterable, n))\n\n#nltk.download('punkt') # if necessary...\n''' a stemmer which would be used to reduce \neach word to its root equivalence is built.\nthis will help reduce the noise in the text document.\nthis is also built alongside punctuation removal.\n'''\n\n'''Function that creates tokens to use.'''\ndef stem_tokens(tokens):\n stemmer = nltk.stem.porter.PorterStemmer()\n return [stemmer.stem(item) for item in tokens]\n\n'''remove punctuation, lowercase, stem'''\ndef normalize(text):\n remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)\n return stem_tokens(nltk.word_tokenize(text.lower().translate(remove_punctuation_map)))\n\ndef cosine_sim(text1, text2):\n vectorizer = TfidfVectorizer(tokenizer=normalize, stop_words=STOPWORDS) \n tfidf = vectorizer.fit_transform([text1, text2])\n return ((tfidf * tfidf.T).A)[0,1]\ndef ProjectTrust(targetP):\n \n targetp = ntpath.basename(targetP)\n testfile = targetp\n\n newProjectReadme = []\n with open(r\"githubtestreadme/\"+testfile,'r') as newPorject:\n dataNew = newPorject.read()\n newProjectReadme.append(dataNew.replace(\"\\n\",\" \"))\n\n '''for both the new and old readme files, append the name of the files to a \n list containing the readme texts for each document'''\n path = \"githubreadmefiles/*\"\n oldProjectReadme = []\n readmewithname = []\n for fname in glob.glob(path):\n with open(fname, 'r') as infile:\n data = infile.read()\n oldProjectReadme.append(data.replace(\"\\n\",\"\"))\n readmewithname.append((fname,data.replace(\"\\n\",\"\")))\n\n\n '''put the path of each of the read me file into first_elts'''\n first_elts = [x[0] for x in readmewithname]\n #print(first_elts)\n '''put the readme file in a list called second_elts'''\n #project list\n second_elts = [x[1] for x in readmewithname]\n \n #compute cosine similarity of new and old readme files. 
this is done by \n #comparing each of the old readme with the new readme\n \n similarityvalues = []\n for i in range(len(second_elts)):\n sim = cosine_sim(second_elts[i],newProjectReadme[0])\n similarityvalues.append(sim)\n print(sim)\n #print(\"Similarity between old and new Readme %d = %f\"%(i,sim))\n\n #compute the maximum of the similarity value and get the path.\n maxi = 0\n for i in range(len(similarityvalues)):\n if similarityvalues[i] > maxi:\n maxi = similarityvalues[i]\n maxIndex = i\n\n #ntpath.basename extracts the name of a file from a path\n targetp = ntpath.basename(first_elts[maxIndex])\n\n '''Graph of fork activities on github'''\n \n F= nx.Graph()\n fork_headers = ['user_id','projectid']\n forks = pd.read_csv('trialdata/fork.csv',header=None,skiprows=1, names=fork_headers)\n fusers = forks.user_id.tolist()\n fproj = forks.projectid.tolist()\n #print(forks.head())\n F.add_edges_from(forks.values)\n\n '''Graph of watch or stargazers activities on github'''\n W= nx.Graph()\n watch_headers = ['user_id','projectid']\n watchers = pd.read_csv('trialdata/watchers.csv',header=None,skiprows=1, names=watch_headers)\n wusers = watchers.user_id.tolist()\n wproj = watchers.projectid.tolist()\n #print(watchers.head())\n W.add_edges_from(watchers.values)\n\n '''Graph of pullrequests activities on github'''\n P= nx.Graph()\n pullrequest_headers = ['user_id','projectid']\n pullrequest = pd.read_csv('trialdata/pullrequest.csv',header=None,skiprows=1, names=pullrequest_headers)\n pusers = pullrequest.user_id.tolist()\n pproj = pullrequest.projectid.tolist()\n #print(pullrequest.head())\n P.add_edges_from(pullrequest.values)\n\n '''Graph of commits activities on github'''\n C= nx.Graph()\n commit_headers = ['user_id','projectid']\n commits = pd.read_csv('trialdata/commits.csv',header=None,skiprows=1, names=commit_headers)\n cusers = commits.user_id.tolist()\n cproj = commits.projectid.tolist()\n #print(commits.head())\n C.add_edges_from(commits.values)\n\n '''compute the total users and total projects in the four graphs. using set excludes\n repeatition of any user or project\n '''\n totalusers = set(fusers + wusers + cusers + pusers)\n totalprojec = set(fproj + wproj + cproj + pproj)\n c = 0\n #generate a new directed graph where there is a common edge between the existing four graphs.\n '''\n This new graph is the graph of all developers that meets up with the four criteria\n '''\n G = nx.DiGraph()\n for i in totalusers:\n for j in totalprojec:\n if F.has_edge(i,j) and W.has_edge(i,j) and P.has_edge(i,j) and C.has_edge(i,j):\n #print('user %s found in (%s, %s)'% (i,i,j))\n G.add_edge(i,j)\n c = c + 1\n\n occur_users = [u[0] for u in G.edges()]\n testusers = occur_users\n occur_projects = [u[1] for u in G.edges()]\n #count the number project of each user occurence in the trust graph \n usercount = Counter(occur_users)\n #print('usercount',\\n')\n\n nx.draw(F,with_labels=True,node_color='g')\n plt.savefig('fork graph')\n #plt.show()\n\n nx.draw(W,with_labels=True,node_color='b')\n plt.savefig('watch graph')\n #plt.show()\n\n nx.draw(G,with_labels = True,node_color='y')\n plt.savefig('Trust Graph')\n plt.show()\n #max(occur_users, key=occur_users.count)\n\n #print( 'Users with the four characteristics to a project ',G.edges() )\n #Generic Recommendation. 
without consideration of experience level \n Recommended_users = [u[0] for u in G.in_edges(targetp)]\n\n '''\n #normalize the experience level by dividing the number of projects a developer has worked \n on by the total projects\n occur_projects = projects with developers that met up with the four criteria above\n '''\n j = dict(usercount)\n\n normUsercount = {}\n for key,value in j.items():\n normUsercount[key] = value/len(occur_projects)\n print(normUsercount)\n\n '''Gets only developers who are involved in the most similar project we have identified earlier\n u[0] = userid\n u[1] = identified project\n QUsers = Qualified developers involved with identified similar project\n '''\n dicOfQualifiedUsers = {}\n for u in G.in_edges(targetp):\n dicOfQualifiedUsers[u[0]] = u[1]\n \n QUsers = [] \n for key,value in dicOfQualifiedUsers.items():\n QUsers.append(key)\n\n '''\n # getting the experience level of only qualified users in the normUserCount experience level above.\n key = userid in normUsercount(which is experience level of all users in the new trust graph G)\n value = experience level value.\n ''' \n\n QUserExp = {}\n print('user',' value')\n for user in QUsers:\n for key,value in normUsercount.items():\n if user == key:\n print(key,' ',value,'\\n')\n QUserExp[user] = value\n\n #appending the concepts of the old readme file that selected users have worked with to the user,\n #getting project the Qusers were trusted partakers of \n #for x in occur_users,occur_projects:\n\n '''\n extract all other projects the Quser or Trusted User has participated in\n zip(occur_users,occur_projects) brings list together and makes them a tuple of user-project.\n the code below simply converts the user project tuple to a user-project dictionary.\n then checks to see if a user in the occured users from graph G is a qualified developer then assign the involved\n projet as key, value pair repespectively\n '''\n QUserProj = {}\n for user, project in zip(occur_users, occur_projects):\n if user in QUserExp:\n QUserProj[user] = project\n '''\n read in the programming languages of projects.\n #convert the projects and programming language into 'dicti'\n key = projects\n value = list of programming languages used in a project\n l2[1:]= takes off all the keys in the programming language (prolang) dictionary which is project names\n and returns only the list of programming languages involoved in it.\n 'p'+l2[0] = appends a p to the id of each project.\n '''\n prolang = []\n with open(r\"githubProjLang/languages.csv\",'r') as Prolang:\n reader = csv.reader(Prolang)\n for row in reader:\n prolang.append(row)\n dicti = {}\n for l2 in prolang:\n dicti['p'+l2[0]] = l2[1:]\n\n '''\n generate a programming language profile of each qualified developer in quserproj.\n QUsersprofilecount counts the number of programming languages a developer has been involved in.\n '''\n QUsersProfile = {}\n for key,value in QUserProj.items():\n for key1,value1 in dicti.items():\n if value==key1:\n QUsersProfile[key] = value1\n QUsersProfileCount = {}\n for key,value in QUsersProfile.items():\n QUsersProfileCount[key] = len(value)\n\n '''\n read in the programming languages used in the new project which we are recommending developers\n newdicti is a dictionary containing the new project name as key and the list of programming languages as value\n '''\n newprolang = []\n with open(\"newprojectreadme/pnewlang.csv\",'r') as NewProlang:\n reader = csv.reader(NewProlang)\n for row in reader:\n newprolang.append(row)\n newdicti = {}\n for l2 in 
newprolang:\n newdicti[l2[0]] = l2[1:]\n print(newprolang,newdicti)\n\n '''\n evaluate the number of languages each user has that is common to the new project's programming languages.\n '''\n langsim = {}\n for key,value in QUsersProfile.items():\n lang_sim = set(value).intersection(set(newdicti['pnew']))\n langsim[key] = len(lang_sim)/len(set(newdicti['pnew']))\n '''\n compute total relevance or trust level a project has for each qualified developer by summing up languages\n similarity score and experience level\n '''\n totalRel = {}\n for key,value in QUserExp.items():\n for k1,v2 in langsim.items():\n if key in langsim and k1 in QUserExp:\n totalRel[key] = QUserExp[key] + langsim[key] \n\n '''\n Rank the users based on their total relevance level or projectTrust level.\n top n = 2, developers are recommended. by slicing down the ranked recommendation list\n '''\n #sorted(totalRel,key=totalRel.get, reverse=True)\n Recommendation_list = {key: rank for rank, key in enumerate( sorted(totalRel, key=totalRel.get, reverse=True), 1)}\n n_recomm = dicslice(2,Recommendation_list.items())\n predicted_users = [x for x in Recommendation_list]\n\n\n '''\n Test for accuracy which is the correctly recommended users from the testusers list in the first notebook\n '''\n count = 0\n real_predicted = []\n for user in predicted_users:\n if user in testusers:\n count = count + 1\n real_predicted.append(user)\n acurracy = count/len(set(testusers))\n\n print('Maximum similarity value is %f index %d'%(maxi,maxIndex))\n print('Similar project is ',ntpath.basename(first_elts[maxIndex]))\n print('total relevance level',totalRel)\n print('Recommendation List =',Recommendation_list)\n print('Top two recommended developers for the new project',n_recomm)\n print('Recommended developers = ',predicted_users)\n print('testusers',testusers)\n print('realpredicted',real_predicted)\n print('Recommendation Acurracy = ',acurracy)\n\n\n# In[12]:\n\nProjectTrust('p52')\n\n\n# In[ ]:\n\n\n\n"
] | [
[
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.show",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
g-pichler/flower | [
"e455cdc3678921ece960287a0e1ae5123500c948"
] | [
"src/py/flwr/server/strategy/fast_and_slow.py"
] | [
"# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Federating: Fast and Slow.\"\"\"\n\n\nimport math\nimport statistics\nfrom logging import DEBUG, INFO\nfrom typing import Callable, Dict, List, Optional, Tuple, cast\n\nimport numpy as np\n\nfrom flwr.common import (\n EvaluateRes,\n FitIns,\n FitRes,\n Parameters,\n Scalar,\n Weights,\n parameters_to_weights,\n weights_to_parameters,\n)\nfrom flwr.common.logger import log\nfrom flwr.server.client_manager import ClientManager\nfrom flwr.server.client_proxy import ClientProxy\n\nfrom .aggregate import aggregate, weighted_loss_avg\nfrom .fedavg import FedAvg\n\nE = 0.001\nE_TIMEOUT = 0.0001\nWAIT_TIMEOUT = 600\n\n\nclass FastAndSlow(FedAvg):\n \"\"\"Strategy implementation which alternates between fast and slow rounds.\n\n :meta private:\n \"\"\"\n\n # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-locals\n def __init__(\n self,\n fraction_fit: float = 0.1,\n fraction_eval: float = 0.1,\n min_fit_clients: int = 1,\n min_eval_clients: int = 1,\n min_available_clients: int = 1,\n eval_fn: Optional[\n Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]\n ] = None,\n min_completion_rate_fit: float = 0.5,\n min_completion_rate_evaluate: float = 0.5,\n on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n importance_sampling: bool = True,\n dynamic_timeout: bool = True,\n dynamic_timeout_percentile: float = 0.8,\n alternating_timeout: bool = False,\n r_fast: int = 1,\n r_slow: int = 1,\n t_fast: int = 10,\n t_slow: int = 10,\n initial_parameters: Optional[Parameters] = None,\n ) -> None:\n super().__init__(\n fraction_fit=fraction_fit,\n fraction_eval=fraction_eval,\n min_fit_clients=min_fit_clients,\n min_eval_clients=min_eval_clients,\n min_available_clients=min_available_clients,\n eval_fn=eval_fn,\n on_fit_config_fn=on_fit_config_fn,\n on_evaluate_config_fn=on_evaluate_config_fn,\n initial_parameters=initial_parameters,\n )\n self.min_completion_rate_fit = min_completion_rate_fit\n self.min_completion_rate_evaluate = min_completion_rate_evaluate\n self.importance_sampling = importance_sampling\n self.dynamic_timeout = dynamic_timeout\n self.dynamic_timeout_percentile = dynamic_timeout_percentile\n self.alternating_timeout = alternating_timeout\n self.r_fast = r_fast\n self.r_slow = r_slow\n self.t_fast = t_fast\n self.t_slow = t_slow\n self.contributions: Dict[str, List[Tuple[int, int, int]]] = {}\n self.durations: List[Tuple[str, float, int, int]] = []\n\n def __repr__(self) -> str:\n rep = f\"FastAndSlow(importance_sampling={self.importance_sampling}, \"\n rep += f\"dynamic_timeout={self.dynamic_timeout}, \"\n rep += f\"dynamic_timeout_percentile={self.dynamic_timeout_percentile}, \"\n rep += f\"alternating_timeout={self.alternating_timeout}, \"\n rep += 
f\"r_fast={self.r_fast}, r_slow={self.r_slow}, \"\n rep += f\"t_fast={self.t_fast}, t_slow={self.t_slow})\"\n return rep\n\n # pylint: disable=too-many-locals\n def configure_fit(\n self, rnd: int, parameters: Parameters, client_manager: ClientManager\n ) -> List[Tuple[ClientProxy, FitIns]]:\n \"\"\"Configure the next round of training.\"\"\"\n\n # Block until `min_num_clients` are available\n sample_size, min_num_clients = self.num_fit_clients(\n client_manager.num_available()\n )\n success = client_manager.wait_for(\n num_clients=min_num_clients, timeout=WAIT_TIMEOUT\n )\n if not success:\n # Do not continue if not enough clients are available\n log(\n INFO,\n \"FedFS: not enough clients available after timeout %s\",\n WAIT_TIMEOUT,\n )\n return []\n\n # Sample clients\n msg = \"FedFS round %s, sample %s clients (based on all previous contributions)\"\n if self.alternating_timeout:\n log(\n DEBUG,\n msg,\n str(rnd),\n str(sample_size),\n )\n clients = self._contribution_based_sampling(\n sample_size=sample_size, client_manager=client_manager\n )\n elif self.importance_sampling:\n if rnd == 1:\n # Sample with 1/k in the first round\n log(\n DEBUG,\n \"FedFS round %s, sample %s clients with 1/k\",\n str(rnd),\n str(sample_size),\n )\n clients = self._one_over_k_sampling(\n sample_size=sample_size, client_manager=client_manager\n )\n else:\n fast_round = is_fast_round(\n rnd - 1, r_fast=self.r_fast, r_slow=self.r_slow\n )\n log(\n DEBUG,\n \"FedFS round %s, sample %s clients, fast_round %s\",\n str(rnd),\n str(sample_size),\n str(fast_round),\n )\n clients = self._fs_based_sampling(\n sample_size=sample_size,\n client_manager=client_manager,\n fast_round=fast_round,\n )\n else:\n clients = self._one_over_k_sampling(\n sample_size=sample_size, client_manager=client_manager\n )\n\n # Prepare parameters and config\n config = {}\n if self.on_fit_config_fn is not None:\n # Use custom fit config function if provided\n config = self.on_fit_config_fn(rnd)\n\n # Set timeout for this round\n if self.dynamic_timeout:\n if self.durations:\n candidates = timeout_candidates(\n durations=self.durations,\n max_timeout=self.t_slow,\n )\n timeout = next_timeout(\n candidates=candidates,\n percentile=self.dynamic_timeout_percentile,\n )\n config[\"timeout\"] = str(timeout)\n else:\n # Initial round has not past durations, use max_timeout\n config[\"timeout\"] = str(self.t_slow)\n elif self.alternating_timeout:\n use_fast_timeout = is_fast_round(rnd - 1, self.r_fast, self.r_slow)\n config[\"timeout\"] = str(self.t_fast if use_fast_timeout else self.t_slow)\n else:\n config[\"timeout\"] = str(self.t_slow)\n\n # Fit instructions\n fit_ins = FitIns(parameters, config)\n\n # Return client/config pairs\n return [(client, fit_ins) for client in clients]\n\n def _one_over_k_sampling(\n self, sample_size: int, client_manager: ClientManager\n ) -> List[ClientProxy]:\n \"\"\"Sample clients with probability 1/k.\"\"\"\n sample_size, min_num_clients = self.num_fit_clients(\n client_manager.num_available()\n )\n clients = client_manager.sample(\n num_clients=sample_size, min_num_clients=min_num_clients\n )\n return clients\n\n def _contribution_based_sampling(\n self, sample_size: int, client_manager: ClientManager\n ) -> List[ClientProxy]:\n \"\"\"Sample clients depending on their past contributions.\"\"\"\n # Get all clients and gather their contributions\n all_clients: Dict[str, ClientProxy] = client_manager.all()\n cid_idx: Dict[int, str] = {}\n raw: List[float] = []\n for idx, (cid, _) in 
enumerate(all_clients.items()):\n cid_idx[idx] = cid\n penalty = 0.0\n if cid in self.contributions:\n contribs: List[Tuple[int, int, int]] = self.contributions[cid]\n penalty = statistics.mean([c / m for _, c, m in contribs])\n # `p` should be:\n # - High for clients which have never been picked before\n # - Medium for clients which have contributed,\n # but not used their entire budget\n # - Low (but not 0) for clients which have been picked and used their budget\n raw.append(1.1 - penalty)\n\n # Sample clients\n return normalize_and_sample(\n all_clients=all_clients,\n cid_idx=cid_idx,\n raw=np.array(raw),\n sample_size=sample_size,\n use_softmax=False,\n )\n\n def _fs_based_sampling(\n self, sample_size: int, client_manager: ClientManager, fast_round: bool\n ) -> List[ClientProxy]:\n \"\"\"Sample clients with 1/k * c/m in fast rounds and 1 - c/m in slow rounds.\"\"\"\n all_clients: Dict[str, ClientProxy] = client_manager.all()\n k = len(all_clients)\n cid_idx: Dict[int, str] = {}\n raw: List[float] = []\n for idx, (cid, _) in enumerate(all_clients.items()):\n cid_idx[idx] = cid\n\n if cid in self.contributions:\n # Previously selected clients\n contribs: List[Tuple[int, int, int]] = self.contributions[cid]\n\n # pylint: disable=invalid-name\n _, c, m = contribs[-1]\n c_over_m = c / m\n # pylint: enable-msg=invalid-name\n\n if fast_round:\n importance = (1 / k) * c_over_m + E\n else:\n importance = 1 - c_over_m + E\n else:\n # Previously unselected clients\n if fast_round:\n importance = 1 / k\n else:\n importance = 1\n raw.append(importance)\n\n log(\n DEBUG,\n \"FedFS _fs_based_sampling, sample %s clients, raw %s\",\n str(sample_size),\n str(raw),\n )\n\n return normalize_and_sample(\n all_clients=all_clients,\n cid_idx=cid_idx,\n raw=np.array(raw),\n sample_size=sample_size,\n use_softmax=False,\n )\n\n def aggregate_fit(\n self,\n rnd: int,\n results: List[Tuple[ClientProxy, FitRes]],\n failures: List[BaseException],\n ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:\n \"\"\"Aggregate fit results using weighted average.\"\"\"\n if not results:\n return None, {}\n\n # Check if enough results are available\n completion_rate = len(results) / (len(results) + len(failures))\n if completion_rate < self.min_completion_rate_fit:\n # Not enough results for aggregation\n return None, {}\n\n # Convert results\n weights_results = [\n (parameters_to_weights(fit_res.parameters), fit_res.num_examples)\n for client, fit_res in results\n ]\n weights_prime = aggregate(weights_results)\n\n if self.importance_sampling:\n # Track contributions to the global model\n for client, fit_res in results:\n cid = client.cid\n\n assert \"num_examples_ceil\" in fit_res.metrics\n num_examples_ceil = cast(int, fit_res.metrics[\"num_examples_ceil\"])\n\n contribution: Tuple[int, int, int] = (\n rnd,\n fit_res.num_examples,\n num_examples_ceil,\n )\n if cid not in self.contributions:\n self.contributions[cid] = []\n self.contributions[cid].append(contribution)\n\n if self.dynamic_timeout:\n self.durations = []\n for client, fit_res in results:\n\n assert \"fit_duration\" in fit_res.metrics\n fit_duration: float = cast(float, fit_res.metrics[\"fit_duration\"])\n\n assert \"num_examples_ceil\" in fit_res.metrics\n num_examples_ceil = cast(int, fit_res.metrics[\"num_examples_ceil\"])\n\n cid_duration = (\n client.cid,\n fit_duration,\n fit_res.num_examples,\n num_examples_ceil,\n )\n self.durations.append(cid_duration)\n\n return weights_to_parameters(weights_prime), {}\n\n def aggregate_evaluate(\n self,\n rnd: 
int,\n results: List[Tuple[ClientProxy, EvaluateRes]],\n failures: List[BaseException],\n ) -> Tuple[Optional[float], Dict[str, Scalar]]:\n \"\"\"Aggregate evaluation losses using weighted average.\"\"\"\n if not results:\n return None, {}\n\n # Check if enough results are available\n completion_rate = len(results) / (len(results) + len(failures))\n if completion_rate < self.min_completion_rate_evaluate:\n # Not enough results for aggregation\n return None, {}\n\n return (\n weighted_loss_avg(\n [\n (evaluate_res.num_examples, evaluate_res.loss)\n for _, evaluate_res in results\n ]\n ),\n {},\n )\n\n\ndef is_fast_round(rnd: int, r_fast: int, r_slow: int) -> bool:\n \"\"\"Determine if the round is fast or slow.\n\n :meta private:\n \"\"\"\n remainder = rnd % (r_fast + r_slow)\n return remainder - r_fast < 0\n\n\ndef softmax(logits: np.ndarray) -> np.ndarray:\n \"\"\"Compute softmax.\n\n :meta private:\n \"\"\"\n e_x = np.exp(logits - np.max(logits))\n return cast(np.ndarray, e_x / e_x.sum(axis=0))\n\n\ndef normalize_and_sample(\n all_clients: Dict[str, ClientProxy],\n cid_idx: Dict[int, str],\n raw: np.ndarray,\n sample_size: int,\n use_softmax: bool = False,\n) -> List[ClientProxy]:\n \"\"\"Normalize the relative importance and sample clients accordingly.\n\n :meta private:\n \"\"\"\n indices = np.arange(len(all_clients.keys()))\n if use_softmax:\n probs = softmax(np.array(raw))\n else:\n probs = raw / sum(raw)\n\n log(\n DEBUG,\n \"FedFS normalize_and_sample, sample %s clients from %s, probs: %s\",\n str(sample_size),\n str(len(indices)),\n str(probs),\n )\n sampled_indices = np.random.choice(\n indices, size=sample_size, replace=False, p=probs\n )\n clients = [all_clients[cid_idx[idx]] for idx in sampled_indices]\n return clients\n\n\ndef timeout_candidates(\n durations: List[Tuple[str, float, int, int]], max_timeout: int\n) -> List[float]:\n \"\"\"Calculate timeout candidates based on previous round training durations.\n\n :meta private:\n \"\"\"\n scaled_timeout_candidates = [\n fit_duration * float(num_ex_ceil) / (float(num_ex) + E_TIMEOUT)\n for _, fit_duration, num_ex, num_ex_ceil in durations\n ]\n return [min(st, max_timeout) for st in scaled_timeout_candidates]\n\n\ndef next_timeout(candidates: List[float], percentile: float) -> int:\n \"\"\"Cacluate timeout for the next round.\n\n :meta private:\n \"\"\"\n candidates.sort()\n num_included = math.ceil(len(candidates) * percentile)\n timeout_raw = candidates[num_included - 1]\n timeout_ceil = math.ceil(timeout_raw)\n return timeout_ceil\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.random.choice"
]
] |
EigenPro/EigenPro-tensorflow | [
"324d26451f1bd1f0331d6ad80e0e00e6b975a5ab"
] | [
"layers.py"
] | [
"from keras import backend as K\nfrom keras.engine.topology import Layer\nimport numpy as np\n\nclass KernelEmbedding(Layer):\n \"\"\" Generate kernel features.\n\n Arguments:\n kernel_f: kernel function k(x, y).\n centers: matrix of shape (n_center, n_feature).\n \"\"\"\n\n def __init__(self, kernel_f, centers, **kwargs):\n self.kernel_f = kernel_f\n self._centers = centers\n self.n_center = centers.shape[0]\n super(KernelEmbedding, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.centers = self.add_weight(name='centers', \n shape=self._centers.shape,\n initializer=(lambda shape: self._centers),\n trainable=False)\n super(KernelEmbedding, self).build(input_shape) # Be sure to call this somewhere!\n\n def call(self, x):\n embed = self.kernel_f(x, self.centers)\n return embed\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.n_center)\n\n\ndef rff(X, W):\n \"\"\"Calculate random Fourier features according to paper,\n 'Random Features for Large-Scale Kernel Machines'.\n\n Arguments:\n X: data matrix of shape (n, D).\n W: weight matrix of shape (D, d).\n\n Returns:\n feature matrix of shape (n, d).\n \"\"\"\n\n d = K.get_variable_shape(W)[1]\n dot = K.dot(X, W) # of shape (n, d)\n RF = K.concatenate([K.cos(dot), K.sin(dot)], axis=1) / np.sqrt(d, dtype='float32')\n return RF\n\n\nclass RFF(Layer):\n \"\"\" Generate random Fourier features.\n\n Arguments:\n weights: of shape (D, d).\n \"\"\"\n\n def __init__(self, weights, **kwargs):\n self._weights = weights\n self.d = weights.shape[1]\n super(RFF, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.W = self.add_weight(name='rff-weight', \n shape=self._weights.shape,\n initializer=(lambda shape: self._weights),\n trainable=False)\n super(RFF, self).build(input_shape) # Be sure to call this somewhere!\n\n def call(self, x):\n embed = rff(x, self.W)\n return embed\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], 2 * self.d)\n"
] | [
[
"numpy.sqrt"
]
] |
KALVS/RandomStuff | [
"a347d73ee3621597c6efa731b36194d1743ef36c"
] | [
"OpenCV/MEAN and CAM shift/MeanShift.py"
] | [
"\nimport numpy as np\nimport cv2\n\n# Start the webcam\ncap = cv2.VideoCapture('slow.flv')\n\n\n# Take the first frame\nret, frame = cap.read()\nrows, cols = frame.shape[:2]\n\n# Define the initial window location at the frame center\nwindowWidth = 150\nwindowHeight = 200\nwindowCol = int((cols - windowWidth) / 2)\nwindowRow = int((rows - windowHeight) / 2)\nwindow = (windowCol, windowRow, windowWidth, windowHeight)\n\n# Get the ROI and convert it to HSV scale ( Region of Interest)\nroi = frame[windowRow:windowRow + windowHeight, windowCol:windowCol + windowWidth]\nroiHsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\n\n# Mask the dark areas\nlowLimit = np.array((0., 60., 32.))\nhighLimit = np.array((180., 255., 255.))\nmask = cv2.inRange(roiHsv, lowLimit, highLimit)\n\n# Calculate the hue histogram of the unmasked region\nroiHist = cv2.calcHist([roiHsv], [0], mask, [180], [0, 180])\ncv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)\n\n# Set the termination criteria: either finished 10 iteration or moved less than one pixel\nterminationCriteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS , 10, 1)\n\n# Play until the user decides to stop\nwhile True:\n # Get the next frame\n ret , frame = cap.read()\n\n if ret:\n # Calculate the histogram back projection\n frameHsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n backprojectedFrame = cv2.calcBackProject([frameHsv], [0], roiHist, [0, 180], 1)\n\n # Mask the dark areas to improve the results\n mask = cv2.inRange(frameHsv, lowLimit, highLimit)\n backprojectedFrame &= mask\n\n # Apply meanshift method to get the new window location\n ret, window = cv2.meanShift(backprojectedFrame, window, terminationCriteria)\n\n # Draw the window on the frame\n windowCol, windowRow = window[:2]\n frame = cv2.rectangle(frame, (windowCol, windowRow), (windowCol + windowWidth, windowRow + windowHeight), 255, 2)\n\n # Display the resulting frame\n cv2.imshow('meanshift', frame)\n k = cv2.waitKey(60) & 0xff\n \n # Exit if the user press ESC\n if k == 27:\n break\n else:\n break\n\n# When everything is done, release the capture and close all windows\ncap.release()\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.array"
]
] |
rohankumardubey/ibis | [
"e416dcfdb32792ffeb6f5214b361872582aa8795"
] | [
"ibis/backends/sqlite/tests/test_client.py"
] | [
"import os\nimport uuid\n\nimport numpy as np\nimport pandas.testing as tm\nimport pytest\n\nimport ibis\nimport ibis.config as config\nimport ibis.expr.types as ir\nfrom ibis.backends.sqlite import Backend\nfrom ibis.util import guid\n\n\ndef test_file_not_exist_and_create():\n path = f'__ibis_tmp_{guid()}.db'\n\n with pytest.raises(FileNotFoundError):\n ibis.sqlite.connect(path)\n\n con = ibis.sqlite.connect(path, create=True)\n try:\n assert os.path.exists(path)\n finally:\n con.con.dispose()\n os.remove(path)\n\n\ndef test_table(con):\n table = con.table('functional_alltypes')\n assert isinstance(table, ir.TableExpr)\n\n\ndef test_column_execute(alltypes, df):\n expr = alltypes.double_col\n result = expr.execute()\n expected = df.double_col\n tm.assert_series_equal(result, expected)\n\n\ndef test_literal_execute(con):\n expr = ibis.literal('1234')\n result = con.execute(expr)\n assert result == '1234'\n\n\ndef test_simple_aggregate_execute(alltypes, df):\n expr = alltypes.double_col.sum()\n result = expr.execute()\n expected = df.double_col.sum()\n np.testing.assert_allclose(result, expected)\n\n\ndef test_list_tables(con):\n assert con.list_tables()\n assert len(con.list_tables(like='functional')) == 1\n\n\ndef test_attach_file(dbpath):\n client = Backend().connect(None)\n\n client.attach('foo', dbpath)\n client.attach('bar', dbpath)\n\n foo_tables = client.list_tables(database='foo')\n bar_tables = client.list_tables(database='bar')\n\n assert foo_tables == bar_tables\n\n\ndef test_database_layer(con, db):\n assert db.list_tables() == con.list_tables()\n\n\ndef test_compile_toplevel():\n t = ibis.table([('foo', 'double')], name='t0')\n\n # it works!\n expr = t.foo.sum()\n result = ibis.sqlite.compile(expr)\n expected = \"\"\"\\\nSELECT sum(t0.foo) AS sum \nFROM t0 AS t0\"\"\" # noqa\n assert str(result) == expected\n\n\ndef test_create_and_drop_table(con):\n t = con.table('functional_alltypes')\n name = str(uuid.uuid4())\n con.create_table(name, t.limit(5))\n new_table = con.table(name)\n tm.assert_frame_equal(new_table.execute(), t.limit(5).execute())\n con.drop_table(name)\n assert name not in con.list_tables()\n\n\ndef test_verbose_log_queries(con):\n queries = []\n\n with config.option_context('verbose', True):\n with config.option_context('verbose_log', queries.append):\n con.table('functional_alltypes')['year'].execute()\n\n assert len(queries) == 1\n (query,) = queries\n expected = 'SELECT t0.year \\n'\n expected += 'FROM base.functional_alltypes AS t0\\n'\n expected += ' LIMIT ? OFFSET ?'\n assert query == expected\n"
] | [
[
"numpy.testing.assert_allclose",
"pandas.testing.assert_series_equal"
]
] |
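The two helpers recorded for this test module are numpy.testing.assert_allclose and pandas.testing.assert_series_equal. A minimal, self-contained sketch of how they behave, using toy values with no ibis or SQLite involved:

```python
import numpy as np
import pandas as pd
import pandas.testing as tm

# assert_allclose tolerates floating-point rounding error (default rtol=1e-7).
np.testing.assert_allclose(0.1 + 0.2, 0.3)

# assert_series_equal checks values, dtype, index and name of two Series.
expected = pd.Series([1.0, 2.0, 3.0], name="double_col")
result = pd.Series([1.0, 2.0, 3.0], name="double_col")
tm.assert_series_equal(result, expected)  # raises AssertionError on any mismatch

print("both assertions passed")
```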
fairgelle/disaster-response-pipeline | [
"b275a7d50fec1e5fcb5151720a275e374265e911"
] | [
"data/process_data.py"
] | [
"import numpy as np\nimport pandas as pd\n\nfrom sqlalchemy.engine import create_engine\nimport sys\n\n\ndef load_data(messages_filepath, categories_filepath):\n '''\n Returns a df that is a result of the merged 'messages' &\n 'categories' data. The data are concatenated by the id.\n \n Args:\n messages_filepath (string): The filepath of the messages csv.\n param2 (str): The filepath of the categories csv.\n \n Returns:\n df (dataframe): The resulting dataframe of the concatenation of the\n two datasets.\n '''\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n \n df = pd.merge(messages, categories, on='id')\n \n return df\n\n\ndef clean_data(df):\n '''\n Cleans the dataframe by splitting up the categories column values\n into separate columns. \n Duplicates will be removed here as well.\n \n Args:\n df (dataframe): Raw dataframe.\n \n Returns:\n df (dataframe): Cleaned dataframe\n '''\n\n # excluding columns with imbalance dataset for simplicity's sake\n cols_to_exclude = [\n 'child_alone', 'security', 'offer', 'money', 'missing_people', 'buildings',\n 'other_weather', 'fire', 'other_infrastructure', 'infrastructure_related',\n 'other_aid', 'aid_centers', 'shops', 'hospitals', 'tools', 'electricity'\n ]\n \n categories = df.categories.str.split(pat=';', expand=True)\n row = categories.iloc[0]\n\n category_colnames = row.apply(lambda x: x[:-2])\n categories.columns = category_colnames\n \n for column in categories:\n categories[column] = categories[column].apply(lambda x: x[-1])\n categories[column] = pd.to_numeric(categories[column])\n \n df = df.drop(['categories'], axis=1)\n \n df = pd.concat([df, categories], axis=1)\n \n df = df.drop(cols_to_exclude, axis=1).drop_duplicates()\n \n df['related'] = df['related'].replace(2, 1)\n \n return df\n\n\ndef save_data(df, database_filename):\n '''\n Saves data into SQLite DB.\n \n Args:\n df (dataframe): The dataframe to be saved into the DB\n database_filename (string): The resulting name of the table that is \n saved in SQLite db. The DB name and the table name would be the same\n '''\n engine = create_engine('sqlite:///{db_name}'.format(db_name=database_filename))\n df.to_sql(database_filename, engine, index=False, if_exists='replace') \n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_csv",
"pandas.to_numeric",
"pandas.merge",
"pandas.concat"
]
] |
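The pandas calls recorded here (read_csv, merge, to_numeric, concat) implement a common "split one delimited categories column into numeric indicator columns" pattern. A small sketch of that pattern on an in-memory frame, with toy rows standing in for the messages/categories CSVs the script expects:

```python
import pandas as pd

df = pd.DataFrame({
    "id": [1, 2],
    "categories": ["related-1;request-0", "related-0;request-1"],
})

# Expand "name-value" pairs into one column per category.
categories = df["categories"].str.split(";", expand=True)
categories.columns = categories.iloc[0].apply(lambda x: x[:-2])  # strip the "-0"/"-1"

# Keep only the trailing digit and convert it to a number.
for column in categories:
    categories[column] = pd.to_numeric(categories[column].str[-1])

df = pd.concat([df.drop(columns="categories"), categories], axis=1)
print(df)
#    id  related  request
# 0   1        1        0
# 1   2        0        1
```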
FlorisHoogenboom/yolo-v2-tf-2 | [
"00de6dbfdc3b2d96e45a51dd848461d91ce01919"
] | [
"test/test_layers.py"
] | [
"import numpy as np\n\nfrom yolo import layers\n\n\ndef test_anchor_layer_base_anchor_boxes():\n anchors = [\n [1, 1],\n [1, 1.5],\n [1.5, 1]\n ]\n grid_height = 2\n grid_width = 3\n layer = layers.AnchorLayer(\n grid_height=grid_height,\n grid_width=grid_width,\n anchors=anchors,\n n_classes=7\n )\n\n # Check that the shape matches the anchor boxes and grid height/width\n assert (\n layer.base_anchor_boxes.shape ==\n (grid_height, grid_width, len(anchors), 4)\n )\n\n # The yx coordinates should be centroids (hence end in 0.5)\n assert np.all(layer.base_anchor_boxes[..., 0:2] % 1 == 0.5)\n\n # Elements on the diagonals should be increasing stepwise\n diag_diffs = (\n layer.base_anchor_boxes[1:, 1:, :, 0:2] -\n layer.base_anchor_boxes[:-1, :-1, :, 0:2]\n )\n assert np.all(diag_diffs == 1)\n\n # Check that when moving in the y direction the y coordinate varies\n # this also checks that the order of the y and x coordinates is proper, i.e.\n # first the y axis and then the x axis\n y_diffs = (\n layer.base_anchor_boxes[1:, ..., 0] -\n layer.base_anchor_boxes[:-1, ..., 0]\n )\n assert np.all(y_diffs == 1)\n\n x_diffs = (\n layer.base_anchor_boxes[:, 1:, ..., 1] -\n layer.base_anchor_boxes[:, :-1, ..., 1]\n )\n assert np.all(x_diffs == 1)\n\n # The WH part of every coordinate section should simply match the anchors\n assert np.all(\n layer.base_anchor_boxes[..., 2:] == anchors\n )\n\n\ndef test_anchor_layer_predicted_boxes():\n anchors = [\n [1, 1],\n [1, 1.5],\n [1.5, 1]\n ]\n grid_height = 2\n grid_width = 3\n layer = layers.AnchorLayer(\n grid_height=grid_height,\n grid_width=grid_width,\n anchors=anchors,\n n_classes=7\n )\n\n input = np.random.randn(5, grid_height, grid_width, 1024)\n output = layer.compute_boxes(input)\n\n assert output.shape == (5, grid_height, grid_width, len(anchors), 4)\n\n # Since we are working with coordinates on the grid all the predictions\n # should be positive\n assert np.all(output >= 0)\n\n # The differences between adjacent cells should be max 2 since that is the\n # maximum grid size\n diag_diffs = (\n output[:, 1:, 1:, :, 0:2] - output[:, :-1, :-1, :, 0:2]\n )\n assert np.all((0 <= diag_diffs) & (diag_diffs <= 2))\n\n input = np.zeros(((5, grid_height, grid_width, 1024)))\n output = layer.compute_boxes(input)\n\n # With no stimulus, all predicted coordinates should be equal to the base\n # anchor boxes\n assert np.all((output - layer.base_anchor_boxes) == 0)\n\n\ndef test_anchor_layer_confidences():\n anchors = [\n [1, 1],\n [1, 1.5],\n [1.5, 1]\n ]\n batch_size = 5\n grid_height = 2\n grid_width = 3\n layer = layers.AnchorLayer(\n grid_height=grid_height,\n grid_width=grid_width,\n anchors=anchors,\n n_classes=7\n )\n\n input = np.random.randn(batch_size, grid_height, grid_width, 1024)\n output = layer.compute_confidences(input)\n\n assert (\n output.shape == (batch_size, grid_height, grid_width, len(anchors), 1)\n )\n\n # Confidences are probabilities\n assert np.all((0 <= output) & (output <= 1))\n\n # Each cell should output it's own probability\n assert (\n np.unique(output).shape ==\n (batch_size * grid_height * grid_width * len(anchors),)\n )\n\n # When fed with no stimulus all probabilities should resort to 0.5\n input = np.zeros((batch_size, grid_height, grid_width, 1024))\n output = layer.compute_confidences(input)\n assert np.all(output == 0.5)\n\n\ndef test_anchor_layer_classes():\n anchors = [\n [1, 1],\n [1, 1.5],\n [1.5, 1]\n ]\n batch_size = 5\n grid_height = 2\n grid_width = 3\n n_classes = 7\n layer = layers.AnchorLayer(\n 
grid_height=grid_height,\n grid_width=grid_width,\n anchors=anchors,\n n_classes=n_classes\n )\n\n input = np.random.randn(batch_size, grid_height, grid_width, 1024)\n output = layer.compute_classes(input)\n\n assert output.shape == (\n (batch_size, grid_height, grid_width, len(anchors), n_classes)\n )\n\n # The classes should be independent of the exact anchor box\n assert np.all(\n (output[..., 0, :] == output[..., 1, :]) &\n (output[..., 1, :] == output[..., 2, :])\n )\n\n # It should be probabilities over the last axis.\n np.testing.assert_almost_equal(output.numpy().sum(axis=-1), 1.0, decimal=5)\n\n\ndef test_anchor_layer_dimensions():\n pass\n\n\ndef test_anchor_layer_fed_with_zeros():\n pass\n"
] | [
[
"numpy.all",
"numpy.random.randn",
"numpy.zeros",
"numpy.unique"
]
] |
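The tests above lean on four numpy idioms: random.randn for stimulus, zeros for the no-stimulus case, all for broadcast boolean checks, and unique to confirm every cell produces its own value. A dependency-free sketch of the same checks on a hand-rolled sigmoid head (the yolo package itself is not imported):

```python
import numpy as np

batch, grid_h, grid_w, n_anchors = 5, 2, 3, 3

# Stand-in for a confidence head: sigmoid of random pre-activations.
logits = np.random.randn(batch, grid_h, grid_w, n_anchors, 1)
confidences = 1.0 / (1.0 + np.exp(-logits))

# Confidences are probabilities ...
assert np.all((0 <= confidences) & (confidences <= 1))
# ... and continuous random draws are (almost surely) all distinct.
assert np.unique(confidences).shape == (batch * grid_h * grid_w * n_anchors,)

# With zero stimulus the sigmoid collapses to exactly 0.5 everywhere.
zero_logits = np.zeros((batch, grid_h, grid_w, n_anchors, 1))
assert np.all(1.0 / (1.0 + np.exp(-zero_logits)) == 0.5)

print("all checks passed")
```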
StochLab/autompc | [
"657cf9c6ae6771b65b20fdcbaaadde31150afdff"
] | [
"autompc/control/nmpc.py"
] | [
"from collections import Iterable\nfrom .controller import Controller, ControllerFactory\nfrom pdb import set_trace\nimport ConfigSpace as CS\nimport ConfigSpace.hyperparameters as CSH\n\nimport numpy as np\n\nclass TrajOptProblem(object):\n \"\"\"Just a general interface for nonlinear optimization problems.\n I will just use knitro/ipopt style and the snopt one is easily written as well.\n\n Args:\n nx (int): dimension of the decision variable\n nc (int): dimension of the constraints\n \"\"\"\n def __init__(self, nx, nc):\n self.dimx = nx\n self.dimc = nc\n self.xlb, self.xub = np.zeros((2, nx))\n self.clb, self.cub = np.zeros((2, nc))\n\n def get_cost(self, x):\n raise NotImplementedError(\"Sub-class has to implement get_cost function.\")\n\n def get_gradient(self, x):\n raise NotImplementedError(\"Sub-class has to implement get_gradient function.\")\n\n def get_constraint(self, x):\n raise NotImplementedError(\"Sub-class has to implement get_constraint function.\")\n\n def get_jacobian(self, x, return_rowcol):\n \"\"\"This function computes the Jacobian at current solution x, if return_rowcol is True, it has to return row and col, too.\"\"\"\n raise NotImplementedError(\"Sub-class has to implement get_jacobian function.\")\n\nclass NonLinearMPCProblem(TrajOptProblem):\n \"\"\"Just write the NonLinear MPC problem in the OptProblem style.\n \"\"\"\n def __init__(self, system, model, task, horizon):\n self.system = system\n self.task = task\n self.model = model\n self.horizon = horizon\n dc = system.ctrl_dim\n ds = model.state_dim\n self.ctrl_dim = dc\n self.obs_dim = ds\n # now I can get the size of the problem\n nx = ds * (horizon + 1) + dc * horizon # x0 to xN, u0 to u_{N-1}\n nf = horizon * ds # for dynamics and other constraints\n TrajOptProblem.__init__(self, nx, nf)\n self._create_cache()\n\n def _create_cache(self):\n self._x = np.zeros(self.dimx)\n self._grad = np.zeros(self.dimx)\n self._c = np.zeros(self.dimc)\n self._c_dyn = self._c[-self.horizon * self.obs_dim:].reshape((self.horizon, -1)) # the last parts store dynamics\n len1 = (self.horizon + 1) * self.obs_dim\n len2 = self.horizon * self.ctrl_dim\n self._state = self._x[:len1].reshape((self.horizon + 1, self.obs_dim))\n self._ctrl = self._x[len1:].reshape((self.horizon, self.ctrl_dim))\n self._grad_state = self._grad[:len1].reshape((self.horizon + 1, self.obs_dim))\n self._grad_ctrl = self._grad[len1:].reshape((self.horizon, self.ctrl_dim))\n self._x[:] = np.random.random(self.dimx)\n self._row, self._col = self.get_jacobian(self._x, True)\n self._jac = np.zeros(self._row.size)\n \n @property\n def nnz(self):\n return self._jac.size\n\n def get_cost(self, x):\n # compute the cost function, not sure how it's gonna be written though\n cost = self.task.get_cost()\n self._x[:] = x # copy contents in\n dt = self.system.dt\n tc = cost.eval_term_obs_cost(self._state[-1, :self.system.obs_dim])\n for i in range(self.horizon + 1):\n tc += cost.eval_obs_cost(self._state[i, :self.system.obs_dim]) * dt\n for i in range(self.horizon):\n tc += cost.eval_ctrl_cost(self._ctrl[i]) * dt\n return tc\n\n def get_gradient(self, x):\n \"\"\"Compute the gradient given some guess\"\"\"\n self._x[:] = x\n self._grad[:] = 0 # reset just in case\n # terminal one\n cost = self.task.get_cost()\n _, gradtc = cost.eval_term_obs_cost_diff(self._state[-1, :self.system.obs_dim])\n self._grad_state[-1, :self.system.obs_dim] = gradtc\n dt = self.system.dt\n for i in range(self.horizon + 1):\n _, gradx = cost.eval_obs_cost_diff(self._state[i, 
:self.system.obs_dim])\n self._grad_state[i, :self.system.obs_dim] += gradx * dt\n for i in range(self.horizon):\n _, gradu = cost.eval_ctrl_cost_diff(self._ctrl[i])\n self._grad_ctrl[i] = gradu * dt\n return self._grad\n\n def get_constraint(self, x):\n \"\"\"Evaluate the constraint function\"\"\"\n self._x[:] = x\n self._c[:] = 0\n # first compute for dynamics\n pred_states = self.model.pred_batch(self._state[:self.horizon], self._ctrl[:self.horizon])\n for i in range(self.horizon):\n self._c_dyn[i] = -self._state[i + 1] + pred_states[i]\n return self._c\n\n def get_constr_bounds(self):\n \"\"\"Just return the bounds of constraints\"\"\"\n clb, cub = np.zeros((2, self.dimc))\n return clb, cub\n\n def get_variable_bounds(self):\n statebd = np.zeros((self.obs_dim, 2))\n statebd[:,0] = -np.inf\n statebd[:,1] = np.inf\n statebd[:self.system.obs_dim, :] = self.task.get_obs_bounds()\n ctrlbd = self.task.get_ctrl_bounds()\n dc = self.ctrl_dim\n ds = self.obs_dim\n xlb, xub = np.zeros((2, self.dimx))\n xlb[:(self.horizon + 1) * ds].reshape((-1, ds))[:] = statebd[:, 0]\n xub[:(self.horizon + 1) * ds].reshape((-1, ds))[:] = statebd[:, 1]\n xlb[-self.horizon * dc:].reshape((-1, dc))[:] = ctrlbd[:, 0]\n xub[-self.horizon * dc:].reshape((-1, dc))[:] = ctrlbd[:, 1]\n return xlb, xub\n\n def _dense_to_rowcol(self, shape, row0, col0):\n row, col = shape\n rows = np.arange(row)[:, None] * np.ones(col) + row0\n cols = np.ones((row, 1)) * np.arange(col) + col0\n return rows.flatten(), cols.flatten()\n\n def get_state_index(self, index):\n return index * self.obs_dim\n\n def get_ctrl_index(self, index):\n return (self.horizon + 1) * self.obs_dim + index * self.ctrl_dim\n\n def get_jacobian(self, x, return_rowcol):\n \"\"\"This function computes the Jacobian at current solution x, if return_rowcol is True, it returns a tuple of the patterns of row and col\"\"\"\n self._x[:] = x\n # Here I may as well assume all the ret_grad stuff returns a dense jacobian or None which means all zero, support for coo_matrix is under development\n dims = self.obs_dim\n dimu = self.ctrl_dim\n if return_rowcol:\n cr = 0\n row = []\n col = []\n _, mat1, mat2 = self.model.pred_diff(self._state[0], self._ctrl[0])\n srowptn, scolptn = self._dense_to_rowcol(mat1.shape, 0, 0)\n urowptn, ucolptn = self._dense_to_rowcol(mat2.shape, 0, 0)\n # compute patterns for it\n base_x_idx = 0\n base_u_idx = dims * (self.horizon + 1)\n for i in range(self.horizon):\n row.append(cr + srowptn)\n col.append(base_x_idx + i * dims + scolptn)\n row.append(cr + urowptn)\n col.append(base_u_idx + i * dimu + ucolptn)\n # take care, here you are placing them after placing jacobian\n row.append(cr + np.arange(dims))\n col.append(base_x_idx + (i + 1) * dims + np.arange(dims))\n cr += dims\n return np.concatenate(row), np.concatenate(col)\n else:\n # I have to compute the jacobian here\n cr = 0\n cg = 0\n self._jac[:] = 0\n # for terminal constraints first\n ###### Placeholder for terminal constraints\n # then other point constraints\n _, matss, matus = self.model.pred_diff_batch(self._state[:self.horizon], self._ctrl[:self.horizon])\n for i in range(self.horizon):\n mats, matu = matss[i], matus[i]\n self._jac[cg: cg + mats.size] = mats.flat\n cg += mats.size\n self._jac[cg: cg + matu.size] = matu.flat\n cg += matu.size\n self._jac[cg: cg + dims] = -1\n cg += dims\n return self._jac\n\n\nclass IpoptWrapper:\n \"\"\"Just the ipopt style stuff\"\"\"\n def __init__(self, prob):\n self.prob = prob\n\n def objective(self, x):\n return self.prob.get_cost(x)\n\n def 
gradient(self, x):\n return self.prob.get_gradient(x)\n\n def constraints(self, x):\n return self.prob.get_constraint(x)\n\n def jacobian(self, x):\n jac = self.prob.get_jacobian(x, False)\n return jac\n\n def jacobianstructure(self):\n x = np.zeros(self.prob.dimx)\n return self.prob.get_jacobian(x, True)\n\nclass DirectTranscriptionControllerFactory(ControllerFactory):\n \"\"\"\n Direct Transcription (DT) is a method to discretize an optimal control problem which is inherently continuous.\n Such discretization is usually necessary in order to get an optimization problem of finite dimensionality.\n For a trajectory with time length :math:`T`, it discretize the time interval into a equidistant grid of size :math:`N`, called knots.\n The state and control at each knot are optimized.\n The constraints are imposed at the knots, including system dynamics constraints.\n DT uses first-order Euler integration to approximate the constraints of system dynamics.\n The details can be found in `An Introduction to Trajectory Optimization: How to Do Your Own Direct Collocation <https://epubs.siam.org/doi/pdf/10.1137/16M1062569>`_.\n\n Hyperparameter:\n - *horizon* (Type: int, Lower: 1, High: 30, Default: 10): Control Horizon\n \"\"\"\n def __init__(self, *args, **kwargs):\n try:\n import cyipopt\n except:\n raise ImportError(\"Missing dependency for Direct Transcription Controller\")\n super().__init__(*args, **kwargs)\n self.Controller = DirectTranscriptionController\n self.name = \"DirectTranscription\"\n\n def get_configuration_space(self):\n cs = CS.ConfigurationSpace()\n horizon = CSH.UniformIntegerHyperparameter(name=\"horizon\",\n lower=1, upper=30, default_value=10)\n cs.add_hyperparameter(horizon)\n return cs\n\nclass DirectTranscriptionController(Controller):\n \"\"\"\n Implementation of the linear controller. For this very basic version, it accepts some linear models and compute output.\n constraints is a dict of constraints we have to consider, it has two keys: path and terminal. 
The items are list of Constraints.\n cost is a Cost instance to compute fitness of a trajectory\n \"\"\"\n def __init__(self, system, model, task, horizon):\n global cyipopt\n try:\n import cyipopt\n except:\n raise ImportError(\"Missing dependency for Direct Transcription Controller\")\n Controller.__init__(self, system, model, task)\n self.horizon = int(np.ceil(horizon / system.dt))\n self._built = False\n self._guess = None\n self._x_dim = (self.horizon + 1) * system.obs_dim + self.horizon * system.ctrl_dim\n\n def reset(self):\n self._built = False\n self._guess = None\n\n def set_guess(self, guess):\n if guess.size != self._xdim:\n raise Exception(\"Guess dimension should be %d\" % self._x_dim)\n self._guess = guess\n\n def _build_problem(self):\n \"\"\"Use cvxpy to construct the problem\"\"\"\n self._built = True\n self.problem = NonLinearMPCProblem(self.system, self.model, self.task, self.horizon)\n self.wrapper = IpoptWrapper(self.problem)\n\n def _update_problem_and_solve(self, x0):\n \"\"\"Solve the problem\"\"\"\n if not self._built:\n self._build_problem()\n\n dims = self.model.state_dim\n lb, ub = self.problem.get_variable_bounds()\n cl, cu = self.problem.get_constr_bounds()\n lb[:dims] = ub[:dims] = x0\n ipopt_prob = cyipopt.Problem(\n n=self.problem.dimx,\n m=self.problem.dimc,\n problem_obj = self.wrapper,\n lb=lb,\n ub=ub,\n cl=cl,\n cu=cu\n )\n if self._guess is None:\n guess = np.zeros(self.problem.dimx)\n else:\n guess = self._guess\n\n ipopt_prob.add_option(\"max_iter\", 10)\n sol, info = ipopt_prob.solve(guess)\n return sol, info\n\n @property\n def state_dim(self):\n return self.model.state_dim + self.model.ctrl_dim\n\n @staticmethod\n def is_compatible(system, task, model):\n return True # this should be universal...\n \n def traj_to_state(self, traj):\n return np.concatenate([self.model.traj_to_state(traj),\n traj[-1].ctrl])\n\n def run(self, state, new_obs):\n x = self.model.update_state(state[:-self.system.ctrl_dim],\n state[-self.system.ctrl_dim:], new_obs)\n self._x_cache = x\n sol, info = self._update_problem_and_solve(x)\n\n # update guess\n self._guess = sol.copy()\n dims = self.problem.obs_dim\n dimu = self.problem.ctrl_dim\n idx0 = dims * (self.horizon + 1)\n u = sol[idx0: idx0 + dimu]\n statenew = np.concatenate([x, u])\n\n return u, statenew\n"
] | [
[
"numpy.concatenate",
"numpy.ceil",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"numpy.random.random"
]
] |
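The most compact piece of the controller above is _dense_to_rowcol, which builds the COO sparsity pattern of one dense Jacobian block from numpy.arange and numpy.ones. The same helper, lifted out so it can be run on its own:

```python
import numpy as np

def dense_to_rowcol(shape, row0, col0):
    """Row/col indices of a dense (nrow, ncol) block placed at offset (row0, col0)."""
    nrow, ncol = shape
    rows = np.arange(nrow)[:, None] * np.ones(ncol) + row0  # each row index repeated ncol times
    cols = np.ones((nrow, 1)) * np.arange(ncol) + col0      # column indices tiled per row
    return rows.flatten(), cols.flatten()

rows, cols = dense_to_rowcol((2, 3), row0=4, col0=10)
print(rows)  # [4. 4. 4. 5. 5. 5.]
print(cols)  # [10. 11. 12. 10. 11. 12.]
```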
samhsia/dlrm | [
"a50709a24b690782293a44941264c83f09e15a3c"
] | [
"dlrm_data_pytorch.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n# Description: generate inputs and targets for the dlrm benchmark\n# The inpts and outputs are generated according to the following three option(s)\n# 1) random distribution\n# 2) synthetic distribution, based on unique accesses and distances between them\n# i) R. Hassan, A. Harris, N. Topham and A. Efthymiou \"Synthetic Trace-Driven\n# Simulation of Cache Memory\", IEEE AINAM'07\n# 3) public data set\n# i) Criteo Kaggle Display Advertising Challenge Dataset\n# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset\n# ii) Criteo Terabyte Dataset\n# https://labs.criteo.com/2013/12/download-terabyte-click-logs\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n# others\nfrom os import path\nimport sys\nimport bisect\nimport collections\n\nimport data_utils\n\n# numpy\nimport numpy as np\nfrom numpy import random as ra\nfrom collections import deque\n\n\n# pytorch\nimport torch\nfrom torch.utils.data import Dataset, RandomSampler\n\nimport data_loader_terabyte\nimport mlperf_logger\n\n\n# Kaggle Display Advertising Challenge Dataset\n# dataset (str): name of dataset (Kaggle or Terabyte)\n# randomize (str): determines randomization scheme\n# \"none\": no randomization\n# \"day\": randomizes each day\"s data (only works if split = True)\n# \"total\": randomizes total dataset\n# split (bool) : to split into train, test, validation data-sets\nclass CriteoDataset(Dataset):\n\n def __init__(\n self,\n dataset,\n max_ind_range,\n sub_sample_rate,\n randomize,\n split=\"train\",\n raw_path=\"\",\n pro_data=\"\",\n memory_map=False,\n dataset_multiprocessing=False,\n ):\n # dataset\n # tar_fea = 1 # single target\n den_fea = 13 # 13 dense features\n # spa_fea = 26 # 26 sparse features\n # tad_fea = tar_fea + den_fea\n # tot_fea = tad_fea + spa_fea\n if dataset == \"kaggle\":\n days = 7\n out_file = \"kaggleAdDisplayChallenge_processed\"\n elif dataset == \"terabyte\":\n days = 24\n out_file = \"terabyte_processed\"\n else:\n raise(ValueError(\"Data set option is not supported\"))\n self.max_ind_range = max_ind_range\n self.memory_map = memory_map\n\n # split the datafile into path and filename\n lstr = raw_path.split(\"/\")\n self.d_path = \"/\".join(lstr[0:-1]) + \"/\"\n self.d_file = lstr[-1].split(\".\")[0] if dataset == \"kaggle\" else lstr[-1]\n self.npzfile = self.d_path + (\n (self.d_file + \"_day\") if dataset == \"kaggle\" else self.d_file\n )\n self.trafile = self.d_path + (\n (self.d_file + \"_fea\") if dataset == \"kaggle\" else \"fea\"\n )\n\n # check if pre-processed data is available\n data_ready = True\n if memory_map:\n for i in range(days):\n reo_data = self.npzfile + \"_{0}_reordered.npz\".format(i)\n if not path.exists(str(reo_data)):\n data_ready = False\n else:\n if not path.exists(str(pro_data)):\n data_ready = False\n\n # pre-process data if needed\n # WARNNING: when memory mapping is used we get a collection of files\n if data_ready:\n print(\"Reading pre-processed data=%s\" % (str(pro_data)))\n file = str(pro_data)\n else:\n print(\"Reading raw data=%s\" % (str(raw_path)))\n file = data_utils.getCriteoAdData(\n raw_path,\n out_file,\n max_ind_range,\n sub_sample_rate,\n days,\n split,\n randomize,\n dataset == \"kaggle\",\n memory_map,\n dataset_multiprocessing,\n )\n\n # get a number of samples per day\n total_file = self.d_path + 
self.d_file + \"_day_count.npz\"\n with np.load(total_file) as data:\n total_per_file = data[\"total_per_file\"]\n # compute offsets per file\n self.offset_per_file = np.array([0] + [x for x in total_per_file])\n for i in range(days):\n self.offset_per_file[i + 1] += self.offset_per_file[i]\n # print(self.offset_per_file)\n\n # setup data\n if memory_map:\n # setup the training/testing split\n self.split = split\n if split == 'none' or split == 'train':\n self.day = 0\n self.max_day_range = days if split == 'none' else days - 1\n elif split == 'test' or split == 'val':\n self.day = days - 1\n num_samples = self.offset_per_file[days] - \\\n self.offset_per_file[days - 1]\n self.test_size = int(np.ceil(num_samples / 2.))\n self.val_size = num_samples - self.test_size\n else:\n sys.exit(\"ERROR: dataset split is neither none, nor train or test.\")\n\n '''\n # text\n print(\"text\")\n for i in range(days):\n fi = self.npzfile + \"_{0}\".format(i)\n with open(fi) as data:\n ttt = 0; nnn = 0\n for _j, line in enumerate(data):\n ttt +=1\n if np.int32(line[0]) > 0:\n nnn +=1\n print(\"day=\" + str(i) + \" total=\" + str(ttt) + \" non-zeros=\"\n + str(nnn) + \" ratio=\" +str((nnn * 100.) / ttt) + \"%\")\n # processed\n print(\"processed\")\n for i in range(days):\n fi = self.npzfile + \"_{0}_processed.npz\".format(i)\n with np.load(fi) as data:\n yyy = data[\"y\"]\n ttt = len(yyy)\n nnn = np.count_nonzero(yyy)\n print(\"day=\" + str(i) + \" total=\" + str(ttt) + \" non-zeros=\"\n + str(nnn) + \" ratio=\" +str((nnn * 100.) / ttt) + \"%\")\n # reordered\n print(\"reordered\")\n for i in range(days):\n fi = self.npzfile + \"_{0}_reordered.npz\".format(i)\n with np.load(fi) as data:\n yyy = data[\"y\"]\n ttt = len(yyy)\n nnn = np.count_nonzero(yyy)\n print(\"day=\" + str(i) + \" total=\" + str(ttt) + \" non-zeros=\"\n + str(nnn) + \" ratio=\" +str((nnn * 100.) 
/ ttt) + \"%\")\n '''\n\n # load unique counts\n with np.load(self.d_path + self.d_file + \"_fea_count.npz\") as data:\n self.counts = data[\"counts\"]\n self.m_den = den_fea # X_int.shape[1]\n self.n_emb = len(self.counts)\n print(\"Sparse features= %d, Dense features= %d\" % (self.n_emb, self.m_den))\n\n # Load the test data\n # Only a single day is used for testing\n if self.split == 'test' or self.split == 'val':\n # only a single day is used for testing\n fi = self.npzfile + \"_{0}_reordered.npz\".format(\n self.day\n )\n with np.load(fi) as data:\n self.X_int = data[\"X_int\"] # continuous feature\n self.X_cat = data[\"X_cat\"] # categorical feature\n self.y = data[\"y\"] # target\n\n else:\n # load and preprocess data\n with np.load(file) as data:\n X_int = data[\"X_int\"] # continuous feature\n X_cat = data[\"X_cat\"] # categorical feature\n y = data[\"y\"] # target\n self.counts = data[\"counts\"]\n self.m_den = X_int.shape[1] # den_fea\n self.n_emb = len(self.counts)\n print(\"Sparse fea = %d, Dense fea = %d\" % (self.n_emb, self.m_den))\n\n # create reordering\n indices = np.arange(len(y))\n\n if split == \"none\":\n # randomize all data\n if randomize == \"total\":\n indices = np.random.permutation(indices)\n print(\"Randomized indices...\")\n\n X_int[indices] = X_int\n X_cat[indices] = X_cat\n y[indices] = y\n\n else:\n indices = np.array_split(indices, self.offset_per_file[1:-1])\n\n # randomize train data (per day)\n if randomize == \"day\": # or randomize == \"total\":\n for i in range(len(indices) - 1):\n indices[i] = np.random.permutation(indices[i])\n print(\"Randomized indices per day ...\")\n\n train_indices = np.concatenate(indices[:-1])\n test_indices = indices[-1]\n test_indices, val_indices = np.array_split(test_indices, 2)\n\n print(\"Defined %s indices...\" % (split))\n\n # randomize train data (across days)\n if randomize == \"total\":\n train_indices = np.random.permutation(train_indices)\n print(\"Randomized indices across days ...\")\n\n # create training, validation, and test sets\n if split == 'train':\n self.X_int = [X_int[i] for i in train_indices]\n self.X_cat = [X_cat[i] for i in train_indices]\n self.y = [y[i] for i in train_indices]\n elif split == 'val':\n self.X_int = [X_int[i] for i in val_indices]\n self.X_cat = [X_cat[i] for i in val_indices]\n self.y = [y[i] for i in val_indices]\n elif split == 'test':\n self.X_int = [X_int[i] for i in test_indices]\n self.X_cat = [X_cat[i] for i in test_indices]\n self.y = [y[i] for i in test_indices]\n\n print(\"Split data according to indices...\")\n\n def __getitem__(self, index):\n\n if isinstance(index, slice):\n return [\n self[idx] for idx in range(\n index.start or 0, index.stop or len(self), index.step or 1\n )\n ]\n\n if self.memory_map:\n if self.split == 'none' or self.split == 'train':\n # check if need to swicth to next day and load data\n if index == self.offset_per_file[self.day]:\n # print(\"day_boundary switch\", index)\n self.day_boundary = self.offset_per_file[self.day]\n fi = self.npzfile + \"_{0}_reordered.npz\".format(\n self.day\n )\n # print('Loading file: ', fi)\n with np.load(fi) as data:\n self.X_int = data[\"X_int\"] # continuous feature\n self.X_cat = data[\"X_cat\"] # categorical feature\n self.y = data[\"y\"] # target\n self.day = (self.day + 1) % self.max_day_range\n\n i = index - self.day_boundary\n elif self.split == 'test' or self.split == 'val':\n # only a single day is used for testing\n i = index + (0 if self.split == 'test' else self.test_size)\n else:\n 
sys.exit(\"ERROR: dataset split is neither none, nor train or test.\")\n else:\n i = index\n\n if self.max_ind_range > 0:\n return self.X_int[i], self.X_cat[i] % self.max_ind_range, self.y[i]\n else:\n return self.X_int[i], self.X_cat[i], self.y[i]\n\n def _default_preprocess(self, X_int, X_cat, y):\n X_int = torch.log(torch.tensor(X_int, dtype=torch.float) + 1)\n if self.max_ind_range > 0:\n X_cat = torch.tensor(X_cat % self.max_ind_range, dtype=torch.long)\n else:\n X_cat = torch.tensor(X_cat, dtype=torch.long)\n y = torch.tensor(y.astype(np.float32))\n\n return X_int, X_cat, y\n\n def __len__(self):\n if self.memory_map:\n if self.split == 'none':\n return self.offset_per_file[-1]\n elif self.split == 'train':\n return self.offset_per_file[-2]\n elif self.split == 'test':\n return self.test_size\n elif self.split == 'val':\n return self.val_size\n else:\n sys.exit(\"ERROR: dataset split is neither none, nor train nor test.\")\n else:\n return len(self.y)\n\n\ndef collate_wrapper_criteo_offset(list_of_tuples):\n # where each tuple is (X_int, X_cat, y)\n transposed_data = list(zip(*list_of_tuples))\n X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1)\n X_cat = torch.tensor(transposed_data[1], dtype=torch.long)\n T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1)\n\n batchSize = X_cat.shape[0]\n featureCnt = X_cat.shape[1]\n\n lS_i = [X_cat[:, i] for i in range(featureCnt)]\n lS_o = [torch.tensor(range(batchSize)) for _ in range(featureCnt)]\n\n return X_int, torch.stack(lS_o), torch.stack(lS_i), T\n\n\ndef ensure_dataset_preprocessed(args, d_path):\n _ = CriteoDataset(\n args.data_set,\n args.max_ind_range,\n args.data_sub_sample_rate,\n args.data_randomize,\n \"train\",\n args.raw_data_file,\n args.processed_data_file,\n args.memory_map,\n args.dataset_multiprocessing\n )\n\n _ = CriteoDataset(\n args.data_set,\n args.max_ind_range,\n args.data_sub_sample_rate,\n args.data_randomize,\n \"test\",\n args.raw_data_file,\n args.processed_data_file,\n args.memory_map,\n args.dataset_multiprocessing\n )\n\n for split in ['train', 'val', 'test']:\n print('Running preprocessing for split =', split)\n\n train_files = ['{}_{}_reordered.npz'.format(args.raw_data_file, day)\n for\n day in range(0, 23)]\n\n test_valid_file = args.raw_data_file + '_23_reordered.npz'\n\n output_file = d_path + '_{}.bin'.format(split)\n\n input_files = train_files if split == 'train' else [test_valid_file]\n data_loader_terabyte.numpy_to_binary(input_files=input_files,\n output_file_path=output_file,\n split=split)\n\n\n# Conversion from offset to length\ndef offset_to_length_converter(lS_o, lS_i):\n def diff(tensor):\n return tensor[1:] - tensor[:-1]\n\n return torch.stack(\n [\n diff(torch.cat((S_o, torch.tensor(lS_i[ind].shape))).int())\n for ind, S_o in enumerate(lS_o)\n ]\n )\n\n\ndef collate_wrapper_criteo_length(list_of_tuples):\n # where each tuple is (X_int, X_cat, y)\n transposed_data = list(zip(*list_of_tuples))\n X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1)\n X_cat = torch.tensor(transposed_data[1], dtype=torch.long)\n T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1)\n\n batchSize = X_cat.shape[0]\n featureCnt = X_cat.shape[1]\n\n lS_i = torch.stack([X_cat[:, i] for i in range(featureCnt)])\n lS_o = torch.stack(\n [torch.tensor(range(batchSize)) for _ in range(featureCnt)]\n )\n\n lS_l = offset_to_length_converter(lS_o, lS_i)\n\n return X_int, lS_l, lS_i, T\n\n\ndef make_criteo_data_and_loaders(args, 
offset_to_length_converter=False):\n if args.mlperf_logging and args.memory_map and args.data_set == \"terabyte\":\n # more efficient for larger batches\n data_directory = path.dirname(args.raw_data_file)\n\n if args.mlperf_bin_loader:\n lstr = args.processed_data_file.split(\"/\")\n d_path = \"/\".join(lstr[0:-1]) + \"/\" + lstr[-1].split(\".\")[0]\n train_file = d_path + \"_train.bin\"\n test_file = d_path + \"_test.bin\"\n # val_file = d_path + \"_val.bin\"\n counts_file = args.raw_data_file + '_fea_count.npz'\n\n if any(not path.exists(p) for p in [train_file,\n test_file,\n counts_file]):\n ensure_dataset_preprocessed(args, d_path)\n\n train_data = data_loader_terabyte.CriteoBinDataset(\n data_file=train_file,\n counts_file=counts_file,\n batch_size=args.mini_batch_size,\n max_ind_range=args.max_ind_range\n )\n\n mlperf_logger.log_event(key=mlperf_logger.constants.TRAIN_SAMPLES,\n value=train_data.num_samples)\n\n train_loader = torch.utils.data.DataLoader(\n train_data,\n batch_size=None,\n batch_sampler=None,\n shuffle=False,\n num_workers=0,\n collate_fn=None,\n pin_memory=False,\n drop_last=False,\n sampler=RandomSampler(train_data) if args.mlperf_bin_shuffle else None\n )\n\n test_data = data_loader_terabyte.CriteoBinDataset(\n data_file=test_file,\n counts_file=counts_file,\n batch_size=args.test_mini_batch_size,\n max_ind_range=args.max_ind_range\n )\n\n mlperf_logger.log_event(key=mlperf_logger.constants.EVAL_SAMPLES,\n value=test_data.num_samples)\n\n test_loader = torch.utils.data.DataLoader(\n test_data,\n batch_size=None,\n batch_sampler=None,\n shuffle=False,\n num_workers=0,\n collate_fn=None,\n pin_memory=False,\n drop_last=False,\n )\n else:\n data_filename = args.raw_data_file.split(\"/\")[-1]\n\n train_data = CriteoDataset(\n args.data_set,\n args.max_ind_range,\n args.data_sub_sample_rate,\n args.data_randomize,\n \"train\",\n args.raw_data_file,\n args.processed_data_file,\n args.memory_map,\n args.dataset_multiprocessing\n )\n\n test_data = CriteoDataset(\n args.data_set,\n args.max_ind_range,\n args.data_sub_sample_rate,\n args.data_randomize,\n \"test\",\n args.raw_data_file,\n args.processed_data_file,\n args.memory_map,\n args.dataset_multiprocessing\n )\n\n train_loader = data_loader_terabyte.DataLoader(\n data_directory=data_directory,\n data_filename=data_filename,\n days=list(range(23)),\n batch_size=args.mini_batch_size,\n max_ind_range=args.max_ind_range,\n split=\"train\"\n )\n\n test_loader = data_loader_terabyte.DataLoader(\n data_directory=data_directory,\n data_filename=data_filename,\n days=[23],\n batch_size=args.test_mini_batch_size,\n max_ind_range=args.max_ind_range,\n split=\"test\"\n )\n else:\n train_data = CriteoDataset(\n args.data_set,\n args.max_ind_range,\n args.data_sub_sample_rate,\n args.data_randomize,\n \"train\",\n args.raw_data_file,\n args.processed_data_file,\n args.memory_map,\n args.dataset_multiprocessing,\n )\n\n test_data = CriteoDataset(\n args.data_set,\n args.max_ind_range,\n args.data_sub_sample_rate,\n args.data_randomize,\n \"test\",\n args.raw_data_file,\n args.processed_data_file,\n args.memory_map,\n args.dataset_multiprocessing,\n )\n\n collate_wrapper_criteo = collate_wrapper_criteo_offset\n if offset_to_length_converter:\n collate_wrapper_criteo = collate_wrapper_criteo_length\n\n train_loader = torch.utils.data.DataLoader(\n train_data,\n batch_size=args.mini_batch_size,\n shuffle=False,\n num_workers=args.num_workers,\n collate_fn=collate_wrapper_criteo,\n pin_memory=False,\n drop_last=False, # True\n )\n\n 
test_loader = torch.utils.data.DataLoader(\n test_data,\n batch_size=args.test_mini_batch_size,\n shuffle=False,\n num_workers=args.test_num_workers,\n collate_fn=collate_wrapper_criteo,\n pin_memory=False,\n drop_last=False, # True\n )\n\n return train_data, train_loader, test_data, test_loader\n\n\n# uniform ditribution (input data)\nclass RandomDataset(Dataset):\n\n def __init__(\n self,\n m_den,\n ln_emb,\n data_size,\n num_batches,\n mini_batch_size,\n num_indices_per_lookup,\n num_indices_per_lookup_fixed,\n num_targets=1,\n round_targets=False,\n data_generation=\"random\",\n trace_file=\"\",\n enable_padding=False,\n reset_seed_on_access=False,\n rand_data_dist=\"uniform\",\n rand_data_min=1,\n rand_data_max=1,\n rand_data_mu=-1,\n rand_data_sigma=1,\n rand_seed=0\n ):\n # compute batch size\n nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))\n if num_batches != 0:\n nbatches = num_batches\n data_size = nbatches * mini_batch_size\n # print(\"Total number of batches %d\" % nbatches)\n\n # save args (recompute data_size if needed)\n self.m_den = m_den\n self.ln_emb = ln_emb\n self.data_size = data_size\n self.num_batches = nbatches\n self.mini_batch_size = mini_batch_size\n self.num_indices_per_lookup = num_indices_per_lookup\n self.num_indices_per_lookup_fixed = num_indices_per_lookup_fixed\n self.num_targets = num_targets\n self.round_targets = round_targets\n self.data_generation = data_generation\n self.trace_file = trace_file\n self.enable_padding = enable_padding\n self.reset_seed_on_access = reset_seed_on_access\n self.rand_seed = rand_seed\n self.rand_data_dist = rand_data_dist\n self.rand_data_min = rand_data_min\n self.rand_data_max = rand_data_max\n self.rand_data_mu = rand_data_mu\n self.rand_data_sigma = rand_data_sigma\n\n def reset_numpy_seed(self, numpy_rand_seed):\n np.random.seed(numpy_rand_seed)\n # torch.manual_seed(numpy_rand_seed)\n\n def __getitem__(self, index):\n\n if isinstance(index, slice):\n return [\n self[idx] for idx in range(\n index.start or 0, index.stop or len(self), index.step or 1\n )\n ]\n\n # WARNING: reset seed on access to first element\n # (e.g. 
if same random samples needed across epochs)\n if self.reset_seed_on_access and index == 0:\n self.reset_numpy_seed(self.rand_seed)\n\n # number of data points in a batch\n n = min(self.mini_batch_size, self.data_size - (index * self.mini_batch_size))\n\n # generate a batch of dense and sparse features\n if self.data_generation == \"random\":\n (X, lS_o, lS_i) = generate_dist_input_batch(\n self.m_den,\n self.ln_emb,\n n,\n self.num_indices_per_lookup,\n self.num_indices_per_lookup_fixed,\n rand_data_dist=self.rand_data_dist,\n rand_data_min=self.rand_data_min,\n rand_data_max=self.rand_data_max,\n rand_data_mu=self.rand_data_mu,\n rand_data_sigma=self.rand_data_sigma,\n )\n elif self.data_generation == \"synthetic\":\n (X, lS_o, lS_i) = generate_synthetic_input_batch(\n self.m_den,\n self.ln_emb,\n n,\n self.num_indices_per_lookup,\n self.num_indices_per_lookup_fixed,\n self.trace_file,\n self.enable_padding\n )\n else:\n sys.exit(\n \"ERROR: --data-generation=\" + self.data_generation + \" is not supported\"\n )\n\n # generate a batch of target (probability of a click)\n T = generate_random_output_batch(n, self.num_targets, self.round_targets)\n\n return (X, lS_o, lS_i, T)\n\n def __len__(self):\n # WARNING: note that we produce bacthes of outputs in __getitem__\n # therefore we should use num_batches rather than data_size below\n return self.num_batches\n\n\ndef collate_wrapper_random_offset(list_of_tuples):\n # where each tuple is (X, lS_o, lS_i, T)\n (X, lS_o, lS_i, T) = list_of_tuples[0]\n return (X,\n torch.stack(lS_o),\n lS_i,\n T)\n\n\ndef collate_wrapper_random_length(list_of_tuples):\n # where each tuple is (X, lS_o, lS_i, T)\n (X, lS_o, lS_i, T) = list_of_tuples[0]\n return (X,\n offset_to_length_converter(torch.stack(lS_o), lS_i),\n lS_i,\n T)\n\n\ndef make_random_data_and_loader(args, ln_emb, m_den,\n offset_to_length_converter=False,\n):\n\n train_data = RandomDataset(\n m_den,\n ln_emb,\n args.data_size,\n args.num_batches,\n args.mini_batch_size,\n args.num_indices_per_lookup,\n args.num_indices_per_lookup_fixed,\n 1, # num_targets\n args.round_targets,\n args.data_generation,\n args.data_trace_file,\n args.data_trace_enable_padding,\n reset_seed_on_access=True,\n rand_data_dist=args.rand_data_dist,\n rand_data_min=args.rand_data_min,\n rand_data_max=args.rand_data_max,\n rand_data_mu=args.rand_data_mu,\n rand_data_sigma=args.rand_data_sigma,\n rand_seed=args.numpy_rand_seed\n ) # WARNING: generates a batch of lookups at once\n\n collate_wrapper_random = collate_wrapper_random_offset\n if offset_to_length_converter:\n collate_wrapper_random = collate_wrapper_random_length\n\n train_loader = torch.utils.data.DataLoader(\n train_data,\n batch_size=1,\n shuffle=False,\n num_workers=args.num_workers,\n collate_fn=collate_wrapper_random,\n pin_memory=False,\n drop_last=False, # True\n )\n return train_data, train_loader\n\n\ndef generate_random_data(\n m_den,\n ln_emb,\n data_size,\n num_batches,\n mini_batch_size,\n num_indices_per_lookup,\n num_indices_per_lookup_fixed,\n num_targets=1,\n round_targets=False,\n data_generation=\"random\",\n trace_file=\"\",\n enable_padding=False,\n length=False, # length for caffe2 version (except dlrm_s_caffe2)\n):\n nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))\n if num_batches != 0:\n nbatches = num_batches\n data_size = nbatches * mini_batch_size\n # print(\"Total number of batches %d\" % nbatches)\n\n # inputs\n lT = []\n lX = []\n lS_offsets = []\n lS_indices = []\n for j in range(0, nbatches):\n # number of data points 
in a batch\n n = min(mini_batch_size, data_size - (j * mini_batch_size))\n\n # generate a batch of dense and sparse features\n if data_generation == \"random\":\n (Xt, lS_emb_offsets, lS_emb_indices) = generate_uniform_input_batch(\n m_den,\n ln_emb,\n n,\n num_indices_per_lookup,\n num_indices_per_lookup_fixed,\n length,\n )\n elif data_generation == \"synthetic\":\n (Xt, lS_emb_offsets, lS_emb_indices) = generate_synthetic_input_batch(\n m_den,\n ln_emb,\n n,\n num_indices_per_lookup,\n num_indices_per_lookup_fixed,\n trace_file,\n enable_padding\n )\n else:\n sys.exit(\n \"ERROR: --data-generation=\" + data_generation + \" is not supported\"\n )\n # dense feature\n lX.append(Xt)\n # sparse feature (sparse indices)\n lS_offsets.append(lS_emb_offsets)\n lS_indices.append(lS_emb_indices)\n\n # generate a batch of target (probability of a click)\n P = generate_random_output_batch(n, num_targets, round_targets)\n lT.append(P)\n\n return (nbatches, lX, lS_offsets, lS_indices, lT)\n\n\ndef generate_random_output_batch(n, num_targets, round_targets=False):\n # target (probability of a click)\n if round_targets:\n P = np.round(ra.rand(n, num_targets).astype(np.float32)).astype(np.float32)\n else:\n P = ra.rand(n, num_targets).astype(np.float32)\n\n return torch.tensor(P)\n\n\n# uniform ditribution (input data)\ndef generate_uniform_input_batch(\n m_den,\n ln_emb,\n n,\n num_indices_per_lookup,\n num_indices_per_lookup_fixed,\n length,\n):\n # dense feature\n Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))\n\n # sparse feature (sparse indices)\n lS_emb_offsets = []\n lS_emb_indices = []\n # for each embedding generate a list of n lookups,\n # where each lookup is composed of multiple sparse indices\n for size in ln_emb:\n lS_batch_offsets = []\n lS_batch_indices = []\n offset = 0\n for _ in range(n):\n # num of sparse indices to be used per embedding (between\n if num_indices_per_lookup_fixed:\n sparse_group_size = np.int64(num_indices_per_lookup)\n else:\n # random between [1,num_indices_per_lookup])\n r = ra.random(1)\n sparse_group_size = np.int64(\n np.round(max([1.0], r * min(size, num_indices_per_lookup)))\n )\n # sparse indices to be used per embedding\n r = ra.random(sparse_group_size)\n sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int64))\n # reset sparse_group_size in case some index duplicates were removed\n sparse_group_size = np.int32(sparse_group.size)\n # store lengths and indices\n if length: # for caffe2 version\n lS_batch_offsets += [sparse_group_size]\n else:\n lS_batch_offsets += [offset]\n lS_batch_indices += sparse_group.tolist()\n # update offset for next iteration\n offset += sparse_group_size\n lS_emb_offsets.append(torch.tensor(lS_batch_offsets))\n lS_emb_indices.append(torch.tensor(lS_batch_indices))\n\n return (Xt, lS_emb_offsets, lS_emb_indices)\n\n\n# random data from uniform or gaussian ditribution (input data)\ndef generate_dist_input_batch(\n m_den,\n ln_emb,\n n,\n num_indices_per_lookup,\n num_indices_per_lookup_fixed,\n rand_data_dist,\n rand_data_min,\n rand_data_max,\n rand_data_mu,\n rand_data_sigma,\n):\n # dense feature\n Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))\n\n # sparse feature (sparse indices)\n lS_emb_offsets = []\n lS_emb_indices = []\n # for each embedding generate a list of n lookups,\n # where each lookup is composed of multiple sparse indices\n for size in ln_emb:\n lS_batch_offsets = []\n lS_batch_indices = []\n offset = 0\n for _ in range(n):\n # num of sparse indices to be used per embedding (between\n 
if num_indices_per_lookup_fixed:\n sparse_group_size = np.int64(num_indices_per_lookup)\n else:\n # random between [1,num_indices_per_lookup])\n r = ra.random(1)\n sparse_group_size = np.int64(\n np.round(max([1.0], r * min(size, num_indices_per_lookup)))\n )\n # sparse indices to be used per embedding\n if rand_data_dist == \"gaussian\":\n if rand_data_mu == -1:\n rand_data_mu = (rand_data_max + rand_data_min) / 2.0\n r = ra.normal(rand_data_mu, rand_data_sigma, sparse_group_size)\n sparse_group = np.clip(r, rand_data_min, rand_data_max)\n sparse_group = np.unique(sparse_group).astype(np.int64)\n elif rand_data_dist == \"uniform\":\n r = ra.random(sparse_group_size)\n sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int64))\n else:\n raise(rand_data_dist, \"distribution is not supported. \\\n please select uniform or gaussian\")\n\n # reset sparse_group_size in case some index duplicates were removed\n sparse_group_size = np.int64(sparse_group.size)\n # store lengths and indices\n lS_batch_offsets += [offset]\n lS_batch_indices += sparse_group.tolist()\n # update offset for next iteration\n offset += sparse_group_size\n lS_emb_offsets.append(torch.tensor(lS_batch_offsets))\n lS_emb_indices.append(torch.tensor(lS_batch_indices))\n\n return (Xt, lS_emb_offsets, lS_emb_indices)\n\n\n# synthetic distribution (input data)\ndef generate_synthetic_input_batch(\n m_den,\n ln_emb,\n n,\n num_indices_per_lookup,\n num_indices_per_lookup_fixed,\n trace_file,\n enable_padding=False,\n):\n # dense feature\n Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))\n\n # sparse feature (sparse indices)\n lS_emb_offsets = []\n lS_emb_indices = []\n # for each embedding generate a list of n lookups,\n # where each lookup is composed of multiple sparse indices\n for i, size in enumerate(ln_emb):\n lS_batch_offsets = []\n lS_batch_indices = []\n offset = 0\n for _ in range(n):\n # num of sparse indices to be used per embedding (between\n if num_indices_per_lookup_fixed:\n sparse_group_size = np.int64(num_indices_per_lookup)\n else:\n # random between [1,num_indices_per_lookup])\n r = ra.random(1)\n sparse_group_size = np.int64(\n max(1, np.round(r * min(size, num_indices_per_lookup))[0])\n )\n # sparse indices to be used per embedding\n file_path = trace_file\n line_accesses, list_sd, cumm_sd = read_dist_from_file(\n file_path.replace(\"j\", str(i))\n )\n # debug prints\n # print(\"input\")\n # print(line_accesses); print(list_sd); print(cumm_sd);\n # print(sparse_group_size)\n # approach 1: rand\n # r = trace_generate_rand(\n # line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding\n # )\n # approach 2: lru\n r = trace_generate_lru(\n line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding\n )\n # WARNING: if the distribution in the file is not consistent\n # with embedding table dimensions, below mod guards against out\n # of range access\n sparse_group = np.unique(r).astype(np.int64)\n minsg = np.min(sparse_group)\n maxsg = np.max(sparse_group)\n if (minsg < 0) or (size <= maxsg):\n print(\n \"WARNING: distribution is inconsistent with embedding \"\n + \"table size (using mod to recover and continue)\"\n )\n sparse_group = np.mod(sparse_group, size).astype(np.int64)\n # sparse_group = np.unique(np.array(np.mod(r, size-1)).astype(np.int64))\n # reset sparse_group_size in case some index duplicates were removed\n sparse_group_size = np.int64(sparse_group.size)\n # store lengths and indices\n lS_batch_offsets += [offset]\n lS_batch_indices += sparse_group.tolist()\n # update 
offset for next iteration\n offset += sparse_group_size\n lS_emb_offsets.append(torch.tensor(lS_batch_offsets))\n lS_emb_indices.append(torch.tensor(lS_batch_indices))\n\n return (Xt, lS_emb_offsets, lS_emb_indices)\n\n\ndef generate_stack_distance(cumm_val, cumm_dist, max_i, i, enable_padding=False):\n u = ra.rand(1)\n if i < max_i:\n # only generate stack distances up to the number of new references seen so far\n j = bisect.bisect(cumm_val, i) - 1\n fi = cumm_dist[j]\n u *= fi # shrink distribution support to exclude last values\n elif enable_padding:\n # WARNING: disable generation of new references (once all have been seen)\n fi = cumm_dist[0]\n u = (1.0 - fi) * u + fi # remap distribution support to exclude first value\n\n for (j, f) in enumerate(cumm_dist):\n if u <= f:\n return cumm_val[j]\n\n\n# WARNING: global define, must be consistent across all synthetic functions\ncache_line_size = 1\n\n\ndef trace_generate_lru(\n line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False\n):\n max_sd = list_sd[-1]\n l = len(line_accesses)\n i = 0\n ztrace = deque()\n for _ in range(out_trace_len):\n sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)\n mem_ref_within_line = 0 # floor(ra.rand(1)*cache_line_size) #0\n\n # generate memory reference\n if sd == 0: # new reference #\n line_ref = line_accesses[0]\n del line_accesses[0]\n line_accesses.append(line_ref)\n mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)\n i += 1\n else: # existing reference #\n line_ref = line_accesses[l - sd]\n mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)\n del line_accesses[l - sd]\n line_accesses.append(line_ref)\n # save generated memory reference\n ztrace.append(mem_ref)\n\n return ztrace\n\n\ndef trace_generate_rand(\n line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False\n):\n max_sd = list_sd[-1]\n l = len(line_accesses) # !!!Unique,\n i = 0\n ztrace = []\n for _ in range(out_trace_len):\n sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)\n mem_ref_within_line = 0 # floor(ra.rand(1)*cache_line_size) #0\n # generate memory reference\n if sd == 0: # new reference #\n line_ref = line_accesses.pop(0)\n line_accesses.append(line_ref)\n mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)\n i += 1\n else: # existing reference #\n line_ref = line_accesses[l - sd]\n mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)\n ztrace.append(mem_ref)\n\n return ztrace\n\n\ndef trace_profile(trace, enable_padding=False):\n # number of elements in the array (assuming 1D)\n # n = trace.size\n\n rstack = deque() # S\n stack_distances = deque() # SDS\n line_accesses = deque() # L\n for x in trace:\n r = np.uint64(x / cache_line_size)\n l = len(rstack)\n try: # found #\n i = rstack.index(r)\n # WARNING: I believe below is the correct depth in terms of meaning of the\n # algorithm, but that is not what seems to be in the paper alg.\n # -1 can be subtracted if we defined the distance between\n # consecutive accesses (e.g. 
r, r) as 0 rather than 1.\n sd = l - i # - 1\n # push r to the end of stack_distances\n stack_distances.appendleft(sd)\n # remove r from its position and insert to the top of stack\n del rstack[i] # rstack.remove(r)\n rstack.append(r)\n except ValueError: # not found #\n sd = 0 # -1\n # push r to the end of stack_distances/line_accesses\n stack_distances.appendleft(sd)\n line_accesses.appendleft(r)\n # push r to the top of stack\n rstack.append(r)\n\n if enable_padding:\n # WARNING: notice that as the ratio between the number of samples (l)\n # and cardinality (c) of a sample increases the probability of\n # generating a sample gets smaller and smaller because there are\n # few new samples compared to repeated samples. This means that for a\n # long trace with relatively small cardinality it will take longer to\n # generate all new samples and therefore obtain full distribution support\n # and hence it takes longer for distribution to resemble the original.\n # Therefore, we may pad the number of new samples to be on par with\n # average number of samples l/c artificially.\n l = len(stack_distances)\n c = max(stack_distances)\n padding = int(np.ceil(l / c))\n stack_distances = stack_distances + [0] * padding\n\n return (rstack, stack_distances, line_accesses)\n\n\n# auxiliary read/write routines\ndef read_trace_from_file(file_path):\n try:\n with open(file_path) as f:\n if args.trace_file_binary_type:\n array = np.fromfile(f, dtype=np.uint64)\n trace = array.astype(np.uint64).tolist()\n else:\n line = f.readline()\n trace = list(map(lambda x: np.uint64(x), line.split(\", \")))\n return trace\n except Exception:\n print(\"ERROR: trace file '{}' is not available.\".format(file_path))\n\n\ndef write_trace_to_file(file_path, trace):\n try:\n if args.trace_file_binary_type:\n with open(file_path, \"wb+\") as f:\n np.array(trace).astype(np.uint64).tofile(f)\n else:\n with open(file_path, \"w+\") as f:\n s = str(trace)\n f.write(s[1 : len(s) - 1])\n except Exception:\n print(\"ERROR: no output trace file has been provided\")\n\n\ndef read_dist_from_file(file_path):\n try:\n with open(file_path, \"r\") as f:\n lines = f.read().splitlines()\n except Exception:\n print(\"Wrong file or file path\")\n # read unique accesses\n unique_accesses = [int(el) for el in lines[0].split(\", \")]\n # read cumulative distribution (elements are passed as two separate lists)\n list_sd = [int(el) for el in lines[1].split(\", \")]\n cumm_sd = [float(el) for el in lines[2].split(\", \")]\n\n return unique_accesses, list_sd, cumm_sd\n\n\ndef write_dist_to_file(file_path, unique_accesses, list_sd, cumm_sd):\n try:\n with open(file_path, \"w\") as f:\n # unique_acesses\n s = str(unique_accesses)\n f.write(s[1 : len(s) - 1] + \"\\n\")\n # list_sd\n s = str(list_sd)\n f.write(s[1 : len(s) - 1] + \"\\n\")\n # cumm_sd\n s = str(cumm_sd)\n f.write(s[1 : len(s) - 1] + \"\\n\")\n except Exception:\n print(\"Wrong file or file path\")\n\n\nif __name__ == \"__main__\":\n import sys\n import operator\n import argparse\n\n ### parse arguments ###\n parser = argparse.ArgumentParser(description=\"Generate Synthetic Distributions\")\n parser.add_argument(\"--trace-file\", type=str, default=\"./input/trace.log\")\n parser.add_argument(\"--trace-file-binary-type\", type=bool, default=False)\n parser.add_argument(\"--trace-enable-padding\", type=bool, default=False)\n parser.add_argument(\"--dist-file\", type=str, default=\"./input/dist.log\")\n parser.add_argument(\n \"--synthetic-file\", type=str, 
default=\"./input/trace_synthetic.log\"\n )\n parser.add_argument(\"--numpy-rand-seed\", type=int, default=123)\n parser.add_argument(\"--print-precision\", type=int, default=5)\n args = parser.parse_args()\n\n ### some basic setup ###\n np.random.seed(args.numpy_rand_seed)\n np.set_printoptions(precision=args.print_precision)\n\n ### read trace ###\n trace = read_trace_from_file(args.trace_file)\n # print(trace)\n\n ### profile trace ###\n (_, stack_distances, line_accesses) = trace_profile(\n trace, args.trace_enable_padding\n )\n stack_distances.reverse()\n line_accesses.reverse()\n # print(line_accesses)\n # print(stack_distances)\n\n ### compute probability distribution ###\n # count items\n l = len(stack_distances)\n dc = sorted(\n collections.Counter(stack_distances).items(), key=operator.itemgetter(0)\n )\n\n # create a distribution\n list_sd = list(map(lambda tuple_x_k: tuple_x_k[0], dc)) # x = tuple_x_k[0]\n dist_sd = list(\n map(lambda tuple_x_k: tuple_x_k[1] / float(l), dc)\n ) # k = tuple_x_k[1]\n cumm_sd = deque() # np.cumsum(dc).tolist() #prefixsum\n for i, (_, k) in enumerate(dc):\n if i == 0:\n cumm_sd.append(k / float(l))\n else:\n # add the 2nd element of the i-th tuple in the dist_sd list\n cumm_sd.append(cumm_sd[i - 1] + (k / float(l)))\n\n ### write stack_distance and line_accesses to a file ###\n write_dist_to_file(args.dist_file, line_accesses, list_sd, cumm_sd)\n\n ### generate correspondinf synthetic ###\n # line_accesses, list_sd, cumm_sd = read_dist_from_file(args.dist_file)\n synthetic_trace = trace_generate_lru(\n line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding\n )\n # synthetic_trace = trace_generate_rand(\n # line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding\n # )\n write_trace_to_file(args.synthetic_file, synthetic_trace)\n"
] | [
[
"numpy.random.rand",
"torch.stack",
"torch.utils.data.RandomSampler",
"numpy.set_printoptions",
"numpy.load",
"numpy.min",
"numpy.random.random",
"numpy.max",
"numpy.concatenate",
"numpy.random.normal",
"torch.tensor",
"torch.utils.data.DataLoader",
"numpy.int32",
"numpy.mod",
"numpy.array",
"numpy.round",
"numpy.fromfile",
"numpy.clip",
"numpy.array_split",
"numpy.ceil",
"numpy.random.seed",
"numpy.random.permutation",
"numpy.uint64",
"numpy.int64",
"numpy.unique"
]
] |
J535D165/asreview | [
"eda3c52a595d739093c3cd6cd37d41eeed6dd15c"
] | [
"asreview/query_strategies/max.py"
] | [
"# Copyright 2019 The ASReview Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Max sampling while saving prediction probabilities.\"\"\"\n\nimport numpy as np\n\nfrom asreview.query_strategies.base import ProbaQueryStrategy\n\n\nclass MaxQuery(ProbaQueryStrategy):\n \"\"\"Maximum sampling query strategy.\n\n Choose the most likely samples to be included according to the model.\n \"\"\"\n\n name = \"max\"\n\n def _query(self, X, pool_idx, n_instances=1, proba=None):\n proba = proba[pool_idx]\n query_idx = np.argsort(proba[:, 0])[:n_instances]\n\n return pool_idx[query_idx], X[pool_idx[query_idx]]\n"
] | [
[
"numpy.argsort"
]
] |
imageslr/NLP | [
"f56796a86620accd487480e5c3bd992cf3dc7578"
] | [
"3.Tasks/Dialog-RL-GAN/gen/seq2seq.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# We disable pylint because we need python3 compatibility.\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom six.moves import zip # pylint: disable=redefined-builtin\nimport tensorflow as tf\nfrom tensorflow.python import shape\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util import nest\n\ntry:\n from tensorflow.python.ops.rnn_cell_impl import _linear\n linear = _linear\nexcept:\n from tensorflow.contrib.rnn.python.ops import core_rnn_cell\n linear = core_rnn_cell._linear # pylint: disable=protected-access\n\n\ndef _argmax_or_mcsearch(embedding, output_projection=None, update_embedding=True, mc_search=False):\n def loop_function(prev, _):\n if output_projection is not None:\n # TODO 这一行 prev的形状是什么?[batch_size, output_size || vocab_size]\n # 这里第一个维度表示的是并行训练的batch?第二个维度是最后一层的隐向量?\n prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])\n\n\n if isinstance(mc_search, bool):\n # TODO 这一行 prev的形状是什么?[batch_size, output_size]\n # 这里第一个维度表示的是并行训练的batch?第二个维度是单词个数、表示这个预测单词是每个单词的概率?\n # 如果是mc_search,则每个token是采样得到的,最后reshape从二维变成一维\n # TODO 那prev_symbol是指当前预测的单词?\n # TODO 10.31 周三 使用蒙特卡洛搜索时会生成beam_size个样例,\n prev_symbol = tf.reshape(tf.multinomial(prev, 1), [-1]) if mc_search else math_ops.argmax(prev, 1)\n else:\n prev_symbol = tf.cond(mc_search, lambda: tf.reshape(tf.multinomial(prev, 1), [-1]), lambda: tf.argmax(prev, 1))\n\n\n emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)\n if not update_embedding:\n emb_prev = array_ops.stop_gradient(emb_prev)\n return emb_prev\n return loop_function\n\ndef _extract_argmax_and_embed(embedding, output_projection=None, update_embedding=True):\n \"\"\"Get a loop_function that extracts the previous symbol and embeds it.\n\n Args:\n embedding: embedding tensor for symbols.\n output_projection: None or a pair (W, B). 
If provided, each fed previous\n output will first be multiplied by W and added B.\n update_embedding: Boolean; if False, the gradients will not propagate\n through the embeddings.\n\n Returns:\n A loop function.\n \"\"\"\n def loop_function(prev, _):\n if output_projection is not None:\n prev = nn_ops.xw_plus_b(\n prev, output_projection[0], output_projection[1])\n prev_symbol = math_ops.argmax(prev, 1)\n # Note that gradients will not propagate through the second parameter of\n # embedding_lookup.\n emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)\n if not update_embedding:\n emb_prev = array_ops.stop_gradient(emb_prev)\n return emb_prev\n return loop_function\n\n\ndef rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,\n scope=None):\n \"\"\"RNN decoder for the sequence-to-sequence model.\n\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor with shape [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to the i-th output\n in order to generate the i+1-st input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http://arxiv.org/abs/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n scope: VariableScope for the created subgraph; defaults to \"rnn_decoder\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing generated outputs.\n state: The state of each cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n (Note that in some cases, like basic RNN cell or GRU cell, outputs and\n states can be the same. 
They are different for LSTM cells though.)\n \"\"\"\n with variable_scope.variable_scope(scope or \"rnn_decoder\"):\n state = initial_state\n outputs = []\n prev = None\n for i, inp in enumerate(decoder_inputs):\n if loop_function is not None and prev is not None:\n with variable_scope.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i)\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n output, state = cell(inp, state)\n outputs.append(output)\n if loop_function is not None:\n prev = output\n return outputs, state\n\n\ndef basic_rnn_seq2seq(\n encoder_inputs, decoder_inputs, cell, dtype=dtypes.float32, scope=None):\n \"\"\"Basic RNN sequence-to-sequence model.\n\n This model first runs an RNN to encode encoder_inputs into a state vector,\n then runs decoder, initialized with the last encoder state, on decoder_inputs.\n Encoder and decoder use the same RNN cell type, but don't share parameters.\n\n Args:\n encoder_inputs: A list of 2D Tensors [batch_size x input_size].\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n dtype: The dtype of the initial state of the RNN cell (default: tf.float32).\n scope: VariableScope for the created subgraph; default: \"basic_rnn_seq2seq\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing the generated outputs.\n state: The state of each decoder cell in the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n \"\"\"\n with variable_scope.variable_scope(scope or \"basic_rnn_seq2seq\"):\n _, enc_state = rnn.rnn(cell, encoder_inputs, dtype=dtype)\n return rnn_decoder(decoder_inputs, enc_state, cell)\n\n\ndef tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,\n loop_function=None, dtype=dtypes.float32, scope=None):\n \"\"\"RNN sequence-to-sequence model with tied encoder and decoder parameters.\n\n This model first runs an RNN to encode encoder_inputs into a state vector, and\n then runs decoder, initialized with the last encoder state, on decoder_inputs.\n Encoder and decoder use the same RNN cell and share parameters.\n\n Args:\n encoder_inputs: A list of 2D Tensors [batch_size x input_size].\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to i-th output\n in order to generate i+1-th input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol), see rnn_decoder for details.\n dtype: The dtype of the initial state of the rnn cell (default: tf.float32).\n scope: VariableScope for the created subgraph; default: \"tied_rnn_seq2seq\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing the generated outputs.\n state: The state of each decoder cell in each time-step. 
This is a list\n with length len(decoder_inputs) -- one item for each time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n \"\"\"\n with variable_scope.variable_scope(\"combined_tied_rnn_seq2seq\"):\n scope = scope or \"tied_rnn_seq2seq\"\n _, enc_state = rnn.rnn(\n cell, encoder_inputs, dtype=dtype, scope=scope)\n variable_scope.get_variable_scope().reuse_variables()\n return rnn_decoder(decoder_inputs, enc_state, cell,\n loop_function=loop_function, scope=scope)\n\n\ndef embedding_rnn_decoder(decoder_inputs,\n initial_state,\n cell,\n num_symbols,\n embedding_size,\n output_projection=None,\n feed_previous=False,\n update_embedding_for_previous=True,\n scope=None):\n\n with variable_scope.variable_scope(scope or \"embedding_rnn_decoder\") as scope:\n if output_projection is not None:\n dtype = scope.dtype\n proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)\n proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])\n proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)\n proj_biases.get_shape().assert_is_compatible_with([num_symbols])\n\n embedding = variable_scope.get_variable(\"embedding\",\n [num_symbols, embedding_size])\n loop_function = _extract_argmax_and_embed(\n embedding, output_projection,\n update_embedding_for_previous) if feed_previous else None\n emb_inp = (\n embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs)\n return rnn_decoder(emb_inp, initial_state, cell,\n loop_function=loop_function)\n\n\ndef embedding_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n num_encoder_symbols,\n num_decoder_symbols,\n embedding_size,\n output_projection=None,\n feed_previous=False,\n dtype=None,\n scope=None):\n\n with variable_scope.variable_scope(scope or \"embedding_rnn_seq2seq\") as scope:\n if dtype is not None:\n scope.set_dtype(dtype)\n else:\n dtype = scope.dtype\n\n # Encoder.\n encoder_cell = tf.contrib.rnn.EmbeddingWrapper(\n cell, embedding_classes=num_encoder_symbols,\n embedding_size=embedding_size)\n _, encoder_state = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)\n\n # Decoder.\n if output_projection is None:\n cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)\n\n if isinstance(feed_previous, bool):\n return embedding_rnn_decoder(\n decoder_inputs,\n encoder_state,\n cell,\n num_decoder_symbols,\n embedding_size,\n output_projection=output_projection,\n feed_previous=feed_previous,\n scope=scope)\n\n # If feed_previous is a Tensor, we construct 2 graphs and use cond.\n def decoder(feed_previous_bool):\n reuse = None if feed_previous_bool else True\n with variable_scope.variable_scope(\n variable_scope.get_variable_scope(), reuse=reuse) as scope:\n outputs, state = embedding_rnn_decoder(\n decoder_inputs, encoder_state, cell, num_decoder_symbols,\n embedding_size, output_projection=output_projection,\n feed_previous=feed_previous_bool,\n update_embedding_for_previous=False)\n state_list = [state]\n if nest.is_sequence(state):\n state_list = nest.flatten(state)\n return outputs + state_list\n\n outputs_and_state = control_flow_ops.cond(feed_previous,\n lambda: decoder(True),\n lambda: decoder(False))\n outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.\n state_list = outputs_and_state[outputs_len:]\n state = state_list[0]\n if nest.is_sequence(encoder_state):\n state = nest.pack_sequence_as(structure=encoder_state,\n flat_sequence=state_list)\n return outputs_and_state[:outputs_len], state\n\n\ndef 
embedding_tied_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n num_symbols,\n embedding_size,\n num_decoder_symbols=None,\n output_projection=None,\n feed_previous=False,\n dtype=None,\n scope=None):\n \"\"\"Embedding RNN sequence-to-sequence model with tied (shared) parameters.\n\n This model first embeds encoder_inputs by a newly created embedding (of shape\n [num_symbols x input_size]). Then it runs an RNN to encode embedded\n encoder_inputs into a state vector. Next, it embeds decoder_inputs using\n the same embedding. Then it runs RNN decoder, initialized with the last\n encoder state, on embedded decoder_inputs. The decoder output is over symbols\n from 0 to num_decoder_symbols - 1 if num_decoder_symbols is none; otherwise it\n is over 0 to num_symbols - 1.\n\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n num_symbols: Integer; number of symbols for both encoder and decoder.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n num_decoder_symbols: Integer; number of output symbols for decoder. If\n provided, the decoder output is over symbols 0 to num_decoder_symbols - 1.\n Otherwise, decoder output is over symbols 0 to num_symbols - 1. Note that\n this assumes that the vocabulary is set up such that the first\n num_decoder_symbols of num_symbols are part of decoding.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_symbols] and B has\n shape [num_symbols]; if provided and feed_previous=True, each\n fed previous output will first be multiplied by W and added B.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first\n of decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype to use for the initial RNN states (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_tied_rnn_seq2seq\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_symbols] containing the generated\n outputs where output_symbols = num_decoder_symbols if\n num_decoder_symbols is not None otherwise output_symbols = num_symbols.\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n\n Raises:\n ValueError: When output_projection has the wrong shape.\n \"\"\"\n with variable_scope.variable_scope(\n scope or \"embedding_tied_rnn_seq2seq\", dtype=dtype) as scope:\n dtype = scope.dtype\n\n if output_projection is not None:\n proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)\n proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])\n proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)\n proj_biases.get_shape().assert_is_compatible_with([num_symbols])\n\n embedding = variable_scope.get_variable(\n \"embedding\", [num_symbols, embedding_size], dtype=dtype)\n\n emb_encoder_inputs = [embedding_ops.embedding_lookup(embedding, x)\n for x in encoder_inputs]\n emb_decoder_inputs = [embedding_ops.embedding_lookup(embedding, x)\n for x in decoder_inputs]\n\n output_symbols = num_symbols\n if 
num_decoder_symbols is not None:\n output_symbols = num_decoder_symbols\n if output_projection is None:\n cell = rnn_cell.OutputProjectionWrapper(cell, output_symbols)\n\n if isinstance(feed_previous, bool):\n loop_function = _extract_argmax_and_embed(\n embedding, output_projection, True) if feed_previous else None\n return tied_rnn_seq2seq(emb_encoder_inputs, emb_decoder_inputs, cell,\n loop_function=loop_function, dtype=dtype)\n\n # If feed_previous is a Tensor, we construct 2 graphs and use cond.\n def decoder(feed_previous_bool):\n loop_function = _extract_argmax_and_embed(\n embedding, output_projection, False) if feed_previous_bool else None\n reuse = None if feed_previous_bool else True\n with variable_scope.variable_scope(variable_scope.get_variable_scope(),\n reuse=reuse):\n outputs, state = tied_rnn_seq2seq(\n emb_encoder_inputs, emb_decoder_inputs, cell,\n loop_function=loop_function, dtype=dtype)\n state_list = [state]\n if nest.is_sequence(state):\n state_list = nest.flatten(state)\n return outputs + state_list\n\n outputs_and_state = control_flow_ops.cond(feed_previous,\n lambda: decoder(True),\n lambda: decoder(False))\n outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.\n state_list = outputs_and_state[outputs_len:]\n state = state_list[0]\n # Calculate zero-state to know it's structure.\n static_batch_size = encoder_inputs[0].get_shape()[0]\n for inp in encoder_inputs[1:]:\n static_batch_size.merge_with(inp.get_shape()[0])\n batch_size = static_batch_size.value\n if batch_size is None:\n batch_size = array_ops.shape(encoder_inputs[0])[0]\n zero_state = cell.zero_state(batch_size, dtype)\n if nest.is_sequence(zero_state):\n state = nest.pack_sequence_as(structure=zero_state,\n flat_sequence=state_list)\n return outputs_and_state[:outputs_len], state\n\n\ndef attention_decoder(decoder_inputs,\n initial_state,\n attention_states,\n cell,\n output_size=None,\n num_heads=1,\n loop_function=None,\n dtype=None,\n scope=None,\n initial_state_attention=False):\n \"\"\"RNN decoder with attention for the sequence-to-sequence model.\n\n In this context \"attention\" means that, during decoding, the RNN can look up\n information in the additional tensor attention_states, and it does this by\n focusing on a few entries from the tensor. This model has proven to yield\n especially good results in a number of sequence-to-sequence tasks. This\n implementation is based on http://arxiv.org/abs/1412.7449 (see below for\n details). It is recommended for complex sequence-to-sequence tasks.\n\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor [batch_size x cell.state_size].\n attention_states: 3D Tensor [batch_size x attn_length x attn_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n output_size: Size of the output vectors; if None, we use cell.output_size.\n num_heads: Number of attention heads that read from attention_states.\n loop_function: If not None, this function will be applied to i-th output\n in order to generate i+1-th input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). 
This can be used for decoding,\n but also for training to emulate http://arxiv.org/abs/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n dtype: The dtype to use for the RNN initial state (default: tf.float32).\n scope: VariableScope for the created subgraph; default: \"attention_decoder\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states -- useful when we wish to resume decoding from a previously\n stored decoder state and attention states.\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors of\n shape [batch_size x output_size]. These represent the generated outputs.\n Output i is computed from input i (which is either the i-th element\n of decoder_inputs or loop_function(output {i-1}, i)) as follows.\n First, we run the cell on a combination of the input and previous\n attention masks:\n cell_output, new_state = cell(linear(input, prev_attn), prev_state).\n Then, we calculate new attention masks:\n new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))\n and then we calculate the output:\n output = linear(cell_output, new_attn).\n state: The state of each decoder cell the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n\n Raises:\n ValueError: when num_heads is not positive, there are no inputs, shapes\n of attention_states are not set, or input size cannot be inferred\n from the input.\n \"\"\"\n if not decoder_inputs:\n raise ValueError(\"Must provide at least 1 input to attention decoder.\")\n if num_heads < 1:\n raise ValueError(\"With less than 1 heads, use a non-attention decoder.\")\n if attention_states.get_shape()[2].value is None:\n raise ValueError(\"Shape[2] of attention_states must be known: %s\"\n % attention_states.get_shape())\n if output_size is None:\n output_size = cell.output_size\n\n with variable_scope.variable_scope(\n scope or \"attention_decoder\", dtype=dtype) as scope:\n dtype = scope.dtype\n\n batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.\n attn_length = attention_states.get_shape()[1].value\n if attn_length is None:\n attn_length = shape(attention_states)[1]\n attn_size = attention_states.get_shape()[2].value\n\n # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.\n hidden = array_ops.reshape(\n attention_states, [-1, attn_length, 1, attn_size])\n hidden_features = []\n v = []\n attention_vec_size = attn_size # Size of query vectors for attention.\n for a in xrange(num_heads):\n k = variable_scope.get_variable(\"AttnW_%d\" % a,\n [1, 1, attn_size, attention_vec_size])\n hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], \"SAME\"))\n v.append(\n variable_scope.get_variable(\"AttnV_%d\" % a, [attention_vec_size]))\n\n state = initial_state\n\n def attention(query):\n \"\"\"Put attention masks on hidden using hidden_features and query.\"\"\"\n ds = [] # Results of attention reads will be stored here.\n if nest.is_sequence(query): # If the query is a tuple, flatten it.\n query_list = nest.flatten(query)\n for q in query_list: # Check that ndims == 2 if specified.\n ndims = q.get_shape().ndims\n if ndims:\n assert ndims == 2\n query = array_ops.concat(query_list, 1)\n for a 
in xrange(num_heads):\n with variable_scope.variable_scope(\"Attention_%d\" % a):\n y = linear(query, attention_vec_size, True)\n y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(\n v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])\n a = nn_ops.softmax(s)\n # Now calculate the attention-weighted vector d.\n d = math_ops.reduce_sum(\n array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n ds.append(array_ops.reshape(d, [-1, attn_size]))\n return ds\n\n outputs = []\n prev = None\n batch_attn_size = array_ops.stack([batch_size, attn_size])\n attns = [array_ops.zeros(batch_attn_size, dtype=dtype)\n for _ in xrange(num_heads)]\n for a in attns: # Ensure the second shape of attention vectors is set.\n a.set_shape([None, attn_size])\n if initial_state_attention:\n attns = attention(initial_state)\n for i, inp in enumerate(decoder_inputs):\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n # If loop_function is set, we use it instead of decoder_inputs.\n if loop_function is not None and prev is not None:\n with variable_scope.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i)\n # Merge input and previous attentions into one vector of the right size.\n input_size = inp.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from input: %s\" % inp.name)\n x = linear([inp] + attns, input_size, True)\n # Run the RNN.\n cell_output, state = cell(x, state)\n # Run the attention mechanism.\n if i == 0 and initial_state_attention:\n with variable_scope.variable_scope(variable_scope.get_variable_scope(),\n reuse=True):\n attns = attention(state)\n else:\n attns = attention(state)\n\n with variable_scope.variable_scope(\"AttnOutputProjection\"):\n output = linear([cell_output] + attns, output_size, True)\n if loop_function is not None:\n prev = output\n outputs.append(output)\n\n return outputs, state\n\n\ndef embedding_attention_decoder(decoder_inputs,\n initial_state,\n attention_states,\n cell,\n num_symbols,\n embedding_size,\n num_heads=1,\n output_size=None,\n output_projection=None,\n feed_previous=False,\n update_embedding_for_previous=True,\n dtype=None,\n scope=None,\n initial_state_attention=False,\n mc_search = False):\n \"\"\"RNN decoder with embedding and attention and a pure-decoding option.\n\n Args:\n decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).\n initial_state: 2D Tensor [batch_size x cell.state_size].\n attention_states: 3D Tensor [batch_size x attn_length x attn_size].\n cell: rnn_cell.RNNCell defining the cell function.\n num_symbols: Integer, how many symbols come into the embedding.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n num_heads: Number of attention heads that read from attention_states.\n output_size: Size of the output vectors; if None, use output_size.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_symbols] and B has shape\n [num_symbols]; if provided and feed_previous=True, each fed previous\n output will first be multiplied by W and added B.\n feed_previous: Boolean; if True, only the first of decoder_inputs will be\n used (the \"GO\" symbol), and all other decoder inputs will be generated by:\n next = embedding_lookup(embedding, argmax(previous_output)),\n In effect, this implements a greedy decoder. 
It can also be used\n during training to emulate http://arxiv.org/abs/1506.03099.\n If False, decoder_inputs are used as given (the standard decoder case).\n update_embedding_for_previous: Boolean; if False and feed_previous=True,\n only the embedding for the first symbol of decoder_inputs (the \"GO\"\n symbol) will be updated by back propagation. Embeddings for the symbols\n generated from the decoder itself remain unchanged. This parameter has\n no effect if feed_previous=False.\n dtype: The dtype to use for the RNN initial states (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_attention_decoder\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states -- useful when we wish to resume decoding from a previously\n stored decoder state and attention states.\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing the generated outputs.\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n\n Raises:\n ValueError: When output_projection has the wrong shape.\n \"\"\"\n if output_size is None:\n output_size = cell.output_size\n if output_projection is not None:\n proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)\n proj_biases.get_shape().assert_is_compatible_with([num_symbols])\n\n with variable_scope.variable_scope(\n scope or \"embedding_attention_decoder\", dtype=dtype) as scope:\n\n embedding = variable_scope.get_variable(\"embedding\",\n [num_symbols, embedding_size])\n\n loop_function = None\n if feed_previous == True:\n loop_function = _argmax_or_mcsearch(embedding, output_projection, update_embedding_for_previous, mc_search)\n # if isinstance(mc_search, bool):\n # if feed_previous == True and mc_search == True:\n # loop_function = _mc_argmax_and_embed(embedding, output_projection, update_embedding_for_previous)\n # elif feed_previous == True and mc_search == False:\n # loop_function = _extract_argmax_and_embed(embedding, output_projection, update_embedding_for_previous)\n # elif (feed_previous == True):\n # loop_function = control_flow_ops.cond(mc_search,\n # _mc_argmax_and_embed(embedding, output_projection, update_embedding_for_previous),\n # _extract_argmax_and_embed(embedding, output_projection, update_embedding_for_previous))\n\n emb_inp = [\n embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs]\n return attention_decoder(\n emb_inp,\n initial_state,\n attention_states,\n cell,\n output_size=output_size,\n num_heads=num_heads,\n loop_function=loop_function,\n initial_state_attention=initial_state_attention,\n scope=scope)\n\n\ndef embedding_attention_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n num_encoder_symbols,\n num_decoder_symbols,\n embedding_size,\n num_heads=1,\n output_projection=None,\n feed_previous=False,\n dtype=None,\n scope=None,\n initial_state_attention=False,\n mc_search=False):\n\n with variable_scope.variable_scope(\n scope or \"embedding_attention_seq2seq\", dtype=dtype) as scope:\n dtype = scope.dtype\n # Encoder.\n encoder_cell = tf.contrib.rnn.EmbeddingWrapper(\n cell, embedding_classes=num_encoder_symbols,\n embedding_size=embedding_size)\n encoder_outputs, encoder_state = tf.contrib.rnn.static_rnn( # TODO bidirect-rnn\n encoder_cell, encoder_inputs, dtype=dtype)\n\n 
# First calculate a concatenation of encoder outputs to put attention on.\n top_states = [array_ops.reshape(e, [-1, 1, cell.output_size]) # top_states: [layer_num, T, 1, output_size]\n for e in encoder_outputs] # encoder_outputs: [layer_num, T, output_size]\n attention_states = array_ops.concat(top_states, 1)\n\n # Decoder.\n output_size = None\n if output_projection is None:\n cell = tf.contrib.rnn.OutputProjectionWrapper(cell, num_decoder_symbols)\n output_size = num_decoder_symbols\n\n if isinstance(feed_previous, bool):\n outputs, state = embedding_attention_decoder(\n decoder_inputs,\n encoder_state,\n attention_states,\n cell,\n num_decoder_symbols,\n embedding_size,\n num_heads=num_heads,\n output_size=output_size,\n output_projection=output_projection,\n feed_previous=feed_previous,\n initial_state_attention=initial_state_attention,\n mc_search=mc_search,\n scope=scope)\n return outputs, state, encoder_state\n\n # If feed_previous is a Tensor, we construct 2 graphs and use cond.\n def decoder(feed_previous_bool):\n reuse = None if feed_previous_bool else True\n with variable_scope.variable_scope(\n variable_scope.get_variable_scope(), reuse=reuse) as scope:\n outputs, state = embedding_attention_decoder(\n decoder_inputs,\n encoder_state,\n attention_states,\n cell,\n num_decoder_symbols,\n embedding_size,\n num_heads=num_heads,\n output_size=output_size,\n output_projection=output_projection,\n feed_previous=feed_previous_bool,\n update_embedding_for_previous=False,\n initial_state_attention=initial_state_attention,\n mc_search=mc_search,\n scope=scope)\n state_list = [state]\n if nest.is_sequence(state):\n state_list = nest.flatten(state)\n return outputs + state_list\n\n outputs_and_state = control_flow_ops.cond(feed_previous,\n lambda: decoder(True),\n lambda: decoder(False))\n outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.\n state_list = outputs_and_state[outputs_len:]\n state = state_list[0]\n if nest.is_sequence(encoder_state):\n state = nest.pack_sequence_as(structure=encoder_state,\n flat_sequence=state_list)\n return outputs_and_state[:outputs_len], state, encoder_state\n\n\ndef one2many_rnn_seq2seq(encoder_inputs,\n decoder_inputs_dict,\n cell,\n num_encoder_symbols,\n num_decoder_symbols_dict,\n embedding_size,\n feed_previous=False,\n dtype=None,\n scope=None):\n \"\"\"One-to-many RNN sequence-to-sequence model (multi-task).\n\n This is a multi-task sequence-to-sequence model with one encoder and multiple\n decoders. 
Reference to multi-task sequence-to-sequence learning can be found\n here: http://arxiv.org/abs/1511.06114\n\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs_dict: A dictionany mapping decoder name (string) to\n the corresponding decoder_inputs; each decoder_inputs is a list of 1D\n Tensors of shape [batch_size]; num_decoders is defined as\n len(decoder_inputs_dict).\n cell: rnn_cell.RNNCell defining the cell function and size.\n num_encoder_symbols: Integer; number of symbols on the encoder side.\n num_decoder_symbols_dict: A dictionary mapping decoder name (string) to an\n integer specifying number of symbols for the corresponding decoder;\n len(num_decoder_symbols_dict) must be equal to num_decoders.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of\n decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype of the initial state for both the encoder and encoder\n rnn cells (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"one2many_rnn_seq2seq\"\n\n Returns:\n A tuple of the form (outputs_dict, state_dict), where:\n outputs_dict: A mapping from decoder name (string) to a list of the same\n length as decoder_inputs_dict[name]; each element in the list is a 2D\n Tensors with shape [batch_size x num_decoder_symbol_list[name]]\n containing the generated outputs.\n state_dict: A mapping from decoder name (string) to the final state of the\n corresponding decoder RNN; it is a 2D Tensor of shape\n [batch_size x cell.state_size].\n \"\"\"\n outputs_dict = {}\n state_dict = {}\n\n with variable_scope.variable_scope(\n scope or \"one2many_rnn_seq2seq\", dtype=dtype) as scope:\n dtype = scope.dtype\n\n # Encoder.\n encoder_cell = tf.contrib.rnn.EmbeddingWrapper(\n cell, embedding_classes=num_encoder_symbols,\n embedding_size=embedding_size)\n _, encoder_state = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)\n\n # Decoder.\n for name, decoder_inputs in decoder_inputs_dict.items():\n num_decoder_symbols = num_decoder_symbols_dict[name]\n\n with variable_scope.variable_scope(\"one2many_decoder_\" + str(\n name)) as scope:\n decoder_cell = rnn_cell.OutputProjectionWrapper(cell,\n num_decoder_symbols)\n if isinstance(feed_previous, bool):\n outputs, state = embedding_rnn_decoder(\n decoder_inputs, encoder_state, decoder_cell, num_decoder_symbols,\n embedding_size, feed_previous=feed_previous)\n else:\n # If feed_previous is a Tensor, we construct 2 graphs and use cond.\n def filled_embedding_rnn_decoder(feed_previous):\n \"\"\"The current decoder with a fixed feed_previous parameter.\"\"\"\n # pylint: disable=cell-var-from-loop\n reuse = None if feed_previous else True\n vs = variable_scope.get_variable_scope()\n with variable_scope.variable_scope(vs, reuse=reuse):\n outputs, state = embedding_rnn_decoder(\n decoder_inputs, encoder_state, decoder_cell,\n num_decoder_symbols, embedding_size,\n feed_previous=feed_previous)\n # pylint: enable=cell-var-from-loop\n state_list = [state]\n if nest.is_sequence(state):\n state_list = nest.flatten(state)\n return outputs + state_list\n\n outputs_and_state = control_flow_ops.cond(\n feed_previous,\n lambda: filled_embedding_rnn_decoder(True),\n lambda: filled_embedding_rnn_decoder(False))\n # 
Outputs length is the same as for decoder inputs.\n outputs_len = len(decoder_inputs)\n outputs = outputs_and_state[:outputs_len]\n state_list = outputs_and_state[outputs_len:]\n state = state_list[0]\n if nest.is_sequence(encoder_state):\n state = nest.pack_sequence_as(structure=encoder_state,\n flat_sequence=state_list)\n outputs_dict[name] = outputs\n state_dict[name] = state\n\n return outputs_dict, state_dict\n\n\n# def sequence_loss_by_mle(logits, targets, emb_dim, sequence_length, batch_size, name=None):\n# pass\n# if len(targets) != len(logits) or len(weights) != len(logits):\n# raise ValueError(\"Lengths of logits, weights, and targets must be the same \"\n# \"%d, %d, %d.\" % (len(logits), len(weights), len(targets)))\n# with ops.name_scope(name, \"sequence_loss_by_mle\",\n# logits + targets + weights):\n#\n# pretrain_loss = -tf.reduce_sum(\n# tf.one_hot(tf.to_int32(tf.reshape(targets, [-1])), emb_dim, 1.0, 0.0) * tf.log(\n# tf.clip_by_value(tf.reshape(logits, [-1, emb_dim]), 1e-20, 1.0)\n# )\n# ) / (sequence_length * batch_size)\n#\n#\n#\n# log_perp_list = []\n# for logit, target, weight in zip(logits, targets, weights):\n# pass\n\n# def sequence_loss_by_example(logits, targets, weights,\n# average_across_timesteps=True,\n# softmax_loss_function=None,up_reward=None, policy_gradient=None, name=None):\n# if len(targets) != len(logits) or len(weights) != len(logits):\n# raise ValueError(\"Lengths of logits, weights, and targets must be the same \"\n# \"%d, %d, %d.\" % (len(logits), len(weights), len(targets)))\n# with ops.name_scope(name, \"sequence_loss_by_example\",\n# logits + targets + weights):\n# log_perp_list = []\n# for logit, target, weight in zip(logits, targets, weights):\n# if softmax_loss_function is None:\n# # TODO(irving,ebrevdo): This reshape is needed because\n# # sequence_loss_by_example is called with scalars sometimes, which\n# # violates our general scalar strictness policy.\n# target = array_ops.reshape(target, [-1])\n# crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(\n# logits=logit, labels=target)\n# else:\n# #crossent = softmax_loss_function(logit, target)\n# crossent = tf.cond(up_reward,\n# lambda :policy_gradient(logit, target),\n# lambda :softmax_loss_function(logit,target))\n# log_perp_list.append(crossent * weight)\n# log_perps = math_ops.add_n(log_perp_list)\n# if average_across_timesteps:\n# total_size = math_ops.add_n(weights)\n# total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.\n# log_perps /= total_size\n# return log_perps\n\ndef sequence_loss_by_example(logits, targets, weights,\n average_across_timesteps=True,\n softmax_loss_function=None, name=None):\n if len(targets) != len(logits) or len(weights) != len(logits):\n raise ValueError(\"Lengths of logits, weights, and targets must be the same \"\n \"%d, %d, %d.\" % (len(logits), len(weights), len(targets)))\n with ops.name_scope(name, \"sequence_loss_by_example\",\n logits + targets + weights):\n log_perp_list = []\n for logit, target, weight in zip(logits, targets, weights):\n if softmax_loss_function is None:\n # TODO(irving,ebrevdo): This reshape is needed because\n # sequence_loss_by_example is called with scalars sometimes, which\n # violates our general scalar strictness policy.\n target = array_ops.reshape(target, [-1])\n crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(\n logits=logit, labels=target)\n else:\n crossent = softmax_loss_function(logit, target)\n log_perp_list.append(crossent * weight)\n log_perps = math_ops.add_n(log_perp_list)\n if 
average_across_timesteps:\n total_size = math_ops.add_n(weights)\n total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.\n log_perps /= total_size\n return log_perps\n\n\ndef sequence_loss(logits, targets, weights,\n average_across_timesteps=True, average_across_batch=True,\n softmax_loss_function=None, name=None):\n \"\"\"Weighted cross-entropy loss for a sequence of logits, batch-collapsed.\n\n Args:\n logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].\n targets: List of 1D batch-sized int32 Tensors of the same length as logits.\n weights: List of 1D batch-sized float-Tensors of the same length as logits.\n average_across_timesteps: If set, divide the returned cost by the total\n label weight.\n average_across_batch: If set, divide the returned cost by the batch size.\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n name: Optional name for this operation, defaults to \"sequence_loss\".\n\n Returns:\n A scalar float Tensor: The average log-perplexity per symbol (weighted).\n\n Raises:\n ValueError: If len(logits) is different from len(targets) or len(weights).\n \"\"\"\n with ops.name_scope(name, \"sequence_loss\", logits + targets + weights):\n cost = math_ops.reduce_sum(sequence_loss_by_example(\n logits, targets, weights,\n average_across_timesteps=average_across_timesteps,\n softmax_loss_function=softmax_loss_function))\n if average_across_batch:\n batch_size = array_ops.shape(targets[0])[0]\n return cost / math_ops.cast(batch_size, cost.dtype)\n else:\n return cost\n\ndef sequence_loss_by_mle(logits, targets, vocab_size, sequence_length, batch_size, output_projection=None):\n #print(\"logits: \", np.shape(logits[0]))\n #logits: [seq_len, batch_size, emb_dim]\n #targets: [seq_len, batch_size] =====transpose====> [batch_size, seq_len]\n # labels = tf.to_int32(tf.transpose(targets))\n #targets: [seq_len, batch_size] ====reshape[-1]====> [seq_len * batch_size]\n labels = tf.to_int32(tf.reshape(targets, [-1]))\n\n if output_projection is not None:\n #logits = nn_ops.xw_plus_b(logits, output_projection[0], output_projection[1])\n logits = [tf.matmul(logit, output_projection[0]) + output_projection[1] for logit in logits]\n\n reshape_logits = tf.reshape(logits, [-1, vocab_size]) #[seq_len * batch_size, vocab_size]\n\n prediction = tf.clip_by_value(reshape_logits, 1e-20, 1.0)\n\n pretrain_loss = -tf.reduce_sum(\n # [seq_len * batch_size , vocab_size]\n tf.one_hot(labels, vocab_size, 1.0, 0.0) * tf.log(prediction)\n ) / (sequence_length * batch_size)\n return pretrain_loss\n\n\ndef model_with_buckets(encoder_inputs, decoder_inputs, targets, weights, buckets, vocab_size, batch_size, seq2seq,\n output_projection=None, softmax_loss_function=None, per_example_loss=False, name=None):\n if len(encoder_inputs) < buckets[-1][0]:\n raise ValueError(\"Length of encoder_inputs (%d) must be at least that of la\"\n \"st bucket (%d).\" % (len(encoder_inputs), buckets[-1][0]))\n if len(targets) < buckets[-1][1]:\n raise ValueError(\"Length of targets (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(targets), buckets[-1][1]))\n if len(weights) < buckets[-1][1]:\n raise ValueError(\"Length of weights (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(weights), buckets[-1][1]))\n # ↑ 输入数据必须填充为最大长度\n\n all_inputs = encoder_inputs + decoder_inputs + targets + weights\n losses = []\n outputs = []\n encoder_states = []\n with ops.name_scope(name, 
\"model_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets): # 为每个bucket创建一个模型\n with variable_scope.variable_scope(variable_scope.get_variable_scope(),\n reuse=True if j > 0 else None):\n bucket_outputs, decoder_states, encoder_state = seq2seq(encoder_inputs[:bucket[0]],\n decoder_inputs[:bucket[1]])\n outputs.append(bucket_outputs)\n #print(\"bucket outputs: %s\" %bucket_outputs)\n encoder_states.append(encoder_state)\n if per_example_loss:\n losses.append(sequence_loss_by_example(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n else: # 所有模型的loss都被保存起来\n # losses.append(sequence_loss_by_mle(outputs[-1], targets[:bucket[1]], vocab_size, bucket[1], batch_size, output_projection))\n losses.append(sequence_loss(outputs[-1], targets[:bucket[1]], weights[:bucket[1]], softmax_loss_function=softmax_loss_function))\n\n return outputs, losses, encoder_states\n"
] | [
[
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.multinomial",
"tensorflow.matmul",
"tensorflow.python.ops.math_ops.tanh",
"tensorflow.reshape",
"tensorflow.clip_by_value",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.one_hot",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.ops.embedding_ops.embedding_lookup",
"tensorflow.python.shape",
"tensorflow.argmax",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.contrib.rnn.EmbeddingWrapper",
"tensorflow.python.ops.nn_ops.softmax",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.contrib.rnn.OutputProjectionWrapper",
"tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits",
"tensorflow.log",
"tensorflow.contrib.rnn.static_rnn",
"tensorflow.python.ops.rnn.rnn",
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.python.ops.nn_ops.xw_plus_b",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.stop_gradient",
"tensorflow.python.ops.rnn_cell.OutputProjectionWrapper",
"tensorflow.python.ops.nn_ops.conv2d",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope"
]
] |
jgphilpott/sensors | [
"a43c9f70ed1c37d4cb1cf6e619d1b0e822b1048c"
] | [
"Radio/433MHz/ReceiveRF.py"
] | [
"import RPi.GPIO as GPIO\nfrom datetime import datetime\nimport matplotlib.pyplot as pyplot\n\nRECEIVE_PIN = 23\nMAX_DURATION = 5\nRECEIVED_SIGNAL = [[], []]\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(RECEIVE_PIN, GPIO.IN)\n\ncumulative_time = 0\nbeginning_time = datetime.now()\nprint('Started recording...')\n\nwhile cumulative_time < MAX_DURATION:\n\n time_delta = datetime.now() - beginning_time\n\n RECEIVED_SIGNAL[0].append(time_delta)\n RECEIVED_SIGNAL[1].append(GPIO.input(RECEIVE_PIN))\n\n cumulative_time = time_delta.seconds\n\nprint('Ended recording,', len(RECEIVED_SIGNAL[0]), 'samples recorded.')\n\nGPIO.cleanup()\n\nprint('Processing results...')\n\nfor i in range(len(RECEIVED_SIGNAL[0])):\n\n RECEIVED_SIGNAL[0][i] = RECEIVED_SIGNAL[0][i].seconds + RECEIVED_SIGNAL[0][i].microseconds/1000000.0\n\nprint('Plotting results...')\n\npyplot.plot(RECEIVED_SIGNAL[0], RECEIVED_SIGNAL[1])\npyplot.axis([0, MAX_DURATION, -1, 2])\npyplot.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis"
]
] |
diqiuzhuanzhuan/poros | [
"b4f0ff4c5094aa9df12a9195e5b3edd85c3460f4"
] | [
"poros/bert/modeling.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at/dense\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The main BERT model and related functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport json\nimport re\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom tensorflow import *\n\n\nclass BertConfig(object):\n \"\"\"Configuration for `BertModel`.\"\"\"\n\n def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\nclass BertLayer(tf.keras.layers.Layer):\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n\n Example usage:\n\n ```python\n # Already been converted into WordPiece token ids\n input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])\n token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\n\n config = modeling.BertConfig(vocab_size=32000, hidden_size=512,\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n\n model = modeling.BertModel(config=config, is_training=True,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)\n\n label_embeddings = tf.Variable(...)\n pooled_output = model.get_pooled_output()\n logits = tf.matmul(pooled_output, label_embeddings)\n ...\n ```\n \"\"\"\n\n config = None\n\n def __init__(self,\n config,\n is_training):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n\n super(BertLayer, self).__init__()\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n self.config = config\n with tf.name_scope(name=\"bert\"):\n with tf.name_scope(\"embeddings\"):\n self.embedding_layer = EmbeddingLookupLayer(\n self.config.vocab_size,\n self.config.hidden_size,\n self.config.initializer_range,\n \"word_embeddings\"\n )\n self.embedding_postprocesser_layer = EmbeddingPostprocessorLayer(\n use_token_type=True,\n embedding_size=self.config.hidden_size,\n max_position_embeddings=self.config.max_position_embeddings,\n initializer_range=self.config.initializer_range,\n token_type_vocab_size=self.config.type_vocab_size,\n )\n with tf.name_scope(name=\"encoder\"):\n #from poros_train.some_layer import TransformerLayer\n self.transformer_layer = TransformerLayer(\n hidden_size=self.config.hidden_size,\n num_hidden_layers=self.config.num_hidden_layers,\n num_attention_heads=self.config.num_attention_heads,\n intermediate_size=self.config.intermediate_size,\n intermediate_act_fn=get_activation(self.config.hidden_act),\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n do_return_all_layers=True)\n\n with tf.name_scope(\"pooler/dense\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. We assume that this has been pre-trained\n self.pooler_layer = tf.keras.layers.Dense(\n self.config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(\n self.config.initializer_range))\n self.pooler_layer.build(input_shape=[None, config.hidden_size])\n\n def call(self,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n scope=\"bert\",\n use_one_hot_embeddings=False,\n training=False):\n \"\"\"\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n scope: variable scope name, defaults to `bert`\n use_one_hot_embeddings: (optional) bool. 
Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n \"\"\"\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.name_scope(scope):\n with tf.name_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n self.embedding_output, self.embedding_table = \\\n self.embedding_layer(input_ids, use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = self.embedding_postprocesser_layer(\n self.embedding_output,\n token_type_ids,\n self.config.hidden_dropout_prob,\n training=training\n )\n with tf.name_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n\n self.all_encoder_layers = self.transformer_layer(self.embedding_output, attention_mask, training=training)\n\n self.sequence_output = self.all_encoder_layers[-1]\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n with tf.name_scope(\"pooler\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. We assume that this has been pre-trained\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n self.pooled_output = self.pooler_layer(first_token_tensor)\n return self.pooled_output\n\n def get_pooled_output(self):\n return self.pooled_output\n\n def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output\n\n def get_all_encoder_layers(self):\n return self.all_encoder_layers\n\n def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. 
This is the input to the transformer.\n \"\"\"\n return self.embedding_output\n\n def get_embedding_table(self):\n return self.embedding_table\n\n\ndef gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf\n\n\ndef get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that\"s not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)\n\n\ndef get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n tf.get_logger().info(\"variable num is {}\".format(len(init_vars)))\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)\n\n\ndef dropout(input_tensor, dropout_prob):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.\n \"\"\"\n if dropout_prob is None or dropout_prob == 0.0:\n return input_tensor\n\n output = tf.nn.dropout(input_tensor, dropout_prob)\n return output\n\n\ndef layer_norm(input_tensor, name=None):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n return tf.keras.layers.LayerNormalization(epsilon=0.00001, name=name)(input_tensor)\n\n\ndef layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob)\n return output_tensor\n\n\ndef create_initializer(initializer_range=0.02):\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)\n\n\nclass EmbeddingLookupLayer(tf.keras.layers.Layer):\n\n def __init__(self, vocab_size, embedding_size=128, initializer_range=0.02, name=\"word_embeddings\"):\n super(EmbeddingLookupLayer, self).__init__()\n truncated_normal = tf.initializers.TruncatedNormal(stddev=initializer_range)\n self.embedding_table = \\\n tf.Variable(name=name,\n initial_value=truncated_normal(shape=[vocab_size, embedding_size]))\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n\n def call(self, input_ids, use_one_hot_embeddings):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n\n \"\"\"\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n flat_input_ids = tf.reshape(input_ids, [-1])\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=self.vocab_size)\n output = tf.matmul(one_hot_input_ids, self.embedding_table)\n else:\n output = tf.gather(self.embedding_table, flat_input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output,\n input_shape[0:-1] + [input_shape[-1] * self.embedding_size])\n return output, self.embedding_table\n\n\n\nclass EmbeddingPostprocessorLayer(tf.keras.layers.Layer):\n\n def __init__(self, use_token_type=False,\n use_position_embeddings=True,\n token_type_embedding_name=\"token_type_embeddings\",\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n token_type_vocab_size=16,\n embedding_size=768,\n max_position_embeddings=512):\n super(EmbeddingPostprocessorLayer, self).__init__()\n self.use_token_type = use_token_type\n self.use_position_embeddings = use_position_embeddings\n self.token_type_vocab_size = token_type_vocab_size\n self.embedding_size = embedding_size\n self.max_position_embeddings=512\n if self.use_token_type:\n self.token_type_table = tf.Variable(\n name=token_type_embedding_name,\n initial_value=create_initializer(initializer_range)(\n shape=[self.token_type_vocab_size, self.embedding_size])\n )\n\n if use_position_embeddings:\n self.full_position_embeddings = tf.Variable(\n name=position_embedding_name,\n initial_value=create_initializer(initializer_range)(\n shape=[max_position_embeddings, self.embedding_size])\n )\n with tf.name_scope(\"LayerNorm\"):\n self.layer_normalization = 
tf.keras.layers.LayerNormalization(epsilon=0.00001)\n self.layer_normalization.build(input_shape=[None, None, self.embedding_size])\n\n def call(self, input_tensor, token_type_ids, dropout_prob=0.1, training=False):\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n output = input_tensor\n tf.debugging.assert_equal(\n tf.constant(value=width, dtype=tf.int32), tf.constant(value=self.embedding_size, dtype=tf.int32),\n message=\"the dimension of input tensor is not equal to the embedding size, \"\n \"input_tensor is {}, embedding size is {}\".format(width, self.embedding_size)\n )\n\n if self.use_token_type:\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=self.token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, self.token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if self.use_position_embeddings:\n assert_op = tf.debugging.assert_less_equal(seq_length, self.max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n position_embeddings = tf.slice(self.full_position_embeddings, [0, 0],\n [seq_length, -1])\n num_dims = len(output.shape.as_list())\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in output.shape.as_list()[0:-2]:\n position_broadcast_shape.append(1)\n\n \"\"\"\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n \"\"\"\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape)\n output += position_embeddings\n\n output = self.layer_normalization(output)\n if training:\n output = dropout(output, dropout_prob)\n\n return output\n\n\ndef embedding_lookup(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. 
If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n embedding_table = tf.Variable(\n name=word_embedding_name,\n initial_value=tf.initializers.TruncatedNormal(stddev=0.01)(shape=[vocab_size, embedding_size]))\n\n flat_input_ids = tf.reshape(input_ids, [-1])\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\n output = tf.matmul(one_hot_input_ids, embedding_table)\n else:\n output = tf.gather(embedding_table, flat_input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output, input_shape[0:-1] + [input_shape[-1] * embedding_size])\n return (output, embedding_table)\n\n\ndef embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type:\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.Variable(\n name=token_type_embedding_name,\n initial_value=create_initializer(initializer_range)(shape=[token_type_vocab_size, width]))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings:\n assert_op = tf.debugging.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.Variable(\n name=position_embedding_name,\n initial_value=create_initializer(initializer_range)(shape=[max_position_embeddings, width]))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... seq_length-1], so we can just\n # perform a slice.\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n [seq_length, -1])\n num_dims = len(output.shape.as_list())\n\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings,\n position_broadcast_shape)\n output += position_embeddings\n output = layer_norm_and_dropout(output, dropout_prob)\n return output\n\n\ndef create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n if the position is need to be masked, set 1 to it\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n # We don't assume that `from_tensor` is a mask (although it could be). 
We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask\n\n\nclass AttentionLayer(tf.keras.layers.Layer):\n\n def __init__(self,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n name_scope=\"attention\"):\n \"\"\"\n\n :rtype: object\n \"\"\"\n super(AttentionLayer, self).__init__()\n self.num_attention_heads = num_attention_heads\n self.size_per_head = size_per_head\n self.query_act = query_act\n self.key_act = key_act\n #self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.do_return_2d_tensor = do_return_2d_tensor\n self.wq = tf.keras.layers.Dense(self.num_attention_heads * self.size_per_head,\n activation=query_act,\n kernel_initializer=create_initializer(initializer_range=0.01))\n self.wk = tf.keras.layers.Dense(self.num_attention_heads * self.size_per_head,\n activation=key_act,\n kernel_initializer=create_initializer(initializer_range=0.01))\n self.wv = tf.keras.layers.Dense(self.num_attention_heads * self.size_per_head,\n activation=value_act,\n kernel_initializer=create_initializer(initializer_range=0.01))\n with tf.name_scope(\"query\"):\n self.wq.build(input_shape=[None, size_per_head * num_attention_heads])\n with tf.name_scope(\"key\"):\n self.wk.build(input_shape=[None, size_per_head * num_attention_heads])\n with tf.name_scope(\"value\"):\n self.wv.build(input_shape=[None, size_per_head * num_attention_heads])\n\n def transpose_for_scores(self, x, batch_size):\n x = tf.reshape(x, shape=[batch_size, -1, self.num_attention_heads, self.size_per_head])\n # shape is [B, N, F, H]\n return tf.transpose(x, [0, 2, 1, 3])\n\n def call(self, q, k, v,\n attention_mask=None,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head\n # q : [B, F, H]\n\n # convert to [B * F, N * H]\n\n from_shape = get_shape_list(q, expected_rank=[2, 3])\n to_shape = get_shape_list(k, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n q = reshape_to_matrix(q)\n # convert to [B * T, N * H]\n k = reshape_to_matrix(k)\n v = reshape_to_matrix(v)\n\n # query_layer: [B, F, N*H]\n query_layer = self.wq(q)\n key_layer = self.wk(k)\n value_layer = self.wv(v)\n\n query_layer = self.transpose_for_scores(query_layer, batch_size)\n key_layer = self.transpose_for_scores(key_layer, batch_size)\n # attention_score's shape is [B, N, F, T]\n attention_score = 
tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_score = attention_score / tf.math.sqrt(float(self.size_per_head))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_score += adder\n\n attention_score = tf.nn.softmax(attention_score)\n value_layer = self.transpose_for_scores(value_layer, batch_size)\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_score, value_layer)\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n # we need to concat all heads, now\n if self.do_return_2d_tensor:\n # `context_layer` = [B * F, N * H]\n context_layer = tf.reshape(context_layer,\n [-1,\n self.num_attention_heads * self.size_per_head])\n else:\n # `context_layer` = [B, F, N * H]\n context_layer = tf.reshape(context_layer,\n [batch_size, -1, self.num_attention_heads * self.size_per_head])\n\n return context_layer\n\n\nclass TransformerLayer(tf.keras.layers.Layer):\n\n def __init__(self,\n hidden_size,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n super(TransformerLayer, self).__init__()\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.intermediate_act_fn = intermediate_act_fn\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.hidden_size = hidden_size\n self.do_return_all_layers = do_return_all_layers\n self.attention_layers = []\n self.attention_outputs = []\n self.attention_outputs_layer_norm = []\n self.intermediate_outputs = []\n self.size_per_head = int(self.hidden_size / self.num_attention_heads)\n self.outputs = []\n self.outputs_layer_norm = []\n for layer_idx in range(self.num_hidden_layers):\n with tf.name_scope(\"layer_%d\" % layer_idx):\n with tf.name_scope(\"attention\"):\n with tf.name_scope(\"self\"):\n layer = AttentionLayer(\n num_attention_heads=self.num_attention_heads,\n size_per_head=self.size_per_head,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n )\n self.attention_layers.append(layer)\n with tf.name_scope(\"output/dense\"):\n layer = tf.keras.layers.Dense(\n hidden_size,\n kernel_initializer=create_initializer(self.initializer_range))\n layer.build(input_shape=[None, self.hidden_size])\n self.attention_outputs.append(layer)\n with tf.name_scope(\"output/LayerNorm\"):\n layer = tf.keras.layers.LayerNormalization(epsilon=0.00001)\n layer.build(input_shape=[None, None, self.hidden_size])\n self.attention_outputs_layer_norm.append(layer)\n\n with tf.name_scope(\"intermediate/dense\"):\n layer = tf.keras.layers.Dense(\n self.intermediate_size,\n activation=self.intermediate_act_fn,\n kernel_initializer=create_initializer(self.initializer_range))\n 
layer.build(input_shape=[None, self.hidden_size])\n self.intermediate_outputs.append(layer)\n\n with tf.name_scope(\"output\"):\n with tf.name_scope(\"dense\"):\n layer = tf.keras.layers.Dense(\n hidden_size,\n kernel_initializer=create_initializer(self.initializer_range))\n layer.build(input_shape=[None, self.intermediate_size])\n self.outputs.append(layer)\n\n with tf.name_scope(\"LayerNorm\"):\n layer = tf.keras.layers.LayerNormalization(epsilon=0.00001)\n layer.build(input_shape=[None, None, self.hidden_size])\n self.outputs_layer_norm.append(layer)\n\n #self.attention_layers = tf.stack(self.attention_layers)\n #self.attention_outputs = tf.stack(self.attention_outputs)\n #self.attention_outputs_layer_norm = tf.stack(self.attention_outputs_layer_norm)\n #self.intermediate_outputs = tf.stack(self.intermediate_outputs)\n #self.outputs = tf.stack(self.outputs)\n #self.outputs_layer_norm = tf.stack(self.outputs_layer_norm)\n\n def call(self, input_tensor, attention_mask, training=False):\n #input_tensor = features[\"input_tensor\"]\n #attention_mask = features[\"attention_mask\"]\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n if input_width % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (input_width, self.num_attention_heads))\n\n size_per_head = int(input_width / self.num_attention_heads)\n tf.debugging.assert_equal(size_per_head, self.size_per_head)\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on\n # the GPU/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n # prev_output = reshape_to_matrix(input_tensor)\n prev_output = input_tensor\n\n all_layer_outputs = []\n\n for (attention_layer,\n attention_output_layer,\n attention_output_layer_norm,\n intermediate_output,\n output,\n output_layer_norm) in zip(self.attention_layers,\n self.attention_outputs,\n self.attention_outputs_layer_norm,\n self.intermediate_outputs,\n self.outputs,\n self.outputs_layer_norm):\n layer_input = prev_output\n attention_heads = []\n attention_head = attention_layer(layer_input, layer_input, layer_input, attention_mask)\n attention_heads.append(attention_head)\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n attention_output = attention_output_layer(attention_output)\n if training:\n attention_output = dropout(attention_output, self.hidden_dropout_prob)\n attention_output = attention_output_layer_norm(attention_output + layer_input)\n if training:\n attention_output = dropout(attention_output, self.hidden_dropout_prob)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n intermediate_output = intermediate_output(attention_output)\n\n # Down-project back to `hidden_size` then add the residual.\n layer_output = output(intermediate_output)\n if training:\n layer_output = dropout(layer_output, self.hidden_dropout_prob)\n layer_output = output_layer_norm(layer_output + attention_output)\n if training:\n layer_output = dropout(layer_output, 
self.hidden_dropout_prob)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if self.do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output\n\n\ndef get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None and not tf.executing_eagerly():\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape\n\n\ndef reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor\n\n\ndef reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])\n\n\ndef assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None and not tf.executing_eagerly():\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n raise ValueError(\n \"For the tensor `%s` , the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, actual_rank, str(tensor.shape), str(expected_rank)))\n\n\nif __name__ == \"__main__\":\n features = tf.initializers.TruncatedNormal()(shape=[8, 784])\n a = tf.keras.layers.LayerNormalization(epsilon=0.00001)(features)\n print(a)\n"
] | [
[
"tensorflow.io.gfile.GFile",
"tensorflow.initializers.TruncatedNormal",
"tensorflow.ones",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.executing_eagerly",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.train.list_variables",
"tensorflow.constant",
"tensorflow.squeeze",
"tensorflow.debugging.assert_equal",
"numpy.sqrt",
"tensorflow.nn.dropout",
"tensorflow.get_logger",
"tensorflow.zeros",
"tensorflow.expand_dims",
"tensorflow.name_scope",
"tensorflow.debugging.assert_less_equal",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.gather",
"tensorflow.slice",
"tensorflow.pow",
"tensorflow.keras.initializers.TruncatedNormal"
]
] |
unanan/Transformer_MultiModality | [
"fd07288908a8b9774d5adac498cf415874b36585"
] | [
"model/srcembed/crnn.py"
] | [
"import torch.nn as nn\n\n\nclass BidirectionalLSTM(nn.Module):\n \n def __init__(self, nIn, nHidden, nOut):\n super(BidirectionalLSTM, self).__init__()\n \n self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)\n self.embedding = nn.Linear(nHidden * 2, nOut)\n \n def forward(self, input):\n recurrent, _ = self.rnn(input)\n # print(input.shape, recurrent.shape)\n \n T, b, h = recurrent.size()\n t_rec = recurrent.view(T * b, h)\n \n output = self.embedding(t_rec) # [T * b, nOut]\n output = output.view(T, b, -1)\n \n return output\n\n\nclass CRNN(nn.Module):\n \n def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):\n super(CRNN, self).__init__()\n assert imgH % 16 == 0, 'imgH has to be a multiple of 16'\n \n ks = [3, 3, 3, 3, 3, 3, 3, 3, 2] # [3, 3, 3, 3, 3, 3, 2]\n ps = [1, 1, 1, 1, 1, 1, 1, 1, 0] # [1, 1, 1, 1, 1, 1, 0]\n ss = [1, 1, 1, 1, 1, 1, 1, 1, 1] # [1, 1, 1, 1, 1, 1, 1]\n nm = [64, 128, 256, 256, 512, 512, 512, 512, 512] # [64, 128, 256, 256, 512, 512, 512]\n \n cnn = nn.Sequential()\n \n def convRelu(i, batchNormalization=False):\n nIn = nc if i == 0 else nm[i - 1]\n nOut = nm[i]\n cnn.add_module('conv{0}'.format(i),\n nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))\n if batchNormalization:\n cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))\n # cnn.add_module('layernorm{0}'.format(i), nn.GroupNorm(1, nOut))\n if leakyRelu:\n cnn.add_module('relu{0}'.format(i),\n nn.LeakyReLU(0.2, inplace=True))\n else:\n cnn.add_module('relu{0}'.format(i), nn.ReLU(True))\n \n convRelu(0)\n cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2)) # 64x32x? # 64x16x64\n convRelu(1)\n cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2)) # 128x16x? # 128x8x32\n convRelu(2, True)\n convRelu(3)\n cnn.add_module('pooling{0}'.format(2),\n nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 256x8x? # 256x4x16\n convRelu(4, True)\n convRelu(5)\n cnn.add_module('pooling{0}'.format(3),\n nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 512x2x? # 512x2x16\n convRelu(6, True)\n convRelu(7)\n cnn.add_module('pooling{0}'.format(4),\n nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 512x2x? # 1024x2x16\n convRelu(8, True) # 512x1x? # 512x1x16\n # convRelu(6, True) # 512x1x16\n \n self.cnn = cnn\n # self.rnn = nn.Sequential(\n # BidirectionalLSTM(512, nh, nh),\n # BidirectionalLSTM(nh, nh, nclass))\n \n # self.load_state_dict(torch.load(\"./crnn.pth\"))\n # for p in self.parameters():\n # p.requires_grad = False\n \n def forward(self, input):\n # conv features\n output = self.cnn(input)\n # b, c, h, w = conv.size()\n # assert h == 1, \"the height of conv must be 1\"\n output = output.squeeze(2)\n output = output.permute(0, 2, 1) # [w, b, c]\n # # print(conv.shape)\n # # rnn features\n # output = self.rnn(conv)\n \n # print(output.shape)\n return output\n"
] | [
[
"torch.nn.Linear",
"torch.nn.LSTM",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
chanvee/My_Neural_Transfer | [
"3fad9dbfa2f029e8583085871f4bb4e370e187cf"
] | [
"neural_style_transfer.py"
] | [
"'''Neural style transfer with Keras.\n\nRun the script with:\n```\npython neural_style_transfer.py path_to_your_base_image.jpg path_to_your_reference.jpg prefix_for_results\n```\ne.g.:\n```\npython neural_style_transfer.py img/tuebingen.jpg img/starry_night.jpg results/my_result\n```\nOptional parameters:\n```\n--iter, To specify the number of iterations the style transfer takes place (Default is 10)\n--content_weight, The weight given to the content loss (Default is 0.025)\n--style_weight, The weight given to the style loss (Default is 1.0)\n--tv_weight, The weight given to the total variation loss (Default is 1.0)\n```\n\nIt is preferable to run this script on GPU, for speed.\n\nExample result: https://twitter.com/fchollet/status/686631033085677568\n\n# Details\n\nStyle transfer consists in generating an image\nwith the same \"content\" as a base image, but with the\n\"style\" of a different picture (typically artistic).\n\nThis is achieved through the optimization of a loss function\nthat has 3 components: \"style loss\", \"content loss\",\nand \"total variation loss\":\n\n- The total variation loss imposes local spatial continuity between\nthe pixels of the combination image, giving it visual coherence.\n\n- The style loss is where the deep learning keeps in --that one is defined\nusing a deep convolutional neural network. Precisely, it consists in a sum of\nL2 distances between the Gram matrices of the representations of\nthe base image and the style reference image, extracted from\ndifferent layers of a convnet (trained on ImageNet). The general idea\nis to capture color/texture information at different spatial\nscales (fairly large scales --defined by the depth of the layer considered).\n\n - The content loss is a L2 distance between the features of the base\nimage (extracted from a deep layer) and the features of the combination image,\nkeeping the generated image close enough to the original one.\n\n# References\n - [A Neural Algorithm of Artistic Style](http://arxiv.org/abs/1508.06576)\n'''\n\nfrom __future__ import print_function\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom scipy.misc import imsave\nimport numpy as np\nfrom scipy.optimize import fmin_l_bfgs_b\nimport time\nimport argparse\n\nfrom keras.applications import vgg19\nfrom keras import backend as K\n\nparser = argparse.ArgumentParser(description='Neural style transfer with Keras.')\nparser.add_argument('base_image_path', metavar='base', type=str,\n help='Path to the image to transform.')\nparser.add_argument('style_reference_image_path', metavar='ref', type=str,\n help='Path to the style reference image.')\nparser.add_argument('result_prefix', metavar='res_prefix', type=str,\n help='Prefix for the saved results.')\nparser.add_argument('--iter', type=int, default=10, required=False,\n help='Number of iterations to run.')\nparser.add_argument('--content_weight', type=float, default=0.025, required=False,\n help='Content weight.')\nparser.add_argument('--style_weight', type=float, default=1.0, required=False,\n help='Style weight.')\nparser.add_argument('--tv_weight', type=float, default=1.0, required=False,\n help='Total Variation weight.')\n\nargs = parser.parse_args()\nbase_image_path = args.base_image_path\nstyle_reference_image_path = args.style_reference_image_path\nresult_prefix = args.result_prefix\niterations = args.iter\n\n# these are the weights of the different loss components\ntotal_variation_weight = args.tv_weight\nstyle_weight = args.style_weight\ncontent_weight = 
args.content_weight\n\n# dimensions of the generated picture.\nwidth, height = load_img(base_image_path).size\nimg_nrows = 300\nimg_ncols = int(width * img_nrows / height)\n\n# util function to open, resize and format pictures into appropriate tensors\n\n\ndef preprocess_image(image_path):\n img = load_img(image_path, target_size=(img_nrows, img_ncols))\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n img = vgg19.preprocess_input(img)\n return img\n\n# util function to convert a tensor into a valid image\n\n\ndef deprocess_image(x):\n if K.image_data_format() == 'channels_first':\n x = x.reshape((3, img_nrows, img_ncols))\n x = x.transpose((1, 2, 0))\n else:\n x = x.reshape((img_nrows, img_ncols, 3))\n # Remove zero-center by mean pixel\n x[:, :, 0] += 103.939\n x[:, :, 1] += 116.779\n x[:, :, 2] += 123.68\n # 'BGR'->'RGB'\n x = x[:, :, ::-1]\n x = np.clip(x, 0, 255).astype('uint8')\n return x\n\n# get tensor representations of our images\nbase_image = K.variable(preprocess_image(base_image_path))\nstyle_reference_image = K.variable(preprocess_image(style_reference_image_path))\n\n# this will contain our generated image\nif K.image_data_format() == 'channels_first':\n combination_image = K.placeholder((1, 3, img_nrows, img_ncols))\nelse:\n combination_image = K.placeholder((1, img_nrows, img_ncols, 3))\n\n# combine the 3 images into a single Keras tensor\ninput_tensor = K.concatenate([base_image,\n style_reference_image,\n combination_image], axis=0)\n\n# build the VGG16 network with our 3 images as input\n# the model will be loaded with pre-trained ImageNet weights\nmodel = vgg19.VGG19(input_tensor=input_tensor,\n weights='imagenet', include_top=False)\nprint('Model loaded.')\n\n# get the symbolic outputs of each \"key\" layer (we gave them unique names).\noutputs_dict = dict([(layer.name, layer.output) for layer in model.layers])\n\n# compute the neural style loss\n# first we need to define 4 util functions\n\n# the gram matrix of an image tensor (feature-wise outer product)\n\n\ndef gram_matrix(x):\n assert K.ndim(x) == 3\n if K.image_data_format() == 'channels_first':\n features = K.batch_flatten(x)\n else:\n features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))\n gram = K.dot(features, K.transpose(features))\n return gram\n\n# the \"style loss\" is designed to maintain\n# the style of the reference image in the generated image.\n# It is based on the gram matrices (which capture style) of\n# feature maps from the style reference image\n# and from the generated image\n\n\ndef style_loss(style, combination):\n assert K.ndim(style) == 3\n assert K.ndim(combination) == 3\n S = gram_matrix(style)\n C = gram_matrix(combination)\n channels = 3\n size = img_nrows * img_ncols\n return K.sum(K.square(S - C)) / (4. 
* (channels ** 2) * (size ** 2))\n\n# an auxiliary loss function\n# designed to maintain the \"content\" of the\n# base image in the generated image\n\n\ndef content_loss(base, combination):\n return K.sum(K.square(combination - base))\n\n# the 3rd loss function, total variation loss,\n# designed to keep the generated image locally coherent\n\n\ndef total_variation_loss(x):\n assert K.ndim(x) == 4\n if K.image_data_format() == 'channels_first':\n a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1])\n b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:])\n else:\n a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])\n b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])\n return K.sum(K.pow(a + b, 1.25))\n\n# combine these loss functions into a single scalar\nloss = K.variable(0.)\nlayer_features = outputs_dict['block5_conv2']\nbase_image_features = layer_features[0, :, :, :]\ncombination_features = layer_features[2, :, :, :]\nloss += content_weight * content_loss(base_image_features,\n combination_features)\n\nfeature_layers = ['block1_conv1', 'block2_conv1',\n 'block3_conv1', 'block4_conv1',\n 'block5_conv1']\nfor layer_name in feature_layers:\n layer_features = outputs_dict[layer_name]\n style_reference_features = layer_features[1, :, :, :]\n combination_features = layer_features[2, :, :, :]\n sl = style_loss(style_reference_features, combination_features)\n loss += (style_weight / len(feature_layers)) * sl\nloss += total_variation_weight * total_variation_loss(combination_image)\n\n# get the gradients of the generated image wrt the loss\ngrads = K.gradients(loss, combination_image)\n\noutputs = [loss]\nif isinstance(grads, (list, tuple)):\n outputs += grads\nelse:\n outputs.append(grads)\n\nf_outputs = K.function([combination_image], outputs)\n\n\ndef eval_loss_and_grads(x):\n if K.image_data_format() == 'channels_first':\n x = x.reshape((1, 3, img_nrows, img_ncols))\n else:\n x = x.reshape((1, img_nrows, img_ncols, 3))\n outs = f_outputs([x])\n loss_value = outs[0]\n if len(outs[1:]) == 1:\n grad_values = outs[1].flatten().astype('float64')\n else:\n grad_values = np.array(outs[1:]).flatten().astype('float64')\n return loss_value, grad_values\n\n# this Evaluator class makes it possible\n# to compute loss and gradients in one pass\n# while retrieving them via two separate functions,\n# \"loss\" and \"grads\". 
This is done because scipy.optimize\n# requires separate functions for loss and gradients,\n# but computing them separately would be inefficient.\n\n\nclass Evaluator(object):\n\n def __init__(self):\n self.loss_value = None\n self.grads_values = None\n\n def loss(self, x):\n assert self.loss_value is None\n loss_value, grad_values = eval_loss_and_grads(x)\n self.loss_value = loss_value\n self.grad_values = grad_values\n return self.loss_value\n\n def grads(self, x):\n assert self.loss_value is not None\n grad_values = np.copy(self.grad_values)\n self.loss_value = None\n self.grad_values = None\n return grad_values\n\nevaluator = Evaluator()\n\n# run scipy-based optimization (L-BFGS) over the pixels of the generated image\n# so as to minimize the neural style loss\nx = preprocess_image(base_image_path)\n\nfor i in range(iterations):\n print('Start of iteration', i)\n start_time = time.time()\n x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),\n fprime=evaluator.grads, maxfun=20)\n print('Current loss value:', min_val)\n # save current generated image\n img = deprocess_image(x.copy())\n fname = result_prefix + '_at_iteration_%d.png' % i\n imsave(fname, img)\n end_time = time.time()\n print('Image saved as', fname)\n print('Iteration %d completed in %ds' % (i, end_time - start_time))\n"
] | [
[
"numpy.array",
"scipy.misc.imsave",
"numpy.copy",
"numpy.clip",
"numpy.expand_dims"
]
] |
BillMills/argo-database | [
"a22d87fdeacf1a12280201b995509a671f9d90e4"
] | [
"test/netCDFToDocTest.py"
] | [
"# -*- coding: utf-8 -*-\n\n\nimport os\nimport sys\nimport pdb\nimport re\nimport numpy as np\nsys.path.append('..')\nsys.path.append('../add-profiles/')\nfrom argoDatabase import argoDatabase\nimport addFunctions as af\nimport unittest\nfrom datetime import datetime\nimport random\nimport warnings\nfrom numpy import warnings as npwarnings\nfrom argoDBClass import argoDBClass\n# Sometimes netcdf contain nan. This will suppress runtime warnings.\nwarnings.simplefilter('error', RuntimeWarning)\nnpwarnings.filterwarnings('ignore')\n\nclass netCDFToDocTest(argoDBClass):\n \n def test_document_creation(self):\n self.ad.addToDb = False\n df = self.df\n files = df.file.tolist()\n\n self.ad.add_locally(self.OUTPUTDIR, [files[2]])\n self.assertIsInstance(self.ad.documents, list, 'should be list')\n\n self.assertIsInstance(self.ad.documents[0], dict, 'should be dict')\n \n def test_required_keys(self):\n self.ad.addToDb = False\n df = self.df\n files = df.file.tolist()\n\n self.ad.add_locally(self.OUTPUTDIR, random.sample(files, 20))\n self.assertIsInstance(self.ad.documents, list, 'should be list')\n for doc in self.ad.documents:\n docKeys = doc.keys()\n for key, itemType in self.requiredKeys:\n self.assertIn(key, docKeys, 'missing key: {}'.format(key))\n item = doc[key]\n self.assertIsInstance(item, itemType, 'profile {2} key {0} is not of type {1}'.format(key, itemType, doc['_id']))\n\n def test_optional_keys(self):\n '''Used to find out why some fields are missing'''\n profiles = [ '5905059_1', '5905059_100', '5905059_99', '5905059_98', '5904663_97', '2900784_297' '2901182_8']\n df = self.df\n df['_id'] = df.profile.apply(lambda x: re.sub('_0{1,}', '_', x))\n df = df[ df['_id'].isin(profiles)]\n self.ad.addToDb = False\n files = df.file.tolist()\n \n incompleteDocs = ['5904663_97', ['pres_max_for_PSAL'],\n '2900784_297', ['pres_max_for_TEMP', 'pres_min_for_TEMP'],\n '2900784_297', ['VERTICAL_SAMPLING_SCHEME'] #should be mandatory\n ]\n \n self.ad.add_locally(self.OUTPUTDIR, files)\n\n for doc in self.ad.documents:\n docKeys = doc.keys()\n for key, itemType in self.optionalKeys:\n if key in docKeys:\n item = doc[key]\n self.assertEqual(type(doc[key]), itemType, 'item type should match for key: {}'.format(key))\n self.assertIsInstance(item, itemType)\n else:\n if key not in [ 'containsBGC', 'bgcMeas', 'isDeep' ]:\n print('profile: {0} missing key: {1}'.format(key, doc['_id']))\n\n #self.assertIn(key, doc.keys(), 'profile: {0} missing key: {1}'.format(key, doc['_id']))\n\n def test_ascending_profiles(self):\n '''profile should be acending'''\n profiles = [ '2902534_142']\n df = self.df\n df['_id'] = df.profile.apply(lambda x: re.sub('_0{1,}', '_', x))\n df = df[ df['_id'].isin(profiles)]\n files = df.file.tolist()\n \n self.ad.removeExisting = True\n self.addToDb=True\n self.ad.add_locally(self.OUTPUTDIR, files)\n\n coll = self.ad.create_collection()\n for _id in profiles:\n doc = coll.find_one({'_id': _id})\n self.assertTrue(doc['DIRECTION']!='D', 'should be acending')\n \n ''' TODO: FIND A DECENDING FLOAT TO CHECK\n def test_decending_profiles(self):\n profile should be decending\n profiles = [ '6901762_46']\n df = self.df\n df['_id'] = df.profile.apply(lambda x: re.sub('_0{1,}', '_', x))\n df = df[ df['_id'].isin(profiles)]\n files = df.file.tolist()\n \n self.ad.removeExisting = True\n self.addToDb=True\n self.ad.add_locally(self.OUTPUTDIR, files)\n\n coll = self.ad.create_collection()\n pdb.set_trace()\n for _id in profiles:\n doc = coll.find_one({'_id': _id})\n 
self.assertTrue(doc['DIRECTION']=='D', 'should be decending')\n '''\n\n def test_check_profiles(self):\n \n '''5904663_68 is missing position. Position qc should be a 9.\n 2903207_72 was reported to be missing pos sys, but it has it here.\n 5903593, 5904663 are reported to be missing bgc data.'''\n profiles = [ '5904663_68', '2903207_72', '5904663_97', '2900784_297', '2901182_8', '6901676_30']\n df = self.df\n df['_id'] = df.profile.apply(lambda x: re.sub('_0{1,}', '_', x))\n df = df[ df['_id'].isin(profiles)]\n files = df.file.tolist()\n \n self.ad.removeExisting = True\n self.addToDb=True\n self.ad.add_locally(self.OUTPUTDIR, files)\n\n self.assertTrue(True) # just checks if these dont crash routine\n\n def test_check_masked_adjusted_profiles(self):\n '''\n profiles that have been adjusted can have masked values. in this case, masked values are filled with NaN.\n '''\n profiles = ['6901676_30']\n df = self.df\n df['_id'] = df.profile.apply(lambda x: re.sub('_0{1,}', '_', x))\n df = df[ df['_id'].isin(profiles)]\n files = df.file.tolist()\n \n self.ad.removeExisting = True\n self.addToDb=True\n self.ad.add_locally(self.OUTPUTDIR, files)\n\n coll = self.ad.create_collection()\n for _id in profiles:\n doc = coll.find_one({'_id': _id})\n keys = doc['measurements'][0].keys()\n \n self.assertFalse('psal' in keys, 'psal should have been removed from _id {}'.format(_id))\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.warnings.filterwarnings"
]
] |
SousaPedro11/algoritmos | [
"86a3601912778d120b9ec8094267c26a7eb6d153"
] | [
"python_implementation/prova_03/matriz_esparsa.py"
] | [
"import os.path\nimport random\nfrom typing import List\n\nimport numpy as np\n\n\ndef __imprime_matriz(matriz: List[List]) -> None:\n try:\n print(f'Matriz de tamanho: {len(matriz)}')\n for linha in matriz:\n print(' '.join(str(x) for x in linha))\n print('\\n')\n except ValueError as e:\n print(e)\n\n\ndef __define_matriz(tamanho: int) -> List[List]:\n \"\"\"\n Metodo de definicao da matriz.\n\n :return: Matriz em forma de lista\n \"\"\"\n _matriz = __cria_matriz_quadrada(tamanho)\n return _matriz\n\n\ndef __sparse_elements(ordem: int, percent: int = 20):\n \"\"\"\n Cria a matriz esparsa os elementos nao nulos da matriz esparsa\n :param ordem: Ordem da matriz\n :param percent: Percentual inteiro de nao nulos\n :return: Matriz em estrutura de lista de listas\n \"\"\"\n _length = round(ordem ** 2 * percent / 100)\n values = list(np.random.randint(low=1, high=9, size=_length))\n rows = list(np.random.randint(low=0, high=ordem, size=_length))\n columns = list(np.random.randint(low=0, high=ordem, size=_length))\n\n cords = set(zip(rows, columns))\n while len(cords) < _length:\n cords.add((random.randrange(ordem), random.randrange(ordem)))\n _sparse = [[t[0], t[1], v] for t, v in zip(cords, values)]\n return _sparse\n\n\ndef __cria_matriz_quadrada(tamanho: int, sparse_elements: List[List] = None):\n \"\"\"\n Metodo que cria a matriz recursivamente seguindo um padrao de letras por quadrante.\n\n :param tamanho: Ordem da matriz\n :param i: Linha da matriz\n :return: Matriz preenchida\n \"\"\"\n _matriz = []\n for _ in range(tamanho):\n _linha = [0 for _ in range(tamanho)]\n _matriz.append(_linha)\n\n if sparse_elements:\n for cord in sparse_elements:\n _matriz[cord[0]][cord[1]] = cord[2]\n\n return _matriz\n\n\ndef salva_arquivo(element: List, name: str):\n \"\"\"\n Salva o arquivo com cada valor da lista em uma linha\n :param element: Lista de elementos\n :param name: Nome do arquivo\n :return: None\n \"\"\"\n with open(name, 'w', encoding='utf-8') as f:\n f.writelines('\\n'.join(str(x) for x in element))\n\n\ndef imprime_comparativos_arquivo(name_arq_1: str, name_arq_2: str):\n \"\"\"\n Compara quantos % um arquivo e maior que outro\n :param name_arq_1:\n :param name_arq_2:\n :return: None\n \"\"\"\n try:\n size_file_1 = os.path.getsize(name_arq_1)\n size_file_2 = os.path.getsize(name_arq_2)\n comparativo = 100 - (min(size_file_1, size_file_2) * 100 / max(size_file_1, size_file_2))\n print(f'Arquivo maior é {comparativo}% maior que o outro')\n except Exception as e:\n print(e.__str__())\n\n\ndef atualiza_matriz(matrix: List[List], sparce: List[List]):\n \"\"\"\n Atualiza os elementos de uma matriz a partir de uma lista de coordenadas-valor\n :param matrix: Matriz a ser atualizada\n :param sparce: Lista da coordenada-valor\n :return: Matriz atualizada\n \"\"\"\n if sparce:\n for cord in sparce:\n matrix[cord[0]][cord[1]] = cord[2]\n\n return matrix\n\n\ndef solucao_problema():\n # questao a, b, c e d\n # define ordem da matriz\n ordem_matriz = 10000\n # cria lista de elementos nao nulos de uma matriz esparsa\n _sparse_elements = __sparse_elements(ordem_matriz, 20)\n # cria matriz esparsa a partir da lista de elementos\n _matrix = __cria_matriz_quadrada(ordem_matriz, _sparse_elements)\n # define nome dos arquivos\n filename_matrix = 'arquivoA.txt'\n filename_sparse_elements = 'arquivoB.txt'\n # salva matriz e lista de elementos nos respectivos arquivos\n salva_arquivo(_matrix, filename_matrix)\n salva_arquivo(_sparse_elements, filename_sparse_elements)\n # imprime o comparativo dos tamanhos dos 
arquivos mostrando quantos % é maior\n imprime_comparativos_arquivo(filename_matrix, filename_sparse_elements)\n # define uma outra lista\n _sparse_elements_2 = __sparse_elements(ordem_matriz, 10)\n # atualiza a matriz a partir da segunda lista\n _matrix = atualiza_matriz(_matrix, _sparse_elements_2)\n # salva a matriz atualizada\n salva_arquivo(_matrix, 'arquivoC.txt')\n\n\nif __name__ == '__main__':\n solucao_problema()\n"
] | [
[
"numpy.random.randint"
]
] |
bettertony/Better | [
"edb58ac1a44692f4227d1c0f6cdde550eca13f4f"
] | [
"better/tools/indicator.py"
] | [
"from __future__ import division, print_function, absolute_import\nimport numpy as np\n\n\ndef max_drawdown(pc_array):\n \"\"\"calculate the max drawdown with the portfolio changes\n @:param pc_array: all the portfolio changes during a trading process\n @:return: max drawdown\n \"\"\"\n portfolio_values = []\n drawdown_list = []\n max_benefit = 0\n for i in range(pc_array.shape[0]):\n if i > 0:\n portfolio_values.append(portfolio_values[i - 1] * pc_array[i])\n else:\n portfolio_values.append(pc_array[i])\n if portfolio_values[i] > max_benefit:\n max_benefit = portfolio_values[i]\n drawdown_list.append(0.0)\n else:\n drawdown_list.append(1.0 - portfolio_values[i] / max_benefit)\n return max(drawdown_list)\n\n\ndef sharpe(pc_array):\n \"\"\"calculate sharpe ratio with the portfolio changes\n @:param pc_array: all the portfolio changes during a trading process\n @:return: sharpe ratio\n \"\"\"\n pc_array = pc_array-1.0\n return np.mean(pc_array)/np.std(pc_array)\n\n\ndef moving_accumulate(pc_array, n=48):\n acc = np.cumprod(pc_array)\n acc[n:] = acc[n:] / acc[:-n]\n return acc\n\n\ndef positive_count(pc_array):\n return np.sum(pc_array>1)\n\n\ndef negative_count(pc_array):\n return np.sum(pc_array<1)\n"
] | [
[
"numpy.std",
"numpy.sum",
"numpy.cumprod",
"numpy.mean"
]
] |
l2xBrain/chineseocr | [
"96b0fd9615ea4d3294e4d6e1972f5df70514ec60"
] | [
"neural/index.py"
] | [
"# coding: utf-8\nimport sys\nimport os\nimport tensorflow as tf\nfrom PIL import Image\nfrom six.moves import cPickle as Pickle\n\nroot_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\")\nsys.path.append(root_dir)\nimport dataset\nimport inference\n\nIMAGE_HEIGHT = 45\nIMAGE_WIDTH = 45\nIMAGE_CHANNEL = 1\nLEARNING_RATE_BASE = 1e-2\nLEARNING_RATE_DECAY = 0.99\nMOVING_AVERAGE = 0.99 # 平均滑动\nMAX_STEPS = 99999\nROTATE_ANGLE = 15\nROTATE_COUNTS = 6\nBATCH_SIZE = 1000\n\n# 路径设置\ndatasets_dir = os.path.join(root_dir, \"datasets\")\nmodels_dir = os.path.join(root_dir, \"models\")\nmodels_file = os.path.join(models_dir, \"model.ckpt\")\n\nimage_holder = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL])\nlabel_holder = tf.placeholder(tf.int32, [None])\n\n\ndef train():\n\t\"\"\"\n\t\n\t:return: \n\t\"\"\"\n\t# 读取数据集\n\tfilenames = os.listdir(datasets_dir)\n\t# 过滤不合格数据集\n\tfor filename in filenames:\n\t\tif not os.path.splitext(filename)[1] == '.pickle':\n\t\t\tfilenames.remove(filename)\n\n\tlogits = inference.inference(image_holder, reuse=False)\n\tglobal_step = tf.Variable(0, trainable=False)\n\t# 定义滑动平滑平均值\n\tvariable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE, global_step)\n\tvariable_averages_op = variable_averages.apply(tf.trainable_variables())\n\t# 损失函数值\n\tloss = inference.loss(logits, label_holder)\n\t# 使用反向传播函数之前优化学习率\n\tlearning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, MAX_STEPS,\n\t\t\t\t\t\t\t\t\t\t\t decay_rate=LEARNING_RATE_DECAY)\n\t# 定义反向传播函数\n\ttrain_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n\t# 使用反向函数和滑动平滑值更新参数\n\ttrain_op = tf.group(train_step, variable_averages_op)\n\tsaver = tf.train.Saver()\n\twith tf.Session() as sess:\n\t\ttf.global_variables_initializer().run()\n\t\ttf.train.start_queue_runners()\n\t\tif not os.path.exists(models_dir):\n\t\t\tos.makedirs(models_dir)\n\t\tfor step in range(MAX_STEPS):\n\t\t\tfor filename in filenames:\n\t\t\t\ttrain_image, train_label = dataset.read(filename)\n\t\t\t\tassert isinstance(train_image, list)\n\t\t\t\tassert isinstance(train_label, list)\n\t\t\t\t_, loss_value = sess.run([train_op, loss], feed_dict={image_holder: train_image, label_holder: train_label})\n\t\t\tif step % 2 == 0:\n\t\t\t\tprint(\"after %d steps, the loss value is %g\" % (step, loss_value))\n\t\t\t\tsaver.save(sess, models_file, global_step=step)\n\n\ndef img_rotate(img_dir, file):\n\t\"\"\"\n\t\n\t:param img_dir: \n\t:param file: \n\t:return: \n\t\"\"\"\n\timg = Image.open(os.path.join(datasets_dir, file))\n\timage_list = []\n\tfor rotate_index in range(ROTATE_COUNTS):\n\t\timg = img.rotate(rotate_index * ROTATE_ANGLE)\n\t\timage_list.append(img)\n\n\nif __name__ == \"__main__\":\n\ttrain()\n"
] | [
[
"tensorflow.trainable_variables",
"tensorflow.train.start_queue_runners",
"tensorflow.group",
"tensorflow.Session",
"tensorflow.Variable",
"tensorflow.train.Saver",
"tensorflow.placeholder",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.train.exponential_decay",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer"
]
] |
djalmabright/statsmodels | [
"eb2432e1e4bd10b96ecde47552c9a9589441e7a2"
] | [
"statsmodels/tsa/base/tsa_model.py"
] | [
"from statsmodels.compat.python import long\nfrom statsmodels.compat.pandas import is_numeric_dtype\n\nimport numbers\n\nimport warnings\nimport numpy as np\nfrom pandas import (to_datetime, Int64Index, DatetimeIndex, Period,\n PeriodIndex, RangeIndex, Timestamp, Series, Index,\n Float64Index)\nfrom pandas.tseries.frequencies import to_offset\n\nfrom statsmodels.base import data\nimport statsmodels.base.model as base\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.tools.sm_exceptions import ValueWarning\n\n_tsa_doc = \"\"\"\n %(model)s\n\n Parameters\n ----------\n %(params)s\n dates : array-like of datetime, optional\n An array-like object of datetime objects. If a pandas object is given\n for endog or exog, it is assumed to have a DateIndex.\n freq : str, optional\n The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',\n 'M', 'A', or 'Q'. This is optional if dates are given.\n %(extra_params)s\n %(extra_sections)s\n\"\"\"\n\n_model_doc = \"Timeseries model base class\"\n\n_generic_params = base._model_params_doc\n_missing_param_doc = base._missing_param_doc\n\n\nclass TimeSeriesModel(base.LikelihoodModel):\n\n __doc__ = _tsa_doc % {\"model\": _model_doc, \"params\": _generic_params,\n \"extra_params\": _missing_param_doc,\n \"extra_sections\": \"\"}\n\n def __init__(self, endog, exog=None, dates=None, freq=None,\n missing='none', **kwargs):\n super(TimeSeriesModel, self).__init__(endog, exog, missing=missing,\n **kwargs)\n\n # Date handling in indexes\n self._init_dates(dates, freq)\n\n def _init_dates(self, dates=None, freq=None):\n \"\"\"\n Initialize dates\n\n Parameters\n ----------\n dates : array_like, optional\n An array like object containing dates.\n freq : str, tuple, datetime.timedelta, DateOffset or None, optional\n A frequency specification for either `dates` or the row labels from\n the endog / exog data.\n\n Notes\n -----\n Creates `self._index` and related attributes. `self._index` is always\n a Pandas index, and it is always Int64Index, DatetimeIndex, or\n PeriodIndex.\n\n If Pandas objects, endog / exog may have any type of index. If it is\n an Int64Index with values 0, 1, ..., nobs-1 or if it is (coerceable to)\n a DatetimeIndex or PeriodIndex *with an associated frequency*, then it\n is called a \"supported\" index. Otherwise it is called an \"unsupported\"\n index.\n\n Supported indexes are standardized (i.e. a list of date strings is\n converted to a DatetimeIndex) and the result is put in `self._index`.\n\n Unsupported indexes are ignored, and a supported Int64Index is\n generated and put in `self._index`. Warnings are issued in this case\n to alert the user if the returned index from some operation (e.g.\n forecasting) is different from the original data's index. However,\n whenever possible (e.g. purely in-sample prediction), the original\n index is returned.\n\n The benefit of supported indexes is that they allow *forecasting*, i.e.\n it is possible to extend them in a reasonable way. 
Thus every model\n must have an underlying supported index, even if it is just a generated\n Int64Index.\n\n \"\"\"\n\n # Get our index from `dates` if available, otherwise from whatever\n # Pandas index we might have retrieved from endog, exog\n if dates is not None:\n index = dates\n else:\n index = self.data.row_labels\n\n # Sanity check that we don't have a `freq` without an index\n if index is None and freq is not None:\n raise ValueError('Frequency provided without associated index.')\n\n # If an index is available, see if it is a date-based index or if it\n # can be coerced to one. (If it can't we'll fall back, below, to an\n # internal, 0, 1, ... nobs-1 integer index for modeling purposes)\n inferred_freq = False\n if index is not None:\n # Try to coerce to date-based index\n if not isinstance(index, (DatetimeIndex, PeriodIndex)):\n try:\n # Only try to coerce non-numeric index types (string,\n # list of date-times, etc.)\n # Note that np.asarray(Float64Index([...])) yields an\n # object dtype array in earlier versions of Pandas (and so\n # will not have is_numeric_dtype == True), so explicitly\n # check for it here. But note also that in very early\n # Pandas (~0.12), Float64Index doesn't exist (and so the\n # Statsmodels compat makes it an empty tuple, so in that\n # case also check if the first element is a float.\n _index = np.asarray(index)\n if (is_numeric_dtype(_index) or\n isinstance(index, Float64Index) or\n (Float64Index == tuple() and\n isinstance(_index[0], float))):\n raise ValueError('Numeric index given')\n # If a non-index Pandas series was given, only keep its\n # values (because we must have a pd.Index type, below, and\n # pd.to_datetime will return a Series when passed\n # non-list-like objects)\n if isinstance(index, Series):\n index = index.values\n # All coercion is done via pd.to_datetime\n # Note: date coercion via pd.to_datetime does not handle\n # string versions of PeriodIndex objects most of the time.\n _index = to_datetime(index)\n # Older versions of Pandas can sometimes fail here and\n # return a numpy array - check to make sure it's an index\n if not isinstance(_index, Index):\n raise ValueError('Could not coerce to date index')\n index = _index\n except:\n # Only want to actually raise an exception if `dates` was\n # provided but can't be coerced. 
If we got the index from\n # the row_labels, we'll just ignore it and use the integer\n # index below\n if dates is not None:\n raise ValueError('Non-date index index provided to'\n ' `dates` argument.')\n # Now, if we were given, or coerced, a date-based index, make sure\n # it has an associated frequency\n if isinstance(index, (DatetimeIndex, PeriodIndex)):\n # If no frequency, try to get an inferred frequency\n if freq is None and index.freq is None:\n freq = index.inferred_freq\n # If we got an inferred frequncy, alert the user\n if freq is not None:\n inferred_freq = True\n if freq is not None:\n warnings.warn('No frequency information was'\n ' provided, so inferred frequency %s'\n ' will be used.'\n % freq, ValueWarning)\n\n # Convert the passed freq to a pandas offset object\n if freq is not None:\n freq = to_offset(freq)\n\n # Now, if no frequency information is available from the index\n # itself or from the `freq` argument, raise an exception\n if freq is None and index.freq is None:\n # But again, only want to raise the exception if `dates`\n # was provided.\n if dates is not None:\n raise ValueError('No frequency information was'\n ' provided with date index and no'\n ' frequency could be inferred.')\n # However, if the index itself has no frequency information but\n # the `freq` argument is available (or was inferred), construct\n # a new index with an associated frequency\n elif freq is not None and index.freq is None:\n resampled_index = type(index)(\n start=index[0], end=index[-1], freq=freq)\n if not inferred_freq and not resampled_index.equals(index):\n raise ValueError('The given frequency argument could'\n ' not be matched to the given index.')\n index = resampled_index\n # Finally, if the index itself has a frequency and there was\n # also a given frequency, raise an exception if they are not\n # equal\n elif (freq is not None and not inferred_freq and\n not (index.freq == freq)):\n raise ValueError('The given frequency argument is'\n ' incompatible with the given index.')\n # Finally, raise an exception if we could not coerce to date-based\n # but we were given a frequency argument\n elif freq is not None:\n raise ValueError('Given index could not be coerced to dates'\n ' but `freq` argument was provided.')\n\n # Get attributes of the index\n has_index = index is not None\n date_index = isinstance(index, (DatetimeIndex, PeriodIndex))\n int_index = isinstance(index, Int64Index)\n range_index = isinstance(index, RangeIndex)\n has_freq = index.freq is not None if date_index else None\n increment = Index(range(self.endog.shape[0]))\n is_increment = index.equals(increment) if int_index else None\n\n # Issue warnings for unsupported indexes\n if has_index and not (date_index or range_index or is_increment):\n warnings.warn('An unsupported index was provided and will be'\n ' ignored when e.g. forecasting.', ValueWarning)\n if date_index and not has_freq:\n warnings.warn('A date index has been provided, but it has no'\n ' associated frequency information and so will be'\n ' ignored when e.g. 
forecasting.', ValueWarning)\n\n # Construct the internal index\n index_generated = False\n\n if ((date_index and has_freq) or (int_index and is_increment) or\n range_index):\n _index = index\n else:\n _index = increment\n index_generated = True\n self._index = _index\n self._index_generated = index_generated\n self._index_none = index is None\n self._index_dates = date_index and not index_generated\n self._index_freq = self._index.freq if self._index_dates else None\n self._index_inferred_freq = inferred_freq\n\n # For backwards compatibility, set data.dates, data.freq\n self.data.dates = self._index if self._index_dates else None\n self.data.freq = self._index.freqstr if self._index_dates else None\n\n def _get_index_loc(self, key, base_index=None):\n \"\"\"\n Get the location of a specific key in an index\n\n Parameters\n ----------\n key : label\n The key for which to find the location if the underlying index is\n a DateIndex or a location if the underlying index is a RangeIndex\n or an Int64Index.\n base_index : pd.Index, optional\n Optionally the base index to search. If None, the model's index is\n searched.\n\n Returns\n -------\n loc : int\n The location of the key\n index : pd.Index\n The index including the key; this is a copy of the original index\n unless the index had to be expanded to accomodate `key`.\n index_was_expanded : bool\n Whether or not the index was expanded to accomodate `key`.\n\n Notes\n -----\n If `key` is past the end of of the given index, and the index is either\n an Int64Index or a date index, this function extends the index up to\n and including key, and then returns the location in the new index.\n\n \"\"\"\n if base_index is None:\n base_index = self._index\n\n index = base_index\n date_index = isinstance(base_index, (PeriodIndex, DatetimeIndex))\n int_index = isinstance(base_index, Int64Index)\n range_index = isinstance(base_index, RangeIndex)\n index_class = type(base_index)\n nobs = len(index)\n\n # Special handling for RangeIndex\n if range_index and isinstance(key, (int, long, np.integer)):\n # Negative indices (that lie in the Index)\n if key < 0 and -key <= nobs:\n key = nobs + key\n # Out-of-sample (note that we include key itself in the new index)\n elif key > nobs - 1:\n stop = base_index._start + (key + 1) * base_index._step\n index = RangeIndex(start=base_index._start,\n stop=stop,\n step=base_index._step)\n\n # Special handling for Int64Index\n if (not range_index and int_index and not date_index and\n isinstance(key, (int, long, np.integer))):\n # Negative indices (that lie in the Index)\n if key < 0 and -key <= nobs:\n key = nobs + key\n # Out-of-sample (note that we include key itself in the new index)\n elif key > base_index[-1]:\n index = Int64Index(np.arange(base_index[0], int(key + 1)))\n\n # Special handling for date indexes\n if date_index:\n # Integer key (i.e. already given a location)\n if isinstance(key, (int, long, np.integer)):\n # Negative indices (that lie in the Index)\n if key < 0 and -key < nobs:\n key = index[nobs + key]\n # Out-of-sample (note that we include key itself in the new\n # index)\n elif key > len(base_index) - 1:\n index = index_class(start=base_index[0],\n periods=int(key + 1),\n freq=base_index.freq)\n key = index[-1]\n else:\n key = index[key]\n # Other key types (i.e. 
string date or some datetime-like object)\n else:\n # Covert the key to the appropriate date-like object\n if index_class is PeriodIndex:\n date_key = Period(key, freq=base_index.freq)\n else:\n date_key = Timestamp(key)\n\n # Out-of-sample\n if date_key > base_index[-1]:\n # First create an index that may not always include `key`\n index = index_class(start=base_index[0], end=date_key,\n freq=base_index.freq)\n\n # Now make sure we include `key`\n if not index[-1] == date_key:\n index = index_class(start=base_index[0],\n periods=len(index) + 1,\n freq=base_index.freq)\n\n # Get the location\n if date_index:\n # (note that get_loc will throw a KeyError if key is invalid)\n loc = index.get_loc(key)\n elif int_index or range_index:\n # For Int64Index and RangeIndex, key is assumed to be the location\n # and not an index value (this assumption is required to support\n # RangeIndex)\n try:\n index[key]\n # We want to raise a KeyError in this case, to keep the exception\n # consistent across index types.\n # - Attempting to index with an out-of-bound location (e.g.\n # index[10] on an index of length 9) will raise an IndexError\n # (as of Pandas 0.22)\n # - Attemtping to index with a type that cannot be cast to integer\n # (e.g. a non-numeric string) will raise a ValueError if the\n # index is RangeIndex (otherwise will raise an IndexError)\n # (as of Pandas 0.22)\n except (IndexError, ValueError) as e:\n raise KeyError(str(e))\n loc = key\n else:\n loc = index.get_loc(key)\n\n # Check if we now have a modified index\n index_was_expanded = index is not base_index\n\n # Return the index through the end of the loc / slice\n if isinstance(loc, slice):\n end = loc.stop\n else:\n end = loc\n\n return loc, index[:end + 1], index_was_expanded\n\n def _get_index_label_loc(self, key, base_index=None):\n \"\"\"\n Get the location of a specific key in an index or model row labels\n\n Parameters\n ----------\n key : label\n The key for which to find the location if the underlying index is\n a DateIndex or is only being used as row labels, or a location if\n the underlying index is a RangeIndex or an Int64Index.\n base_index : pd.Index, optional\n Optionally the base index to search. 
If None, the model's index is\n searched.\n\n Returns\n -------\n loc : int\n The location of the key\n index : pd.Index\n The index including the key; this is a copy of the original index\n unless the index had to be expanded to accomodate `key`.\n index_was_expanded : bool\n Whether or not the index was expanded to accomodate `key`.\n\n Notes\n -----\n This method expands on `_get_index_loc` by first trying the given\n base index (or the model's index if the base index was not given) and\n then falling back to try again with the model row labels as the base\n index.\n\n \"\"\"\n try:\n loc, index, index_was_expanded = (\n self._get_index_loc(key, base_index))\n except KeyError as e:\n try:\n if not isinstance(key, (int, long, np.integer)):\n loc = self.data.row_labels.get_loc(key)\n else:\n raise\n # Require scalar\n # Pandas may return a slice if there are multiple matching\n # locations that are monotonic increasing (otherwise it may\n # return an array of integer locations, see below).\n if isinstance(loc, slice):\n loc = loc.start\n if isinstance(loc, np.ndarray):\n # Pandas may return a mask (boolean array), for e.g.:\n # pd.Index(list('abcb')).get_loc('b')\n if loc.dtype == bool:\n # Return the first True value\n # (we know there is at least one True value if we're\n # here because otherwise the get_loc call would have\n # raised an exception)\n loc = np.argmax(loc)\n # Finally, Pandas may return an integer array of\n # locations that match the given value, for e.g.\n # pd.DatetimeIndex(['2001-02', '2001-01']).get_loc('2001')\n # (this appears to be slightly undocumented behavior, since\n # only int, slice, and mask are mentioned in docs for\n # pandas.Index.get_loc as of 0.23.4)\n else:\n loc = loc[0]\n if not isinstance(loc, numbers.Integral):\n raise\n\n index = self.data.row_labels[:loc + 1]\n index_was_expanded = False\n except:\n raise e\n return loc, index, index_was_expanded\n\n def _get_prediction_index(self, start, end, index=None, silent=False):\n \"\"\"\n Get the location of a specific key in an index or model row labels\n\n Parameters\n ----------\n start : label\n The key at which to start prediction. Depending on the underlying\n model's index, may be an integer, a date (string, datetime object,\n pd.Timestamp, or pd.Period object), or some other object in the\n model's row labels.\n end : label\n The key at which to end prediction (note that this key will be\n *included* in prediction). Depending on the underlying\n model's index, may be an integer, a date (string, datetime object,\n pd.Timestamp, or pd.Period object), or some other object in the\n model's row labels.\n index : pd.Index, optional\n Optionally an index to associate the predicted results to. If None,\n an attempt is made to create an index for the predicted results\n from the model's index or model's row labels.\n silent : bool, optional\n Argument to silence warnings.\n\n Returns\n -------\n start : integer\n The index / observation location at which to begin prediction.\n end : int\n The index / observation location at which to end in-sample\n prediction. The maximum value for this is nobs-1.\n out_of_sample : int\n The number of observations to forecast after the end of the sample.\n prediction_index : pd.Index or None\n The index associated with the prediction results. This index covers\n the range [start, end + out_of_sample]. If the model has no given\n index and no given row labels (i.e. 
endog/exog is not Pandas), then\n this will be None.\n\n Notes\n -----\n The arguments `start` and `end` behave differently, depending on if\n they are integer or not. If either is an integer, then it is assumed\n to refer to a *location* in the index, not to an index value. On the\n other hand, if it is a date string or some other type of object, then\n it is assumed to refer to an index *value*. In all cases, the returned\n `start` and `end` values refer to index *locations* (so in the former\n case, the given location is validated and returned whereas in the\n latter case a location is found that corresponds to the given index\n value).\n\n This difference in behavior is necessary to support `RangeIndex`. This\n is because integers for a RangeIndex could refer either to index values\n or to index locations in an ambiguous way (while for `Int64Index`,\n since we have required them to be full indexes, there is no ambiguity).\n\n \"\"\"\n\n # Convert index keys (start, end) to index locations and get associated\n # indexes.\n try:\n start, start_index, start_oos = self._get_index_label_loc(start)\n except KeyError:\n raise KeyError('The `start` argument could not be matched to a'\n ' location related to the index of the data.')\n if end is None:\n end = max(start, len(self._index) - 1)\n try:\n end, end_index, end_oos = self._get_index_label_loc(end)\n except KeyError:\n raise KeyError('The `end` argument could not be matched to a'\n ' location related to the index of the data.')\n\n # Handle slices (if the given index keys cover more than one date)\n if isinstance(start, slice):\n start = start.start\n if isinstance(end, slice):\n end = end.stop - 1\n\n # Get the actual index for the prediction\n prediction_index = end_index[start:]\n\n # Validate prediction options\n if end < start:\n raise ValueError('Prediction must have `end` after `start`.')\n\n # Handle custom prediction index\n # First, if we were given an index, check that it's the right size and\n # use it if so\n if index is not None:\n if not len(prediction_index) == len(index):\n raise ValueError('Invalid `index` provided in prediction.'\n ' Must have length consistent with `start`'\n ' and `end` arguments.')\n # But if we weren't given Pandas input, this index will not be\n # used because the data will not be wrapped; in that case, issue\n # a warning\n if not isinstance(self.data, data.PandasData) and not silent:\n warnings.warn('Because the model data (`endog`, `exog`) were'\n ' not given as Pandas objects, the prediction'\n ' output will be Numpy arrays, and the given'\n ' `index` argument will only be used'\n ' internally.', ValueWarning)\n prediction_index = Index(index)\n # Now, if we *do not* have a supported index, but we were given some\n # kind of index...\n elif self._index_generated and not self._index_none:\n # If we are in sample, and have row labels, use them\n if self.data.row_labels is not None and not (start_oos or end_oos):\n prediction_index = self.data.row_labels[start:end + 1]\n # Otherwise, warn the user that they will get an Int64Index\n elif not silent:\n warnings.warn('No supported index is available.'\n ' Prediction results will be given with'\n ' an integer index beginning at `start`.',\n ValueWarning)\n elif self._index_none:\n prediction_index = None\n\n # For backwards compatibility, set `predict_*` values\n if prediction_index is not None:\n self.data.predict_start = prediction_index[0]\n self.data.predict_end = prediction_index[-1]\n self.data.predict_dates = prediction_index\n else:\n 
self.data.predict_start = None\n self.data.predict_end = None\n self.data.predict_dates = None\n\n # Compute out-of-sample observations\n nobs = len(self.endog)\n out_of_sample = max(end - (nobs - 1), 0)\n end -= out_of_sample\n\n return start, end, out_of_sample, prediction_index\n\n def _get_exog_names(self):\n return self.data.xnames\n\n def _set_exog_names(self, vals):\n if not isinstance(vals, list):\n vals = [vals]\n self.data.xnames = vals\n\n # overwrite with writable property for (V)AR models\n exog_names = property(_get_exog_names, _set_exog_names)\n\n\nclass TimeSeriesModelResults(base.LikelihoodModelResults):\n def __init__(self, model, params, normalized_cov_params, scale=1.):\n self.data = model.data\n super(TimeSeriesModelResults,\n self).__init__(model, params, normalized_cov_params, scale)\n\n\nclass TimeSeriesResultsWrapper(wrap.ResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {'predict' : 'dates'}\n _wrap_methods = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(TimeSeriesResultsWrapper, # noqa:E305\n TimeSeriesModelResults)\n\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n import pandas\n\n data = sm.datasets.macrodata.load(as_pandas=False)\n\n #make a DataFrame\n #TODO: attach a DataFrame to some of the datasets, for quicker use\n dates = [str(int(x[0])) +':'+ str(int(x[1])) \\\n for x in data.data[['year','quarter']]]\n\n df = pandas.DataFrame(data.data[['realgdp','realinv','realcons']], index=dates)\n ex_mod = TimeSeriesModel(df)\n"
] | [
[
"pandas.to_datetime",
"pandas.Index",
"pandas.tseries.frequencies.to_offset",
"numpy.asarray",
"pandas.DataFrame",
"pandas.RangeIndex",
"pandas.Timestamp",
"numpy.argmax",
"pandas.Period"
]
] |
jhover/cshl-work | [
"ff1e1a3f6c4da7795f89dcc49204c8bb1f9c5f0f"
] | [
"cshlwork/phylotool.py"
] | [
"#!/usr/bin/env python\n#\n# https://www.biostars.org/p/224572/\n# preferred breakdown: animals, fungi, plants, protists, bacteria, archaea\n# ncbi taxon category codes: chordata 7711 vertebrata 7742 Insecta 50557 \n# # in DB\n# 1 => 'Eukaryota',\n# 2 => 'Animals', 1653\n# Vertebrata 877\n# 7 => 'Mammals', \n# 3 => 'Birds',\n# 9 => 'Reptiles',\n# 2 => 'Amphibians', \n# 4 => 'Fishes', \n# Invertebrates \n# 5 => 'Flatworms', 38\n# 10 => 'Roundworms' 119\n# 6 => 'Insects', 412\n# 8 => 'Other Animals', },\n# 3 => 'Fungi', \n# 4 => 'Other', \n# 5 => 'Plants', 459\n# 6 => 'Protists'},DB\n# 2 => 'Bacteria' -> all, \n# 3 => 'Archaea',\n# 4 => 'Viroids', \n# 5 => 'Viruses');\n# \n# areas?: eukaryote->animals\n# amph, birds, fishes, mammals, reptiles | flatworms , roundworms \n# eukaryote->plants \n# eukaryotes->fungi\n# eukaryotes->protists\n# \n# Convert newick tree to distance matrix in R: https://www.biostars.org/p/312148/\n# treeText <- readLines(tree.phy)\n# treeText <- paste0(treeText, collapse=\"\")\n# library(treeio)\n# tree <- read.tree(text = treeText) ## load tree \n# distMat <- cophenetic(tree) ## generate dist matrix\n#\n# See: https://biopython.org/wiki/Phylo_cookbook\n# for distance matrix calculation using biopython\n#\n\nimport argparse\nimport itertools\nimport logging\nimport os\n\nfrom collections import OrderedDict\n\nfrom Bio import Phylo\nimport numpy as np\nimport pandas as pd\n\n#pd.set_option(\"display.max_rows\", 15)\n#pd.set_option(\"display.max_columns\", 15)\n\nclass Phylogeny(object):\n \n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n self.tree = None\n self.distmatrix = None\n self.df = None\n self.filepath = None\n \n def __repr__(self):\n pass\n \n def parsefile(self, filepath):\n \"\"\"\n Reads NHX format file,...\n \"\"\"\n self.log.debug(\"Reading file %s\" % filepath)\n self.filepath = filepath\n self.tree = Phylo.read(filepath, 'newick')\n self.log.debug(\"tree is %s\" % self.tree )\n #print(tree)\n \n \n def get_distance_matrix(self):\n \"\"\" \n We want a list of row/column names, and a corresponding 2D matrix\n \n names = ['75743','137246','7950']\n \n data = \n \n Note: Takes about ~2 seconds with 119 terminals on a 2019 Macbook Pro\n Takes about ~38 seconds with 459 terminals on a 2019 Macbook Pro \n Takes about ~7 minutes with 876 terminals on a 2019 Macbook Pro\n \n \"\"\"\n self.distmatrix = OrderedDict()\n terminals = self.tree.get_terminals()\n terminals.sort(key=lambda x: x.name, reverse=True)\n mdim = len(terminals)\n self.log.debug(\"%d terminals..\" % mdim)\n i = 0\n for x,y in itertools.combinations_with_replacement(terminals, 2):\n if i % 1000 == 0:\n self.log.debug(\"Handling combination %d\" % i)\n v = self.tree.distance(x, y)\n self.distmatrix[x.name] = self.distmatrix.get(x.name, OrderedDict())\n self.distmatrix[x.name][y.name] = v\n self.distmatrix[y.name] = self.distmatrix.get(y.name, OrderedDict())\n self.distmatrix[y.name][x.name] = v\n i += 1\n \n self.log.debug(\"Done computing distances. 
Filling diagonal...\")\n for x in terminals:\n self.distmatrix[x.name][x.name] = 0\n \n self.log.debug(self.distmatrix)\n\n colnames = list(self.distmatrix.keys())\n return ( colnames, self.distmatrix ) \n\n\n def to_df(self):\n #(allclades, distmatrix) = self.to_distance_matrix()\n #df = pd.DataFrame(data = distmatrix,\n # index = allclades,\n # columns = allclades)\n csvpath = \"%s.csv\" % self.filepath\n if os.path.exists(csvpath):\n self.df = pd.read_csv(csvpath, sep='\\t', index_col=0)\n \n else:\n if self.distmatrix is not None:\n self.log.debug(\"Found completed distmatrix. Converting...\")\n else:\n self.log.debug(\"No distmatrix found. Computing...\")\n self.get_distance_matrix()\n self.df = pd.DataFrame(self.distmatrix, columns = self.distmatrix.keys()) \n \n return self.df\n\n def to_csv(self):\n if self.df is not None:\n self.log.debug(\"DataFrame found. Using...\")\n else:\n self.log.debug(\"No DataFrame found. Computing...\")\n self.to_df()\n self.df.to_csv(\"%s.csv\" % self.filepath)\n \n \n\nif __name__ == '__main__':\n FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'\n logging.basicConfig(format=FORMAT)\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('-d', '--debug', \n action=\"store_true\", \n dest='debug', \n help='debug logging')\n\n parser.add_argument('-v', '--verbose', \n action=\"store_true\", \n dest='verbose', \n help='verbose logging')\n\n parser.add_argument('infile', \n metavar='infile', \n type=str, \n help='a phylegeny file NHX')\n \n parser.add_argument('-c', '--config', \n action=\"store\", \n dest='conffile', \n default='~/etc/phylo.conf',\n help='Config file path [~/etc/phylo.conf]')\n\n \n args= parser.parse_args()\n \n if args.debug:\n logging.getLogger().setLevel(logging.DEBUG)\n if args.verbose:\n logging.getLogger().setLevel(logging.INFO)\n \n #cp = ConfigParser()\n #cp.read(args.conffile)\n \n p = Phylogeny()\n p.parsefile(args.infile)\n #(terminals, matrix) = p.get_distance_matrix()\n #print(terminals)\n df = p.to_df()\n print(df)\n p.to_csv()\n "
] | [
[
"pandas.read_csv"
]
] |
aspanghe/actionable-recourse | [
"e851de05ad32c077daf037a231addd271fcb1aac"
] | [
"examples/paper/demo_script_givemecredit.py"
] | [
"from examples.paper.initialize import *\nimport itertools\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn.apionly as sns\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.model_selection import train_test_split, KFold\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\n\n\ndata_name = 'givemecredit'\ndata_file = data_dir / '%s/%s_training.csv' % (data_name, data_name)\noutput_dir = results_dir / data_name\nraw_df = pd.concat([pd.read_csv(data_file, index_col=0), ])\n\n# # 1. Out of Sample Costs\n## sample the dataset\n## preprocess and prepare\nsample_weights = raw_df['age'].apply(lambda x: x < 35).map({True: 0, False: 1.})\ndownsampled_givemecredit = raw_df.sample(n = 100000, weights = sample_weights.values)\ny = 1 - raw_df['SeriousDlqin2yrs'].reset_index(drop=True)\nX = raw_df.drop('SeriousDlqin2yrs', axis=1).fillna(0).reset_index(drop=True)\n\n## split datasets for classifier\nX_clf, X_audit_holdout, y_clf, y_audit_holdout = train_test_split(X, y, test_size=.25)\n\n# X_train = X.loc[lambda df: ~df.index.isin(flipset_full.index)]\n# X_audit_holdout = X.loc[lambda df: df.index.isin(flipset_full.index)]\n# y_train = y.loc[lambda df: ~df.index.isin(flipset_full.index)]\n# y_test = y.loc[lambda df: df.index.isin(flipset_full.index)]\n# X_downsampled_train, X_downsampled_test, y_downsampled_train, y_downsampled_test = (\n# train_test_split(X, y, test_size=.25)\n# )\n# X_downsampled = (downsampled_givemecredit\n# .drop('SeriousDlqin2yrs', axis=1)\n# .fillna(0)\n# .reset_index(drop=True)\n# )\n#\n\nbaseline_train_aucs, baseline_test_aucs = [], []\nbiased_train_aucs, biased_test_aucs = [], []\nfor i in range(1):\n print(\"Iteration %d...\" % i)\n X_train, X_test, y_train, y_test = train_test_split(\n X_clf, y_clf, test_size=.25\n )\n X_biased_train = X_train.loc[lambda df: df['age'] >= 35] #.drop('age', axis=1)\n y_biased_train = y_train.loc[X_biased_train.index]\n X_biased_test = X_test.loc[lambda df: df['age'] >= 35] #.drop('age', axis=1)\n y_biased_test = y_test.loc[X_biased_test.index]\n\n ## train\n clf_full = (\n LogisticRegressionCV(max_iter=1000, Cs=100)\n .fit(X_train, y_train)\n )\n clf_age_limited = (\n LogisticRegressionCV(max_iter=1000, Cs=100)\n .fit(X_biased_train, y_biased_train)\n )\n\n ## baseline classifier\n y_baseline_train_pred = clf_full.predict_proba(X_train)[:, 1]\n baseline_train_auc = roc_auc_score(y_train, y_baseline_train_pred)\n baseline_train_aucs.append(baseline_train_auc)\n\n y_baseline_test_pred = clf_full.predict_proba(X_test)[:, 1]\n baseline_test_auc = roc_auc_score(y_test, y_baseline_test_pred)\n baseline_test_aucs.append(baseline_test_auc)\n\n ## biased classifier\n y_biased_train_pred = clf_age_limited.predict_proba(X_biased_train)[:, 1]\n biased_train_auc = roc_auc_score(y_biased_train, y_biased_train_pred)\n biased_train_aucs.append(biased_train_auc)\n\n y_biased_test_pred = clf_age_limited.predict_proba(X_biased_test)[: , 1]\n biased_test_auc = roc_auc_score(y_biased_test, y_biased_test_pred)\n biased_test_aucs.append(biased_test_auc)\n\n\n\n# original dataset. put aside 10,000 as holdout.\n# remaining 40,000. 
split into age groups.\n# over 35 should be included in both\n# for the other add in the other.\n# don't include age as a feature\n\n# print(\"sample, run y_pred...\")\n# X_sample = X.sample(5000)\n# y_pred_all_full_model = clf_full.predict_proba(X_sample)[:, 1]\n# y_pred_all_downsampled = clf_age_limited.predict_proba(X_sample)[:, 1]\n\nprint(\"run on full holdout...\")\n# X_sample = X.sample(5000)\ny_pred_all_full_model = clf_full.predict_proba(X_audit_holdout)[:, 1]\ny_pred_all_downsampled = clf_age_limited.predict_proba(X_audit_holdout)[:, 1]\n\nexp_df = pd.DataFrame(columns=['y_true', 'y_full_score', 'y_downsampled_score', 'age', 'y_full_cost', 'y_downsampled_cost'],\n index = X_audit_holdout.index)\n\n\nexp_df['y_full_score'] = y_pred_all_full_model\nexp_df['y_downsampled_score'] = y_pred_all_downsampled\nexp_df['age'] = X_audit_holdout['age']\nexp_df['y_true'] = y.loc[X_audit_holdout.index]\nexp_df.to_csv(output_dir / '2018-11-19__demo-1__exp-df.csv')\n\nX_sample = (X_audit_holdout\n # .sample(10000)\n .copy()\n )\nexp_df_sample = exp_df.loc[X_sample.index]\n\ncoefficients = {}\nintercept = {}\ncoefficients['full'] = clf_full.coef_[0]\nintercept['full'] = clf_full.intercept_[0]\ncoefficients['downsampled'] = clf_age_limited.coef_[0]\nintercept['downsampled'] = clf_age_limited.intercept_[0]\n\n# utilization bounded\n# RealEstate should be positive\n# numtimes90 days > 0\n# monthly income >0\n# debt\np = 0.98\n\n# run audit\nfor dataset in ['full', 'downsampled']:\n y_col = 'y_%s_score' % dataset\n scores = exp_df_sample[y_col]\n # p = scores.median()\n denied_individuals = scores.loc[lambda s: s<=p].index\n # actionset\n action_set = ActionSet(X=X_audit_holdout)\n action_set['age'].mutable = False\n action_set['NumberOfDependents'].mutable = False\n action_set['DebtRatio'].step_direction = -1\n # action_set['NumberOfTime60-89DaysPastDueNotWorse'].step_direction = -1\n action_set.align(coefficients=coefficients[dataset])\n\n idx = 0\n flipsets = {}\n import time\n now = time.time()\n for i in denied_individuals:\n if idx % 100 == 0:\n print('finished %d points in %f...' % (idx, time.time() - now))\n now = time.time()\n\n x = X.values[i]\n fb = RecourseBuilder(coefficients=coefficients[dataset],\n intercept=intercept[dataset] - (np.log(p / (1. 
- p))),\n action_set=action_set,\n x=x)\n ## CPLEX\n cplex_output = fb.fit()\n flipsets[i] = cplex_output\n idx += 1\n\n ## plot cost\n flipset_df = pd.DataFrame.from_dict(flipsets, orient=\"index\")\n flipset_df.to_csv(output_dir / '2018-11-19__demo-1__flipsets-%s.csv' % dataset)\n\n\nflipset_full = pd.read_csv(output_dir / '2018-11-19__demo-1__flipsets-full.csv', index_col=0)\nflipset_age = pd.read_csv(output_dir / '2018-11-19__demo-1__flipsets-downsampled.csv', index_col=0)\nexp_df = pd.read_csv(output_dir / '2018-11-19__demo-1__exp-df.csv', index_col=0)\n\nexp_df = exp_df.loc[flipset_full.index]\nexp_df['y_downsampled_cost'] = flipset_age['total_cost']\nexp_df['y_full_cost'] = flipset_full['total_cost']\nflipset_full['y_true'] = exp_df['y_true'].loc[flipset_full.index]\nflipset_age['y_true'] = exp_df['y_true'].loc[flipset_age.index]\n\n### get flipset\n\n## min-cost:\ndataset = 'downsampled'\n# dataset = 'full'\nyoung_individuals = (exp_df\n .loc[lambda df: df['age'] < 30]\n .loc[lambda df: df['y_full_score'] > .96]\n .loc[lambda df: df['y_full_score'] < .98 ]\n # .loc[lambda df: df['y_downsampled_score']<.9]\n )\ni = young_individuals.index[0]\naction_set = ActionSet(X=X_audit_holdout)\naction_set['DebtRatio'].step_direction = -1\naction_set['age'].mutable = False\naction_set['NumberOfDependents'].mutable = False\n# action_set['MonthlyIncome'].mutable = False\naction_set['NumberOfTime60-89DaysPastDueNotWorse'].mutable = False\n# action_set['RevolvingUtilizationOfUnsecuredLines'].mutable = False\n# action_set['NumberOfOpenCreditLinesAndLoans'].mutable = False\n# action_set['NumberRealEstateLoansOrLines'].mutable = False\naction_set.align(coefficients=coefficients[dataset])\n\np = .97\nx = X.values[i]\nfb = RecourseBuilder(\n coefficients=coefficients[dataset],\n intercept=intercept[dataset] - (np.log(p / (1. 
- p))),\n action_set=action_set,\n x=x\n )\ncplex_output = fb.fit()\nif cplex_output['feasible']:\n full_actionset = []\n for feature in range(len(x)):\n orig_val = x[feature]\n changed_val = (x[feature] + cplex_output['actions'][feature])\n if not np.isclose(orig_val, changed_val):\n output = \"& \\\\textit{%s} & \" % X.columns[feature]\n output += \"%f\" % orig_val + \"\\\\longrightarrow & %f \\\\\\\\\" % changed_val\n full_actionset.append(output)\n print(\"\\n\".join(full_actionset))\nelse:\n print(\"infeasible\")\n\n\n# & \\textit{NumberOfTime60 - 89DaysPastDueNotWorse} & 0 & $\\longrightarrow$ & 2 \\\\\n\nfull_stats_unstacked = (\n exp_df[[ 'y_full_score', 'age', 'y_full_cost']]\n .dropna()\n .rename(columns={\n 'y_full_score':'score',\n 'y_full_cost': 'cost'\n })\n .assign(**{\"Training Run\": \"Full dataset\"})\n)\n\nage_stats_unstacked = (\n exp_df[[ 'y_downsampled_score', 'age', 'y_downsampled_cost']]\n .dropna()\n .rename(columns={\n 'y_downsampled_score': 'score',\n 'y_downsampled_cost': 'cost'\n })\n .assign(**{\"Training Run\": \"Age-limited dataset\"})\n)\n\nunstacked_combined_set = (pd.concat([\n age_stats_unstacked,\n full_stats_unstacked\n ])\n # .loc[lambda df: df['score'] < .96]\n .replace([np.inf, -np.inf], np.nan)\n .assign(age_cut=lambda df: pd.cut(df['age'], np.arange(25, 75, 5)))\n .dropna()\n )\n\ncombined_data_df = (pd.concat([\n (exp_df\n .assign(**{\"Training Run\": \"Full Dataset\" })\n [['y_full_cost', 'age', 'Training Run', 'y_true', 'y_full_score']]\n .rename(columns={'y_full_cost': 'total_cost', 'y_full_score': 'y_pred'})\n ),\n (exp_df\n .assign(**{\"Training Run\": \"Age-downsampled\"})\n [['y_downsampled_cost', 'age', 'Training Run', 'y_true', 'y_downsampled_score']]\n .rename(columns={'y_downsampled_cost': 'total_cost', 'y_downsampled_score': 'y_pred'})\n )]).replace([np.inf, -np.inf], np.nan)\n .assign(age_cut=lambda df: pd.cut(df['age'], np.arange(25, 95, 5)))\n .dropna()\n )\n\nmax_total_cost = combined_data_df['total_cost'].max()\nmapper = {\n 0: \"-1\",\n 1: \"+1\",\n 'Age-downsampled': \"Sample Pop.\",\n 'Full Dataset': \"Target Pop.\",\n }\n\nplt.rc(\"font\", size=20)\nfor y_true in [0, 1]:\n\n for training_run in ['Downsampled', 'Full Dataset']:\n\n plt.figure(figsize=(4, 4))\n ax = sns.violinplot(\n x='age_cut', y='total_cost',\n data=(combined_data_df\n .loc[lambda df: df['Training Run'] == training_run]\n .loc[lambda df: df['y_true'] == y_true]\n ),\n linewidth = 0.5, cut=0,\n scale='width', color=\"gold\", inner='quartile'\n )\n\n plt.ylim((0, max_total_cost))\n plt.ylabel(\"Cost of Recourse\")\n plt.xlabel(\"Age\")\n ax.set_ylim((0, 1))\n ax.set_xticks(np.arange(0, 14, 2)- 1,)\n ax.set_xticklabels( np.arange(20, 90, 10))\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n for l in ax.lines:\n l.set_linewidth(2.)\n l.set_linestyle('-')\n l.set_solid_capstyle('butt')\n ax.set_facecolor(\"white\")\n plt.savefig(output_dir / '2018-11-18__age-bins-by-sample-%s-y-%s.png' % (training_run.replace(' ', '-').lower(), y_true), bbox_inches=\"tight\")\n plt.savefig(output_dir / '2018-11-18__age-bins-by-sample-%s-y-%s.pdf' % (training_run.replace(' ', '-').lower(), y_true), bbox_inches=\"tight\")\n plt.close()\n\n\n\n\n\n### extra EDA\n### exploration...\nage_cutoff_exploration = False\nif age_cutoff_exploration:\n # ### Find the right age cutoffs for training\n # test different AUC cutoff ranges\n grid_search_age_cutoff = False\n if grid_search_age_cutoff:\n train_cutoffs = range(30, 50, 5)\n holdout_cutoffs = range(22, 35, 
3)\n auc_df = pd.DataFrame(index=train_cutoffs, columns=holdout_cutoffs, )\n for train_cutoff, holdout_cutoff in itertools.product(train_cutoffs, holdout_cutoffs):\n ## split dataset\n # training dataset\n X_train = X.loc[lambda df: df['age'] > train_cutoff]\n y_train = y.loc[X_train.index]\n # holdout dataset\n X_holdout = X.loc[lambda df: df['age'] <= holdout_cutoff]\n y_holdout = y.loc[X_holdout.index]\n\n # fit/predict\n clf = LogisticRegression().fit(X_train, y_train)\n y_pred = clf.predict_proba(X_holdout)[:, 1]\n auc = roc_auc_score(y_holdout, y_pred)\n # cache auc\n auc_df.at[train_cutoff, holdout_cutoff] = auc\n\n plt.figure(figsize=(6, 3))\n sns.heatmap(auc_df.fillna(0).loc[auc_df.index[::-1]], )\n plt.ylabel('Training set > age')\n plt.xlabel('Testing set < age')\n plt.title('AUCs of Models Trained on\\nVarious dataset splits')\n plt.savefig(output_dir / '2018-08-11__aucs-of-age-based-dataset-splits.png')\n\n\n # ### Generate AUC scores for vanilla vs. hardweighted models\n def cross_val_scores_weighted(model, X, y, weights, cv=5, metrics=[sklearn.metrics.roc_auc_score]):\n kf = KFold(n_splits=cv)\n kf.get_n_splits(X)\n training_scores = []\n test_scores = []\n for train_index, test_index in kf.split(X.index):\n model_clone = sklearn.base.clone(model)\n X_train, X_audit_holdout = X.loc[train_index], X.loc[test_index]\n y_train, y_test = y.loc[train_index], y.loc[test_index]\n\n weights_train, weights_test = weights[train_index], weights[test_index]\n model_clone.fit(X_train, y_train, sample_weight=weights_train)\n y_pred = model_clone.predict_proba(X_audit_holdout)[:, 1]\n aucs = []\n\n y_pred_train = model_clone.predict_proba(X_train)[:, 1]\n y_pred_test = model_clone.predict_proba(X_audit_holdout)[:, 1]\n for i, metric in enumerate(metrics):\n test_score = metric(y_test, y_pred_test, sample_weight=np.abs(1 - weights_test))\n test_scores.append(score)\n\n training_score = metric(y_train, y_pred_train, sample_weight=np.abs(1 - weights_train))\n training_scores.append(training_score)\n return training_scores, test_scores\n\n\n weights = X['age'].pipe(lambda s: s > train_cutoff).map({True: .9, False: .1}).values\n logistic_model = LogisticRegression()\n weighted_training_scores, weighted_test_scores = cross_val_scores_weighted(logistic_model, X, y, weights, cv=10)\n\n unweighted_test_scores = []\n unweighted_train_scores = []\n kf = KFold(n_splits=10)\n kf.get_n_splits(X)\n for train_index, test_index in kf.split(X.index):\n X_train, X_audit_holdout = X.loc[train_index], X.loc[test_index]\n y_train, y_test = y.loc[train_index], y.loc[test_index]\n clf.fit(X_train, y_train)\n y_pred = clf.predict_proba(X_audit_holdout)[:, 1]\n score = roc_auc_score(y_test, y_pred)\n unweighted_test_scores.append(score)\n y_pred = clf.predict_proba(X_train)[:, 1]\n score = roc_auc_score(y_train, y_pred)\n unweighted_train_scores.append(score)\n\n hard_weighted_test_scores = []\n hard_weighted_train_scores = []\n kf = KFold(n_splits=10)\n kf.get_n_splits(X)\n X_s = X.loc[lambda df: df['age'] <= 35]\n y_s = y.loc[X_s.index].reset_index(drop=True)\n X_s = X_s.reset_index(drop=True)\n for train_index, test_index in kf.split(X_s.index):\n X_train, X_audit_holdout = X_s.loc[train_index], X_s.loc[test_index]\n y_train, y_test = y_s.loc[train_index], y_s.loc[test_index]\n clf.fit(X_train, y_train)\n y_pred = clf.predict_proba(X_audit_holdout)[:, 1]\n score = roc_auc_score(y_test, y_pred)\n hard_weighted_test_scores.append(score)\n y_pred = clf.predict_proba(X_train)[:, 1]\n score = roc_auc_score(y_train, 
y_pred)\n hard_weighted_train_scores.append(score)\n\n aucs = []\n for age_split in range(25, 65, 5):\n y_pred = exp_df.loc[lambda df: df['age'] >= age_split]['score']\n y_true = y.loc[y_pred.index]\n auc = roc_auc_score(y_true, y_pred)\n aucs.append(auc)\n pd.Series(aucs, index=range(25, 65, 5)).plot()\n plt.title('AUCs')\n plt.xlabel('AUC on Age > x')\n plt.vlines(train_cutoff, *plt.ylim(), linestyles='dashed')\n plt.text(train_cutoff * .98, .725, 'training\\ncutoff', horizontalalignment='right')\n\n # ### Another look at age-cutoff AUC: Aucs over trained across age\n train_cutoff = 30\n holdout_cutoff = 28\n exp_df = pd.DataFrame(columns=['score', 'age', 'inc_in_train'], index=X.index)\n exp_df.loc[X_train.index, 'inc_in_train'] = True\n exp_df.loc[X_holdout.index, 'inc_in_train'] = False\n X_train = X.loc[lambda df: df['age'] > train_cutoff]\n y_train = y.loc[X_train.index]\n # holdout dataset\n X_holdout = X.loc[lambda df: df['age'] <= holdout_cutoff]\n y_holdout = y.loc[X_holdout.index]\n\n aucs = []\n for age_split in range(25, 65, 5):\n y_pred = exp_df.loc[lambda df: df['age'] >= age_split]['score']\n y_true = y.loc[y_pred.index]\n auc = roc_auc_score(y_true, y_pred)\n aucs.append(auc)\n\n pd.Series(aucs, index=range(25, 65, 5)).plot()\n plt.title('AUCs')\n plt.xlabel('AUC on Age > x')\n plt.vlines(train_cutoff, *plt.ylim(), linestyles='dashed')\n plt.text(train_cutoff * 1.01, .705, 'training\\ncutoff', horizontalalignment='left')\n plt.savefig(output_dir / '2018-08-12__cv-auc-by-age.png', bbox_inches='tight')\n\n\n ## extra\n # ### How does average score differ across age?\n score_and_age = (\n exp_df\n .assign(c=1)\n .groupby(pd.cut(exp_df['score'], 50)).aggregate({'c':'sum', 'age':'mean'})\n .reset_index() # redo index\n .assign(score=lambda df: df['score'].apply(lambda x: x.left))\n .set_index('score')\n )\n\n cmap = plt.get_cmap('RdYlGn')\n ax = score_and_age['c'].plot(kind='bar', color=cmap(score_and_age['age'].pipe(lambda s: (s - s.min())/(s.max() - s.min()))))\n ax.set_xticks(range(0, 50, 10))\n ax.set_xticklabels(['%.01f' % s for s in np.arange(0, 1, .2)])\n ax.semilogy()\n\n plt.rc(\"font\", size=20)\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n\n train_cutoff = 35\n flipset_df = pd.concat([flipset_df, exp_df.loc[flipset_df.index]], axis=1)\n flipset_df = flipset_df.assign(total_cost=lambda df: df['total_cost'].replace([-np.inf, np.inf], np.nan)).dropna()\n\n color_var = 'total_cost'\n x_axis_var = 'age'\n\n flipset_cost_and_age = (\n flipset_df.assign(c=1).pipe(lambda df:\n df.groupby(pd.cut(df[x_axis_var], 50)).aggregate({'c': 'sum', color_var: 'mean'})\n # redo index\n .reset_index()\n .assign(**{x_axis_var: lambda df: df[x_axis_var].apply(lambda x: x.left)})\n .set_index(x_axis_var)\n )\n )\n\n cmap = plt.get_cmap('RdYlGn')\n ax = (flipset_cost_and_age['c']\n .plot(kind='bar', color=cmap(\n flipset_cost_and_age[color_var].pipe(lambda s: (s - s.min()) / (s.max() - s.min()))\n )))\n\n ylim = ax.get_ylim()\n train_cutoff_x = float(np.digitize(train_cutoff, flipset_cost_and_age.index))\n ax.vlines(train_cutoff_x, *ylim, linestyles='dashed')\n ax.set_ylim(ylim)\n ax.text(train_cutoff_x * 1.05, 40, 'Training age\\ncutoff: %d' % train_cutoff)\n\n ax.set_xticks(range(0, 50, 10))\n ax.set_xticklabels(flipset_cost_and_age.index[::10])\n\n # ax.semilogy()\n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.15)\n norm = mpl.colors.Normalize(\n vmin=flipset_cost_and_age[color_var].min(),\n 
vmax=flipset_cost_and_age[color_var].max()\n )\n\n cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm, orientation='vertical')\n cb.set_label('Average Cost of Bucket')\n\n ax.set_title('Average Age Across Cost Range\\nAge-Holdout: %d' % train_cutoff)\n ax.set_ylabel('Number of datapoints')\n\n plt.savefig(output_dir / '2018-08-12__hist-over-ages-and-costs__age-holdout-%d.png' % train_cutoff, bbox_inches=\"tight\")"
] | [
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"sklearn.model_selection.KFold",
"matplotlib.pyplot.rc",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.colorbar.ColorbarBase",
"sklearn.linear_model.LogisticRegressionCV",
"sklearn.metrics.roc_auc_score"
]
] |
dmitrySorokin/raph | [
"57c6c95fe879f929b730255cc724f23f09fecb31"
] | [
"nethack_raph/Actions/FollowGuard.py"
] | [
"import numpy as np\n\nfrom nethack_raph.Actions.base import BaseAction\nfrom nethack_raph.myconstants import COLOR_BG_RED\n\n\nclass FollowGuard(BaseAction):\n def can(self, level):\n targets = np.zeros(level.shape, dtype=bool)\n for xy, m in level.monsters.items():\n if m.guard:\n targets[xy] = True\n\n return targets.any(), targets\n\n def execute(self, path):\n guard, *middle, one = path\n if middle:\n self.log(\"Going towards guard\")\n self.draw_path(path, color=COLOR_BG_RED)\n self.hero.move(middle[-1])\n return\n\n self.kernel().send(' ')\n"
] | [
[
"numpy.zeros"
]
] |
JunhoPark0314/herbarium | [
"a475908dddb042c8beb60aea5f3f4fafa87f527c"
] | [
"herbarium/engine/defaults.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates.\n\n\"\"\"\nThis file contains components with some default boilerplate logic user may need\nin training / testing. They will not work for everyone, but many users may find them useful.\n\nThe behavior of functions/classes in this file is subject to change,\nsince they are meant to represent the \"common default behavior\" people need in their projects.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport weakref\nfrom collections import OrderedDict\nfrom typing import Optional\nimport torch\nfrom fvcore.nn.precise_bn import get_bn_modules\nfrom omegaconf import OmegaConf\nfrom torch.nn.parallel import DistributedDataParallel\n\nimport herbarium.data.transforms as T\nfrom herbarium.checkpoint import Checkpointer\nfrom herbarium.config import CfgNode, LazyConfig\nfrom herbarium.data import (\n MetadataCatalog,\n build_general_test_loader,\n build_general_train_loader,\n)\nfrom herbarium.evaluation import (\n DatasetEvaluator,\n inference_on_dataset,\n print_csv_format,\n verify_results,\n)\nfrom herbarium.modeling import build_model\nfrom herbarium.solver import build_lr_scheduler, build_optimizer\nfrom herbarium.utils import comm\nfrom herbarium.utils.collect_env import collect_env_info\nfrom herbarium.utils.env import seed_all_rng\nfrom herbarium.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter\nfrom herbarium.utils.file_io import PathManager\nfrom herbarium.utils.logger import setup_logger\n\nfrom . import hooks\nfrom .train_loop import AMPTrainer, SimpleTrainer, TrainerBase\n\n__all__ = [\n \"create_ddp_model\",\n \"default_argument_parser\",\n \"default_setup\",\n \"default_writers\",\n \"DefaultPredictor\",\n \"DefaultTrainer\",\n]\n\n\ndef create_ddp_model(model, *, fp16_compression=False, **kwargs):\n \"\"\"\n Create a DistributedDataParallel model if there are >1 processes.\n\n Args:\n model: a torch.nn.Module\n fp16_compression: add fp16 compression hooks to the ddp object.\n See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook\n kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.\n \"\"\" # noqa\n if comm.get_world_size() == 1:\n return model\n if \"device_ids\" not in kwargs:\n kwargs[\"device_ids\"] = [comm.get_local_rank()]\n ddp = DistributedDataParallel(model, **kwargs)\n if fp16_compression:\n from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks\n\n ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)\n return ddp\n\n\ndef default_argument_parser(epilog=None):\n \"\"\"\n Create a parser with some common arguments used by herbarium users.\n\n Args:\n epilog (str): epilog passed to ArgumentParser describing the usage.\n\n Returns:\n argparse.ArgumentParser:\n \"\"\"\n parser = argparse.ArgumentParser(\n epilog=epilog\n or f\"\"\"\nExamples:\n\nRun on single machine:\n $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml\n\nChange some config options:\n $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001\n\nRun on multiple machines:\n (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]\n (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]\n\"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\"--config-file\", default=\"\", metavar=\"FILE\", 
help=\"path to config file\")\n parser.add_argument(\n \"--resume\",\n action=\"store_true\",\n help=\"Whether to attempt to resume from the checkpoint directory. \"\n \"See documentation of `DefaultTrainer.resume_or_load()` for what it means.\",\n )\n parser.add_argument(\"--eval-only\", action=\"store_true\", help=\"perform evaluation only\")\n parser.add_argument(\"--num-gpus\", type=int, default=1, help=\"number of gpus *per machine*\")\n parser.add_argument(\"--num-machines\", type=int, default=1, help=\"total number of machines\")\n parser.add_argument(\n \"--machine-rank\", type=int, default=0, help=\"the rank of this machine (unique per machine)\"\n )\n\n # PyTorch still may leave orphan processes in multi-gpu training.\n # Therefore we use a deterministic way to obtain port,\n # so that users are aware of orphan processes by seeing the port occupied.\n port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != \"win32\" else 1) % 2 ** 14\n parser.add_argument(\n \"--dist-url\",\n default=\"tcp://127.0.0.1:{}\".format(port),\n help=\"initialization URL for pytorch distributed backend. See \"\n \"https://pytorch.org/docs/stable/distributed.html for details.\",\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options by adding 'KEY VALUE' pairs at the end of the command. \"\n \"See config references at \"\n \"https://herbarium.readthedocs.io/modules/config.html#config-references\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n return parser\n\n\ndef _try_get_key(cfg, *keys, default=None):\n \"\"\"\n Try select keys from cfg until the first key that exists. Otherwise return default.\n \"\"\"\n if isinstance(cfg, CfgNode):\n cfg = OmegaConf.create(cfg.dump())\n for k in keys:\n parts = k.split(\".\")\n # https://github.com/omry/omegaconf/issues/674\n for p in parts:\n if p not in cfg:\n break\n cfg = OmegaConf.select(cfg, p)\n else:\n return cfg\n return default\n\n\ndef _highlight(code, filename):\n try:\n import pygments\n except ImportError:\n return code\n\n from pygments.lexers import Python3Lexer, YamlLexer\n from pygments.formatters import Terminal256Formatter\n\n lexer = Python3Lexer() if filename.endswith(\".py\") else YamlLexer()\n code = pygments.highlight(code, lexer, Terminal256Formatter(style=\"monokai\"))\n return code\n\n\ndef default_setup(cfg, args):\n \"\"\"\n Perform some basic common setups at the beginning of a job, including:\n\n 1. Set up the herbarium logger\n 2. Log basic information about environment, cmdline arguments, and config\n 3. Backup the config to the output directory\n\n Args:\n cfg (CfgNode or omegaconf.DictConfig): the full config to be used\n args (argparse.NameSpace): the command line arguments to be logged\n \"\"\"\n output_dir = _try_get_key(cfg, \"OUTPUT_DIR\", \"output_dir\", \"train.output_dir\")\n if comm.is_main_process() and output_dir:\n PathManager.mkdirs(output_dir)\n\n rank = comm.get_rank()\n setup_logger(output_dir, distributed_rank=rank, name=\"fvcore\")\n logger = setup_logger(output_dir, distributed_rank=rank)\n\n logger.info(\"Rank of current process: {}. 
World size: {}\".format(rank, comm.get_world_size()))\n logger.info(\"Environment info:\\n\" + collect_env_info())\n\n logger.info(\"Command line arguments: \" + str(args))\n if hasattr(args, \"config_file\") and args.config_file != \"\":\n logger.info(\n \"Contents of args.config_file={}:\\n{}\".format(\n args.config_file,\n _highlight(PathManager.open(args.config_file, \"r\").read(), args.config_file),\n )\n )\n\n if comm.is_main_process() and output_dir:\n # Note: some of our scripts may expect the existence of\n # config.yaml in output directory\n path = os.path.join(output_dir, \"config.yaml\")\n if isinstance(cfg, CfgNode):\n logger.info(\"Running with full config:\\n{}\".format(_highlight(cfg.dump(), \".yaml\")))\n with PathManager.open(path, \"w\") as f:\n f.write(cfg.dump())\n else:\n LazyConfig.save(cfg, path)\n logger.info(\"Full config saved to {}\".format(path))\n\n # make sure each worker has a different, yet deterministic seed if specified\n seed = _try_get_key(cfg, \"SEED\", \"train.seed\", default=-1)\n seed_all_rng(None if seed < 0 else seed + rank)\n\n # cudnn benchmark has large overhead. It shouldn't be used considering the small size of\n # typical validation set.\n if not (hasattr(args, \"eval_only\") and args.eval_only):\n torch.backends.cudnn.benchmark = _try_get_key(\n cfg, \"CUDNN_BENCHMARK\", \"train.cudnn_benchmark\", default=False\n )\n\n\ndef default_writers(output_dir: str, max_iter: Optional[int] = None):\n \"\"\"\n Build a list of :class:`EventWriter` to be used.\n It now consists of a :class:`CommonMetricPrinter`,\n :class:`TensorboardXWriter` and :class:`JSONWriter`.\n\n Args:\n output_dir: directory to store JSON metrics and tensorboard events\n max_iter: the total number of iterations\n\n Returns:\n list[EventWriter]: a list of :class:`EventWriter` objects.\n \"\"\"\n return [\n # It may not always print what you want to see, since it prints \"common\" metrics only.\n CommonMetricPrinter(max_iter),\n JSONWriter(os.path.join(output_dir, \"metrics.json\")),\n TensorboardXWriter(output_dir),\n ]\n\n\nclass DefaultPredictor:\n \"\"\"\n Create a simple end-to-end predictor with the given config that runs on\n single device for a single input image.\n\n Compared to using the model directly, this class does the following additions:\n\n 1. Load checkpoint from `cfg.MODEL.WEIGHTS`.\n 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.\n 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.\n 4. 
Take one input image and produce a single output, instead of a batch.\n\n This is meant for simple demo purposes, so it does the above steps automatically.\n This is not meant for benchmarks or running complicated inference logic.\n If you'd like to do anything more fancy, please refer to its source code as examples\n to build and use the model manually.\n\n Attributes:\n metadata (Metadata): the metadata of the underlying dataset, obtained from\n cfg.DATASETS.TEST.\n\n Examples:\n ::\n pred = DefaultPredictor(cfg)\n inputs = cv2.imread(\"input.jpg\")\n outputs = pred(inputs)\n \"\"\"\n\n def __init__(self, cfg):\n self.cfg = cfg.clone() # cfg can be modified by model\n self.model = build_model(self.cfg)\n self.model.eval()\n if len(cfg.DATASETS.TEST):\n self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])\n\n checkpointer = Checkpointer(self.model)\n checkpointer.load(cfg.MODEL.WEIGHTS)\n\n self.aug = T.ResizeShortestEdge(\n [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST\n )\n\n self.input_format = cfg.INPUT.FORMAT\n assert self.input_format in [\"RGB\", \"BGR\"], self.input_format\n\n def __call__(self, original_image):\n \"\"\"\n Args:\n original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n\n Returns:\n predictions (dict):\n the output of the model for one image only.\n See :doc:`/tutorials/models` for details about the format.\n \"\"\"\n with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258\n # Apply pre-processing to image.\n if self.input_format == \"RGB\":\n # whether the model expects BGR inputs or RGB\n original_image = original_image[:, :, ::-1]\n height, width = original_image.shape[:2]\n image = self.aug.get_transform(original_image).apply_image(original_image)\n image = torch.as_tensor(image.astype(\"float32\").transpose(2, 0, 1))\n\n inputs = {\"image\": image, \"height\": height, \"width\": width}\n predictions = self.model([inputs])[0]\n return predictions\n\n\n\nclass DefaultTrainer(TrainerBase):\n \"\"\"\n A trainer with default training logic. It does the following:\n\n 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader\n defined by the given config. Create a LR scheduler defined by the config.\n 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when\n `resume_or_load` is called.\n 3. Register a few common hooks defined by the config.\n\n It is created to simplify the **standard model training workflow** and reduce code boilerplate\n for users who only need the standard training workflow, with standard features.\n It means this class makes *many assumptions* about your training logic that\n may easily become invalid in a new research. In fact, any assumptions beyond those made in the\n :class:`SimpleTrainer` are too much for research.\n\n The code of this class has been annotated about restrictive assumptions it makes.\n When they do not work for you, you're encouraged to:\n\n 1. Overwrite methods of this class, OR:\n 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and\n nothing else. You can then add your own hooks if needed. OR:\n 3. 
Write your own training loop similar to `tools/plain_train_net.py`.\n\n See the :doc:`/tutorials/training` tutorials for more details.\n\n Note that the behavior of this class, like other functions/classes in\n this file, is not stable, since it is meant to represent the \"common default behavior\".\n It is only guaranteed to work well with the standard models and training workflow in herbarium.\n To obtain more stable behavior, write your own training logic with other public APIs.\n\n Examples:\n ::\n trainer = DefaultTrainer(cfg)\n trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS\n trainer.train()\n\n Attributes:\n scheduler:\n checkpointer (Checkpointer):\n cfg (CfgNode):\n \"\"\"\n\n def __init__(self, cfg):\n \"\"\"\n Args:\n cfg (CfgNode):\n \"\"\"\n super().__init__()\n logger = logging.getLogger(\"herbarium\")\n if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2\n setup_logger()\n cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())\n\n # Assume these objects must be constructed in this order.\n model = self.build_model(cfg)\n optimizer = self.build_optimizer(cfg, model)\n data_loader = self.build_train_loader(cfg)\n\n model = create_ddp_model(model, broadcast_buffers=False, find_unused_parameters=True)\n \n if cfg.SOLVER.AMP.ENABLED:\n trainer = AMPTrainer\n else:\n trainer = SimpleTrainer\n\n \n self._trainer = trainer(\n model, data_loader, optimizer\n )\n\n self.scheduler = self.build_lr_scheduler(cfg, optimizer)\n self.checkpointer = Checkpointer(\n # Assume you want to save checkpoints together with logs/statistics\n model,\n cfg.OUTPUT_DIR,\n trainer=weakref.proxy(self),\n )\n self.start_iter = 0\n self.max_iter = cfg.SOLVER.MAX_ITER\n self.cfg = cfg\n\n self.register_hooks(self.build_hooks())\n\n def resume_or_load(self, resume=True):\n \"\"\"\n If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by\n a `last_checkpoint` file), resume from the file. Resuming means loading all\n available states (eg. optimizer and scheduler) and update iteration counter\n from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.\n\n Otherwise, this is considered as an independent training. 
The method will load model\n weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start\n from iteration 0.\n\n Args:\n resume (bool): whether to do resume or not\n \"\"\"\n self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)\n if resume and self.checkpointer.has_checkpoint():\n # The checkpoint stores the training iteration that just finished, thus we start\n # at the next iteration\n self.start_iter = self.iter + 1\n\n def build_hooks(self):\n \"\"\"\n Build a list of default hooks, including timing, evaluation,\n checkpointing, lr scheduling, precise BN, writing events.\n\n Returns:\n list[HookBase]:\n \"\"\"\n cfg = self.cfg.clone()\n cfg.defrost()\n cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN\n\n ret = [\n hooks.IterationTimer(),\n hooks.LRScheduler(),\n hooks.PreciseBN(\n # Run at the same freq as (but before) evaluation.\n cfg.TEST.EVAL_PERIOD,\n self.model,\n # Build a new data loader to not affect training\n self.build_train_loader(cfg),\n cfg.TEST.PRECISE_BN.NUM_ITER,\n )\n if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)\n else None,\n ]\n\n # Do PreciseBN before checkpointer, because it updates the model and need to\n # be saved by checkpointer.\n # This is not always the best: if checkpointing has a different frequency,\n # some checkpoints may have more precise statistics than others.\n if comm.is_main_process():\n ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))\n\n def test_and_save_results():\n self._last_eval_results = self.test(self.cfg, self.model)\n return self._last_eval_results\n\n # Do evaluation after checkpointer, because then if it fails,\n # we can use the saved checkpoint to debug.\n ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))\n\n if comm.is_main_process():\n # Here the default print/log frequency of each writer is used.\n # run writers in the end, so that evaluation metrics are written\n ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))\n return ret\n\n def build_writers(self):\n \"\"\"\n Build a list of writers to be used using :func:`default_writers()`.\n If you'd like a different list of writers, you can overwrite it in\n your trainer.\n\n Returns:\n list[EventWriter]: a list of :class:`EventWriter` objects.\n \"\"\"\n return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)\n\n def train(self):\n \"\"\"\n Run training.\n\n Returns:\n OrderedDict of results, if evaluation is enabled. 
Otherwise None.\n \"\"\"\n super().train(self.start_iter, self.max_iter)\n if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():\n assert hasattr(\n self, \"_last_eval_results\"\n ), \"No evaluation results obtained during training!\"\n verify_results(self.cfg, self._last_eval_results)\n return self._last_eval_results\n\n def run_step(self):\n self._trainer.iter = self.iter\n self._trainer.run_step()\n\n @classmethod\n def build_model(cls, cfg):\n \"\"\"\n Returns:\n torch.nn.Module:\n\n It now calls :func:`herbarium.modeling.build_model`.\n Overwrite it if you'd like a different model.\n \"\"\"\n model = build_model(cfg)\n logger = logging.getLogger(__name__)\n logger.info(\"Model:\\n{}\".format(model))\n return model\n\n @classmethod\n def build_optimizer(cls, cfg, model):\n \"\"\"\n Returns:\n torch.optim.Optimizer:\n\n It now calls :func:`herbarium.solver.build_optimizer`.\n Overwrite it if you'd like a different optimizer.\n \"\"\"\n return build_optimizer(cfg, model)\n\n @classmethod\n def build_lr_scheduler(cls, cfg, optimizer):\n \"\"\"\n It now calls :func:`herbarium.solver.build_lr_scheduler`.\n Overwrite it if you'd like a different scheduler.\n \"\"\"\n return build_lr_scheduler(cfg, optimizer)\n\n @classmethod\n def build_train_loader(cls, cfg):\n \"\"\"\n Returns:\n iterable\n\n It now calls :func:`herbarium.data.build_detection_train_loader`.\n Overwrite it if you'd like a different data loader.\n \"\"\"\n return build_general_train_loader(cfg)\n\n @classmethod\n def build_test_loader(cls, cfg, dataset_name):\n \"\"\"\n Returns:\n iterable\n\n It now calls :func:`herbarium.data.build_detection_test_loader`.\n Overwrite it if you'd like a different data loader.\n \"\"\"\n return build_general_test_loader(cfg, dataset_name)\n\n @classmethod\n def build_evaluator(cls, cfg, dataset_name):\n \"\"\"\n Returns:\n DatasetEvaluator or None\n\n It is not implemented by default.\n \"\"\"\n raise NotImplementedError(\n \"\"\"\nIf you want DefaultTrainer to automatically run evaluation,\nplease implement `build_evaluator()` in subclasses (see train_net.py for example).\nAlternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).\n\"\"\"\n )\n\n @classmethod\n def test(cls, cfg, model, evaluators=None):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (nn.Module):\n evaluators (list[DatasetEvaluator] or None): if None, will call\n :meth:`build_evaluator`. Otherwise, must have the same length as\n ``cfg.DATASETS.TEST``.\n\n Returns:\n dict: a dict of result metrics\n \"\"\"\n logger = logging.getLogger(__name__)\n if isinstance(evaluators, DatasetEvaluator):\n evaluators = [evaluators]\n if evaluators is not None:\n assert len(cfg.DATASETS.TEST) == len(evaluators), \"{} != {}\".format(\n len(cfg.DATASETS.TEST), len(evaluators)\n )\n\n results = OrderedDict()\n for idx, dataset_name in enumerate(cfg.DATASETS.TEST):\n data_loader = cls.build_test_loader(cfg, dataset_name)\n # When evaluators are passed in as arguments,\n # implicitly assume that evaluators can be created before data_loader.\n if evaluators is not None:\n evaluator = evaluators[idx]\n else:\n try:\n evaluator = cls.build_evaluator(cfg, dataset_name)\n except NotImplementedError:\n logger.warn(\n \"No evaluator found. 
Use `DefaultTrainer.test(evaluators=)`, \"\n \"or implement its `build_evaluator` method.\"\n )\n results[dataset_name] = {}\n continue\n results_i = inference_on_dataset(model, data_loader, evaluator)\n results[dataset_name] = results_i\n if comm.is_main_process():\n assert isinstance(\n results_i, dict\n ), \"Evaluator must return a dict on the main process. Got {} instead.\".format(\n results_i\n )\n logger.info(\"Evaluation results for {} in csv format:\".format(dataset_name))\n #print_csv_format(results_i)\n print(results_i)\n\n if len(results) == 1:\n results = list(results.values())[0]\n return results\n\n @staticmethod\n def auto_scale_workers(cfg, num_workers: int):\n \"\"\"\n When the config is defined for certain number of workers (according to\n ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of\n workers currently in use, returns a new cfg where the total batch size\n is scaled so that the per-GPU batch size stays the same as the\n original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.\n\n Other config options are also scaled accordingly:\n * training steps and warmup steps are scaled inverse proportionally.\n * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`.\n\n For example, with the original config like the following:\n\n .. code-block:: yaml\n\n IMS_PER_BATCH: 16\n BASE_LR: 0.1\n REFERENCE_WORLD_SIZE: 8\n MAX_ITER: 5000\n STEPS: (4000,)\n CHECKPOINT_PERIOD: 1000\n\n When this config is used on 16 GPUs instead of the reference number 8,\n calling this method will return a new config with:\n\n .. code-block:: yaml\n\n IMS_PER_BATCH: 32\n BASE_LR: 0.2\n REFERENCE_WORLD_SIZE: 16\n MAX_ITER: 2500\n STEPS: (2000,)\n CHECKPOINT_PERIOD: 500\n\n Note that both the original config and this new config can be trained on 16 GPUs.\n It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).\n\n Returns:\n CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.\n \"\"\"\n old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE\n if old_world_size == 0 or old_world_size == num_workers:\n return cfg\n cfg = cfg.clone()\n frozen = cfg.is_frozen()\n cfg.defrost()\n\n assert (\n cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0\n ), \"Invalid REFERENCE_WORLD_SIZE in config!\"\n scale = num_workers / old_world_size\n bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))\n lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale\n max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))\n warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))\n cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)\n cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))\n cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))\n cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant\n logger = logging.getLogger(__name__)\n logger.info(\n f\"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, \"\n f\"max_iter={max_iter}, warmup={warmup_iter}.\"\n )\n\n if frozen:\n cfg.freeze()\n return cfg\n\n\n# Access basic attributes from the underlying trainer\nfor _attr in [\"model\", \"data_loader\", \"optimizer\"]:\n setattr(\n DefaultTrainer,\n _attr,\n property(\n # getter\n lambda self, x=_attr: getattr(self._trainer, x),\n # setter\n lambda self, value, x=_attr: setattr(self._trainer, x, value),\n ),\n )"
] | [
[
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel"
]
] |
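The record above bundles detectron2-style DefaultPredictor and DefaultTrainer classes from the herbarium fork. As a quick orientation, the sketch below strings their documented entry points together the way their docstrings describe. It assumes a CfgNode prepared elsewhere and a herbarium.engine.defaults module path inferred from the class names; the image path and every name not present in the record are placeholders, not values from the repository.

# Minimal usage sketch for the DefaultPredictor / DefaultTrainer classes above,
# following their docstring examples. `cfg` is assumed to be a CfgNode prepared
# elsewhere (e.g. via the project's config utilities).
import cv2

def run_single_image_inference(cfg):
    from herbarium.engine.defaults import DefaultPredictor  # module path assumed, not confirmed by the record
    pred = DefaultPredictor(cfg)            # loads cfg.MODEL.WEIGHTS, builds resize aug from cfg.INPUT
    image_bgr = cv2.imread("input.jpg")     # placeholder path; BGR input, as the predictor expects
    return pred(image_bgr)                  # dict of predictions for one image

def run_default_training(cfg):
    from herbarium.engine.defaults import DefaultTrainer
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)    # start from cfg.MODEL.WEIGHTS at iteration 0
    return trainer.train()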
justinrgarrard/USEduData | [
"cafbc9a416264a7aa6a37876d058525a07ea5903"
] | [
"tests/tests.py"
] | [
"\"\"\"\nA testing script that evaluates each pipeline.\n\nShould only be run AFTER the main script has executed and\noutput files have been generated.\n\"\"\"\n\nimport os\nimport time\nimport shutil\nimport numpy as np\nimport pandas as pd\nimport unittest\nimport zipfile\nfrom pathlib import Path\n\n# Set shared input/output directories\nPARENT_DIR = os.path.dirname(os.path.realpath(__file__))\nPROJECT_DIR = os.path.join(PARENT_DIR, '..')\n\nINPUT_DIR = os.path.join(PROJECT_DIR, 'data/raw')\nOUTPUT_DIR = os.path.join(PROJECT_DIR, 'data/processed')\nSANITY_DIR = os.path.join(PROJECT_DIR, 'data/interim')\n\n\nclass FinanceDistrictPipelineTests(unittest.TestCase):\n def test_csv_to_standard(self):\n # Load in data\n output_filename = 'finance_districts.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test one entry to ensure that the transformed value matches\n ## Uses a hardcoded value from the source spreadsheet for input\n input_val = 281989\n\n output_row = output_data[output_data['YRDATA'] == 2016]\n output_row = output_row[output_row['STATE'] == 'IDAHO']\n output_row = output_row[output_row['NAME'] == 'MERIDIAN SCHOOL DISTRICT 2']\n output_val = output_row['TOTALREV'].iloc[0]\n\n assert (input_val == output_val)\n\n\nclass FinanceStatePipelineTests(unittest.TestCase):\n def test_district_to_state(self):\n # Load in data\n input_filename = 'finance_districts.csv'\n input_data = pd.read_csv(os.path.join(OUTPUT_DIR, input_filename))\n output_filename = 'finance_states.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test one entry to ensure that the transformed value matches\n input_row = input_data[input_data['YRDATA'] == 2016]\n input_row = input_row[input_row['STATE'] == 'IDAHO']\n input_val = input_row['TOTALREV'].sum()\n\n output_row = output_data[output_data['PRIMARY_KEY'] == '2016_IDAHO']\n output_val = output_row['TOTAL_REVENUE'].iloc[0]\n\n assert (input_val == output_val)\n\n def test_standard_to_all(self):\n # Load in data\n input_filename = 'finance_states.csv'\n input_data = pd.read_csv(os.path.join(OUTPUT_DIR, input_filename))\n output_filename = 'states_all.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test one entry to ensure that the transformed value matches\n input_row = input_data[input_data['PRIMARY_KEY'] == '2016_IDAHO']\n input_val = input_row['TOTAL_REVENUE'].iloc[0]\n\n output_row = output_data[output_data['PRIMARY_KEY'] == '2016_IDAHO']\n output_val = output_row['TOTAL_REVENUE'].iloc[0]\n\n assert (input_val == output_val)\n\n\nclass EnrollDistrictPipelineTests(unittest.TestCase):\n def test_csv_to_raw(self):\n # Load in data\n output_filename = 'enroll_districts_raw.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test one entry to ensure that the transformed value matches\n ## Uses a hardcoded value from the source spreadsheet for input\n input_val = 15585\n\n output_row = output_data[output_data['State Name'] == 'IDAHO']\n output_row = output_row[output_row['Agency Name'] == 'NAMPA SCHOOL DISTRICT']\n output_val = output_row['2017_A_A_A'].iloc[0]\n\n assert (input_val == output_val)\n\n def test_raw_to_standard(self):\n # Load in data\n input_filename = 'enroll_districts_raw.csv'\n input_data = pd.read_csv(os.path.join(OUTPUT_DIR, input_filename))\n output_filename = 'enroll_districts.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test one entry to ensure that the transformed value matches\n 
input_row = input_data[input_data['State Name'] == 'IDAHO']\n input_row = input_row[input_row['Agency Name'] == 'NAMPA SCHOOL DISTRICT']\n input_val = input_row['2017_A_A_A'].iloc[0]\n\n output_row = output_data[output_data['PRIMARY_KEY'] == '2017_NAMPA SCHOOL DISTRICT']\n output_val = output_row['A_A_A'].iloc[0]\n\n assert (input_val == output_val)\n\n\nclass EnrollStatePipelineTests(unittest.TestCase):\n def test_csv_to_raw(self):\n # Load in data\n output_filename = 'enroll_states_raw.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test one entry to ensure that the transformed value matches\n ## Uses a hardcoded value from the source spreadsheet for input\n input_val = 301186\n\n output_row = output_data[output_data['State Name'] == 'IDAHO']\n output_val = output_row['2017_A_A_A'].iloc[0]\n\n assert (input_val == output_val)\n\n def test_raw_to_standard(self):\n # Load in data\n input_filename = 'enroll_states_raw.csv'\n input_data = pd.read_csv(os.path.join(OUTPUT_DIR, input_filename))\n output_filename = 'enroll_states.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test one entry to ensure that the transformed value matches\n input_row = input_data[input_data['State Name'] == 'IDAHO']\n input_val = input_row['2017_A_A_A'].iloc[0]\n\n output_row = output_data[output_data['PRIMARY_KEY'] == '2017_IDAHO']\n output_val = output_row['A_A_A'].iloc[0]\n\n assert (input_val == output_val)\n\n def test_standard_to_summary(self):\n # Load in data\n input_filename = 'enroll_states.csv'\n input_data = pd.read_csv(os.path.join(OUTPUT_DIR, input_filename))\n output_filename = 'enroll_states_summary.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test one entry to ensure that the transformed value matches\n input_row = input_data[input_data['PRIMARY_KEY'] == '2017_IDAHO']\n input_val = input_row['A_A_A'].iloc[0]\n\n output_row = output_data[output_data['PRIMARY_KEY'] == '2017_IDAHO']\n output_val = output_row['GRADES_ALL_G'].iloc[0]\n\n assert (input_val == output_val)\n\n def test_standard_to_all(self):\n # Load in data\n input_filename = 'enroll_states.csv'\n input_data = pd.read_csv(os.path.join(OUTPUT_DIR, input_filename))\n output_filename = 'states_all.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test one entry to ensure that the transformed value matches\n input_row = input_data[input_data['PRIMARY_KEY'] == '2017_IDAHO']\n input_val = input_row['A_A_A'].iloc[0]\n\n output_row = output_data[output_data['PRIMARY_KEY'] == '2017_IDAHO']\n output_val = output_row['GRADES_ALL_G'].iloc[0]\n\n assert (input_val == output_val)\n\n\nclass AchievementStatePipelineTests(unittest.TestCase):\n def test_csv_to_raw(self):\n # Load in data\n output_filename = 'naep_states_raw.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test various entries to ensure that the transformed value matches\n ## Uses hardcoded values from the source spreadsheet for input\n input_vals_hardcoded = {'2019': 223,\n '2017': 223,\n '2015': 222,\n '2013': 219,\n '2011': 221,\n '2009': 221,\n '2007': 223,\n '2005': 222,\n '2003': 218,\n '2002': 220,\n '2000': np.nan,\n '1998': np.nan,\n '1994': np.nan,\n '1992': 219\n }\n\n for year, input_val in input_vals_hardcoded.items():\n try:\n output_row = output_data[output_data['YEAR'] == int(year)]\n output_row = output_row[output_row['STATE'] == 'IDAHO']\n output_row = output_row[output_row['DEMO'] == 'G04_A_A']\n 
output_row = output_row[output_row['TEST_SUBJECT'] == 'Reading']\n output_val = output_row['AVG_SCORE'].iloc[0]\n if np.isnan(input_val):\n assert (np.isnan(output_val))\n else:\n assert (input_val == output_val)\n except:\n print(f'{year}:{input_val}')\n raise Exception\n\n def test_raw_to_standard(self):\n # Load in data\n input_filename = 'naep_states_raw.csv'\n input_data = pd.read_csv(os.path.join(OUTPUT_DIR, input_filename))\n output_filename = 'naep_states.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test various entries to ensure that the transformed value matches\n input_val_years = ['2019', '2017', '2015', '2013', '2011', '2009', '2007',\n '2005', '2003', '2002', '2000', '1998', '1994', '1992']\n for year in input_val_years:\n try:\n input_row = input_data[input_data['YEAR'] == int(year)]\n input_row = input_row[input_row['STATE'] == 'IDAHO']\n input_row = input_row[input_row['DEMO'] == 'G04_A_A']\n input_row = input_row[input_row['TEST_SUBJECT'] == 'Reading']\n input_val = input_row['AVG_SCORE'].iloc[0]\n\n output_row = output_data[output_data['PRIMARY_KEY'] == '2019_IDAHO']\n output_val = output_row['G04_A_A_READING'].iloc[0]\n\n if np.isnan(input_val):\n assert (np.isnan(output_val))\n else:\n assert (input_val == output_val)\n except:\n print(f'{year}')\n raise Exception\n\n def test_standard_to_summary(self):\n # Load in data\n input_filename = 'naep_states.csv'\n input_data = pd.read_csv(os.path.join(OUTPUT_DIR, input_filename))\n output_filename = 'naep_states_summary.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test various entries to ensure that the transformed value matches\n input_val_years = ['2019', '2017', '2015', '2013', '2011', '2009', '2007',\n '2005', '2003', '2002', '2000', '1998', '1994', '1992']\n for year in input_val_years:\n try:\n input_row = input_data[input_data['PRIMARY_KEY'] == f'{year}_IDAHO']\n input_val = input_row['G04_A_A_READING'].iloc[0]\n\n output_row = output_data[output_data['PRIMARY_KEY'] == f'{year}_IDAHO']\n output_val = output_row['AVG_READING_4_SCORE'].iloc[0]\n\n if np.isnan(input_val):\n assert (np.isnan(output_val))\n else:\n assert (input_val == output_val)\n\n except:\n print(f'{year}')\n raise Exception\n\n def test_standard_to_all(self):\n # Load in data\n input_filename = 'naep_states.csv'\n input_data = pd.read_csv(os.path.join(OUTPUT_DIR, input_filename))\n output_filename = 'states_all_extended.csv'\n output_data = pd.read_csv(os.path.join(OUTPUT_DIR, output_filename))\n\n # Test various entries to ensure that the transformed value matches\n input_val_years = ['2019', '2017', '2015', '2013', '2011', '2009', '2007',\n '2005', '2003', '2002', '2000', '1998', '1994', '1992']\n for year in input_val_years:\n try:\n input_row = input_data[input_data['PRIMARY_KEY'] == f'{year}_IDAHO']\n input_val = input_row['G04_A_A_READING'].iloc[0]\n\n output_row = output_data[output_data['PRIMARY_KEY'] == f'{year}_IDAHO']\n output_val = output_row['G04_A_A_READING'].iloc[0]\n\n if np.isnan(input_val):\n assert (np.isnan(output_val))\n else:\n assert (input_val == output_val)\n\n except:\n print(f'{year}')\n raise Exception\n\n\nif __name__ == '__main__':\n unittest.main(warnings='ignore')\n"
] | [
[
"numpy.isnan"
]
] |
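Every test in the tests.py record follows the same pattern: read a processed CSV, filter it down to one row by key columns, and compare a single cell against a hardcoded source value. The helper below is an illustrative distillation of that pattern rather than code from the repository; the file and column names are taken from the record, and the directory layout mirrors its OUTPUT_DIR.

# Sketch of the row-lookup pattern used throughout tests.py above.
import os
import numpy as np
import pandas as pd

OUTPUT_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'data', 'processed')

def lookup(filename, filters, value_column):
    """Return the single value in `value_column` for the row matching `filters`."""
    data = pd.read_csv(os.path.join(OUTPUT_DIR, filename))
    for column, expected in filters.items():
        data = data[data[column] == expected]
    return data[value_column].iloc[0]

if __name__ == '__main__':
    # Mirrors FinanceStatePipelineTests.test_standard_to_all in the record.
    val = lookup('states_all.csv', {'PRIMARY_KEY': '2016_IDAHO'}, 'TOTAL_REVENUE')
    assert not np.isnan(val)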
tntek/PSAT-GDA | [
"89647ee66692da02359be7ca240b96b5cdbab19f"
] | [
"network.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision import models\nfrom torch.autograd import Variable\nimport math\nimport torch.nn.utils.weight_norm as weightNorm\nfrom collections import OrderedDict\nfrom models.modeling import VisionTransformer_shot as ViT_seg\nfrom models.modeling import CONFIGS as CONFIGS_ViT_seg\n\n \ndef calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0):\n return np.float(2.0 * (high - low) / (1.0 + np.exp(-alpha*iter_num / max_iter)) - (high - low) + low)\n\ndef init_weights(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:\n nn.init.kaiming_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.zeros_(m.bias)\n elif classname.find('Linear') != -1:\n nn.init.xavier_normal_(m.weight)\n nn.init.zeros_(m.bias)\n\nvgg_dict = {\"vgg11\":models.vgg11, \"vgg13\":models.vgg13, \"vgg16\":models.vgg16, \"vgg19\":models.vgg19, \n\"vgg11bn\":models.vgg11_bn, \"vgg13bn\":models.vgg13_bn, \"vgg16bn\":models.vgg16_bn, \"vgg19bn\":models.vgg19_bn} \nclass VGGBase(nn.Module):\n def __init__(self, vgg_name):\n super(VGGBase, self).__init__()\n model_vgg = vgg_dict[vgg_name](pretrained=True)\n self.features = model_vgg.features\n self.classifier = nn.Sequential()\n for i in range(6):\n self.classifier.add_module(\"classifier\"+str(i), model_vgg.classifier[i])\n self.in_features = model_vgg.classifier[6].in_features\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\nres_dict = {\"resnet18\":models.resnet18, \"resnet34\":models.resnet34, \"resnet50\":models.resnet50, \n\"resnet101\":models.resnet101, \"resnet152\":models.resnet152, \"resnext50\":models.resnext50_32x4d, \"resnext101\":models.resnext101_32x8d}\n\nclass ViT(nn.Module):\n def __init__(self):\n super(ViT, self).__init__()\n config_vit = CONFIGS_ViT_seg['R50-ViT-B_16']\n config_vit.n_classes = 100\n config_vit.n_skip = 3\n config_vit.patches.grid = (int(224 / 16), int(224 / 16))\n self.feature_extractor = ViT_seg(config_vit, img_size=[224, 224], zero_head=True, num_classes=config_vit.n_classes)\n self.feature_extractor.load_from(weights=np.load(config_vit.pretrained_path))\n self.in_features = 768\n\n def forward(self, x):\n feat = self.feature_extractor(x)\n return feat\n\n\nclass ViT_tda(nn.Module):\n def __init__(self):\n super(ViT_tda, self).__init__()\n config_vit = CONFIGS_ViT_tda['R50-ViT-B_16']\n config_vit.n_classes = 100\n config_vit.n_skip = 3\n config_vit.patches.grid = (int(224 / 16), int(224 / 16))\n self.feature_extractor = ViT_seg_tda(config_vit, img_size=[224, 224], num_classes=config_vit.n_classes)\n self.feature_extractor.load_from(weights=np.load(config_vit.pretrained_path))\n self.in_features = 2048\n\n def forward(self, x):\n _, feat = self.feature_extractor(x)\n return feat\n\n\nclass ResBase(nn.Module):\n def __init__(self, res_name,se=False, nl=False):\n super(ResBase, self).__init__()\n model_resnet = res_dict[res_name](pretrained=True)\n self.conv1 = model_resnet.conv1\n self.bn1 = model_resnet.bn1\n self.relu = model_resnet.relu\n self.maxpool = model_resnet.maxpool\n self.layer1 = model_resnet.layer1\n self.layer2 = model_resnet.layer2\n self.layer3 = model_resnet.layer3\n self.layer4 = model_resnet.layer4\n self.avgpool = model_resnet.avgpool\n self.in_features = model_resnet.fc.in_features\n self.se=se\n self.nl=nl\n if 
self.se:\n self.SELayer=SELayer(self.in_features)\n if self.nl:\n self.nlLayer=NONLocalBlock2D(self.in_features)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n if self.se:\n x=self.SELayer(x)\n if self.nl:\n x=self.nlLayer(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n\n return x\n\nclass feat_bootleneck(nn.Module):\n def __init__(self, feature_dim, bottleneck_dim=256, type=\"ori\"):\n super(feat_bootleneck, self).__init__()\n self.bn = nn.BatchNorm1d(bottleneck_dim, affine=True)\n self.relu = nn.ReLU(inplace=True)\n self.dropout = nn.Dropout(p=0.5)\n self.bottleneck = nn.Linear(feature_dim, bottleneck_dim)\n self.bottleneck.apply(init_weights)\n self.type = type\n\n def forward(self, x):\n x = self.bottleneck(x)\n if self.type == \"bn\":\n x = self.bn(x)\n return x\n\nclass feat_classifier(nn.Module):\n def __init__(self, class_num, bottleneck_dim=256, type=\"linear\"):\n super(feat_classifier, self).__init__()\n self.type = type\n if type == 'wn':\n self.fc = weightNorm(nn.Linear(bottleneck_dim, class_num), name=\"weight\")\n self.fc.apply(init_weights)\n else:\n self.fc = nn.Linear(bottleneck_dim, class_num)\n self.fc.apply(init_weights)\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\nclass feat_classifier_two(nn.Module):\n def __init__(self, class_num, input_dim, bottleneck_dim=256):\n super(feat_classifier_two, self).__init__()\n self.type = type\n self.fc0 = nn.Linear(input_dim, bottleneck_dim)\n self.fc0.apply(init_weights)\n self.fc1 = nn.Linear(bottleneck_dim, class_num)\n self.fc1.apply(init_weights)\n\n def forward(self, x):\n x = self.fc0(x)\n x = self.fc1(x)\n return x\n\nclass Res50(nn.Module):\n def __init__(self):\n super(Res50, self).__init__()\n model_resnet = models.resnet50(pretrained=True)\n self.conv1 = model_resnet.conv1\n self.bn1 = model_resnet.bn1\n self.relu = model_resnet.relu\n self.maxpool = model_resnet.maxpool\n self.layer1 = model_resnet.layer1\n self.layer2 = model_resnet.layer2\n self.layer3 = model_resnet.layer3\n self.layer4 = model_resnet.layer4\n self.avgpool = model_resnet.avgpool\n self.in_features = model_resnet.fc.in_features\n self.fc = model_resnet.fc\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n y = self.fc(x)\n return x, y\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)"
] | [
[
"torch.nn.Linear",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Dropout",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"numpy.load",
"numpy.exp",
"torch.nn.ReLU",
"torch.nn.init.normal_",
"torch.nn.BatchNorm1d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.zeros_",
"torch.nn.init.xavier_normal_"
]
] |
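The network.py record defines the backbone, bottleneck, and classifier pieces separately; the sketch below wires them into a single forward pass to show the intended shapes. It assumes the module is importable as network, 65 target classes, and 224x224 RGB inputs; those values are assumptions for illustration, not taken from the record. Note that the bottleneck class really is spelled feat_bootleneck in the source.

# Minimal sketch wiring the backbone / bottleneck / classifier modules from the
# network.py record into one forward pass. Batch size, image size, and class
# count are placeholders.
import torch
import network  # the module shown in the record

backbone = network.ResBase(res_name='resnet50')                       # ImageNet-pretrained feature extractor
bottleneck = network.feat_bootleneck(feature_dim=backbone.in_features,
                                     bottleneck_dim=256, type='bn')   # Linear + BatchNorm1d projection
classifier = network.feat_classifier(class_num=65, bottleneck_dim=256,
                                     type='wn')                       # weight-normalized linear head

x = torch.randn(4, 3, 224, 224)       # dummy batch of RGB images
features = backbone(x)                # (4, 2048) pooled ResNet-50 features
embedding = bottleneck(features)      # (4, 256)
logits = classifier(embedding)        # (4, 65)
print(logits.shape)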
lmmx/DALL-E | [
"76b36ddc5372444448dd93d1e29f434831d5e17c"
] | [
"dall_e/__init__.py"
] | [
"import io, requests\nimport torch\nimport torch.nn as nn\n\nfrom dall_e.encoder import Encoder\nfrom dall_e.decoder import Decoder\nfrom dall_e.utils import map_pixels, unmap_pixels\nfrom dall_e._model_dl import models_path # Set up models directory\nfrom dall_e import models # Load implicit namespace module\n\ndef load_model(path: str, device: torch.device = None) -> nn.Module:\n if path.startswith('http://') or path.startswith('https://'):\n resp = requests.get(path)\n resp.raise_for_status()\n \n with io.BytesIO(resp.content) as buf:\n return torch.load(buf, map_location=device)\n else:\n with open(path, 'rb') as f:\n return torch.load(f, map_location=device)\n"
] | [
[
"torch.load"
]
] |
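load_model in the record accepts either a local checkpoint path or an http(s) URL, and the package's __init__ re-exports map_pixels and unmap_pixels at the top level. Below is a minimal usage sketch under the assumption that checkpoints exist at the placeholder locations shown; the paths, URL, and input size are illustrative, not values from the record.

# Usage sketch for load_model above: the same helper covers both branches.
import torch
from dall_e import load_model, map_pixels

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

enc = load_model('encoder.pkl', device)                         # local-file branch (placeholder path)
dec = load_model('https://example.com/decoder.pkl', device)     # URL branch, fetched with requests (placeholder URL)

x = map_pixels(torch.rand(1, 3, 256, 256, device=device))       # shift pixels into the range the model expects
z_logits = enc(x)                                               # per-position code logits from the encoder
print(z_logits.shape)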
xf4j/aapm_thoraric_challenge | [
"eb242ab5fb573152dd7490d06066e73182289406"
] | [
"main.py"
] | [
"from __future__ import division\nimport os\nimport numpy as np\nimport pickle\nimport pprint\nimport tensorflow as tf\n\nfrom utils import *\nfrom constants import *\nfrom model import UNet3D\n\nflags = tf.app.flags\nflags.DEFINE_integer(\"epoch\", 200, \"Epoch to train [200]\")\nflags.DEFINE_string(\"train_data_dir\", \"data_training\", \"Directory name of the data [data_training]\")\nflags.DEFINE_string(\"test_data_dir\", \"data_testing\", \"Directory name of the test data [data_testing]\")\nflags.DEFINE_string(\"output_dir\", \"data_output\", \"Directory name of the output data [data_output]\")\nflags.DEFINE_integer(\"step1_features_root\", 24, \"Number of features in the first filter in step 1 [24]\")\nflags.DEFINE_integer(\"step2_features_root\", 48, \"Number of features in the first filter [48]\")\nflags.DEFINE_integer(\"conv_size\", 3, \"Convolution kernel size in encoding and decoding paths [3]\")\nflags.DEFINE_integer(\"layers\", 3, \"Encoding and deconding layers [3]\")\nflags.DEFINE_string(\"loss_type\", \"cross_entropy\", \"Loss type in the model [cross_entropy]\")\nflags.DEFINE_float(\"dropout_ratio\", 0.5, \"Drop out ratio [0.5]\")\nflags.DEFINE_string(\"checkpoint_dir\", \"checkpoint\", \"Directory name to save the checkpoints [checkpoint]\")\nflags.DEFINE_string(\"log_dir\", \"logs\", \"Directory name to save logs [logs]\")\nflags.DEFINE_boolean(\"train\", False, \"True for training, False for testing [True]\")\nFLAGS = flags.FLAGS\n\ndef main(_):\n pp = pprint.PrettyPrinter()\n pp.pprint(flags.FLAGS.__flags)\n \n if FLAGS.test_data_dir == FLAGS.train_data_dir:\n testing_gt_available = True\n if os.path.exists(os.path.join(FLAGS.train_data_dir, 'files.log')):\n with open(os.path.join(FLAGS.train_data_dir, 'files.log'), 'r') as f:\n training_paths, testing_paths = pickle.load(f)\n else:\n # Phase 0\n all_subjects = [os.path.join(FLAGS.train_data_dir, name) for name in os.listdir(FLAGS.train_data_dir)]\n n_training = int(np.rint(len(all_subjects) * 2 / 3))\n training_paths = all_subjects[:n_training]\n testing_paths = all_subjects[n_training:]\n # Save the training paths and testing paths\n with open(os.path.join(FLAGS.train_data_dir, 'files.log'), 'w') as f:\n pickle.dump([training_paths, testing_paths], f)\n else:\n testing_gt_available = False\n training_paths = [os.path.join(FLAGS.train_data_dir, name)\n for name in os.listdir(FLAGS.train_data_dir) if '.hdf5' in name]\n testing_paths = [os.path.join(FLAGS.test_data_dir, name)\n for name in os.listdir(FLAGS.test_data_dir) if '.hdf5' in name]\n \n if not os.path.exists(FLAGS.checkpoint_dir):\n os.makedirs(FLAGS.checkpoint_dir)\n \n if not os.path.exists(FLAGS.log_dir):\n os.makedirs(FLAGS.log_dir)\n \n run_config = tf.ConfigProto()\n with tf.Session(config=run_config) as sess:\n unet_all = UNet3D(sess, checkpoint_dir=FLAGS.checkpoint_dir, log_dir=FLAGS.log_dir, training_paths=training_paths,\n testing_paths=testing_paths, nclass=N_CLASSES + 1, layers=FLAGS.layers,\n features_root=FLAGS.step1_features_root, conv_size=FLAGS.conv_size, dropout=FLAGS.dropout_ratio,\n loss_type=FLAGS.loss_type, roi=(-1, 'All'), im_size=ALL_IM_SIZE,\n testing_gt_available=testing_gt_available, class_weights=(1.0, 2.0, 1.0, 1.0, 1.0, 3.0))\n if FLAGS.train:\n train_config = {}\n train_config['epoch'] = FLAGS.epoch\n unet_all.train(train_config)\n else:\n if not os.path.exists(FLAGS.output_dir):\n os.makedirs(FLAGS.output_dir)\n \n unet_all.test(testing_paths, FLAGS.output_dir)\n\n tf.reset_default_graph()\n \n # Second step training\n rois = 
['SpinalCord', 'Lung_R', 'Lung_L', 'Heart', 'Esophagus']\n im_sizes = [(160, 128, 64), (72, 192, 120), (72, 192, 120), (32, 160, 192), (80, 80, 64)]\n weights = [(1.0, 2.0), (1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (1.0, 3.0)]\n \n for roi in range(5):\n run_config = tf.ConfigProto()\n # Build model\n with tf.Session(config=run_config) as sess:\n unet = UNet3D(sess, checkpoint_dir=FLAGS.checkpoint_dir, log_dir=FLAGS.log_dir, training_paths=training_paths,\n testing_paths=testing_paths, nclass=2, layers=FLAGS.layers, features_root=FLAGS.step2_features_root,\n conv_size=FLAGS.conv_size, dropout=FLAGS.dropout_ratio, loss_type=FLAGS.loss_type,\n roi=(roi, rois[roi]), im_size=im_sizes[roi], testing_gt_available=testing_gt_available,\n class_weights=weights[roi])\n \n if FLAGS.train:\n train_config = {}\n train_config['epoch'] = FLAGS.epoch\n unet.train(train_config)\n else:\n if not os.path.exists(FLAGS.output_dir):\n os.makedirs(FLAGS.output_dir)\n \n # Get result for single ROI\n unet.test(testing_paths, FLAGS.output_dir)\n \n tf.reset_default_graph()\n \n \nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.app.run",
"tensorflow.reset_default_graph",
"tensorflow.Session"
]
] |
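Before building any model, main.py above performs a one-time split of the training subjects (two thirds train, one third test) and pickles it to files.log so later phases reuse the same split. The sketch below reproduces just that step on its own; the directory name follows the flag default, and the binary-mode file handling is an adjustment noted in the comments rather than a copy of the record.

# Sketch of the phase-0 train/test split performed in main.py above.
import os
import pickle
import numpy as np

train_data_dir = 'data_training'  # matches the default of --train_data_dir
all_subjects = sorted(os.path.join(train_data_dir, name) for name in os.listdir(train_data_dir))
n_training = int(np.rint(len(all_subjects) * 2 / 3))
training_paths, testing_paths = all_subjects[:n_training], all_subjects[n_training:]

# Note: the record opens files.log in text mode ('w'/'r'); pickle requires
# binary mode under Python 3, so 'wb' is used in this sketch.
with open(os.path.join(train_data_dir, 'files.log'), 'wb') as f:
    pickle.dump([training_paths, testing_paths], f)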
riven314/ENetDepth_TimeAnlysis_Tmp | [
"29bd864adf91700799d87b449d0c4e389f7028bc"
] | [
"models/enet.py"
] | [
"import torch.nn as nn\nimport torch\n\n\nclass InitialBlock(nn.Module):\n\t\"\"\"The initial block is composed of two branches:\n\t1. a main branch which performs a regular convolution with stride 2;\n\t2. an extension branch which performs max-pooling.\n\n\tDoing both operations in parallel and concatenating their results\n\tallows for efficient downsampling and expansion. The main branch\n\toutputs 13 feature maps while the extension branch outputs 3, for a\n\ttotal of 16 feature maps after concatenation.\n\n\tKeyword arguments:\n\t- in_channels (int): the number of input channels.\n\t- out_channels (int): the number output channels.\n\t- kernel_size (int, optional): the kernel size of the filters used in\n\tthe convolution layer. Default: 3.\n\t- padding (int, optional): zero-padding added to both sides of the\n\tinput. Default: 0.\n\t- bias (bool, optional): Adds a learnable bias to the output if\n\t``True``. Default: False.\n\t- relu (bool, optional): When ``True`` ReLU is used as the activation\n\tfunction; otherwise, PReLU is used. Default: True.\n\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t in_channels,\n\t\t\t\t out_channels,\n\t\t\t\t kernel_size=3,\n\t\t\t\t padding=0,\n\t\t\t\t bias=False,\n\t\t\t\t relu=True):\n\t\tsuper().__init__()\n\n\t\tif relu:\n\t\t\tactivation = nn.ReLU()\n\t\telse:\n\t\t\tactivation = nn.PReLU()\n\n\t\t# Main branch - As stated above the number of output channels for this\n\t\t# branch is the total minus 3, since the remaining channels come from\n\t\t# the extension branch\n\t\tself.main_branch = nn.Conv2d(\n\t\t\tin_channels,\n\t\t\tout_channels - 3,\n\t\t\tkernel_size=kernel_size,\n\t\t\tstride=2,\n\t\t\tpadding=padding,\n\t\t\tbias=bias)\n\n\t\t# Extension branch\n\t\tself.ext_branch = nn.MaxPool2d(kernel_size, stride=2, padding=padding)\n\n\t\t# Initialize batch normalization to be used after concatenation\n\t\tself.batch_norm = nn.BatchNorm2d(out_channels)\n\n\t\t# PReLU layer to apply after concatenating the branches\n\t\tself.out_prelu = activation\n\n\tdef forward(self, x):\n\t\tmain = self.main_branch(x)\n\t\text = self.ext_branch(x)\n\n\t\t# Concatenate branches\n\t\tout = torch.cat((main, ext), 1)\n\n\t\t# Apply batch normalization\n\t\tout = self.batch_norm(out)\n\n\t\treturn self.out_prelu(out)\n\n\nclass RegularBottleneck(nn.Module):\n\t\"\"\"Regular bottlenecks are the main building block of ENet.\n\tMain branch:\n\t1. Shortcut connection.\n\n\tExtension branch:\n\t1. 1x1 convolution which decreases the number of channels by\n\t``internal_ratio``, also called a projection;\n\t2. regular, dilated or asymmetric convolution;\n\t3. 1x1 convolution which increases the number of channels back to\n\t``channels``, also called an expansion;\n\t4. dropout as a regularizer.\n\n\tKeyword arguments:\n\t- channels (int): the number of input and output channels.\n\t- internal_ratio (int, optional): a scale factor applied to\n\t``channels`` used to compute the number of\n\tchannels after the projection. eg. given ``channels`` equal to 128 and\n\tinternal_ratio equal to 2 the number of channels after the projection\n\tis 64. Default: 4.\n\t- kernel_size (int, optional): the kernel size of the filters used in\n\tthe convolution layer described above in item 2 of the extension\n\tbranch. Default: 3.\n\t- padding (int, optional): zero-padding added to both sides of the\n\tinput. Default: 0.\n\t- dilation (int, optional): spacing between kernel elements for the\n\tconvolution described in item 2 of the extension branch. 
Default: 1.\n\tasymmetric (bool, optional): flags if the convolution described in\n\titem 2 of the extension branch is asymmetric or not. Default: False.\n\t- dropout_prob (float, optional): probability of an element to be\n\tzeroed. Default: 0 (no dropout).\n\t- bias (bool, optional): Adds a learnable bias to the output if\n\t``True``. Default: False.\n\t- relu (bool, optional): When ``True`` ReLU is used as the activation\n\tfunction; otherwise, PReLU is used. Default: True.\n\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t channels,\n\t\t\t\t internal_ratio=4,\n\t\t\t\t kernel_size=3,\n\t\t\t\t padding=0,\n\t\t\t\t dilation=1,\n\t\t\t\t asymmetric=False,\n\t\t\t\t dropout_prob=0,\n\t\t\t\t bias=False,\n\t\t\t\t relu=True):\n\t\tsuper().__init__()\n\n\t\t# Check in the internal_scale parameter is within the expected range\n\t\t# [1, channels]\n\t\tif internal_ratio <= 1 or internal_ratio > channels:\n\t\t\traise RuntimeError(\"Value out of range. Expected value in the \"\n\t\t\t\t\t\t\t \"interval [1, {0}], got internal_scale={1}.\"\n\t\t\t\t\t\t\t .format(channels, internal_ratio))\n\n\t\tinternal_channels = channels // internal_ratio\n\n\t\tif relu:\n\t\t\tactivation = nn.ReLU()\n\t\telse:\n\t\t\tactivation = nn.PReLU()\n\n\t\t# Main branch - shortcut connection\n\n\t\t# Extension branch - 1x1 convolution, followed by a regular, dilated or\n\t\t# asymmetric convolution, followed by another 1x1 convolution, and,\n\t\t# finally, a regularizer (spatial dropout). Number of channels is constant.\n\n\t\t# 1x1 projection convolution\n\t\tself.ext_conv1 = nn.Sequential(\n\t\t\tnn.Conv2d(\n\t\t\t\tchannels,\n\t\t\t\tinternal_channels,\n\t\t\t\tkernel_size=1,\n\t\t\t\tstride=1,\n\t\t\t\tbias=bias), nn.BatchNorm2d(internal_channels), activation)\n\n\t\t# If the convolution is asymmetric we split the main convolution in\n\t\t# two. Eg. 
for a 5x5 asymmetric convolution we have two convolution:\n\t\t# the first is 5x1 and the second is 1x5.\n\t\tif asymmetric:\n\t\t\tself.ext_conv2 = nn.Sequential(\n\t\t\t\tnn.Conv2d(\n\t\t\t\t\tinternal_channels,\n\t\t\t\t\tinternal_channels,\n\t\t\t\t\tkernel_size=(kernel_size, 1),\n\t\t\t\t\tstride=1,\n\t\t\t\t\tpadding=(padding, 0),\n\t\t\t\t\tdilation=dilation,\n\t\t\t\t\tbias=bias), nn.BatchNorm2d(internal_channels), activation,\n\t\t\t\tnn.Conv2d(\n\t\t\t\t\tinternal_channels,\n\t\t\t\t\tinternal_channels,\n\t\t\t\t\tkernel_size=(1, kernel_size),\n\t\t\t\t\tstride=1,\n\t\t\t\t\tpadding=(0, padding),\n\t\t\t\t\tdilation=dilation,\n\t\t\t\t\tbias=bias), nn.BatchNorm2d(internal_channels), activation)\n\t\telse:\n\t\t\tself.ext_conv2 = nn.Sequential(\n\t\t\t\tnn.Conv2d(\n\t\t\t\t\tinternal_channels,\n\t\t\t\t\tinternal_channels,\n\t\t\t\t\tkernel_size=kernel_size,\n\t\t\t\t\tstride=1,\n\t\t\t\t\tpadding=padding,\n\t\t\t\t\tdilation=dilation,\n\t\t\t\t\tbias=bias), nn.BatchNorm2d(internal_channels), activation)\n\n\t\t# 1x1 expansion convolution\n\t\tself.ext_conv3 = nn.Sequential(\n\t\t\tnn.Conv2d(\n\t\t\t\tinternal_channels,\n\t\t\t\tchannels,\n\t\t\t\tkernel_size=1,\n\t\t\t\tstride=1,\n\t\t\t\tbias=bias), nn.BatchNorm2d(channels), activation)\n\n\t\tself.ext_regul = nn.Dropout2d(p=dropout_prob)\n\n\t\t# PReLU layer to apply after adding the branches\n\t\tself.out_prelu = activation\n\n\tdef forward(self, x):\n\t\t# Main branch shortcut\n\t\tmain = x\n\n\t\t# Extension branch\n\t\text = self.ext_conv1(x)\n\t\text = self.ext_conv2(ext)\n\t\text = self.ext_conv3(ext)\n\t\text = self.ext_regul(ext)\n\n\t\t# Add main and extension branches\n\t\tout = main + ext\n\n\t\treturn self.out_prelu(out)\n\n\nclass DownsamplingBottleneck(nn.Module):\n\t\"\"\"Downsampling bottlenecks further downsample the feature map size.\n\n\tMain branch:\n\t1. max pooling with stride 2; indices are saved to be used for\n\tunpooling later.\n\n\tExtension branch:\n\t1. 2x2 convolution with stride 2 that decreases the number of channels\n\tby ``internal_ratio``, also called a projection;\n\t2. regular convolution (by default, 3x3);\n\t3. 1x1 convolution which increases the number of channels to\n\t``out_channels``, also called an expansion;\n\t4. dropout as a regularizer.\n\n\tKeyword arguments:\n\t- in_channels (int): the number of input channels.\n\t- out_channels (int): the number of output channels.\n\t- internal_ratio (int, optional): a scale factor applied to ``channels``\n\tused to compute the number of channels after the projection. eg. given\n\t``channels`` equal to 128 and internal_ratio equal to 2 the number of\n\tchannels after the projection is 64. Default: 4.\n\t- kernel_size (int, optional): the kernel size of the filters used in\n\tthe convolution layer described above in item 2 of the extension branch.\n\tDefault: 3.\n\t- padding (int, optional): zero-padding added to both sides of the\n\tinput. Default: 0.\n\t- dilation (int, optional): spacing between kernel elements for the\n\tconvolution described in item 2 of the extension branch. Default: 1.\n\t- asymmetric (bool, optional): flags if the convolution described in\n\titem 2 of the extension branch is asymmetric or not. Default: False.\n\t- return_indices (bool, optional): if ``True``, will return the max\n\tindices along with the outputs. Useful when unpooling later.\n\t- dropout_prob (float, optional): probability of an element to be\n\tzeroed. 
Default: 0 (no dropout).\n\t- bias (bool, optional): Adds a learnable bias to the output if\n\t``True``. Default: False.\n\t- relu (bool, optional): When ``True`` ReLU is used as the activation\n\tfunction; otherwise, PReLU is used. Default: True.\n\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t in_channels,\n\t\t\t\t out_channels,\n\t\t\t\t internal_ratio=4,\n\t\t\t\t kernel_size=3,\n\t\t\t\t padding=0,\n\t\t\t\t return_indices=False,\n\t\t\t\t dropout_prob=0,\n\t\t\t\t bias=False,\n\t\t\t\t relu=True):\n\t\tsuper().__init__()\n\n\t\t# Store parameters that are needed later\n\t\tself.return_indices = return_indices\n\n\t\t# Check in the internal_scale parameter is within the expected range\n\t\t# [1, channels]\n\t\tif internal_ratio <= 1 or internal_ratio > in_channels:\n\t\t\traise RuntimeError(\"Value out of range. Expected value in the \"\n\t\t\t\t\t\t\t \"interval [1, {0}], got internal_scale={1}. \"\n\t\t\t\t\t\t\t .format(in_channels, internal_ratio))\n\n\t\tinternal_channels = in_channels // internal_ratio\n\n\t\tif relu:\n\t\t\tactivation = nn.ReLU()\n\t\telse:\n\t\t\tactivation = nn.PReLU()\n\n\t\t# Main branch - max pooling followed by feature map (channels) padding\n\t\tself.main_max1 = nn.MaxPool2d(\n\t\t\tkernel_size,\n\t\t\tstride=2,\n\t\t\tpadding=padding,\n\t\t\treturn_indices=return_indices)\n\n\t\t# Extension branch - 2x2 convolution, followed by a regular, dilated or\n\t\t# asymmetric convolution, followed by another 1x1 convolution. Number\n\t\t# of channels is doubled.\n\n\t\t# 2x2 projection convolution with stride 2\n\t\tself.ext_conv1 = nn.Sequential(\n\t\t\tnn.Conv2d(\n\t\t\t\tin_channels,\n\t\t\t\tinternal_channels,\n\t\t\t\tkernel_size=2,\n\t\t\t\tstride=2,\n\t\t\t\tbias=bias), nn.BatchNorm2d(internal_channels), activation)\n\n\t\t# Convolution\n\t\tself.ext_conv2 = nn.Sequential(\n\t\t\tnn.Conv2d(\n\t\t\t\tinternal_channels,\n\t\t\t\tinternal_channels,\n\t\t\t\tkernel_size=kernel_size,\n\t\t\t\tstride=1,\n\t\t\t\tpadding=padding,\n\t\t\t\tbias=bias), nn.BatchNorm2d(internal_channels), activation)\n\n\t\t# 1x1 expansion convolution\n\t\tself.ext_conv3 = nn.Sequential(\n\t\t\tnn.Conv2d(\n\t\t\t\tinternal_channels,\n\t\t\t\tout_channels,\n\t\t\t\tkernel_size=1,\n\t\t\t\tstride=1,\n\t\t\t\tbias=bias), nn.BatchNorm2d(out_channels), activation)\n\n\t\tself.ext_regul = nn.Dropout2d(p=dropout_prob)\n\n\t\t# PReLU layer to apply after concatenating the branches\n\t\tself.out_prelu = activation\n\n\tdef forward(self, x):\n\t\t# Main branch shortcut\n\t\tif self.return_indices:\n\t\t\tmain, max_indices = self.main_max1(x)\n\t\telse:\n\t\t\tmain = self.main_max1(x)\n\n\t\t# Extension branch\n\t\text = self.ext_conv1(x)\n\t\text = self.ext_conv2(ext)\n\t\text = self.ext_conv3(ext)\n\t\text = self.ext_regul(ext)\n\n\t\t# Main branch channel padding\n\t\tn, ch_ext, h, w = ext.size()\n\t\tch_main = main.size()[1]\n\t\tpadding = torch.zeros(n, ch_ext - ch_main, h, w)\n\n\t\t# Before concatenating, check if main is on the CPU or GPU and\n\t\t# convert padding accordingly\n\t\tif main.is_cuda:\n\t\t\tpadding = padding.cuda()\n\n\t\t# Concatenate\n\t\tmain = torch.cat((main, padding), 1)\n\n\t\t# Add main and extension branches\n\t\tout = main + ext\n\n\t\treturn self.out_prelu(out), max_indices\n\n\nclass UpsamplingBottleneck(nn.Module):\n\t\"\"\"The upsampling bottlenecks upsample the feature map resolution using max\n\tpooling indices stored from the corresponding downsampling bottleneck.\n\n\tMain branch:\n\t1. 
1x1 convolution with stride 1 that decreases the number of channels by\n\t``internal_ratio``, also called a projection;\n\t2. max unpool layer using the max pool indices from the corresponding\n\tdownsampling max pool layer.\n\n\tExtension branch:\n\t1. 1x1 convolution with stride 1 that decreases the number of channels by\n\t``internal_ratio``, also called a projection;\n\t2. transposed convolution (by default, 3x3);\n\t3. 1x1 convolution which increases the number of channels to\n\t``out_channels``, also called an expansion;\n\t4. dropout as a regularizer.\n\n\tKeyword arguments:\n\t- in_channels (int): the number of input channels.\n\t- out_channels (int): the number of output channels.\n\t- internal_ratio (int, optional): a scale factor applied to ``in_channels``\n\t used to compute the number of channels after the projection. eg. given\n\t ``in_channels`` equal to 128 and ``internal_ratio`` equal to 2 the number\n\t of channels after the projection is 64. Default: 4.\n\t- kernel_size (int, optional): the kernel size of the filters used in the\n\tconvolution layer described above in item 2 of the extension branch.\n\tDefault: 3.\n\t- padding (int, optional): zero-padding added to both sides of the input.\n\tDefault: 0.\n\t- dropout_prob (float, optional): probability of an element to be zeroed.\n\tDefault: 0 (no dropout).\n\t- bias (bool, optional): Adds a learnable bias to the output if ``True``.\n\tDefault: False.\n\t- relu (bool, optional): When ``True`` ReLU is used as the activation\n\tfunction; otherwise, PReLU is used. Default: True.\n\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t in_channels,\n\t\t\t\t out_channels,\n\t\t\t\t internal_ratio=4,\n\t\t\t\t kernel_size=3,\n\t\t\t\t padding=0,\n\t\t\t\t dropout_prob=0,\n\t\t\t\t bias=False,\n\t\t\t\t relu=True):\n\t\tsuper().__init__()\n\n\t\t# Check in the internal_scale parameter is within the expected range\n\t\t# [1, channels]\n\t\tif internal_ratio <= 1 or internal_ratio > in_channels:\n\t\t\traise RuntimeError(\"Value out of range. Expected value in the \"\n\t\t\t\t\t\t\t \"interval [1, {0}], got internal_scale={1}. \"\n\t\t\t\t\t\t\t .format(in_channels, internal_ratio))\n\n\t\tinternal_channels = in_channels // internal_ratio\n\n\t\tif relu:\n\t\t\tactivation = nn.ReLU()\n\t\telse:\n\t\t\tactivation = nn.PReLU()\n\n\t\t# Main branch - max pooling followed by feature map (channels) padding\n\t\tself.main_conv1 = nn.Sequential(\n\t\t\tnn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),\n\t\t\tnn.BatchNorm2d(out_channels))\n\n\t\t# Remember that the stride is the same as the kernel_size, just like\n\t\t# the max pooling layers\n\t\tself.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)\n\n\t\t# Extension branch - 1x1 convolution, followed by a regular, dilated or\n\t\t# asymmetric convolution, followed by another 1x1 convolution. 
Number\n\t\t# of channels is doubled.\n\n\t\t# 1x1 projection convolution with stride 1\n\t\tself.ext_conv1 = nn.Sequential(\n\t\t\tnn.Conv2d(\n\t\t\t\tin_channels, internal_channels, kernel_size=1, bias=bias),\n\t\t\tnn.BatchNorm2d(internal_channels), activation)\n\n\t\t# Transposed convolution\n\t\tself.ext_conv2 = nn.Sequential(\n\t\t\tnn.ConvTranspose2d(\n\t\t\t\tinternal_channels,\n\t\t\t\tinternal_channels,\n\t\t\t\tkernel_size=kernel_size,\n\t\t\t\tstride=2,\n\t\t\t\tpadding=padding,\n\t\t\t\toutput_padding=1,\n\t\t\t\tbias=bias), nn.BatchNorm2d(internal_channels), activation)\n\n\t\t# 1x1 expansion convolution\n\t\tself.ext_conv3 = nn.Sequential(\n\t\t\tnn.Conv2d(\n\t\t\t\tinternal_channels, out_channels, kernel_size=1, bias=bias),\n\t\t\tnn.BatchNorm2d(out_channels), activation)\n\n\t\tself.ext_regul = nn.Dropout2d(p=dropout_prob)\n\n\t\t# PReLU layer to apply after concatenating the branches\n\t\tself.out_prelu = activation\n\n\tdef forward(self, x, max_indices):\n\t\t# Main branch shortcut\n\t\tmain = self.main_conv1(x)\n\t\tmain = self.main_unpool1(main, max_indices)\n\t\t# Extension branch\n\t\text = self.ext_conv1(x)\n\t\text = self.ext_conv2(ext)\n\t\text = self.ext_conv3(ext)\n\t\text = self.ext_regul(ext)\n\n\t\t# Add main and extension branches\n\t\tout = main + ext\n\n\t\treturn self.out_prelu(out)\n\n\nclass ENet(nn.Module):\n\t\"\"\"Generate the ENet model.\n\n\tKeyword arguments:\n\t- num_classes (int): the number of classes to segment.\n\t- encoder_relu (bool, optional): When ``True`` ReLU is used as the\n\tactivation function in the encoder blocks/layers; otherwise, PReLU\n\tis used. Default: False.\n\t- decoder_relu (bool, optional): When ``True`` ReLU is used as the\n\tactivation function in the decoder blocks/layers; otherwise, PReLU\n\tis used. 
Default: True.\n\n\t\"\"\"\n\n\tdef __init__(self, num_classes, encoder_relu=False, decoder_relu=True):\n\t\tsuper().__init__()\n\n\t\tself.initial_block = InitialBlock(3, 16, padding=1, relu=encoder_relu)\n\n\t\t# Stage 1 - Encoder\n\t\tself.downsample1_0 = DownsamplingBottleneck(\n\t\t\t16,\n\t\t\t64,\n\t\t\tpadding=1,\n\t\t\treturn_indices=True,\n\t\t\tdropout_prob=0.01,\n\t\t\trelu=encoder_relu)\n\t\tself.regular1_1 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.01, relu=encoder_relu)\n\t\tself.regular1_2 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.01, relu=encoder_relu)\n\t\tself.regular1_3 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.01, relu=encoder_relu)\n\t\tself.regular1_4 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.01, relu=encoder_relu)\n\n\t\t# Stage 2 - Encoder\n\t\tself.downsample2_0 = DownsamplingBottleneck(\n\t\t\t64,\n\t\t\t128,\n\t\t\tpadding=1,\n\t\t\treturn_indices=True,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.regular2_1 = RegularBottleneck(\n\t\t\t128, padding=1, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.dilated2_2 = RegularBottleneck(\n\t\t\t128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.asymmetric2_3 = RegularBottleneck(\n\t\t\t128,\n\t\t\tkernel_size=5,\n\t\t\tpadding=2,\n\t\t\tasymmetric=True,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.dilated2_4 = RegularBottleneck(\n\t\t\t128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.regular2_5 = RegularBottleneck(\n\t\t\t128, padding=1, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.dilated2_6 = RegularBottleneck(\n\t\t\t128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.asymmetric2_7 = RegularBottleneck(\n\t\t\t128,\n\t\t\tkernel_size=5,\n\t\t\tasymmetric=True,\n\t\t\tpadding=2,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.dilated2_8 = RegularBottleneck(\n\t\t\t128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)\n\n\t\t# Stage 3 - Encoder\n\t\tself.regular3_0 = RegularBottleneck(\n\t\t\t128, padding=1, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.dilated3_1 = RegularBottleneck(\n\t\t\t128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.asymmetric3_2 = RegularBottleneck(\n\t\t\t128,\n\t\t\tkernel_size=5,\n\t\t\tpadding=2,\n\t\t\tasymmetric=True,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.dilated3_3 = RegularBottleneck(\n\t\t\t128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.regular3_4 = RegularBottleneck(\n\t\t\t128, padding=1, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.dilated3_5 = RegularBottleneck(\n\t\t\t128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.asymmetric3_6 = RegularBottleneck(\n\t\t\t128,\n\t\t\tkernel_size=5,\n\t\t\tasymmetric=True,\n\t\t\tpadding=2,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.dilated3_7 = RegularBottleneck(\n\t\t\t128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)\n\n\t\t# Stage 4 - Decoder\n\t\tself.upsample4_0 = UpsamplingBottleneck(\n\t\t\t128, 64, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\t\tself.regular4_1 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\t\tself.regular4_2 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\n\t\t# Stage 5 - Decoder\n\t\tself.upsample5_0 = UpsamplingBottleneck(\n\t\t\t64, 16, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\t\tself.regular5_1 = 
RegularBottleneck(\n\t\t\t16, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\t\tself.transposed_conv = nn.ConvTranspose2d(\n\t\t\t16,\n\t\t\tnum_classes,\n\t\t\tkernel_size=3,\n\t\t\tstride=2,\n\t\t\tpadding=1,\n\t\t\toutput_padding=1,\n\t\t\tbias=False)\n\n\tdef forward(self, x):\n\t\t# Initial block\n\t\tx = self.initial_block(x)\n\n\t\t# Stage 1 - Encoder\n\t\tx, max_indices1_0 = self.downsample1_0(x)\n\t\tx = self.regular1_1(x)\n\t\tx = self.regular1_2(x)\n\t\tx = self.regular1_3(x)\n\t\tx = self.regular1_4(x)\n\n\t\t# Stage 2 - Encoder\n\t\tx, max_indices2_0 = self.downsample2_0(x)\n\t\tx = self.regular2_1(x)\n\t\tx = self.dilated2_2(x)\n\t\tx = self.asymmetric2_3(x)\n\t\tx = self.dilated2_4(x)\n\t\tx = self.regular2_5(x)\n\t\tx = self.dilated2_6(x)\n\t\tx = self.asymmetric2_7(x)\n\t\tx = self.dilated2_8(x)\n\n\t\t# Stage 3 - Encoder\n\t\tx = self.regular3_0(x)\n\t\tx = self.dilated3_1(x)\n\t\tx = self.asymmetric3_2(x)\n\t\tx = self.dilated3_3(x)\n\t\tx = self.regular3_4(x)\n\t\tx = self.dilated3_5(x)\n\t\tx = self.asymmetric3_6(x)\n\t\tx = self.dilated3_7(x)\n\n\t\t# Stage 4 - Decoder\n\t\tx = self.upsample4_0(x, max_indices2_0)\n\t\tx = self.regular4_1(x)\n\t\tx = self.regular4_2(x)\n\n\t\t# Stage 5 - Decoder\n\t\tx = self.upsample5_0(x, max_indices1_0)\n\t\tx = self.regular5_1(x)\n\t\tx = self.transposed_conv(x)\n\n\t\treturn x\n\n\nclass InitialBlockDepth(nn.Module):\n\t\"\"\"The initial block is composed of two branches:\n\t1. a main branch which performs a regular convolution with stride 2;\n\t2. an extension branch which performs max-pooling.\n\n\tDoing both operations in parallel and concatenating their results\n\tallows for efficient downsampling and expansion. The main branch\n\toutputs 12 feature maps while the extension branch outputs 4, for a\n\ttotal of 16 feature maps after concatenation.\n\n\tKeyword arguments:\n\t- in_channels (int): the number of input channels.\n\t- out_channels (int): the number output channels.\n\t- kernel_size (int, optional): the kernel size of the filters used in\n\tthe convolution layer. Default: 3.\n\t- padding (int, optional): zero-padding added to both sides of the\n\tinput. Default: 0.\n\t- bias (bool, optional): Adds a learnable bias to the output if\n\t``True``. Default: False.\n\t- relu (bool, optional): When ``True`` ReLU is used as the activation\n\tfunction; otherwise, PReLU is used. 
Default: True.\n\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t in_channels,\n\t\t\t\t out_channels,\n\t\t\t\t kernel_size=3,\n\t\t\t\t padding=0,\n\t\t\t\t bias=False,\n\t\t\t\t relu=True):\n\t\tsuper().__init__()\n\n\t\tif relu:\n\t\t\tactivation = nn.ReLU()\n\t\telse:\n\t\t\tactivation = nn.PReLU()\n\n\t\t# Main branch - As stated above the number of output channels for this\n\t\t# branch is the total minus 3, since the remaining channels come from\n\t\t# the extension branch\n\t\tself.main_branch = nn.Conv2d(\n\t\t\tin_channels,\n\t\t\tout_channels - 4,\n\t\t\tkernel_size=kernel_size,\n\t\t\tstride=2,\n\t\t\tpadding=padding,\n\t\t\tbias=bias)\n\n\t\t# Extension branch\n\t\tself.ext_branch = nn.MaxPool2d(kernel_size, stride=2, padding=padding)\n\n\t\t# Initialize batch normalization to be used after concatenation\n\t\tself.batch_norm = nn.BatchNorm2d(out_channels)\n\n\t\t# PReLU layer to apply after concatenating the branches\n\t\tself.out_prelu = activation\n\n\tdef forward(self, x):\n\t\tmain = self.main_branch(x)\n\t\text = self.ext_branch(x)\n\n\t\t# Concatenate branches\n\t\tout = torch.cat((main, ext), 1)\n\n\t\t# Apply batch normalization\n\t\tout = self.batch_norm(out)\n\n\t\treturn self.out_prelu(out)\n\n\nclass ENetDepth(nn.Module):\n\t\"\"\"Generate the ENetDepth model.\n\n\tKeyword arguments:\n\t- num_classes (int): the number of classes to segment.\n\t- encoder_relu (bool, optional): When ``True`` ReLU is used as the\n\tactivation function in the encoder blocks/layers; otherwise, PReLU\n\tis used. Default: False.\n\t- decoder_relu (bool, optional): When ``True`` ReLU is used as the\n\tactivation function in the decoder blocks/layers; otherwise, PReLU\n\tis used. Default: True.\n\n\t\"\"\"\n\n\tdef __init__(self, num_classes, encoder_relu=False, decoder_relu=True):\n\t\tsuper().__init__()\n\n\t\tself.initial_block = InitialBlockDepth(4, 16, padding=1, relu=encoder_relu)\n\n\t\t# Stage 1 - Encoder\n\t\tself.downsample1_0 = DownsamplingBottleneck(\n\t\t\t16,\n\t\t\t64,\n\t\t\tpadding=1,\n\t\t\treturn_indices=True,\n\t\t\tdropout_prob=0.01,\n\t\t\trelu=encoder_relu)\n\t\tself.regular1_1 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.01, relu=encoder_relu)\n\t\tself.regular1_2 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.01, relu=encoder_relu)\n\t\tself.regular1_3 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.01, relu=encoder_relu)\n\t\tself.regular1_4 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.01, relu=encoder_relu)\n\n\t\t# Stage 2 - Encoder\n\t\tself.downsample2_0 = DownsamplingBottleneck(\n\t\t\t64,\n\t\t\t128,\n\t\t\tpadding=1,\n\t\t\treturn_indices=True,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.regular2_1 = RegularBottleneck(\n\t\t\t128, padding=1, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.dilated2_2 = RegularBottleneck(\n\t\t\t128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.asymmetric2_3 = RegularBottleneck(\n\t\t\t128,\n\t\t\tkernel_size=5,\n\t\t\tpadding=2,\n\t\t\tasymmetric=True,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.dilated2_4 = RegularBottleneck(\n\t\t\t128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.regular2_5 = RegularBottleneck(\n\t\t\t128, padding=1, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.dilated2_6 = RegularBottleneck(\n\t\t\t128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.asymmetric2_7 = 
RegularBottleneck(\n\t\t\t128,\n\t\t\tkernel_size=5,\n\t\t\tasymmetric=True,\n\t\t\tpadding=2,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.dilated2_8 = RegularBottleneck(\n\t\t\t128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)\n\n\t\t# Stage 3 - Encoder\n\t\tself.regular3_0 = RegularBottleneck(\n\t\t\t128, padding=1, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.dilated3_1 = RegularBottleneck(\n\t\t\t128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.asymmetric3_2 = RegularBottleneck(\n\t\t\t128,\n\t\t\tkernel_size=5,\n\t\t\tpadding=2,\n\t\t\tasymmetric=True,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.dilated3_3 = RegularBottleneck(\n\t\t\t128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.regular3_4 = RegularBottleneck(\n\t\t\t128, padding=1, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.dilated3_5 = RegularBottleneck(\n\t\t\t128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)\n\t\tself.asymmetric3_6 = RegularBottleneck(\n\t\t\t128,\n\t\t\tkernel_size=5,\n\t\t\tasymmetric=True,\n\t\t\tpadding=2,\n\t\t\tdropout_prob=0.1,\n\t\t\trelu=encoder_relu)\n\t\tself.dilated3_7 = RegularBottleneck(\n\t\t\t128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)\n\n\t\t# Stage 4 - Decoder\n\t\tself.upsample4_0 = UpsamplingBottleneck(\n\t\t\t128, 64, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\t\tself.regular4_1 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\t\tself.regular4_2 = RegularBottleneck(\n\t\t\t64, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\n\t\t# Stage 5 - Decoder\n\t\tself.upsample5_0 = UpsamplingBottleneck(\n\t\t\t64, 16, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\t\tself.regular5_1 = RegularBottleneck(\n\t\t\t16, padding=1, dropout_prob=0.1, relu=decoder_relu)\n\t\tself.transposed_conv = nn.ConvTranspose2d(\n\t\t\t16,\n\t\t\tnum_classes,\n\t\t\tkernel_size=3,\n\t\t\tstride=2,\n\t\t\tpadding=1,\n\t\t\toutput_padding=1,\n\t\t\tbias=False)\n\n\tdef forward(self, x):\n\t\t# Initial block\n\t\tx = self.initial_block(x)\n\n\t\t# Stage 1 - Encoder\n\t\tx, max_indices1_0 = self.downsample1_0(x)\n\t\tx = self.regular1_1(x)\n\t\tx = self.regular1_2(x)\n\t\tx = self.regular1_3(x)\n\t\tx = self.regular1_4(x)\n\n\t\t# Stage 2 - Encoder\n\t\tx, max_indices2_0 = self.downsample2_0(x)\n\t\tx = self.regular2_1(x)\n\t\tx = self.dilated2_2(x)\n\t\tx = self.asymmetric2_3(x)\n\t\tx = self.dilated2_4(x)\n\t\tx = self.regular2_5(x)\n\t\tx = self.dilated2_6(x)\n\t\tx = self.asymmetric2_7(x)\n\t\tx = self.dilated2_8(x)\n\n\t\t# Stage 3 - Encoder\n\t\tx = self.regular3_0(x)\n\t\tx = self.dilated3_1(x)\n\t\tx = self.asymmetric3_2(x)\n\t\tx = self.dilated3_3(x)\n\t\tx = self.regular3_4(x)\n\t\tx = self.dilated3_5(x)\n\t\tx = self.asymmetric3_6(x)\n\t\tx = self.dilated3_7(x)\n\n\t\t# Stage 4 - Decoder\n\t\tx = self.upsample4_0(x, max_indices2_0)\n\t\tx = self.regular4_1(x)\n\t\tx = self.regular4_2(x)\n\n\t\t# Stage 5 - Decoder\n\t\tx = self.upsample5_0(x, max_indices1_0)\n\t\tx = self.regular5_1(x)\n\t\tx = self.transposed_conv(x)\n\n\t\treturn x\n"
] | [
[
"torch.zeros",
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.PReLU",
"torch.nn.MaxUnpool2d",
"torch.nn.Dropout2d"
]
] |
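The ENet/ENetDepth definitions in the row above build their initial block by running a strided 3x3 convolution and a max-pooling branch in parallel and concatenating the results, as the InitialBlockDepth docstring describes (12 + 4 channels for the four-channel depth input; the three-channel variant presumably uses the standard ENet split of 13 + 3). A minimal PyTorch sketch of that pattern, not taken from the repository, with made-up tensor sizes:

import torch
import torch.nn as nn

conv_branch = nn.Conv2d(3, 13, kernel_size=3, stride=2, padding=1, bias=False)  # main branch: 13 feature maps
pool_branch = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)                   # extension branch keeps the 3 input maps

x = torch.randn(1, 3, 512, 512)                       # dummy RGB image
out = torch.cat((conv_branch(x), pool_branch(x)), 1)  # 13 + 3 = 16 channels
print(out.shape)                                      # torch.Size([1, 16, 256, 256]) -> spatial size halved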
sanket-pixel/deep-video | [
"bc1465fe94149f84356fdf78caaa5b46e98738ae"
] | [
"TSN/demo.py"
] | [
"import cv2\nimport numpy as np\nfrom model import TSN\nimport os\nimport torch\nfrom dataset import RGBFrames\nfrom torchvision.transforms import Compose, Normalize, Resize, RandomHorizontalFlip, RandomApply, RandomCrop, RandomResizedCrop\nfrom torchvision import transforms\n\ndef get_file_names(path):\n with open(path) as f:\n files = f.readlines()\n files = [x.strip() for x in files]\n return files\n\ndef get_labels(path_to_classes):\n classes ={}\n with open(path_to_classes) as f:\n c = f.readlines()\n for x in c:\n classes[int(x.strip().split(\" \")[0])] = x.strip().split(\" \")[1]\n return classes\npath_to_validation = \"../data/mini_UCF/validation.txt\"\npath_to_classes ='../data/mini_UCF/classes.txt'\nroot_rgb = '../data/mini_UCF/'\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nvalidation_files = np.array(get_file_names(path_to_validation))\ntest_videos_idx= np.random.randint(0,len(validation_files),5)\ndataset_test = RGBFrames(path_to_validation,path_to_classes,root_rgb,mode=\"test\",transform = transforms.Compose([Resize((224,224)), Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]) ]))\nlabels = get_labels(path_to_classes)\ntsn_rgb = torch.load(\"trained_models/RGB_tsn.pth\")['model'].eval()\nfor idx in test_videos_idx:\n test_video = dataset_test[idx][0].unsqueeze(0).to(device)\n pred = int(torch.argmax(tsn_rgb(test_video), dim=1).data)\n predicted_action = labels[pred]\n original_video = cv2.VideoCapture(os.path.join(root_rgb,validation_files[idx]+'.avi'))\n total_frames = original_video.get(cv2.CAP_PROP_FRAME_COUNT)\n if (original_video.isOpened() == False):\n print(\"Error opening video file\")\n count = 0\n while (original_video.isOpened()):\n ret, frame = original_video.read()\n if ret == True:\n if(count>total_frames/2):\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame, predicted_action, (10,50), font, 1, (0, 0, 255), 2, cv2.LINE_AA)\n count+=1\n cv2.imshow('Frame', frame)\n cv2.waitKey(2)\n\n # Press Q on keyboard to exit\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n # Break the loop\n else:\n break\n original_video.release()\n cv2.destroyAllWindows()\n\n\n\n\n\n"
] | [
[
"torch.cuda.is_available",
"torch.load"
]
] |
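The demo above restores the network with torch.load("trained_models/RGB_tsn.pth")['model'] and then moves only the input clip to the GPU. A hedged loading sketch (same checkpoint path and key as the script; map_location is the usual way to keep the load working on CPU-only hosts and to keep model and data on the same device):

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checkpoint = torch.load("trained_models/RGB_tsn.pth", map_location=device)  # path and key taken from the demo
tsn_rgb = checkpoint["model"].to(device).eval()  # explicit .to(device) instead of relying on where it was saved

Wrapping the prediction call in torch.no_grad() would additionally avoid building a gradient graph during inference.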
SMAPPNYU/smapp_text_classifier | [
"d4b75dbc3a5a8a9f7a01e69404efd4c5c51d062b"
] | [
"smapp_text_classifier/plot.py"
] | [
"'''\nOriginal code snippet found at:\nhttps://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html#sphx-glr-auto-examples-model-selection-plot-learning-curve-py\n\n© 2007 - 2018, scikit-learn developers (BSD License)\n'''\nfrom textwrap import wrap\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import ShuffleSplit\n\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 10), fpath=None):\n \"\"\"\n Generate a simple plot of the test and training learning curve.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n title : string\n Title for the chart.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n ylim : tuple, shape (ymin, ymax), optional\n Defines minimum and maximum yvalues plotted.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validators that can be used here.\n\n n_jobs : int or None, optional (default=None)\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. 
it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n \"\"\"\n plt.figure()\n plt.title(\"\\n\".join(wrap(str(title), 60)))\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n plt.savefig(fpath)\n plt.ion()\n plt.show()\n return plt\n\nif __name__ == '__main__':\n digits = load_digits()\n X, y = digits.data, digits.target\n\n\n title = \"Learning Curves (Naive Bayes)\"\n # Cross validation with 100 iterations to get smoother mean test and train\n # score curves, each time with 20% data randomly selected as a validation set.\n cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)\n\n estimator = GaussianNB()\n plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)\n\n title = r\"Learning Curves (SVM, RBF kernel, $\\gamma=0.001$)\"\n # SVC is more expensive so we do a lower number of CV iterations:\n cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)\n estimator = SVC(gamma=0.001)\n plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)\n\n plt.show()\n"
] | [
[
"sklearn.datasets.load_digits",
"numpy.mean",
"sklearn.model_selection.learning_curve",
"matplotlib.pyplot.savefig",
"sklearn.svm.SVC",
"matplotlib.pyplot.fill_between",
"sklearn.naive_bayes.GaussianNB",
"matplotlib.pyplot.figure",
"numpy.std",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.ylabel",
"numpy.linspace",
"sklearn.model_selection.ShuffleSplit"
]
] |
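plot_learning_curve above ends with plt.savefig(fpath) even though fpath defaults to None, so callers outside the module's own __main__ block should supply an output path. A hedged usage sketch; the import path is inferred from the file path smapp_text_classifier/plot.py and the output file name is an assumption:

from sklearn.datasets import load_digits
from sklearn.model_selection import ShuffleSplit
from sklearn.naive_bayes import GaussianNB

from smapp_text_classifier.plot import plot_learning_curve  # import path inferred, not verified

X, y = load_digits(return_X_y=True)
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
plot_learning_curve(GaussianNB(), "Learning curve (Naive Bayes)", X, y,
                    ylim=(0.7, 1.01), cv=cv, n_jobs=2, fpath="naive_bayes_curve.png")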
warnerwarner/tensortools | [
"1b6b5c89e915a0b9e98459e0b9e9520893141457"
] | [
"tests/test_ktensor.py"
] | [
"\"\"\"Test KTensor functions.\"\"\"\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\n\nfrom tensortools import KTensor\n\n\ndef test_norm():\n\n rs = np.random.RandomState(123)\n U = KTensor([rs.randn(55, 3) for _ in range(3)])\n\n assert_almost_equal(U.norm(), np.linalg.norm(U.full()))\n"
] | [
[
"numpy.random.RandomState"
]
] |
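The test above checks that KTensor.norm() equals the Frobenius norm of the densified tensor returned by .full(). A numpy-only sketch of the same identity that does not depend on tensortools; the einsum reconstruction stands in for KTensor.full():

import numpy as np

rs = np.random.RandomState(123)
U = [rs.randn(55, 3) for _ in range(3)]              # factor matrices of a rank-3 CP (Kruskal) tensor
full = np.einsum('ir,jr,kr->ijk', U[0], U[1], U[2])  # dense 55x55x55 reconstruction
print(np.linalg.norm(full))                          # the value the KTensor norm is compared against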
alon/emolog | [
"ed6e9e30a46ffc04282527ee73aa3bb8605e2dc9"
] | [
"emolog_pc/misc/gtk_queue_samples_plotter.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nExample of embedding matplotlib in an application and interacting with\na treeview to store data. Double click on an entry to update plot\ndata\n\n\"\"\"\nimport pygtk\npygtk.require('2.0')\nimport gtk\nfrom gtk import gdk\n\nimport matplotlib\nmatplotlib.use('GTKAgg') # or 'GTK'\nfrom matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas\n\nfrom numpy.random import random\nfrom matplotlib.figure import Figure\n\n\nclass DataManager(gtk.Window):\n numRows, numCols = 20, 10\n\n data = random((numRows, numCols))\n\n def __init__(self):\n gtk.Window.__init__(self)\n self.set_default_size(600, 600)\n self.connect('destroy', lambda win: gtk.main_quit())\n\n self.set_title('GtkListStore demo')\n self.set_border_width(8)\n\n vbox = gtk.VBox(False, 8)\n self.add(vbox)\n\n label = gtk.Label('Double click a row to plot the data')\n\n vbox.pack_start(label, False, False)\n\n sw = gtk.ScrolledWindow()\n sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)\n sw.set_policy(gtk.POLICY_NEVER,\n gtk.POLICY_AUTOMATIC)\n vbox.pack_start(sw, True, True)\n\n model = self.create_model()\n\n self.treeview = gtk.TreeView(model)\n self.treeview.set_rules_hint(True)\n\n # matplotlib stuff\n fig = Figure(figsize=(6, 4))\n\n self.canvas = FigureCanvas(fig) # a gtk.DrawingArea\n vbox.pack_start(self.canvas, True, True)\n ax = fig.add_subplot(111)\n self.line, = ax.plot(self.data[0, :], 'go') # plot the first row\n\n self.treeview.connect('row-activated', self.plot_row)\n sw.add(self.treeview)\n\n self.add_columns()\n\n self.add_events(gdk.BUTTON_PRESS_MASK |\n gdk.KEY_PRESS_MASK |\n gdk.KEY_RELEASE_MASK)\n\n def plot_row(self, treeview, path, view_column):\n ind, = path # get the index into data\n points = self.data[ind, :]\n self.line.set_ydata(points)\n self.canvas.draw()\n\n def add_columns(self):\n for i in range(self.numCols):\n column = gtk.TreeViewColumn('%d' % i, gtk.CellRendererText(), text=i)\n self.treeview.append_column(column)\n\n def create_model(self):\n types = [float]*self.numCols\n store = gtk.ListStore(*types)\n\n for row in self.data:\n store.append(row)\n return store\n\n\ndef gtk_queue_samples_plotter(q):\n manager = DataManager()\n manager.show_all()\n gtk.main()"
] | [
[
"matplotlib.use",
"numpy.random.random",
"matplotlib.backends.backend_gtk.FigureCanvasGTK",
"matplotlib.figure.Figure"
]
] |
manhhomienbienthuy/break_captcha_ver2 | [
"1fe323c5ed112ce3f071480863a87612459b7e7a"
] | [
"prepare_data.py"
] | [
"from __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport requests\r\nimport os\r\nfrom os import listdir\r\nfrom os.path import join, isfile\r\nfrom PIL import Image, ImageChops\r\nimport math\r\nimport numpy as np\r\nimport cv2\r\nimport random\r\nimport string\r\nfrom scipy.misc import imread\r\n\r\nchars_list = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\nchars_dict = {c: chars_list.index(c) for c in chars_list}\r\n\r\nIMAGE_TOTAL = 1000\r\nRAW_PATH = \"data/raw/\"\r\nSLICED_PATH = \"data/sliced/\"\r\n\r\npart = 0\r\nlist_chars = [f for f in listdir('data/chars') if isfile(join('data/chars', f)) and 'jpg' in f]\r\n\r\ndef crawl_images():\r\n\turl = \"https://chuyencuadev.com/captcha\"\r\n\tfor i in range (1, IMAGE_TOTAL):\r\n\t\tfile_path = join(RAW_PATH,'{0:04}.jpg'.format(i))\r\n\t\tprint(file_path)\r\n\t\twith open(file_path, 'wb') as f:\r\n\t\t\tresponse = requests.get(url)\r\n\t\t\tif response.ok: f.write(response.content)\r\n\r\ndef process_directory(directory):\r\n file_list = []\r\n for file_name in listdir(directory):\r\n file_path = join(directory, file_name)\r\n if isfile(file_path) and 'jpg' in file_name:\r\n file_list.append(file_path)\r\n return file_list\r\n\r\ndef process_image(image_path):\r\n image = imread(image_path)\r\n image = image.reshape(1080,)\r\n return np.array([x/255. for x in image])\r\n\r\ndef reduce_noise(file_path):\r\n\tprint(file_path)\r\n\timg = cv2.imread(file_path)\r\n\tdst = cv2.fastNlMeansDenoisingColored(img,None,50,50,7,21)\r\n\tcv2.imwrite(file_path, dst)\r\n\timg = Image.open(file_path).convert('L')\r\n\timg = img.point(lambda x: 0 if x<128 else 255, '1')\r\n\timg.save(file_path)\r\n\r\ndef reduce_noise_dir(directory):\r\n\tlist_file = process_directory(directory)\r\n\tfor file_path in list_file:\r\n\t\tprint(file_path)\r\n\t\timg = cv2.imread(file_path)\r\n\t\tdst = cv2.fastNlMeansDenoisingColored(img,None,50,50,7,21)\r\n\t\tcv2.imwrite(file_path, dst)\r\n\t\timg = Image.open(file_path).convert('L')\r\n\t\timg = img.point(lambda x: 0 if x<128 else 255, '1')\r\n\t\timg.save(file_path)\r\n\r\ndef crop(file_path, out_directory):\r\n\tpart = 0\r\n\timg = Image.open(file_path)\r\n\tp = img.convert('P')\r\n\tw, h = p.size\r\n\r\n\tletters = []\r\n\tleft, right= -1, -1\r\n\tfound = False\r\n\tfor i in range(w):\r\n\t\tin_letter = False\r\n\t\tfor j in range(h):\r\n\t\t\tif p.getpixel((i,j)) == 0:\r\n\t\t\t\tin_letter = True\r\n\t\t\t\tbreak\r\n\t\tif not found and in_letter:\r\n\t\t\tfound = True\r\n\t\t\tleft = i\r\n\t\tif found and not in_letter and i-left > 25:\r\n\t\t\tfound = False\r\n\t\t\tright = i\r\n\t\t\tletters.append([left, right])\r\n\torigin = file_path.split('/')[-1].split('.')[0]\r\n\tfor [l,r] in letters:\r\n\t\tif r-l < 40:\r\n\t\t\tbbox = (l, 0, r, h)\r\n\t\t\tcrop = img.crop(bbox)\r\n\t\t\tcrop = crop.resize((30,60))\r\n\t\t\tcrop.save(join(out_directory, '{0:04}_{1}.jpg'.format(part, origin)))\r\n\t\t\tpart += 1\r\n\r\ndef crop_dir(raw_directory, out_directory):\r\n\tlist_file = process_directory(raw_directory)\r\n\tglobal part\r\n\tfor file_path in list_file:\r\n\t\tprint(file_path)\r\n\t\timg = Image.open(file_path)\r\n\t\tp = img.convert('P')\r\n\t\tw, h = p.size\r\n\r\n\t\tletters = []\r\n\t\tleft, right= -1, -1\r\n\t\tfound = False\r\n\t\tfor i in range(w):\r\n\t\t\tin_letter = False\r\n\t\t\tfor j in range(h):\r\n\t\t\t\tif p.getpixel((i,j)) == 0:\r\n\t\t\t\t\tin_letter = True\r\n\t\t\t\t\tbreak\r\n\t\t\tif not found and in_letter:\r\n\t\t\t\tfound = True\r\n\t\t\t\tleft = i\r\n\t\t\tif found 
and not in_letter and i-left > 25:\r\n\t\t\t\tfound = False\r\n\t\t\t\tright = i\r\n\t\t\t\tletters.append([left, right])\r\n\t\torigin = file_path.split('/')[-1].split('.')[0]\r\n\t\tfor [l,r] in letters:\r\n\t\t\tif r-l < 40:\r\n\t\t\t\tbbox = (l, 0, r, h)\r\n\t\t\t\tcrop = img.crop(bbox)\r\n\t\t\t\tcrop = crop.resize((30,60))\r\n\t\t\t\tcrop.save(join(out_directory, '{0:04}_{1}.jpg'.format(part, origin)))\r\n\t\t\t\tpart += 1\r\n\r\ndef adjust_dir(directory):\r\n\tlist_file = process_directory(directory)\r\n\tfor file_path in list_file:\r\n\t\timg = Image.open(file_path)\r\n\t\tp = img.convert('P')\r\n\t\tw, h = p.size\r\n\t\tstart, end = -1, -1\r\n\t\tfound = False\r\n\t\tfor j in range(h):\r\n\t\t\tin_letter = False\r\n\t\t\tfor i in range(w):\r\n\t\t\t\tif p.getpixel((i,j)) == 0:\r\n\t\t\t\t\tin_letter = True\r\n\t\t\t\t\tbreak\r\n\t\t\tif not found and in_letter:\r\n\t\t\t\tfound = True\r\n\t\t\t\tstart = j\r\n\t\t\tif found and not in_letter and j-start > 35:\r\n\t\t\t\tfound = False\r\n\t\t\t\tend = j\r\n\t\tbbox = (0, start, w, end)\r\n\t\tcrop = img.crop(bbox)\r\n\t\tcrop = crop.resize((30,36))\r\n\t\tcrop.save(file_path)\r\n\r\ndef rand_string(N=6):\r\n\treturn ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(N))\r\n\r\ndef rename(path, filename, letter):\r\n\tos.rename(join(path,filename), join(path, letter+'-' + rand_string() + '.jpg'))\r\n\r\ndef detect_char(path, filename):\r\n\tclass Fit:\r\n\t\tletter = None\r\n\t\tdifference = 0\r\n\tbest = Fit()\r\n\t_img = Image.open(join(path, filename))\r\n\tfor img_name in list_chars:\r\n\t\tcurrent = Fit()\r\n\t\timg = Image.open(join('data/chars', img_name))\r\n\t\tcurrent.letter = img_name.split('-')[0]\r\n\t\tdifference = ImageChops.difference(_img, img)\r\n\t\tfor x in range(difference.size[0]):\r\n\t\t\tfor y in range(difference.size[1]):\r\n\t\t\t\tcurrent.difference += difference.getpixel((x, y))/255.\r\n\t\tif not best.letter or best.difference > current.difference:\r\n\t\t\tbest = current\r\n\tif best.letter == filename.split('-')[0]: return\r\n\tprint(filename, best.letter)\r\n\trename(path, filename, best.letter)\r\n\r\ndef detect_dir(directory):\r\n\tfor f in listdir(directory):\r\n\t\tif isfile(join(directory, f)) and 'jpg' in f:\r\n\t\t\tdetect_char(directory, f)\r\n\r\nif __name__=='__main__':\r\n\tcrawl_images()\r\n\treduce_noise_dir(RAW_PATH)\r\n\tcrop_dir(RAW_PATH, SLICED_PATH)\r\n\tadjust_dir(SLICED_PATH)\r\n\tpass\r\n"
] | [
[
"scipy.misc.imread",
"numpy.array"
]
] |
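process_image in the script above reads with scipy.misc.imread, which has been removed from recent SciPy releases, and then flattens to 1080 values (the 30x36 character crops produced by adjust_dir). A hedged replacement sketch using Pillow, which the script already imports; process_image_pil is a hypothetical name:

import numpy as np
from PIL import Image

def process_image_pil(image_path):
    # Grayscale read, flattened and scaled to [0, 1], mirroring the original process_image.
    image = np.asarray(Image.open(image_path).convert('L'), dtype=np.float64)
    return image.reshape(-1) / 255.0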
lemonwaffle/nflows | [
"70a2417dc5bb41858bf12467789e4187f4ae212c"
] | [
"tests/transforms/splines/cubic_test.py"
] | [
"import torch\nimport torchtestcase\n\nfrom nflows.transforms import splines\n\n\nclass CubicSplineTest(torchtestcase.TorchTestCase):\n def test_forward_inverse_are_consistent(self):\n num_bins = 10\n shape = [2, 3, 4]\n\n unnormalized_widths = torch.randn(*shape, num_bins)\n unnormalized_heights = torch.randn(*shape, num_bins)\n unnorm_derivatives_left = torch.randn(*shape, 1)\n unnorm_derivatives_right = torch.randn(*shape, 1)\n\n def call_spline_fn(inputs, inverse=False):\n return splines.cubic_spline(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnorm_derivatives_left=unnorm_derivatives_left,\n unnorm_derivatives_right=unnorm_derivatives_right,\n inverse=inverse,\n )\n\n inputs = torch.rand(*shape)\n outputs, logabsdet = call_spline_fn(inputs, inverse=False)\n inputs_inv, logabsdet_inv = call_spline_fn(outputs, inverse=True)\n\n self.eps = 1e-4\n self.assertEqual(inputs, inputs_inv)\n self.assertEqual(logabsdet + logabsdet_inv, torch.zeros_like(logabsdet))\n\n\nclass UnconstrainedCubicSplineTest(torchtestcase.TorchTestCase):\n def test_forward_inverse_are_consistent(self):\n num_bins = 10\n shape = [2, 3, 4]\n\n unnormalized_widths = torch.randn(*shape, num_bins)\n unnormalized_heights = torch.randn(*shape, num_bins)\n unnorm_derivatives_left = torch.randn(*shape, 1)\n unnorm_derivatives_right = torch.randn(*shape, 1)\n\n def call_spline_fn(inputs, inverse=False):\n return splines.unconstrained_cubic_spline(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnorm_derivatives_left=unnorm_derivatives_left,\n unnorm_derivatives_right=unnorm_derivatives_right,\n inverse=inverse,\n )\n\n inputs = 3 * torch.randn(*shape) # Note inputs are outside [0,1].\n outputs, logabsdet = call_spline_fn(inputs, inverse=False)\n inputs_inv, logabsdet_inv = call_spline_fn(outputs, inverse=True)\n\n self.eps = 1e-4\n self.assertEqual(inputs, inputs_inv)\n self.assertEqual(logabsdet + logabsdet_inv, torch.zeros_like(logabsdet))\n"
] | [
[
"torch.zeros_like",
"torch.rand",
"torch.randn"
]
] |
rkimball/incubator-tvm | [
"85e42b6af38ea3bd0c99c8208d7baed5086a8959"
] | [
"tests/python/contrib/test_hexagon/test_usmp.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport sys\nimport pytest\nimport numpy as np\n\nimport tvm.testing\nfrom tvm import te\nfrom tvm import relay\nfrom tvm.relay.backend import Executor, Runtime\nfrom tvm.contrib.hexagon.session import Session\nfrom tvm.testing.usmp import is_tvm_backendallocworkspace_calls\n\nfrom tvm.contrib.hexagon.pytest_plugin import requires_hexagon_toolchain\n\nusmp_enabled = tvm.testing.parameter(False, True)\n\n\n@requires_hexagon_toolchain\ndef test_conv2d(hexagon_session: Session, aot_host_target, aot_target, usmp_enabled):\n dtype = \"float32\"\n input_shape = (1, 8, 8, 3)\n w1_shape = (5, 5, 3, 1)\n w2_shape = (5, 5, 1, 3)\n data = relay.var(\"data\", relay.TensorType(input_shape, dtype))\n weight1 = relay.var(\"weight1\", relay.TensorType(w1_shape, dtype))\n weight2 = relay.var(\"weight2\", relay.TensorType(w2_shape, dtype))\n y1 = relay.nn.conv2d(\n data,\n weight1,\n padding=(2, 2),\n kernel_size=(5, 5),\n data_layout=\"NHWC\",\n kernel_layout=\"HWIO\",\n out_dtype=\"float32\",\n )\n y2 = relay.nn.conv2d(\n y1,\n weight2,\n padding=(2, 2),\n kernel_size=(5, 5),\n data_layout=\"NHWC\",\n kernel_layout=\"HWIO\",\n out_dtype=\"float32\",\n )\n f = relay.Function([data, weight1, weight2], y2)\n relay_mod = tvm.IRModule.from_expr(f)\n relay_mod = relay.transform.InferType()(relay_mod)\n\n weight1_data = np.random.rand(w1_shape[0], w1_shape[1], w1_shape[2], w1_shape[3]).astype(\n dtype=dtype\n )\n weight2_data = np.random.rand(w2_shape[0], w2_shape[1], w2_shape[2], w2_shape[3]).astype(\n dtype=dtype\n )\n input_data = np.random.rand(\n input_shape[0], input_shape[1], input_shape[2], input_shape[3]\n ).astype(dtype=dtype)\n\n params = {\"weight1\": weight1_data, \"weight2\": weight2_data}\n inputs = {\"data\": input_data}\n\n with tvm.transform.PassContext(opt_level=3, config={\"tir.usmp.enable\": usmp_enabled}):\n lowered = tvm.relay.build(\n relay_mod,\n params=params,\n target=tvm.target.Target(aot_target, host=aot_host_target),\n runtime=Runtime(\"cpp\"),\n executor=Executor(\"aot\", {\"unpacked-api\": False, \"interface-api\": \"packed\"}),\n )\n\n assert is_tvm_backendallocworkspace_calls(lowered.lib) != usmp_enabled\n\n aot_mod = hexagon_session.get_executor_from_factory(lowered)\n aot_mod.set_input(**inputs)\n aot_mod.run()\n hexagon_output = aot_mod.get_output(0).numpy()\n\n target_llvm = tvm.target.Target(\"llvm\")\n with tvm.transform.PassContext(opt_level=3):\n llvm_lowered = tvm.relay.build(\n relay_mod,\n tvm.target.Target(target_llvm, host=target_llvm),\n runtime=Runtime(\"cpp\"),\n executor=Executor(\"graph\"),\n )\n\n llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered[\"default\"](tvm.cpu(0)))\n llvm_graph_mod.set_input(**params)\n llvm_graph_mod.run(**inputs)\n expected_output 
= llvm_graph_mod.get_output(0).numpy()\n\n tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main(sys.argv))\n"
] | [
[
"numpy.random.rand"
]
] |
Nayananga/gpt-2 | [
"edf42c3190188891fba043d1f6edf397bc3d197d"
] | [
"src/load_dataset.py"
] | [
"import glob\nimport os\n\nimport numpy as np\nimport tqdm\n\n\ndef load_dataset(enc, path, combine, encoding=None):\n paths = []\n if os.path.isfile(path):\n # Simple file\n paths.append(path)\n elif os.path.isdir(path):\n # Directory\n for (dirpath, _, fnames) in os.walk(path):\n for fname in fnames:\n paths.append(os.path.join(dirpath, fname))\n else:\n # Assume glob\n paths = glob.glob(path)\n\n token_chunks = []\n raw_text = ''\n for path in tqdm.tqdm(paths):\n if path.endswith('.npz'):\n # Pre-encoded\n with np.load(path) as npz:\n for item in npz.files:\n token_chunks.append(npz[item])\n else:\n # Plain text\n with open(path, 'r', encoding=encoding) as fp:\n raw_text += fp.read()\n if len(raw_text) >= combine:\n tokens = np.stack(enc.encode(raw_text))\n token_chunks.append(tokens)\n raw_text = ''\n else:\n raw_text += '<|endoftext|>'\n if raw_text:\n tokens = np.stack(enc.encode(raw_text))\n token_chunks.append(tokens)\n return token_chunks\n\n\ndef binary_search(f, lo, hi):\n if f(lo) or not f(hi):\n return None\n while hi > lo + 1:\n mid = (lo + hi) // 2\n if f(mid):\n hi = mid\n else:\n lo = mid\n return hi\n\n\nclass Sampler(object):\n \"\"\"Fairly samples a slice from a set of variable sized chunks.\n\n 'Fairly' means that the distribution is the same as sampling from one concatenated chunk,\n but without crossing chunk boundaries.\"\"\"\n\n def __init__(self, chunks, seed=None):\n self.chunks = chunks\n self.total_size = sum(chunk.shape[0] for chunk in chunks)\n self.boundaries = [0]\n for i in range(len(chunks)):\n self.boundaries.append(self.boundaries[-1] + chunks[i].shape[0])\n self.rs = np.random.RandomState(seed=seed)\n\n def sample(self, length):\n assert length < self.total_size // len(\n self.chunks\n ), \"Dataset files are too small to sample {} tokens at a time\".format(\n length)\n while True:\n index = self.rs.randint(0, self.total_size - length - 1)\n i = binary_search(lambda j: self.boundaries[j] > index, 0,\n len(self.boundaries) - 1) - 1\n if self.boundaries[i + 1] > index + length:\n within_chunk = index - self.boundaries[i]\n return self.chunks[i][within_chunk:within_chunk + length]\n"
] | [
[
"numpy.load",
"numpy.random.RandomState"
]
] |
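The Sampler class above draws fixed-length token windows whose distribution matches sampling from one concatenated stream while never crossing chunk boundaries (binary_search locates the chunk containing the drawn index). An illustrative usage sketch; the chunks are made-up integer arrays rather than real encodings, and the import path is inferred from src/load_dataset.py:

import numpy as np
from load_dataset import Sampler  # import path inferred, not verified

chunks = [np.arange(1000), np.arange(1000, 1600), np.arange(1600, 4000)]  # three "documents" of different sizes
sampler = Sampler(chunks, seed=0)
window = sampler.sample(length=128)  # 128 consecutive tokens taken from a single chunk
print(window.shape)                  # (128,)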
ferdinandb/pyHEC | [
"d1f0cb5001b98176098b16abccc03d2396f54685"
] | [
"pyhec/core/util/parallel_processing_util.py"
] | [
"\"\"\"\nHelper functions for the parallel processing module\n\"\"\"\n\nimport pandas as pd\nimport sys\nimport csv\n\ndef read_csv(files, **kwargs):\n \"\"\"\n Reads a list of CSV files. This function is used for the overarching wrapper to read\n CSV files in parallel.\n\n :param files: A list of files that should be loaded.\n :param kwargs: Keyword arguments for pandas' DataFrame.read_csv()\n\n :return: A concatenated pandas DataFrame\n \"\"\"\n max_int = sys.maxsize\n while True:\n # Reading large CSV files might raise an OverflowError. Decrease the maxInt\n # value by factor 10 as long as the OverflowError occurs\n try:\n csv.field_size_limit(max_int)\n break\n except OverflowError:\n max_int = int(max_int/10)\n\n return pd.concat((pd.read_csv(f, engine='python', **kwargs) for f in files), ignore_index=True)\n"
] | [
[
"pandas.read_csv"
]
] |
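A hedged usage sketch for the read_csv helper above; the file and column names are placeholders. Any pandas.read_csv keyword except engine (which the helper already fixes to 'python') can be forwarded through **kwargs:

from pyhec.core.util.parallel_processing_util import read_csv  # import path inferred from the file path above

df = read_csv(["data/part-0.csv", "data/part-1.csv"], sep=";", usecols=["id", "value"])
print(df.shape)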
kaaiian/CrabNet | [
"32791345522da1029e690512a04006695ff0eff7"
] | [
"data/get_dataset_size.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\n\nfrom tqdm import tqdm\nfrom utils.composition import _element_composition\n\n\n# %%\nbenchmark_data = os.listdir('data/benchmark_data')\nmatbench_data = os.listdir('data/matbench_cv')\n\n\n# %%\ncols = ['mat_prop', 'samples_total',\n 'samples_train', 'samples_val', 'samples_test',\n 'prop_train', 'prop_val', 'prop_test',]\ndf_benchmark = pd.DataFrame(columns=cols)\n\nfor prop in tqdm(benchmark_data, desc=\"Processing benchmark_data\"):\n df_train = pd.read_csv(f'data/benchmark_data/{prop}/train.csv')\n df_val = pd.read_csv(f'data/benchmark_data/{prop}/val.csv')\n df_test = pd.read_csv(f'data/benchmark_data/{prop}/test.csv')\n n_train = df_train['formula'].apply(_element_composition).apply(len).max()\n n_val = df_val['formula'].apply(_element_composition).apply(len).max()\n n_test = df_test['formula'].apply(_element_composition).apply(len).max()\n n_elements = max([n_train, n_val, n_test])\n total_samples = df_train.shape[0] + df_val.shape[0] + df_test.shape[0]\n df_row = {\n 'mat_prop': prop,\n 'n_elements': n_elements,\n 'samples_total': total_samples,\n 'samples_train': df_train.shape[0],\n 'samples_val': df_val.shape[0],\n 'samples_test': df_test.shape[0],\n 'prop_train': df_train.shape[0] / total_samples,\n 'prop_val': df_val.shape[0] / total_samples,\n 'prop_test': df_test.shape[0] / total_samples,\n }\n df_benchmark = df_benchmark.append(df_row, ignore_index=True)\n\n\n# %%\ncols = ['mat_prop', 'samples_total',\n 'samples_train', 'samples_val', 'samples_test',\n 'prop_train', 'prop_val', 'prop_test',]\ndf_matbench_cv = pd.DataFrame(columns=cols)\n\nfor prop in tqdm(matbench_data, desc=\"Processing matbench_data\"):\n df_train = pd.read_csv(f'data/matbench_cv/{prop}/train0.csv')\n df_val = pd.read_csv(f'data/matbench_cv/{prop}/val0.csv')\n df_test = pd.read_csv(f'data/matbench_cv/{prop}/test0.csv')\n n_train = df_train['formula'].apply(_element_composition).apply(len).max()\n n_val = df_val['formula'].apply(_element_composition).apply(len).max()\n n_test = df_test['formula'].apply(_element_composition).apply(len).max()\n n_elements = max([n_train, n_val, n_test])\n total_samples = df_train.shape[0] + df_val.shape[0] + df_test.shape[0]\n df_row = {\n 'mat_prop': prop,\n 'n_elements': n_elements,\n 'samples_total': total_samples,\n 'samples_train': df_train.shape[0],\n 'samples_val': df_val.shape[0],\n 'samples_test': df_test.shape[0],\n 'prop_train': df_train.shape[0] / total_samples,\n 'prop_val': df_val.shape[0] / total_samples,\n 'prop_test': df_test.shape[0] / total_samples,\n }\n df_matbench_cv = df_matbench_cv.append(df_row, ignore_index=True)\n\n\n# %%\ncols = ['mat_prop', 'samples_total']\ndf_matbench_all = pd.DataFrame(columns=cols)\n\nfor prop in tqdm(matbench_data, desc=\"Processing matbench_data\"):\n df_data = pd.read_csv(f'data/matbench/{prop}.csv')\n n_elements = df_data['formula'].apply(_element_composition).apply(len).max()\n df_row = {\n 'mat_prop': prop,\n 'n_elements': n_elements,\n 'samples_total': df_data.shape[0],\n }\n df_matbench_all = df_matbench_all.append(df_row, ignore_index=True)\n\n\n# %%\ndf_benchmark['log2_samples_train'] = df_benchmark['samples_train'].astype(float).apply(np.log2)\ndf_matbench_cv['log2_samples_train'] = df_matbench_cv['samples_train'].astype(float).apply(np.log2)\ndf_matbench_all['log2_samples_total'] = df_matbench_all['samples_total'].astype(float).apply(np.log2)\n\n\n# %%\nprint(df_benchmark.to_latex(index=False, escape=True, float_format=\"{:0.2f}\".format))\n\n# 
%%\nprint(df_matbench_cv.to_latex(index=False, escape=True, float_format=\"{:0.2f}\".format))\n\n# %%\nprint(df_matbench_all.to_latex(index=False, escape=True, float_format=\"{:0.2f}\".format))\n\n\n# %%\ndf_benchmark\ndf_matbench_cv\ndf_matbench_all\n\n\n"
] | [
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
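The script above grows its summary tables with DataFrame.append inside a loop, a method that pandas deprecated in 1.4 and removed in 2.0. A hedged sketch of the usual replacement, collecting row dicts and building each frame once; the property names and counts are placeholders:

import pandas as pd

cols = ['mat_prop', 'n_elements', 'samples_total']
rows = []
for prop in ['prop_a', 'prop_b']:  # placeholder property names
    rows.append({'mat_prop': prop, 'n_elements': 5, 'samples_total': 1000})
df_benchmark = pd.DataFrame(rows, columns=cols)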
spilchen/yahoo-baseball-assistant | [
"a2bed8059cd9201b145d83f5dca20e1b9f4c67e8"
] | [
"yahoo_fantasy_bot/tests/conftest.py"
] | [
"#!/usr/bin/python\n\nimport pytest\nimport pandas as pd\nimport numpy as np\nfrom yahoo_fantasy_bot import roster\n\nRBLDR_COLS = [\"player_id\", \"name\", \"eligible_positions\", \"selected_position\"]\nRSEL_COLS = [\"player_id\", \"name\", \"HR\", \"OBP\", \"W\", \"ERA\"]\n\n\[email protected]\ndef empty_roster():\n rcont = roster.Container()\n yield rcont\n\n\[email protected]\ndef bldr():\n b = roster.Builder([\"C\", \"1B\", \"2B\", \"SS\", \"3B\", \"LF\", \"CF\", \"RF\", \"Util\",\n \"SP\", \"SP\", \"SP\", \"SP\", \"SP\",\n \"RP\", \"RP\", \"RP\", \"RP\", \"RP\"])\n yield b\n\n\[email protected]\ndef fake_player_selector():\n player_pool = pd.DataFrame(\n [[1, \"Borders\", 15, 0.319, np.nan, np.nan],\n [2, \"Lee\", 6, 0.288, np.nan, np.nan],\n [3, \"McGriff\", 35, 0.400, np.nan, np.nan],\n [4, \"Fernandez\", 4, 0.352, np.nan, np.nan],\n [5, \"Gruber\", 31, 0.330, np.nan, np.nan],\n [6, \"Bell\", 21, 0.303, np.nan, np.nan],\n [7, \"Wilson\", 3, 0.300, np.nan, np.nan],\n [8, \"Felix\", 15, 0.318, np.nan, np.nan],\n [9, \"Olerud\", 14, 0.364, np.nan, np.nan],\n [10, \"Hill\", 12, 0.281, np.nan, np.nan],\n [11, \"Steib\", np.nan, np.nan, 18, 2.93],\n [12, \"Stottlemyre\", np.nan, np.nan, 13, 4.34],\n [13, \"Wells\", np.nan, np.nan, 11, 3.14],\n [14, \"Key\", np.nan, np.nan, 13, 4.25],\n [15, \"Cerutti\", np.nan, np.nan, 9, 4.76]], columns=RSEL_COLS)\n plyr_sel = roster.PlayerSelector(player_pool)\n yield plyr_sel\n"
] | [
[
"pandas.DataFrame"
]
] |
gesellkammer/lambdasim | [
"7cab12d69b412005665b527657905d4c6a5a0db4"
] | [
"lambdasim/sim.py"
] | [
"from __future__ import absolute_import, print_function, division\n\nimport os\nimport math\nimport numpy\nimport warnings\nfrom collections import namedtuple\n\n# 3rd party\nimport shapelib\nimport sndfileio\n\n# local\nfrom .conversions import *\nfrom .io import *\nfrom .filtering import *\nfrom .source import *\nfrom .config import config\nfrom .util import *\nfrom . import snd\nfrom .errors import *\nfrom .plotting import plot_array\n\n\nSample = namedtuple(\"Sample\", \"frames sr idx\")\n\n\nclass Wall(object):\n def __init__(self, envmatrix, angmatrix=None, bounds=None):\n self.envmat = envmatrix\n self.angmat = angmatrix\n if bounds:\n self.x0, self.y0, self.x1, self.y1 = bounds\n\n\n#################################################\n#\n# SIMULATION\n#\n#################################################\n\n\n\nclass Simulation(object):\n \"\"\"\n Defines a lambda simulation\n \"\"\"\n def __init__(self, samplerate=None, size=None, duration=1, resolution=None, c=343, rho=RHO_AIR):\n \"\"\"\n samplerate [Hz] : the audio samplerate of the simulation. This sets the time/space\n resolution of the grid. See `resolution`\n size [(m,m)] : size of the simulation in meters (x, y), or None to define it later\n duration [s] : duration of the simulation\n resolution [m] : alternative to samplerate, set the resolution of the grid\n by setting the distance of one step\n c [m/s] : the sound propagation sample_define\n rho [kg/m3] : the density of air\n\n Examples\n ========\n\n Simulation(samplerate=44100, size=(10, 6)) # 10x6 m\n\n s = Simulation(size=(10,6))\n s.nodesize = 0.001 # 1 mm nodesize\n\n s = Simulation(samplerate=44100).set_size_in_pixels(\n\n \"\"\"\n if samplerate is not None and resolution is not None:\n raise ValueError(\"The resolution of the simulation should be set either through `samplerate` or `resolution`, but not both\")\n if samplerate is None and resolution is None:\n samplerate = 44100\n elif resolution is not None:\n samplerate = nodesize2samplerate(resolution, c)\n self._samplerate = samplerate\n\n if size is not None:\n xpix, ypix = coord2pix(size[0], size[1], samplerate, c)\n else:\n xpix, ypix = 100, 100\n\n if xpix % 2 != 0:\n xpix += 1\n if ypix % 2 != 0:\n ypix += 1\n\n self._xsize = xpix\n self._ysize = ypix\n\n self.c = c\n self.set_duration(duration)\n self.rho = rho\n self.nfilters = 0\n self._lastwav = None\n self._lastsimfile = None\n self._rcefile = None\n self._autoadd = config['autoadd']\n self.sources = []\n self.samples = []\n self.filters = {}\n self.envmat = None\n self.angmat = None\n self.envgeom = None\n self.receivers = []\n self.reset()\n lambdabin = detect_lambda()\n if lambdabin is None:\n warnings.warn(\"Lambda has not been detected in your system\")\n\n\n def reset(self):\n \"\"\"\n Regenerates the internal state (environment matrix, angle matrix, etc.)\n The dimensions of the simulation (size, duration, etc.) 
remain the same\n All sources and walls are removed.\n \"\"\"\n assert (self._ysize % 2) == (self._xsize % 2) == 0\n self.sources = []\n self.samples = []\n self.filters = {}\n self.envmat = numpy.zeros((self._ysize, self._xsize), dtype=float)\n self.angmat = numpy.ones((self._ysize, self._xsize), dtype=float) * 400.\n self.envgeom = shapelib.box(0, 0, 0, 0)\n self.receivers = []\n\n def __str__(self):\n if self.nodesize < 1:\n nodesize_str = \"%d mm\" % (self.nodesize * 1000)\n elif self.nodesize < 10:\n frac, i = math.modf(self.nodesize)\n nodesize_str = \"%d m %d cm\" % (int(i), int(frac*100))\n else:\n nodesize_str = \"%.3f\" % self.nodesize\n msg = (\n \"Simulation\",\n \"----------\",\n \"nodesize : %s\" % nodesize_str,\n \"samplerate : %d Hz\" % self.samplerate,\n \"C : %.1f m/s\" % self.c,\n \"rho : %.3f kg/m3\" % self.rho,\n \"size : %.2f m x %.2f m\" % (self.size),\n \"matrix : %d pixels x %d pixels\" % self.size_in_pixels,\n \"num. sources: %d\" % len(self.sources),\n \"num. recs. : %d\" % len(self.receivers),\n \"num. samples: %d\" % len(self.samples),\n )\n return \"\\n\".join(msg)\n\n def __repr__(self):\n x, y = self.size\n return \"Simulation(samplerate={sr:.1f}, size=({x:.1f}, {y:.1f}), duration={dur:.3f})\".format(\n sr=self.samplerate, x=x, y=y, dur=self.duration)\n\n @property\n def samplerate(self):\n return self._samplerate\n\n @samplerate.setter\n def samplerate(self, sr):\n self._samplerate = sr\n\n @property \n def nodesize(self):\n \"\"\"\n The size of each node, in m\n \"\"\"\n return nodesize(self._samplerate, self.c)\n\n @nodesize.setter\n def nodesize(self, ns):\n self._samplerate = nodesize2samplerate(ns, self.c)\n\n @property\n def distancestep(self):\n \"\"\"The distance between two nodes\"\"\"\n return distancestep(self.samplerate, self.c)\n\n @property\n def timestep(self):\n \"\"\"The time resolution of the simulation\"\"\"\n return timestep(self.samplerate, self.c)\n\n @property\n def size_in_pixels(self):\n \"\"\" the size of the simulation in pixels \"\"\"\n return (self._xsize, self._ysize)\n\n def set_size_in_pixels(self, x, y):\n \"\"\" set the size of the simulation in pixels \"\"\"\n xold, yold = self.size_in_pixels\n if x % 2 != 0:\n x += 1\n warnings.warn(\"Setting x to an even number. x=%d\" % x)\n if y % 2 != 0:\n y += 1\n warnings.warn(\"Setting y to an even number. y=%d\" % y)\n self._xsize = x\n self._ysize = y\n env, src, ang = self.envmat, self.sourcesmat, self.angmat\n self.reset()\n y0, x0 = env.shape\n ymin, xmin = min(y, y0), min(x, x0)\n if xold != x or yold != y:\n if xold < x or yold < y:\n warnings.warn(\"The simulation space will be reduced. 
Anything outside the new boundaries will be cropped\")\n self.envmat = numpy.zeros((y, x), dtype=float)\n self.envmat[:ymin,:xmin] = env[:ymin,:xmin]\n if ang is not None:\n self.angmat = numpy.ones((y, x), dtype=float) * 400.\n self.angmat[:ymin,:xmin] = ang[:ymin,:xmin]\n if self.sources:\n numoldsources = len(self.sources)\n self.sources = [source for source in self.sources if source.xpix < x and source.ypix < y]\n if numoldsources > len(self.sources):\n warnings.warn(\"The simulation space has been reduced.\"\n \"Sources were defined outside of the new boundaries and have been removed\")\n return self\n\n @property\n def size(self):\n \"\"\" the real size of the simulation in meters \"\"\"\n return pix2coord(self._xsize, self._ysize, self.samplerate, self.c)\n\n def set_size(self, x, y):\n \"\"\"\n set the size of the simulation in meters.\n \"\"\"\n X, Y = coord2pix(x, y, self.samplerate, self.c)\n return self.set_size_in_pixels(X, Y)\n\n def calculate_deadnode_geom(self, wallgeom, eps=1e-6):\n g00 = shapelib.tight_envelope(wallgeom)\n x, y = self.size\n dead = shapelib.box(0, 0, x + eps, y + eps).difference(g00)\n return dead\n\n def set_size_from_geom(self, geom, margin=0.1, deadnodes=False):\n _, _, maxx, maxy = geom.bounds\n maxx += margin\n maxy += margin\n self.set_size(maxx, maxy)\n if deadnodes:\n dead = self.calculate_deadnode_geom(geom, eps=0.5)\n self.dead_from_geometry(dead)\n return self\n\n @property\n def duration(self):\n \"\"\" \n duration of this simulation in seconds. Always a multiple of timestep\n\n SEE ALSO: timestep, set_duration\n \"\"\"\n return self.timestep * self.steps\n\n def set_duration(self, duration_in_secs):\n \"\"\" \n set the duration of the simulation in seconds. \n The resulting duration will be rounded down to the nearest \n multiple of timestep\n\n SEE ALSO: timestep, duration\n \"\"\"\n steps = int(duration_in_secs / self.timestep)\n self.steps = steps\n return self\n\n #def gridbetween(self, x0, x1):\n # L = self.nodesize\n # X0, _ = snaptogrid(x0, 0, self.samplerate)\n # X1, _ = snaptogrid(x1, 0, self.samplerate)\n # xs = numpy.arange(X0, X1, L)\n # return xs\n\n def snaptogrid(self, n):\n n, _ = self.coord2pix(n, n)\n n1, _ = self.pix2coord(n, n)\n return n1\n\n def make_receiver(self, x, y, label=\"\"):\n \"\"\"\n Create a receiver to sense pressure at position (x, y)\n\n x, y : coord of the receiver\n label: a string identifying the receiver. 
This is useful\n (and necessary) when using multiple receivers\n \"\"\"\n px, py = map(int, self.coord2pix(x, y))\n self.envmat[py, px] = -2\n self.angmat[py, px] = 0\n self.receivers.append((x, y, label))\n X, Y = self.size_in_pixels\n # sort them the way they will be interpreted later\n self.receivers.sort(key=lambda rec: rec[1] * Y + rec[0])\n\n\n #def geometry_to_pixels(self, geom):\n # \"\"\"\n # convert a shapely geometry to the pixels that form it\n # \"\"\"\n # x0, y0, x1, y1 = geom.bounds\n # px0, py0 = self.coord2pix(x0, y0)\n # px1, py1 = self.coord2pix(x1, y1)\n # for py in range(max(0, py0 - 1), py1 + 1):\n # for px in range(max(0, px0 - 1), px1 + 1):\n # rx0, ry0 = self.pix2coord(px, py)\n # rx1, ry1 = rx0 + self.nodesize, ry0 + self.nodesize\n # pix = Polygon([(rx0, ry0), (rx1, ry0), (rx1, ry1), (rx0, ry1)])\n # if geom.intersects(pix):\n # yield (px, py)\n\n def source_point(self, x, y, kind, amp=1, freq=440, phase=0, idx=None):\n \"\"\"\n Add a point source to the simulation\n\n x, y : coordinates of the point, in m\n kind : sin, square, deltapulse, expdecay, hannsin,\n vel-sin, vel-square, vel-deltapulse, vel-hannsin,\n whitenoise, pinknoise, sample\n\n amp : the amplitude of the source.\n freq : the freq of the source, when applicable.\n idx : when applicable (\"sample\" source), the index of the sample.\n \"\"\"\n xpix, ypix = coord2pix(x, y, self.samplerate)\n source = Source(xpix, ypix, kind, amp, freq, phase, sampleidx=idx)\n if self._autoadd:\n self._add_source(source) \n return source\n\n def source_from_geometry(self, geom, kind, amp, freq=None, phase=None, sampleidx=None):\n \"\"\"\n geom: a shapely geometry\n kind: sin, square, deltapulse, expdecay, hannsin,\n vel-sin, vel-square, vel-deltapulse, vel-hannsin,\n whitenoise, pinknoise, sample\n amp: the amplitude, in Pa\n freq: the freq. 
of the source, if applicable\n phase: the phase of the source, if applicable\n sampleidx: the index of the sample, for a sample source (30)\n \"\"\"\n\n mat = self._rasterize(geom)\n return self.source_from_array(mat, kind=kind, amp=amp, freq=freq, phase=phase, sampleidx=sampleidx)\n\n def source_from_array(self, mat, kind, amp, freq=None, phase=None, sampleidx=None, adapt='crop'):\n \"\"\"\n Create a source where the array is non zero\n\n kind : sin, square, deltapulse, expdecay, hannsin,\n vel-sin, vel-square, vel-deltapulse, vel-hannsin,\n whitenoise, pinknoise, sample\n\n adapt: defines the behaviour when the matrix is bigger than\n the simulation space\n 'crop' --> crops the array to the simulation space\n 'grow' --> grows the simulation space to the dimensions of the array\n\n Example\n =======\n\n # Create a source from the red traces of an image\n mask = img_to_mask(imgpath, color=(255,0,0), distance=0.2)\n sim.source_from_array(mask, 'sin', 0.2, 220)\n \"\"\"\n sources = SourceList()\n if phase is None:\n phase = 0\n Y, X = numpy.nonzero(mat)\n for x, y in zip(X, Y):\n sources.append(Source(x, y, kind, amp, freq, phase, sampleidx=sampleidx))\n if self._autoadd:\n for source in sources:\n self._add_source(source)\n return sources\n\n\n # def ang_from_geometry(self, geom, angle=None):\n # \"\"\"\n # generate a matrix where for each pixel where the\n # geom is defined the given (or, when implemented, the calculated)\n # angle is given, or 400 (any value outside 360) to indicate an empty pixel\n # \"\"\"\n #\n # mat = self._geomgrid.rasterize(geom)\n # if angle is not None:\n # mat_fore = mat * angle\n # mat[mat == 0] *= 400\n # mat += mat_fore\n # else:\n # raise ValueError(\"calculating angles out of the geometry is not implemented YET\")\n # if self._autoadd:\n # self._add_ang(mat)\n # return mat\n\n def _add_ang(self, mat):\n if self.angmat is None:\n self.angmat = numpy.ones_like(self.envmat, dtype=float) * 400.0\n i = mat != 400\n self.angmat[i] = mat[i]\n\n def filter_from_geometry(self, geom, numid):\n \"\"\"\n geom = a shapely geometry\n numid = the .numid of the filter\n \"\"\"\n assert any(f.numid == numid for f_name, f in self.filters.iteritems())\n assert numid >= 2\n mat = self._rasterize(geom)\n I = mat > 0\n mat *= numid\n if self._autoadd:\n self.envmat[I] = mat[I]\n return mat\n\n def filter_define(self, name, filtertype, freq, param, dbgain=0):\n \"\"\"\n To apply a filter you first define it here, then use it\n in the simulation with something like `filter_from_geometry`,\n where you pass the .numid of the Filter defined here.\n Once a filter is defined, you can retrieve it later by calling\n `simulation.filters.get('filtername')`\n\n name: a name or a number to identify this filter.\n If a number if given, it will be used a id\n and will be the same when the simulation is \n loaded from a .sim file \n If a name is given, a number will be assigned.\n The numerical-id can be found as .numid\n filtertype : one of 'lpf', 'hpf', 'bpf'\n freq : cut-off freq\n param : Depending on filter, acts as Q, bandwidth, or shelf slope.\n dbgain : gain in db for peaking/shelving filters (defaults to 0).\n \"\"\"\n assert name not in self.filters\n self.nfilters += 1\n if isinstance(name, int):\n numid = name\n else:\n numid = self.nfilters + 1 \n assert numid >= 2\n f = Filter.butter(filtertype=filtertype, freq=freq, samplerate=samplerate, numid=numid)\n self.filters[name] = f\n return f\n\n def sample_define(self, source):\n \"\"\"\n To use a sample as source, first you define it 
here, then use the index\n to create a 'sample' source\n\n source: either the path to a soundfile, or a tuple (frames, samplerate)\n \"\"\"\n if isinstance(source, basestring):\n sample = sndfileio.sndread(source)\n frames, sr = sample.samples, sample.sr\n channels = sndfileio.numchannels(frames)\n if channels > 1:\n print(\"The sample has %d channels. Taking only the first one\" % channels)\n frames = sndfileio.getchannel(frames, 0)\n else:\n frames, sr = source\n duration = len(frames) / sr\n if duration > self.duration:\n print(\"The sample is %0.2f seconds long. Shortening to match the duration of the simulation\" % sample.duration)\n sample = sample[:self.duration]\n if sr != self.samplerate:\n print(\"The samplerate of the sample (%d) is different from the samplerate of the simulation (%d)\" % (sample.samplerate, self.samplerate))\n print(\"--> Resampling\")\n frames = sndfileio.resample(frames, sr, self.samplerate)\n self.samples.append(frames)\n idx = len(self.samples) - 1\n return Sample(frames, sr, idx)\n \n def wall_from_geometry(self, geom, param=1, calculate_angle=False):\n \"\"\"\n Creates a Wall from a geometry defined with `shapelib`\n\n geom : a geometry created with shapelib (or with Shapely)\n param: # TODO\n calculate_angle: calculate the angles from the geometry\n\n Returns --> a Wall\n \"\"\"\n # TODO: use self._rasterize, using shapelib.rasterize\n mat = geom_rasterize(geom, self.size, self.size_in_pixels)\n mat *= param\n if calculate_angle:\n ang = self.angles_from_geom(geom)\n else:\n ang = None\n wall = Wall(mat, ang)\n if self._autoadd: \n self._add_wall(wall, geom=geom)\n return wall\n\n def wall_from_array(self, A, adapt='crop'):\n \"\"\"\n If A is smaller than the simulation space, it will be padded\n with 0s.\n adapt -> 'crop': if A is smaller than the simulation matrix, it will be padded\n if bigger, it will be cropped.\n 'grow': if A is smaller than the simulation matrix, it will be padded\n if bigger, the simulation space will grow accordingly\n \"\"\"\n adapt_options = ('crop', 'grow')\n if adapt not in adapt_options:\n raise ValueError(\"adapt should be one of %s\" % str(adapt_options))\n nx, ny = self.size_in_pixels\n ay, ax = A.shape\n if adapt == 'crop':\n print(\"adapt: crop\")\n zeros = numpy.zeros((ny, nx))\n zeros[:ay, :ax] += A[:ny,:nx]\n wall = Wall(zeros)\n elif adapt == 'grow':\n print(\"adapt: grow\")\n if ax > nx or ay > ny:\n X = max(ax, nx)\n Y = max(ay, ny)\n self.set_size_in_pixels(X, Y)\n return self.wall_from_array(A, adapt='crop')\n if self._autoadd:\n self._add_wall(wall)\n return wall\n\n def wall_from_image(self, path, adapt='crop', color=(255, 255, 255), dist=0.98):\n \"\"\"Create a wall from an image.\n\n :param path: the path of the image defining a new wall\n :param adapt: 'crop' or 'grow', determines what happends when the image\n is bigger than the simulation space\n :param color: the color of the wall (r, g, b)\n :param dist: max distance to the color\n :return: adds a wall to the simulation, returns it\n \"\"\"\n mask = img_to_mask(path, color, dist)\n return self.wall_from_array(mask, adapt=adapt)\n\n def dead_from_geometry(self, geom):\n return self.wall_from_geometry(geom, param=DEADNODE)\n\n def coord2pix(self, x, y):\n return coord2pix(x, y, self.samplerate)\n\n def pix2coord(self, x, y):\n return pix2coord(x, y, self.samplerate)\n\n def plot_walls(self):\n \"\"\"\n plot the walls of the simulation\n \"\"\"\n X, Y = self.size\n plot_array(self.envmat, X, Y)\n\n def plot(self):\n colors = {\n 'env':0.3,\n 'src':1.0\n 
}\n mat = self.envmat * colors['env']\n mat += self.sourcesmat * colors['src']\n X, Y = self.size\n plot_array(mat, X, Y, cmap='spectral')\n\n def _add_wall(self, wall, geom=None):\n if wall.envmat.shape != self.envmat.shape:\n wall.envmat = wall.envmat[:self.envmat.shape[0], :self.envmat.shape[1]]\n if wall.angmat:\n wall.angmat = wall.angmat[:self.envmat.shape[0], :self.envmat.shape[1]]\n\n i = wall.envmat > self.envmat\n self.envmat[i] = wall.envmat[i]\n deadnodes = (wall.envmat == DEADNODE)\n if deadnodes.any():\n i = deadnodes * (self.envmat == 0) # only empty spaces can be declared as dead\n self.envmat[i] = DEADNODE\n if wall.angmat is not None:\n self.angmat[i] = wall.angmat[i]\n if geom is not None:\n self.envgeom = self.envgeom.union(geom)\n\n def _add_source(self, source):\n \"\"\" source can be an individual Source or a list of Sources \n (sources are always point sources) \"\"\"\n assert isinstance(source, Source)\n self.sources.append(source)\n \n @property\n def sourcesmat(self):\n \"\"\"\n generate a 2D array representing the position of sources\n \"\"\"\n out = numpy.zeros((self._ysize, self._xsize), dtype=float)\n for source in self.sources:\n out[source.ypix, source.xpix] = 1\n return out\n\n @property\n def simfile(self):\n return self._lastsimfile\n\n def write(self, outfile=None):\n \"\"\"\n write .sim file\n \"\"\"\n if outfile is None:\n if self._lastsimfile is None:\n warnings.warn(\"this Simulation has not been saved before. saving to a temporary file\")\n import tempfile\n outfile = tempfile.mktemp(suffix='.sim', dir=os.getcwd())\n else:\n outfile = self._lastsimfile\n print(\"overwriting sim file: %s\" % outfile)\n if not self.validate():\n raise SimValidationError(\"Error when validating this simulation\")\n\n outfile = os.path.splitext(outfile)[0] + \".sim\"\n f = open(outfile, 'w')\n f.write('LAMBDASIM200')\n f.write('DEF')\n # fwrite(simFile,[YSIZE XSIZE STEPS C L RHO],'double');\n header = numpy.array([self._ysize, self._xsize, self.steps, self.c, self.nodesize, self.rho], dtype=float)\n header.tofile(f)\n if self.envmat is not None:\n f.write('ENV')\n self.envmat.tofile(f)\n if self.angmat is not None:\n f.write('ANG')\n self.angmat.tofile(f)\n if self.filters:\n f.write('FLT')\n write_double(f, len(self.filters))\n for name, filterdef in self.filters.iteritems():\n filterdef.asarray().tofile(f)\n if self.samples:\n f.write('SMP')\n write_double(f, len(self.samples))\n for i, sample in enumerate(self.samples):\n write_double(f, i) # filter ID\n write_double(f, sample.samplerate)\n write_double(f, len(sample.samples))\n sample.samples.astype(float).tofile(f)\n if not self.sources:\n raise ValueError(\"no sources defined, can't write sim file!\")\n f.write('SRC')\n write_double(f, len(self.sources))\n for source in self.sources:\n a = source.asmatlab()\n a.tofile(f)\n self._lastsimfile = os.path.abspath(outfile)\n f.close()\n return self\n\n def _get_receivers_labels(self):\n labels = []\n unnamed = 0\n for rec in self.receivers:\n x, y, label = rec\n if not label:\n xpix, ypix = self.coord2pix(x, y)\n label = \"recv{num}-{xpix}x{ypix}\".format(num=unnamed, xpix=xpix, ypix=ypix)\n unnamed += 1\n labels.append(label)\n return labels\n\n def rce2wav(self, rcefile=None, resample=None, split=None):\n \"\"\"\n Convert the already rendered `rce` to a wav file\n \"\"\"\n if rcefile is None and self._lastsimfile is None:\n raise ValueError(\"this simulation has not been run to produce an rce file\")\n if rcefile is None:\n rcefile = 
os.path.splitext(self._lastsimfile)[0] + \".rce\"\n if not os.path.exists(rcefile):\n raise IOError(\"rce file not found\")\n labels = self._get_receivers_labels()\n wavfile = rce2wav(rcefile, self.samplerate, resample=resample, split=split, splitsuffixes=labels)\n self._lastwav = wavfile\n return wavfile\n\n @property\n def wavfile(self):\n return self._lastwav\n\n def split_rendered_audio(self, path=None):\n \"\"\"\n Split a rendered file into its channels with the labels given to \n the receivers.\n Leave path unspecified to use the last rendered .wav (.wavfile)\n \"\"\"\n if path is None:\n path = self.wavfile\n if not path or not os.path.exists(path):\n raise IOError(\".wav file not found! cannot split\")\n labels = self._get_receivers_labels()\n return snd.split_channels(self.wavfile, labels)\n\n def render_receivers(self, duration=None, resample=None):\n \"\"\"\n Render the receivers to a .rce file.\n\n :param duration: in secods, it overrides the duration of the simulation\n :param resample: in Hz, overrides the duration of the simulation\n :return: a Future holding the samples. If resample was given, they will be resampled\n to the given samplerate. Otherwise, the samples will have the samplerate of\n this simulation\n \"\"\"\n if not self.simfile:\n self.write()\n rcefile = os.path.splitext(self.simfile)[0] + '.rce'\n self._rcefile = rcefile\n simfile = os.path.abspath(self.simfile)\n args = ['-rce', '-exit', '-file', simfile]\n if duration is not None:\n iterations = duration2steps(duration, self.samplerate, self.c)\n args.extend(['-iterations', iterations])\n\n def render_and_load():\n proc = call_lambda(args)\n proc.wait()\n if os.path.exists(rcefile):\n samples = rce2array(rcefile)\n if resample:\n samples = sndfileio.resample(samples, self.samplerate, resample)\n return samples\n else:\n return None\n return scheduler.call_now(render_and_load)\n\n def render_wav(self, duration=None, resample=None, split=False):\n \"\"\"\n Render the simulation as a .wav\n\n :param duration: if given, overrides the duration of the simulation (in seconds)\n :param resample: if goven, overrides the samplerate of the simulation\n :param split: if True, for each receiver a soundfile will be generated\n if False, a soundfile with as many channels as receivers will be generated\n :return: a Future holding the samples as a multichannel numpy array\n \"\"\"\n labels = self._get_receivers_labels() if split else None\n samplerate = resample if resample is not None else self.samplerate\n def func():\n samples = self.render_receivers(duration=duration, resample=resample).result()\n if samples is not None:\n outfile = os.path.splitext(self.simfile)[0] + '.wav'\n return snd.sndwrite(samples, samplerate, outfile, labels)\n else:\n raise RuntimeError(\"Receivers did not render correctly\")\n return scheduler.call_now(func)\n\n def render_video(self, walls=True, duration=None, contrast=50, quality=100, fps=None, cmap=None):\n \"\"\"\n Render the simulation as video (async)\n\n walls (bool): whether to render the walls or not\n duration (s): render a part of the simulation, or\n None to render the whole duration\n contrast (0-100): sets the contrast\n quality (0-100) : the video quality\n fps : the fps of the video, None for default\n cmap (int): index of the colormap\n 0 - grayscale\n 1 - fire\n 2 - temperature\n\n Returns --> Future(subprocess)\n \"\"\"\n if not self.simfile:\n self.write()\n simfile = os.path.abspath(self.simfile)\n if \" \" in simfile:\n simfile = '\"%s\"' % simfile\n args = ['-avi', 
'-exit', '-file', simfile, '-contrast', contrast, '-quality', quality]\n if fps is not None:\n args.extend(['-avifps', str(fps)])\n if walls:\n args.append(\"-walls\")\n if duration is not None:\n iterations = duration2steps(duration, self.samplerate, self.c)\n args.extend(['-iterations', iterations])\n if cmap is not None:\n args.extend(['-colormap', cmap])\n subp = call_lambda(args)\n return scheduler.wrap_subproc(subp)\n\n def _rasterize(self, geom):\n return geom_rasterize(geom, self.size, self.size_in_pixels)\n\n def angles_from_geom(self, geom):\n from math import degrees\n sr = self.samplerate\n edge = shapelib.edge(geom)\n mat = self._rasterize(edge)\n xpixs, ypixs = numpy.nonzero(mat)\n angles = numpy.ones_like(mat) * 400.\n maxy, maxx = angles.shape\n for xpix, ypix in zip(xpixs, ypixs):\n if xpix < maxx and ypix < maxy:\n x, y = pix2coord(xpix, ypix, sr)\n angle = degrees(shapelib.angle_at(geom, (x, y)))\n angles[ypix, xpix] = angle\n return angles\n\n def opensim(self, vis=True, walls=True, contrast=50, cmap=None, pipe=None, fps=None, skip=None):\n \"\"\"\n Open this Simulation in Lambda\n \"\"\"\n if self.simfile is None:\n print(\"Simulation needs to be written\")\n self.write()\n return open_sim_in_lambda(self.simfile, vis=vis, walls=walls, contrast=contrast, cmap=cmap, pipe=pipe, fps=fps, skip=skip)\n\n def validate(self):\n \"\"\"\n Check that this Simulation makes sense and, if problems\n are found, tries to solve them.\n\n 1) Check for collisions between sources and walls\n 2) Check that the sample sources have corresponding\n samples defined\n \"\"\"\n # check dimensions\n xpix, ypix = self.size_in_pixels\n if xpix % 2 or ypix % 2:\n print(\"Pixel size should be even!\")\n return False\n # remove sources at walls\n envmat = self.envmat\n def wall_at_source(src):\n return envmat[src.ypix, src.xpix] > 0\n nsources = len(self.sources)\n self.sources = [source for source in self.sources if not wall_at_source(source)]\n if not self.sources:\n print(\"No sources defined!\")\n return False\n nsources2 = len(self.sources)\n if nsources2 < nsources:\n print(\"%d sources found at walls were removed\" % (nsources - nsources2))\n for source in self.sources:\n if source.kind == 'sample':\n if not (0 <= source.freq < len(self.samples)):\n raise SimValidationError(\"A sampled source was defined but no corresponding samples were found\")\n return True\n\n def export_walls(self, outfile, color=None, background=(0, 0, 0)):\n \"\"\"Export the walls defined in this simulation.\n\n outfile: the path to export the walls to. 
The format will be\n determined by the extension.\n Formats allowed: PNG\n \"\"\"\n ext = os.path.splitext(outfile)[1].lower()\n if ext == '.png':\n if color is None:\n color = config['pngcolors']['wall']\n mat = (abs(self.envmat) > 0).astype(int)\n if isinstance(color, tuple):\n if background == color:\n background = (255-color[0], 255-color[1], 255-color[2])\n colormap = lambda x: interpolate_color(x, background, color)\n else:\n colormap = color\n png_save(mat, outfile, colormap)\n\n\ndef readsim(path):\n \"\"\"\n read a .sim file\n\n Returns a Simulation\n\n #TODO: support SMP chunk\n \"\"\"\n def readassert(fileobj, s):\n out = fileobj.read(len(s))\n return out == s\n def toint(n):\n intn = int(n)\n assert n == intn\n return intn\n\n with open(path, 'rb') as f:\n readassert(f, 'LAMBDASIM200')\n readassert(f, 'DEF')\n ysize, xsize, steps, c, l, rho = read_doubles(f, 6)\n ysize, xsize, steps = map(toint, (ysize, xsize, steps))\n samplerate = nodesize2samplerate(l, c)\n envmat, angmat, sources = None, None, None\n def read_SRC(f):\n numsources = toint(read_double(f))\n srcs = []\n for i in range(int(numsources)):\n y, x, sourcetype, amp, freq, phase = read_doubles(f, 6)\n srcs.append( Source(x - 1, y - 1, sourcetype, amp, freq, phase) )\n return srcs\n while True:\n chunkheader = f.read(3)\n if chunkheader == 'ENV':\n envmat = read_numpy_mat(f, ysize, xsize)\n elif chunkheader == 'ANG':\n angmat = read_numpy_mat(f, ysize, xsize)\n elif chunkheader == 'SRC':\n sources = read_SRC(f)\n # SRC is always the last chunk, so exit the loop\n break\n else:\n raise ValueError(\"chunk %s not supported!\" % chunkheader)\n duration = simulationduration(samplerate, steps, c)\n sim = Simulation(samplerate=samplerate, duration=duration, c=c, rho=rho)\n sim.set_size_in_pixels(xsize, ysize)\n sim.envmat = envmat\n sim.angmat = angmat\n sim.sources = sources\n assert len(sources) > 0\n return sim\n\n\ndef simwrite(outfile, samplerate, dur_ms, c, rho, pixsize, sources,\n envmat=None, angmat=None, filters=None, samples=None):\n \"\"\"\n write .sim file\n\n outfile: the path of the .sim to write\n samplerate: the samplerate of the simulation\n dur_ms : duration in ms\n c : propagation speed\n rho : density\n pixsize: the size of the canvas in pixels (x, y)\n sources: a list of Sources\n envmat : a matrix representing the env., or None\n angmat : a matrix representing the angles, or None\n filters: a list of Filters, or None\n samples: a list of Samples, or None\n \"\"\"\n outfile = os.path.splitext(outfile)[0] + \".sim\"\n f = open(outfile, 'w')\n f.write('LAMBDASIM200')\n f.write('DEF')\n xsize, ysize = pixsize\n steps = duration2steps(dur_ms/1000., samplerate, c)\n ns = nodesize(samplerate, c)\n # write header\n numpy.array([ysize, xsize, steps, c, ns, rho], dtype=float).tofile(f)\n if envmat is not None:\n assert envmat.shape == (ysize, xsize)\n assert envmat.shape[0]%2 == 0\n assert envmat.shape[1]%2 == 0\n f.write('ENV')\n envmat.tofile(f)\n if angmat is not None:\n assert angmat.shape == (ysize, xsize)\n if envmat:\n assert angmat.shape == envmat.shape\n assert angmat.shape[0] % 2 == 0\n assert angmat.shape[1] % 2 == 0\n f.write('ANG')\n angmat.tofile(f)\n if filters:\n f.write('FLT')\n write_double(f, len(filters))\n for filt in filters:\n filt.asarray().tofile(f)\n if samples:\n f.write('SMP')\n write_double(f, len(samples))\n for i, sample in enumerate(samples):\n write_double(f, i) # filter ID\n write_double(f, sample.samplerate)\n write_double(f, len(sample.samples))\n 
sample.samples.astype(float).tofile(f)\n if not sources:\n raise ValueError(\"no sources defined, can't write sim file!\")\n f.write('SRC')\n write_double(f, len(sources))\n for source in sources:\n a = source.asmatlab()\n a.tofile(f)\n f.close()\n\n\ndef deriv2angle(deriv):\n \"\"\"\n convert a derivative to an angle in 360 form. \n 0 is N, 90 is E\n \"\"\"\n tan_alpha = deriv\n alpha = math.degrees(math.atan(tan_alpha))\n alpha = (360 - alpha + 90) % 360\n return alpha\n\n\ndef rce2array(rcefile):\n \"\"\"\n Read a .rce (raw pressure) file, returns a numpy array of shape (numframes, numchannels)\n for multichannel files, or shape = (numframes,) for mono files\n\n :param rcefile: the path to a .rce file\n \"\"\"\n f = open(rcefile, 'rb')\n numch = int(read_double(f))\n raw = numpy.fromfile(f, dtype=float)\n assert len(raw) % numch == 0\n if numch > 0:\n raw.shape = (len(raw) / numch, numch)\n return raw\n\n\ndef samples2wav(samples, samplerate, outfile, resample=None, split=False, splitsuffixes=None):\n def name_with_suffix(origname, suffix, ext=None):\n base, origext = os.path.splitext(origname)\n if ext is None:\n ext = origext\n return \"%s-%s.%s\" % (base, suffix, ext)\n if resample is not None:\n samples = sndfileio.resample(samples, samplerate, resample)\n samplerate = resample\n numch = samples.shape[1] if len(samples.shape) > 1 else 1\n\n if not split or numch == 1:\n sndfileio.sndwrite(samples, samplerate, outfile)\n return outfile\n else:\n outfiles = []\n if splitsuffixes is None:\n splitsuffixes = [str(i) for i in range(numch)]\n else:\n splitsuffixes = splitsuffixes[:numch]\n for i, suffix in enumerate(splitsuffixes):\n channelfile = name_with_suffix(outfile, suffix=suffix)\n outfiles.append(channelfile)\n sndfileio.sndwrite(samples[:,i], samplerate, channelfile)\n return outfiles\n\n\ndef rce2wav(rcefile, samplerate, resample=None, split=False, splitsuffixes=None):\n \"\"\"\n a rce file is a raw, float64 file containing pressure level at each frame.\n\n rcefile : path to the .rce file\n samplerate : samplerate of the simulation (sim.samplerate)\n resample : new samplerate or None to keep current samplerate\n split : create individual soundfiles for each receiver or\n a multichannel soundfile in the case of multiple\n receivers\n splitsuffixes : if multiple receivers are present and `split`\n is True, use these suffixes (the number must\n match the number of receivers)\n Otherwise, each soundfile will be suffixed\n with the index of each receiver\n\n FORMAT:\n 1 double: number of sources\n Each frame then contains the data for each source, interleaved\n\n NOTE:\n an .rce file is simply raw data. To load it: numpy.fromfile(path, dtype=float).\n The samplerate is not saved with the data, but it is the same used by the\n simulation which created it.\n\n Returns --> the names of the outfile(s) written\n \"\"\"\n samples = rce2array(rcefile)\n outfile = os.path.splitext(rcefile)[0] + \".wav\"\n return samples2wav(samples, samplerate=samplerate, outfile=outfile, resample=resample, split=split, splitsuffixes=splitsuffixes)\n\n\n\n\n\n\n"
] | [
[
"numpy.array",
"numpy.ones_like",
"numpy.zeros",
"numpy.ones",
"numpy.nonzero",
"numpy.fromfile"
]
] |
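A minimal sketch, assuming only NumPy and a .sim file on disk, of reading back the DEF header that the write()/simwrite() routines in the record above lay out (the 'LAMBDASIM200' magic, a 'DEF' tag, then six float64 values in the order ysize, xsize, steps, c, nodesize, rho). The read_sim_header name and the 'example.sim' path are illustrative and not part of the original code.

import numpy as np

def read_sim_header(path):
    # Mirrors the header layout used by write()/simwrite() and checked by readsim().
    with open(path, 'rb') as f:
        magic = f.read(12)                      # b'LAMBDASIM200'
        assert magic == b'LAMBDASIM200', "not a LAMBDASIM200 file"
        assert f.read(3) == b'DEF', "missing DEF chunk"
        ysize, xsize, steps, c, nodesize, rho = np.fromfile(f, dtype=np.float64, count=6)
        return {
            'ysize': int(ysize), 'xsize': int(xsize), 'steps': int(steps),
            'c': c, 'nodesize': nodesize, 'rho': rho,
        }

# Hypothetical usage:
# print(read_sim_header('example.sim'))

Keeping the six doubles in this fixed order is what lets readsim() recover samplerate and duration via nodesize2samplerate() and simulationduration() without any extra metadata in the file.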
leilayasmeen/MSc_Thesis | [
"ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d"
] | [
"final_code/nn_training_scripts/mixup_generator.py"
] | [
"# This file is used when training neural networks with mixup\n# It should be called from the neural network training file.\n# It implements mixup, as descripted in the 2017 paper by Zhang et al.,\n# by creating training batches of mixed images\n# The code was adapted from the implementation at: \n# https://raw.githubusercontent.com/yu4u/mixup-generator/master/mixup_generator.py\n\nimport numpy as np\n\nclass MixupGenerator():\n def __init__(self, X_train, y_train, batch_size=32,\n alpha=0.2, shuffle=True, datagen=None):\n self.X_train = X_train\n self.y_train = y_train\n self.batch_size = batch_size\n self.alpha = alpha\n self.shuffle = shuffle\n self.sample_num = len(X_train)\n self.datagen = datagen\n\n def __call__(self):\n while True:\n indexes = self.__get_exploration_order()\n itr_num = int(len(indexes) // (self.batch_size * 2))\n\n for i in range(itr_num):\n batch_ids = indexes[i*self.batch_size*2:(i+1)*self.batch_size*2]\n X, y = self.__data_generation(batch_ids)\n\n yield X, y\n\n def __get_exploration_order(self):\n indexes = np.arange(self.sample_num)\n\n if self.shuffle:\n np.random.shuffle(indexes)\n\n return indexes\n\n # Create mixed images in each training batch\n def __data_generation(self, batch_ids):\n _, h, w, c = self.X_train.shape\n l = np.random.beta(self.alpha, self.alpha, self.batch_size)\n X_l = l.reshape(self.batch_size, 1, 1, 1)\n y_l = l.reshape(self.batch_size, 1)\n\n X1 = self.X_train[batch_ids[:self.batch_size]]\n X2 = self.X_train[batch_ids[self.batch_size:]]\n X = X1 * X_l + X2 * (1 - X_l)\n\n if self.datagen:\n for i in range(self.batch_size):\n X[i] = self.datagen.random_transform(X[i])\n X[i] = self.datagen.standardize(X[i])\n\n if isinstance(self.y_train, list):\n y = []\n\n for y_train_ in self.y_train:\n y1 = y_train_[batch_ids[:self.batch_size]]\n y2 = y_train_[batch_ids[self.batch_size:]]\n y.append(y1 * y_l + y2 * (1 - y_l))\n else:\n y1 = self.y_train[batch_ids[:self.batch_size]]\n y2 = self.y_train[batch_ids[self.batch_size:]]\n y = y1 * y_l + y2 * (1 - y_l)\n\n return X, y\n"
] | [
[
"numpy.random.beta",
"numpy.arange",
"numpy.random.shuffle"
]
] |
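A minimal standalone sketch of the mixing step that MixupGenerator.__data_generation performs in the record above: a per-sample lambda drawn from Beta(alpha, alpha) blends two image batches and their one-hot labels with the same coefficients. The mixup_batch helper, the CIFAR-like input shapes, and the 10-class labels are illustrative assumptions, not taken from the thesis code.

import numpy as np

def mixup_batch(X1, y1, X2, y2, alpha=0.2):
    # One lambda per sample, shared between pixels and labels so the soft
    # label matches the blended image exactly.
    lam = np.random.beta(alpha, alpha, size=len(X1))
    X_l = lam.reshape(-1, 1, 1, 1)   # broadcast over (h, w, c)
    y_l = lam.reshape(-1, 1)         # broadcast over classes
    return X1 * X_l + X2 * (1 - X_l), y1 * y_l + y2 * (1 - y_l)

# Illustrative data (shapes are assumptions):
X = np.random.rand(64, 32, 32, 3).astype('float32')
y = np.eye(10)[np.random.randint(0, 10, size=64)]
X_mix, y_mix = mixup_batch(X[:32], y[:32], X[32:], y[32:])
print(X_mix.shape, y_mix.shape)  # (32, 32, 32, 3) (32, 10)

This is why the generator walks the shuffled index list in strides of batch_size * 2: each yielded batch needs two disjoint halves to mix together.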
ivallesp/keras | [
"1a35ff2788b5e6880ceb8af82e1a8d5f72d0f76f"
] | [
"keras/metrics_test.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras metrics functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport json\nimport math\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom keras import backend\nfrom keras import combinations\nfrom keras import keras_parameterized\nfrom keras import layers\nfrom keras import metrics\nfrom keras import Model\nfrom keras import testing_utils\nfrom keras.engine import base_layer\nfrom keras.engine import training as training_mod\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass KerasSumTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_sum(self):\n with self.test_session():\n m = metrics.Sum(name='my_sum')\n\n # check config\n self.assertEqual(m.name, 'my_sum')\n self.assertTrue(m.stateful)\n self.assertEqual(m.dtype, tf.float32)\n self.assertLen(m.variables, 1)\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n\n # check initial state\n self.assertEqual(self.evaluate(m.total), 0)\n\n # check __call__()\n self.assertEqual(self.evaluate(m(100)), 100)\n self.assertEqual(self.evaluate(m.total), 100)\n\n # check update_state() and result() + state accumulation + tensor input\n update_op = m.update_state(tf.convert_to_tensor([1, 5]))\n self.evaluate(update_op)\n self.assertAlmostEqual(self.evaluate(m.result()), 106)\n self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5\n\n # check reset_states()\n m.reset_states()\n self.assertEqual(self.evaluate(m.total), 0)\n\n def test_sum_with_sample_weight(self):\n m = metrics.Sum(dtype=tf.float64)\n self.assertEqual(m.dtype, tf.float64)\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n\n # check scalar weight\n result_t = m(100, sample_weight=0.5)\n self.assertEqual(self.evaluate(result_t), 50)\n self.assertEqual(self.evaluate(m.total), 50)\n\n # check weights not scalar and weights rank matches values rank\n result_t = m([1, 5], sample_weight=[1, 0.2])\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 52., 4) # 50 + 1 + 5 * 0.2\n self.assertAlmostEqual(self.evaluate(m.total), 52., 4)\n\n # check weights broadcast\n result_t = m([1, 2], sample_weight=0.5)\n self.assertAlmostEqual(self.evaluate(result_t), 53.5, 1) # 52 + 0.5 + 1\n self.assertAlmostEqual(self.evaluate(m.total), 53.5, 1)\n\n # check weights squeeze\n result_t = m([1, 5], sample_weight=[[1], [0.2]])\n self.assertAlmostEqual(self.evaluate(result_t), 55.5, 1) # 53.5 + 1 + 1\n self.assertAlmostEqual(self.evaluate(m.total), 55.5, 1)\n\n # check weights expand\n result_t = m([[1], [5]], sample_weight=[1, 0.2])\n self.assertAlmostEqual(self.evaluate(result_t), 57.5, 2) # 55.5 + 1 + 1\n self.assertAlmostEqual(self.evaluate(m.total), 57.5, 1)\n\n # check values 
reduced to the dimensions of weight\n result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])\n result = np.round(self.evaluate(result_t), decimals=2)\n # result = (prev: 57.5) + 0.5 + 1 + 1.5 + 1 + 0.25 + 2\n self.assertAlmostEqual(result, 63.75, 2)\n self.assertAlmostEqual(self.evaluate(m.total), 63.75, 2)\n\n def test_sum_graph_with_placeholder(self):\n with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:\n m = metrics.Sum()\n v = tf.compat.v1.placeholder(tf.float32)\n w = tf.compat.v1.placeholder(tf.float32)\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n\n # check __call__()\n result_t = m(v, sample_weight=w)\n result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))\n self.assertEqual(result, 50)\n self.assertEqual(self.evaluate(m.total), 50)\n\n # check update_state() and result()\n result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))\n self.assertAlmostEqual(result, 52., 2) # 50 + 1 + 5 * 0.2\n self.assertAlmostEqual(self.evaluate(m.total), 52., 2)\n\n def test_save_restore(self):\n with self.test_session():\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')\n m = metrics.Sum()\n checkpoint = tf.train.Checkpoint(sum=m)\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n\n # update state\n self.evaluate(m(100.))\n self.evaluate(m(200.))\n\n # save checkpoint and then add an update\n save_path = checkpoint.save(checkpoint_prefix)\n self.evaluate(m(1000.))\n\n # restore to the same checkpoint sum object (= 300)\n checkpoint.restore(save_path).assert_consumed().run_restore_ops()\n self.evaluate(m(300.))\n self.assertEqual(600., self.evaluate(m.result()))\n\n # restore to a different checkpoint sum object\n restore_sum = metrics.Sum()\n restore_checkpoint = tf.train.Checkpoint(sum=restore_sum)\n status = restore_checkpoint.restore(save_path)\n restore_update = restore_sum(300.)\n status.assert_consumed().run_restore_ops()\n self.evaluate(restore_update)\n self.assertEqual(600., self.evaluate(restore_sum.result()))\n\n\nclass MeanTest(keras_parameterized.TestCase):\n\n # TODO(b/120949004): Re-enable garbage collection check\n # @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\n @keras_parameterized.run_all_keras_modes\n def test_mean(self):\n m = metrics.Mean(name='my_mean')\n\n # check config\n self.assertEqual(m.name, 'my_mean')\n self.assertTrue(m.stateful)\n self.assertEqual(m.dtype, tf.float32)\n self.assertEqual(len(m.variables), 2)\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n\n # check initial state\n self.assertEqual(self.evaluate(m.total), 0)\n self.assertEqual(self.evaluate(m.count), 0)\n\n # check __call__()\n self.assertEqual(self.evaluate(m(100)), 100)\n self.assertEqual(self.evaluate(m.total), 100)\n self.assertEqual(self.evaluate(m.count), 1)\n\n # check update_state() and result() + state accumulation + tensor input\n update_op = m.update_state([\n tf.convert_to_tensor(1),\n tf.convert_to_tensor(5)\n ])\n self.evaluate(update_op)\n self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)\n self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5\n self.assertEqual(self.evaluate(m.count), 3)\n\n # check reset_states()\n m.reset_states()\n self.assertEqual(self.evaluate(m.total), 0)\n self.assertEqual(self.evaluate(m.count), 0)\n\n # Check save and restore config\n m2 = metrics.Mean.from_config(m.get_config())\n self.assertEqual(m2.name, 'my_mean')\n 
self.assertTrue(m2.stateful)\n self.assertEqual(m2.dtype, tf.float32)\n self.assertEqual(len(m2.variables), 2)\n\n @testing_utils.run_v2_only\n def test_function_wrapped_reset_state(self):\n m = metrics.Mean(name='my_mean')\n\n # check reset_states in function.\n @tf.function\n def reset_in_fn():\n m.reset_states()\n return m.update_state(100)\n\n for _ in range(5):\n self.evaluate(reset_in_fn())\n self.assertEqual(self.evaluate(m.count), 1)\n\n @keras_parameterized.run_all_keras_modes\n def test_mean_with_sample_weight(self):\n m = metrics.Mean(dtype=tf.float64)\n self.assertEqual(m.dtype, tf.float64)\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n\n # check scalar weight\n result_t = m(100, sample_weight=0.5)\n self.assertEqual(self.evaluate(result_t), 50 / 0.5)\n self.assertEqual(self.evaluate(m.total), 50)\n self.assertEqual(self.evaluate(m.count), 0.5)\n\n # check weights not scalar and weights rank matches values rank\n result_t = m([1, 5], sample_weight=[1, 0.2])\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 52 / 1.7, 2)\n self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2\n self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2\n\n # check weights broadcast\n result_t = m([1, 2], sample_weight=0.5)\n self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)\n self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2) # 52 + 0.5 + 1\n self.assertAlmostEqual(self.evaluate(m.count), 2.7, 2) # 1.7 + 0.5 + 0.5\n\n # check weights squeeze\n result_t = m([1, 5], sample_weight=[[1], [0.2]])\n self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)\n self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2) # 53.5 + 1 + 1\n self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2) # 2.7 + 1.2\n\n # check weights expand\n result_t = m([[1], [5]], sample_weight=[1, 0.2])\n self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)\n self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2) # 55.5 + 1 + 1\n self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2) # 3.9 + 1.2\n\n # check values reduced to the dimensions of weight\n result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])\n result = np.round(self.evaluate(result_t), decimals=2) # 58.5 / 5.6\n self.assertEqual(result, 10.45)\n self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54)\n self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6)\n\n @keras_parameterized.run_all_keras_modes\n def test_mean_graph_with_placeholder(self):\n with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:\n m = metrics.Mean()\n v = tf.compat.v1.placeholder(tf.float32)\n w = tf.compat.v1.placeholder(tf.float32)\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n\n # check __call__()\n result_t = m(v, sample_weight=w)\n result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))\n self.assertEqual(self.evaluate(m.total), 50)\n self.assertEqual(self.evaluate(m.count), 0.5)\n self.assertEqual(result, 50 / 0.5)\n\n # check update_state() and result()\n result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))\n self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2\n self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2\n self.assertAlmostEqual(result, 52 / 1.7, 2)\n\n @keras_parameterized.run_all_keras_modes\n def test_save_restore(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')\n m = 
metrics.Mean()\n checkpoint = tf.train.Checkpoint(mean=m)\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n\n # update state\n self.evaluate(m(100.))\n self.evaluate(m(200.))\n\n # save checkpoint and then add an update\n save_path = checkpoint.save(checkpoint_prefix)\n self.evaluate(m(1000.))\n\n # restore to the same checkpoint mean object\n checkpoint.restore(save_path).assert_consumed().run_restore_ops()\n self.evaluate(m(300.))\n self.assertEqual(200., self.evaluate(m.result()))\n\n # restore to a different checkpoint mean object\n restore_mean = metrics.Mean()\n restore_checkpoint = tf.train.Checkpoint(mean=restore_mean)\n status = restore_checkpoint.restore(save_path)\n restore_update = restore_mean(300.)\n status.assert_consumed().run_restore_ops()\n self.evaluate(restore_update)\n self.assertEqual(200., self.evaluate(restore_mean.result()))\n self.assertEqual(3, self.evaluate(restore_mean.count))\n\n @keras_parameterized.run_all_keras_modes\n def test_multiple_instances(self):\n m = metrics.Mean()\n m2 = metrics.Mean()\n\n self.assertEqual(m.name, 'mean')\n self.assertEqual(m2.name, 'mean')\n\n self.assertEqual([v.name for v in m.variables],\n testing_utils.get_expected_metric_variable_names(\n ['total', 'count']))\n self.assertEqual([v.name for v in m2.variables],\n testing_utils.get_expected_metric_variable_names(\n ['total', 'count'], name_suffix='_1'))\n\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n self.evaluate(tf.compat.v1.variables_initializer(m2.variables))\n\n # check initial state\n self.assertEqual(self.evaluate(m.total), 0)\n self.assertEqual(self.evaluate(m.count), 0)\n self.assertEqual(self.evaluate(m2.total), 0)\n self.assertEqual(self.evaluate(m2.count), 0)\n\n # check __call__()\n self.assertEqual(self.evaluate(m(100)), 100)\n self.assertEqual(self.evaluate(m.total), 100)\n self.assertEqual(self.evaluate(m.count), 1)\n self.assertEqual(self.evaluate(m2.total), 0)\n self.assertEqual(self.evaluate(m2.count), 0)\n\n self.assertEqual(self.evaluate(m2([63, 10])), 36.5)\n self.assertEqual(self.evaluate(m2.total), 73)\n self.assertEqual(self.evaluate(m2.count), 2)\n self.assertEqual(self.evaluate(m.result()), 100)\n self.assertEqual(self.evaluate(m.total), 100)\n self.assertEqual(self.evaluate(m.count), 1)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass KerasAccuracyTest(tf.test.TestCase):\n\n def test_accuracy(self):\n acc_obj = metrics.Accuracy(name='my_acc')\n\n # check config\n self.assertEqual(acc_obj.name, 'my_acc')\n self.assertTrue(acc_obj.stateful)\n self.assertEqual(len(acc_obj.variables), 2)\n self.assertEqual(acc_obj.dtype, tf.float32)\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n\n # verify that correct value is returned\n update_op = acc_obj.update_state([[1], [2], [3], [4]], [[1], [2], [3], [4]])\n self.evaluate(update_op)\n result = self.evaluate(acc_obj.result())\n self.assertEqual(result, 1) # 2/2\n\n # Check save and restore config\n a2 = metrics.Accuracy.from_config(acc_obj.get_config())\n self.assertEqual(a2.name, 'my_acc')\n self.assertTrue(a2.stateful)\n self.assertEqual(len(a2.variables), 2)\n self.assertEqual(a2.dtype, tf.float32)\n\n # check with sample_weight\n result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7\n\n def test_accuracy_ragged(self):\n acc_obj = metrics.Accuracy(name='my_acc')\n 
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n\n # verify that correct value is returned\n rt1 = tf.ragged.constant([[1], [2], [3], [4]])\n rt2 = tf.ragged.constant([[1], [2], [3], [4]])\n update_op = acc_obj.update_state(rt1, rt2)\n self.evaluate(update_op)\n result = self.evaluate(acc_obj.result())\n self.assertEqual(result, 1) # 2/2\n\n # check with sample_weight\n rt1 = tf.ragged.constant([[2], [1]])\n rt2 = tf.ragged.constant([[2], [0]])\n sw_ragged = tf.ragged.constant([[0.5], [0.2]])\n result_t = acc_obj(rt1, rt2, sample_weight=sw_ragged)\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7\n\n def test_binary_accuracy(self):\n acc_obj = metrics.BinaryAccuracy(name='my_acc')\n\n # check config\n self.assertEqual(acc_obj.name, 'my_acc')\n self.assertTrue(acc_obj.stateful)\n self.assertEqual(len(acc_obj.variables), 2)\n self.assertEqual(acc_obj.dtype, tf.float32)\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n\n # verify that correct value is returned\n update_op = acc_obj.update_state([[1], [0]], [[1], [0]])\n self.evaluate(update_op)\n result = self.evaluate(acc_obj.result())\n self.assertEqual(result, 1) # 2/2\n\n # check y_pred squeeze\n update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])\n self.evaluate(update_op)\n result = self.evaluate(acc_obj.result())\n self.assertAlmostEqual(result, 0.75, 2) # 3/4\n\n # check y_true squeeze\n result_t = acc_obj([[[1]], [[1]]], [[1], [0]])\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 0.67, 2) # 4/6\n\n # check with sample_weight\n result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 0.67, 2) # 4.5/6.7\n\n def test_binary_accuracy_ragged(self):\n acc_obj = metrics.BinaryAccuracy(name='my_acc')\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n\n # verify that correct value is returned\n rt1 = tf.ragged.constant([[1], [0]])\n rt2 = tf.ragged.constant([[1], [0]])\n update_op = acc_obj.update_state(rt1, rt2)\n self.evaluate(update_op)\n result = self.evaluate(acc_obj.result())\n self.assertEqual(result, 1) # 2/2\n\n # check y_true squeeze only supported for dense tensors and is\n # not supported by ragged tensor (different ranks). 
--> error\n rt1 = tf.ragged.constant([[[1], [1]]])\n rt2 = tf.ragged.constant([[1], [0]])\n with self.assertRaises(ValueError):\n result_t = acc_obj(rt1, rt2)\n result = self.evaluate(result_t)\n\n def test_binary_accuracy_threshold(self):\n acc_obj = metrics.BinaryAccuracy(threshold=0.7)\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 0.5, 2)\n\n def test_binary_accuracy_threshold_ragged(self):\n acc_obj = metrics.BinaryAccuracy(threshold=0.7)\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n rt1 = tf.ragged.constant([[1], [1], [0], [0]])\n rt2 = tf.ragged.constant([[0.9], [0.6], [0.4], [0.8]])\n result_t = acc_obj(rt1, rt2)\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 0.5, 2)\n\n def test_categorical_accuracy(self):\n acc_obj = metrics.CategoricalAccuracy(name='my_acc')\n\n # check config\n self.assertEqual(acc_obj.name, 'my_acc')\n self.assertTrue(acc_obj.stateful)\n self.assertEqual(len(acc_obj.variables), 2)\n self.assertEqual(acc_obj.dtype, tf.float32)\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n\n # verify that correct value is returned\n update_op = acc_obj.update_state([[0, 0, 1], [0, 1, 0]],\n [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])\n self.evaluate(update_op)\n result = self.evaluate(acc_obj.result())\n self.assertEqual(result, 1) # 2/2\n\n # check with sample_weight\n result_t = acc_obj([[0, 0, 1], [0, 1, 0]],\n [[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]])\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7\n\n def test_categorical_accuracy_ragged(self):\n acc_obj = metrics.CategoricalAccuracy(name='my_acc')\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n\n # verify that correct value is returned\n rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]])\n rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])\n update_op = acc_obj.update_state(rt1, rt2)\n self.evaluate(update_op)\n result = self.evaluate(acc_obj.result())\n self.assertEqual(result, 1) # 2/2\n\n # check with sample_weight\n rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]])\n rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0, 0.95]])\n sample_weight = tf.ragged.constant([[0.5], [0.2]])\n with self.assertRaises(tf.errors.InvalidArgumentError):\n result_t = acc_obj(rt1, rt2, sample_weight)\n result = self.evaluate(result_t)\n\n def test_sparse_categorical_accuracy(self):\n acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')\n\n # check config\n self.assertEqual(acc_obj.name, 'my_acc')\n self.assertTrue(acc_obj.stateful)\n self.assertEqual(len(acc_obj.variables), 2)\n self.assertEqual(acc_obj.dtype, tf.float32)\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n\n # verify that correct value is returned\n update_op = acc_obj.update_state([[2], [1]],\n [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])\n self.evaluate(update_op)\n result = self.evaluate(acc_obj.result())\n self.assertEqual(result, 1) # 2/2\n\n # check with sample_weight\n result_t = acc_obj([[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],\n [[0.5], [0.2]])\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7\n\n def test_sparse_categorical_accuracy_ragged(self):\n acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')\n\n # verify that correct value is returned\n rt1 = tf.ragged.constant([[2], [1]])\n rt2 = 
tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])\n\n with self.assertRaises(tf.errors.InvalidArgumentError):\n # sparse_categorical_accuracy is not supported for composite/ragged\n # tensors.\n update_op = acc_obj.update_state(rt1, rt2)\n self.evaluate(update_op)\n\n def test_sparse_categorical_accuracy_mismatched_dims(self):\n acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')\n\n # check config\n self.assertEqual(acc_obj.name, 'my_acc')\n self.assertTrue(acc_obj.stateful)\n self.assertEqual(len(acc_obj.variables), 2)\n self.assertEqual(acc_obj.dtype, tf.float32)\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n\n # verify that correct value is returned\n update_op = acc_obj.update_state([2, 1], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])\n self.evaluate(update_op)\n result = self.evaluate(acc_obj.result())\n self.assertEqual(result, 1) # 2/2\n\n # check with sample_weight\n result_t = acc_obj([2, 1], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],\n [[0.5], [0.2]])\n result = self.evaluate(result_t)\n self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7\n\n def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self):\n with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:\n acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')\n self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))\n\n t = tf.compat.v1.placeholder(tf.float32)\n p = tf.compat.v1.placeholder(tf.float32)\n w = tf.compat.v1.placeholder(tf.float32)\n\n result_t = acc_obj(t, p, w)\n result = sess.run(\n result_t,\n feed_dict=({\n t: [2, 1],\n p: [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],\n w: [[0.5], [0.2]]\n }))\n self.assertAlmostEqual(result, 0.71, 2) # 2.5/2.7\n\n def test_get_acc(self):\n acc_fn = metrics.get('acc')\n self.assertEqual(acc_fn, metrics.accuracy)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass CosineSimilarityTest(tf.test.TestCase):\n\n def l2_norm(self, x, axis):\n epsilon = 1e-12\n square_sum = np.sum(np.square(x), axis=axis, keepdims=True)\n x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))\n return np.multiply(x, x_inv_norm)\n\n def setup(self, axis=1):\n self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)\n self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)\n\n y_true = self.l2_norm(self.np_y_true, axis)\n y_pred = self.l2_norm(self.np_y_pred, axis)\n self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))\n\n self.y_true = tf.constant(self.np_y_true)\n self.y_pred = tf.constant(self.np_y_pred)\n\n def test_config(self):\n cosine_obj = metrics.CosineSimilarity(\n axis=2, name='my_cos', dtype=tf.int32)\n self.assertEqual(cosine_obj.name, 'my_cos')\n self.assertEqual(cosine_obj._dtype, tf.int32)\n\n # Check save and restore config\n cosine_obj2 = metrics.CosineSimilarity.from_config(cosine_obj.get_config())\n self.assertEqual(cosine_obj2.name, 'my_cos')\n self.assertEqual(cosine_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n self.setup()\n cosine_obj = metrics.CosineSimilarity()\n self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))\n loss = cosine_obj(self.y_true, self.y_pred)\n expected_loss = np.mean(self.expected_loss)\n self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)\n\n def test_weighted(self):\n self.setup()\n cosine_obj = metrics.CosineSimilarity()\n self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))\n sample_weight = np.asarray([1.2, 3.4])\n loss = cosine_obj(\n self.y_true,\n 
self.y_pred,\n sample_weight=tf.constant(sample_weight))\n expected_loss = np.sum(\n self.expected_loss * sample_weight) / np.sum(sample_weight)\n self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)\n\n def test_axis(self):\n self.setup(axis=1)\n cosine_obj = metrics.CosineSimilarity(axis=1)\n self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))\n loss = cosine_obj(self.y_true, self.y_pred)\n expected_loss = np.mean(self.expected_loss)\n self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass MeanAbsoluteErrorTest(tf.test.TestCase):\n\n def test_config(self):\n mae_obj = metrics.MeanAbsoluteError(name='my_mae', dtype=tf.int32)\n self.assertEqual(mae_obj.name, 'my_mae')\n self.assertEqual(mae_obj._dtype, tf.int32)\n\n # Check save and restore config\n mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config())\n self.assertEqual(mae_obj2.name, 'my_mae')\n self.assertEqual(mae_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n mae_obj = metrics.MeanAbsoluteError()\n self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n\n update_op = mae_obj.update_state(y_true, y_pred)\n self.evaluate(update_op)\n result = mae_obj.result()\n self.assertAllClose(0.5, result, atol=1e-5)\n\n def test_weighted(self):\n mae_obj = metrics.MeanAbsoluteError()\n self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n sample_weight = tf.constant((1., 1.5, 2., 2.5))\n result = mae_obj(y_true, y_pred, sample_weight=sample_weight)\n self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass MeanAbsolutePercentageErrorTest(tf.test.TestCase):\n\n def test_config(self):\n mape_obj = metrics.MeanAbsolutePercentageError(\n name='my_mape', dtype=tf.int32)\n self.assertEqual(mape_obj.name, 'my_mape')\n self.assertEqual(mape_obj._dtype, tf.int32)\n\n # Check save and restore config\n mape_obj2 = metrics.MeanAbsolutePercentageError.from_config(\n mape_obj.get_config())\n self.assertEqual(mape_obj2.name, 'my_mape')\n self.assertEqual(mape_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n mape_obj = metrics.MeanAbsolutePercentageError()\n self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n\n update_op = mape_obj.update_state(y_true, y_pred)\n self.evaluate(update_op)\n result = mape_obj.result()\n self.assertAllClose(35e7, result, atol=1e-5)\n\n def test_weighted(self):\n mape_obj = metrics.MeanAbsolutePercentageError()\n self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n sample_weight = tf.constant((1., 1.5, 2., 2.5))\n result = mape_obj(y_true, y_pred, sample_weight=sample_weight)\n 
self.assertAllClose(40e7, self.evaluate(result), atol=1e-5)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass MeanSquaredErrorTest(tf.test.TestCase):\n\n def test_config(self):\n mse_obj = metrics.MeanSquaredError(name='my_mse', dtype=tf.int32)\n self.assertEqual(mse_obj.name, 'my_mse')\n self.assertEqual(mse_obj._dtype, tf.int32)\n\n # Check save and restore config\n mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config())\n self.assertEqual(mse_obj2.name, 'my_mse')\n self.assertEqual(mse_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n mse_obj = metrics.MeanSquaredError()\n self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n\n update_op = mse_obj.update_state(y_true, y_pred)\n self.evaluate(update_op)\n result = mse_obj.result()\n self.assertAllClose(0.5, result, atol=1e-5)\n\n def test_weighted(self):\n mse_obj = metrics.MeanSquaredError()\n self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n sample_weight = tf.constant((1., 1.5, 2., 2.5))\n result = mse_obj(y_true, y_pred, sample_weight=sample_weight)\n self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass MeanSquaredLogarithmicErrorTest(tf.test.TestCase):\n\n def test_config(self):\n msle_obj = metrics.MeanSquaredLogarithmicError(\n name='my_msle', dtype=tf.int32)\n self.assertEqual(msle_obj.name, 'my_msle')\n self.assertEqual(msle_obj._dtype, tf.int32)\n\n # Check save and restore config\n msle_obj2 = metrics.MeanSquaredLogarithmicError.from_config(\n msle_obj.get_config())\n self.assertEqual(msle_obj2.name, 'my_msle')\n self.assertEqual(msle_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n msle_obj = metrics.MeanSquaredLogarithmicError()\n self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n\n update_op = msle_obj.update_state(y_true, y_pred)\n self.evaluate(update_op)\n result = msle_obj.result()\n self.assertAllClose(0.24022, result, atol=1e-5)\n\n def test_weighted(self):\n msle_obj = metrics.MeanSquaredLogarithmicError()\n self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n sample_weight = tf.constant((1., 1.5, 2., 2.5))\n result = msle_obj(y_true, y_pred, sample_weight=sample_weight)\n self.assertAllClose(0.26082, self.evaluate(result), atol=1e-5)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass HingeTest(tf.test.TestCase):\n\n def test_config(self):\n hinge_obj = metrics.Hinge(name='hinge', dtype=tf.int32)\n self.assertEqual(hinge_obj.name, 'hinge')\n self.assertEqual(hinge_obj._dtype, tf.int32)\n\n # Check save and restore config\n hinge_obj2 = metrics.Hinge.from_config(hinge_obj.get_config())\n self.assertEqual(hinge_obj2.name, 'hinge')\n 
self.assertEqual(hinge_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n hinge_obj = metrics.Hinge()\n self.evaluate(tf.compat.v1.variables_initializer(hinge_obj.variables))\n y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])\n y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],\n [-0.25, -1., 0.5, 0.6]])\n\n # metric = max(0, 1-y_true * y_pred), where y_true is -1/1\n\n # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]\n # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]\n # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]\n # metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]\n # = [0.6, 0.4125]\n # reduced metric = (0.6 + 0.4125) / 2\n\n update_op = hinge_obj.update_state(y_true, y_pred)\n self.evaluate(update_op)\n result = hinge_obj.result()\n self.assertAllClose(0.506, result, atol=1e-3)\n\n def test_weighted(self):\n hinge_obj = metrics.Hinge()\n self.evaluate(tf.compat.v1.variables_initializer(hinge_obj.variables))\n y_true = tf.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])\n y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],\n [-0.25, -1., 0.5, 0.6]])\n sample_weight = tf.constant([1.5, 2.])\n\n # metric = max(0, 1-y_true * y_pred), where y_true is -1/1\n\n # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]\n # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]\n # metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]\n # = [0.6, 0.4125]\n # weighted metric = [0.6 * 1.5, 0.4125 * 2]\n # reduced metric = (0.6 * 1.5 + 0.4125 * 2) / (1.5 + 2)\n\n result = hinge_obj(y_true, y_pred, sample_weight=sample_weight)\n self.assertAllClose(0.493, self.evaluate(result), atol=1e-3)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass SquaredHingeTest(tf.test.TestCase):\n\n def test_config(self):\n sq_hinge_obj = metrics.SquaredHinge(name='sq_hinge', dtype=tf.int32)\n self.assertEqual(sq_hinge_obj.name, 'sq_hinge')\n self.assertEqual(sq_hinge_obj._dtype, tf.int32)\n\n # Check save and restore config\n sq_hinge_obj2 = metrics.SquaredHinge.from_config(sq_hinge_obj.get_config())\n self.assertEqual(sq_hinge_obj2.name, 'sq_hinge')\n self.assertEqual(sq_hinge_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n sq_hinge_obj = metrics.SquaredHinge()\n self.evaluate(tf.compat.v1.variables_initializer(sq_hinge_obj.variables))\n y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])\n y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],\n [-0.25, -1., 0.5, 0.6]])\n\n # metric = max(0, 1-y_true * y_pred), where y_true is -1/1\n\n # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]\n # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]\n # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]\n # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]\n # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],\n # [0.5625, 0, 0.25, 0.16]]\n # metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]\n # = [0.485, 0.2431]\n # reduced metric = (0.485 + 0.2431) / 2\n\n update_op = sq_hinge_obj.update_state(y_true, y_pred)\n self.evaluate(update_op)\n result = sq_hinge_obj.result()\n self.assertAllClose(0.364, result, atol=1e-3)\n\n def test_weighted(self):\n sq_hinge_obj = metrics.SquaredHinge()\n self.evaluate(tf.compat.v1.variables_initializer(sq_hinge_obj.variables))\n y_true = tf.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])\n y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],\n [-0.25, -1., 0.5, 0.6]])\n sample_weight = tf.constant([1.5, 2.])\n\n # metric = max(0, 1-y_true * 
y_pred), where y_true is -1/1\n\n # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]\n # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]\n # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]\n # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],\n # [0.5625, 0, 0.25, 0.16]]\n # metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]\n # = [0.485, 0.2431]\n # weighted metric = [0.485 * 1.5, 0.2431 * 2]\n # reduced metric = (0.485 * 1.5 + 0.2431 * 2) / (1.5 + 2)\n\n result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)\n self.assertAllClose(0.347, self.evaluate(result), atol=1e-3)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass CategoricalHingeTest(tf.test.TestCase):\n\n def test_config(self):\n cat_hinge_obj = metrics.CategoricalHinge(\n name='cat_hinge', dtype=tf.int32)\n self.assertEqual(cat_hinge_obj.name, 'cat_hinge')\n self.assertEqual(cat_hinge_obj._dtype, tf.int32)\n\n # Check save and restore config\n cat_hinge_obj2 = metrics.CategoricalHinge.from_config(\n cat_hinge_obj.get_config())\n self.assertEqual(cat_hinge_obj2.name, 'cat_hinge')\n self.assertEqual(cat_hinge_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n cat_hinge_obj = metrics.CategoricalHinge()\n self.evaluate(tf.compat.v1.variables_initializer(cat_hinge_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n\n update_op = cat_hinge_obj.update_state(y_true, y_pred)\n self.evaluate(update_op)\n result = cat_hinge_obj.result()\n self.assertAllClose(0.5, result, atol=1e-5)\n\n def test_weighted(self):\n cat_hinge_obj = metrics.CategoricalHinge()\n self.evaluate(tf.compat.v1.variables_initializer(cat_hinge_obj.variables))\n y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),\n (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))\n y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),\n (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))\n sample_weight = tf.constant((1., 1.5, 2., 2.5))\n result = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)\n self.assertAllClose(0.5, self.evaluate(result), atol=1e-5)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass RootMeanSquaredErrorTest(tf.test.TestCase):\n\n def test_config(self):\n rmse_obj = metrics.RootMeanSquaredError(name='rmse', dtype=tf.int32)\n self.assertEqual(rmse_obj.name, 'rmse')\n self.assertEqual(rmse_obj._dtype, tf.int32)\n\n rmse_obj2 = metrics.RootMeanSquaredError.from_config(rmse_obj.get_config())\n self.assertEqual(rmse_obj2.name, 'rmse')\n self.assertEqual(rmse_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n rmse_obj = metrics.RootMeanSquaredError()\n self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables))\n y_true = tf.constant((2, 4, 6))\n y_pred = tf.constant((1, 3, 2))\n\n update_op = rmse_obj.update_state(y_true, y_pred)\n self.evaluate(update_op)\n result = rmse_obj.result()\n # error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6\n self.assertAllClose(math.sqrt(6), result, atol=1e-3)\n\n def test_weighted(self):\n rmse_obj = metrics.RootMeanSquaredError()\n self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables))\n y_true = tf.constant((2, 4, 6, 8))\n y_pred = tf.constant((1, 3, 2, 3))\n sample_weight = tf.constant((0, 1, 0, 1))\n result = rmse_obj(y_true, y_pred, sample_weight=sample_weight)\n 
self.assertAllClose(math.sqrt(13), self.evaluate(result), atol=1e-3)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass TopKCategoricalAccuracyTest(tf.test.TestCase):\n\n def test_config(self):\n a_obj = metrics.TopKCategoricalAccuracy(name='topkca', dtype=tf.int32)\n self.assertEqual(a_obj.name, 'topkca')\n self.assertEqual(a_obj._dtype, tf.int32)\n\n a_obj2 = metrics.TopKCategoricalAccuracy.from_config(a_obj.get_config())\n self.assertEqual(a_obj2.name, 'topkca')\n self.assertEqual(a_obj2._dtype, tf.int32)\n\n def test_correctness(self):\n a_obj = metrics.TopKCategoricalAccuracy()\n self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))\n y_true = tf.constant([[0, 0, 1], [0, 1, 0]])\n y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])\n\n result = a_obj(y_true, y_pred)\n self.assertEqual(1, self.evaluate(result)) # both the samples match\n\n # With `k` < 5.\n a_obj = metrics.TopKCategoricalAccuracy(k=1)\n self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))\n result = a_obj(y_true, y_pred)\n self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches\n\n # With `k` > 5.\n y_true = tf.constant([[0, 0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0]])\n y_pred = tf.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],\n [0.05, 0.95, 0, 0, 0, 0, 0]])\n a_obj = metrics.TopKCategoricalAccuracy(k=6)\n self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))\n result = a_obj(y_true, y_pred)\n self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.\n\n def test_weighted(self):\n a_obj = metrics.TopKCategoricalAccuracy(k=2)\n self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))\n y_true = tf.constant([[0, 1, 0], [1, 0, 0], [0, 0, 1]])\n y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])\n sample_weight = tf.constant((1.0, 0.0, 1.0))\n result = a_obj(y_true, y_pred, sample_weight=sample_weight)\n self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass SparseTopKCategoricalAccuracyTest(tf.test.TestCase):\n\n def test_config(self):\n a_obj = metrics.SparseTopKCategoricalAccuracy(\n name='stopkca', dtype=tf.int32)\n self.assertEqual(a_obj.name, 'stopkca')\n self.assertEqual(a_obj._dtype, tf.int32)\n\n a_obj2 = metrics.SparseTopKCategoricalAccuracy.from_config(\n a_obj.get_config())\n self.assertEqual(a_obj2.name, 'stopkca')\n self.assertEqual(a_obj2._dtype, tf.int32)\n\n def test_correctness(self):\n a_obj = metrics.SparseTopKCategoricalAccuracy()\n self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))\n y_true = tf.constant([2, 1])\n y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])\n\n result = a_obj(y_true, y_pred)\n self.assertEqual(1, self.evaluate(result)) # both the samples match\n\n # With `k` < 5.\n a_obj = metrics.SparseTopKCategoricalAccuracy(k=1)\n self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))\n result = a_obj(y_true, y_pred)\n self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches\n\n # With `k` > 5.\n y_pred = tf.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],\n [0.05, 0.95, 0, 0, 0, 0, 0]])\n a_obj = metrics.SparseTopKCategoricalAccuracy(k=6)\n self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))\n result = a_obj(y_true, y_pred)\n self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.\n\n def test_weighted(self):\n a_obj = metrics.SparseTopKCategoricalAccuracy(k=2)\n 
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))\n y_true = tf.constant([1, 0, 2])\n y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])\n sample_weight = tf.constant((1.0, 0.0, 1.0))\n result = a_obj(y_true, y_pred, sample_weight=sample_weight)\n self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass LogCoshErrorTest(tf.test.TestCase):\n\n def setup(self):\n y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))\n y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))\n\n self.batch_size = 6\n error = y_pred - y_true\n self.expected_results = np.log((np.exp(error) + np.exp(-error)) / 2)\n\n self.y_pred = tf.constant(y_pred, dtype=tf.float32)\n self.y_true = tf.constant(y_true)\n\n def test_config(self):\n logcosh_obj = metrics.LogCoshError(name='logcosh', dtype=tf.int32)\n self.assertEqual(logcosh_obj.name, 'logcosh')\n self.assertEqual(logcosh_obj._dtype, tf.int32)\n\n def test_unweighted(self):\n self.setup()\n logcosh_obj = metrics.LogCoshError()\n self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables))\n\n update_op = logcosh_obj.update_state(self.y_true, self.y_pred)\n self.evaluate(update_op)\n result = logcosh_obj.result()\n expected_result = np.sum(self.expected_results) / self.batch_size\n self.assertAllClose(result, expected_result, atol=1e-3)\n\n def test_weighted(self):\n self.setup()\n logcosh_obj = metrics.LogCoshError()\n self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables))\n sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))\n result = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)\n\n sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))\n expected_result = np.multiply(self.expected_results, sample_weight)\n expected_result = np.sum(expected_result) / np.sum(sample_weight)\n self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass PoissonTest(tf.test.TestCase):\n\n def setup(self):\n y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))\n y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))\n\n self.batch_size = 6\n self.expected_results = y_pred - np.multiply(y_true, np.log(y_pred))\n\n self.y_pred = tf.constant(y_pred, dtype=tf.float32)\n self.y_true = tf.constant(y_true)\n\n def test_config(self):\n poisson_obj = metrics.Poisson(name='poisson', dtype=tf.int32)\n self.assertEqual(poisson_obj.name, 'poisson')\n self.assertEqual(poisson_obj._dtype, tf.int32)\n\n poisson_obj2 = metrics.Poisson.from_config(poisson_obj.get_config())\n self.assertEqual(poisson_obj2.name, 'poisson')\n self.assertEqual(poisson_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n self.setup()\n poisson_obj = metrics.Poisson()\n self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables))\n\n update_op = poisson_obj.update_state(self.y_true, self.y_pred)\n self.evaluate(update_op)\n result = poisson_obj.result()\n expected_result = np.sum(self.expected_results) / self.batch_size\n self.assertAllClose(result, expected_result, atol=1e-3)\n\n def test_weighted(self):\n self.setup()\n poisson_obj = metrics.Poisson()\n self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables))\n sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))\n\n result = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)\n sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 
3))\n expected_result = np.multiply(self.expected_results, sample_weight)\n expected_result = np.sum(expected_result) / np.sum(sample_weight)\n self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass KLDivergenceTest(tf.test.TestCase):\n\n def setup(self):\n y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3))\n y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3))\n\n self.batch_size = 2\n self.expected_results = np.multiply(y_true, np.log(y_true / y_pred))\n\n self.y_pred = tf.constant(y_pred, dtype=tf.float32)\n self.y_true = tf.constant(y_true)\n\n def test_config(self):\n k_obj = metrics.KLDivergence(name='kld', dtype=tf.int32)\n self.assertEqual(k_obj.name, 'kld')\n self.assertEqual(k_obj._dtype, tf.int32)\n\n k_obj2 = metrics.KLDivergence.from_config(k_obj.get_config())\n self.assertEqual(k_obj2.name, 'kld')\n self.assertEqual(k_obj2._dtype, tf.int32)\n\n def test_unweighted(self):\n self.setup()\n k_obj = metrics.KLDivergence()\n self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables))\n\n update_op = k_obj.update_state(self.y_true, self.y_pred)\n self.evaluate(update_op)\n result = k_obj.result()\n expected_result = np.sum(self.expected_results) / self.batch_size\n self.assertAllClose(result, expected_result, atol=1e-3)\n\n def test_weighted(self):\n self.setup()\n k_obj = metrics.KLDivergence()\n self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables))\n\n sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))\n result = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)\n\n sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))\n expected_result = np.multiply(self.expected_results, sample_weight)\n expected_result = np.sum(expected_result) / (1.2 + 3.4)\n self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass MeanRelativeErrorTest(tf.test.TestCase):\n\n def test_config(self):\n normalizer = tf.constant([1, 3], dtype=tf.float32)\n mre_obj = metrics.MeanRelativeError(normalizer=normalizer, name='mre')\n self.assertEqual(mre_obj.name, 'mre')\n self.assertArrayNear(self.evaluate(mre_obj.normalizer), [1, 3], 1e-1)\n\n mre_obj2 = metrics.MeanRelativeError.from_config(mre_obj.get_config())\n self.assertEqual(mre_obj2.name, 'mre')\n self.assertArrayNear(self.evaluate(mre_obj2.normalizer), [1, 3], 1e-1)\n\n def test_unweighted(self):\n np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)\n np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)\n expected_error = np.mean(\n np.divide(np.absolute(np_y_pred - np_y_true), np_y_true))\n\n y_pred = tf.constant(np_y_pred, shape=(1, 4), dtype=tf.float32)\n y_true = tf.constant(np_y_true, shape=(1, 4))\n\n mre_obj = metrics.MeanRelativeError(normalizer=y_true)\n self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))\n\n result = mre_obj(y_true, y_pred)\n self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)\n\n def test_weighted(self):\n np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)\n np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)\n sample_weight = np.asarray([0.2, 0.3, 0.5, 0], dtype=np.float32)\n rel_errors = np.divide(np.absolute(np_y_pred - np_y_true), np_y_true)\n expected_error = np.sum(rel_errors * sample_weight)\n\n y_pred = tf.constant(np_y_pred, dtype=tf.float32)\n y_true = tf.constant(np_y_true)\n\n mre_obj = 
metrics.MeanRelativeError(normalizer=y_true)\n self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))\n\n result = mre_obj(\n y_true, y_pred, sample_weight=tf.constant(sample_weight))\n self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)\n\n def test_zero_normalizer(self):\n y_pred = tf.constant([2, 4], dtype=tf.float32)\n y_true = tf.constant([1, 3])\n\n mre_obj = metrics.MeanRelativeError(normalizer=tf.compat.v1.zeros_like(y_true))\n self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))\n\n result = mre_obj(y_true, y_pred)\n self.assertEqual(self.evaluate(result), 0)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass MeanIoUTest(tf.test.TestCase):\n\n def test_config(self):\n m_obj = metrics.MeanIoU(num_classes=2, name='mean_iou')\n self.assertEqual(m_obj.name, 'mean_iou')\n self.assertEqual(m_obj.num_classes, 2)\n\n m_obj2 = metrics.MeanIoU.from_config(m_obj.get_config())\n self.assertEqual(m_obj2.name, 'mean_iou')\n self.assertEqual(m_obj2.num_classes, 2)\n\n def test_unweighted(self):\n y_pred = [0, 1, 0, 1]\n y_true = [0, 0, 1, 1]\n\n m_obj = metrics.MeanIoU(num_classes=2)\n self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))\n\n result = m_obj(y_true, y_pred)\n\n # cm = [[1, 1],\n # [1, 1]]\n # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]\n # iou = true_positives / (sum_row + sum_col - true_positives))\n expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2\n self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)\n\n def test_weighted(self):\n y_pred = tf.constant([0, 1, 0, 1], dtype=tf.float32)\n y_true = tf.constant([0, 0, 1, 1])\n sample_weight = tf.constant([0.2, 0.3, 0.4, 0.1])\n\n m_obj = metrics.MeanIoU(num_classes=2)\n self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))\n\n result = m_obj(y_true, y_pred, sample_weight=sample_weight)\n\n # cm = [[0.2, 0.3],\n # [0.4, 0.1]]\n # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]\n # iou = true_positives / (sum_row + sum_col - true_positives))\n expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2\n self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)\n\n def test_multi_dim_input(self):\n y_pred = tf.constant([[0, 1], [0, 1]], dtype=tf.float32)\n y_true = tf.constant([[0, 0], [1, 1]])\n sample_weight = tf.constant([[0.2, 0.3], [0.4, 0.1]])\n\n m_obj = metrics.MeanIoU(num_classes=2)\n self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))\n\n result = m_obj(y_true, y_pred, sample_weight=sample_weight)\n\n # cm = [[0.2, 0.3],\n # [0.4, 0.1]]\n # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]\n # iou = true_positives / (sum_row + sum_col - true_positives))\n expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2\n self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)\n\n def test_zero_valid_entries(self):\n m_obj = metrics.MeanIoU(num_classes=2)\n self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))\n self.assertAllClose(self.evaluate(m_obj.result()), 0, atol=1e-3)\n\n def test_zero_and_non_zero_entries(self):\n y_pred = tf.constant([1], dtype=tf.float32)\n y_true = tf.constant([1])\n\n m_obj = metrics.MeanIoU(num_classes=2)\n self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))\n result = m_obj(y_true, y_pred)\n\n # cm = [[0, 0],\n # [0, 1]]\n # sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]\n # iou = true_positives / 
(sum_row + sum_col - true_positives))\n expected_result = (0 + 1 / (1 + 1 - 1)) / 1\n self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)\n\n\nclass MeanTensorTest(tf.test.TestCase, parameterized.TestCase):\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_config(self):\n with self.test_session():\n m = metrics.MeanTensor(name='mean_by_element')\n\n # check config\n self.assertEqual(m.name, 'mean_by_element')\n self.assertTrue(m.stateful)\n self.assertEqual(m.dtype, tf.float32)\n self.assertEmpty(m.variables)\n\n with self.assertRaisesRegex(ValueError, 'does not have any result yet'):\n m.result()\n\n self.evaluate(m([[3], [5], [3]]))\n self.assertAllEqual(m._shape, [3, 1])\n\n m2 = metrics.MeanTensor.from_config(m.get_config())\n self.assertEqual(m2.name, 'mean_by_element')\n self.assertTrue(m2.stateful)\n self.assertEqual(m2.dtype, tf.float32)\n self.assertEmpty(m2.variables)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_unweighted(self):\n with self.test_session():\n m = metrics.MeanTensor(dtype=tf.float64)\n\n # check __call__()\n self.assertAllClose(self.evaluate(m([100, 40])), [100, 40])\n self.assertAllClose(self.evaluate(m.total), [100, 40])\n self.assertAllClose(self.evaluate(m.count), [1, 1])\n\n # check update_state() and result() + state accumulation + tensor input\n update_op = m.update_state([\n tf.convert_to_tensor(1),\n tf.convert_to_tensor(5)\n ])\n self.evaluate(update_op)\n self.assertAllClose(self.evaluate(m.result()), [50.5, 22.5])\n self.assertAllClose(self.evaluate(m.total), [101, 45])\n self.assertAllClose(self.evaluate(m.count), [2, 2])\n\n # check reset_states()\n m.reset_states()\n self.assertAllClose(self.evaluate(m.total), [0, 0])\n self.assertAllClose(self.evaluate(m.count), [0, 0])\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_weighted(self):\n with self.test_session():\n m = metrics.MeanTensor(dtype=tf.float64)\n self.assertEqual(m.dtype, tf.float64)\n\n # check scalar weight\n result_t = m([100, 30], sample_weight=0.5)\n self.assertAllClose(self.evaluate(result_t), [100, 30])\n self.assertAllClose(self.evaluate(m.total), [50, 15])\n self.assertAllClose(self.evaluate(m.count), [0.5, 0.5])\n\n # check weights not scalar and weights rank matches values rank\n result_t = m([1, 5], sample_weight=[1, 0.2])\n result = self.evaluate(result_t)\n self.assertAllClose(result, [51 / 1.5, 16 / 0.7], 2)\n self.assertAllClose(self.evaluate(m.total), [51, 16])\n self.assertAllClose(self.evaluate(m.count), [1.5, 0.7])\n\n # check weights broadcast\n result_t = m([1, 2], sample_weight=0.5)\n self.assertAllClose(self.evaluate(result_t), [51.5 / 2, 17 / 1.2])\n self.assertAllClose(self.evaluate(m.total), [51.5, 17])\n self.assertAllClose(self.evaluate(m.count), [2, 1.2])\n\n # check weights squeeze\n result_t = m([1, 5], sample_weight=[[1], [0.2]])\n self.assertAllClose(self.evaluate(result_t), [52.5 / 3, 18 / 1.4])\n self.assertAllClose(self.evaluate(m.total), [52.5, 18])\n self.assertAllClose(self.evaluate(m.count), [3, 1.4])\n\n # check weights expand\n m = metrics.MeanTensor(dtype=tf.float64)\n self.evaluate(tf.compat.v1.variables_initializer(m.variables))\n result_t = m([[1], [5]], sample_weight=[1, 0.2])\n self.assertAllClose(self.evaluate(result_t), [[1], [5]])\n self.assertAllClose(self.evaluate(m.total), [[1], [1]])\n self.assertAllClose(self.evaluate(m.count), [[1], [0.2]])\n\n @combinations.generate(combinations.combine(mode=['graph', 
'eager']))\n def test_invalid_value_shape(self):\n m = metrics.MeanTensor(dtype=tf.float64)\n m([1])\n with self.assertRaisesRegex(\n ValueError, 'MeanTensor input values must always have the same shape'):\n m([1, 5])\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_build_in_tf_function(self):\n \"\"\"Ensure that variables are created correctly in a tf function.\"\"\"\n m = metrics.MeanTensor(dtype=tf.float64)\n\n @tf.function\n def call_metric(x):\n return m(x)\n\n with self.test_session():\n self.assertAllClose(self.evaluate(call_metric([100, 40])), [100, 40])\n self.assertAllClose(self.evaluate(m.total), [100, 40])\n self.assertAllClose(self.evaluate(m.count), [1, 1])\n self.assertAllClose(self.evaluate(call_metric([20, 2])), [60, 21])\n\n @combinations.generate(combinations.combine(mode=['eager']))\n def test_in_keras_model(self):\n class ModelWithMetric(Model):\n\n def __init__(self):\n super(ModelWithMetric, self).__init__()\n self.dense1 = layers.Dense(\n 3, activation='relu', kernel_initializer='ones')\n self.dense2 = layers.Dense(\n 1, activation='sigmoid', kernel_initializer='ones')\n self.mean_tensor = metrics.MeanTensor()\n\n def call(self, x):\n x = self.dense1(x)\n x = self.dense2(x)\n self.mean_tensor(self.dense1.kernel)\n return x\n\n model = ModelWithMetric()\n model.compile(\n loss='mae',\n optimizer='rmsprop',\n run_eagerly=True)\n\n x = np.ones((100, 4))\n y = np.zeros((100, 1))\n model.evaluate(x, y, batch_size=50)\n self.assertAllClose(self.evaluate(model.mean_tensor.result()),\n np.ones((4, 3)))\n self.assertAllClose(self.evaluate(model.mean_tensor.total),\n np.full((4, 3), 2))\n self.assertAllClose(self.evaluate(model.mean_tensor.count),\n np.full((4, 3), 2))\n\n model.evaluate(x, y, batch_size=25)\n self.assertAllClose(self.evaluate(model.mean_tensor.result()),\n np.ones((4, 3)))\n self.assertAllClose(self.evaluate(model.mean_tensor.total),\n np.full((4, 3), 4))\n self.assertAllClose(self.evaluate(model.mean_tensor.count),\n np.full((4, 3), 4))\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass BinaryCrossentropyTest(tf.test.TestCase):\n\n def test_config(self):\n bce_obj = metrics.BinaryCrossentropy(\n name='bce', dtype=tf.int32, label_smoothing=0.2)\n self.assertEqual(bce_obj.name, 'bce')\n self.assertEqual(bce_obj._dtype, tf.int32)\n\n old_config = bce_obj.get_config()\n self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)\n\n # Check save and restore config\n bce_obj2 = metrics.BinaryCrossentropy.from_config(old_config)\n self.assertEqual(bce_obj2.name, 'bce')\n self.assertEqual(bce_obj2._dtype, tf.int32)\n new_config = bce_obj2.get_config()\n self.assertDictEqual(old_config, new_config)\n\n def test_unweighted(self):\n bce_obj = metrics.BinaryCrossentropy()\n self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))\n y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])\n y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])\n result = bce_obj(y_true, y_pred)\n\n # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999\n # y` = clip_ops.clip_by_value(output, EPSILON, 1. 
- EPSILON)\n # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]\n\n # Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))\n # = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),\n # -log(Y_MAX + EPSILON), -log(1)]\n # = [(0 + 15.33) / 2, (0 + 0) / 2]\n # Reduced metric = 7.665 / 2\n\n self.assertAllClose(self.evaluate(result), 3.833, atol=1e-3)\n\n def test_unweighted_with_logits(self):\n bce_obj = metrics.BinaryCrossentropy(from_logits=True)\n self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))\n\n y_true = tf.constant([[1, 0, 1], [0, 1, 1]])\n y_pred = tf.constant([[100.0, -100.0, 100.0],\n [100.0, 100.0, -100.0]])\n result = bce_obj(y_true, y_pred)\n\n # Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))\n # (where x = logits and z = y_true)\n # = [((100 - 100 * 1 + log(1 + exp(-100))) +\n # (0 + 100 * 0 + log(1 + exp(-100))) +\n # (100 - 100 * 1 + log(1 + exp(-100))),\n # ((100 - 100 * 0 + log(1 + exp(-100))) +\n # (100 - 100 * 1 + log(1 + exp(-100))) +\n # (0 + 100 * 1 + log(1 + exp(-100))))]\n # = [(0 + 0 + 0) / 3, 200 / 3]\n # Reduced metric = (0 + 66.666) / 2\n\n self.assertAllClose(self.evaluate(result), 33.333, atol=1e-3)\n\n def test_weighted(self):\n bce_obj = metrics.BinaryCrossentropy()\n self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))\n y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])\n y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])\n sample_weight = tf.constant([1.5, 2.])\n result = bce_obj(y_true, y_pred, sample_weight=sample_weight)\n\n # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999\n # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)\n # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]\n\n # Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))\n # = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),\n # -log(Y_MAX + EPSILON), -log(1)]\n # = [(0 + 15.33) / 2, (0 + 0) / 2]\n # Weighted metric = [7.665 * 1.5, 0]\n # Reduced metric = 7.665 * 1.5 / (1.5 + 2)\n\n self.assertAllClose(self.evaluate(result), 3.285, atol=1e-3)\n\n def test_weighted_from_logits(self):\n bce_obj = metrics.BinaryCrossentropy(from_logits=True)\n self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))\n y_true = tf.constant([[1, 0, 1], [0, 1, 1]])\n y_pred = tf.constant([[100.0, -100.0, 100.0],\n [100.0, 100.0, -100.0]])\n sample_weight = tf.constant([2., 2.5])\n result = bce_obj(y_true, y_pred, sample_weight=sample_weight)\n\n # Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))\n # (where x = logits and z = y_true)\n # = [(0 + 0 + 0) / 3, 200 / 3]\n # Weighted metric = [0, 66.666 * 2.5]\n # Reduced metric = 66.666 * 2.5 / (2 + 2.5)\n\n self.assertAllClose(self.evaluate(result), 37.037, atol=1e-3)\n\n def test_label_smoothing(self):\n logits = tf.constant(((100., -100., -100.)))\n y_true = tf.constant(((1, 0, 1)))\n label_smoothing = 0.1\n # Metric: max(x, 0) - x * z + log(1 + exp(-abs(x)))\n # (where x = logits and z = y_true)\n # Label smoothing: z' = z * (1 - L) + 0.5L\n # After label smoothing, label 1 becomes 1 - 0.5L\n # label 0 becomes 0.5L\n # Applying the above two fns to the given input:\n # (100 - 100 * (1 - 0.5 L) + 0 +\n # 0 + 100 * (0.5 L) + 0 +\n # 0 + 100 * (1 - 0.5 L) + 0) * (1/3)\n # = (100 + 50L) * 1/3\n bce_obj = metrics.BinaryCrossentropy(\n from_logits=True, label_smoothing=label_smoothing)\n self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))\n result = bce_obj(y_true, logits)\n expected_value = (100.0 + 50.0 * label_smoothing) / 3.0\n 
self.assertAllClose(expected_value, self.evaluate(result), atol=1e-3)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass CategoricalCrossentropyTest(tf.test.TestCase):\n\n def test_config(self):\n cce_obj = metrics.CategoricalCrossentropy(\n name='cce', dtype=tf.int32, label_smoothing=0.2)\n self.assertEqual(cce_obj.name, 'cce')\n self.assertEqual(cce_obj._dtype, tf.int32)\n\n old_config = cce_obj.get_config()\n self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)\n\n # Check save and restore config\n cce_obj2 = metrics.CategoricalCrossentropy.from_config(old_config)\n self.assertEqual(cce_obj2.name, 'cce')\n self.assertEqual(cce_obj2._dtype, tf.int32)\n new_config = cce_obj2.get_config()\n self.assertDictEqual(old_config, new_config)\n\n def test_unweighted(self):\n cce_obj = metrics.CategoricalCrossentropy()\n self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))\n\n y_true = np.asarray([[0, 1, 0], [0, 0, 1]])\n y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n result = cce_obj(y_true, y_pred)\n\n # EPSILON = 1e-7, y = y_true, y` = y_pred\n # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)\n # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]\n\n # Metric = -sum(y * log(y'), axis = -1)\n # = -((log 0.95), (log 0.1))\n # = [0.051, 2.302]\n # Reduced metric = (0.051 + 2.302) / 2\n\n self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)\n\n def test_unweighted_from_logits(self):\n cce_obj = metrics.CategoricalCrossentropy(from_logits=True)\n self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))\n\n y_true = np.asarray([[0, 1, 0], [0, 0, 1]])\n logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)\n result = cce_obj(y_true, logits)\n\n # softmax = exp(logits) / sum(exp(logits), axis=-1)\n # xent = -sum(labels * log(softmax), 1)\n\n # exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]\n # sum(exp(logits), axis=-1) = [8106.802, 2986.394]\n # softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]\n # log(softmax) = [[-8.00045, -0.00045, -9.00045],\n # [-7.00182, -0.00182, -7.00182]]\n # labels * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]\n # xent = [0.00045, 7.00182]\n # Reduced xent = (0.00045 + 7.00182) / 2\n\n self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)\n\n def test_weighted(self):\n cce_obj = metrics.CategoricalCrossentropy()\n self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))\n\n y_true = np.asarray([[0, 1, 0], [0, 0, 1]])\n y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n sample_weight = tf.constant([1.5, 2.])\n result = cce_obj(y_true, y_pred, sample_weight=sample_weight)\n\n # EPSILON = 1e-7, y = y_true, y` = y_pred\n # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)\n # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]\n\n # Metric = -sum(y * log(y'), axis = -1)\n # = -((log 0.95), (log 0.1))\n # = [0.051, 2.302]\n # Weighted metric = [0.051 * 1.5, 2.302 * 2.]\n # Reduced metric = (0.051 * 1.5 + 2.302 * 2.) 
/ 3.5\n\n self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)\n\n def test_weighted_from_logits(self):\n cce_obj = metrics.CategoricalCrossentropy(from_logits=True)\n self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))\n\n y_true = np.asarray([[0, 1, 0], [0, 0, 1]])\n logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)\n sample_weight = tf.constant([1.5, 2.])\n result = cce_obj(y_true, logits, sample_weight=sample_weight)\n\n # softmax = exp(logits) / sum(exp(logits), axis=-1)\n # xent = -sum(labels * log(softmax), 1)\n # xent = [0.00045, 7.00182]\n # weighted xent = [0.000675, 14.00364]\n # Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)\n\n self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)\n\n def test_label_smoothing(self):\n y_true = np.asarray([[0, 1, 0], [0, 0, 1]])\n logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)\n label_smoothing = 0.1\n\n # Label smoothing: z' = z * (1 - L) + L/n,\n # where L = label smoothing value and n = num classes\n # Label value 1 becomes: 1 - L + L/n\n # Label value 0 becomes: L/n\n # y_true with label_smoothing = [[0.0333, 0.9333, 0.0333],\n # [0.0333, 0.0333, 0.9333]]\n\n # softmax = exp(logits) / sum(exp(logits), axis=-1)\n # xent = -sum(labels * log(softmax), 1)\n # log(softmax) = [[-8.00045, -0.00045, -9.00045],\n # [-7.00182, -0.00182, -7.00182]]\n # labels * log(softmax) = [[-0.26641, -0.00042, -0.29971],\n # [-0.23316, -0.00006, -6.53479]]\n # xent = [0.56654, 6.76801]\n # Reduced xent = (0.56654 + 6.76801) / 2\n\n cce_obj = metrics.CategoricalCrossentropy(\n from_logits=True, label_smoothing=label_smoothing)\n self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))\n loss = cce_obj(y_true, logits)\n self.assertAllClose(self.evaluate(loss), 3.667, atol=1e-3)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass SparseCategoricalCrossentropyTest(tf.test.TestCase):\n\n def test_config(self):\n scce_obj = metrics.SparseCategoricalCrossentropy(\n name='scce', dtype=tf.int32)\n self.assertEqual(scce_obj.name, 'scce')\n self.assertEqual(scce_obj.dtype, tf.int32)\n old_config = scce_obj.get_config()\n self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))\n\n # Check save and restore config\n scce_obj2 = metrics.SparseCategoricalCrossentropy.from_config(old_config)\n self.assertEqual(scce_obj2.name, 'scce')\n self.assertEqual(scce_obj2.dtype, tf.int32)\n new_config = scce_obj2.get_config()\n self.assertDictEqual(old_config, new_config)\n\n def test_unweighted(self):\n scce_obj = metrics.SparseCategoricalCrossentropy()\n self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))\n\n y_true = np.asarray([1, 2])\n y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n result = scce_obj(y_true, y_pred)\n\n # EPSILON = 1e-7, y = y_true, y` = y_pred\n # y` = clip_ops.clip_by_value(output, EPSILON, 1. 
- EPSILON)\n # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]\n # logits = log(y`) = [[-2.9957, -0.0513, -16.1181],\n # [-2.3026, -0.2231, -2.3026]]\n\n # softmax = exp(logits) / sum(exp(logits), axis=-1)\n # y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]\n # xent = -sum(y * log(softmax), 1)\n\n # exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]\n # sum(exp(logits), axis=-1) = [1, 1]\n # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]\n # log(softmax) = [[-2.9957, -0.0513, -16.1181],\n # [-2.3026, -0.2231, -2.3026]]\n # y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]\n # xent = [0.0513, 2.3026]\n # Reduced xent = (0.0513 + 2.3026) / 2\n\n self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)\n\n def test_unweighted_from_logits(self):\n scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)\n self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))\n\n y_true = np.asarray([1, 2])\n logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)\n result = scce_obj(y_true, logits)\n\n # softmax = exp(logits) / sum(exp(logits), axis=-1)\n # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]\n # xent = -sum(y_true * log(softmax), 1)\n\n # exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]\n # sum(exp(logits), axis=-1) = [8106.802, 2986.394]\n # softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]\n # log(softmax) = [[-8.00045, -0.00045, -9.00045],\n # [-7.00182, -0.00182, -7.00182]]\n # y_true * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]\n # xent = [0.00045, 7.00182]\n # Reduced xent = (0.00045 + 7.00182) / 2\n\n self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)\n\n def test_weighted(self):\n scce_obj = metrics.SparseCategoricalCrossentropy()\n self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))\n\n y_true = np.asarray([1, 2])\n y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n sample_weight = tf.constant([1.5, 2.])\n result = scce_obj(y_true, y_pred, sample_weight=sample_weight)\n\n # EPSILON = 1e-7, y = y_true, y` = y_pred\n # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)\n # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]\n # logits = log(y`) = [[-2.9957, -0.0513, -16.1181],\n # [-2.3026, -0.2231, -2.3026]]\n\n # softmax = exp(logits) / sum(exp(logits), axis=-1)\n # y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]\n # xent = -sum(y * log(softmax), 1)\n\n # exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]\n # sum(exp(logits), axis=-1) = [1, 1]\n # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]\n # log(softmax) = [[-2.9957, -0.0513, -16.1181],\n # [-2.3026, -0.2231, -2.3026]]\n # y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]\n # xent = [0.0513, 2.3026]\n # Weighted xent = [0.051 * 1.5, 2.302 * 2.]\n # Reduced xent = (0.051 * 1.5 + 2.302 * 2.) 
/ 3.5\n\n self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)\n\n def test_weighted_from_logits(self):\n scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)\n self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))\n\n y_true = np.asarray([1, 2])\n logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)\n sample_weight = tf.constant([1.5, 2.])\n result = scce_obj(y_true, logits, sample_weight=sample_weight)\n\n # softmax = exp(logits) / sum(exp(logits), axis=-1)\n # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]\n # xent = -sum(y_true * log(softmax), 1)\n # xent = [0.00045, 7.00182]\n # weighted xent = [0.000675, 14.00364]\n # Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)\n\n self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)\n\n def test_axis(self):\n scce_obj = metrics.SparseCategoricalCrossentropy(axis=0)\n self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))\n\n y_true = np.asarray([1, 2])\n y_pred = np.asarray([[0.05, 0.1], [0.95, 0.8], [0, 0.1]])\n result = scce_obj(y_true, y_pred)\n\n # EPSILON = 1e-7, y = y_true, y` = y_pred\n # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)\n # y` = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]\n # logits = log(y`) = [[-2.9957, -2.3026],\n # [-0.0513, -0.2231],\n # [-16.1181, -2.3026]]\n\n # softmax = exp(logits) / sum(exp(logits), axis=-1)\n # y = one_hot(y) = [[0, 0], [1, 0], [0, 1]]\n # xent = -sum(y * log(softmax), 1)\n\n # exp(logits) = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]\n # sum(exp(logits)) = [1, 1]\n # softmax = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]\n # log(softmax) = [[-2.9957, -2.3026],\n # [-0.0513, -0.2231],\n # [-16.1181, -2.3026]]\n # y * log(softmax) = [[0, 0], [-0.0513, 0], [0, -2.3026]]\n # xent = [0.0513, 2.3026]\n # Reduced xent = (0.0513 + 2.3026) / 2\n\n self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)\n\n\nclass BinaryTruePositives(metrics.Metric):\n\n def __init__(self, name='binary_true_positives', **kwargs):\n super(BinaryTruePositives, self).__init__(name=name, **kwargs)\n self.true_positives = self.add_weight(name='tp', initializer='zeros')\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = tf.cast(y_true, tf.bool)\n y_pred = tf.cast(y_pred, tf.bool)\n\n values = tf.logical_and(\n tf.equal(y_true, True), tf.equal(y_pred, True))\n values = tf.cast(values, self.dtype)\n if sample_weight is not None:\n sample_weight = tf.cast(sample_weight, dtype=self.dtype)\n sample_weight = tf.__internal__.ops.broadcast_weights(\n sample_weight, values)\n values = tf.multiply(values, sample_weight)\n self.true_positives.assign_add(tf.reduce_sum(values))\n\n def result(self):\n return self.true_positives\n\n\nclass BinaryTruePositivesViaControlFlow(metrics.Metric):\n\n def __init__(self, name='binary_true_positives', **kwargs):\n super(BinaryTruePositivesViaControlFlow, self).__init__(name=name, **kwargs)\n self.true_positives = self.add_weight(name='tp', initializer='zeros')\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = tf.cast(y_true, tf.bool)\n y_pred = tf.cast(y_pred, tf.bool)\n\n for i in range(len(y_true)):\n for j in range(len(y_true[i])):\n if y_true[i][j] and y_pred[i][j]:\n if sample_weight is None:\n self.true_positives.assign_add(1)\n else:\n self.true_positives.assign_add(sample_weight[i][0])\n\n def result(self):\n if tf.constant(True):\n return self.true_positives\n return 0.0\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass 
CustomMetricsTest(tf.test.TestCase):\n\n def test_config(self):\n btp_obj = BinaryTruePositives(name='btp', dtype=tf.int32)\n self.assertEqual(btp_obj.name, 'btp')\n self.assertEqual(btp_obj.dtype, tf.int32)\n\n # Check save and restore config\n btp_obj2 = BinaryTruePositives.from_config(btp_obj.get_config())\n self.assertEqual(btp_obj2.name, 'btp')\n self.assertEqual(btp_obj2.dtype, tf.int32)\n\n def test_unweighted(self):\n btp_obj = BinaryTruePositives()\n self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables))\n y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],\n [1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])\n y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],\n [0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])\n\n update_op = btp_obj.update_state(y_true, y_pred)\n self.evaluate(update_op)\n result = btp_obj.result()\n self.assertEqual(7, self.evaluate(result))\n\n def test_weighted(self):\n btp_obj = BinaryTruePositives()\n self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables))\n y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],\n [1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])\n y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],\n [0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])\n sample_weight = tf.constant([[1.], [1.5], [2.], [2.5]])\n result = btp_obj(y_true, y_pred, sample_weight=sample_weight)\n self.assertEqual(12, self.evaluate(result))\n\n def test_autograph(self):\n metric = BinaryTruePositivesViaControlFlow()\n self.evaluate(tf.compat.v1.variables_initializer(metric.variables))\n y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],\n [1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])\n y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],\n [0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])\n sample_weight = tf.constant([[1.], [1.5], [2.], [2.5]])\n\n @tf.function\n def compute_metric(y_true, y_pred, sample_weight):\n metric(y_true, y_pred, sample_weight)\n return metric.result()\n\n result = compute_metric(y_true, y_pred, sample_weight)\n self.assertEqual(12, self.evaluate(result))\n\n def test_metric_wrappers_autograph(self):\n def metric_fn(y_true, y_pred):\n x = tf.constant(0.0)\n for i in range(len(y_true)):\n for j in range(len(y_true[i])):\n if tf.equal(y_true[i][j], y_pred[i][j]) and y_true[i][j] > 0:\n x += 1.0\n return x\n\n mean_metric = metrics.MeanMetricWrapper(metric_fn)\n sum_metric = metrics.SumOverBatchSizeMetricWrapper(metric_fn)\n self.evaluate(tf.compat.v1.variables_initializer(mean_metric.variables))\n self.evaluate(tf.compat.v1.variables_initializer(sum_metric.variables))\n\n y_true = tf.constant([[0, 0, 0, 1, 0],\n [0, 0, 1, 1, 1],\n [1, 1, 1, 1, 0],\n [1, 1, 1, 0, 1]])\n y_pred = tf.constant([[0, 0, 1, 1, 0],\n [1, 1, 1, 1, 1],\n [0, 1, 0, 1, 0],\n [1, 1, 1, 1, 1]])\n\n @tf.function\n def tf_functioned_metric_fn(metric, y_true, y_pred):\n return metric(y_true, y_pred)\n\n metric_result = tf_functioned_metric_fn(mean_metric, y_true, y_pred)\n self.assertAllClose(self.evaluate(metric_result), 10, 1e-2)\n metric_result = tf_functioned_metric_fn(sum_metric, y_true, y_pred)\n self.assertAllClose(self.evaluate(metric_result), 10, 1e-2)\n\n def test_metric_not_tracked_as_sublayer_in_layer(self):\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self, **kwargs):\n super(MyLayer, self).__init__(**kwargs)\n self.mean_obj = metrics.Mean(name='my_mean_obj')\n\n def call(self, x):\n self.add_metric(\n tf.reduce_sum(x), aggregation='mean', name='my_mean_tensor')\n self.add_metric(self.mean_obj(x))\n return x\n\n layer = MyLayer()\n x = np.ones((1, 1))\n layer(x)\n 
self.assertLen(list(layer._flatten_layers(include_self=False)), 0)\n self.assertLen(layer.metrics, 2)\n\n def test_metric_not_tracked_as_sublayer_in_model(self):\n\n class MyModel(training_mod.Model):\n\n def __init__(self, **kwargs):\n super(MyModel, self).__init__(**kwargs)\n self.mean_obj = metrics.Mean(name='my_mean_obj')\n\n def call(self, x):\n self.add_metric(\n tf.reduce_sum(x), aggregation='mean', name='my_mean_tensor')\n self.add_metric(self.mean_obj(x))\n return x\n\n model = MyModel()\n x = np.ones((1, 1))\n model(x)\n self.assertLen(list(model._flatten_layers(include_self=False)), 0)\n self.assertLen(model.layers, 0)\n self.assertLen(model.metrics, 2)\n\n\ndef _get_model(compile_metrics):\n model_layers = [\n layers.Dense(3, activation='relu', kernel_initializer='ones'),\n layers.Dense(1, activation='sigmoid', kernel_initializer='ones')]\n\n model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))\n model.compile(\n loss='mae',\n metrics=compile_metrics,\n optimizer='rmsprop',\n run_eagerly=testing_utils.should_run_eagerly())\n return model\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass ResetStatesTest(keras_parameterized.TestCase):\n\n def test_reset_states_false_positives(self):\n fp_obj = metrics.FalsePositives()\n model = _get_model([fp_obj])\n x = np.ones((100, 4))\n y = np.zeros((100, 1))\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)\n\n def test_reset_states_false_negatives(self):\n fn_obj = metrics.FalseNegatives()\n model = _get_model([fn_obj])\n x = np.zeros((100, 4))\n y = np.ones((100, 1))\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)\n\n def test_reset_states_true_negatives(self):\n tn_obj = metrics.TrueNegatives()\n model = _get_model([tn_obj])\n x = np.zeros((100, 4))\n y = np.zeros((100, 1))\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)\n\n def test_reset_states_true_positives(self):\n tp_obj = metrics.TruePositives()\n model = _get_model([tp_obj])\n x = np.ones((100, 4))\n y = np.ones((100, 1))\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)\n\n def test_reset_states_precision(self):\n p_obj = metrics.Precision()\n model = _get_model([p_obj])\n x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))\n y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(p_obj.true_positives), 50.)\n self.assertEqual(self.evaluate(p_obj.false_positives), 50.)\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(p_obj.true_positives), 50.)\n self.assertEqual(self.evaluate(p_obj.false_positives), 50.)\n\n def test_reset_states_recall(self):\n r_obj = metrics.Recall()\n model = _get_model([r_obj])\n x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))\n y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(r_obj.true_positives), 50.)\n self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(r_obj.true_positives), 50.)\n 
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)\n\n def test_reset_states_sensitivity_at_specificity(self):\n s_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)\n model = _get_model([s_obj])\n x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),\n np.ones((25, 4))))\n y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),\n np.zeros((25, 1))))\n\n for _ in range(2):\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(s_obj.true_positives), 25.)\n self.assertEqual(self.evaluate(s_obj.false_positives), 25.)\n self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)\n self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)\n\n def test_reset_states_specificity_at_sensitivity(self):\n s_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1)\n model = _get_model([s_obj])\n x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),\n np.ones((25, 4))))\n y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),\n np.zeros((25, 1))))\n\n for _ in range(2):\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(s_obj.true_positives), 25.)\n self.assertEqual(self.evaluate(s_obj.false_positives), 25.)\n self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)\n self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)\n\n def test_reset_states_precision_at_recall(self):\n s_obj = metrics.PrecisionAtRecall(recall=0.5, num_thresholds=1)\n model = _get_model([s_obj])\n x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),\n np.ones((25, 4))))\n y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),\n np.zeros((25, 1))))\n\n for _ in range(2):\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(s_obj.true_positives), 25.)\n self.assertEqual(self.evaluate(s_obj.false_positives), 25.)\n self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)\n self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)\n\n def test_reset_states_recall_at_precision(self):\n s_obj = metrics.RecallAtPrecision(precision=0.5, num_thresholds=1)\n model = _get_model([s_obj])\n x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),\n np.ones((25, 4))))\n y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),\n np.zeros((25, 1))))\n\n for _ in range(2):\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(s_obj.true_positives), 25.)\n self.assertEqual(self.evaluate(s_obj.false_positives), 25.)\n self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)\n self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)\n\n def test_reset_states_auc(self):\n auc_obj = metrics.AUC(num_thresholds=3)\n model = _get_model([auc_obj])\n x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),\n np.ones((25, 4))))\n y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),\n np.zeros((25, 1))))\n\n for _ in range(2):\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.)\n self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.)\n self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.)\n self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.)\n\n def test_reset_states_auc_manual_thresholds(self):\n auc_obj = metrics.AUC(thresholds=[0.5])\n model = _get_model([auc_obj])\n x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),\n np.ones((25, 4))))\n y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 
1)),\n np.zeros((25, 1))))\n\n for _ in range(2):\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.)\n self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.)\n self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.)\n self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.)\n\n def test_reset_states_mean_iou(self):\n m_obj = metrics.MeanIoU(num_classes=2)\n model = _get_model([m_obj])\n x = np.asarray([[0, 0, 0, 0], [1, 1, 1, 1], [1, 0, 1, 0], [0, 1, 0, 1]],\n dtype=np.float32)\n y = np.asarray([[0], [1], [1], [1]], dtype=np.float32)\n model.evaluate(x, y)\n self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)\n self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)\n model.evaluate(x, y)\n self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)\n self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)\n\n def test_reset_states_recall_float64(self):\n # Test case for GitHub issue 36790.\n try:\n backend.set_floatx('float64')\n r_obj = metrics.Recall()\n model = _get_model([r_obj])\n x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))\n y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(r_obj.true_positives), 50.)\n self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)\n model.evaluate(x, y)\n self.assertEqual(self.evaluate(r_obj.true_positives), 50.)\n self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)\n finally:\n backend.set_floatx('float32')\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.compat.v1.get_default_graph",
"numpy.exp",
"numpy.mean",
"numpy.multiply",
"tensorflow.cast",
"tensorflow.compat.v1.placeholder",
"numpy.full",
"tensorflow.compat.v1.variables_initializer",
"numpy.log",
"tensorflow.constant",
"tensorflow.compat.v1.zeros_like",
"numpy.square",
"numpy.zeros",
"tensorflow.reduce_sum",
"tensorflow.test.main",
"numpy.absolute",
"tensorflow.__internal__.ops.broadcast_weights",
"tensorflow.train.Checkpoint",
"tensorflow.multiply",
"tensorflow.convert_to_tensor",
"numpy.asarray",
"numpy.sum",
"numpy.ones",
"tensorflow.equal",
"tensorflow.ragged.constant",
"numpy.maximum"
]
] |
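Illustrative sketch, not part of the dataset record above: the MeanIoU tests in that record derive their expected values by hand from a confusion matrix (see the cm / sum_row / sum_col comments in test_weighted). A minimal NumPy reproduction of that arithmetic, assuming the same weighted cm; using np.diag for the true positives is shorthand for the diagonal the comments list, not the Keras implementation itself.

import numpy as np

cm = np.array([[0.2, 0.3],
               [0.4, 0.1]])            # rows: true class, columns: predicted class
sum_row = cm.sum(axis=0)               # [0.6, 0.4] -- "sum_row" in the test comments
sum_col = cm.sum(axis=1)               # [0.5, 0.5] -- "sum_col" in the test comments
tp = np.diag(cm)                       # [0.2, 0.1] -- per-class true positives
iou = tp / (sum_row + sum_col - tp)    # per-class intersection over union
print(iou.mean())                      # ~0.1736, the expected_result in test_weighted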
taneroksuz/riscv-sfpu | [
"dbbb87d44e4fc790fb169b88b9845b7ea90e3be8"
] | [
"scripts/fma.py"
] | [
"#!/usr/bin/env python\n\nimport binascii\nimport sys\nimport subprocess\nimport os\nimport struct\nimport numpy as np\n\ndef fdiv_single(float_a,float_b,float_c,f):\n a = struct.unpack('!f', float_a.decode('hex'))[0]\n b = struct.unpack('!f', float_b.decode('hex'))[0]\n sig_a = int((int(float_a,16) & int(\"80000000\",16)) >> 31)\n sig_b = int((int(float_b,16) & int(\"80000000\",16)) >> 31)\n exp_a = int((int(float_a,16) & int(\"7F800000\",16)) >> 23)\n exp_b = int((int(float_b,16) & int(\"7F800000\",16)) >> 23)\n man_a = int(int(float_a,16) & int(\"007FFFFF\",16))\n man_b = int(int(float_b,16) & int(\"007FFFFF\",16))\n\n if exp_b == 0 and man_b == 0:\n y0 = 0\n elif exp_b == 255:\n y0 = 0\n else:\n y0 = 1/b\n\n e0 = 1 - b * y0\n y1 = y0 + y0 * e0\n e1 = e0 * e0\n y2 = y1 + y1 * e1\n e2 = e1 * e1\n y3 = y2 + y2 * e2\n q0 = a * y3\n r0 = a - b * q0\n Q = q0 + r0 * y3\n\n # x = y0\n # r = 1 - b * x\n # x = x + r * x\n # r = r * r\n # x = x + r * x\n # r = 1 - b * x\n # x = x + r * x\n # r = 1 - b * x\n # x = x + r * x\n # y = a * x\n # r = a - b * y\n # y = y + r * x\n # r = a - b * y\n # Q = y + r * x\n\n res = np.float32(Q)\n\n res = hex(struct.unpack('<I', struct.pack('<f',res))[0])[2:]\n res = '{0:08X}'.format(int(res,16))\n if (int(float_a,16) & int(\"7FC00000\",16)) == int(\"7FC00000\",16):\n res = c\n elif (int(float_b,16) & int(\"7FC00000\",16)) == int(\"7FC00000\",16):\n res = c\n elif (int(float_a,16) & int(\"7F800000\",16)) == int(\"7F800000\",16):\n res = c\n elif (int(float_b,16) & int(\"7F800000\",16)) == int(\"7F800000\",16):\n res = c\n elif (int(float_a,16) & int(\"7FFFFFFF\",16)) == int(\"00000000\",16):\n res = c\n elif (int(float_b,16) & int(\"7FFFFFFF\",16)) == int(\"00000000\",16):\n res = c\n\n diff = '{0:08X}'.format(int(res,16) ^ int(c,16))\n if diff != \"00000000\":\n f.writelines(float_a + \" / \" + float_b + \" = \" + float_c + \" ^ \" + res + \" => \" + diff + \" : \"\n + str(struct.unpack('!f', float_a.decode('hex'))[0]) + \" / \"\n + str(struct.unpack('!f', float_b.decode('hex'))[0]) + \" = \"\n + str(struct.unpack('!f', float_c.decode('hex'))[0]) + \" ^ \"\n + str(struct.unpack('!f', res.decode('hex'))[0]) + \"\\n\")\n return res\n\nif __name__ == '__main__':\n\n if len(sys.argv) < 4:\n print('Expected usage: {0} <operation> <folder> <testfloat_gen>'.format(sys.argv[0]))\n sys.exit(1)\n\n operation = sys.argv[1]\n folder = sys.argv[2]\n testfloat = sys.argv[3]\n\n list_operation = [ \\\n ('f32_div',\"0\",\"0\",\"010\"), \\\n ('f32_sqrt',\"0\",\"0\",\"020\")]\n\n find = False\n for i in range(len(list_operation)):\n if operation == list_operation[i][0]:\n get_operation = list_operation[i]\n find = True\n break\n\n if not find:\n sys.exit(1)\n\n command = 'chmod +x {0}/testfloat_gen'.format(testfloat)\n output = subprocess.check_output(command.split())\n\n command = '{0}/testfloat_gen {1}'.format(testfloat,operation)\n output = subprocess.check_output(command.split())\n\n filename = folder + operation+\"_compare.output\"\n f = open(filename,\"w+\")\n\n wort = \"\"\n index = 0\n for i in range(len(output)):\n if output[i] != ' ' and output[i] != '\\n':\n wort = wort + output[i]\n elif output[i] == ' ':\n if index == 0:\n a = wort\n elif index == 1:\n b = wort\n elif index == 2:\n c = wort\n index = index + 1;\n wort = \"\"\n elif output[i] == '\\n':\n fdiv_single(a,b,c,f)\n index = 0\n wort = \"\"\n\n f.close()\n"
] | [
[
"numpy.float32"
]
] |
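Illustrative sketch, not part of the dataset record above: fdiv_single() in that record approximates a/b by refining a reciprocal estimate of b with repeated multiply-add steps and then applying a remainder correction, the FMA-friendly Newton-Raphson scheme its commented-out pseudocode spells out. A minimal standalone version of the same recurrence; the explicit seed y0 passed in here merely stands in for whatever initial reciprocal estimate the hardware lookup would supply.

import numpy as np

def nr_divide(a, b, y0):
    # Approximate a / b starting from a rough reciprocal estimate y0 of 1/b.
    e = 1.0 - b * y0        # error of the reciprocal estimate
    y = y0 + y0 * e         # refinement 1
    e = e * e
    y = y + y * e           # refinement 2
    e = e * e
    y = y + y * e           # refinement 3
    q = a * y               # provisional quotient
    r = a - b * q           # remainder correction term
    return np.float32(q + r * y)

print(nr_divide(1.0, 3.0, 0.25))   # ~0.33333334, i.e. float32(1/3)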
superdan-t/Not-A-Tank | [
"35784b955818b4b0db0a9dca1c5fd4cff9509cdd"
] | [
"TenacityRover/controller/imagezmq.py"
] | [
"\"\"\" imagezmq: Transport OpenCV images via ZMQ.\nClasses that transport OpenCV images from one computer to another. For example,\nOpenCV images gathered by a Raspberry Pi camera could be sent to another\ncomputer for displaying the images using cv2.imshow() or for further image\nprocessing. See API and Usage Examples for details.\nCopyright (c) 2017 by Jeff Bass.\nLicense: MIT, see LICENSE for more details.\n\"\"\"\n\nimport zmq\nimport numpy as np\nimport cv2\n\n\nclass ImageSender():\n \"\"\"Opens zmq REQ socket and sends images.\n Opens a zmq REQ socket on the image sending computer, often a\n Raspberry Pi, that will be sending OpenCV images and\n related text messages to the hub computer. Provides methods to\n send images or send jpg compressed images.\n Arguments:\n connect_to: the tcp address:port of the hub computer.\n \"\"\"\n\n def __init__(self, connect_to='tcp://127.0.0.1:5555'):\n \"\"\"Initializes zmq socket for sending images to the hub.\n Expects an open socket at the connect_to tcp address; it will\n connect to that remote socket after setting up the REQ\n socket on this computer.\n \"\"\"\n\n self.zmq_context = SerializingContext()\n self.zmq_socket = self.zmq_context.socket(zmq.REQ)\n self.zmq_socket.connect(connect_to)\n\n def send_image(self, msg, image):\n \"\"\"Sends OpenCV image and msg to hub computer.\n Arguments:\n msg: text message or image name.\n image: OpenCV image to send to hub.\n Returns:\n A text reply from hub.\n \"\"\"\n\n if image.flags['C_CONTIGUOUS']:\n # if image is already contiguous in memory just send it\n self.zmq_socket.send_array(image, msg, copy=False)\n else:\n # else make it contiguous before sending\n image = np.ascontiguousarray(image)\n self.zmq_socket.send_array(image, msg, copy=False)\n hub_reply = self.zmq_socket.recv() # receive the reply message\n return hub_reply\n\n def send_jpg(self, msg, jpg_buffer):\n \"\"\"Sends msg text and jpg buffer to hub computer.\n Arguments:\n msg: image name or message text.\n jpg_buffer: bytestring containing the jpg image to send to hub.\n Returns:\n A text reply from hub.\n \"\"\"\n\n self.zmq_socket.send_jpg(msg, jpg_buffer, copy=False)\n hub_reply = self.zmq_socket.recv() # receive the reply message\n return hub_reply\n\n\nclass ImageHub():\n \"\"\"Opens zmq REP socket and receives images.\n Opens a zmq REP socket on the hub compuer, for example,\n a Mac, that will be receiving and displaying or processing OpenCV images\n and related text messages. 
Provides methods to receive images or receive\n jpg compressed images.\n Arguments:\n open_port: (optional) the socket to open for receiving REQ requests.\n \"\"\"\n\n def __init__(self, open_port='tcp://*:5555'):\n \"\"\"Initializes zmq REP socket to receive images and text.\n \"\"\"\n\n self.zmq_context = SerializingContext()\n self.zmq_socket = self.zmq_context.socket(zmq.REP)\n self.zmq_socket.bind(open_port)\n\n def recv_image(self, copy=False):\n \"\"\"Receives OpenCV image and text msg.\n Arguments:\n copy: (optional) zmq copy flag.\n Returns:\n msg: text msg, often the image name.\n image: OpenCV image.\n \"\"\"\n\n msg, image = self.zmq_socket.recv_array(copy=False)\n return msg, image\n\n def recv_jpg(self, copy=False):\n \"\"\"Receives text msg, jpg buffer.\n Arguments:\n copy: (optional) zmq copy flag\n Returns:\n msg: text message, often image name\n jpg_buffer: bytestring jpg compressed image\n \"\"\"\n\n msg, jpg_buffer = self.zmq_socket.recv_jpg(copy=False)\n return msg, jpg_buffer\n\n def send_reply(self, reply_message=b'OK'):\n \"\"\"Sends the zmq REP reply message.\n Arguments:\n reply_message: reply message text, often just string 'OK'\n \"\"\"\n self.zmq_socket.send(reply_message)\n\n\nclass SerializingSocket(zmq.Socket):\n \"\"\"Numpy array serialization methods.\n Modelled on PyZMQ serialization examples.\n Used for sending / receiving OpenCV images, which are Numpy arrays.\n Also used for sending / receiving jpg compressed OpenCV images.\n \"\"\"\n\n def send_array(self, A, msg='NoName', flags=0, copy=True, track=False):\n \"\"\"Sends a numpy array with metadata and text message.\n Sends a numpy array with the metadata necessary for reconstructing\n the array (dtype,shape). Also sends a text msg, often the array or\n image name.\n Arguments:\n A: numpy array or OpenCV image.\n msg: (optional) array name, image name or text message.\n flags: (optional) zmq flags.\n copy: (optional) zmq copy flag.\n track: (optional) zmq track flag.\n \"\"\"\n\n md = dict(\n msg=msg,\n dtype=str(A.dtype),\n shape=A.shape,\n )\n self.send_json(md, flags | zmq.SNDMORE)\n return self.send(A, flags, copy=copy, track=track)\n\n def send_jpg(self,\n msg='NoName',\n jpg_buffer=b'00',\n flags=0,\n copy=True,\n track=False):\n \"\"\"Send a jpg buffer with a text message.\n Sends a jpg bytestring of an OpenCV image.\n Also sends text msg, often the image name.\n Arguments:\n msg: image name or text message.\n jpg_buffer: jpg buffer of compressed image to be sent.\n flags: (optional) zmq flags.\n copy: (optional) zmq copy flag.\n track: (optional) zmq track flag.\n \"\"\"\n\n md = dict(msg=msg, )\n self.send_json(md, flags | zmq.SNDMORE)\n return self.send(jpg_buffer, flags, copy=copy, track=track)\n\n def recv_array(self, flags=0, copy=True, track=False):\n \"\"\"Receives a numpy array with metadata and text message.\n Receives a numpy array with the metadata necessary\n for reconstructing the array (dtype,shape).\n Returns the array and a text msg, often the array or image name.\n Arguments:\n flags: (optional) zmq flags.\n copy: (optional) zmq copy flag.\n track: (optional) zmq track flag.\n Returns:\n msg: image name or text message.\n A: numpy array or OpenCV image reconstructed with dtype and shape.\n \"\"\"\n\n md = self.recv_json(flags=flags)\n msg = self.recv(flags=flags, copy=copy, track=track)\n A = np.frombuffer(msg, dtype=md['dtype'])\n return (md['msg'], A.reshape(md['shape']))\n\n def recv_jpg(self, flags=0, copy=True, track=False):\n \"\"\"Receives a jpg buffer and a text msg.\n 
Receives a jpg bytestring of an OpenCV image.\n Also receives a text msg, often the image name.\n Arguments:\n flags: (optional) zmq flags.\n copy: (optional) zmq copy flag.\n track: (optional) zmq track flag.\n Returns:\n msg: image name or text message.\n jpg_buffer: bytestring, containing jpg image.\n \"\"\"\n\n md = self.recv_json(flags=flags) # metadata text\n jpg_buffer = self.recv(flags=flags, copy=copy, track=track)\n return (md['msg'], jpg_buffer)\n\n\nclass SerializingContext(zmq.Context):\n _socket_class = SerializingSocket"
] | [
[
"numpy.ascontiguousarray",
"numpy.frombuffer"
]
] |
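Illustrative sketch, not part of the dataset record above: the imagezmq record documents a REQ/REP pairing in its docstrings (ImageSender ships frames, ImageHub receives them and answers). A minimal single-machine round trip, assuming the file is importable as imagezmq and that pyzmq and numpy are installed; the port, the 'camera-1' label and the zero-filled frame are placeholders.

import threading

import numpy as np

import imagezmq  # the module defined in the record above


def hub_once():
    # Receiving side: bind, take one frame, acknowledge it.
    hub = imagezmq.ImageHub(open_port='tcp://*:5555')
    msg, image = hub.recv_image()              # blocks until a frame arrives
    print('hub received', msg, image.shape)
    hub.send_reply(b'OK')


threading.Thread(target=hub_once, daemon=True).start()

# Sending side: connect, ship one dummy OpenCV-style frame, wait for the reply.
sender = imagezmq.ImageSender(connect_to='tcp://127.0.0.1:5555')
frame = np.zeros((480, 640, 3), dtype=np.uint8)
print(sender.send_image('camera-1', frame))    # -> b'OK' once the hub replies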
OumaimaHourrane/SmallDataLongDocuments | [
"57e883423dd4b9fcc5de191928ab4fadc404bcb2"
] | [
"oumaima/make_dataset.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 15 12:06:19 2020\n\n@author: jonas\n\n@tile: make_dataset\n\n@description: script to transform taxonomy from excel sheet to machine readable format in python.\n\"\"\"\n#%%\n'''import packages'''\nimport os\nimport sys\n\nimport pandas as pd \nimport pickle\n#%%\n\ndef import_raw_data():\n \n '''\n import and minimal processing of taxonomy from excel \n '''\n \n taxonomy = pd.read_excel (os.path.abspath(os.path.join('..', 'data/raw'))+'/tagging_table.xlsx')\n # get column names:\n columnNames = taxonomy.iloc[0] \n taxonomy = taxonomy[1:] \n taxonomy.columns = columnNames\n print('raw data shape:', taxonomy.shape)\n # delete entries without PIMS ID:\n taxonomy = taxonomy[taxonomy['PIMS #'].notna()] \n print('only entries with PIMS ID:', taxonomy.shape) \n # delete columns without names:\n taxonomy = taxonomy.loc[:, taxonomy.columns.notnull()] \n print('only columns with entries:', taxonomy.shape) \n # remove white spaces in column names and lowercase names:\n taxonomy.columns = taxonomy.columns.str.replace(' ', '_').str.lower()\n # rename pims id column:\n taxonomy = taxonomy.rename(columns={\"pims_#\": \"PIMS_ID\"})\n \n return taxonomy\n\ndef import_api_data():\n \n '''\n function that imports data from PIMS+ API. \n '''\n \ndef create_training_texts(dataframe, compare_with_API = False):\n \n \"\"\"\n 1. Takes in whole taxonomy and outputs different training data text fields and replaces \"nan\" with empty spaces. \n \"\"\"\n # objectives\n dataframe['objectives'] = dataframe['project_objective'].fillna('').astype(str) + dataframe['project_objective_2'].fillna('').astype(str)\n \n # rename description\n dataframe['description'] = dataframe['project_description'].fillna('').astype(str)\n\n \n # outcomes\n dataframe['outcomes'] = dataframe['outcome_1'].fillna('').astype(str)\n \n # outputs\n dataframe['outputs'] = dataframe[['output_1.1', 'output_1.2', 'output_1.3',\n 'output_1.4', 'output_1.5', 'outcome_2', 'output_2.1', 'output_2.2',\n 'output_2.3', 'output_2.4', 'output_2.5', 'outcome_3', 'output_3.1',\n 'output_3.2', 'output_3.3', 'output_3.4', 'output_3.5', 'outcome_4',\n 'output_4.1', 'output_4.2', 'output_4.3', 'output_4.4', 'output_4.5', \n 'outcome_5', 'output_5.1', 'output_5.2', 'output_5.3',\n 'output_5.4_(no_entry)', 'output_5.5_(no_entry)',\n 'outcome_6_(no_entry)', 'output_6.1', 'output_6.2', 'output_6.3',\n 'output_6.4_(no_entry)', 'output_6.5_(no_entry)',\n 'outcome_7_(no_entry)', 'output_7.1', 'output_7.2_(no_entry)',\n 'output_7.3_(no_entry)', 'output_7.4_(no_entry)','output_7.5_(no_entry)']].fillna('').astype(str).agg(' '.join, axis=1)\n\n \n dataframe['logframe'] = dataframe[['objectives', 'outcomes', 'outputs']].agg(' '.join, axis=1)\n \n dataframe['all_text'] = dataframe['description'] + dataframe['logframe']\n\n print('extracting and merging done!')\n \n \"\"\"\n 2. Create dataframe with only raw text fields and PimsIDs\n \"\"\"\n raw_text = taxonomy[['PIMS_ID', 'all_text', 'logframe', 'description', 'objectives', 'outcomes', 'outputs']]\n \n \n \"\"\" \n 3. 
if bool is set as True: Compare with downloaded logframes, descriptions and objectives from PIMS+ to see if they \n match in length and compleetness.\n - Replace empty fields with non-empty fiels if applicable.\n \n \"\"\"\n \n if compare_with_API == True:\n '''compare with PIMS+ projects/logframes etc and only keep most relevant'''\n print('Pims_plus API is considered to complete training data')\n \n else:\n print('only taxonomy data is considered')\n \n \n '''pickle data'''\n with open(os.path.abspath(os.path.join('..', 'data/interim'))+'/raw_text.pkl', 'wb') as handle:\n pickle.dump(raw_text, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n #pickle data also for personal QA project:\n # with open(os.path.join('/Users/jonas/Google Drive/github_repos/ClosedDomainQA/data')+'/raw_text.pkl', 'wb') as handle:\n # pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n return raw_text \n \n\ndef create_subset(dataframe, column_title):\n \n '''\n Takes datafram as input and column name and outputs a dataframe with two columns: project_id and column without empty fields.\n - may be appended with more meta_data than only project_id for downstream tasks.\n \n '''\n \n print('deleting all empty fields and creating subset for:', column_title) \n #keep only projects with column_title that contain alphabetic letter:\n data = dataframe[dataframe[column_title].str.contains('[A-Za-z]')]\n\n\n\n \n data = data[['PIMS_ID', column_title]]\n print('remaining projects with non empty field', column_title, data.shape)\n \n #reset index\n data = data.reset_index(drop=True)\n \n #rename text column to text\n data = data.rename(columns={column_title: \"text\"})\n\n\n '''pickle data'''\n with open(os.path.abspath(os.path.join('..', 'data/interim'))+'/'+column_title+'.pkl', 'wb') as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n #pickle data also for personal QA project:\n # with open(os.path.join('/Users/jonas/Google Drive/github_repos/ClosedDomainQA/data')+'/'+column_title+'.pkl', 'wb') as handle:\n # pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return data\n\n\ndef labeling_encoding(dataframe, categories, label_powerset = bool):\n '''\n Function that encodes the label and gives option for different classification architectures (for example label_powerset)\n '''\n # generate binary values using get_dummies\n # categories is a list of categories from the main dataframe to be converted into on-hot encoding labels\n df = pd.DataFrame(dataframe, columns=categories)\n dum_df = pd.get_dummies(df, columns=categories)\n # merge with main df on key values\n df = df.join(dum_df)\n return df\n \n\n#%%\n\ntaxonomy = import_raw_data()\n\nraw_text = create_training_texts(taxonomy)\n\ndata = create_subset(taxonomy, \"logframe\")\n\nprint(data)\n#%%\n\n"
] | [
[
"pandas.DataFrame",
"pandas.get_dummies"
]
] |
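The `labeling_encoding` helper in the script above one-hot encodes label columns with `pandas.get_dummies` and joins the dummies back onto the frame. Below is a minimal sketch of that pattern on made-up data; the `focal_area` column and its values are hypothetical stand-ins for the labels read from `tagging_table.xlsx`.

```python
import pandas as pd

# Hypothetical label column; the real script takes its categories from the tagging table.
df = pd.DataFrame({
    "PIMS_ID": [1, 2, 3],
    "focal_area": ["biodiversity", "climate", "biodiversity"],
})

# One-hot encode the categorical column, then join the dummy columns back on.
dum_df = pd.get_dummies(df[["focal_area"]], columns=["focal_area"])
df = df.join(dum_df)
print(df.columns.tolist())
# ['PIMS_ID', 'focal_area', 'focal_area_biodiversity', 'focal_area_climate']
```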
gregoryng/SpaceXtract | [
"e011b43b442e6f50095ec1e1289904cad83379a5"
] | [
"src/Analysis/analyse_raw_telemetry.py"
] | [
"import numpy as np\nimport json\n\nfrom pandas.util.testing import all_timeseries_index_generator\nfrom scipy.signal import savgol_filter\nfrom scipy.optimize import curve_fit\nimport scipy\nimport matplotlib.pyplot as plt\nfrom math import fabs, pi, asin, sin, log, degrees, acos\nimport trendline\nfrom scipy.interpolate import interp1d\nimport sys\nfrom collections import OrderedDict\nfrom skaero.atmosphere import coesa\nimport math\nimport scipy.signal as ss\n\n\n\ndef read_list(file):\n data = json.loads(file.readline())\n\n for x in data:\n data[x] = [data[x]]\n\n for line in file:\n dict = json.loads(line)\n\n for x in dict:\n data[x].append(dict[x])\n\n return data\n\n\ndef gravity(altitude):\n return 4*10**14/(6.375*10**6 + altitude)**2\n\n\n\ndef refine_graph(x, y):\n new_x = [x[0]]\n new_y = [y[0]]\n\n\n for i in range(1, len(x)):\n if y[i] != new_y[-1]:\n new_x.append(x[i])\n new_y.append(y[i])\n\n\n return new_x, new_y\n\n\n\ndef refine_altitude(time, altitude, blur=True):\n new_time = [time[0]]\n new_altitude = [altitude[0]]\n\n for i in range(1, len(time)):\n if blur and altitude[i]/1000 > 0 and altitude[i]/1000 != int(altitude[i]/1000):\n continue\n\n if new_altitude[-1] < altitude[i]:\n new_altitude.append(altitude[i])\n new_time.append(time[i])\n\n elif new_altitude[-1] > altitude[i] and altitude[i] != altitude[i-1]:\n new_altitude.append(altitude[i-1])\n new_time.append(time[i-1])\n\n if new_time[-1] != time[-1]:\n new_time.append(time[-1])\n new_altitude.append(altitude[-1])\n\n\n for i in range(len(new_altitude)):\n if new_altitude[i] >= 100000:\n new_altitude[i] -= 500\n elif new_altitude[i] > 50:\n new_altitude[i] -= 50\n\n\n return new_time, new_altitude\n\n\ndef derivative(x_axis, y_axis, dx):\n \"\"\"\n Calculate the derivative of f(x)\n :param x_axis: list of the x axis data\n :param y_axis: list of the y axis data\n :param dx: difference of x axis\n :return: f'(x)\n \"\"\"\n der = (dx//2+dx%2)*[0]\n\n for i in range(len(x_axis) - dx):\n der.append((y_axis[i + dx] - y_axis[i]) / (x_axis[i + dx] - x_axis[i]))\n\n der += dx // 2 * [der[-1]]\n\n for i in range(dx//2+dx%2):\n der[i] = der[dx//2+dx%2+1]\n\n return der\n\n\ndef flip_direction(x_axis, y_axis, flip):\n for i in range(1, len(x_axis)):\n if x_axis[i] > flip:\n y_axis[i] = -y_axis[i]\n\n\ndef find_MECO(acceleration):\n return np.where(acceleration < 5)[0][0]\n\n\ndef find_altitude_graph(time, altitude, blur=False, interp=False):\n altitude = np.multiply(1000, altitude)\n temp_time, temp_altitude = refine_altitude(time, altitude, blur=blur)\n\n f = interp1d(temp_time, temp_altitude, kind=3)\n\n global altitude_time, ALTITUDE_INTERVAL\n\n t = np.arange(temp_time[0], temp_time[-1], ALTITUDE_INTERVAL)\n\n if interp:\n return np.interp(altitude_time, t, f(t))\n\n return np.interp(altitude_time, temp_time, temp_altitude)\n\n\ndef pythagoras(hypotenuse, leg):\n return [max(0, h**2-l**2)**0.5 for h, l in zip(hypotenuse, leg)]\n\n\ndef final_altitude(velocity, altitude):\n u = 4*10**14\n return -u/(velocity**2/2 - u/(altitude+6.375*10**6)) - 6.375*10**6\n\n\n\n\ndef find_angle_graph(velocity, vertical_velocity, interp=False):\n angle = []\n\n for i in range(len(velocity)):\n if velocity[i] == 0:\n angle.append(angle[-1])\n else:\n ratio = max(-1, min(vertical_velocity[i] / velocity[i], 1))\n angle.append(asin(ratio))\n\n angle = savgol_filter(angle, 5, 1)\n\n if interp:\n angle = savgol_filter(angle, 11, 1)\n return ss.medfilt(angle, kernel_size=7)\n\n return angle\n\n\n\n\n\n\ndef find_downrange_graph(time, 
horizontal_velocity, d0=0, dx=1):\n downrange_distance = [d0]\n\n for i in range(dx, len(time)):\n downrange_distance.append(downrange_distance[-1] +\n (time[i] - time[i - dx]) * (\n horizontal_velocity[i] + horizontal_velocity[i - dx]) / 2\n )\n\n return downrange_distance\n\n\ndef find_flip_point(y_axis, y_der, thresh = 1, dx = 10, start_index = 0):\n small = [i for i, y in enumerate(y_axis) if fabs(y) < thresh]\n\n if len(small) == 0:\n return None\n\n for i in small:\n if i >= dx and i < len(y_axis)-dx:\n if y_der[i-dx] * y_der[i+dx] < 0:\n return i\n\n return None\n\n\n\n\ndef smooth_altitude_with_velocity(altitude):\n new_altitude = []\n\n velocity_altitude = find_downrange_graph(altitude_time, velocity, d0=altitude[0])\n velocity_altitude_rev = find_downrange_graph(altitude_time, velocity[::-1], d0=altitude[-1])\n\n for i in range(len(altitude_time)):\n if velocity_altitude[i] - altitude[i] < 0 or velocity_altitude[i] < 1000:\n new_altitude.append(velocity_altitude[i])\n vertical_velocity[i] = velocity[i]\n\n elif (velocity_altitude_rev[-(i + 1)] - altitude[i] < 0 and fabs(altitude[i]) < 10000) or fabs(altitude[i]) < 1000:\n new_altitude.append(altitude[i])\n # new_altitude.append(velocity_altitude[-1] - velocity_altitude[i])\n vertical_velocity[i] = np.sign(vertical_velocity[i - 1]) * velocity[i]\n else:\n new_altitude.append(altitude[i])\n\n return new_altitude\n\n\n\n\n\ndef find_gap(data):\n delta_time = [0] + [data['time'][i] - data['time'][i - 1] for i in range(1, len(data['time']))]\n max_index = np.argmax(delta_time)\n\n start = data['time'][max_index - 1]\n end = data['time'][max_index]\n\n if max_index == 0 or end - start < MIN_COAST:\n return -1\n\n return max_index-1\n\n\ndef find_flip_point2(time, velocity, vertical_velocity, acceleration):\n dv = np.subtract(velocity, vertical_velocity)\n meco_time = time[find_MECO(acceleration)]\n\n flip_time = [t for t, v in zip(time, dv) if v <= 0 and meco_time < t < meco_time + 120]\n\n if len(flip_time) > 0:\n return flip_time[0]\n return None\n\n\ndef acceleration_func(x, Isp, m_dot, m0):\n return 9.8*Isp*9*m_dot/(m0-9*m_dot*x)\n\ndef velocity_func(x, Ve, m_dot, m0, g0):\n return Ve*np.log(m0/(m0-9*m_dot*x)) - g0\n\n\ndef get_atmos_data(altitude):\n \"\"\"\n This function calculates data about the atmosphere\n :param altitude: Distance from the surface of the Earth [m]\n :return:\n \"\"\"\n h, T, P, rho = coesa.table(altitude)\n\n if np.isnan(P) or type(P) == complex:\n P = 0\n rho = 0\n\n return h, T, P, rho\n\n\ndef get_q(velocity, altitude):\n return 0.5*get_atmos_data(1000*altitude)[-1]*velocity**2\n\n\n\n\n\n\nMAX_STRING_LENGTH = 1024\n\nVELOCITY_INTERVAL = 0.1\nALTITUDE_INTERVAL = 1\nMIN_COAST = 600\n\n\n# Get the data\nfile = open(sys.argv[1], 'r')\ndata = read_list(file)\n\n# Set the end points of the data\nstart = data['time'][0]\nend = data['time'][-1]\n\n\nend_index = find_gap(data)\n\nstart = data['time'][0]\nend = data['time'][end_index]\n\nif end_index is not None and end_index != -1:\n data['time'] = data['time'][:end_index+1]\n data['velocity'] = data['velocity'][:end_index+1]\n data['altitude'] = data['altitude'][:end_index+1]\n\nflip = len(sys.argv) >= 4\n\nif data['altitude'][-1] - data['altitude'][0] < 50:\n stage = 1\nelse:\n stage = 2\n\n# Set time interval for the data\nvelocity_time = np.arange(start, end, VELOCITY_INTERVAL)\naltitude_time = np.arange(start, end, ALTITUDE_INTERVAL)\n\n# Smooth altitude and velocity data\naltitude = find_altitude_graph(data['time'], data['altitude'], interp=False)\naltitude 
= np.maximum(0, altitude)\n\n\nvelocity = np.interp(altitude_time, *refine_graph(data['time'], data['velocity']))\n\n\n# Find vertical velocity\nvertical_velocity = derivative(altitude_time, altitude, 3)\nvertical_velocity = [np.sign(vv)*min(fabs(vv), vt) for vv, vt in zip(vertical_velocity, velocity)]\n\n\naltitude = smooth_altitude_with_velocity(altitude)\n\nangle = find_angle_graph(velocity, vertical_velocity, interp=(stage == 2))\n\n\nmed_vertical_velocity = ss.medfilt(vertical_velocity, kernel_size=7)\nwie_vertical_velocity = ss.wiener(np.array(med_vertical_velocity))\nsav = savgol_filter(wie_vertical_velocity, 5, 1)\nvertical_velocity = sav\n\n\n\nacceleration = derivative(altitude_time, velocity, 1)\n\nmed_acceleration = ss.medfilt(acceleration, kernel_size=7)\nwie_acceleration = ss.wiener(np.array(med_acceleration))\nacceleration = np.add(wie_acceleration, np.multiply(list(map(gravity, altitude)), np.sin(angle)))\nacceleration = savgol_filter(acceleration, 3, 1)\n\n\nFLIP_TIME = find_flip_point2(altitude_time, velocity, vertical_velocity, acceleration)\n\nhorizontal_velocity = pythagoras(velocity, vertical_velocity)\n\n\nhorizontal_velocity_org = horizontal_velocity\n\n\nif FLIP_TIME is not None:\n flip_direction(altitude_time, horizontal_velocity, FLIP_TIME)\n\n\nmed_horizontal_velocity = ss.medfilt(horizontal_velocity, kernel_size=15)\nwie_horizontal_velocity = ss.wiener(np.array(med_horizontal_velocity))\nsav = savgol_filter(wie_horizontal_velocity, 15, 3)\nhorizontal_velocity = np.minimum(velocity, sav)\n\n\ndownrange_distance = find_downrange_graph(altitude_time, horizontal_velocity)\n\nindex = find_MECO(acceleration)\nv0 = vertical_velocity[index]\na0 = altitude[index]\n\n#index = np.argmax(altitude, axis=0)\n#apogee = velocity_time[index]\n\n#index -= 100\n#p0=[288, 300, 5.5*10**5]\n#param_bounds=([200, 250, 4.5*10**5], [400, 400, 6.5*10**5])\n#popt, pcov = curve_fit(acceleration_func, velocity_time[:index], acceleration[:index], p0=p0, bounds=param_bounds)\n#print('Isp = {}, m_dot = {}, mass = {:.2f}'.format(*popt))\n\ndownrange_distance = np.multiply(0.001, downrange_distance)\naltitude = np.multiply(0.001, altitude)\naltitude = np.maximum(0, altitude)\n\n\nout_file = open(sys.argv[2], 'w')\nout_string = ''\n\nvelocity_time = np.subtract(velocity_time, data['time'][0])\n\nindex = np.where(np.abs(acceleration) > 1)\n\nfor i in range(min(len(altitude_time)-10, index[0][-1]+10)):\n data_dict = OrderedDict([\n ('time', float('{:.3f}'.format(altitude_time[i]))),\n ('velocity', float('{:.3f}'.format(velocity[i]))),\n ('altitude', float('{:.3f}'.format(altitude[i]))),\n ('velocity_y', float('{:.3f}'.format(vertical_velocity[i]))),\n ('velocity_x', float('{:.3f}'.format(horizontal_velocity[i]))),\n ('acceleration', float('{:.3f}'.format(acceleration[i]))),\n ('downrange_distance', float('{:.3f}'.format(downrange_distance[i]))),\n ('angle', float('{:.3f}'.format(degrees(angle[i])))),\n ('q', get_q(velocity[i], altitude[i]))\n ])\n\n out_string += str(json.dumps(data_dict)) + '\\n'\n\n if len(out_string) >= MAX_STRING_LENGTH:\n out_file.write(out_string)\n out_string = ''\n\nout_file.write(out_string)\n\n\n\n"
] | [
[
"scipy.interpolate.interp1d",
"numpy.array",
"scipy.signal.savgol_filter",
"numpy.isnan",
"numpy.sin",
"numpy.minimum",
"numpy.log",
"numpy.interp",
"numpy.multiply",
"numpy.sign",
"scipy.signal.medfilt",
"numpy.subtract",
"numpy.arange",
"numpy.argmax",
"numpy.abs",
"numpy.where",
"numpy.maximum"
]
] |
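The telemetry-analysis script above repeatedly cleans noisy channels with a median filter, a Wiener filter, and a Savitzky-Golay pass (for example on `vertical_velocity`). Below is a self-contained sketch of that smoothing chain on synthetic data, reusing the kernel and window sizes from the vertical-velocity step; the synthetic trace itself is made up.

```python
import numpy as np
from scipy.signal import medfilt, savgol_filter, wiener

# Synthetic noisy trace standing in for a parsed telemetry channel.
t = np.arange(0.0, 10.0, 0.1)
noisy = 100.0 * np.sin(t) + np.random.normal(0.0, 5.0, size=t.shape)

# Median filter -> Wiener filter -> Savitzky-Golay, as in the script.
smoothed = medfilt(noisy, kernel_size=7)
smoothed = wiener(np.array(smoothed))
smoothed = savgol_filter(smoothed, 5, 1)
print(noisy.shape, smoothed.shape)
```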
JaniAnttonenp/ml-fairness | [
"d76786f5bfb00239a8a68d6de69b9889cf7cf61e"
] | [
"environments/recommenders/recsim_samplers.py"
] | [
"# coding=utf-8\n# Copyright 2020 The ML Fairness Gym Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Samplers for Recsim simulations.\"\"\"\nimport numpy as np\nfrom recsim import document\nfrom recsim import user\n\nMAXINT = np.iinfo(np.int32).max\n\n\nclass SequentialSampler(document.AbstractDocumentSampler):\n \"\"\"Iterates over a sequence of candidate documents.\"\"\"\n\n def __init__(self, documents, doc_ctor, repeat=True):\n self._corpus = documents\n self._idx = 0\n self._doc_ctor = doc_ctor\n self.repeat = repeat\n\n def reset_sampler(self):\n self._idx = 0\n\n def size(self):\n return len(self._corpus)\n\n def sample_document(self):\n \"\"\"Returns the next document.\n\n If the sampler is a repeating sampler (constructed with repeat=True),\n it will back to the start if the corpus is exhausted.\n\n Raises:\n IndexError: if self.repeat is False and the corpus is exhausted.\n \"\"\"\n if self._idx >= len(self._corpus):\n if not self.repeat:\n raise IndexError('Attempting to sample more items than available.')\n self.reset_sampler()\n doc = self._corpus[self._idx]\n self._idx += 1\n return doc\n\n\nclass SingletonSampler(SequentialSampler):\n \"\"\"Iterates over a sequence of candidate documents only once.\"\"\"\n\n def __init__(self, documents, doc_ctor):\n super(SingletonSampler, self).__init__(documents, doc_ctor, repeat=False)\n\n\nclass ConstructionSampler(user.AbstractUserSampler):\n \"\"\"Constructs a new user with a unique user id for each sample.\"\"\"\n\n def __init__(self, user_ctor, seed):\n \"\"\"Initializes the ConstructionSampler.\n\n Args:\n user_ctor: A User constructor with two arguments: (user_id, seed)\n seed: Random seed for the sampler.\n \"\"\"\n super(ConstructionSampler, self).__init__(user_ctor=user_ctor, seed=seed)\n self.user_id = -1\n\n def sample_user(self):\n \"\"\"Generates a new user with a unique user id..\"\"\"\n self.user_id += 1\n return self._user_ctor(self.user_id, seed=self._rng.randint(0, MAXINT))\n\n\nclass UserPoolSampler(user.AbstractUserSampler):\n \"\"\"Samples users from a fixed pool read in at initialization.\"\"\"\n\n def __init__(self,\n users,\n user_ctor,\n seed=None,\n partitions=None,\n partition_seed=100):\n \"\"\"Initializes the UserPoolSampler.\n\n Args:\n users: A list of `AbstractUsers`.\n user_ctor: Constructor for the user class.\n seed: Random seed for the pool sampler.\n partitions: A list of floats that describe how to partition the users.\n For example: [0.3, 0.3, 0.4] would create 3 partitions, with 30%, 30%\n and 40% of the users, respectively.\n partition_seed: Used to control how users are randomly allocated to\n partitions.\n \"\"\"\n super(UserPoolSampler, self).__init__(seed=seed,\n user_ctor=user_ctor)\n self._users = {user.user_id: user for user in users}\n self._partitions = [np.array(list(self._users.keys()))]\n self._active_pool = 0\n\n if partitions is not None and not np.isclose(np.sum(partitions), 1.0):\n raise ValueError('Partitions must sum to 1.')\n\n # Shuffle 
the keys to create a random partition.\n partition_rng = np.random.RandomState(partition_seed)\n partition_rng.shuffle(self._partitions[0])\n if partitions is not None:\n cutpoints = (np.cumsum(partitions)*len(self._users)).astype(np.int32)\n # The final cutpoint at len does not need to be specified.\n self._partitions = np.split(self._partitions[0], cutpoints[:-1])\n\n for partition in self._partitions:\n assert partition.size, (\n 'Empty partition! Used cutpoints %s to cut a list of len %d.' %\n (cutpoints, len(self._users.keys())))\n\n def size(self):\n return len(self._users)\n\n def sample_user(self):\n # Random choice over keys from the current partition of users.\n user_id = self._rng.choice(list(self._partitions[self._active_pool]))\n return self.get_user(user_id)\n\n def get_user(self, user_id):\n return self._users[user_id]\n\n def set_active_pool(self, pool):\n if pool > len(self._partitions):\n raise ValueError('Trying to select pool %d but there are only %d pools.' %\n (pool, len(self._partitions)))\n self._active_pool = pool\n"
] | [
[
"numpy.random.RandomState",
"numpy.sum",
"numpy.split",
"numpy.cumsum",
"numpy.iinfo"
]
] |
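`UserPoolSampler` above turns the partition fractions into cumulative index cut points and splits the shuffled user ids with `numpy.split`. A small standalone sketch of that cut-point logic, using the `[0.3, 0.3, 0.4]` example from its docstring:

```python
import numpy as np

user_ids = np.arange(10)          # stand-in for the pooled user ids
partitions = [0.3, 0.3, 0.4]      # must sum to 1, as the sampler checks

rng = np.random.RandomState(100)  # partition_seed=100 is the sampler's default
rng.shuffle(user_ids)

# Fractions -> cumulative counts -> cut points; the final cut point is implicit.
cutpoints = (np.cumsum(partitions) * len(user_ids)).astype(np.int32)
pools = np.split(user_ids, cutpoints[:-1])
print([p.tolist() for p in pools])  # three pools of sizes 3, 3 and 4
```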
csajedi/mergelife | [
"1a7ab53f0705a4bdef851e132dbd6f25ac4673dc"
] | [
"python/mergelife.py"
] | [
"import numpy as np\nfrom scipy.ndimage import convolve\nimport scipy\nimport scipy.stats\nimport ctypes\nfrom PIL import Image\nimport dp\nimport logging\n\nPATH = \"C:\\\\Users\\\\jeffh\\\\temp\\mlife\\\\\"\nlogger = logging.getLogger(\"mergelife\")\n\n# The color table.\nCOLOR_TABLE = [\n [0, 0, 0], # Black 0\n [255, 0, 0], # Red 1\n [0, 255, 0], # Green 2\n [255, 255, 0], # Yellow 3\n [0, 0, 255], # Blue 4\n [255, 0, 255], # Purple 5\n [0, 255, 255], # Cyan 6\n [255, 255, 255] # White 7\n]\n\n\ndef parse_update_rule(code):\n code = fromHex(code)\n\n sorted_code = []\n for i, x in enumerate(code):\n rng = int(x[0] * 8)\n if x[1] > 0:\n pct = x[1] / 127.0\n else:\n pct = x[1] / 128.0\n sorted_code.append((2048 if rng == 2040 else rng, pct, i))\n\n sorted_code = sorted(sorted_code)\n return sorted_code\n\n\ndef update_step(ml_instance):\n kernel = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]\n THIRD = 1.0 / 3.0\n\n # Get important values\n sorted_rule = ml_instance['sorted_rule']\n height = ml_instance['height']\n width = ml_instance['width']\n changed = np.zeros((height, width), dtype=np.bool)\n\n # Swap lattice\n t = ml_instance['lattice'][1]\n ml_instance['lattice'][1] = ml_instance['lattice'][0]\n ml_instance['lattice'][0] = t\n\n # Get current and previous lattice\n prev_data = ml_instance['lattice'][1]['data']\n current_data = ml_instance['lattice'][0]['data']\n\n # Merge RGB\n data_avg = np.dot(prev_data, [THIRD, THIRD, THIRD])\n data_avg = data_avg.astype(int)\n pad_val = scipy.stats.mode(data_avg, axis=None)[0]\n pad_val = int(pad_val)\n data_cnt = convolve(data_avg, kernel, cval=pad_val, mode='constant')\n\n # Perform update\n for limit, pct, cidx in sorted_rule:\n mask = data_cnt < limit\n mask = np.logical_and(mask, np.logical_not(changed))\n changed = np.logical_or(changed, mask)\n\n if pct < 0:\n pct = abs(pct)\n cidx = cidx + 1\n if cidx >= len(COLOR_TABLE):\n cidx = 0\n\n d = COLOR_TABLE[cidx] - prev_data[mask]\n current_data[mask] = prev_data[mask] + np.floor(d * pct)\n ml_instance['lattice'][0]['eval'] = {\n 'mode': pad_val,\n 'merge': data_avg,\n 'neighbor': data_cnt\n }\n\n ml_instance['time_step'] += 1\n return current_data\n\n\ndef toHex(code):\n result = \"\"\n\n for rng, pct in code:\n if len(result) > 0:\n result += \"-\"\n result += \"%02x\" % int(rng)\n result += \"%02x\" % ctypes.c_ubyte(int(pct)).value\n\n return result\n\n\ndef fromHex(str):\n result = []\n str = str.replace('-', '')\n for i in range(len(COLOR_TABLE)):\n idx = i * 4\n rng = str[idx:idx + 2]\n pct = str[idx + 2:idx + 4]\n rng = int(rng, 16)\n pct = int(pct, 16)\n\n pct = ctypes.c_byte(pct).value # Twos complement\n result.append((rng, pct))\n\n return result\n\n\ndef random_update_rule():\n result = []\n for i in range(len(COLOR_TABLE)):\n rng = np.random.randint(0, 255)\n pct = np.random.randint(-128, 128)\n\n result.append((rng, pct))\n return toHex(result)\n\n\ndef randomize_lattice(ml_instance):\n height = ml_instance['height']\n width = ml_instance['width']\n ml_instance['track'] = {}\n ml_instance['time_step'] = 0\n ml_instance['lattice'][0]['data'] = np.random.randint(0, 256, size=(height, width, 3), dtype=np.uint8)\n ml_instance['lattice'][1]['data'] = np.copy(ml_instance['lattice'][0]['data'])\n\n\ndef save_image(ml_instance, filename):\n lattice = ml_instance['lattice'][0]['data']\n newimage = Image.new('RGB', (len(lattice[0]), len(lattice))) # type, size\n newimage.putdata([tuple(p) for row in lattice for p in row])\n newimage.save(filename) # takes type from filename extension\n\n\ndef 
new_ml_instance(height, width, rule_str):\n result = {\n 'height': height,\n 'width': width,\n 'rule_str': rule_str,\n 'sorted_rule': parse_update_rule(rule_str),\n 'time_step': 0,\n 'track': {},\n 'lattice': [\n {'data': None, 'eval': None},\n {'data': None, 'eval': None}\n ]\n }\n\n randomize_lattice(result)\n result['lattice'][1]['data'] = np.copy(result['lattice'][0]['data'])\n return result\n\n\ndef calc_objective_stats(ml_instance):\n height = ml_instance['height']\n width = ml_instance['width']\n time_step = ml_instance['time_step']\n\n e1 = ml_instance['lattice'][0]['eval']\n e2 = ml_instance['lattice'][1]['eval']\n\n if e1 is None or e2 is None:\n return {'mage': 0, 'mode': 0, 'mc': 0, 'bg': 0, 'fg': 0, 'act': 0, 'chaos': 0}\n\n d1_avg = e1['merge']\n d2_avg = e2['merge']\n\n # What percent of the grid is the mode, what percent is the background\n md1 = e1['mode']\n md2 = e2['mode']\n\n mode_mask = (d2_avg == md2)\n # mode_equal = sum(mode_equal.ravel()) / (height*width)\n\n # has been mode for >5\n if 'eval-md-cnt' in ml_instance['track']:\n mode_cnt = ml_instance['track']['eval-md-cnt']\n else:\n mode_cnt = np.zeros((height, width), dtype=np.int)\n ml_instance['track']['eval-md-cnt'] = mode_cnt\n\n mode_cnt[np.logical_not(mode_mask)] = 0\n mode_cnt += mode_mask\n\n mc = np.sum(mode_cnt > 50)\n\n # has been color (not mode) for >5\n if 'eval-same-cnt' in ml_instance['track']:\n same_cnt = ml_instance['track']['eval-same-cnt']\n else:\n same_cnt = np.zeros((height, width), dtype=np.int)\n ml_instance['track']['eval-same-cnt'] = same_cnt\n\n same_mask = (d1_avg == d2_avg)\n same_mask = np.logical_and(same_mask, np.logical_not(mode_mask))\n same_cnt[np.logical_not(same_mask)] = 0\n same_cnt += same_mask\n\n sc = np.sum(same_cnt > 5)\n\n # How long has the mode been its value\n mage = ml_instance['track'].get(\"eval-mode-age\", 0)\n lm = ml_instance['track'].get('eval-last-mode-val', md2)\n if lm != md2:\n lm = md2\n mage = 0\n else:\n mage += 1\n\n # how long ago was a pixel the mode\n if 'eval-last-mode' in ml_instance['track']:\n last_mode = ml_instance['track']['eval-last-mode']\n else:\n last_mode = np.zeros((height, width), dtype=np.int)\n ml_instance['track']['eval-last-mode'] = last_mode\n\n last_mode[mode_mask] = time_step\n ml_instance['track']['eval-mode-age'] = mage\n ml_instance['track']['eval-last-mode-val'] = lm\n\n # Find the active cells\n # An active cell has not been a background cell for 5 steps, but was a background cell in the last 25 steps\n if time_step >= 25:\n t = (ml_instance['time_step'] - last_mode)\n t1 = t > 5\n t2 = t < 25\n t = np.logical_and(t1, t2)\n sp = np.sum(t)\n else:\n sp = 0\n\n # Combine\n size = height * width\n cnt_bg = mc\n cnt_fg = sc\n cnt_act = sp\n cnt_chaos = (height * width) - (cnt_bg + cnt_fg + cnt_act)\n\n cnt_bg /= size\n cnt_fg /= size\n cnt_act /= size\n cnt_chaos /= size\n\n logger.debug(\"{}:Mode Count: {}, Stable BG: {}, Stable FG: {}, Active: {}, Chaos: {}, Mage: {}\".format(\n ml_instance['time_step'], mc, cnt_bg,\n cnt_fg, cnt_act, cnt_chaos, mage))\n\n return {'mage': mage, 'mode': md2, 'mc': mc, 'bg': cnt_bg, 'fg': cnt_fg, 'act': cnt_act, 'chaos': cnt_chaos}\n\n\ndef is_lattice_stable(ml_instance, o=None):\n if o is None:\n o = calc_objective_stats(ml_instance)\n\n cnt_bg = o['bg']\n\n if ml_instance['time_step'] > 100:\n if cnt_bg < 0.01:\n return True\n\n # Time to stop?\n mc_nochange = ml_instance['track'].get('eval-mc-nochange', 0)\n last_mc = ml_instance['track'].get('eval-last-mc', 0)\n\n if last_mc == o['mc']:\n 
mc_nochange += 1\n if mc_nochange > 100:\n return True\n else:\n mc_nochange = 0\n last_mc = o['mc']\n\n ml_instance['track']['eval-mc-nochange'] = mc_nochange\n ml_instance['track']['eval-last-mc'] = last_mc\n\n return ml_instance['time_step'] > 1000\n\n\ndef count_discrete(ml_instance):\n states = set()\n lattice = ml_instance['lattice'][0]['data']\n for row in range(lattice.shape[0]):\n for col in range(lattice.shape[1]):\n states.add(str(lattice[row][col]))\n return len(states)\n\ndef calc_stat_largest_rect(ml_inst,o):\n height = ml_inst['height']\n width = ml_inst['width']\n e2 = ml_inst['lattice'][1]['eval']\n mr = dp.max_size(e2['merge'], o['mode'])\n return (mr[0] * mr[1]) / (height * width)\n\n\ndef calc_objective_function(ml_inst, objective, dump=False):\n\n randomize_lattice(ml_inst)\n\n done = False\n while not done:\n update_step(ml_inst)\n o = calc_objective_stats(ml_inst)\n if dump:\n print(o)\n done = is_lattice_stable(ml_inst, o)\n\n stats = {\n 'steps': ml_inst['time_step'],\n 'foreground': o['fg'],\n 'active': o['act'],\n 'rect': calc_stat_largest_rect(ml_inst,o),\n 'mage' : o['mage']\n }\n\n score = 0\n for rule in objective:\n actual = stats[rule['stat']]\n rule_min = rule['min']\n rule_max = rule['max']\n range = rule_max - rule_min\n ideal = range / 2\n\n if actual < rule_min:\n # too small\n adjust = rule['min_weight']\n elif actual > rule_max:\n # too big\n adjust = rule['max_weight']\n else:\n adjust = ((range / 2) - abs(actual - ideal)) / (range / 2)\n adjust *= rule['weight']\n pass\n\n score += adjust\n #print(\"{}:{}:{}\".format(rule,actual,adjust))\n #print(score)\n\n return {'time_step':ml_inst['time_step'],'score':score}\n\n\ndef objective_function(ml_inst,cycles,objective,dump=False):\n lst = []\n steps = 0\n for i in range(cycles):\n if dump:\n print(\"Cycle #{}\".format(i))\n result = calc_objective_function(ml_inst,objective,dump)\n lst.append(result['score'])\n steps+=result['time_step']\n return {'time_step':steps,'score':np.max(lst)}\n"
] | [
[
"scipy.stats.mode",
"scipy.ndimage.convolve",
"numpy.logical_or",
"numpy.dot",
"numpy.logical_not",
"numpy.max",
"numpy.zeros",
"numpy.sum",
"numpy.copy",
"numpy.logical_and",
"numpy.random.randint",
"numpy.floor"
]
] |
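`update_step` above derives each cell's neighbourhood value by convolving the merged grey-scale lattice with an eight-neighbour kernel and padding the border with the lattice's modal value. A minimal sketch of that step on a toy lattice:

```python
import numpy as np
import scipy.stats
from scipy.ndimage import convolve

# Toy grey-scale lattice standing in for the merged RGB grid.
data_avg = np.array([
    [0, 255, 0],
    [255, 0, 255],
    [0, 255, 0],
])

# Eight-neighbour kernel with the centre cell excluded, as in update_step.
kernel = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]

# Pad with the most common value so border cells see a plausible neighbourhood.
pad_val = int(scipy.stats.mode(data_avg, axis=None)[0])
data_cnt = convolve(data_avg, kernel, cval=pad_val, mode='constant')
print(data_cnt)
```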
millicentli/clin-bias-summarization | [
"bab94e8480b98109fe8b390a99317f635fce66b4"
] | [
"scripts/make_targets.py"
] | [
"# code is a mess since it's mostly exported from a jupyter notebook\nimport os\nimport pandas as pd\nimport Constants\nfrom sklearn.model_selection import KFold\nfrom readers import PhenotypingReader, InHospitalMortalityReader\nimport yaml\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nparser = ArgumentParser()\nparser.add_argument('--processed_df', type=Path, required=True)\nparser.add_argument('--mimic_benchmark_dir', type = Path, required = True)\nparser.add_argument('--output_dir', type = Path, required = True)\nargs = parser.parse_args()\n\ndef preprocessing(row):\n '''\n Input: a list of tokens\n Output: a list of string, with each string having MAX_SEQ_LEN-2 tokens\n Uses a sliding window approach, with the window sliding (SLIDING_DIST tokens) each time\n '''\n n = int(len(row.toks)/Constants.SLIDING_DIST)\n seqs = []\n if n == 0: # note shorter than SLIDING_DIST tokens\n seqs.append(' '.join(row.toks))\n else:\n for j in range(min(n, Constants.MAX_NUM_SEQ)):\n seqs.append(' '.join(row.toks[j*Constants.SLIDING_DIST:(j+1)*Constants.SLIDING_DIST+(Constants.MAX_SEQ_LEN - Constants.SLIDING_DIST-2)]))\n return seqs\n\n# df = pd.read_hdf(args.processed_df.resolve(), key='df')\ndf = pd.read_pickle(args.processed_df.resolve())\ndf['seqs'] = df.apply(preprocessing, axis = 1)\ndf['num_seqs'] = df.seqs.apply(len)\nassert(df.seqs.apply(lambda x: any([len(i)== 0 for i in x])).sum() == 0)\n\ndf['note_id'] = df['note_id'].astype(str)\ndf = df[~pd.isnull(df['oasis'])]\nMAX_AGG_SEQUENCE_LENGTH = Constants.MAX_AGG_SEQUENCE_LEN\n\nroot_folder = args.mimic_benchmark_dir/'root/'\nother_features = ['age', 'oasis', 'oasis_prob', 'sofa', 'sapsii', 'sapsii_prob']\n\n'''\nIn-Hospital Mortality\nUsing the first 48 hours of patient information within their ICU stay, predict whether or not the patient will die in hospital.\nSubjects/targets are extracted by MIMIC-Benchmarks script. 
Their script only extracts numeric data, while we want to use only notes.\nTheir script also defines a new time scale, so that t=0 is when the patient first enters the ICU.\n\nWhat we do is:\n- Using the MIMIC-Benchmarks InHospitalMortalityReader, read in each patient, to get the target.\nWe know the period of interest will be 0-48 hours, where 0 is the intime to the ICU.\n- For each patient, we obtain their icustay_id from the episode file in their data folder\n- We obtain their (hadm_id, intime) from all_stays.csv using their icustay_id\n- With this information, along with the 48 hour period length, we can index\ninto df to obtain a set of note_ids corresponding to that period\n- To construct a training set for each individual, we take sequences from the\nlast k notes, until the patient runs out of notes, or we reach the max_agg_sequence_length.\n- We only use sequences from the following note types: Nursing, Nursing/Other,\nPhysician\n- We take the last k notes, because they are more likely to be informative of\nthe target, compared to the first notes\n- We assign a new ID for this aggregated note, which is a combination of their\nsubject ID and episode number\n'''\n\n\ntrain_reader = InHospitalMortalityReader(dataset_dir=args.mimic_benchmark_dir/'in-hospital-mortality' / 'train')\ntest_reader = InHospitalMortalityReader(dataset_dir=args.mimic_benchmark_dir/'in-hospital-mortality' / 'test')\nall_stays = pd.read_csv(os.path.join(root_folder, 'all_stays.csv'), parse_dates = ['INTIME']).set_index('ICUSTAY_ID')\n\ndef read_patient(name, period_length, allowed_types, eps = 0.001, dtype = 'train', return_intime = False):\n # given a file name, retrieve all notes from t=-eps to period_length+eps\n subj_id = int(name.split('_')[0])\n stay = pd.read_csv(os.path.join(root_folder, dtype, str(subj_id), name.split('_')[1]+'.csv'))\n assert(stay.shape[0] == 1)\n row = stay.iloc[0]\n\n icuid = row['Icustay']\n hadm_id = all_stays.loc[icuid]['HADM_ID']\n intime = all_stays.loc[icuid]['INTIME']\n result = df[(df['subject_id'] == subj_id) & (df['hadm_id'] == hadm_id)\n & (df['charttime'] >= intime) & (df['charttime'] < intime+pd.Timedelta(hours = period_length + eps))\n & (df['category'].isin(allowed_types))]\n if return_intime:\n return (intime, result)\n else:\n return result\n\ndef agg_notes(notes, first = False, intime = None, timeDiff = pd.Timedelta(hours = 48)):\n notes = notes.sort_values(by = 'charttime', ascending = False)\n seqs = []\n note_ids = []\n if first:\n note_to_take = None\n firstgood = notes[notes.category.isin(['Nursing', 'Physician '])]\n if firstgood.shape[0] > 0 and (firstgood.iloc[0]['charttime'] - intime) <= timeDiff:\n note_to_take = firstgood.iloc[0]\n elif (notes.iloc[0]['charttime'] - intime) <= timeDiff:\n note_to_take = notes.iloc[0]\n if note_to_take is not None:\n seqs = note_to_take['seqs']\n note_ids.append(note_to_take['note_id'])\n\n else:\n for idx, row in notes.iterrows():\n if len(seqs) + row.num_seqs <= MAX_AGG_SEQUENCE_LENGTH:\n seqs = row.seqs + seqs\n note_ids = [row.note_id] + note_ids\n return {**{\n 'insurance': notes.iloc[0]['insurance'],\n 'gender': notes.iloc[0]['gender'],\n 'ethnicity_to_use': notes.iloc[0]['ethnicity_to_use'],\n 'language_to_use': notes.iloc[0]['language_to_use'],\n 'subject_id': notes.iloc[0]['subject_id'],\n 'hadm_id': notes.iloc[0]['hadm_id'],\n 'seqs': seqs,\n 'note_ids': note_ids,\n 'num_seqs': len(seqs),\n }, **{i: notes.iloc[0][i] for i in other_features}}\n\ntemp = []\nfor i in range(train_reader.get_number_of_examples()):\n 
ex = train_reader.read_example(i)\n notes = read_patient(ex['name'], 48, ['Nursing', 'Physician ', 'Nursing/other'])\n if len(notes) > 0: #no notes of interest within first 48 hours\n dic = agg_notes(notes)\n dic['inhosp_mort'] = ex['y']\n dic['note_id'] = ''.join(ex['name'].split('_')[:2]) + 'a'\n dic['fold'] = 'train'\n temp.append(dic)\n\nfor i in range(test_reader.get_number_of_examples()):\n ex = test_reader.read_example(i)\n notes = read_patient(ex['name'], 48, ['Nursing', 'Physician ', 'Nursing/other'], dtype = 'test')\n if len(notes) > 0: #no notes of interest within first 48 hours\n dic = agg_notes(notes)\n dic['inhosp_mort'] = ex['y']\n dic['note_id'] = ''.join(ex['name'].split('_')[:2])+ 'a'\n dic['fold'] = 'test'\n temp.append(dic)\nt2 = pd.DataFrame(temp)\n# split training set into folds, stratify by inhosp_mort\nsubjects = t2.loc[t2['fold'] != 'test',['subject_id', 'inhosp_mort']].groupby('subject_id').first().reset_index()\nkf = KFold(n_splits = 10, shuffle = True, random_state = 42)\nfor c,j in enumerate(kf.split(subjects, groups = subjects['inhosp_mort'])):\n for k in j[1]:\n t2.loc[t2['subject_id'] == subjects.loc[k]['subject_id'], 'fold'] = str(c+1)\n# t2.to_hdf(args.output_dir / 'inhosp_mort', key='df')\nt2.to_pickle(args.output_dir / 'inhosp_mort')\n\n'''\nPhenotyping using all patient notes\n- Using the MIMIC-Benchmarks PhenotypingReader, read in each patient, to get\nthe targets and the period length (which is the length of stay). We know the period of interest will be 0 to los + $\\epsilon$,\nwhere 0 is the intime to the ICU, and $\\epsilon$ is a small number (so discharge notes are included).\n- We obtain (hadm_id, intime) usin the same method above\n- With this information, along with the los + $\\epsilon$ hour period length, we\ncan index into df to obtain a set of note_ids corresponding to that period\n- We construct sequences using the last k notes, in the same manner as above.\n- We only use sequences from the following note types: Nursing, Nursing/Other,\nPhysician, Discharge Summary\n- We also add in the following targets, aggregated from the specific\nphenotypes: Any acute, Any chronic, Any disease\n'''\n\nwith open('../icd9_codes.yml', 'r') as f:\n ccs = pd.DataFrame.from_dict(yaml.load(f)).T\n\ntarget_names = list(pd.read_csv(os.path.join(root_folder, 'phenotype_labels.csv')).columns)\nacutes = [i for i in target_names if ccs.loc[i, 'type'] == 'acute']\nchronics = [i for i in target_names if ccs.loc[i, 'type'] == 'chronic']\ntrain_reader = PhenotypingReader(dataset_dir=args.mimic_benchmark_dir/'phenotyping' / 'train')\ntest_reader = PhenotypingReader(dataset_dir=args.mimic_benchmark_dir/'phenotyping' / 'test')\ntemp = []\ndef has_any(dic, keys):\n return any([dic[i] == 1 for i in keys])\n\nfor i in range(train_reader.get_number_of_examples()):\n ex = train_reader.read_example(i)\n notes = read_patient(ex['name'], float(ex['t']), ['Nursing', 'Physician ', 'Nursing/other', 'Discharge summary'])\n if len(notes) > 0:\n dic = agg_notes(notes)\n for tar, y in zip(target_names, ex['y']):\n dic[tar] = y\n dic['any_acute'] = has_any(dic, acutes)\n dic['any_chronic'] = has_any(dic, chronics)\n dic['any_disease'] = has_any(dic, target_names)\n\n dic['note_id'] = ''.join(ex['name'].split('_')[:2]) + 'b'\n dic['fold'] = 'train'\n temp.append(dic)\n\nfor i in range(test_reader.get_number_of_examples()):\n ex = test_reader.read_example(i)\n notes = read_patient(ex['name'], float(ex['t']), ['Nursing', 'Physician ', 'Nursing/other', 'Discharge summary'], dtype = 'test')\n 
if len(notes) > 0:\n dic = agg_notes(notes)\n for tar, y in zip(target_names, ex['y']):\n dic[tar] = y\n dic['any_acute'] = has_any(dic, acutes)\n dic['any_chronic'] = has_any(dic, chronics)\n dic['any_disease'] = has_any(dic, target_names)\n\n dic['note_id'] = ''.join(ex['name'].split('_')[:2]) + 'b'\n dic['fold'] = 'test'\n temp.append(dic)\n\ncols = target_names + ['any_chronic', 'any_acute', 'any_disease']\nt3 = pd.DataFrame(temp)\nsubjects = t3.loc[t3['fold'] != 'test',['subject_id', 'any_disease']].groupby('subject_id').first().reset_index()\nkf = KFold(n_splits = 10, shuffle = True, random_state = 42)\nfor c,j in enumerate(kf.split(subjects, groups = subjects['any_disease'])):\n for k in j[1]:\n t3.loc[t3['subject_id'] == subjects.loc[k]['subject_id'], 'fold'] = str(c+1)\n\n# t3.to_hdf(args.output_dir / 'phenotype_all', key='df')\nt3.to_pickle(args.output_dir / 'phenotype_all')\n\n'''\nPhenotyping using the first patient note\n- Using the MIMIC-Benchmarks PhenotypingReader, read in each patient, to get\nthe targets and the period length (which is the length of stay). We know the period of interest will be 0 to los + $\\epsilon$,\nwhere 0 is the intime to the ICU, and $\\epsilon$ is a small number (so discharge notes are included).\n- We obtain (hadm_id, intime) usin the same method above\n- With this information, along with the los + $\\epsilon$ hour period length, we\ncan index into df. We take the first nursing or physician note within the first 48 hours of a person's stay.\nIf this does not exist, we take the first nursing/other note within the first 48 hours.\n- If they do not have a nursing note within 48 hours of their intime, the\npatient is dropped.\n'''\n\ntrain_reader = PhenotypingReader(dataset_dir=args.mimic_benchmark_dir/'phenotyping' / 'train')\ntest_reader = PhenotypingReader(dataset_dir=args.mimic_benchmark_dir/'phenotyping' / 'test')\ntemp = []\nfor i in range(train_reader.get_number_of_examples()):\n ex = train_reader.read_example(i)\n intime, notes = read_patient(ex['name'], float(ex['t']), ['Nursing', 'Physician ', 'Nursing/other'], return_intime = True)\n if len(notes) > 0:\n dic = agg_notes(notes, first = True, intime = intime)\n if len(dic['seqs']) == 0:\n continue\n for tar, y in zip(target_names, ex['y']):\n dic[tar] = y\n dic['any_acute'] = has_any(dic, acutes)\n dic['any_chronic'] = has_any(dic, chronics)\n dic['any_disease'] = has_any(dic, target_names)\n\n dic['note_id'] = dic['note_ids'][0]\n del dic['note_ids']\n dic['fold'] = 'train'\n temp.append(dic)\n\nfor i in range(test_reader.get_number_of_examples()):\n ex = test_reader.read_example(i)\n intime, notes = read_patient(ex['name'], float(ex['t']), ['Nursing', 'Physician ', 'Nursing/other'], dtype = 'test', return_intime = True)\n if len(notes) > 0:\n dic = agg_notes(notes, first = True, intime = intime)\n if len(dic['seqs']) == 0:\n continue\n for tar, y in zip(target_names, ex['y']):\n dic[tar] = y\n dic['any_acute'] = has_any(dic, acutes)\n dic['any_chronic'] = has_any(dic, chronics)\n dic['any_disease'] = has_any(dic, target_names)\n\n dic['note_id'] = dic['note_ids'][0]\n del dic['note_ids']\n dic['fold'] = 'test'\n temp.append(dic)\nt4 = pd.DataFrame(temp)\nt4 = pd.merge(t4, df[['note_id', 'category']], on = 'note_id', how = 'left')\nsubjects = t4.loc[t4['fold'] != 'test',['subject_id', 'any_disease']].groupby('subject_id').first().reset_index()\nkf = KFold(n_splits = 10, shuffle = True, random_state = 42)\nfor c,j in enumerate(kf.split(subjects, groups = subjects['any_disease'])):\n for k in 
j[1]:\n        t4.loc[t4['subject_id'] == subjects.loc[k]['subject_id'], 'fold'] = str(c+1)\n# t4.to_hdf(args.output_dir / 'phenotype_first', key='df')\nt4.to_pickle(args.output_dir / 'phenotype_first')\n"
] | [
[
"pandas.isnull",
"pandas.Timedelta",
"pandas.merge",
"pandas.DataFrame",
"sklearn.model_selection.KFold"
]
] |
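The target-building script above assigns cross-validation folds per subject with `KFold` and then writes the fold label back onto every row belonging to that subject. A condensed sketch of that assignment with toy data; the column names follow the script, but the values are invented and 5 splits are used instead of 10 to fit the tiny example.

```python
import pandas as pd
from sklearn.model_selection import KFold

# Toy note-level frame: several notes per subject, all initially in the 'train' fold.
t2 = pd.DataFrame({
    "subject_id":  [1, 1, 2, 3, 3, 4, 5],
    "inhosp_mort": [0, 0, 1, 0, 0, 1, 0],
    "fold": ["train"] * 7,
})

# One row per subject, then split subjects (not notes) into folds.
subjects = t2[["subject_id", "inhosp_mort"]].groupby("subject_id").first().reset_index()
kf = KFold(n_splits=5, shuffle=True, random_state=42)
for c, (_, test_idx) in enumerate(kf.split(subjects)):
    for k in test_idx:
        t2.loc[t2["subject_id"] == subjects.loc[k, "subject_id"], "fold"] = str(c + 1)
print(t2)
```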
bitnot/ud120 | [
"344469e5e0e875473f2070e333f706d7987b4993"
] | [
"naive_bayes/basic_classifier/ClassifyNB.py"
] | [
"from sklearn.naive_bayes import GaussianNB\n\ndef classify(features_train, labels_train):\n classifier = GaussianNB()\n classifier.fit(features_train, labels_train)\n return classifier\n"
] | [
[
"sklearn.naive_bayes.GaussianNB"
]
] |
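A short usage sketch for the `classify` helper above; it assumes `ClassifyNB.py` is importable from the working directory, and the feature arrays are made up.

```python
from ClassifyNB import classify  # hypothetical import path for the helper above

features_train = [[0.0, 0.0], [1.0, 1.0], [0.1, 0.2], [0.9, 0.8]]
labels_train = [0, 1, 0, 1]

clf = classify(features_train, labels_train)  # returns a fitted GaussianNB
print(clf.predict([[0.05, 0.1], [0.95, 0.9]]))  # expected: [0 1]
```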
cognirob/crow_vision_yolact | [
"9c547be146ddd73f33e36e0683b2179468cd4250"
] | [
"run_test_on_multi_dataset.py"
] | [
"import os\nimport numpy as np\n\ntrained_model = './data/yolact/weights/weights_yolact_kuka_14/crow_plus_base_56_330000.pth'\nscore_threshold = '0.15'\ntop_k = '15'\nmax_images='1000'\nimage_source_cams=[1,1,1,1,1,1,1,1]\n\ncommand = 'python eval.py --trained_model='+trained_model+' --score_threshold='+score_threshold+' --top_k='+top_k+' --max_images='+max_images+' --dataset=kuka_env_pybullet_dataset_test_cam'\nfor cam in np.nonzero(image_source_cams)[0]:\n cam=str(cam)\n print('testing on camera '+cam+':') \n os.system(command+cam)"
] | [
[
"numpy.nonzero"
]
] |
EnricoReg/asynch-rl | [
"acd01a49a7a4b8ff4ff0694d1e24274ba87691ee"
] | [
"asynch_rl/rl/resources/sim_agent.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 8 14:23:05 2020\n\n@author: Enrico Regolin\n\"\"\"\n\n# Importing the libraries\n# required if current directory is not found\nimport sys \nimport os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\n \nfrom pathlib import Path as createPath\n#%%\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport torch\nfrom torch.nn.utils import clip_grad_value_ , clip_grad_norm_\nimport asyncio\nimport time\n\nfrom copy import deepcopy\n\n#%%\n# my libraries\n\nfrom .memory import ReplayMemory\n#from rl.utilities import check_WhileTrue_timeout\n\n#%%\nDEBUG = False\n\nclass SimulationAgent:\n \n ##################################################################################\n def __init__(self,sim_agent_id, env = None, rl_mode = 'DQL', model_qv= None, model_pg = None,model_v = None,\\\n n_frames = 1, net_name = 'no_name' ,epsilon = 0.9, ctrlr_probability = 0, save_movie = False, \\\n max_consecutive_env_fails = 3, max_steps_single_run = 200, show_rendering = True, \n use_reinforce = False, storage_path = None, \\\n tot_iterations = 1000, live_plot = False, verbosity = 0 , noise_sd = 0.05, \\\n movie_frequency = 10, max_n_single_runs = 1000, save_sequences = False, reward_history_span = 200):\n \n self.storage_path = storage_path\n \n self.reward_history = []\n self.reward_history_span = reward_history_span\n \n self.prob_correction = 0.2 # probability of \"correction\" of \"non sense random inputs\" generated\n\n self.use_reinforce = use_reinforce\n \n self.beta_PG = 1 \n self.gamma = 0.99\n\n self.rl_mode = rl_mode\n \n self.save_sequences = save_sequences\n self.reset_full_sequences()\n \n # for testing only\n self.max_n_single_runs = max_n_single_runs\n\n self.is_running = False\n self.share_conv_layers = False\n \n self.sim_agent_id = sim_agent_id\n self.net_name = net_name\n \n self.save_movie = save_movie\n \n #self.move_to_cuda = torch.cuda.is_available() and move_to_cuda\n\n self.internal_memory_size = np.round(1.2*tot_iterations)\n self.internal_memory = ReplayMemory(size = self.internal_memory_size)\n \n # instantiate DQN (env is required for NN dimension)\n self.env = env\n \n if self.env is not None:\n self.n_actions = self.env.get_actions_structure()\n else:\n self.n_actions = None\n \n #(self.env.n_bins_act+1)**(self.env.act_shape[0]) # n_actions depends strictly on the type of environment\n \n if self.rl_mode != 'AC':\n self.model_qv = model_qv\n if 'AC' in self.rl_mode :\n self.model_pg = model_pg\n self.model_v = model_v\n \n self.n_frames = n_frames\n \n self.epsilon = epsilon\n self.ctrlr_probability = ctrlr_probability\n \n # verbosity determines how many messages are displayed\n self.tot_iterations = tot_iterations \n self.verbosity = verbosity\n\n self.display_status_frequency = self.tot_iterations \n if self.verbosity == 2:\n self.display_status_frequency = int(round(self.tot_iterations/10))\n elif self.verbosity == 1:\n self.display_status_frequency = int(round(self.tot_iterations/4))\n \n self.show_rendering = show_rendering\n self.live_plot = live_plot\n self.movie_frequency = movie_frequency\n \n self.max_consecutive_env_fails = max_consecutive_env_fails\n \n self.max_steps_single_run = max_steps_single_run\n \n ########## required to avoid RL level setting max steps as None, while still allowing it to change it\n @property\n def max_steps_single_run(self):\n return self._max_steps_single_run\n @max_steps_single_run.setter\n def 
max_steps_single_run(self,value):\n if self.env is not None:\n self._max_steps_single_run = np.maximum(value, self.env.get_max_iterations())\n else:\n self._max_steps_single_run = value\n \n \n ################################################################################## \n #initialize variables at the beginning of each run (to reset iterations) \n def run_variables_init(self):\n \n self.loss_policy = 0 \n \n self.agent_run_variables = dict(\n iteration = 1,\n single_run = 0,\n cum_reward = 0,\n fails_count = 0,\n consecutive_fails = 0,\n steps_since_start = 0,\n failed_iteration = False,\n successful_runs = 0)\n\n\n ################################################################################## \n # required since ray wrapper doesn't allow accessing attributes\n async def isRunning(self):\n return self.is_running\n\n ################################################################################## \n # required to check if model is inherited\n def getAttributeValue(self, attribute):\n if attribute in self.__dict__.keys():\n return self.__dict__[attribute] \n\n ################################################################################## \n # required since ray wrapper doesn't allow accessing attributes\n def getAttributes(self):\n return [key for key in self.__dict__.keys()] \n \n ################################################################################## \n # required since ray wrapper doesn't allow accessing attributes\n def setAttribute(self,attribute,value):\n if attribute in self.__dict__.keys():\n self.__dict__[attribute] = value\n \n \n ################################################################################## \n def update_model_weights_only(self, model_weights, model = 'model_qv'):\n \"\"\" updates model_pg weights based on external model leaving the optimizer in its current state\"\"\"\n for k,v in model_weights.items():\n self.__dict__[model].state_dict()[k] *= 0\n self.__dict__[model].state_dict()[k] += v\n \n ################################################################################## \n def renderAnimation(self, action = 0, done = False, reward_np=0):\n iter_params = list( map(self.agent_run_variables.get, ['iteration', 'single_run', 'cum_reward']) )\n if self.env.env_type == 'RobotEnv':\n self.renderRobotEnv(iter_params, done, reward_np)\n elif self.env.env_type == 'CartPole':\n self.renderCartPole(iter_params, action)\n\n ################################################################################## \n def renderCartPole(self, iter_params, action):\n self.env.render(iter_params=iter_params, action = action)\n \n ################################################################################## \n def renderRobotEnv(self, iter_params, done, reward_np):\n # render animation\n \n if self.show_rendering:\n if self.live_plot:\n self.env.render('plot',iter_params=iter_params)\n plt.show()\n elif not (self.agent_run_variables['single_run'] % self.movie_frequency) :\n frame = self.env.render('animation',iter_params=iter_params)\n self.ims.append(frame) \n if done:\n iter_params[0]+=1\n iter_params[2]+=reward_np\n frame = self.env.render('animation',iter_params=iter_params)\n for _ in range(10):\n self.ims.append(frame) \n \n ##################################################################################\n def controller_selector(self, pctg_ctrl = 0):\n if pctg_ctrl > 0:\n return random.random() <= pctg_ctrl\n else:\n return random.random() <= self.ctrlr_probability\n \n 
##################################################################################\n def reset_full_sequences(self):\n self.states_full_sequences = []\n self.ctrl_full_sequences = [] \n \n ##################################################################################\n def get_full_sequences(self):\n return self.states_full_sequences , self.ctrl_full_sequences \n \n ##################################################################################\n def get_full_sequence_and_reset(self, include_ctrl = False):\n states,ctrls = self.get_full_sequences()\n self.reset_full_sequences()\n if include_ctrl:\n return states,ctrls\n else:\n return states\n \n ##################################################################################\n def reset_agent(self, pctg_ctrl = 0, evaluate = False, info = None):\n \n if self.save_sequences and self.env.env.get_complete_sequence() is not None:\n state_sequence, ctrl_sequence = self.env.env.get_complete_sequence()\n if not hasattr(self, 'states_full_sequences'):\n self.states_full_sequences = [state_sequence]\n self.ctrl_full_sequences = [ctrl_sequence]\n else:\n self.states_full_sequences.append(state_sequence)\n self.ctrl_full_sequences.append(ctrl_sequence)\n \n state_obs = self.env.reset(save_history = (not self.agent_run_variables['single_run'] % self.movie_frequency), evaluate = evaluate)\n \n if state_obs is None:\n # initial action is do nothing\n action = torch.zeros([self.n_actions], dtype=torch.bool)\n action[round((self.n_actions-1)/2)] = 1\n state_obs, reward, done, info = self.env.action(action)\n \n state = self.env.get_net_input(state_obs, reset = True)\n \n single_run_log = self.trainVariablesUpdate(reset_variables = True, info = info)\n self.update_sim_log(single_run_log)\n self.reset_simulation = False\n \n # the decision to use the controller in a given run (instead of the model output) is taken before the run starts\n use_controller = self.controller_selector(pctg_ctrl)\n \n return state, use_controller\n\n ##################################################################################\n def update_sim_log(self, single_run_log):\n \n if single_run_log is not None:\n \n if single_run_log[0,0] > 1 and not np.isnan(single_run_log[0,1] ):\n \n if self.simulation_log is None:\n self.simulation_log = single_run_log\n else:\n self.simulation_log = np.append(self.simulation_log, single_run_log, axis = 0)\n \n \n ##################################################################################\n def initialize_pg_lists(self):\n self.traj_rewards = []\n self.traj_output_maps = []\n self.traj_state_value = []\n self.traj_log_prob = []\n self.traj_entropy = []\n self.advantage_loss = 0\n self.loss_policy = 0\n self.map_est_loss = 0\n self.state_sequences = []\n \n self.map_est_sequences = []\n\n \n ##################################################################################\n def pg_calculation(self, reward, state_0, prob_distr_map, action_idx, done, info):\n \"\"\" core of PG functionality. 
here gradients are calculated (model update occurs in rl_env)\"\"\"\n \n valid = (info['outcome'] != 'fail')\n prob, out_map = prob_distr_map\n \n if valid:\n loss_pg = 0\n loss_v = 0\n loss_map = 0\n \n advantage = torch.tensor(0)\n entropy = torch.sum(-torch.log(prob)*prob)\n \n self.traj_rewards.append(reward)\n \n if self.rl_mode == 'AC':\n with torch.no_grad():\n self.traj_state_value.append(self.model_v(state_0))\n elif self.rl_mode == 'parallelAC':\n with torch.no_grad():\n self.traj_state_value.append(torch.max(self.model_qv(state_0)))\n \n self.traj_log_prob.append(torch.log(prob[:,action_idx]))\n self.traj_output_maps.append(out_map)\n \n self.traj_entropy.append(entropy)\n self.state_sequences.append(state_0) \n \n if self.env.env_type == 'RobotEnv' and info['robot_map'] is not None and self.model_pg.partial_outputs:\n self.map_est_sequences.append(info['robot_map'])\n \n if done:\n R = 0\n for i in range(len(self.traj_rewards)):\n R = self.traj_rewards[-1-i] + self.gamma* R\n \n if self.use_reinforce:\n advantage = R\n else:\n advantage = R - self.traj_state_value[-1-i]\n\n self.loss_policy += -advantage*self.traj_log_prob[-1-i] - self.beta_PG*self.traj_entropy[-1-i]\n \n #self.advantage_loss += ( R - torch.max(self.model_qv(self.state_sequences[-1-i].float())) )**2\n if self.rl_mode == 'AC':\n state_value, output_map_v = self.model_v(self.state_sequences[-1-i], return_map = True) #.float())\n self.advantage_loss += ( R - state_value )**2\n \n if self.env.env_type == 'RobotEnv' and info['robot_map'] is not None: # and output_map is not None:\n self.map_est_loss += 1/2*torch.sum( (torch.tensor(info['robot_map'])- self.traj_output_maps[-1-i])**2 + (torch.tensor(info['robot_map'])- output_map_v)**2 )\n map_loss_scalar = self.map_est_loss.item()\n else:\n map_loss_scalar = 0\n \n if self.share_conv_layers:\n total_loss = self.advantage_loss/(1e-5+self.advantage_loss.item()) \\\n + self.loss_policy/(1e-5+abs(self.loss_policy.item())) \\\n + self.map_est_loss/(1e-5+map_loss_scalar)\n else:\n total_loss = self.advantage_loss + self.loss_policy + self.map_est_loss\n \n \n total_loss.backward()\n\n loss_pg = np.round(self.loss_policy.item()/len(self.traj_rewards),3)\n if self.rl_mode == 'AC':\n loss_v = np.round(self.advantage_loss.item()/len(self.traj_rewards),3)\n if torch.is_tensor(self.map_est_loss):\n loss_map = np.round(self.map_est_loss.item()/len(self.traj_rewards),3)\n \n self.initialize_pg_lists()\n\n return loss_pg, entropy.item(), loss_v, loss_map\n\n else:\n self.loss_policy = 0\n return np.nan, np.nan, np.nan, np.nan\n\n ##################################################################################\n def initialize_run(self):\n \"\"\" initialize sim run environment\"\"\"\n \n force_stop = False\n\n if 'AC' in self.rl_mode:\n # check if model contains nan\n for k,v in self.model_pg.state_dict().items():\n if torch.isnan(v).any():\n print('nan tensor from start')\n force_stop = True\n self.model_pg.optimizer.zero_grad()\n self.model_v.optimizer.zero_grad()\n self.initialize_pg_lists()\n\n # initialize environment\n fig_film = None\n if self.show_rendering:\n self.ims = []\n fig_film = plt.figure()\n if self.live_plot:\n self.env.render_mode = 'plot'\n \n self.reset_simulation = True\n self.stop_run = False\n self.simulation_log = None\n self.run_variables_init()\n\n self.pg_loss_hist = []\n self.entropy_hist = []\n self.advantage_hist = []\n self.loss_map_hist = []\n done = False\n self.traj_stats = []\n \n return force_stop, done, fig_film\n\n\n 
##################################################################################\n def sim_routine(self, state, loss_pg , use_controller, use_NN, test_qv):\n \"\"\" single iteration routine for env simulation\"\"\"\n \n force_stop = False\n \n action, action_index, noise_added, prob_distr_map = self.getNextAction(state, use_controller=use_controller, use_NN = use_NN, test_qv=test_qv )\n \n if use_NN and DEBUG:\n print(f'action_index: {action_index}')\n \n state_1, reward_np, done , info = self.stepAndRecord(state, action, action_index, noise_added)\n \n if use_NN and (info['outcome'] is not None) and (info['outcome'] != 'fail') and (info['outcome'] != 'opponent'):\n self.agent_run_variables['successful_runs'] += 1\n \n if 'AC' in self.rl_mode:\n loss_pg, entropy_i, advantage, loss_map = self.pg_calculation(reward_np, state, prob_distr_map, action_index, done, info )\n force_stop = np.isnan(loss_pg)\n self.entropy_hist.append(entropy_i)\n if done:\n self.pg_loss_hist.append(loss_pg)\n self.advantage_hist.append(advantage)\n self.loss_map_hist.append(loss_map) \n if self.verbosity > 0:\n self.displayStatus() \n self.trainVariablesUpdate(reward_np, done, force_stop, no_step = ('no step' in info), info = info )\n \n state = state_1\n\n return done, state, force_stop , loss_pg, info\n\n\n ##################################################################################\n def extract_gradients(self, force_stop):\n \"\"\" extract gradients from PG and V model to be shared with main model for update\"\"\"\n \n pg_info = None\n nan_grad = False\n if 'AC' in self.rl_mode:\n if force_stop:\n pg_info = (None,None,None,None,None,None, False)\n else:\n grad_dict_pg = {}\n grad_dict_v = {}\n for name, param in self.model_pg.named_parameters():\n if param.grad is not None:\n grad_dict_pg[name] = param.grad.clone()\n else:\n grad_dict_pg[name] = 0\n \n if self.rl_mode == 'AC':\n if param.grad is not None:\n\n for name, param in self.model_v.named_parameters():\n if torch.isnan(param.grad).any():\n nan_grad = True\n break\n else:\n grad_dict_v[name] = param.grad.clone()\n \n else:\n grad_dict_v[name] = 0\n\n\n if not nan_grad:\n pg_info = (grad_dict_pg, grad_dict_v, np.average(self.pg_loss_hist) , \\\n round(np.average(self.entropy_hist),4),np.average(self.advantage_hist), \\\n np.average(self.loss_map_hist), True )\n else:\n pg_info = (None,None,None,None,None,None,False)\n print('remote worker has diverging gradients')\n return pg_info\n\n\n ##################################################################################\n def run_synch(self, use_NN = False, pctg_ctrl = 0, test_qv = False):\n \"\"\"synchronous implementation of sim-run \"\"\"\n \n force_stop, done, fig_film = self.initialize_run()\n info = {}\n \n while not self.stop_run :\n \n if self.reset_simulation:\n loss_pg = 0\n if self.agent_run_variables['single_run'] >= self.max_n_single_runs+1:\n break\n state, use_controller = self.reset_agent(pctg_ctrl, evaluate = use_NN , info = info)\n \n if 'state' in locals():\n done, state, force_stop , loss_pg, info = self.sim_routine(state, loss_pg , use_controller, use_NN, test_qv)\n else:\n force_stop = True\n self.stop_run = True \n \n if 'AC' in self.rl_mode and self.reset_simulation and not (use_NN or done):\n self.stop_run = True\n force_stop = True \n\n single_run_log = self.trainVariablesUpdate(reset_variables = True, info = info)\n self.update_sim_log(single_run_log)\n self.endOfRunRoutine(fig_film = fig_film)\n plt.close(fig_film)\n \n pg_info = self.extract_gradients(force_stop)\n\n 
return self.simulation_log, self.agent_run_variables['single_run'], self.agent_run_variables['successful_runs'], \\\n self.traj_stats, self.internal_memory.fill_ratio, pg_info #[0]\n # self.simulation_log contains duration and cumulative reward of every single-run\n\n\n ##################################################################################\n async def run(self, use_NN = False, pctg_ctrl = 0, test_qv = False):\n \"\"\"synchronous implementation of sim-run \"\"\"\n \n force_stop, done, fig_film = self.initialize_run()\n info = {}\n self.is_running = True\n self.external_force_stop = False\n \n while not (self.stop_run or self.external_force_stop):\n \n await asyncio.sleep(0.00001)\n \n if self.reset_simulation:\n loss_pg = 0\n if self.agent_run_variables['single_run'] >= self.max_n_single_runs+1:\n break\n state, use_controller = self.reset_agent(pctg_ctrl, evaluate = use_NN )\n\n if 'state' in locals():\n done, state, force_stop , loss_pg, info = self.sim_routine(state, loss_pg , use_controller, use_NN, test_qv)\n else:\n force_stop = True\n self.stop_run = True \n \n if 'AC' in self.rl_mode and self.reset_simulation and not done and not self.stop_run:\n self.stop_run = True\n force_stop = True\n\n single_run_log = self.trainVariablesUpdate(reset_variables = True, info = info)\n self.update_sim_log(single_run_log)\n self.endOfRunRoutine(fig_film = fig_film)\n plt.close(fig_film)\n \n pg_info = self.extract_gradients(self.external_force_stop or force_stop)\n self.is_running = False\n\n return self.simulation_log, self.agent_run_variables['single_run'], self.agent_run_variables['successful_runs'], \\\n self.traj_stats , self.internal_memory.fill_ratio, pg_info #[0]\n # self.simulation_log contains duration and cumulative reward of every single-run\n\n\n ################################################################################\n async def force_stop(self):\n self.external_force_stop = True\n\n \n ################################################################################\n def emptyLocalMemory(self):\n return self.internal_memory.getMemoryAndEmpty()\n \n \n ################################################################################\n def stepAndRecord(self,state, action,action_index, noise_added):\n \n #################\n try:\n # in case of infeasibility issues, random_gen allows a feasible input to be re-generated inside the environment, to accelerate the learning process\n action_bool_array = action.detach().numpy()\n state_obs_1, reward_np, done, info = self.env.action(action_bool_array)\n \n if np.isnan(reward_np) or np.isinf(reward_np):\n self.agent_run_variables['failed_iteration'] = True\n \n elif 'move changed' in info:\n action = 0*action\n action_index = self.env.get_action_idx(info['move changed'])\n action[ action_index ] = 1\n elif self.show_rendering and DEBUG:\n # we show the selected action of the not corrected ones\n print(f'selected action = {self.env.boolarray_to_action(action_bool_array)}')\n\n except Exception:\n self.agent_run_variables['failed_iteration'] = True\n\n ################# \n if not self.agent_run_variables['failed_iteration']:\n\n if self.show_rendering:\n self.renderAnimation(action, done, reward_np)\n ## restructure new data\n state_1 = self.env.get_net_input(state_obs_1, state_tensor_z1 = state )\n\n action = action.unsqueeze(0)\n reward = torch.from_numpy(np.array([reward_np], dtype=np.float32)).unsqueeze(0)\n \n if self.rl_mode != 'AC':\n info_out = None\n if 'robot_map' in info:\n info_out = 
torch.tensor(info['robot_map']).unsqueeze(0).float()\n # build \"new transition\" and add it to replay memory\n new_transition = (state, action, reward, state_1, done, action_index, info_out)\n self.internal_memory.addInstance(new_transition)\n \n if not 'outcome' in info:\n info['outcome'] = None\n \n else:\n reward_np = np.nan\n state = None\n state_1 = None\n done = None\n info = {'outcome' : 'fail'}\n \n return state_1, reward_np, done, info\n \n ##################################################################################\n def endOfRunRoutine(self, fig_film):\n # end of training routine\n if self.verbosity > 0:\n print(f'completed runs: {self.agent_run_variables[\"single_run\"]}')\n display_failed_its = round(100*self.agent_run_variables[\"fails_count\"]/self.agent_run_variables[\"iteration\"],1)\n print(f\"failed iterations: {display_failed_its}%\")\n display_failed_runs = round(100*self.agent_run_variables[\"fails_count\"]/(self.agent_run_variables[\"fails_count\"] + self.agent_run_variables[\"single_run\"]),1)\n print(f\"failed runs: {display_failed_runs}%\")\n \n if self.show_rendering and self.agent_run_variables[\"single_run\"]>=self.movie_frequency and \\\n not self.live_plot and not self.agent_run_variables[\"consecutive_fails\"] >= 3:\n \n try:\n kernel_exec = False\n ip = get_ipython()\n if ip.has_trait('kernel'):\n kernel_exec = True\n except Exception:\n kernel_exec = False\n \n ani = None\n if self.env.env_type == 'RobotEnv':\n ani , filename , duration=self.getVideoRobotEnv(fig_film)\n elif self.env.env_type == 'CartPole':\n ani , filename, duration=self.getVideoCartPole(fig_film)\n\n\n if ani is not None:\n\n if not kernel_exec:\n\n #print(f'duration = {duration}s')\n plt.show(block=False)\n plt.waitforbuttonpress(round(duration))\n \n if self.save_movie:\n \n if 'AC' in self.rl_mode :\n net_type = self.model_pg.net_type\n elif self.rl_mode == 'DQL':\n net_type = self.model_qv.net_type\n else:\n raise('RL mode not defined') \n \n store_path= os.path.join( self.storage_path, 'video' )\n createPath(store_path).mkdir(parents=True, exist_ok=True)\n \n full_filename = os.path.join(store_path, filename)\n ani.save(full_filename)\n \n\n ##################################################################################\n def getVideoCartPole(self, fig_film):\n filename = self.net_name +'.mp4'\n interval = round(0.5*self.env.dt*1000)\n duration = round(0.001*len(self.ims)*interval,1)\n ani = self.env.get_gif(fig = fig_film, save_history = (not self.agent_run_variables['single_run'] % self.movie_frequency) )\n return ani, filename, duration\n\n ##################################################################################\n def getVideoRobotEnv(self, fig_film):\n # in case it was a live_plot test\n #if self.live_plot:\n # self.env.render_mode = 'animation'\n filename = self.net_name +'_'+ str(self.rl_mode) +'.mp4'\n interval = round(0.5*self.env.dt*1000)\n ani = animation.ArtistAnimation(fig_film, self.ims, interval=interval, blit=True)\n duration = round(0.001*len(self.ims)*interval,1)\n return ani, filename, duration\n\n ##################################################################################\n def getNextAction(self,state, use_controller = False, use_NN = False, test_qv = False):\n # initialize action\n prob_distrib = None\n noise_added = False\n action = torch.zeros([self.n_actions], dtype=torch.bool)\n # PG only uses greedy approach \n if 'AC' in self.rl_mode:\n prob_distrib, map_output = self.model_pg.cpu()(state, return_map = True)\n\n try:\n 
action_index = torch.multinomial(prob_distrib, 1, replacement=True)\n except Exception:\n action_index = torch.argmax(prob_distrib)\n print('torch.multinomial failed')\n \n elif self.rl_mode == 'DQL':\n if use_controller and not use_NN:\n action_index = torch.tensor(self.env.get_control_idx(discretized = True), dtype = torch.int8)\n elif use_NN:\n qvals = self.model_qv.cpu()(state)\n action_index = torch.argmax(qvals)\n else:\n if random.random() <= self.epsilon and not use_NN:\n #action_index = torch.randint(self.n_actions, torch.Size([]), dtype=torch.int8)\n #action_index = torch.randint(self.n_actions, (1,), dtype=torch.int8)\n action_index = torch.tensor(random.randint(0,self.n_actions-1))\n else:\n # this function allows to randomly choose other good performing q_values\n qvals = self.model_qv.cpu()(state)\n action_index = torch.argmax(qvals)\n #action_index = prob_action_idx_qval(qvals) \n \n #prob_distrib = torch.softmax(qvals,-1)\n #action_index = torch.multinomial(prob_distrib, 1, replacement=True)\n \n action[action_index] = 1\n \n return action, action_index, noise_added, (prob_distrib, map_output) if 'AC' in self.rl_mode else None\n\n \n ##################################################################################\n def trainVariablesUpdate(self, reward_np = 0, done = False, force_stop = False, \\\n reset_variables = False, no_step = False, info = None):\n # output is the model output given a certain state in\n \n if reset_variables:\n\n if self.agent_run_variables['single_run']>0: \n cum_reward_data = self.agent_run_variables['cum_reward']\n \"\"\"\n if self.env.env_type == 'Frx' and self.agent_run_variables['steps_since_start']>0:\n if info is not None:\n cum_reward_data = info['return']\n #print(f'single run return: {cum_reward_data}')\n else:\n cum_reward_data = 0\n \"\"\"\n single_run_log = np.array([self.agent_run_variables['steps_since_start'] , cum_reward_data ] )[np.newaxis,:]\n \n else:\n single_run_log = None \n\n self.agent_run_variables['steps_since_start'] = 0\n self.agent_run_variables['failed_iteration'] = False\n \n self.agent_run_variables['single_run'] +=1\n \n self.reward_history.append(self.agent_run_variables['cum_reward'])\n self.agent_run_variables['cum_reward'] = 0\n\n return single_run_log\n \n else:\n if self.agent_run_variables['failed_iteration']:\n self.agent_run_variables['iteration'] += 1\n self.agent_run_variables['fails_count'] += 1\n self.agent_run_variables['consecutive_fails'] += 1\n else:\n ## end of iteration - training parameters update (also used in testing)\n self.agent_run_variables['iteration'] += 1\n self.agent_run_variables['consecutive_fails'] = 0\n # keep track of the length of the current run and of the gained rewards\n self.agent_run_variables['steps_since_start'] += 1*int(not no_step)\n self.agent_run_variables['cum_reward'] += reward_np[0] if isinstance(reward_np, np.ndarray) else reward_np \n \n # update of reset_simulation and stop_run\n if self.agent_run_variables['failed_iteration'] or done or self.agent_run_variables['steps_since_start'] > self.max_steps_single_run +1 : # (+1 is added to be able to verify the condition in the next step)\n self.reset_simulation = True\n if done and 'termination' in info:\n self.traj_stats.append(info['termination'])\n\n if self.agent_run_variables['consecutive_fails'] >= self.max_consecutive_env_fails or self.agent_run_variables['iteration'] >= self.tot_iterations or force_stop:\n self.stop_run = True\n pass\n \n 
##################################################################################\n def displayStatus(self, loss_np = []):\n if self.agent_run_variables['iteration'] % self.display_status_frequency == 0: #update weights has not occurred but we're in the \"display iteration\"\n perc_completed = round(self.agent_run_variables['iteration']/self.tot_iterations*100,1) \n if not loss_np:\n print(f'agent = {self.sim_agent_id}, iteration = {self.agent_run_variables[\"iteration\"]}, completed = {round(perc_completed,1)}%')\n else:\n print(f\"agent = {self.sim_agent_id}, iteration = {self.agent_run_variables['iteration']}, completed = {round(perc_completed,1)}%, loss = {np.round(loss_np,2)}, epsilon = {round(self.epsilon,2)}\") \n\n\ndef prob_action_idx_qval(qvals):\n _, indices = torch.sort(-qvals)\n p = np.zeros(indices.shape[1])\n for c,i in enumerate(indices.squeeze(0).numpy()):\n p[i] = 2.**(-c-1)\n return torch.multinomial(torch.tensor(p), 1, replacement=True).squeeze(0)\n\n\n\n#%%\n\n\nfrom ...envs.gymstyle_envs import DiscrGymStyleRobot\nfrom ...nns.robot_net import ConvModel\n\nif __name__ == \"__main__\":\n #env = SimulationAgent(0, env = GymStyle_Robot(n_bins_act=2), model = ConvModel())\n gym_env = DiscrGymStyleRobot(n_bins_act=2)\n \n agent = SimulationAgent(0, env=gym_env, n_frames=4 , model_qv = ConvModel() )\n agent.max_steps_single_run = 50\n \n \n agent.save_movie = False\n agent.movie_frequency = 1\n agent.tot_iterations = 100\n agent.run_synch()\n\n"
] | [
[
"torch.isnan",
"torch.multinomial",
"torch.is_tensor",
"torch.tensor",
"numpy.append",
"torch.zeros",
"numpy.array",
"numpy.zeros",
"numpy.round",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"torch.log",
"numpy.average",
"torch.argmax",
"torch.sort",
"numpy.isinf",
"numpy.isnan",
"torch.no_grad",
"matplotlib.animation.ArtistAnimation"
]
] |
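The SimulationAgent code in the entry above selects actions in two ways: in actor-critic ('AC') mode it samples an action index from the policy's probability distribution with torch.multinomial (falling back to argmax if sampling fails), and in 'DQL' mode it uses epsilon-greedy argmax over predicted Q-values. The following is a minimal, self-contained sketch of those two patterns for reference only; it is not part of the dataset entry, and the names policy_net, q_net, n_actions, and epsilon are assumptions made for illustration.

    # Illustrative sketch (not from the dataset entry): action-selection patterns
    # similar to SimulationAgent.getNextAction above.
    import random
    import torch

    def sample_action_ac(policy_net, state):
        """Actor-critic style: sample an action index from the policy distribution."""
        probs = policy_net(state)                    # assumed to return a 1-D probability vector
        try:
            idx = torch.multinomial(probs, 1, replacement=True).item()
        except RuntimeError:
            idx = torch.argmax(probs).item()         # fall back to greedy if sampling fails
        return idx

    def sample_action_dql(q_net, state, n_actions, epsilon):
        """DQL style: epsilon-greedy over predicted Q-values."""
        if random.random() <= epsilon:
            return random.randint(0, n_actions - 1)  # explore: uniform random action
        qvals = q_net(state)
        return torch.argmax(qvals).item()            # exploit: best predicted Q-value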
Torato-Taraka/Fingerprint | [
"badea84d89db696a5b090130498343df032ed6f8"
] | [
"minutiae_former.py"
] | [
"import argparse\nfrom myPackage import tools as tl\nfrom myPackage import preprocess\nfrom myPackage import minutiaeExtraction as minExtract\nfrom enhancementFP import image_enhance as img_e\nfrom os.path import basename, splitext\nimport os\nimport time\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef minutiae_extract(path, results):\n plot = False\n # ratio = 0.2\n # Extract names\n all_images = []\n for f in os.listdir(path):\n all_images.append(os.path.join(path, f))\n # Split train and test data\n # train_data, test_data = tl.split_train_test(all_images, ratio)\n print(\"\\nAll_images size: {}\\n\".format(len(all_images)))\n all_times= []\n for image in all_images:\n start = time.time()\n name = splitext(basename(image))[0]\n print(image)\n print(\"\\nProcessing image '{}'\".format(name))\n cleaned_img = preprocess.blurrImage(image, name, plot)\n enhanced_img = img_e.image_enhance(cleaned_img, name, plot)\n print(type(enhanced_img))\n cleaned_img = preprocess.cleanImage(enhanced_img, name, plot)\n skeleton = preprocess.thinImage(cleaned_img, name, plot)\n minExtract.process(skeleton, name, plot, results)\n all_times.append((time.time()-start))\n mean, std = 0, 0\n mean = np.mean(all_times)\n std = np.std(all_times)\n print(\"\\n\\nAlgorithm takes {:2.3f} (+/-{:2.3f}) seconds per image\".format(mean, std))\n\n \nif __name__ == \"__main__\" :\n minutiae_extract(\"test\\\\\", \"test_result\\\\\")"
] | [
[
"numpy.std",
"numpy.mean"
]
] |
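The minutiae_former.py entry above times each per-image enhancement/thinning/extraction pass and reports the mean and standard deviation over all images with numpy. Below is a hedged, stand-alone sketch of that timing pattern; process_image is a hypothetical placeholder for the blur/enhance/clean/thin/extract chain in the entry, not an identifier from it.

    # Illustrative sketch (not from the dataset entry): per-item timing with a
    # mean +/- std summary, mirroring the loop in minutiae_former.py above.
    import time
    import numpy as np

    def time_pipeline(paths, process_image):
        """Run process_image on each path and report mean/std runtime per image."""
        durations = []
        for path in paths:
            start = time.time()
            process_image(path)                      # the actual per-image work
            durations.append(time.time() - start)
        mean, std = np.mean(durations), np.std(durations)
        print("Algorithm takes {:2.3f} (+/-{:2.3f}) seconds per image".format(mean, std))
        return mean, std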
shiv-io/pandas | [
"a1d9c96389b27e55d1d9910f97ae42fdf3e2b393"
] | [
"pandas/core/internals/managers.py"
] | [
"from __future__ import annotations\n\nfrom collections import defaultdict\nimport itertools\nfrom typing import (\n Any,\n Callable,\n DefaultDict,\n Hashable,\n Sequence,\n TypeVar,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n internals as libinternals,\n lib,\n)\nfrom pandas._libs.internals import BlockPlacement\nfrom pandas._typing import (\n ArrayLike,\n DtypeObj,\n Shape,\n npt,\n type_t,\n)\nfrom pandas.errors import PerformanceWarning\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.cast import infer_dtype_from_scalar\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_1d_only_ea_dtype,\n is_dtype_equal,\n is_list_like,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import (\n array_equals,\n isna,\n)\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays._mixins import NDArrayBackedExtensionArray\nfrom pandas.core.arrays.sparse import SparseDtype\nfrom pandas.core.construction import (\n ensure_wrapped_if_datetimelike,\n extract_array,\n)\nfrom pandas.core.indexers import maybe_convert_indices\nfrom pandas.core.indexes.api import (\n Float64Index,\n Index,\n ensure_index,\n)\nfrom pandas.core.internals.base import (\n DataManager,\n SingleDataManager,\n interleaved_dtype,\n)\nfrom pandas.core.internals.blocks import (\n Block,\n CategoricalBlock,\n DatetimeTZBlock,\n ExtensionBlock,\n ensure_block_shape,\n extend_blocks,\n get_block_type,\n maybe_coerce_values,\n new_block,\n)\nfrom pandas.core.internals.ops import (\n blockwise_all,\n operate_blockwise,\n)\n\n# TODO: flexible with index=None and/or items=None\n\nT = TypeVar(\"T\", bound=\"BaseBlockManager\")\n\n\nclass BaseBlockManager(DataManager):\n \"\"\"\n Core internal data structure to implement DataFrame, Series, etc.\n\n Manage a bunch of labeled 2D mixed-type ndarrays. 
Essentially it's a\n lightweight blocked set of labeled data to be manipulated by the DataFrame\n public API class\n\n Attributes\n ----------\n shape\n ndim\n axes\n values\n items\n\n Methods\n -------\n set_axis(axis, new_labels)\n copy(deep=True)\n\n get_dtypes\n\n apply(func, axes, block_filter_fn)\n\n get_bool_data\n get_numeric_data\n\n get_slice(slice_like, axis)\n get(label)\n iget(loc)\n\n take(indexer, axis)\n reindex_axis(new_labels, axis)\n reindex_indexer(new_labels, indexer, axis)\n\n delete(label)\n insert(loc, label, value)\n set(label, value)\n\n Parameters\n ----------\n blocks: Sequence of Block\n axes: Sequence of Index\n verify_integrity: bool, default True\n\n Notes\n -----\n This is *not* a public API class\n \"\"\"\n\n __slots__ = ()\n\n _blknos: np.ndarray\n _blklocs: np.ndarray\n blocks: tuple[Block, ...]\n axes: list[Index]\n\n ndim: int\n _known_consolidated: bool\n _is_consolidated: bool\n\n def __init__(self, blocks, axes, verify_integrity: bool = True):\n raise NotImplementedError\n\n @classmethod\n def from_blocks(cls: type_t[T], blocks: list[Block], axes: list[Index]) -> T:\n raise NotImplementedError\n\n @property\n def blknos(self):\n \"\"\"\n Suppose we want to find the array corresponding to our i'th column.\n\n blknos[i] identifies the block from self.blocks that contains this column.\n\n blklocs[i] identifies the column of interest within\n self.blocks[self.blknos[i]]\n \"\"\"\n if self._blknos is None:\n # Note: these can be altered by other BlockManager methods.\n self._rebuild_blknos_and_blklocs()\n\n return self._blknos\n\n @property\n def blklocs(self):\n \"\"\"\n See blknos.__doc__\n \"\"\"\n if self._blklocs is None:\n # Note: these can be altered by other BlockManager methods.\n self._rebuild_blknos_and_blklocs()\n\n return self._blklocs\n\n def make_empty(self: T, axes=None) -> T:\n \"\"\"return an empty BlockManager with the items axis of len 0\"\"\"\n if axes is None:\n axes = [Index([])] + self.axes[1:]\n\n # preserve dtype if possible\n if self.ndim == 1:\n assert isinstance(self, SingleBlockManager) # for mypy\n blk = self.blocks[0]\n arr = blk.values[:0]\n bp = BlockPlacement(slice(0, 0))\n nb = blk.make_block_same_class(arr, placement=bp)\n blocks = [nb]\n else:\n blocks = []\n return type(self).from_blocks(blocks, axes)\n\n def __nonzero__(self) -> bool:\n return True\n\n # Python3 compat\n __bool__ = __nonzero__\n\n def _normalize_axis(self, axis: int) -> int:\n # switch axis to follow BlockManager logic\n if self.ndim == 2:\n axis = 1 if axis == 0 else 0\n return axis\n\n def set_axis(self, axis: int, new_labels: Index) -> None:\n # Caller is responsible for ensuring we have an Index object.\n self._validate_set_axis(axis, new_labels)\n self.axes[axis] = new_labels\n\n @property\n def is_single_block(self) -> bool:\n # Assumes we are 2D; overridden by SingleBlockManager\n return len(self.blocks) == 1\n\n def _rebuild_blknos_and_blklocs(self) -> None:\n \"\"\"\n Update mgr._blknos / mgr._blklocs.\n \"\"\"\n new_blknos = np.empty(self.shape[0], dtype=np.intp)\n new_blklocs = np.empty(self.shape[0], dtype=np.intp)\n new_blknos.fill(-1)\n new_blklocs.fill(-1)\n\n for blkno, blk in enumerate(self.blocks):\n rl = blk.mgr_locs\n new_blknos[rl.indexer] = blkno\n new_blklocs[rl.indexer] = np.arange(len(rl))\n\n if (new_blknos == -1).any():\n # TODO: can we avoid this? 
it isn't cheap\n raise AssertionError(\"Gaps in blk ref_locs\")\n\n self._blknos = new_blknos\n self._blklocs = new_blklocs\n\n @property\n def items(self) -> Index:\n return self.axes[0]\n\n def get_dtypes(self):\n dtypes = np.array([blk.dtype for blk in self.blocks])\n return dtypes.take(self.blknos)\n\n @property\n def arrays(self) -> list[ArrayLike]:\n \"\"\"\n Quick access to the backing arrays of the Blocks.\n\n Only for compatibility with ArrayManager for testing convenience.\n Not to be used in actual code, and return value is not the same as the\n ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs).\n \"\"\"\n return [blk.values for blk in self.blocks]\n\n def __repr__(self) -> str:\n output = type(self).__name__\n for i, ax in enumerate(self.axes):\n if i == 0:\n output += f\"\\nItems: {ax}\"\n else:\n output += f\"\\nAxis {i}: {ax}\"\n\n for block in self.blocks:\n output += f\"\\n{block}\"\n return output\n\n def apply(\n self: T,\n f,\n align_keys: list[str] | None = None,\n ignore_failures: bool = False,\n **kwargs,\n ) -> T:\n \"\"\"\n Iterate over the blocks, collect and create a new BlockManager.\n\n Parameters\n ----------\n f : str or callable\n Name of the Block method to apply.\n align_keys: List[str] or None, default None\n ignore_failures: bool, default False\n **kwargs\n Keywords to pass to `f`\n\n Returns\n -------\n BlockManager\n \"\"\"\n assert \"filter\" not in kwargs\n\n align_keys = align_keys or []\n result_blocks: list[Block] = []\n # fillna: Series/DataFrame is responsible for making sure value is aligned\n\n aligned_args = {k: kwargs[k] for k in align_keys}\n\n for b in self.blocks:\n\n if aligned_args:\n\n for k, obj in aligned_args.items():\n if isinstance(obj, (ABCSeries, ABCDataFrame)):\n # The caller is responsible for ensuring that\n # obj.axes[-1].equals(self.items)\n if obj.ndim == 1:\n kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values\n else:\n kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values\n else:\n # otherwise we have an ndarray\n kwargs[k] = obj[b.mgr_locs.indexer]\n\n try:\n if callable(f):\n applied = b.apply(f, **kwargs)\n else:\n applied = getattr(b, f)(**kwargs)\n except (TypeError, NotImplementedError):\n if not ignore_failures:\n raise\n continue\n result_blocks = extend_blocks(applied, result_blocks)\n\n if ignore_failures:\n return self._combine(result_blocks)\n\n return type(self).from_blocks(result_blocks, self.axes)\n\n def where(self: T, other, cond, align: bool, errors: str) -> T:\n if align:\n align_keys = [\"other\", \"cond\"]\n else:\n align_keys = [\"cond\"]\n other = extract_array(other, extract_numpy=True)\n\n return self.apply(\n \"where\",\n align_keys=align_keys,\n other=other,\n cond=cond,\n errors=errors,\n )\n\n def setitem(self: T, indexer, value) -> T:\n return self.apply(\"setitem\", indexer=indexer, value=value)\n\n def putmask(self, mask, new, align: bool = True):\n\n if align:\n align_keys = [\"new\", \"mask\"]\n else:\n align_keys = [\"mask\"]\n new = extract_array(new, extract_numpy=True)\n\n return self.apply(\n \"putmask\",\n align_keys=align_keys,\n mask=mask,\n new=new,\n )\n\n def diff(self: T, n: int, axis: int) -> T:\n axis = self._normalize_axis(axis)\n return self.apply(\"diff\", n=n, axis=axis)\n\n def interpolate(self: T, **kwargs) -> T:\n return self.apply(\"interpolate\", **kwargs)\n\n def shift(self: T, periods: int, axis: int, fill_value) -> T:\n axis = self._normalize_axis(axis)\n if fill_value is lib.no_default:\n fill_value = None\n\n if axis == 0 and self.ndim == 
2 and self.nblocks > 1:\n # GH#35488 we need to watch out for multi-block cases\n # We only get here with fill_value not-lib.no_default\n ncols = self.shape[0]\n if periods > 0:\n indexer = np.array(\n [-1] * periods + list(range(ncols - periods)), dtype=np.intp\n )\n else:\n nper = abs(periods)\n indexer = np.array(\n list(range(nper, ncols)) + [-1] * nper, dtype=np.intp\n )\n result = self.reindex_indexer(\n self.items,\n indexer,\n axis=0,\n fill_value=fill_value,\n allow_dups=True,\n consolidate=False,\n )\n return result\n\n return self.apply(\"shift\", periods=periods, axis=axis, fill_value=fill_value)\n\n def fillna(self: T, value, limit, inplace: bool, downcast) -> T:\n return self.apply(\n \"fillna\", value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n\n def downcast(self: T) -> T:\n return self.apply(\"downcast\")\n\n def astype(self: T, dtype, copy: bool = False, errors: str = \"raise\") -> T:\n return self.apply(\"astype\", dtype=dtype, copy=copy, errors=errors)\n\n def convert(\n self: T,\n copy: bool = True,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n ) -> T:\n return self.apply(\n \"convert\",\n copy=copy,\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n )\n\n def replace(self: T, to_replace, value, inplace: bool, regex: bool) -> T:\n assert np.ndim(value) == 0, value\n return self.apply(\n \"replace\", to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n\n def replace_list(\n self: T,\n src_list: list[Any],\n dest_list: list[Any],\n inplace: bool = False,\n regex: bool = False,\n ) -> T:\n \"\"\"do a list replace\"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n bm = self.apply(\n \"_replace_list\",\n src_list=src_list,\n dest_list=dest_list,\n inplace=inplace,\n regex=regex,\n )\n bm._consolidate_inplace()\n return bm\n\n def to_native_types(self: T, **kwargs) -> T:\n \"\"\"\n Convert values to native types (strings / python objects) that are used\n in formatting (repr / csv).\n \"\"\"\n return self.apply(\"to_native_types\", **kwargs)\n\n def is_consolidated(self) -> bool:\n \"\"\"\n Return True if more than one block with the same dtype\n \"\"\"\n if not self._known_consolidated:\n self._consolidate_check()\n return self._is_consolidated\n\n def _consolidate_check(self) -> None:\n dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate]\n self._is_consolidated = len(dtypes) == len(set(dtypes))\n self._known_consolidated = True\n\n @property\n def is_numeric_mixed_type(self) -> bool:\n return all(block.is_numeric for block in self.blocks)\n\n @property\n def any_extension_types(self) -> bool:\n \"\"\"Whether any of the blocks in this manager are extension blocks\"\"\"\n return any(block.is_extension for block in self.blocks)\n\n @property\n def is_view(self) -> bool:\n \"\"\"return a boolean if we are a single block and are a view\"\"\"\n if len(self.blocks) == 1:\n return self.blocks[0].is_view\n\n # It is technically possible to figure out which blocks are views\n # e.g. [ b.values.base is not None for b in self.blocks ]\n # but then we have the case of possibly some blocks being a view\n # and some blocks not. setting in theory is possible on the non-view\n # blocks w/o causing a SettingWithCopy raise/warn. 
But this is a bit\n # complicated\n\n return False\n\n def _get_data_subset(self: T, predicate: Callable) -> T:\n blocks = [blk for blk in self.blocks if predicate(blk.values)]\n return self._combine(blocks, copy=False)\n\n def get_bool_data(self: T, copy: bool = False) -> T:\n \"\"\"\n Select blocks that are bool-dtype and columns from object-dtype blocks\n that are all-bool.\n\n Parameters\n ----------\n copy : bool, default False\n Whether to copy the blocks\n \"\"\"\n\n new_blocks = []\n\n for blk in self.blocks:\n if blk.dtype == bool:\n new_blocks.append(blk)\n\n elif blk.is_object:\n nbs = blk._split()\n for nb in nbs:\n if nb.is_bool:\n new_blocks.append(nb)\n\n return self._combine(new_blocks, copy)\n\n def get_numeric_data(self: T, copy: bool = False) -> T:\n \"\"\"\n Parameters\n ----------\n copy : bool, default False\n Whether to copy the blocks\n \"\"\"\n return self._combine([b for b in self.blocks if b.is_numeric], copy)\n\n def _combine(\n self: T, blocks: list[Block], copy: bool = True, index: Index | None = None\n ) -> T:\n \"\"\"return a new manager with the blocks\"\"\"\n if len(blocks) == 0:\n if self.ndim == 2:\n # retain our own Index dtype\n if index is not None:\n axes = [self.items[:0], index]\n else:\n axes = [self.items[:0]] + self.axes[1:]\n return self.make_empty(axes)\n return self.make_empty()\n\n # FIXME: optimization potential\n indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))\n inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])\n\n new_blocks: list[Block] = []\n for b in blocks:\n b = b.copy(deep=copy)\n b.mgr_locs = BlockPlacement(inv_indexer[b.mgr_locs.indexer])\n new_blocks.append(b)\n\n axes = list(self.axes)\n if index is not None:\n axes[-1] = index\n axes[0] = self.items.take(indexer)\n\n return type(self).from_blocks(new_blocks, axes)\n\n @property\n def nblocks(self) -> int:\n return len(self.blocks)\n\n def copy(self: T, deep=True) -> T:\n \"\"\"\n Make deep or shallow copy of BlockManager\n\n Parameters\n ----------\n deep : bool or string, default True\n If False, return shallow copy (do not copy data)\n If 'all', copy data and a deep copy of the index\n\n Returns\n -------\n BlockManager\n \"\"\"\n # this preserves the notion of view copying of axes\n if deep:\n # hit in e.g. 
tests.io.json.test_pandas\n\n def copy_func(ax):\n return ax.copy(deep=True) if deep == \"all\" else ax.view()\n\n new_axes = [copy_func(ax) for ax in self.axes]\n else:\n new_axes = list(self.axes)\n\n res = self.apply(\"copy\", deep=deep)\n res.axes = new_axes\n\n if deep:\n res._consolidate_inplace()\n return res\n\n def consolidate(self: T) -> T:\n \"\"\"\n Join together blocks having same dtype\n\n Returns\n -------\n y : BlockManager\n \"\"\"\n if self.is_consolidated():\n return self\n\n bm = type(self)(self.blocks, self.axes, verify_integrity=False)\n bm._is_consolidated = False\n bm._consolidate_inplace()\n return bm\n\n def _consolidate_inplace(self) -> None:\n if not self.is_consolidated():\n self.blocks = tuple(_consolidate(self.blocks))\n self._is_consolidated = True\n self._known_consolidated = True\n self._rebuild_blknos_and_blklocs()\n\n def reindex_indexer(\n self: T,\n new_axis: Index,\n indexer,\n axis: int,\n fill_value=None,\n allow_dups: bool = False,\n copy: bool = True,\n consolidate: bool = True,\n only_slice: bool = False,\n ) -> T:\n \"\"\"\n Parameters\n ----------\n new_axis : Index\n indexer : ndarray of int64 or None\n axis : int\n fill_value : object, default None\n allow_dups : bool, default False\n copy : bool, default True\n consolidate: bool, default True\n Whether to consolidate inplace before reindexing.\n only_slice : bool, default False\n Whether to take views, not copies, along columns.\n\n pandas-indexer with -1's only.\n \"\"\"\n if indexer is None:\n if new_axis is self.axes[axis] and not copy:\n return self\n\n result = self.copy(deep=copy)\n result.axes = list(self.axes)\n result.axes[axis] = new_axis\n return result\n\n if consolidate:\n self._consolidate_inplace()\n\n # some axes don't allow reindexing with dups\n if not allow_dups:\n self.axes[axis]._validate_can_reindex(indexer)\n\n if axis >= self.ndim:\n raise IndexError(\"Requested axis not found in manager\")\n\n if axis == 0:\n new_blocks = self._slice_take_blocks_ax0(\n indexer, fill_value=fill_value, only_slice=only_slice\n )\n else:\n new_blocks = [\n blk.take_nd(\n indexer,\n axis=1,\n fill_value=(\n fill_value if fill_value is not None else blk.fill_value\n ),\n )\n for blk in self.blocks\n ]\n\n new_axes = list(self.axes)\n new_axes[axis] = new_axis\n\n return type(self).from_blocks(new_blocks, new_axes)\n\n def _slice_take_blocks_ax0(\n self,\n slice_or_indexer: slice | np.ndarray,\n fill_value=lib.no_default,\n only_slice: bool = False,\n ) -> list[Block]:\n \"\"\"\n Slice/take blocks along axis=0.\n\n Overloaded for SingleBlock\n\n Parameters\n ----------\n slice_or_indexer : slice or np.ndarray[int64]\n fill_value : scalar, default lib.no_default\n only_slice : bool, default False\n If True, we always return views on existing arrays, never copies.\n This is used when called from ops.blockwise.operate_blockwise.\n\n Returns\n -------\n new_blocks : list of Block\n \"\"\"\n allow_fill = fill_value is not lib.no_default\n\n sl_type, slobj, sllen = _preprocess_slice_or_indexer(\n slice_or_indexer, self.shape[0], allow_fill=allow_fill\n )\n\n if self.is_single_block:\n blk = self.blocks[0]\n\n if sl_type == \"slice\":\n # GH#32959 EABlock would fail since we can't make 0-width\n # TODO(EA2D): special casing unnecessary with 2D EAs\n if sllen == 0:\n return []\n bp = BlockPlacement(slice(0, sllen))\n return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)]\n elif not allow_fill or self.ndim == 1:\n if allow_fill and fill_value is None:\n fill_value = blk.fill_value\n\n if not 
allow_fill and only_slice:\n # GH#33597 slice instead of take, so we get\n # views instead of copies\n blocks = [\n blk.getitem_block_columns(\n slice(ml, ml + 1), new_mgr_locs=BlockPlacement(i)\n )\n for i, ml in enumerate(slobj)\n ]\n # We have\n # all(np.shares_memory(nb.values, blk.values) for nb in blocks)\n return blocks\n else:\n bp = BlockPlacement(slice(0, sllen))\n return [\n blk.take_nd(\n slobj,\n axis=0,\n new_mgr_locs=bp,\n fill_value=fill_value,\n )\n ]\n\n if sl_type == \"slice\":\n blknos = self.blknos[slobj]\n blklocs = self.blklocs[slobj]\n else:\n blknos = algos.take_nd(\n self.blknos, slobj, fill_value=-1, allow_fill=allow_fill\n )\n blklocs = algos.take_nd(\n self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill\n )\n\n # When filling blknos, make sure blknos is updated before appending to\n # blocks list, that way new blkno is exactly len(blocks).\n blocks = []\n group = not only_slice\n for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):\n if blkno == -1:\n # If we've got here, fill_value was not lib.no_default\n\n blocks.append(\n self._make_na_block(placement=mgr_locs, fill_value=fill_value)\n )\n else:\n blk = self.blocks[blkno]\n\n # Otherwise, slicing along items axis is necessary.\n if not blk._can_consolidate and not blk._validate_ndim:\n # i.e. we dont go through here for DatetimeTZBlock\n # A non-consolidatable block, it's easy, because there's\n # only one item and each mgr loc is a copy of that single\n # item.\n for mgr_loc in mgr_locs:\n newblk = blk.copy(deep=False)\n newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1))\n blocks.append(newblk)\n\n else:\n # GH#32779 to avoid the performance penalty of copying,\n # we may try to only slice\n taker = blklocs[mgr_locs.indexer]\n max_len = max(len(mgr_locs), taker.max() + 1)\n if only_slice:\n taker = lib.maybe_indices_to_slice(taker, max_len)\n\n if isinstance(taker, slice):\n nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs)\n blocks.append(nb)\n elif only_slice:\n # GH#33597 slice instead of take, so we get\n # views instead of copies\n for i, ml in zip(taker, mgr_locs):\n slc = slice(i, i + 1)\n bp = BlockPlacement(ml)\n nb = blk.getitem_block_columns(slc, new_mgr_locs=bp)\n # We have np.shares_memory(nb.values, blk.values)\n blocks.append(nb)\n else:\n nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)\n blocks.append(nb)\n\n return blocks\n\n def _make_na_block(self, placement: BlockPlacement, fill_value=None) -> Block:\n\n if fill_value is None:\n fill_value = np.nan\n block_shape = list(self.shape)\n block_shape[0] = len(placement)\n\n dtype, fill_value = infer_dtype_from_scalar(fill_value)\n # error: Argument \"dtype\" to \"empty\" has incompatible type \"Union[dtype,\n # ExtensionDtype]\"; expected \"Union[dtype, None, type, _SupportsDtype, str,\n # Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], _DtypeDict,\n # Tuple[Any, Any]]\"\n block_values = np.empty(block_shape, dtype=dtype) # type: ignore[arg-type]\n block_values.fill(fill_value)\n return new_block(block_values, placement=placement, ndim=block_values.ndim)\n\n def take(self: T, indexer, axis: int = 1, verify: bool = True) -> T:\n \"\"\"\n Take items along any axis.\n\n indexer : np.ndarray or slice\n axis : int, default 1\n verify : bool, default True\n Check that all entries are between 0 and len(self) - 1, inclusive.\n Pass verify=False if this check has been done by the caller.\n\n Returns\n -------\n BlockManager\n \"\"\"\n # We have 6 tests that get here with a 
slice\n indexer = (\n np.arange(indexer.start, indexer.stop, indexer.step, dtype=\"int64\")\n if isinstance(indexer, slice)\n else np.asanyarray(indexer, dtype=\"int64\")\n )\n\n n = self.shape[axis]\n indexer = maybe_convert_indices(indexer, n, verify=verify)\n\n new_labels = self.axes[axis].take(indexer)\n return self.reindex_indexer(\n new_axis=new_labels,\n indexer=indexer,\n axis=axis,\n allow_dups=True,\n consolidate=False,\n )\n\n\nclass BlockManager(libinternals.BlockManager, BaseBlockManager):\n \"\"\"\n BaseBlockManager that holds 2D blocks.\n \"\"\"\n\n ndim = 2\n\n # ----------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n blocks: Sequence[Block],\n axes: Sequence[Index],\n verify_integrity: bool = True,\n ):\n\n if verify_integrity:\n # Assertion disabled for performance\n # assert all(isinstance(x, Index) for x in axes)\n\n for block in blocks:\n if self.ndim != block.ndim:\n raise AssertionError(\n f\"Number of Block dimensions ({block.ndim}) must equal \"\n f\"number of axes ({self.ndim})\"\n )\n if isinstance(block, DatetimeTZBlock) and block.values.ndim == 1:\n # TODO: remove once fastparquet no longer needs this\n # error: Incompatible types in assignment (expression has type\n # \"Union[ExtensionArray, ndarray]\", variable has type\n # \"DatetimeArray\")\n block.values = ensure_block_shape( # type: ignore[assignment]\n block.values, self.ndim\n )\n try:\n block._cache.clear()\n except AttributeError:\n # _cache not initialized\n pass\n\n self._verify_integrity()\n\n def _verify_integrity(self) -> None:\n mgr_shape = self.shape\n tot_items = sum(len(x.mgr_locs) for x in self.blocks)\n for block in self.blocks:\n if block.shape[1:] != mgr_shape[1:]:\n raise construction_error(tot_items, block.shape[1:], self.axes)\n if len(self.items) != tot_items:\n raise AssertionError(\n \"Number of manager items must equal union of \"\n f\"block items\\n# manager items: {len(self.items)}, # \"\n f\"tot_items: {tot_items}\"\n )\n\n @classmethod\n def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> BlockManager:\n \"\"\"\n Constructor for BlockManager and SingleBlockManager with same signature.\n \"\"\"\n return cls(blocks, axes, verify_integrity=False)\n\n # ----------------------------------------------------------------\n # Indexing\n\n def fast_xs(self, loc: int) -> ArrayLike:\n \"\"\"\n Return the array corresponding to `frame.iloc[loc]`.\n\n Parameters\n ----------\n loc : int\n\n Returns\n -------\n np.ndarray or ExtensionArray\n \"\"\"\n if len(self.blocks) == 1:\n return self.blocks[0].iget((slice(None), loc))\n\n dtype = interleaved_dtype([blk.dtype for blk in self.blocks])\n\n n = len(self)\n if isinstance(dtype, ExtensionDtype):\n # we'll eventually construct an ExtensionArray.\n result = np.empty(n, dtype=object)\n # TODO: let's just use dtype.empty?\n else:\n result = np.empty(n, dtype=dtype)\n\n result = ensure_wrapped_if_datetimelike(result)\n\n for blk in self.blocks:\n # Such assignment may incorrectly coerce NaT to None\n # result[blk.mgr_locs] = blk._slice((slice(None), loc))\n for i, rl in enumerate(blk.mgr_locs):\n result[rl] = blk.iget((i, loc))\n\n if isinstance(dtype, ExtensionDtype):\n result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)\n\n return result\n\n def iget(self, i: int) -> SingleBlockManager:\n \"\"\"\n Return the data as a SingleBlockManager.\n \"\"\"\n block = self.blocks[self.blknos[i]]\n values = block.iget(self.blklocs[i])\n\n # shortcut for select a 
single-dim from a 2-dim BM\n bp = BlockPlacement(slice(0, len(values)))\n values = maybe_coerce_values(values)\n nb = type(block)(values, placement=bp, ndim=1)\n return SingleBlockManager(nb, self.axes[1])\n\n def iget_values(self, i: int) -> ArrayLike:\n \"\"\"\n Return the data for column i as the values (ndarray or ExtensionArray).\n \"\"\"\n block = self.blocks[self.blknos[i]]\n values = block.iget(self.blklocs[i])\n return values\n\n @property\n def column_arrays(self) -> list[np.ndarray]:\n \"\"\"\n Used in the JSON C code to access column arrays.\n This optimizes compared to using `iget_values` by converting each\n block.values to a np.ndarray only once up front\n \"\"\"\n # special casing datetimetz to avoid conversion through object dtype\n arrays = [\n blk.values._ndarray\n if isinstance(blk, DatetimeTZBlock)\n else np.asarray(blk.values)\n for blk in self.blocks\n ]\n result = []\n for i in range(len(self.items)):\n arr = arrays[self.blknos[i]]\n if arr.ndim == 2:\n values = arr[self.blklocs[i]]\n else:\n values = arr\n result.append(values)\n return result\n\n def iset(self, loc: int | slice | np.ndarray, value: ArrayLike):\n \"\"\"\n Set new item in-place. Does not consolidate. Adds new Block if not\n contained in the current set of items\n \"\"\"\n value = extract_array(value, extract_numpy=True)\n # FIXME: refactor, clearly separate broadcasting & zip-like assignment\n # can prob also fix the various if tests for sparse/categorical\n if self._blklocs is None and self.ndim > 1:\n self._rebuild_blknos_and_blklocs()\n\n # Note: we exclude DTA/TDA here\n vdtype = getattr(value, \"dtype\", None)\n value_is_extension_type = is_1d_only_ea_dtype(vdtype)\n\n # categorical/sparse/datetimetz\n if value_is_extension_type:\n\n def value_getitem(placement):\n return value\n\n else:\n if value.ndim == 2:\n value = value.T\n else:\n value = ensure_block_shape(value, ndim=2)\n\n def value_getitem(placement):\n return value[placement.indexer]\n\n if value.shape[1:] != self.shape[1:]:\n raise AssertionError(\n \"Shape of new values must be compatible with manager shape\"\n )\n\n if lib.is_integer(loc):\n # We have 6 tests where loc is _not_ an int.\n # In this case, get_blkno_placements will yield only one tuple,\n # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1)))\n\n # error: Incompatible types in assignment (expression has type\n # \"List[Union[int, slice, ndarray]]\", variable has type \"Union[int,\n # slice, ndarray]\")\n loc = [loc] # type: ignore[assignment]\n\n # Accessing public blknos ensures the public versions are initialized\n blknos = self.blknos[loc]\n blklocs = self.blklocs[loc].copy()\n\n unfit_mgr_locs = []\n unfit_val_locs = []\n removed_blknos = []\n for blkno, val_locs in libinternals.get_blkno_placements(blknos, group=True):\n blk = self.blocks[blkno]\n blk_locs = blklocs[val_locs.indexer]\n if blk.should_store(value):\n blk.set_inplace(blk_locs, value_getitem(val_locs))\n else:\n unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])\n unfit_val_locs.append(val_locs)\n\n # If all block items are unfit, schedule the block for removal.\n if len(val_locs) == len(blk.mgr_locs):\n removed_blknos.append(blkno)\n else:\n blk.delete(blk_locs)\n self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))\n\n if len(removed_blknos):\n # Remove blocks & update blknos accordingly\n is_deleted = np.zeros(self.nblocks, dtype=np.bool_)\n is_deleted[removed_blknos] = True\n\n new_blknos = np.empty(self.nblocks, dtype=np.intp)\n new_blknos.fill(-1)\n new_blknos[~is_deleted] = 
np.arange(self.nblocks - len(removed_blknos))\n self._blknos = new_blknos[self._blknos]\n self.blocks = tuple(\n blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)\n )\n\n if unfit_val_locs:\n unfit_mgr_locs = np.concatenate(unfit_mgr_locs)\n unfit_count = len(unfit_mgr_locs)\n\n new_blocks: list[Block] = []\n if value_is_extension_type:\n # This code (ab-)uses the fact that EA blocks contain only\n # one item.\n # TODO(EA2D): special casing unnecessary with 2D EAs\n new_blocks.extend(\n new_block(\n values=value,\n ndim=self.ndim,\n placement=slice(mgr_loc, mgr_loc + 1),\n )\n for mgr_loc in unfit_mgr_locs\n )\n\n self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)\n self._blklocs[unfit_mgr_locs] = 0\n\n else:\n # unfit_val_locs contains BlockPlacement objects\n unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])\n\n new_blocks.append(\n new_block(\n values=value_getitem(unfit_val_items),\n ndim=self.ndim,\n placement=unfit_mgr_locs,\n )\n )\n\n self._blknos[unfit_mgr_locs] = len(self.blocks)\n self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)\n\n self.blocks += tuple(new_blocks)\n\n # Newly created block's dtype may already be present.\n self._known_consolidated = False\n\n def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:\n \"\"\"\n Insert item at selected position.\n\n Parameters\n ----------\n loc : int\n item : hashable\n value : np.ndarray or ExtensionArray\n \"\"\"\n # insert to the axis; this could possibly raise a TypeError\n new_axis = self.items.insert(loc, item)\n\n if value.ndim == 2:\n value = value.T\n else:\n value = ensure_block_shape(value, ndim=self.ndim)\n\n block = new_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))\n\n for blkno, count in _fast_count_smallints(self.blknos[loc:]):\n blk = self.blocks[blkno]\n if count == len(blk.mgr_locs):\n blk.mgr_locs = blk.mgr_locs.add(1)\n else:\n new_mgr_locs = blk.mgr_locs.as_array.copy()\n new_mgr_locs[new_mgr_locs >= loc] += 1\n blk.mgr_locs = BlockPlacement(new_mgr_locs)\n\n # Accessing public blklocs ensures the public versions are initialized\n if loc == self.blklocs.shape[0]:\n # np.append is a lot faster, let's use it if we can.\n self._blklocs = np.append(self._blklocs, 0)\n self._blknos = np.append(self._blknos, len(self.blocks))\n else:\n self._blklocs = np.insert(self._blklocs, loc, 0)\n self._blknos = np.insert(self._blknos, loc, len(self.blocks))\n\n self.axes[0] = new_axis\n self.blocks += (block,)\n\n self._known_consolidated = False\n\n if len(self.blocks) > 100:\n warnings.warn(\n \"DataFrame is highly fragmented. This is usually the result \"\n \"of calling `frame.insert` many times, which has poor performance. \"\n \"Consider using pd.concat instead. 
To get a de-fragmented frame, \"\n \"use `newframe = frame.copy()`\",\n PerformanceWarning,\n stacklevel=5,\n )\n\n def idelete(self, indexer) -> BlockManager:\n \"\"\"\n Delete selected locations, returning a new BlockManager.\n \"\"\"\n is_deleted = np.zeros(self.shape[0], dtype=np.bool_)\n is_deleted[indexer] = True\n taker = (~is_deleted).nonzero()[0]\n\n nbs = self._slice_take_blocks_ax0(taker, only_slice=True)\n new_columns = self.items[~is_deleted]\n axes = [new_columns, self.axes[1]]\n return type(self)(tuple(nbs), axes)\n\n # ----------------------------------------------------------------\n # Block-wise Operation\n\n def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T:\n \"\"\"\n Apply grouped reduction function blockwise, returning a new BlockManager.\n\n Parameters\n ----------\n func : grouped reduction function\n ignore_failures : bool, default False\n Whether to drop blocks where func raises TypeError.\n\n Returns\n -------\n BlockManager\n \"\"\"\n result_blocks: list[Block] = []\n\n for blk in self.blocks:\n if blk.is_object:\n # split on object-dtype blocks bc some columns may raise\n # while others do not.\n for sb in blk._split():\n try:\n applied = sb.apply(func)\n except (TypeError, NotImplementedError):\n if not ignore_failures:\n raise\n continue\n result_blocks = extend_blocks(applied, result_blocks)\n else:\n try:\n applied = blk.apply(func)\n except (TypeError, NotImplementedError):\n if not ignore_failures:\n raise\n continue\n result_blocks = extend_blocks(applied, result_blocks)\n\n if len(result_blocks) == 0:\n index = Index([None]) # placeholder\n else:\n index = Index(range(result_blocks[0].values.shape[-1]))\n\n if ignore_failures:\n return self._combine(result_blocks, copy=False, index=index)\n\n return type(self).from_blocks(result_blocks, [self.axes[0], index])\n\n def reduce(\n self: T, func: Callable, ignore_failures: bool = False\n ) -> tuple[T, np.ndarray]:\n \"\"\"\n Apply reduction function blockwise, returning a single-row BlockManager.\n\n Parameters\n ----------\n func : reduction function\n ignore_failures : bool, default False\n Whether to drop blocks where func raises TypeError.\n\n Returns\n -------\n BlockManager\n np.ndarray\n Indexer of mgr_locs that are retained.\n \"\"\"\n # If 2D, we assume that we're operating column-wise\n assert self.ndim == 2\n\n res_blocks: list[Block] = []\n for blk in self.blocks:\n nbs = blk.reduce(func, ignore_failures)\n res_blocks.extend(nbs)\n\n index = Index([None]) # placeholder\n if ignore_failures:\n if res_blocks:\n indexer = np.concatenate([blk.mgr_locs.as_array for blk in res_blocks])\n new_mgr = self._combine(res_blocks, copy=False, index=index)\n else:\n indexer = []\n new_mgr = type(self).from_blocks([], [self.items[:0], index])\n else:\n indexer = np.arange(self.shape[0])\n new_mgr = type(self).from_blocks(res_blocks, [self.items, index])\n return new_mgr, indexer\n\n def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager:\n \"\"\"\n Apply array_op blockwise with another (aligned) BlockManager.\n \"\"\"\n return operate_blockwise(self, other, array_op)\n\n def _equal_values(self: BlockManager, other: BlockManager) -> bool:\n \"\"\"\n Used in .equals defined in base class. 
Only check the column values\n assuming shape and indexes have already been checked.\n \"\"\"\n return blockwise_all(self, other, array_equals)\n\n def quantile(\n self: T,\n *,\n qs: Float64Index,\n axis: int = 0,\n interpolation=\"linear\",\n ) -> T:\n \"\"\"\n Iterate over blocks applying quantile reduction.\n This routine is intended for reduction type operations and\n will do inference on the generated blocks.\n\n Parameters\n ----------\n axis: reduction axis, default 0\n consolidate: bool, default True. Join together blocks having same\n dtype\n interpolation : type of interpolation, default 'linear'\n qs : list of the quantiles to be computed\n\n Returns\n -------\n BlockManager\n \"\"\"\n # Series dispatches to DataFrame for quantile, which allows us to\n # simplify some of the code here and in the blocks\n assert self.ndim >= 2\n assert is_list_like(qs) # caller is responsible for this\n assert axis == 1 # only ever called this way\n\n new_axes = list(self.axes)\n new_axes[1] = Float64Index(qs)\n\n blocks = [\n blk.quantile(axis=axis, qs=qs, interpolation=interpolation)\n for blk in self.blocks\n ]\n\n return type(self)(blocks, new_axes)\n\n # ----------------------------------------------------------------\n\n def unstack(self, unstacker, fill_value) -> BlockManager:\n \"\"\"\n Return a BlockManager with all blocks unstacked..\n\n Parameters\n ----------\n unstacker : reshape._Unstacker\n fill_value : Any\n fill_value for newly introduced missing values.\n\n Returns\n -------\n unstacked : BlockManager\n \"\"\"\n new_columns = unstacker.get_new_columns(self.items)\n new_index = unstacker.new_index\n\n allow_fill = not unstacker.mask.all()\n\n new_blocks: list[Block] = []\n columns_mask: list[np.ndarray] = []\n\n for blk in self.blocks:\n blk_cols = self.items[blk.mgr_locs.indexer]\n new_items = unstacker.get_new_columns(blk_cols)\n new_placement = new_columns.get_indexer(new_items)\n\n blocks, mask = blk._unstack(\n unstacker,\n fill_value,\n new_placement=new_placement,\n allow_fill=allow_fill,\n )\n\n new_blocks.extend(blocks)\n columns_mask.extend(mask)\n\n new_columns = new_columns[columns_mask]\n\n bm = BlockManager(new_blocks, [new_columns, new_index])\n return bm\n\n def to_dict(self, copy: bool = True):\n \"\"\"\n Return a dict of str(dtype) -> BlockManager\n\n Parameters\n ----------\n copy : bool, default True\n\n Returns\n -------\n values : a dict of dtype -> BlockManager\n \"\"\"\n\n bd: dict[str, list[Block]] = {}\n for b in self.blocks:\n bd.setdefault(str(b.dtype), []).append(b)\n\n # TODO(EA2D): the combine will be unnecessary with 2D EAs\n return {dtype: self._combine(blocks, copy=copy) for dtype, blocks in bd.items()}\n\n def as_array(\n self,\n transpose: bool = False,\n dtype: npt.DTypeLike | None = None,\n copy: bool = False,\n na_value=lib.no_default,\n ) -> np.ndarray:\n \"\"\"\n Convert the blockmanager data into an numpy array.\n\n Parameters\n ----------\n transpose : bool, default False\n If True, transpose the return array.\n dtype : object, default None\n Data type of the return array.\n copy : bool, default False\n If True then guarantee that a copy is returned. 
A value of\n False does not guarantee that the underlying data is not\n copied.\n na_value : object, default lib.no_default\n Value to be used as the missing value sentinel.\n\n Returns\n -------\n arr : ndarray\n \"\"\"\n if len(self.blocks) == 0:\n arr = np.empty(self.shape, dtype=float)\n return arr.transpose() if transpose else arr\n\n # We want to copy when na_value is provided to avoid\n # mutating the original object\n copy = copy or na_value is not lib.no_default\n\n if self.is_single_block:\n blk = self.blocks[0]\n if blk.is_extension:\n # Avoid implicit conversion of extension blocks to object\n\n # error: Item \"ndarray\" of \"Union[ndarray, ExtensionArray]\" has no\n # attribute \"to_numpy\"\n arr = blk.values.to_numpy( # type: ignore[union-attr]\n # pandas/core/internals/managers.py:1428: error: Argument \"dtype\" to\n # \"to_numpy\" of \"ExtensionArray\" has incompatible type\n # \"Optional[Union[dtype[Any], None, type, _SupportsDType, str,\n # Union[Tuple[Any, int], Tuple[Any, Union[SupportsIndex,\n # Sequence[SupportsIndex]]], List[Any], _DTypeDict, Tuple[Any,\n # Any]]]]\"; expected \"Optional[Union[ExtensionDtype, Union[str,\n # dtype[Any]], Type[str], Type[float], Type[int], Type[complex],\n # Type[bool], Type[object]]]\"\n dtype=dtype, # type: ignore[arg-type]\n na_value=na_value,\n ).reshape(blk.shape)\n else:\n arr = np.asarray(blk.get_values())\n if dtype:\n arr = arr.astype(dtype, copy=False)\n else:\n arr = self._interleave(dtype=dtype, na_value=na_value)\n # The underlying data was copied within _interleave\n copy = False\n\n if copy:\n arr = arr.copy()\n\n if na_value is not lib.no_default:\n arr[isna(arr)] = na_value\n\n return arr.transpose() if transpose else arr\n\n def _interleave(\n self,\n dtype: npt.DTypeLike | ExtensionDtype | None = None,\n na_value=lib.no_default,\n ) -> np.ndarray:\n \"\"\"\n Return ndarray from blocks with specified item order\n Items must be contained in the blocks\n \"\"\"\n if not dtype:\n dtype = interleaved_dtype([blk.dtype for blk in self.blocks])\n\n # TODO: https://github.com/pandas-dev/pandas/issues/22791\n # Give EAs some input on what happens here. 
Sparse needs this.\n if isinstance(dtype, SparseDtype):\n dtype = dtype.subtype\n elif isinstance(dtype, ExtensionDtype):\n dtype = np.dtype(\"object\")\n elif is_dtype_equal(dtype, str):\n dtype = np.dtype(\"object\")\n\n # error: Argument \"dtype\" to \"empty\" has incompatible type\n # \"Union[ExtensionDtype, str, dtype[Any], Type[object], None]\"; expected\n # \"Union[dtype[Any], None, type, _SupportsDType, str, Union[Tuple[Any, int],\n # Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict,\n # Tuple[Any, Any]]]\"\n result = np.empty(self.shape, dtype=dtype) # type: ignore[arg-type]\n\n itemmask = np.zeros(self.shape[0])\n\n for blk in self.blocks:\n rl = blk.mgr_locs\n if blk.is_extension:\n # Avoid implicit conversion of extension blocks to object\n\n # error: Item \"ndarray\" of \"Union[ndarray, ExtensionArray]\" has no\n # attribute \"to_numpy\"\n arr = blk.values.to_numpy( # type: ignore[union-attr]\n # pandas/core/internals/managers.py:1485: error: Argument \"dtype\" to\n # \"to_numpy\" of \"ExtensionArray\" has incompatible type\n # \"Union[dtype[Any], None, type, _SupportsDType, str, Tuple[Any,\n # Union[SupportsIndex, Sequence[SupportsIndex]]], List[Any],\n # _DTypeDict, Tuple[Any, Any], ExtensionDtype]\"; expected\n # \"Optional[Union[ExtensionDtype, Union[str, dtype[Any]], Type[str],\n # Type[float], Type[int], Type[complex], Type[bool], Type[object]]]\"\n # [arg-type]\n dtype=dtype, # type: ignore[arg-type]\n na_value=na_value,\n )\n else:\n # error: Argument 1 to \"get_values\" of \"Block\" has incompatible type\n # \"Union[ExtensionDtype, str, dtype[Any], Type[object], None]\"; expected\n # \"Union[dtype[Any], ExtensionDtype, None]\"\n arr = blk.get_values(dtype) # type: ignore[arg-type]\n result[rl.indexer] = arr\n itemmask[rl.indexer] = 1\n\n if not itemmask.all():\n raise AssertionError(\"Some items were not contained in blocks\")\n\n return result\n\n\nclass SingleBlockManager(BaseBlockManager, SingleDataManager):\n \"\"\"manage a single block with\"\"\"\n\n ndim = 1\n _is_consolidated = True\n _known_consolidated = True\n __slots__ = ()\n is_single_block = True\n\n def __init__(\n self,\n block: Block,\n axis: Index,\n verify_integrity: bool = False,\n fastpath=lib.no_default,\n ):\n # Assertions disabled for performance\n # assert isinstance(block, Block), type(block)\n # assert isinstance(axis, Index), type(axis)\n\n if fastpath is not lib.no_default:\n warnings.warn(\n \"The `fastpath` keyword is deprecated and will be removed \"\n \"in a future version.\",\n FutureWarning,\n stacklevel=2,\n )\n\n self.axes = [axis]\n self.blocks = (block,)\n\n @classmethod\n def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> SingleBlockManager:\n \"\"\"\n Constructor for BlockManager and SingleBlockManager with same signature.\n \"\"\"\n assert len(blocks) == 1\n assert len(axes) == 1\n return cls(blocks[0], axes[0], verify_integrity=False)\n\n @classmethod\n def from_array(cls, array: ArrayLike, index: Index) -> SingleBlockManager:\n \"\"\"\n Constructor for if we have an array that is not yet a Block.\n \"\"\"\n block = new_block(array, placement=slice(0, len(index)), ndim=1)\n return cls(block, index)\n\n def __getstate__(self):\n block_values = [b.values for b in self.blocks]\n block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]\n axes_array = list(self.axes)\n\n extra_state = {\n \"0.14.1\": {\n \"axes\": axes_array,\n \"blocks\": [\n {\"values\": b.values, \"mgr_locs\": b.mgr_locs.indexer}\n for b in self.blocks\n ],\n }\n }\n\n # First 
three elements of the state are to maintain forward\n # compatibility with 0.13.1.\n return axes_array, block_values, block_items, extra_state\n\n def __setstate__(self, state):\n def unpickle_block(values, mgr_locs, ndim: int) -> Block:\n # TODO(EA2D): ndim would be unnecessary with 2D EAs\n # older pickles may store e.g. DatetimeIndex instead of DatetimeArray\n values = extract_array(values, extract_numpy=True)\n return new_block(values, placement=mgr_locs, ndim=ndim)\n\n if isinstance(state, tuple) and len(state) >= 4 and \"0.14.1\" in state[3]:\n state = state[3][\"0.14.1\"]\n self.axes = [ensure_index(ax) for ax in state[\"axes\"]]\n ndim = len(self.axes)\n self.blocks = tuple(\n unpickle_block(b[\"values\"], b[\"mgr_locs\"], ndim=ndim)\n for b in state[\"blocks\"]\n )\n else:\n raise NotImplementedError(\"pre-0.14.1 pickles are no longer supported\")\n\n self._post_setstate()\n\n def _post_setstate(self):\n pass\n\n @property\n def _block(self) -> Block:\n return self.blocks[0]\n\n @property\n def _blknos(self):\n \"\"\"compat with BlockManager\"\"\"\n return None\n\n @property\n def _blklocs(self):\n \"\"\"compat with BlockManager\"\"\"\n return None\n\n def getitem_mgr(self, indexer) -> SingleBlockManager:\n # similar to get_slice, but not restricted to slice indexer\n blk = self._block\n array = blk._slice(indexer)\n if array.ndim > 1:\n # This will be caught by Series._get_values\n raise ValueError(\"dimension-expanding indexing not allowed\")\n\n bp = BlockPlacement(slice(0, len(array)))\n block = blk.make_block_same_class(array, placement=bp)\n\n new_idx = self.index[indexer]\n return type(self)(block, new_idx)\n\n def get_slice(self, slobj: slice, axis: int = 0) -> SingleBlockManager:\n # Assertion disabled for performance\n # assert isinstance(slobj, slice), type(slobj)\n if axis >= self.ndim:\n raise IndexError(\"Requested axis not found in manager\")\n\n blk = self._block\n array = blk._slice(slobj)\n bp = BlockPlacement(slice(0, len(array)))\n block = blk.make_block_same_class(array, placement=bp)\n new_index = self.index._getitem_slice(slobj)\n return type(self)(block, new_index)\n\n @property\n def index(self) -> Index:\n return self.axes[0]\n\n @property\n def dtype(self) -> DtypeObj:\n return self._block.dtype\n\n def get_dtypes(self) -> np.ndarray:\n return np.array([self._block.dtype])\n\n def external_values(self):\n \"\"\"The array that Series.values returns\"\"\"\n return self._block.external_values()\n\n def internal_values(self):\n \"\"\"The array that Series._values returns\"\"\"\n return self._block.values\n\n def array_values(self):\n \"\"\"The array that Series.array returns\"\"\"\n return self._block.array_values\n\n @property\n def _can_hold_na(self) -> bool:\n return self._block._can_hold_na\n\n def is_consolidated(self) -> bool:\n return True\n\n def _consolidate_check(self):\n pass\n\n def _consolidate_inplace(self):\n pass\n\n def idelete(self, indexer) -> SingleBlockManager:\n \"\"\"\n Delete single location from SingleBlockManager.\n\n Ensures that self.blocks doesn't become empty.\n \"\"\"\n self._block.delete(indexer)\n self.axes[0] = self.axes[0].delete(indexer)\n return self\n\n def fast_xs(self, loc):\n \"\"\"\n fast path for getting a cross-section\n return a view of the data\n \"\"\"\n raise NotImplementedError(\"Use series._values[loc] instead\")\n\n def set_values(self, values: ArrayLike):\n \"\"\"\n Set the values of the single block in place.\n\n Use at your own risk! 
This does not check if the passed values are\n valid for the current Block/SingleBlockManager (length, dtype, etc).\n \"\"\"\n self.blocks[0].values = values\n self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values)))\n\n def _equal_values(self: T, other: T) -> bool:\n \"\"\"\n Used in .equals defined in base class. Only check the column values\n assuming shape and indexes have already been checked.\n \"\"\"\n # For SingleBlockManager (i.e.Series)\n if other.ndim != 1:\n return False\n left = self.blocks[0].values\n right = other.blocks[0].values\n return array_equals(left, right)\n\n\n# --------------------------------------------------------------------\n# Constructor Helpers\n\n\ndef create_block_manager_from_blocks(\n blocks: list[Block], axes: list[Index], consolidate: bool = True\n) -> BlockManager:\n try:\n mgr = BlockManager(blocks, axes)\n\n except ValueError as err:\n arrays = [blk.values for blk in blocks]\n tot_items = sum(arr.shape[0] for arr in arrays)\n raise construction_error(tot_items, arrays[0].shape[1:], axes, err)\n\n if consolidate:\n mgr._consolidate_inplace()\n return mgr\n\n\n# We define this here so we can override it in tests.extension.test_numpy\ndef _extract_array(obj):\n return extract_array(obj, extract_numpy=True)\n\n\ndef create_block_manager_from_arrays(\n arrays,\n names: Index,\n axes: list[Index],\n consolidate: bool = True,\n) -> BlockManager:\n # Assertions disabled for performance\n # assert isinstance(names, Index)\n # assert isinstance(axes, list)\n # assert all(isinstance(x, Index) for x in axes)\n\n arrays = [_extract_array(x) for x in arrays]\n\n try:\n blocks = _form_blocks(arrays, names, axes, consolidate)\n mgr = BlockManager(blocks, axes)\n except ValueError as e:\n raise construction_error(len(arrays), arrays[0].shape, axes, e)\n if consolidate:\n mgr._consolidate_inplace()\n return mgr\n\n\ndef construction_error(\n tot_items: int,\n block_shape: Shape,\n axes: list[Index],\n e: ValueError | None = None,\n):\n \"\"\"raise a helpful message about our construction\"\"\"\n passed = tuple(map(int, [tot_items] + list(block_shape)))\n # Correcting the user facing error message during dataframe construction\n if len(passed) <= 2:\n passed = passed[::-1]\n\n implied = tuple(len(ax) for ax in axes)\n # Correcting the user facing error message during dataframe construction\n if len(implied) <= 2:\n implied = implied[::-1]\n\n # We return the exception object instead of raising it so that we\n # can raise it in the caller; mypy plays better with that\n if passed == implied and e is not None:\n return e\n if block_shape[0] == 0:\n return ValueError(\"Empty data passed with indices specified.\")\n return ValueError(f\"Shape of passed values is {passed}, indices imply {implied}\")\n\n\n# -----------------------------------------------------------------------\n\n\ndef _form_blocks(\n arrays: list[ArrayLike], names: Index, axes: list[Index], consolidate: bool\n) -> list[Block]:\n # put \"leftover\" items in float bucket, where else?\n # generalize?\n items_dict: DefaultDict[str, list] = defaultdict(list)\n extra_locs = []\n\n names_idx = names\n if names_idx.equals(axes[0]):\n names_indexer = np.arange(len(names_idx))\n else:\n # Assertion disabled for performance\n # assert names_idx.intersection(axes[0]).is_unique\n names_indexer = names_idx.get_indexer_for(axes[0])\n\n for i, name_idx in enumerate(names_indexer):\n if name_idx == -1:\n extra_locs.append(i)\n continue\n\n v = arrays[name_idx]\n\n block_type = get_block_type(v)\n 
items_dict[block_type.__name__].append((i, v))\n\n blocks: list[Block] = []\n if len(items_dict[\"NumericBlock\"]):\n numeric_blocks = _multi_blockify(\n items_dict[\"NumericBlock\"], consolidate=consolidate\n )\n blocks.extend(numeric_blocks)\n\n if len(items_dict[\"DatetimeLikeBlock\"]):\n dtlike_blocks = _multi_blockify(\n items_dict[\"DatetimeLikeBlock\"], consolidate=consolidate\n )\n blocks.extend(dtlike_blocks)\n\n if len(items_dict[\"DatetimeTZBlock\"]):\n dttz_blocks = [\n DatetimeTZBlock(\n ensure_block_shape(extract_array(array), 2),\n placement=BlockPlacement(i),\n ndim=2,\n )\n for i, array in items_dict[\"DatetimeTZBlock\"]\n ]\n blocks.extend(dttz_blocks)\n\n if len(items_dict[\"ObjectBlock\"]) > 0:\n object_blocks = _simple_blockify(\n items_dict[\"ObjectBlock\"], np.object_, consolidate=consolidate\n )\n blocks.extend(object_blocks)\n\n if len(items_dict[\"CategoricalBlock\"]) > 0:\n cat_blocks = [\n CategoricalBlock(array, placement=BlockPlacement(i), ndim=2)\n for i, array in items_dict[\"CategoricalBlock\"]\n ]\n blocks.extend(cat_blocks)\n\n if len(items_dict[\"ExtensionBlock\"]):\n external_blocks = [\n ExtensionBlock(array, placement=BlockPlacement(i), ndim=2)\n for i, array in items_dict[\"ExtensionBlock\"]\n ]\n\n blocks.extend(external_blocks)\n\n if len(extra_locs):\n shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])\n\n # empty items -> dtype object\n block_values = np.empty(shape, dtype=object)\n block_values.fill(np.nan)\n\n na_block = new_block(block_values, placement=extra_locs, ndim=2)\n blocks.append(na_block)\n\n return blocks\n\n\ndef _simple_blockify(tuples, dtype, consolidate: bool) -> list[Block]:\n \"\"\"\n return a single array of a block that has a single dtype; if dtype is\n not None, coerce to this dtype\n \"\"\"\n if not consolidate:\n return _tuples_to_blocks_no_consolidate(tuples, dtype=dtype)\n\n values, placement = _stack_arrays(tuples, dtype)\n\n # TODO: CHECK DTYPE?\n if dtype is not None and values.dtype != dtype: # pragma: no cover\n values = values.astype(dtype)\n\n block = new_block(values, placement=BlockPlacement(placement), ndim=2)\n return [block]\n\n\ndef _multi_blockify(tuples, dtype: DtypeObj | None = None, consolidate: bool = True):\n \"\"\"return an array of blocks that potentially have different dtypes\"\"\"\n\n if not consolidate:\n return _tuples_to_blocks_no_consolidate(tuples, dtype=dtype)\n\n # group by dtype\n grouper = itertools.groupby(tuples, lambda x: x[1].dtype)\n\n new_blocks = []\n for dtype, tup_block in grouper:\n\n # error: Argument 2 to \"_stack_arrays\" has incompatible type\n # \"Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], Type[int],\n # Type[complex], Type[bool], Type[object], None]\"; expected \"dtype[Any]\"\n values, placement = _stack_arrays(\n list(tup_block), dtype # type: ignore[arg-type]\n )\n\n block = new_block(values, placement=BlockPlacement(placement), ndim=2)\n new_blocks.append(block)\n\n return new_blocks\n\n\ndef _tuples_to_blocks_no_consolidate(tuples, dtype: DtypeObj | None) -> list[Block]:\n # tuples produced within _form_blocks are of the form (placement, array)\n if dtype is not None:\n return [\n new_block(\n np.atleast_2d(x[1].astype(dtype, copy=False)), placement=x[0], ndim=2\n )\n for x in tuples\n ]\n return [new_block(np.atleast_2d(x[1]), placement=x[0], ndim=2) for x in tuples]\n\n\ndef _stack_arrays(tuples, dtype: np.dtype):\n\n placement, arrays = zip(*tuples)\n\n first = arrays[0]\n shape = (len(arrays),) + first.shape\n\n stacked = 
np.empty(shape, dtype=dtype)\n for i, arr in enumerate(arrays):\n stacked[i] = arr\n\n return stacked, placement\n\n\ndef _consolidate(blocks: tuple[Block, ...]) -> list[Block]:\n \"\"\"\n Merge blocks having same dtype, exclude non-consolidating blocks\n \"\"\"\n # sort by _can_consolidate, dtype\n gkey = lambda x: x._consolidate_key\n grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)\n\n new_blocks: list[Block] = []\n for (_can_consolidate, dtype), group_blocks in grouper:\n merged_blocks = _merge_blocks(\n list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate\n )\n new_blocks = extend_blocks(merged_blocks, new_blocks)\n return new_blocks\n\n\ndef _merge_blocks(\n blocks: list[Block], dtype: DtypeObj, can_consolidate: bool\n) -> list[Block]:\n\n if len(blocks) == 1:\n return blocks\n\n if can_consolidate:\n\n # TODO: optimization potential in case all mgrs contain slices and\n # combination of those slices is a slice, too.\n new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])\n\n new_values: ArrayLike\n\n if isinstance(blocks[0].dtype, np.dtype):\n # error: List comprehension has incompatible type List[Union[ndarray,\n # ExtensionArray]]; expected List[Union[complex, generic,\n # Sequence[Union[int, float, complex, str, bytes, generic]],\n # Sequence[Sequence[Any]], SupportsArray]]\n new_values = np.vstack([b.values for b in blocks]) # type: ignore[misc]\n else:\n bvals = [blk.values for blk in blocks]\n bvals2 = cast(Sequence[NDArrayBackedExtensionArray], bvals)\n new_values = bvals2[0]._concat_same_type(bvals2, axis=0)\n\n argsort = np.argsort(new_mgr_locs)\n new_values = new_values[argsort]\n new_mgr_locs = new_mgr_locs[argsort]\n\n bp = BlockPlacement(new_mgr_locs)\n return [new_block(new_values, placement=bp, ndim=2)]\n\n # can't consolidate --> no merge\n return blocks\n\n\ndef _fast_count_smallints(arr: np.ndarray) -> np.ndarray:\n \"\"\"Faster version of set(arr) for sequences of small numbers.\"\"\"\n counts = np.bincount(arr.astype(np.int_))\n nz = counts.nonzero()[0]\n return np.c_[nz, counts[nz]]\n\n\ndef _preprocess_slice_or_indexer(\n slice_or_indexer: slice | np.ndarray, length: int, allow_fill: bool\n):\n if isinstance(slice_or_indexer, slice):\n return (\n \"slice\",\n slice_or_indexer,\n libinternals.slice_len(slice_or_indexer, length),\n )\n else:\n if (\n not isinstance(slice_or_indexer, np.ndarray)\n or slice_or_indexer.dtype.kind != \"i\"\n ):\n dtype = getattr(slice_or_indexer, \"dtype\", None)\n raise TypeError(type(slice_or_indexer), dtype)\n\n indexer = ensure_platform_int(slice_or_indexer)\n if not allow_fill:\n indexer = maybe_convert_indices(indexer, length)\n return \"fancy\", indexer, len(indexer)\n"
] | [
[
"pandas._libs.internals.get_blkno_placements",
"pandas.core.indexes.api.ensure_index",
"pandas.core.construction.extract_array",
"pandas._libs.lib.is_integer",
"pandas.core.internals.blocks.ensure_block_shape",
"numpy.dtype",
"pandas._libs.internals.BlockPlacement",
"pandas.core.internals.ops.operate_blockwise",
"numpy.concatenate",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.dtypes.missing.isna",
"numpy.empty",
"pandas._libs.lib.maybe_indices_to_slice",
"pandas.core.indexes.api.Index",
"pandas.core.dtypes.missing.array_equals",
"pandas.core.internals.ops.blockwise_all",
"pandas.util._validators.validate_bool_kwarg",
"numpy.arange",
"pandas._libs.internals.slice_len",
"numpy.ndim",
"numpy.append",
"pandas.core.internals.base.interleaved_dtype",
"numpy.vstack",
"numpy.atleast_2d",
"pandas.core.dtypes.common.is_dtype_equal",
"numpy.array",
"pandas._libs.lib.get_reverse_indexer",
"numpy.zeros",
"pandas.core.internals.blocks.get_block_type",
"pandas.core.internals.blocks.new_block",
"pandas.core.construction.ensure_wrapped_if_datetimelike",
"numpy.argsort",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.internals.blocks.extend_blocks",
"pandas.core.dtypes.common.is_1d_only_ea_dtype",
"numpy.insert",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"numpy.asarray",
"pandas.core.indexers.maybe_convert_indices",
"pandas.core.indexes.api.Float64Index",
"pandas.core.internals.blocks.maybe_coerce_values",
"pandas.core.algorithms.take_nd",
"numpy.asanyarray"
]
] |
SkyLord2/ResNetByTFKeras | [
"70d00ef1c559d7d5ab895d3dfce6074fd316793b"
] | [
"resnet.py"
] | [
"# -*- coding: utf-8 -*-\n# @Time : 2020/10/22 20:14\n# @Author : cds\n# @Site : https://github.com/SkyLord2?tab=repositories\n# @Email: [email protected]\n# @File : resnet.py\n# @Software: PyCharm\n\nfrom tensorflow.keras import layers,Model,Sequential\n\nclass BasicBlock(layers.Layer):\n expansion=1\n def __init__(self,out_channel,strides=1,downsample=None,**kwargs):\n super(BasicBlock,self).__init__(**kwargs)\n \n self.conv1 = layers.Conv2D(out_channel,kernel_size=3,strides=strides,padding=\"SAME\",use_bias=False)\n self.bn1 = layers.BatchNormalization(momentum=0.9, epsilon=1e-5)\n\n self.conv2 = layers.Conv2D(out_channel,kernel_size=3,strides=1,padding=\"SAME\",use_bias=False)\n self.bn2 = layers.BatchNormalization(momentum=0.9,epsilon=1e-5)\n\n # 下采样函数\n self.downsample = downsample\n self.relu = layers.ReLU()\n self.add = layers.Add()\n def call(self,inputs,training=False):\n identify = inputs\n if(self.downsample is not None):\n identify = self.downsample(inputs)\n x = self.conv1(inputs)\n x = self.bn1(x,training=training)\n x = self.relu(x)\n\n x = self.conv2(x)\n x = self.bn2(x,training=training)\n\n x = self.add([identify,x])\n x = self.relu(x)\n return x\n\nclass Bottleneck(layers.Layer):\n expansion = 4\n def __init__(self,out_channel,strides=1,downsample=None,**kwargs):\n super(Bottleneck,self).__init__(**kwargs)\n \n self.conv1 = layers.Conv2D(out_channel,kernel_size=1,use_bias=False,name=\"conv1\")\n self.bn1 = layers.BatchNormalization(momentum=0.9,epsilon=1e-5, name=\"conv1/BatchNorm\")\n\n self.conv2 = layers.Conv2D(out_channel,kernel_size=3,strides=strides,padding=\"SAME\",use_bias=False,name=\"conv2\")\n self.bn2 = layers.BatchNormalization(momentum=0.9,epsilon=1e-5,name=\"conv2/BatchNorm\")\n\n self.conv3 = layers.Conv2D(out_channel*self.expansion,kernel_size=1,use_bias=False,name=\"conv3\")\n self.bn3 = layers.BatchNormalization(momentum=0.9,epsilon=1e-5,name=\"conv3/BatchNorm\")\n\n self.relu = layers.ReLU()\n self.downsample = downsample\n self.add = layers.Add()\n def call(self,inputs,training=False):\n identity = inputs\n if(self.downsample is not None):\n identity = self.downsample(inputs)\n \n x = self.conv1(inputs)\n x = self.bn1(x,training=training)\n x = self.relu(x)\n\n x = self.conv2(x)\n x = self.bn2(x,training=training)\n x = self.relu(x)\n\n x = self.conv3(x)\n x = self.bn3(x,training=training)\n\n x = self.add([identity,x])\n x = self.relu(x)\n\n return x\n\ndef _make_layer(block,in_channel,channel,block_num,name,strides=1):\n downsample = None\n if(strides!=1 or in_channel != channel*block.expansion):\n downsample = Sequential([\n #layers.Conv2D(channel*block.expansion,kernel_size=1,padding=\"SAME\",use_bias=False,name=\"conv1\"),\n layers.Conv2D(channel*block.expansion,kernel_size=1,strides=strides,use_bias=False,name=\"conv1\"),\n layers.BatchNormalization(momentum=0.9,epsilon=1.001e-5,name=\"BatchNorm\")],name=\"shortcut\")\n \n layer_list = []\n layer_list.append(block(channel,strides,downsample,name=\"unit_1\"))\n\n for index in range(1,block_num):\n layer_list.append(block(channel,name=\"unit_\"+str(index+1)))\n\n return Sequential(layer_list,name=name)\n\ndef _resnet(block,blocks_num,im_width=224,im_height=224,channel=3,num_classes=1000,include_top=True):\n input_image = layers.Input(shape=(im_height,im_width,channel),dtype=\"float32\")\n x = layers.Conv2D(filters=64,kernel_size=7,strides=2,padding=\"SAME\",use_bias=False,name=\"conv1\")(input_image)\n x = layers.BatchNormalization(momentum=0.9,epsilon=1e-5,name=\"conv1/BatchNorm\")(x)\n x = 
layers.ReLU()(x)\n x = layers.MaxPool2D(pool_size=3,strides=2,padding=\"SAME\")(x)\n\n print(\"-----------------------------block_1-------------------------------------\")\n print(\"\\ndata shape:\", x.shape)\n x = _make_layer(block, x.shape[-1], 64, blocks_num[0], name=\"block_1\")(x)\n print(\"-----------------------------block_2-------------------------------------\")\n print(\"\\ndata shape:\", x.shape)\n x = _make_layer(block, x.shape[-1], 128, blocks_num[1], strides=2, name=\"block_2\")(x)\n print(\"-----------------------------block_3-------------------------------------\")\n print(\"\\ndata shape:\", x.shape)\n x = _make_layer(block, x.shape[-1], 256, blocks_num[2], strides=2, name=\"block_3\")(x)\n print(\"-----------------------------block_4-------------------------------------\")\n print(\"\\ndata shape:\", x.shape)\n x = _make_layer(block, x.shape[-1], 512, blocks_num[3], strides=2, name=\"block_4\")(x)\n\n if(include_top):\n x = layers.GlobalAvgPool2D()(x)\n x = layers.Dense(num_classes,name=\"logits\")(x)\n predict = layers.Softmax()(x)\n else:\n predict = x\n model = Model(inputs=input_image,outputs=predict)\n \n return model\n\ndef resnet18(im_width=224,im_height=224,channel=3,num_classes=1000):\n return _resnet(BasicBlock,[2,2,2,2],im_width, im_height,channel,num_classes)"
] | [
[
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.GlobalAvgPool2D",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Softmax",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.ReLU"
]
] |
AlbertoBarbado/unsupervised-outlier-transparency | [
"d80637cd0a9f70a3ff5f327952b4bd6377826707"
] | [
"lib/common.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 6 10:13:42 2019\n\n@author: alber\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import svm\nfrom sklearn.preprocessing import StandardScaler\nfrom joblib import Parallel, delayed\nfrom sklearn.tree import _tree\n\ndef train_one_class_svm(dataset_mat, numerical_cols, categorical_cols, dct_params):\n \"\"\"\n # TODO\n \"\"\"\n \n # Scaling numerical data\n sc = StandardScaler()\n \n if len(numerical_cols):\n X_train = dataset_mat[numerical_cols]\n X_train = sc.fit_transform(X_train)\n else:\n X_train = dataset_mat\n \n X_train_model = X_train\n \n for col in categorical_cols:\n X_train_model = np.insert(X_train_model, np.shape(X_train_model)[1], dataset_mat[col].values, axis=1)\n \n # Train OneClassSVM\n model = svm.OneClassSVM(**dct_params)\n model.fit(X_train_model)\n preds = pd.DataFrame({\"predictions\":list(model.predict(X_train_model))}) \n preds[\"distances\"] = model.decision_function(X_train_model)\n df_anomalies = pd.merge(dataset_mat, preds, left_index=True, right_index=True)\n \n return df_anomalies, model\n\ndef grid_search(dataset_mat, numerical_cols, categorical_cols, dct_joblib):\n \"\"\"\n # TODO\n \"\"\"\n features = list(dataset_mat.columns)\n \n # REAL\n def grid(arg):\n \"\"\"\n # TODO\n \"\"\"\n params = {}\n params['nu'] = arg['nu']\n params['kernel'] = arg['kernel']\n params['gamma'] = arg['gamma']\n \n # Train model\n df_anomalies_all, clf = train_one_class_svm(dataset_mat, numerical_cols, categorical_cols, params)\n # Distance of OO to decision funcion\n d_pi = np.abs(clf.decision_function(np.matrix([0]*(len(features))))[0]) # dimension = len_features + 1 \n # Standarize distances\n df_anomalies_all['distances'] = df_anomalies_all[\"distances\"]/(1-d_pi)\n scoring = df_anomalies_all[df_anomalies_all['predictions']==1]['distances'].max() - np.abs(df_anomalies_all[df_anomalies_all['predictions']==-1]['distances']).max()\n return {'nu':arg['nu'], 'kernel':arg['kernel'], 'gamma':arg['gamma'], 'scoring':scoring}\n \n arg_instances=[{'nu':nu, 'kernel':'rbf', 'gamma':gamma} for nu in np.arange(0.1,1.0,0.1) for gamma in np.arange(0.1,1.0,0.1)]\n results = Parallel(**dct_joblib)(map(delayed(grid), arg_instances))\n \n # Choose the best result\n dct_best = {'nu':-1, 'kernel':\"rbf\", 'gamma':-1, 'scoring':-np.inf}\n for dct_results in results:\n if dct_results['scoring'] > dct_best['scoring']:\n dct_best = dct_results\n \n return dct_best\n\ndef tree_to_code(tree, feature_names):\n tree_ = tree.tree_\n feature_name = [\n feature_names[i] if i != _tree.TREE_UNDEFINED else \"undefined!\"\n for i in tree_.feature\n ]\n print (\"def tree({}):\".format(\", \".join(feature_names)))\n\n def recurse(node, depth):\n indent = \" \" * depth\n if tree_.feature[node] != _tree.TREE_UNDEFINED:\n name = feature_name[node]\n threshold = tree_.threshold[node]\n print (\"{}if {} <= {}:\".format(indent, name, threshold))\n recurse(tree_.children_left[node], depth + 1)\n print (\"{}else: # if {} > {}\".format(indent, name, threshold))\n recurse(tree_.children_right[node], depth + 1)\n else:\n print (\"{}return {}\".format(indent, tree_.value[node]))\n\n recurse(0, 1)"
] | [
[
"pandas.merge",
"sklearn.preprocessing.StandardScaler",
"numpy.shape",
"numpy.arange",
"sklearn.svm.OneClassSVM",
"numpy.abs"
]
] |
NoelShin/PixelPick | [
"f0ae7d35f62c1dda70f5bff1689177a513ab6259"
] | [
"datasets/voc.py"
] | [
"import os\nfrom glob import glob\nfrom random import random, randint, uniform\nimport pickle as pkl\n\nimport numpy as np\nimport cv2\nfrom PIL import Image, ImageFile\nimport torch\nfrom torchvision.datasets import VOCSegmentation\nfrom torchvision.transforms import ColorJitter, RandomApply, RandomGrayscale, CenterCrop, Normalize\nimport torchvision.transforms.functional as tf\nfrom tqdm import tqdm\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\nclass VOC2012Segmentation:\n def __init__(self, args, val=False, query=False):\n super(VOC2012Segmentation, self).__init__()\n self.dir_checkpoints = f\"{args.dir_root}/checkpoints/{args.experim_name}\"\n self.ignore_index = args.ignore_index\n self.size_base = args.size_base\n self.size_crop = (args.size_crop, args.size_crop)\n self.stride_total = args.stride_total\n\n if args.use_augmented_dataset and not val:\n self.voc = AugmentedVOC(args.dir_augmented_dataset)\n else:\n self.voc = VOCSegmentation(f\"{args.dir_dataset}\", image_set='val' if val else 'train', download=False)\n print(\"# images:\", len(self.voc))\n\n self.geometric_augmentations = args.augmentations[\"geometric\"]\n self.photometric_augmentations = args.augmentations[\"photometric\"]\n self.normalize = Normalize(mean=args.mean, std=args.std)\n if query:\n self.geometric_augmentations[\"random_scale\"] = False\n self.geometric_augmentations[\"crop\"] = False\n self.geometric_augmentations[\"random_hflip\"] = False\n\n if self.geometric_augmentations[\"crop\"]:\n self.mean = tuple((np.array(args.mean) * 255.0).astype(np.uint8).tolist())\n\n # generate initial queries\n n_pixels_per_img = args.n_pixels_by_us\n init_n_pixels = args.n_init_pixels if args.n_init_pixels > 0 else n_pixels_per_img\n\n self.queries, self.n_pixels_total = None, -1\n path_queries = f\"{args.dir_dataset}/init_labelled_pixels_{args.seed}.pkl\"\n if n_pixels_per_img != 0 and not val:\n n_pixels_total = 0\n\n if os.path.isfile(path_queries):\n self.queries = pkl.load(open(path_queries, \"rb\"))\n for q in self.queries:\n n_pixels_total += q.sum()\n\n else:\n os.makedirs(f\"{self.dir_checkpoints}/0_query\", exist_ok=True)\n\n list_queries = list()\n for i in tqdm(range(len(self.voc))):\n label = self.voc[i][1]\n w, h = label.size\n\n if n_pixels_per_img == 0:\n n_pixels_per_img = h * w\n elif n_pixels_per_img != 0 and init_n_pixels > 0:\n n_pixels_per_img = init_n_pixels\n else:\n raise NotImplementedError\n\n # generate queries whose size is set to base_size (longer side), i.e. 
400 as default\n h, w = self._compute_base_size(h, w)\n\n queries_flat = np.zeros((h * w), dtype=np.bool)\n\n # filter void pixels - boundary pixels that the original labels have (fyi, 5 pixels thickness)\n label = label.resize((w, h), Image.NEAREST) # note that downsampling method should be Image.NEAREST\n label = np.asarray(label, dtype=np.int32)\n\n label_flatten = label.flatten()\n ind_void_pixels = np.where(label_flatten == 255)[0]\n\n ind_non_void_pixels = np.setdiff1d(range(len(queries_flat)), ind_void_pixels) # remove void pixels\n assert len(ind_non_void_pixels) <= len(queries_flat)\n\n # for a very rare case where the number of non_void_pixels is not large enough to sample from\n if len(ind_non_void_pixels) < n_pixels_per_img:\n n_pixels_per_img = len(ind_non_void_pixels)\n\n ind_chosen_pixels = np.random.choice(ind_non_void_pixels, n_pixels_per_img, replace=False)\n\n queries_flat[ind_chosen_pixels] += True\n queries = queries_flat.reshape((h, w))\n\n list_queries.append(queries)\n n_pixels_total += queries.sum()\n\n pkl.dump(list_queries, open(f\"{path_queries}\", 'wb'))\n # Note that images of voc dataset vary from image to image thus can't use np.stack().\n self.queries = list_queries\n pkl.dump(self.queries, open(f\"{self.dir_checkpoints}/0_query/label.pkl\", 'wb'))\n\n self.n_pixels_total = n_pixels_total\n print(\"# labelled pixels used for training:\", n_pixels_total)\n self.val, self.query = val, query\n\n def label_queries(self, queries, nth_query=None):\n assert len(queries) == len(self.queries), f\"{queries.shape}, {len(self.queries)}\"\n previous = self.n_pixels_total\n\n list_queries = list()\n n_pixels_total = 0\n for q, m in zip(queries, self.queries):\n new_m = np.logical_or(q, m)\n list_queries.append(new_m)\n n_pixels_total += new_m.sum()\n self.queries, self.n_pixels_total = list_queries, n_pixels_total\n\n if isinstance(nth_query, int):\n os.makedirs(f\"{self.dir_checkpoints}/{nth_query}_query\", exist_ok=True)\n pkl.dump(self.queries, open(f\"{self.dir_checkpoints}/{nth_query}_query/label.pkl\", 'wb'))\n\n print(\"# labelled pixels is changed from {} to {} (delta: {})\".format(previous, n_pixels_total, n_pixels_total - previous))\n\n def _compute_base_size(self, h, w):\n if w > h:\n h = int(float(h) / w * self.size_base)\n w = self.size_base\n else:\n w = int(float(w) / h * self.size_base)\n h = self.size_base\n return h, w\n\n def _geometric_augmentations(self, x, y, queries=None):\n w, h = x.size\n h, w = self._compute_base_size(h, w)\n\n x, y = tf.resize(x, [h, w], Image.BILINEAR), tf.resize(y, [h, w], Image.NEAREST)\n\n if self.geometric_augmentations[\"random_scale\"]:\n rs = uniform(0.5, 2.0)\n\n h, w = int(h * rs), int(w * rs)\n x, y = tf.resize(x, [h, w], Image.BILINEAR), tf.resize(y, [h, w], Image.NEAREST)\n\n if queries is not None:\n queries = Image.fromarray(queries).resize((w, h), Image.NEAREST)\n\n if self.geometric_augmentations[\"crop\"]:\n w, h = x.size\n pad_h, pad_w = max(self.size_crop[0] - h, 0), max(self.size_crop[1] - w, 0)\n self.pad_size = (pad_h, pad_w)\n\n x = tf.pad(x, [0, 0, pad_w, pad_h], fill=self.mean, padding_mode=\"constant\")\n y = tf.pad(y, [0, 0, pad_w, pad_h], fill=self.ignore_index, padding_mode=\"constant\")\n\n w, h = x.size\n start_h, start_w = randint(0, h - self.size_crop[0]), randint(0, w - self.size_crop[1])\n\n x = tf.crop(x, top=start_h, left=start_w, height=self.size_crop[0], width=self.size_crop[1])\n y = tf.crop(y, top=start_h, left=start_w, height=self.size_crop[0], width=self.size_crop[1])\n\n if queries 
is not None:\n queries = tf.crop(queries, top=start_h, left=start_w, height=self.size_crop[0], width=self.size_crop[1])\n\n if self.geometric_augmentations[\"random_hflip\"]:\n if random() > 0.5:\n x, y = tf.hflip(x), tf.hflip(y)\n if queries is not None:\n queries = tf.hflip(queries)\n return x, y, queries\n\n def _photometric_augmentations(self, x):\n if self.photometric_augmentations[\"random_color_jitter\"]:\n color_jitter = ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1)\n x = RandomApply([color_jitter], p=0.8)(x)\n\n if self.photometric_augmentations[\"random_grayscale\"]:\n x = RandomGrayscale(0.2)(x)\n\n if self.photometric_augmentations[\"random_gaussian_blur\"]:\n w, h = x.size\n smaller_length = min(w, h)\n x = GaussianBlur(kernel_size=int((0.1 * smaller_length // 2 * 2) + 1))(x)\n return x\n\n def __len__(self):\n return len(self.voc)\n\n def __getitem__(self, ind):\n dict_data = dict()\n (x, y), queries = self.voc[ind], self.queries[ind] if self.queries is not None else None\n\n if self.val:\n dict_data.update({\"x\": self.normalize(tf.to_tensor(x)), \"y\": self._image_to_tensor(y)})\n\n else:\n x, y, queries = self._geometric_augmentations(x, y, queries)\n\n if not self.query:\n x = self._photometric_augmentations(x)\n\n dict_data.update({\"x\": self.normalize(tf.to_tensor(x)),\n \"y\": self._image_to_tensor(y),\n \"queries\": torch.tensor(np.array(queries, np.bool), dtype=torch.bool)})\n return dict_data\n\n @staticmethod\n def _image_to_tensor(pil_image):\n return torch.tensor(np.array(pil_image, dtype=np.uint8), dtype=torch.long)\n\n\nclass AugmentedVOC:\n def __init__(self, root):\n assert os.path.isdir(root)\n self.voc = list(zip(sorted(glob(f\"{root}/images/*\")), sorted(glob(f\"{root}/annot/*\"))))\n\n def __len__(self):\n return len(self.voc)\n\n def __getitem__(self, ind):\n p_img, p_annot = self.voc[ind]\n assert p_img.split('/')[-1].split('.')[0] == p_annot.split('/')[-1].split('.')[0]\n\n return Image.open(p_img), Image.open(p_annot)\n\n\nclass GaussianBlur(object):\n # Implements Gaussian blur as described in the SimCLR paper\n def __init__(self, kernel_size, min=0.1, max=2.0):\n self.min = min\n self.max = max\n # kernel size is set to be 10% of the image height/width\n self.kernel_size = kernel_size\n\n def __call__(self, sample):\n sample = np.array(sample)\n\n # blur the image with a 50% chance\n prob = np.random.random_sample()\n\n if prob < 0.5:\n sigma = (self.max - self.min) * np.random.random_sample() + self.min\n sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)\n return sample\n\n\nif __name__ == '__main__':\n from args import Arguments\n args = Arguments().parse_args()\n VOC2012Segmentation(args)"
] | [
[
"numpy.logical_or",
"numpy.array",
"numpy.random.choice",
"numpy.asarray",
"numpy.zeros",
"numpy.random.random_sample",
"numpy.where"
]
] |
ddavid/kafvio | [
"33f36140e392ef5b421d3a323d785749d2aa9a17"
] | [
"scripts/gen_anchors.py"
] | [
"'''\nCreated on Feb 20, 2017\n\n@author: jumabek\n'''\nfrom os import listdir\nfrom os.path import isfile, join\nimport argparse\n#import cv2\nimport numpy as np\nimport sys\nimport os\nimport shutil\nimport random \nimport math\n\nwidth_in_cfg_file = 416.\nheight_in_cfg_file = 416.\n\ndef IOU(x,centroids):\n similarities = []\n k = len(centroids)\n for centroid in centroids:\n c_w,c_h = centroid\n w,h = x\n if c_w>=w and c_h>=h:\n similarity = w*h/(c_w*c_h)\n elif c_w>=w and c_h<=h:\n similarity = w*c_h/(w*h + (c_w-w)*c_h)\n elif c_w<=w and c_h>=h:\n similarity = c_w*h/(w*h + c_w*(c_h-h))\n else: #means both w,h are bigger than c_w and c_h respectively\n similarity = (c_w*c_h)/(w*h)\n similarities.append(similarity) # will become (k,) shape\n return np.array(similarities) \n\ndef avg_IOU(X,centroids):\n n,d = X.shape\n sum = 0.\n for i in range(X.shape[0]):\n #note IOU() will return array which contains IoU for each centroid and X[i] // slightly ineffective, but I am too lazy\n sum+= max(IOU(X[i],centroids)) \n return sum/n\n\ndef write_anchors_to_file(centroids,X,anchor_file):\n f = open(anchor_file,'w')\n \n anchors = centroids.copy()\n print(anchors.shape)\n\n for i in range(anchors.shape[0]):\n anchors[i][0]*=width_in_cfg_file/32.\n anchors[i][1]*=height_in_cfg_file/32.\n \n\n widths = anchors[:,0]\n sorted_indices = np.argsort(widths)\n\n print('Anchors = ', anchors[sorted_indices])\n \n for i in sorted_indices[:-1]:\n f.write('%0.2f,%0.2f, '%(anchors[i,0],anchors[i,1]))\n\n #there should not be comma after last anchor, that's why\n f.write('%0.2f,%0.2f\\n'%(anchors[sorted_indices[-1:],0],anchors[sorted_indices[-1:],1]))\n \n f.write('%f\\n'%(avg_IOU(X,centroids)))\n print()\n\ndef kmeans(X,centroids,eps,anchor_file):\n \n N = X.shape[0]\n iterations = 0\n k,dim = centroids.shape\n prev_assignments = np.ones(N)*(-1) \n iter = 0\n old_D = np.zeros((N,k))\n\n while True:\n D = [] \n iter+=1 \n for i in range(N):\n d = 1 - IOU(X[i],centroids)\n D.append(d)\n D = np.array(D) # D.shape = (N,k)\n \n print(\"iter {}: dists = {}\".format(iter,np.sum(np.abs(old_D-D))))\n \n #assign samples to centroids \n assignments = np.argmin(D,axis=1)\n \n if (assignments == prev_assignments).all() :\n print(\"Centroids = \",centroids)\n write_anchors_to_file(centroids,X,anchor_file)\n return\n\n #calculate new centroids\n centroid_sums=np.zeros((k,dim),np.float)\n for i in range(N):\n centroid_sums[assignments[i]]+=X[i] \n for j in range(k): \n centroids[j] = centroid_sums[j]/(np.sum(assignments==j))\n \n prev_assignments = assignments.copy() \n old_D = D.copy() \n\ndef main(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('-filelist', default = '\\\\path\\\\to\\\\voc\\\\filelist\\\\train.txt', \n help='path to filelist\\n' )\n parser.add_argument('-output_dir', default = 'generated_anchors/anchors', type = str, \n help='Output anchor directory\\n' ) \n parser.add_argument('-num_clusters', default = 0, type = int, \n help='number of clusters\\n' ) \n\n \n args = parser.parse_args()\n \n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n\n f = open(args.filelist)\n \n lines = [line.rstrip('\\n') for line in f.readlines()]\n \n annotation_dims = []\n\n size = np.zeros((1,1,3))\n for line in lines:\n \n #line = line.replace('images','labels')\n #line = line.replace('img1','labels')\n line = line.replace('JPEGImages','labels') \n \n\n line = line.replace('.jpg','.txt')\n line = line.replace('.png','.txt')\n print(line)\n f2 = open(line)\n for line in f2.readlines():\n 
line = line.rstrip('\\n')\n w,h = line.split(' ')[3:] \n #print(w,h)\n annotation_dims.append(map(float,(w,h)))\n annotation_dims = np.array(annotation_dims)\n \n eps = 0.005\n \n if args.num_clusters == 0:\n for num_clusters in range(1,11): #we make 1 through 10 clusters \n anchor_file = join( args.output_dir,'anchors%d.txt'%(num_clusters))\n\n indices = [ random.randrange(annotation_dims.shape[0]) for i in range(num_clusters)]\n centroids = annotation_dims[indices]\n kmeans(annotation_dims,centroids,eps,anchor_file)\n print('centroids.shape', centroids.shape)\n else:\n anchor_file = join( args.output_dir,'anchors%d.txt'%(args.num_clusters))\n indices = [ random.randrange(annotation_dims.shape[0]) for i in range(args.num_clusters)]\n centroids = annotation_dims[indices]\n kmeans(annotation_dims,centroids,eps,anchor_file)\n print('centroids.shape', centroids.shape)\n\nif __name__==\"__main__\":\n main(sys.argv)\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.argmin",
"numpy.sum",
"numpy.ones",
"numpy.argsort",
"numpy.abs"
]
] |
zhiyiYo/Alpha-Gobang-Zero | [
"b0e90ae456b02754956be83a0d6495391390e666"
] | [
"utils/draw_model.py"
] | [
"# coding:utf-8\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom alphazero import PolicyValueNet\n\n\nnet = PolicyValueNet(is_use_gpu=False)\nwith SummaryWriter('log', comment='策略-价值模型') as w:\n w.add_graph(net, torch.zeros(1, 6, 9, 9))\n"
] | [
[
"torch.zeros",
"torch.utils.tensorboard.SummaryWriter"
]
] |
newsettle/ns4_chatbot | [
"526b97aa31292c28d10518bbfaa7466b8ba109ee"
] | [
"business/classify/data_process.py"
] | [
"#-*- coding:utf-8 -*-\nimport time\nfrom bs4 import BeautifulSoup\nfrom scipy.sparse import csc_matrix\nimport pandas as pd\nimport jieba,re,sys\nfrom gensim import corpora, models\nimport gensim\nimport logging as logger\nfrom time import time\nfrom sqlalchemy import create_engine\nimport sys,numpy as np\nfrom common.common import duration\nimport progressbar\n\nclass DataProcess(object):\n\n\tdef __init__(self):\n\t\treload(sys)\n\t\tif sys.getdefaultencoding() != 'utf-8':\n\t\t\tsys.setdefaultencoding('utf-8')\n\t\tlogger.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logger.DEBUG)\n\n\t\tself.stopwords = self.load_stopword()\n\n\tdef process_line(self,html):\n\t\tcontent_with_alphabet_num = self.clean_html(html)\n\t\tpure_content = self.filter_only_Chinese(content_with_alphabet_num)\n\t\tcut_text = self.cut_words(pure_content, self.stopwords)\n\t\treturn cut_text\n\n\tdef clean_process(self,df):\n\t\tt = time()\n\n\t\tpb = progressbar.ProgressBar(len(df))\n\t\tpb.start()\n\n\t\tMAX_LENGTH = 10000\n\t\tfor index, row in df.iterrows():\n\n\t\t\tcut_text = self.process_line(row['email_html'])\n\n\t\t\ttry:\n\t\t\t\tpb.update(index)\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\n\t\t\tif len(cut_text) > MAX_LENGTH: cut_text = cut_text[0:MAX_LENGTH]\n\t\t\tdf.loc[index,'html_cut'] = cut_text\n\n\t\t\t#每列遍历,都看看长度是否大于某个值,截断\n\n\t\t\tfor k, v in row.iteritems():\n\t\t\t\tif v is None : continue\n\t\t\t\tif len(v)>MAX_LENGTH:\n\t\t\t\t\tlogger.warn(\"列[%r]的值长度[%d]大于[%d],截断\",k,len(v),MAX_LENGTH)\n\t\t\t\t\t# row[k] = v[0:MAX_LENGTH]\n\t\t\t\t\tdf.loc[index, k] = v[0:MAX_LENGTH]\n\n\t\t\t# 进度条\n\t\t\ttry:\n\t\t\t\tpb.update(index)\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\tpb.finish()\n\t\tduration(t, \"清洗数据:去除HTML tag,去除无用字符\")\n\n\t\treturn df\n\n\n\tdef caculate_tfidf(self,df):\n\t\tlogger.debug(\"一共%d行文本数据\",len(df))\n\n\t\tall_rows = []\n\t\t#每行,再把空格链接的大字符串变成数组\n\t\tfor one in df['html_cut']:\n\t\t\t#print(one)\n\t\t\t#print (\"------------------\")\n\t\t\tcut_content_list = one.split(\" \")\n\t\t\tall_rows.append(cut_content_list)\n\n\t\t#得到词典\t,去重的\n\t\tdictionary = corpora.Dictionary(all_rows)\n\t\tlogger.debug(\"词袋一共%d个词\",len(dictionary.keys()))\n\n\t\t#把他所有的句子变成one-hot的词袋向量,corpus里面是一个向量数组\n\t\t#corpus = [dictionary.doc2bow(one_row) for one_row in all_rows]\n\t\tcorpus = []\n\t\tfor index, one_row in df.iterrows():\n\t\t\thtml_cut = one_row['html_cut'].split(\" \")\n\t\t\tbow = dictionary.doc2bow(html_cut)#频次数组\n\t\t\tone_row['doc2bow'] = bow #每一行增加一个字段bow,存在这个语料的向量\n\t\t\tone_row['hash'] = hash(str(bow)) #每一行增加一个字段hash,hash这个语料\n\n\t\t\tcorpus.append(bow)\n\n\t\tlogger.debug(\"语料行数:%d\",len(corpus))\n\n\t\t#从one-hot向量 ---> 生成tf-idf模型\n\t\ttfidf_model = models.TfidfModel(corpus,dictionary)\n\t\tlogger.debug(\"由语料生成TFIDF模型\")\n\t\tprint (tfidf_model)\n\n\t\t#对每行语料进行tf-idf向量化\n\t\tcorpus_tfidf = tfidf_model[corpus]\n\t\tscipy_csc_matrix =gensim.matutils.corpus2csc(corpus_tfidf,num_terms=len(dictionary.keys()))\n\t\tdata = csc_matrix(scipy_csc_matrix).T.toarray()\n\t\tlogger.debug(\"得到每行语料的tfidf向量,是一个矩阵,Shape:%r\",data.shape)\n\n\t\treturn data,dictionary,tfidf_model\n\n\t#把一行文档,转成一个tfidf的向量,维度是词表,值是tfidf值\n\t#入参是一个字符串list[\"我 唉 北京\",\"天安门 上 太阳\",\"升 起 国旗\",...]\n\t#出参是还你一个tfidf数组\n\tdef get_tfidf_vector(self,doc_list,dictionary,tfidf_model):\n\n\t\tcorpus = [ doc.split(\" \") for doc in doc_list] #\t[\"xx xxx xx\"=>['xx','xxx','xx'],...\n\t\t# logger.debug(\"得到的语料为:%r\",corpus)\n\t\tdoc_bows = []\n\t\tdocs_tfidf = []\n\t\tfor c in 
corpus:\n\t\t\ttry:\n\t\t\t\tdoc_bow = dictionary.doc2bow(c) # ['xx','xxx','xx']=>[12,24,12],词表中的id\n\t\t\t\t# logger.debug(doc_bow)\n\t\t\t\tdoc_bows.append(doc_bow)\n\n\t\t\t\tdoc_tfidf = tfidf_model[doc_bow] # 从模型中得到对应的tfidf\n\t\t\t\tdocs_tfidf.append(doc_tfidf)\n\t\t\texcept TypeError as e:\n\t\t\t\tlogger.error(\"处理语料成为one hot失败:%r\", c)\n\n\t\tlogger.debug(\"正在转化%d个分词行成为OneHot tfidf向量\",len(doc_bows))\n\n\t\t#从doc_tfidf变成一个稀硫矩阵,doc_tfidf是gensim的一个类,而得到的也是一个压缩矩阵\n\t\t_csc_matrix = gensim.matutils.corpus2csc(docs_tfidf,num_terms=len(dictionary.keys()))\n\t\tdata = csc_matrix(_csc_matrix).T.toarray()#还原成标准矩阵,需要转置一下\n\t\tlogger.debug(\"得到的tfidf向量维度为:%r\",data.shape)\n\t\treturn data\n\n\n\tdef filter_only_Chinese(self,content):\n\t\tr = '[a-zA-Z0-9’!\"#$%&\\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\\\]^_`{|}~]+'\n\t\treturn re.sub(r, '', content)\n\n\t#这个是业务的一个大类,\n\t#我是是按照: 系统->大类->分类来分的\n\t#这个类,亚可正在整理,会定义出来,目前先按照rule_type_code来聚类\n\tdef get_big_class(self,con):\n\n\t\tdf = pd.DataFrame(np.array(\n\t\t\t\t[['NEW_SETTLEMENT', 'RESPCODE'],\n\t\t\t\t ['NEW_SETTLEMENT', 'BATCH_EXCEPTION_TRADE'],\n\t\t\t\t ['NEW_SETTLEMENT', 'TRANSFER_PROCESSING_TIMEOUT'],\n\t\t\t\t ['NEW_SETTLEMENT', 'NETWORK_EXCEPTION'],\n\t\t\t\t ['NEW_SETTLEMENT', 'STATISTICALTRADERATE'],\n\t\t\t\t ['NEW_SETTLEMENT', 'TRADE_OVERTIME'],\n\t\t\t\t ['NEW_SETTLEMENT', 'MID_STATE'],\n\t\t\t\t ['CASH_COMPASS', 'TRADE_USETIME'],\n\t\t\t\t ['CASH_COMPASS', 'RESPCODE'],\n\t\t\t\t ['CASH_COMPASS', 'TRADE_OVERTIME'],\n\t\t\t\t ['CASH_COMPASS', 'TIMER_START'],\n\t\t\t\t ['CASH_COMPASS', 'TIMER_OVERTIME'],\n\t\t\t\t ['CASH_COMPASS', 'BANK_TRADE_USETIME'],\n\t\t\t\t ['CASH_COMPASS', 'ERR_CODE'],\n\t\t\t\t ['CASH_COMPASS', 'DATA_OVER_STOCK'],\n\t\t\t\t ['CASH_COMPASS', 'NETWORK_EXCEPTION']]),\n\t\t\t\t# index=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],\n\t\t\t\tcolumns=['business_system_code','rule_type_code'])\n\t\t# df = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),\n\t\t# columns = ['a', 'b', 'c', 'd', 'e'])\n\t\treturn df\n\n\t\t# sql = \"SELECT distinct alarm.business_system_code,alarm.rule_type_code \\\n\t\t# \t FROM work_order, alarm \\\n\t\t# \t WHERE work_order.alarm_id = alarm.alarm_id \";\n\t\t# df = pd.read_sql(sql,con=con)\n\t\t# return df#['business_system_code','rule_type_code']\n\n\tdef test_html_parse(self):\n\t\tstop_words = self.load_stopword()\n\t\twith open(\"1.html\",\"r\") as f:\n\t\t\thtml = f.read()\n\t\t\tpure_content = self.process_line(html,stop_words)\n\t\t\tprint(pure_content)\n\n\n\tdef connect_db(self,db_ip,db_name,user,passwd):\n\t\tconn = create_engine(\"mysql+mysqldb://{}:{}@{}:3306/{}?charset=utf8\".format(user,passwd,db_ip,db_name)\n\t\t\t\t\t\t\t , pool_recycle=3600)\n\n\t\treturn conn\n\n\tdef load_data(self,conn):\n\t\t#NEW_SETTLEMENT\n\n\t\t#故障码类型的,准确出警的\n\t\tsql = \" \\\n\t\tSELECT \t\\\n\t\t\twork_order_id,alarm.alarm_id,\\\n\t\t\twork_order_title,email_title, \\\n\t\t\talarm.business_system_code,important_level_code,\\\n\t\t\tdispose_result_code,dispose_result_name, \\\n\t\t\twork_order_type_code,alarm.rule_type_code, \\\n\t\t\talarm_content,email_html,alarm_level,alarm_title \\\n\t\tFROM work_order, alarm \\\n\t\tWHERE work_order.alarm_id = alarm.alarm_id \\\n\t\tLIMIT 10000 \"\n\n\t\tdf = pd.read_sql(sql,con=conn)\n\t\treturn df\n\n\tdef clean_html(self,html):\n\t\tbs = BeautifulSoup(html,\"lxml\")\n\t\treturn bs.getText().strip()\n\n\tdef load_stopword(self):\n\t\tf_stop = open('corpus/stopwords.txt')\n\t\tsw = [line.strip() for line in f_stop]\n\t\tf_stop.close()\n\t\treturn 
sw\n\n\tdef cut_words(self,line,stop_words):\n\t\tseg_list = jieba.cut(line,cut_all=False)\n\t\treturn \" \".join([word for word in seg_list if word not in stop_words])\n\nif __name__ == '__main__':\n\tdp = DataProcess()\n# \tcon = dp.connect_db(db_ip,db_name,user,passwd)\n\tprint(dp.get_big_class(None))\n\tprint(\"完事了!\")"
] | [
[
"numpy.array",
"pandas.read_sql",
"scipy.sparse.csc_matrix"
]
] |
dfuttu1/AGNN | [
"579a28388ba3e28d3382ef71c4ab089bedb4705a"
] | [
"code/datasets.py"
] | [
"import os.path as osp\nimport numpy as np\nimport scipy.sparse as sp\nimport networkx as nx\nimport os\nimport torch\nimport sys\nimport argparse\nimport numpy as np\nfrom torch_geometric.utils import to_undirected\nfrom tqdm import tqdm\nfrom torch_geometric.data import InMemoryDataset, Data, DataLoader\nfrom get_adj import get_undirected_adj, get_directed_adj, get_pr_directed_adj, get_appr_directed_adj, get_second_directed_adj\n\n# citation and Amazon co-porchase datasets\nclass Datasets(InMemoryDataset):\n r\"\"\"\n For citation datasets, nodes represent documents and edges represent citation links.\n Training, validation and test splits are given by binary masks.\n\n For Amazon co-purchase, nodes represent goods, edges indicate that two goods are \n frequently bought together, node features are bag-of-words encoded product reviews, \n and class labels are given by the product category.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset (:obj:`\"cora_ml\"`,\n :obj:`\"citeseer\"`, :obj:`\"amazon_computer\", :obj:`\"amazon_photo\").\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n \"\"\"\n\n def __init__(self, root, name, adj_type=None, transform=None, pre_transform=None):\n self.name = name\n self.adj_type = adj_type\n super(Datasets, self).__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_dir(self):\n return osp.join(self.root, self.name, 'raw')\n\n @property\n def processed_dir(self):\n return osp.join(self.root, self.name, 'processed')\n\n @property\n def raw_file_names(self):\n return\n\n @property\n def processed_file_names(self):\n return 'data.pt'\n\n # def download(self):\n # return\n\n def process(self):\n data = process_datasets(self.raw_dir, self.name, self.adj_type)\n # data = read_planetoid_data(self.raw_dir, self.name)\n data = data if self.pre_transform is None else self.pre_transform(data)\n torch.save(self.collate([data]), self.processed_paths[0])\n\n def __repr__(self):\n return '{}()'.format(self.name)\n\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\ndef process_datasets(path=\"./data/citeseer/raw/\", dataset='citeseer', adj_type='di'):\n se = 1020\n if dataset == 'cora_ml' or 'citeseer':\n se = 177\n os.makedirs(path, exist_ok=True)\n dataset_path = os.path.join(path, '{}.npz'.format(dataset))\n g = load_npz_dataset(dataset_path)\n adj, features, labels = g['A'], g['X'], g['z']\n \n # Set new random splits:\n # * 20 * num_classes labels for training\n # * 500 labels for validation\n # * the rest for testing\n\n mask = train_test_split(labels, seed=se, train_examples_per_class=20, val_size=500, test_size=None)\n\n mask['train'] = 
torch.from_numpy(mask['train']).bool()\n mask['val'] = torch.from_numpy(mask['val']).bool()\n mask['test'] = torch.from_numpy(mask['test']).bool()\n\n coo = adj.tocoo()\n # incoming edges and outgoing edge\n indices = np.vstack((coo.row, coo.col))\n indices2 = np.vstack((coo.col, coo.row))\n\n indices = torch.from_numpy(indices).long()\n indices2 = torch.from_numpy(indices2).long()\n features = torch.from_numpy(features.todense()).float()\n labels = torch.from_numpy(labels).long()\n\n if adj_type == 'un':\n print(\"Processing to undirected adj matrix\")\n indices = to_undirected(indices)\n # normlize the symmetric adjacency matrix\n edge_index, edge_weight = get_undirected_adj(indices, features.shape[0], features.dtype)\n data = Data(x=features, edge_index=edge_index, edge_weight=edge_weight, y=labels)\n elif adj_type == 'di':\n print(\"Processing to directed adj matrix\")\n # normlize the asymmetric adjacency matrix\n edge_index1, edge_weight1 = get_directed_adj(indices, features.shape[0], features.dtype)\n data = Data(x=features, edge_index=edge_index1, edge_weight=edge_weight1, y=labels)\n edge_index2, edge_weight2 = get_directed_adj(indices2, features.shape[0], features.dtype)\n data.edge_index2 = edge_index2\n data.edge_weight2 = edge_weight2\n elif adj_type == 'or':\n print(\"Processing to original directed adj\")\n data = Data(x=features, edge_index=indices, edge_weight=None, y=labels)\n else:\n print(\"Unsupported adj type.\")\n sys.exit()\n \n data.train_mask = mask['train']\n data.val_mask = mask['val']\n data.test_mask = mask['test']\n\n return data\n\ndef load_npz_dataset(file_name):\n \"\"\"Load a graph from a Numpy binary file.\n\n Parameters\n ----------\n file_name : str\n Name of the file to load.\n\n Returns\n -------\n graph : dict\n Dictionary that contains:\n * 'A' : The adjacency matrix in sparse matrix format\n * 'X' : The attribute matrix in sparse matrix format\n * 'z' : The ground truth class labels\n * Further dictionaries mapping node, class and attribute IDs\n\n \"\"\"\n if not file_name.endswith('.npz'):\n file_name += '.npz'\n with np.load(file_name, allow_pickle=True) as loader:\n loader = dict(loader)\n # edge_index = loader['adj_indices'].copy()\n A = sp.csr_matrix((loader['adj_data'], loader['adj_indices'],\n loader['adj_indptr']), shape=loader['adj_shape'])\n\n X = sp.csr_matrix((loader['attr_data'], loader['attr_indices'],\n loader['attr_indptr']), shape=loader['attr_shape'])\n\n z = loader.get('labels')\n\n graph = {\n 'A': A,\n 'X': X,\n 'z': z\n }\n\n idx_to_node = loader.get('idx_to_node')\n if idx_to_node:\n idx_to_node = idx_to_node.tolist()\n graph['idx_to_node'] = idx_to_node\n\n idx_to_attr = loader.get('idx_to_attr')\n if idx_to_attr:\n idx_to_attr = idx_to_attr.tolist()\n graph['idx_to_attr'] = idx_to_attr\n\n idx_to_class = loader.get('idx_to_class')\n if idx_to_class:\n idx_to_class = idx_to_class.tolist()\n graph['idx_to_class'] = idx_to_class\n\n return graph\n\ndef sample_per_class(random_state, labels, num_examples_per_class, forbidden_indices=None):\n num_samples = labels.shape[0]\n num_classes = labels.max()+1\n sample_indices_per_class = {index: [] for index in range(num_classes)}\n\n # get indices sorted by class\n for class_index in range(num_classes):\n for sample_index in range(num_samples):\n if labels[sample_index] == class_index:\n if forbidden_indices is None or sample_index not in forbidden_indices:\n sample_indices_per_class[class_index].append(sample_index)\n\n # get specified number of indices for each class\n return 
np.concatenate(\n [random_state.choice(sample_indices_per_class[class_index], num_examples_per_class, replace=False)\n for class_index in range(len(sample_indices_per_class))\n ])\n\n\ndef get_train_val_test_split(random_state,\n labels,\n train_examples_per_class=None, val_examples_per_class=None,\n test_examples_per_class=None,\n train_size=None, val_size=None, test_size=None):\n num_samples = labels.shape[0]\n num_classes = labels.max()+1\n remaining_indices = list(range(num_samples))\n\n if train_examples_per_class is not None:\n train_indices = sample_per_class(\n random_state, labels, train_examples_per_class)\n else:\n # select train examples with no respect to class distribution\n train_indices = random_state.choice(\n remaining_indices, train_size, replace=False)\n\n if val_examples_per_class is not None:\n val_indices = sample_per_class(\n random_state, labels, val_examples_per_class, forbidden_indices=train_indices)\n else:\n remaining_indices = np.setdiff1d(remaining_indices, train_indices)\n val_indices = random_state.choice(\n remaining_indices, val_size, replace=False)\n\n forbidden_indices = np.concatenate((train_indices, val_indices))\n if test_examples_per_class is not None:\n test_indices = sample_per_class(random_state, labels, test_examples_per_class,\n forbidden_indices=forbidden_indices)\n elif test_size is not None:\n remaining_indices = np.setdiff1d(remaining_indices, forbidden_indices)\n test_indices = random_state.choice(\n remaining_indices, test_size, replace=False)\n else:\n test_indices = np.setdiff1d(remaining_indices, forbidden_indices)\n\n # assert that there are no duplicates in sets\n assert len(set(train_indices)) == len(train_indices)\n assert len(set(val_indices)) == len(val_indices)\n assert len(set(test_indices)) == len(test_indices)\n # assert sets are mutually exclusive\n assert len(set(train_indices) - set(val_indices)\n ) == len(set(train_indices))\n assert len(set(train_indices) - set(test_indices)\n ) == len(set(train_indices))\n assert len(set(val_indices) - set(test_indices)) == len(set(val_indices))\n if test_size is None and test_examples_per_class is None:\n # all indices must be part of the split\n assert len(np.concatenate(\n (train_indices, val_indices, test_indices))) == num_samples\n\n if train_examples_per_class is not None:\n train_labels = labels[train_indices]\n train_sum = np.sum(train_labels, axis=0)\n # assert all classes have equal cardinality\n assert np.unique(train_sum).size == 1\n\n if val_examples_per_class is not None:\n val_labels = labels[val_indices]\n val_sum = np.sum(val_labels, axis=0)\n # assert all classes have equal cardinality\n assert np.unique(val_sum).size == 1\n\n if test_examples_per_class is not None:\n test_labels = labels[test_indices]\n test_sum = np.sum(test_labels, axis=0)\n # assert all classes have equal cardinality\n assert np.unique(test_sum).size == 1\n\n return train_indices, val_indices, test_indices\n\ndef train_test_split(labels, seed, train_examples_per_class=None, val_examples_per_class=None, test_examples_per_class=None, train_size=None, val_size=None, test_size=None):\n random_state = np.random.RandomState(seed)\n train_indices, val_indices, test_indices = get_train_val_test_split(\n random_state, labels, train_examples_per_class, val_examples_per_class, test_examples_per_class, train_size, val_size, test_size)\n\n #print('number of training: {}'.format(len(train_indices)))\n #print('number of validation: {}'.format(len(val_indices)))\n #print('number of testing: 
{}'.format(len(test_indices)))\n\n train_mask = np.zeros((labels.shape[0], 1), dtype=int)\n train_mask[train_indices, 0] = 1\n train_mask = np.squeeze(train_mask, 1)\n val_mask = np.zeros((labels.shape[0], 1), dtype=int)\n val_mask[val_indices, 0] = 1\n val_mask = np.squeeze(val_mask, 1)\n test_mask = np.zeros((labels.shape[0], 1), dtype=int)\n test_mask[test_indices, 0] = 1\n test_mask = np.squeeze(test_mask, 1)\n mask = {}\n mask['train'] = train_mask\n mask['val'] = val_mask\n mask['test'] = test_mask\n return mask\n\n####################################### NA\ncmd_opt = argparse.ArgumentParser()\ngraph_args, _ = cmd_opt.parse_known_args()\n\ndef load_ENAS_data(dataset_path, n_types=6, batch_size=64, adj_type='di', with_y=True, burn_in=1000):\n # load ENAS format NNs to pyg_graphs\n g_list = []\n max_n = 0 # maximum number of nodes\n with open(dataset_path, 'r') as f:\n for i, row in enumerate(tqdm(f)):\n if i < burn_in:\n continue\n if row is None:\n break\n if with_y:\n row, y = eval(row)\n else:\n row = eval(row)\n y = 0.0\n # decode data to pyggraph\n g = decode_ENAS_to_pygraph(row, y, adj_type)\n max_n = max(max_n, g.num_nodes)\n g_list.append(g)\n graph_args.num_vertex_type = n_types + 2\n graph_args.max_n = max_n # maximum number of nodes\n ng = len(g_list)\n print('# node types: %d' % graph_args.num_vertex_type)\n print('maximum # nodes: %d' % graph_args.max_n)\n # split train/test data\n train_data = g_list[:int(ng*0.9)]\n test_data = g_list[int(ng*0.9):]\n # construct batch\n train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\n test_data_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)\n\n return train_data_loader, test_data_loader\n\ndef one_hot(idx, length):\n idx = torch.LongTensor([idx]).unsqueeze(0)\n x = torch.zeros((1, length)).scatter_(1, idx, 1)\n return x\n\ndef decode_ENAS_to_pygraph(row, y, adj_type, n_types=6):\n n_types += 2 # add start_type 0, end_type 1\n\n adj = np.zeros((n_types, n_types))\n x = []\n if type(row) == str:\n row = eval(row) # convert string to list of lists\n n = len(row) # n+2 is the real number of vertices in the DAG\n x += [one_hot(0, n_types)]\n # ignore start vertex\n node_types2 = []\n for i, node in enumerate(row):\n node_type = node[0] + 2 # convert 0, 1, 2... 
to 2, 3, 4...\n node_types2 += [node_type]\n x += [one_hot(node_type, n_types)]\n adj[i, i+1] = 1\n for j, edge in enumerate(node[1:]):\n if edge == 1:\n adj[j, i + 1] = 1\n\n # output node\n node_type = 1\n x += [one_hot(node_type, n_types)]\n adj[n, n + 1] = 1\n\n nx_graph = nx.DiGraph(adj)\n x = torch.cat(x, dim=0).float()\n\n ro, col = torch.tensor(list(nx_graph.edges)).t().contiguous()\n \n indices = np.vstack((ro, col))\n indices2 = np.vstack((col, ro))\n\n indices = torch.from_numpy(indices).long()\n indices2 = torch.from_numpy(indices2).long()\n if adj_type == 'di':\n edge_index1, edge_weight1 = get_directed_adj(indices, x.shape[0], x.dtype)\n graph = Data(x=x, edge_index=edge_index1, edge_weight=edge_weight1, y=y)\n edge_index2, edge_weight2 = get_directed_adj(indices2, x.shape[0], x.dtype)\n graph.edge_index2 = edge_index2\n graph.edge_weight2 = edge_weight2\n elif adj_type == 'ib':\n edge_index1, edge_weight1 = get_appr_directed_adj(0.1, indices, x.shape[0], x.dtype) \n graph = Data(x=x, edge_index=edge_index1, edge_weight=edge_weight1, y=y)\n edge_index2, edge_weight2 = get_second_directed_adj(indices, x.shape[0], x.dtype)\n graph.edge_index2 = edge_index2\n graph.edge_weight2 = edge_weight2\n\n return graph\n\n########################################## BN\ndef load_BN_data(dataset_path, n_types=6, batch_size=64, adj_type='di', with_y=True):\n # load raw Bayesian network strings to pyg_graphs\n g_list = []\n max_n = 0 # maximum number of nodes\n with open(dataset_path, 'r') as f:\n for i, row in enumerate(tqdm(f)):\n if row is None:\n break\n if with_y:\n row, y = eval(row)\n else:\n row = eval(row)\n y = 0.0\n g = decode_BN_to_pygraph(row, y, adj_type)\n max_n = max(max_n, g.num_nodes)\n assert(max_n == g.num_nodes) # all BNs should have the same node number\n g_list.append(g)\n graph_args.num_vertex_type = n_types + 2\n graph_args.max_n = max_n # maximum number of nodes\n ng = len(g_list)\n print('# node types: %d' % graph_args.num_vertex_type)\n print('maximum # nodes: %d' % graph_args.max_n)\n # random.Random(rand_seed).shuffle(g_list)\n train_data = g_list[:int(ng*0.9)]\n test_data = g_list[int(ng*0.9):]\n train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\n test_data_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)\n\n return train_data_loader, test_data_loader\n\ndef decode_BN_to_pygraph(row, y, adj_type, n_types=8):\n n_types += 2 # add start_type 0, end_type 1\n\n adj = np.zeros((n_types, n_types))\n x = []\n if type(row) == str:\n row = eval(row) # convert string to list of lists\n n = len(row) # n+2 is the real number of vertices in the DAG\n end_vertices = [True] * n\n x += [one_hot(0, n_types)]\n # ignore start vertex\n node_types2 = []\n for i, node in enumerate(row):\n node_type = node[0] + 2 # convert 0, 1, 2... 
to 2, 3, 4...\n node_types2 += [node_type]\n x += [one_hot(node_type, n_types)]\n if sum(node[1:]) == 0: # if no connections from previous nodes, connect from input\n adj[0, i + 1] = 1\n else:\n for j, edge in enumerate(node[1:]):\n if edge == 1:\n adj[j + 1, i + 1] = 1\n end_vertices[j] = False\n # output node\n node_type = 1\n x += [one_hot(node_type, n_types)]\n for j, flag in enumerate(end_vertices): # connect all loose-end vertices to the output node\n if flag == True:\n adj[j + 1, n + 1] = 1\n\n nx_graph = nx.DiGraph(adj)\n x = torch.cat(x, dim=0).float()\n\n ro, col = torch.tensor(list(nx_graph.edges)).t().contiguous()\n \n indices = np.vstack((ro, col))\n indices2 = np.vstack((col, ro))\n\n indices = torch.from_numpy(indices).long()\n indices2 = torch.from_numpy(indices2).long()\n if adj_type == 'di':\n edge_index1, edge_weight1 = get_directed_adj(indices, x.shape[0], x.dtype)\n graph = Data(x=x, edge_index=edge_index1, edge_weight=edge_weight1, y=y)\n edge_index2, edge_weight2 = get_directed_adj(indices2, x.shape[0], x.dtype)\n graph.edge_index2 = edge_index2\n graph.edge_weight2 = edge_weight2\n elif adj_type == 'ib':\n edge_index1, edge_weight1 = get_appr_directed_adj(0.1, indices, x.shape[0], x.dtype) \n graph = Data(x=x, edge_index=edge_index1, edge_weight=edge_weight1, y=y)\n edge_index2, edge_weight2 = get_second_directed_adj(indices, x.shape[0], x.dtype)\n graph.edge_index2 = edge_index2\n graph.edge_weight2 = edge_weight2\n\n return graph\n\nif __name__ == \"__main__\":\n ############################# node classification task: cora_ml, citeseer, am_computer, am_photo\n dataset = Datasets('./code/data/', 'citeseer', adj_type='di')\n\n\n ############################# graph regression task: NA\n train_data, test_data, graph_args = load_ENAS_data(\"./code/data/na/raw/final_structures6.txt\", n_types=6)\n\n for batch in train_data:\n print(batch)\n edge_index = batch.edge_index\n adj = sp.coo_matrix((torch.ones(edge_index.shape[1]), (edge_index[0,:], edge_index[1,:])),\n shape=(batch.num_nodes, batch.num_nodes),\n dtype=np.float32)\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n print(\"adj0: \", adj.to_dense()[0][:16])\n print(\"adj1: \", adj.to_dense()[1][:16])\n print(\"adj8: \", adj.to_dense()[8][:16])\n print(\"adj9: \", adj.to_dense()[9][:16])\n break"
] | [
[
"torch.Size",
"numpy.concatenate",
"torch.zeros",
"torch.cat",
"numpy.setdiff1d",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.sum",
"numpy.load",
"torch.sparse.FloatTensor",
"torch.from_numpy",
"torch.ones",
"torch.LongTensor",
"torch.load",
"numpy.unique",
"scipy.sparse.csr_matrix",
"numpy.squeeze",
"numpy.vstack"
]
] |
jeremiedecock/botsim | [
"73262092a8769c331edb96e083e32156f33bf948"
] | [
"utils/plot_part_dat.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2015 Jérémie DECOCK ([email protected])\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nimport argparse\n\ndef parse_part_log_file(filename):\n log_data = np.loadtxt(filename)\n\n data_dict = {}\n data_dict[\"time_sec\"] = log_data[:, 0]\n data_dict[\"position_x\"] = log_data[:, 1]\n data_dict[\"position_y\"] = log_data[:, 2]\n data_dict[\"position_z\"] = log_data[:, 3]\n data_dict[\"angle_x\"] = log_data[:, 4]\n data_dict[\"angle_y\"] = log_data[:, 5]\n data_dict[\"angle_z\"] = log_data[:, 6]\n data_dict[\"angle_w\"] = log_data[:, 7]\n data_dict[\"linear_velocity_x\"] = log_data[:, 8]\n data_dict[\"linear_velocity_y\"] = log_data[:, 9]\n data_dict[\"linear_velocity_z\"] = log_data[:,10]\n data_dict[\"angular_velocity_x\"] = log_data[:,11]\n data_dict[\"angular_velocity_y\"] = log_data[:,12]\n data_dict[\"angular_velocity_z\"] = log_data[:,13]\n data_dict[\"total_force_x\"] = log_data[:,14]\n data_dict[\"total_force_y\"] = log_data[:,15]\n data_dict[\"total_force_z\"] = log_data[:,16]\n data_dict[\"total_torque_x\"] = log_data[:,17]\n data_dict[\"total_torque_y\"] = log_data[:,18]\n data_dict[\"total_torque_z\"] = log_data[:,19]\n\n return data_dict\n\n\ndef main():\n \"\"\"Main function\"\"\"\n\n # PARSE OPTIONS ###################\n\n parser = argparse.ArgumentParser(description='Plot one or several part(s).')\n parser.add_argument('filenames', nargs='+', metavar='FILE', help='DAT file to read')\n parser.add_argument(\"--title\", \"-t\", help=\"set the title of the figure\", metavar=\"STRING\")\n args = parser.parse_args()\n\n title = args.title\n\n # PLOT DATA #######################\n\n fig = plt.figure(figsize=(16.0, 10.0))\n #fig = plt.figure()\n ax = fig.add_subplot(111)\n\n #ax.grid(True)\n\n for index, filename in enumerate(args.filenames):\n print(index, filename)\n\n data_dict = parse_part_log_file(filename)\n\n ax.plot(data_dict[\"time_sec\"], data_dict[\"position_z\"], label=filename)\n\n # TITLE AND LABELS ################\n\n FONTSIZE = 26\n FONTSIZE_S = 22\n\n if title is None:\n title = \"Parts position with respect to time.\"\n\n ax.set_title(title, fontsize=FONTSIZE)\n ax.set_xlabel(\"Time (sec)\", fontsize=FONTSIZE)\n ax.set_ylabel(\"Position\", fontsize=FONTSIZE)\n\n ax.legend(loc='best', fontsize=FONTSIZE_S)\n\n # SAVE FILES ######################\n\n fig_filename = \"parts.pdf\"\n plt.savefig(fig_filename)\n\n # PLOT ############################\n\n plt.show()\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.savefig",
"numpy.loadtxt",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
eteq/python-skyfield | [
"e524e069a52711c707e85a42ce86cc8e06f71862"
] | [
"skyfield/searchlib.py"
] | [
"\"\"\"Routines to search for maxima and zero crossings.\"\"\"\n\nfrom __future__ import print_function, division\n\nfrom numpy import (\n concatenate, diff, flatnonzero, linspace, multiply, sign, sort\n)\nfrom .constants import DAY_S\nEPSILON = 0.001 / DAY_S\n\ndef find_discrete(start_time, end_time, f, epsilon=EPSILON, num=12):\n \"\"\"Find the times when a function changes value.\n\n Search between ``start_time`` and ``end_time``, which should both be\n :class:`~skyfield.timelib.Time` objects, for the occasions where the\n function ``f`` changes from one value to another. Use this to\n search for events like sunrise or moon phases.\n\n A tuple of two arrays is returned. The first array gives the times\n at which the input function changes, and the second array specifies\n the new value of the function at each corresponding time.\n\n This is an expensive operation as it needs to repeatedly call the\n function to narrow down the times that it changes. It continues\n searching until it knows each time to at least an accuracy of\n ``epsilon`` Julian days. At each step, it creates an array of\n ``num`` new points between the lower and upper bound that it has\n established for each transition. These two values can be changed to\n tune the behavior of the search.\n\n \"\"\"\n ts = start_time.ts\n jd0 = start_time.tt\n jd1 = end_time.tt\n if jd0 >= jd1:\n raise ValueError('your start_time {0} is later than your end_time {1}'\n .format(start_time, end_time))\n\n periods = (jd1 - jd0) / f.rough_period\n if periods < 1.0:\n periods = 1.0\n\n jd = linspace(jd0, jd1, int(periods * num))\n return _find_discrete(ts, jd, f, epsilon, num)\n\n# TODO: pass in `y` so it can be precomputed?\n\ndef _find_discrete(ts, jd, f, epsilon, num):\n \"\"\"Algorithm core, for callers that already have a `jd` vector.\"\"\"\n end_mask = linspace(0.0, 1.0, num)\n start_mask = end_mask[::-1]\n o = multiply.outer\n\n while True:\n t = ts.tt_jd(jd)\n y = f(t)\n\n indices = flatnonzero(diff(y))\n if not len(indices):\n # Nothing found, so immediately return empty arrays.\n ends = jd.take(indices)\n y = y.take(indices)\n break\n\n starts = jd.take(indices)\n ends = jd.take(indices + 1)\n\n # Since we start with equal intervals, they all should fall\n # below epsilon at around the same time; so for efficiency we\n # only test the first pair.\n if ends[0] - starts[0] <= epsilon:\n y = y.take(indices + 1)\n # Keep only the last of several zero crossings that might\n # possibly be separated by less than epsilon.\n mask = concatenate(((diff(ends) > 3.0 * epsilon), (True,)))\n ends = ends[mask]\n y = y[mask]\n break\n\n jd = o(starts, start_mask).flatten() + o(ends, end_mask).flatten()\n\n return ts.tt_jd(ends), y\n\ndef find_maxima(start_time, end_time, f, epsilon, num):\n ts = start_time.ts\n jd0 = start_time.tt\n jd1 = end_time.tt\n rough_period = f.rough_period\n\n if jd0 >= jd1:\n raise ValueError('start_time {0} is not earlier than end_time {1}'\n .format(start_time, end_time))\n\n # We find maxima by investigating every point that is higher than\n # both points next to it. This presents a problem: if the initial\n # heights are, for example, [1.7, 1.1, 0.3, ...], there might be a\n # maximum 1.8 hidden between the first two heights, but it would not\n # meet the criteria for further investigation. 
To remedy this, we\n # put an extra point out beyond each end of our range, then filter\n # our final result to remove maxima that fall outside the range.\n bump = rough_period / num\n bumps = int((jd1 - jd0) / bump) + 3\n jd = linspace(jd0 - bump, jd1 + bump, bumps)\n\n end_mask = linspace(0.0, 1.0, num)\n start_mask = end_mask[::-1]\n o = multiply.outer\n\n while True:\n t = ts.tt_jd(jd)\n y = f(t)\n\n # Because artifical functions, like those in our units tests and\n # those that users might experiment with, might exhibit little\n # plateaus around a maximum - where the sign of the difference\n # drops to 0 before going negative - we do a little extra work\n # here. Naming a rising edge with \"1\", a plateau with \"2\", and\n # a falling edge with \"3\", we look for the two patterns:\n #\n # +1,-1 +1,0,-1\n # . ._.\n # / \\ / \\\n # . . . .\n\n n = sign(diff(y))\n rising = n == 1.0\n flat = n == 0.0\n falling = n == -1.0\n indices2 = flatnonzero(rising[:-1] & falling[1:])\n indices3 = flatnonzero(rising[:-2] & flat[1:-1] & falling[2:])\n\n if len(indices3):\n # The uncommon, artificial case, that requires a bit of\n # extra effort to keep the resulting arrays sorted.\n start_indices = concatenate((indices2, indices3 + 1))\n end_indices = concatenate((indices2 + 2, indices3 + 2))\n sort(start_indices)\n sort(end_indices)\n elif len(indices2):\n # The common case: at least one maxima exists, and all\n # maxima were simple peaks.\n start_indices = indices2\n end_indices = indices2 + 2\n else:\n # No maxima found.\n jd = y = y[0:0]\n break\n\n starts = jd.take(start_indices)\n ends = jd.take(end_indices)\n\n # Since we start with equal intervals, they all should fall\n # below epsilon at around the same time; so for efficiency we\n # only test the first pair.\n if ends[0] - starts[0] <= epsilon:\n jd = ends\n y = y.take(end_indices)\n\n # Filter out maxima that fell slightly outside our bounds.\n keepers = (jd >= jd0) & (jd <= jd1)\n jd = jd[keepers]\n y = y[keepers]\n\n # Keep only the first of several maxima that are separated\n # by less than epsilon.\n if len(jd):\n mask = concatenate(((True,), diff(jd) > epsilon))\n jd = jd[mask]\n y = y[mask]\n\n break\n\n jd = o(starts, start_mask).flatten() + o(ends, end_mask).flatten()\n\n return ts.tt_jd(jd), y\n"
] | [
[
"numpy.concatenate",
"numpy.diff",
"numpy.sort",
"numpy.linspace",
"numpy.flatnonzero"
]
] |
Arpita-25/invincible | [
"124beceec3c45458fe19b95a59eb7c01dd74e85b"
] | [
"WebD_File/try_on/cp_dataset.py"
] | [
"# coding=utf-8\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\n\nfrom PIL import Image\nfrom PIL import ImageDraw\n\nimport os.path as osp\nimport numpy as np\nimport json\n\n\nclass CPDataset(data.Dataset):\n \"\"\"Dataset for CP-VTON+.\n \"\"\"\n\n def __init__(self, opt):\n super(CPDataset, self).__init__()\n # base setting\n self.opt = opt\n self.root = opt.dataroot\n self.datamode = opt.datamode # train or test or self-defined\n self.stage = opt.stage # GMM or TOM\n self.data_list = opt.data_list\n self.fine_height = opt.fine_height\n self.fine_width = opt.fine_width\n self.radius = opt.radius\n self.data_path = osp.join(opt.dataroot, opt.datamode)\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n # load data list\n im_names = []\n c_names = []\n with open(osp.join(opt.dataroot, opt.data_list), 'r') as f:\n for line in f.readlines():\n im_name, c_name = line.strip().split()\n im_names.append(im_name)\n c_names.append(c_name)\n\n self.im_names = im_names\n self.c_names = c_names\n\n def name(self):\n return \"CPDataset\"\n\n def __getitem__(self, index):\n c_name = self.c_names[index]\n im_name = self.im_names[index]\n if self.stage == 'GMM':\n c = Image.open(osp.join(self.data_path, 'cloth', c_name))\n cm = Image.open(osp.join(self.data_path, 'cloth-mask', c_name)).convert('L')\n else:\n c = Image.open(osp.join(self.data_path, 'GMM','test','warp-cloth', im_name)) # c_name, if that is used when saved\n cm = Image.open(osp.join(self.data_path, 'GMM','test','warp-mask', im_name)).convert('L') # c_name, if that is used when saved\n\n c = self.transform(c) # [-1,1]\n cm_array = np.array(cm)\n cm_array = (cm_array >= 128).astype(np.float32)\n cm = torch.from_numpy(cm_array) # [0,1]\n cm.unsqueeze_(0)\n\n # person image\n im = Image.open(osp.join(self.data_path, 'image', im_name))\n im = self.transform(im) # [-1,1]\n\n \"\"\"\n LIP labels\n \n [(0, 0, 0), # 0=Background\n (128, 0, 0), # 1=Hat\n (255, 0, 0), # 2=Hair\n (0, 85, 0), # 3=Glove\n (170, 0, 51), # 4=SunGlasses\n (255, 85, 0), # 5=UpperClothes\n (0, 0, 85), # 6=Dress\n (0, 119, 221), # 7=Coat\n (85, 85, 0), # 8=Socks\n (0, 85, 85), # 9=Pants\n (85, 51, 0), # 10=Jumpsuits\n (52, 86, 128), # 11=Scarf\n (0, 128, 0), # 12=Skirt\n (0, 0, 255), # 13=Face\n (51, 170, 221), # 14=LeftArm\n (0, 255, 255), # 15=RightArm\n (85, 255, 170), # 16=LeftLeg\n (170, 255, 85), # 17=RightLeg\n (255, 255, 0), # 18=LeftShoe\n (255, 170, 0) # 19=RightShoe\n (170, 170, 50) # 20=Skin/Neck/Chest (Newly added after running dataset_neck_skin_correction.py)\n ]\n \"\"\"\n\n # load parsing image\n parse_name = im_name.replace('.jpg', '.png')\n im_parse = Image.open(\n # osp.join(self.data_path, 'image-parse', parse_name)).convert('L')\n osp.join(self.data_path, 'image-parse-new', parse_name)).convert('L') # updated new segmentation\n parse_array = np.array(im_parse)\n im_mask = Image.open(\n osp.join(self.data_path, 'image-mask', parse_name)).convert('L')\n mask_array = np.array(im_mask)\n\n # parse_shape = (parse_array > 0).astype(np.float32) # CP-VTON body shape\n # Get shape from body mask (CP-VTON+)\n parse_shape = (mask_array > 0).astype(np.float32)\n\n if self.stage == 'GMM':\n parse_head = (parse_array == 1).astype(np.float32) + \\\n (parse_array == 4).astype(np.float32) + \\\n (parse_array == 13).astype(\n np.float32) # CP-VTON+ GMM input (reserved regions)\n else:\n parse_head = (parse_array == 1).astype(np.float32) + \\\n 
(parse_array == 2).astype(np.float32) + \\\n (parse_array == 4).astype(np.float32) + \\\n (parse_array == 9).astype(np.float32) + \\\n (parse_array == 12).astype(np.float32) + \\\n (parse_array == 13).astype(np.float32) + \\\n (parse_array == 16).astype(np.float32) + \\\n (parse_array == 17).astype(\n np.float32) # CP-VTON+ TOM input (reserved regions)\n\n parse_cloth = (parse_array == 5).astype(np.float32) + \\\n (parse_array == 6).astype(np.float32) + \\\n (parse_array == 7).astype(np.float32) # upper-clothes labels\n\n # shape downsample\n parse_shape_ori = Image.fromarray((parse_shape*255).astype(np.uint8))\n parse_shape = parse_shape_ori.resize(\n (self.fine_width//16, self.fine_height//16), Image.BILINEAR)\n parse_shape = parse_shape.resize(\n (self.fine_width, self.fine_height), Image.BILINEAR)\n parse_shape_ori = parse_shape_ori.resize(\n (self.fine_width, self.fine_height), Image.BILINEAR)\n shape_ori = self.transform(parse_shape_ori) # [-1,1]\n shape = self.transform(parse_shape) # [-1,1]\n phead = torch.from_numpy(parse_head) # [0,1]\n # phand = torch.from_numpy(parse_hand) # [0,1]\n pcm = torch.from_numpy(parse_cloth) # [0,1]\n\n # upper cloth\n im_c = im * pcm + (1 - pcm) # [-1,1], fill 1 for other parts\n im_h = im * phead - (1 - phead) # [-1,1], fill -1 for other parts\n\n # load pose points\n pose_name = im_name.replace('.jpg', '_keypoints.json')\n with open(osp.join(self.data_path, 'pose', pose_name), 'r') as f:\n pose_label = json.load(f)\n pose_data = pose_label['people'][0]['pose_keypoints']\n pose_data = np.array(pose_data)\n pose_data = pose_data.reshape((-1, 3))\n\n point_num = pose_data.shape[0]\n pose_map = torch.zeros(point_num, self.fine_height, self.fine_width)\n r = self.radius\n im_pose = Image.new('L', (self.fine_width, self.fine_height))\n pose_draw = ImageDraw.Draw(im_pose)\n for i in range(point_num):\n one_map = Image.new('L', (self.fine_width, self.fine_height))\n draw = ImageDraw.Draw(one_map)\n pointx = pose_data[i, 0]\n pointy = pose_data[i, 1]\n if pointx > 1 and pointy > 1:\n draw.rectangle((pointx-r, pointy-r, pointx +\n r, pointy+r), 'white', 'white')\n pose_draw.rectangle(\n (pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')\n one_map = self.transform(one_map)\n pose_map[i] = one_map[0]\n\n # just for visualization\n im_pose = self.transform(im_pose)\n\n # cloth-agnostic representation\n agnostic = torch.cat([shape, im_h, pose_map], 0)\n\n if self.stage == 'GMM':\n im_g = Image.open(r'C:\\Users\\lenovo\\Desktop\\try_on/grid.png')\n im_g = self.transform(im_g)\n else:\n im_g = ''\n\n pcm.unsqueeze_(0) # CP-VTON+\n\n result = {\n 'c_name': c_name, # for visualization\n 'im_name': im_name, # for visualization or ground truth\n 'cloth': c, # for input\n 'cloth_mask': cm, # for input\n 'image': im, # for visualization\n 'agnostic': agnostic, # for input\n 'parse_cloth': im_c, # for ground truth\n 'shape': shape, # for visualization\n 'head': im_h, # for visualization\n 'pose_image': im_pose, # for visualization\n 'grid_image': im_g, # for visualization\n 'parse_cloth_mask': pcm, # for CP-VTON+, TOM input\n 'shape_ori': shape_ori, # original body shape without resize\n }\n\n return result\n\n def __len__(self):\n return len(self.im_names)\n\n\nclass CPDataLoader(object):\n def __init__(self, opt, dataset):\n super(CPDataLoader, self).__init__()\n\n if opt.shuffle:\n train_sampler = torch.utils.data.sampler.RandomSampler(dataset)\n else:\n train_sampler = None\n\n self.data_loader = torch.utils.data.DataLoader(\n dataset, 
batch_size=opt.batch_size, shuffle=(\n train_sampler is None),\n num_workers=opt.workers, pin_memory=True, sampler=train_sampler)\n self.dataset = dataset\n self.data_iter = self.data_loader.__iter__()\n\n def next_batch(self):\n try:\n batch = self.data_iter.__next__()\n except StopIteration:\n self.data_iter = self.data_loader.__iter__()\n batch = self.data_iter.__next__()\n\n return batch\n\n\nif __name__ == \"__main__\":\n print(\"Check the dataset for geometric matching module!\")\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataroot\", default=\"data\")\n parser.add_argument(\"--datamode\", default=\"train\")\n parser.add_argument(\"--stage\", default=\"GMM\")\n parser.add_argument(\"--data_list\", default=\"train_pairs.txt\")\n parser.add_argument(\"--fine_width\", type=int, default=192)\n parser.add_argument(\"--fine_height\", type=int, default=256)\n parser.add_argument(\"--radius\", type=int, default=3)\n parser.add_argument(\"--shuffle\", action='store_true',\n help='shuffle input data')\n parser.add_argument('-b', '--batch-size', type=int, default=4)\n parser.add_argument('-j', '--workers', type=int, default=1)\n\n opt = parser.parse_args()\n dataset = CPDataset(opt)\n data_loader = CPDataLoader(opt, dataset)\n\n print('Size of the dataset: %05d, dataloader: %04d'\n % (len(dataset), len(data_loader.data_loader)))\n first_item = dataset.__getitem__(0)\n first_batch = data_loader.next_batch()\n\n from IPython import embed\n embed()\n"
] | [
[
"torch.zeros",
"numpy.array",
"torch.cat",
"torch.utils.data.sampler.RandomSampler",
"torch.from_numpy",
"torch.utils.data.DataLoader"
]
] |
ggoom/necstlab-damage-segmentation | [
"9adf087ea41f1cdd6d2b0eb5a546606c32a73590"
] | [
"train_segmentation_model.py"
] | [
"import shutil\nimport os\nimport random\nimport numpy as np\nimport yaml\nfrom pathlib import Path\nfrom datetime import datetime\nimport pytz\nimport matplotlib.pyplot as plt\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger\nfrom image_utils import TensorBoardImage, ImagesAndMasksGenerator, trainGenerator\nimport git\nfrom gcp_utils import copy_folder_locally_if_missing\nfrom models import generate_compiled_segmentation_model\n\n\nmetadata_file_name = 'metadata.yaml'\ntmp_directory = Path('./tmp')\n\n\ndef sample_image_and_mask_paths(generator, n_paths):\n random.seed(0)\n rand_inds = [random.randint(0, len(generator.image_filenames)-1) for _ in range(n_paths)]\n image_paths = list(np.asarray(generator.image_filenames)[rand_inds])\n mask_paths = list(np.asarray(generator.mask_filenames)[rand_inds])\n # mask_paths = [{c: list(np.asarray(generator.mask_filenames[c]))[i] for c in generator.mask_filenames} for i in rand_inds]\n return list(zip(image_paths, mask_paths))\n\n\ndef train(gcp_bucket, config_file):\n\n start_dt = datetime.now()\n\n with Path(config_file).open('r') as f:\n train_config = yaml.safe_load(f)['train_config']\n\n assert \"gs://\" in gcp_bucket\n\n # clean up the tmp directory\n try:\n shutil.rmtree(tmp_directory.as_posix())\n except FileNotFoundError:\n pass\n tmp_directory.mkdir()\n\n local_dataset_dir = Path(tmp_directory, 'datasets')\n\n copy_folder_locally_if_missing(os.path.join(gcp_bucket, 'datasets', train_config['dataset_id']),\n local_dataset_dir)\n\n model_id = \"{}_{}\".format(train_config['model_id_prefix'], datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ'))\n model_dir = Path(tmp_directory, 'models', model_id)\n model_dir.mkdir(parents=True)\n\n plots_dir = Path(model_dir, 'plots')\n plots_dir.mkdir(parents=True)\n\n logs_dir = Path(model_dir, 'logs')\n logs_dir.mkdir(parents=True)\n\n with Path(local_dataset_dir, train_config['dataset_id'], 'config.yaml').open('r') as f:\n dataset_config = yaml.safe_load(f)['dataset_config']\n\n with Path(model_dir, 'config.yaml').open('w') as f:\n yaml.safe_dump({'train_config': train_config}, f)\n\n target_size = dataset_config['target_size']\n batch_size = train_config['batch_size']\n epochs = train_config['epochs']\n augmentation_type = train_config['data_augmentation']['augmentation_type']\n\n if augmentation_type == 'necstlab': # necstlab's workflow\n train_generator = ImagesAndMasksGenerator(\n Path(local_dataset_dir, train_config['dataset_id'], 'train').as_posix(),\n rescale=1./255,\n target_size=target_size,\n batch_size=batch_size,\n shuffle=True,\n random_rotation=train_config['data_augmentation']['necstlab_augmentation']['random_90-degree_rotations'],\n seed=train_config['training_data_shuffle_seed'])\n\n validation_generator = ImagesAndMasksGenerator(\n Path(local_dataset_dir, train_config['dataset_id'],\n 'validation').as_posix(),\n rescale=1./255,\n target_size=target_size,\n batch_size=batch_size)\n elif augmentation_type == 'bio': # new workflow\n bio_augmentation = train_config['data_augmentation']['bio_augmentation']\n augmentation_dict = dict(rotation_range=bio_augmentation['rotation_range'],\n width_shift_range=bio_augmentation['width_shift_range'],\n height_shift_range=bio_augmentation['height_shift_range'],\n shear_range=bio_augmentation['shear_range'],\n zoom_range=bio_augmentation['zoom_range'],\n horizontal_flip=bio_augmentation['horizontal_flip'],\n fill_mode=bio_augmentation['fill_mode'],\n cval=0)\n train_generator = trainGenerator(\n batch_size=batch_size,\n 
train_path=Path(local_dataset_dir, train_config['dataset_id'], 'train').as_posix(),\n image_folder='images',\n mask_folder='masks',\n aug_dict=augmentation_dict,\n target_size=target_size,\n seed=train_config['training_data_shuffle_seed'])\n\n validation_generator = trainGenerator(\n batch_size=batch_size,\n train_path=Path(local_dataset_dir, train_config['dataset_id'], 'validation').as_posix(),\n image_folder='images',\n mask_folder='masks',\n aug_dict=augmentation_dict,\n target_size=target_size,\n seed=train_config['training_data_shuffle_seed'])\n\n compiled_model = generate_compiled_segmentation_model(\n train_config['segmentation_model']['model_name'],\n train_config['segmentation_model']['model_parameters'],\n 1,\n train_config['loss'],\n train_config['optimizer'])\n\n model_checkpoint_callback = ModelCheckpoint(Path(model_dir, 'model.hdf5').as_posix(),\n monitor='loss', verbose=1, save_best_only=True)\n tensorboard_callback = TensorBoard(log_dir=logs_dir.as_posix(), batch_size=batch_size, write_graph=True,\n write_grads=False, write_images=True, update_freq='epoch')\n\n # n_sample_images = 20\n # train_image_and_mask_paths = sample_image_and_mask_paths(train_generator, n_sample_images)\n # validation_image_and_mask_paths = sample_image_and_mask_paths(validation_generator, n_sample_images)\n\n # tensorboard_image_callback = TensorBoardImage(\n # log_dir=logs_dir.as_posix(),\n # images_and_masks_paths=train_image_and_mask_paths + validation_image_and_mask_paths)\n\n csv_logger_callback = CSVLogger(Path(model_dir, 'metrics.csv').as_posix(), append=True)\n\n results = compiled_model.fit_generator(\n train_generator,\n steps_per_epoch=len(train_generator) if augmentation_type == 'necstlab' else train_config['data_augmentation']['bio_augmentation']['steps_per_epoch'],\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=len(validation_generator) if augmentation_type == 'necstlab' else train_config['data_augmentation']['bio_augmentation']['validation_steps'],\n callbacks=[model_checkpoint_callback, tensorboard_callback, csv_logger_callback])\n\n metric_names = ['loss'] + [m.name for m in compiled_model.metrics]\n\n for metric_name in metric_names:\n\n fig, ax = plt.subplots()\n for split in ['train', 'validate']:\n\n key_name = metric_name\n if split == 'validate':\n key_name = 'val_' + key_name\n\n ax.plot(range(epochs), results.history[key_name], label=split)\n ax.set_xlabel('epochs')\n if metric_name == 'loss':\n ax.set_ylabel(compiled_model.loss.__name__)\n else:\n ax.set_ylabel(metric_name)\n ax.legend()\n if metric_name == 'loss':\n fig.savefig(Path(plots_dir, compiled_model.loss.__name__ + '.png').as_posix())\n else:\n fig.savefig(Path(plots_dir, metric_name + '.png').as_posix())\n\n # mosaic plot\n fig2, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 6))\n counter_m = 0\n counter_n = 0\n for metric_name in metric_names:\n\n for split in ['train', 'validate']:\n\n key_name = metric_name\n if split == 'validate':\n key_name = 'val_' + key_name\n\n axes[counter_m, counter_n].plot(range(epochs), results.history[key_name], label=split)\n axes[counter_m, counter_n].set_xlabel('epochs')\n if metric_name == 'loss':\n axes[counter_m, counter_n].set_ylabel(compiled_model.loss.__name__)\n else:\n axes[counter_m, counter_n].set_ylabel(metric_name)\n axes[counter_m, counter_n].legend()\n\n counter_n += 1\n if counter_n == 3: # 3 plots per row\n counter_m += 1\n counter_n = 0\n\n fig2.tight_layout()\n fig2.delaxes(axes[1][2])\n fig2.savefig(Path(plots_dir, 
'metrics_mosaic.png').as_posix())\n\n metadata = {\n 'gcp_bucket': gcp_bucket,\n 'created_datetime': datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ'),\n 'num_classes': 1,\n 'target_size': target_size,\n 'git_hash': git.Repo(search_parent_directories=True).head.object.hexsha,\n 'original_config_filename': config_file,\n 'elapsed_minutes': round((datetime.now() - start_dt).total_seconds() / 60, 1),\n 'dataset_config': dataset_config,\n 'train_config': train_config\n }\n\n with Path(model_dir, metadata_file_name).open('w') as f:\n yaml.safe_dump(metadata, f)\n\n os.system(\"gsutil -m cp -r '{}' '{}'\".format(Path(tmp_directory, 'models').as_posix(), gcp_bucket))\n\n shutil.rmtree(tmp_directory.as_posix())\n\n\nif __name__ == \"__main__\":\n import argparse\n import sys\n\n argparser = argparse.ArgumentParser(sys.argv[0])\n argparser.add_argument(\n '--gcp-bucket',\n type=str,\n help='The GCP bucket where the prepared data is located and to use to store the trained model.')\n argparser.add_argument(\n '--config-file',\n type=str,\n help='The location of the train configuration file.')\n\n train(**argparser.parse_args().__dict__)\n"
] | [
[
"numpy.asarray",
"matplotlib.pyplot.subplots"
]
] |
lzha106/FDDB-tools-for-windows | [
"6a5d3f4381c87d8e22f66f83268549e040054ce5"
] | [
"mtcnn_fddb/mtcnn_face_det.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport argparse\nimport tensorflow as tf\nimport numpy as np\nimport src.align.detect_face\nimport src.align as align\nimport cv2\n\ndef detect_face(img, pnet, rnet, onet):\n minsize = 20 # minimum size of face\n threshold = [0.6, 0.7, 0.7] # three steps's threshold\n factor = 0.709 # scale factor\n regions = []\n\n bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n nrof_faces = bounding_boxes.shape[0]\n if nrof_faces > 0:\n det = bounding_boxes[:, 0:5]\n det_arr = []\n img_size = np.asarray(img.shape)[0:2]\n if nrof_faces > 1:\n for i in range(nrof_faces):\n det_arr.append(np.squeeze(det[i]))\n else:\n det_arr.append(np.squeeze(det))\n\n for i, det in enumerate(det_arr):\n det = np.squeeze(det)\n margin = 2\n\n bb = np.zeros(5, dtype=np.int32)\n bb[0] = np.maximum(det[0] - margin / 2.0, 0.0)\n bb[1] = np.maximum(det[1] - margin / 2.0, 0.0)\n bb[2] = np.minimum(det[2] + margin / 2.0, img_size[1])\n bb[3] = np.minimum(det[3] + margin / 2.0, img_size[0])\n # conver to width and height\n bb[2] -= bb[0]\n bb[3] -= bb[1]\n bb[4] = det[4]*10000\n regions.append(bb)\n\n return regions, nrof_faces\n\ndef main(args):\n with tf.Graph().as_default():\n\n with tf.Session() as sess:\n pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)\n print('Loading feature extraction model')\n\n result_file = open(args.output_file, 'w+')\n\n with open(args.fddb_file_folder_dir + 'FDDB-all.txt', 'r') as fddb_file_list:\n file_list = fddb_file_list.read().splitlines()\n\n image_count = 0\n fddb_image_dir = args.fddb_image_folder_dir\n for file_name in file_list:\n img_name = fddb_image_dir + file_name + \".jpg\"\n img = cv2.imread(img_name, 1)\n regions, num_of_faces = detect_face(img, pnet, rnet, onet)\n\n # write result to output file in format\n # file_name\n # num_of_faces\n # bx0, by0, bw0, bh0, prob0\n # ...\n result_file.write(file_name)\n result_file.write(\"\\n\")\n result_file.write(str(num_of_faces) + \"\\n\")\n\n for items in regions:\n face_item = str(items).strip(\"[]\").lstrip()+\"\\n\"\n result_file.write(face_item)\n\n image_count += 1\n print(\"Processed \" + str(image_count) + \" images\")\n\n # For debug to show the image and rect\n # bb = np.zeros(5, dtype=np.int32)\n # bb[0] = regions[0][0]\n # bb[1] = regions[0][1]\n # bb[2] = regions[0][2]\n # bb[3] = regions[0][3]\n #\n # print(img.shape)\n # print(bb[0], bb[1], bb[2], bb[3])\n # cv2.rectangle(img, (bb[0], bb[1]), (bb[2]+bb[0], bb[1]+ bb[3]),\n # (255, 0, 0), 2)\n # cv2.imshow(\"face\", img)\n # key = cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n result_file.close()\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('fddb_file_folder_dir', type=str,\n help='Could be a directory containing fddb txt')\n parser.add_argument('fddb_image_folder_dir', type=str,\n help='Could be a directory containing fddb image')\n parser.add_argument('output_file', type=str,\n help='Could be the output file name including path')\n\n return parser.parse_args(argv)\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n"
] | [
[
"numpy.asarray",
"numpy.zeros",
"numpy.minimum",
"tensorflow.Graph",
"tensorflow.Session",
"numpy.squeeze",
"numpy.maximum"
]
] |
LiCHOTHU/ocean-kp | [
"2102bda2e51233baad0da12a6b1f168a7882564b"
] | [
"rlkit/envs/humanoid_multi_dir.py"
] | [
"import numpy as np\r\n\r\nfrom gym.envs.mujoco import HumanoidEnv as HumanoidEnv\r\nfrom . import register_env\r\n\r\ndef mass_center(model, sim):\r\n mass = np.expand_dims(model.body_mass, 1)\r\n xpos = sim.data.xipos\r\n return (np.sum(mass * xpos, 0) / np.sum(mass))\r\n\r\n\r\n@register_env('humanoid-multi-dir')\r\nclass HumanoidMultiDirEnv(HumanoidEnv):\r\n\r\n def __init__(self, task={}, n_tasks=2, randomize_tasks=True, n_dirs=3, max_eps=700, seed=0):\r\n self._max_eps = max_eps\r\n self._num_steps = 0\r\n self.tasks = self.sample_tasks(n_tasks, n_dirs)\r\n self._goal_dirs = self.tasks[0]['dir']\r\n self._goal_steps = self.tasks[0]['step']\r\n self._goal_dir = self.tasks[0].get('dir', [1])[0]\r\n self._goal = self._goal_dir\r\n # self.reset_task(0)\r\n super(HumanoidMultiDirEnv, self).__init__()\r\n self.seed(seed)\r\n\r\n def step(self, action):\r\n pos_before = np.copy(mass_center(self.model, self.sim)[:2])\r\n self.do_simulation(action, self.frame_skip)\r\n pos_after = mass_center(self.model, self.sim)[:2]\r\n\r\n alive_bonus = 5.0\r\n data = self.sim.data\r\n goal_direction = (np.cos(self._goal_dir), np.sin(self._goal_dir))\r\n lin_vel_cost = 0.25 * np.sum(goal_direction * (pos_after - pos_before)) / self.model.opt.timestep\r\n quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()\r\n quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()\r\n quad_impact_cost = min(quad_impact_cost, 10)\r\n reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus\r\n qpos = self.sim.data.qpos\r\n done = bool((qpos[2] < 1.0) or (qpos[2] > 2.0))\r\n\r\n self._num_steps += 1\r\n self._goal_dir = self._goal_dirs[np.searchsorted(self._goal_steps, self._num_steps)]\r\n\r\n return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost,\r\n reward_quadctrl=-quad_ctrl_cost,\r\n reward_alive=alive_bonus,\r\n reward_impact=-quad_impact_cost)\r\n\r\n def _get_obs(self):\r\n data = self.sim.data\r\n return np.concatenate([data.qpos.flat[2:],\r\n data.qvel.flat,\r\n data.cinert.flat,\r\n data.cvel.flat,\r\n data.qfrc_actuator.flat,\r\n data.cfrc_ext.flat])\r\n\r\n def get_all_task_idx(self):\r\n return range(len(self.tasks))\r\n\r\n def reset_task(self, idx):\r\n self._task = self.tasks[idx]\r\n self._num_steps = 0\r\n self._goal_steps = self._task['step']\r\n self._goal_dirs = self._task['dir']\r\n self._goal_dir = self._goal_dirs[np.searchsorted(self._goal_steps, self._num_steps)]\r\n self._goal = self._goal_dir\r\n # self._goal = self._task['goal'] # assume parameterization of task by single vector\r\n self.reset()\r\n\r\n def sample_tasks(self, num_tasks, num_dirs):\r\n # velocities = np.random.uniform(0., 1.0 * np.pi, size=(num_tasks,))\r\n directions = np.random.uniform(0., 2.0 * np.pi, size=(num_tasks, num_dirs))\r\n change_steps = np.sort(np.array([self._max_eps * i / num_dirs for i in range(1, num_dirs)]) + np.random.uniform(-0.05*self._max_eps, 0.05*self._max_eps, size=(num_tasks, num_dirs - 1)))\r\n tasks = []\r\n for i in range(num_tasks):\r\n tasks.append({'dir': directions[i], 'step': change_steps[i]})\r\n # tasks = [{'goal': d} for d in directions]\r\n return tasks\r\n\r\n def save_all_tasks(self, save_dir):\r\n import pickle\r\n import os\r\n with open(os.path.join(save_dir, 'goals.pkl'), 'wb') as f:\r\n pickle.dump(self.tasks, f)"
] | [
[
"numpy.concatenate",
"numpy.square",
"numpy.sin",
"numpy.sum",
"numpy.random.uniform",
"numpy.cos",
"numpy.searchsorted",
"numpy.expand_dims"
]
] |
wbhu/SphericalViewSynthesis | [
"e2af7edf1cc3c98835382c0016fb0a4e868faeb3"
] | [
"dataset/transform.py"
] | [
"#!/usr/bin/env python\n\"\"\"\n File Name : SparseVec-transform\n date : 4/11/2019\n Author : wenbo\n Email : [email protected]\n Description :\n _ _\n ( |---/ )\n ) . . (\n________________________,--._(___Y___)_,--._______________________\n `--' `--'\n\"\"\"\nimport numpy as np\nimport torch\n\n\nclass Compose(object):\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, image, label):\n for t in self.transforms:\n image, label = t(image, label)\n return image, label\n\n\nclass ToTensor(object):\n # Converts numpy.ndarray (H x W x C) to a torch.FloatTensor of shape (C x H x W).\n def __call__(self, image, label):\n if not isinstance(image, np.ndarray) or not isinstance(label, np.ndarray):\n raise (RuntimeError(\"segtransform.ToTensor() only handle np.ndarray\"\n \"[eg: data readed by cv2.imread()].\\n\"))\n if len(image.shape) > 3 or len(image.shape) < 2:\n raise (RuntimeError(\"segtransform.ToTensor() only handle np.ndarray with 3 dims or 2 dims.\\n\"))\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=2)\n\n image = torch.from_numpy(image.transpose((2, 0, 1)))\n if not isinstance(image, torch.FloatTensor):\n image = image.float()\n label = torch.from_numpy(label.transpose((2, 0, 1)))\n if not isinstance(label, torch.FloatTensor):\n label = label.float()\n return image, label\n\n\nclass Normalize(object):\n # Normalize tensor with mean and standard deviation along channel: channel = (channel - mean) / std\n def __init__(self, mean, std=None):\n if std is None:\n assert len(mean) > 0\n else:\n assert len(mean) == len(std)\n self.mean = mean\n self.std = std\n\n def __call__(self, image, label):\n if self.std is None:\n for t, m in zip(image, self.mean):\n t.sub_(m)\n for t, m in zip(label, self.mean):\n t.sub_(m)\n else:\n for t, m, s in zip(image, self.mean, self.std):\n t.sub_(m).div_(s)\n for t, m, s in zip(label, self.mean, self.std):\n t.sub_(m).div_(s)\n return image, label\n"
] | [
[
"numpy.expand_dims"
]
] |
hdevillepoix/astroquery | [
"ce8c500c28424fe841e04741d4230b8f695ee194"
] | [
"astroquery/nist/tests/test_nist.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport os\n\nimport numpy.testing as npt\nimport pytest\nfrom astropy.table import Table\nimport astropy.units as u\n\nfrom ...utils.testing_tools import MockResponse\nfrom ... import nist\n\nDATA_FILES = {'lines': 'nist_out.html'}\n\n\ndef data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n return os.path.join(data_dir, filename)\n\n\[email protected]\ndef patch_get(request):\n try:\n mp = request.getfixturevalue(\"monkeypatch\")\n except AttributeError: # pytest < 3\n mp = request.getfuncargvalue(\"monkeypatch\")\n mp.setattr(nist.Nist, '_request', get_mockreturn)\n return mp\n\n\ndef get_mockreturn(method, url, params=None, timeout=10, **kwargs):\n filename = data_path(DATA_FILES['lines'])\n content = open(filename, 'rb').read()\n return MockResponse(content, **kwargs)\n\n\ndef test_parse_wavelength():\n minwav, maxwav, unit = nist.core._parse_wavelength(4000 * u.AA,\n 7000 * u.AA)\n npt.assert_approx_equal(minwav, 4000, significant=4)\n npt.assert_approx_equal(maxwav, 7000, significant=4)\n assert unit == nist.core.Nist.unit_code['Angstrom']\n\n\ndef test_query_async(patch_get):\n response = nist.core.Nist.query_async(4000 * u.nm, 7000 * u.nm,\n \"H I\", get_query_payload=True)\n assert response['spectra'] == \"H I\"\n assert response['unit'] == nist.core.Nist.unit_code['nm']\n response = nist.core.Nist.query_async(4000 * u.nm, 7000 * u.nm, \"H I\")\n assert response is not None\n\n\ndef test_query(patch_get):\n result = nist.core.Nist.query(4000 * u.nm, 7000 * u.nm, \"H I\")\n assert isinstance(result, Table)\n"
] | [
[
"numpy.testing.assert_approx_equal"
]
] |
JonDamFlindt/DM562-Rabbits-and-Foxes | [
"a80d3d936b0c7d377db649f83495c24e700446d6"
] | [
"reporting.py"
] | [
"import matplotlib as matplot\nimport matplotlib.pyplot as plt\nfrom results import *\n\n\ndef print_summary(results: SimulationStats):\n \"\"\"Prints a short summary regarding the populations of the simulation.\"\"\"\n\n for item in [['FOXES', results.foxes],['RABBITS', results.rabbits]]:\n animal = item[0]\n data = item[1]\n print(f\"\"\"\n[{animal}]\nPopulation\ntotal: {data.total}\nmin: {min(data.size_per_step)}\navg: {sum(data.size_per_step)/len(data.size_per_step)}\nmax: {max(data.size_per_step)}\n\nDeaths\ntotal: {data.dead_by_old_age + data.dead_by_starvation + data.dead_by_predation}\nold age: {data.dead_by_old_age}\nstarvation: {data.dead_by_starvation}\npredation: {data.dead_by_predation}\n\"\"\")\n\n\n# Since we have to plot each population and their total,\n# we will use for-loops with preset colors and styles\nplt_style = ['--C1', '--C7', '-k']\nplt_legend = ['Foxes','Rabbits','Total']\n\n\ndef _setup_plot(data, plot_title: str, x_label: str, y_label: str):\n \"\"\"Generic function to set up the plot.\"\"\"\n plt.title(plot_title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n for i in range(len(data)):\n plt.plot(data[i], \n plt_style[i],\n linewidth=1,\n scalex=True)\n plt.legend(plt_legend)\n\n\ndef plot_pop_size(results: SimulationStats):\n \"\"\"Plots the size of the population for each step.\"\"\"\n total_size_per_step = [results.foxes.size_per_step[i] + results.rabbits.size_per_step[i] for i in range(len(results.foxes.size_per_step))]\n plt_data = [results.foxes.size_per_step,\n results.rabbits.size_per_step,\n total_size_per_step]\n _setup_plot(plt_data, 'Population over time', 'Steps', 'Population size (living)')\n plt.show()\n \n\ndef plot_lifespan(results: SimulationStats):\n \"\"\"Plots the lifespan of every individual that has ever lived during the simulation.\"\"\"\n plt_data = [results.foxes.age_at_death, results.rabbits.age_at_death]\n _setup_plot(plt_data, 'Lifespans', 'Individuals', 'Age')\n plt.show()\n\n\ndef plot_energy(results: SimulationStats):\n \"\"\"Plots the average energy of the animals per step.\"\"\"\n plt_data = [results.foxes.avg_energy_per_step,\n results.rabbits.avg_energy_per_step,\n results.avg_energy_per_step]\n _setup_plot(plt_data, 'Average energy over time', 'Steps', 'Energy')\n plt.show()\n\n\ndef plot_kills(results: SimulationStats):\n \"\"\"Plots kills on the grid.\"\"\"\n max_kills = max([kill for patch in results.kills_per_patch for kill in patch])\n colormap = matplot.colors.LinearSegmentedColormap.from_list('kills',['white','yellow','orange','red'], 256)\n plt.imshow(results.kills_per_patch, cmap=colormap)\n plt.title('Distribution of kills by predators.')\n plt.colorbar(ticks=range(max_kills+1))\n plt.show()\n \n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.colors.LinearSegmentedColormap.from_list"
]
] |