repo_name: string (lengths 6-130)
hexsha: list
file_path: list
code: list
apis: list
miyosuda/rodent
[ "3d60a234eecd5e2476b186365eb121f0f3655c2e" ]
[ "examples/02_nav_maze_static/main.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pygame, sys\nfrom pygame.locals import *\n\nfrom nav_maze_static_environment import NavMazeStaticEnvironment\n\nBLACK = (0, 0, 0)\n\n\nclass RandomAgent(object):\n def __init__(self, action_num):\n self.action_num = action_num\n\n def choose_action(self, state):\n return np.random.randint(self.action_num)\n\n\nclass Display(object):\n def __init__(self, display_size):\n self.width = display_size[0]\n self.height = display_size[1]\n\n self.env = NavMazeStaticEnvironment(\n width=self.width, height=self.height)\n self.agent = RandomAgent(self.env.get_action_size())\n\n pygame.init()\n\n self.surface = pygame.display.set_mode(display_size, 0, 24)\n pygame.display.set_caption('rodentia')\n\n self.last_state = self.env.reset()\n\n def update(self):\n self.surface.fill(BLACK)\n self.process()\n pygame.display.update()\n\n \"\"\"\n def get_manual_action(self):\n pressed = pygame.key.get_pressed()\n\n if pressed[K_q]:\n return 0\n if pressed[K_e]:\n return 1\n if pressed[K_a]:\n return 2\n if pressed[K_d]:\n return 3\n if pressed[K_w]:\n return 4\n if pressed[K_s]:\n return 5\n return -1\n \"\"\"\n\n def process(self):\n #action = self.get_manual_action()\n action = self.agent.choose_action(self.last_state)\n\n state, reward, terminal = self.env.step(action=action)\n\n if reward != 0:\n print(\"reward={}\".format(reward))\n\n image = pygame.image.frombuffer(state, (self.width, self.height),\n 'RGB')\n self.surface.blit(image, (0, 0))\n\n self.last_state = state\n\n if terminal:\n self.last_state = self.env.reset()\n\n\ndef main():\n display_size = (256, 256)\n display = Display(display_size)\n clock = pygame.time.Clock()\n\n running = True\n FPS = 60\n\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n\n display.update()\n clock.tick(FPS)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.random.randint" ] ]
popura/deepy-pytorch
[ "71d87a82e937d82b9b149041280a392cc24b7299" ]
[ "deepy/data/audio/transform.py" ]
[ "import random\n\nimport torch\n\nfrom deepy.data.transform import Transform, SeparatedTransform\nfrom deepy.data.transform import PairedTransform, PairedCompose, ToPairedTransform\nfrom deepy.nn import functional as myF\n\n\nclass RandomCrop(Transform):\n def __init__(self, length: int, generator=None):\n self.length = length\n self.generator = generator\n \n def __call__(self, data):\n signal_length = data.size(-1)\n start_index = torch.randint(0, signal_length - self.length + 1,\n (1, ),\n generator=self.generator)\n end_index = start_index + self.length\n return data[..., start_index:end_index]\n \n def __repr__(self):\n return self.__class__.__name__ + '(length={})'.format(self.length)\n\n\nclass RandomFrames(RandomCrop):\n def __init__(self, n_frames=5, generator=None):\n super().__init__(length=n_frames, generator=generator)\n self.n_frames = n_frames\n\n def __repr__(self):\n return self.__class__.__name__ + '(n_frames={})'.format(self.n_frames)\n\n\nclass Windowing(Transform):\n def __init__(self, n_frames=5, stride=1, n_signals=None):\n self.n_frames = n_frames\n if not stride == 1:\n raise NotImplementedError\n self.stride = stride\n self.n_signals = n_signals\n \n def __call__(self, data):\n total_frames = data.size(-1)\n\n if self.n_signals == None:\n n_signals = total_frames - self.n_frames + 1\n else:\n n_signals = self.n_signals\n\n return torch.stack([data[..., i:i+self.n_frames] for i in range(n_signals)], dim=1)\n\n def __repr__(self):\n return self.__class__.__name__ + '(n_frames={}, stride={})'.format(self.n_frames, self.stride)\n\n\nclass Plane2Vector(Transform):\n def __init__(self):\n pass\n\n def __call__(self, data):\n return torch.cat([data[..., i, :] for i in range(data.size(-2))], dim=-1)\n \n\n\nclass ToVector(Transform):\n def __init__(self):\n pass\n\n def __call__(self, data):\n return data.reshape(-1, )\n \n def __repr__(self):\n return self.__class__.__name__\n\n\nclass PickUpChannel(Transform):\n def __init__(self, chidx=0):\n self.chidx = chidx\n\n def __call__(self, data):\n return data[self.chidx]\n \n def __repr__(self):\n return self.__class__.__name__ + '(chidx={})'.format(self.chidx)\n" ]
[ [ "torch.randint" ] ]
ikamensh/machin
[ "af7b423c47bc1412530cf6c96c11bd3af9b3e239", "af7b423c47bc1412530cf6c96c11bd3af9b3e239" ]
[ "machin/frame/buffers/buffer.py", "test/frame/algorithms/test_maddpg.py" ]
[ "from typing import Union, Dict, List, Tuple, Any, Callable\nfrom ..transition import (\n Transition,\n Scalar,\n TransitionStorageSmart,\n TransitionStorageBasic,\n)\nimport torch as t\nimport random\n\n\nclass Buffer:\n def __init__(self, buffer_size, buffer_device=\"cpu\", *_, **__):\n \"\"\"\n Create a buffer instance.\n\n Buffer stores a series of transition objects and functions\n as a ring buffer. **It is not thread-safe**.\n\n See Also:\n :class:`.Transition`\n\n\n During sampling, the tensors in \"state\", \"action\" and \"next_state\"\n dictionaries, along with \"reward\", will be concatenated in dimension 0.\n any other custom keys specified in ``**kwargs`` will not be\n concatenated.\n\n Args:\n buffer_size: Maximum buffer size.\n buffer_device: Device where buffer is stored.\n \"\"\"\n self.buffer_size = buffer_size\n self.buffer_device = buffer_device\n self.buffer = TransitionStorageSmart(buffer_size)\n self.index = 0\n\n def append(\n self,\n transition: Union[Transition, Dict],\n required_attrs=(\"state\", \"action\", \"next_state\", \"reward\", \"terminal\"),\n ):\n \"\"\"\n Store a transition object to buffer.\n\n Args:\n transition: A transition object.\n required_attrs: Required attributes. Could be an empty tuple if\n no attribute is required.\n\n Raises:\n ``ValueError`` if transition object doesn't have required\n attributes in ``required_attrs`` or has different attributes\n compared to other transition objects stored in buffer.\n \"\"\"\n if isinstance(transition, dict):\n transition = Transition(**transition)\n elif isinstance(transition, Transition):\n pass\n else: # pragma: no cover\n raise RuntimeError(\n \"Transition object must be a dict or an instance\"\n \" of the Transition class\"\n )\n if not transition.has_keys(required_attrs):\n missing_keys = set(required_attrs) - set(transition.keys())\n raise ValueError(f\"Transition object missing attributes: {missing_keys}\")\n transition.to(self.buffer_device)\n\n if self.size() != 0 and self.buffer[0].keys() != transition.keys():\n raise ValueError(\"Transition object has different attributes!\")\n\n return self.buffer.store(transition)\n\n def size(self):\n \"\"\"\n Returns:\n Length of current buffer.\n \"\"\"\n return len(self.buffer)\n\n def clear(self):\n \"\"\"\n Remove all entries from the buffer\n \"\"\"\n self.buffer.clear()\n\n @staticmethod\n def sample_method_random_unique(\n buffer: List[Transition], batch_size: int\n ) -> Tuple[int, List[Transition]]:\n \"\"\"\n Sample unique random samples from buffer.\n\n Note:\n Sampled size could be any value from 0 to ``batch_size``.\n \"\"\"\n if len(buffer) < batch_size:\n batch = random.sample(buffer, len(buffer))\n real_num = len(buffer)\n else:\n batch = random.sample(buffer, batch_size)\n real_num = batch_size\n return real_num, batch\n\n @staticmethod\n def sample_method_random(\n buffer: List[Transition], batch_size: int\n ) -> Tuple[int, List[Transition]]:\n \"\"\"\n Sample random samples from buffer.\n\n Note:\n Sampled size could be any value from 0 to ``batch_size``.\n \"\"\"\n indexes = [random.randint(0, len(buffer) - 1) for _ in range(batch_size)]\n batch = [buffer[i] for i in indexes]\n return batch_size, batch\n\n @staticmethod\n def sample_method_all(buffer: List[Transition], _) -> Tuple[int, List[Transition]]:\n \"\"\"\n Sample all samples from buffer. 
Always return the whole buffer,\n will ignore the ``batch_size`` parameter.\n \"\"\"\n return len(buffer), buffer\n\n def sample_batch(\n self,\n batch_size: int,\n concatenate: bool = True,\n device: Union[str, t.device] = None,\n sample_method: Union[Callable, str] = \"random_unique\",\n sample_attrs: List[str] = None,\n additional_concat_attrs: List[str] = None,\n *_,\n **__,\n ) -> Any:\n \"\"\"\n Sample a random batch from buffer.\n\n See Also:\n Default sample methods are defined as static class methods.\n\n :meth:`.Buffer.sample_method_random_unique`\n\n :meth:`.Buffer.sample_method_random`\n\n :meth:`.Buffer.sample_method_all`\n\n Note:\n \"Concatenation\"\n means ``torch.cat([...], dim=0)`` for tensors,\n and ``torch.tensor([...]).view(batch_size, 1)`` for scalars.\n\n Warnings:\n Custom attributes must not contain tensors. And only scalar custom\n attributes can be concatenated, such as ``int``, ``float``,\n ``bool``.\n\n Args:\n batch_size: A hint size of the result sample. actual sample size\n depends on your sample method.\n sample_method: Sample method, could be one of:\n ``\"random\", \"random_unique\", \"all\"``,\n or a function:\n ``func(list, batch_size)->(list, result_size)``\n concatenate: Whether concatenate state, action and next_state\n in dimension 0.\n If ``True``, for each value in dictionaries of major\n attributes. and each value of sub attributes, returns\n a concatenated tensor. Custom Attributes specified in\n ``additional_concat_attrs`` will also be concatenated.\n If ``False``, return a list of tensors.\n device: Device to copy to.\n sample_attrs: If sample_keys is specified, then only specified keys\n of the transition object will be sampled. You may use\n ``\"*\"`` as a wildcard to collect remaining\n **custom keys** as a ``dict``, you cannot collect major\n and sub attributes using this.\n Invalid sample attributes will be ignored.\n additional_concat_attrs: additional **custom keys** needed to be\n concatenated, will only work if ``concatenate`` is\n ``True``.\n\n Returns:\n 1. Batch size, Sampled attribute values in the same order as\n ``sample_keys``.\n\n 2. Sampled attribute values is a tuple. 
Or ``None`` if sampled\n batch size is zero (E.g.: if buffer is empty or your sample\n size is 0 and you are not sampling using the \"all\" method).\n\n - For major attributes, result are dictionaries of tensors with\n the same keys in your transition objects.\n\n - For sub attributes, result are tensors.\n\n - For custom attributes, if they are not in\n ``additional_concat_attrs``, then lists, otherwise tensors.\n \"\"\"\n if isinstance(sample_method, str):\n if not hasattr(self, \"sample_method_\" + sample_method):\n raise RuntimeError(\n f\"Cannot find specified sample method: {sample_method}\"\n )\n sample_method = getattr(self, \"sample_method_\" + sample_method)\n batch_size, batch = sample_method(self.buffer, batch_size)\n\n if device is None:\n device = self.buffer_device\n\n return (\n batch_size,\n self.post_process_batch(\n batch, device, concatenate, sample_attrs, additional_concat_attrs\n ),\n )\n\n @classmethod\n def post_process_batch(\n cls,\n batch: List[Transition],\n device: Union[str, t.device],\n concatenate: bool,\n sample_attrs: List[str],\n additional_concat_attrs: List[str],\n ):\n \"\"\"\n Post-process (concatenate) sampled batch.\n \"\"\"\n result = []\n used_keys = []\n\n if len(batch) == 0:\n return None\n if sample_attrs is None:\n sample_attrs = batch[0].keys() if batch else []\n if additional_concat_attrs is None:\n additional_concat_attrs = []\n\n major_attr = set(batch[0].major_attr)\n sub_attr = set(batch[0].sub_attr)\n custom_attr = set(batch[0].custom_attr)\n for attr in sample_attrs:\n if attr in major_attr:\n tmp_dict = {}\n for sub_k in batch[0][attr].keys():\n tmp_dict[sub_k] = cls.make_tensor_from_batch(\n [item[attr][sub_k].to(device) for item in batch],\n device,\n concatenate,\n )\n result.append(tmp_dict)\n used_keys.append(attr)\n elif attr in sub_attr:\n result.append(\n cls.make_tensor_from_batch(\n [item[attr] for item in batch], device, concatenate\n )\n )\n used_keys.append(attr)\n elif attr == \"*\":\n # select custom keys\n tmp_dict = {}\n for remain_k in batch[0].keys():\n if (\n remain_k not in major_attr\n and remain_k not in sub_attr\n and remain_k not in used_keys\n ):\n tmp_dict[remain_k] = cls.make_tensor_from_batch(\n [item[remain_k] for item in batch],\n device,\n concatenate and remain_k in additional_concat_attrs,\n )\n result.append(tmp_dict)\n elif attr in custom_attr:\n result.append(\n cls.make_tensor_from_batch(\n [item[attr] for item in batch],\n device,\n concatenate and attr in additional_concat_attrs,\n )\n )\n used_keys.append(attr)\n return tuple(result)\n\n @staticmethod\n def make_tensor_from_batch(\n batch: List[Union[Scalar, t.Tensor]],\n device: Union[str, t.device],\n concatenate: bool,\n ):\n \"\"\"\n Make a tensor from a batch of data.\n Will concatenate input tensors in dimension 0.\n Or create a tensor of size (batch_size, 1) for scalars.\n\n Args:\n batch: Batch data.\n device: Device to move data to\n concatenate: Whether performing concatenation.\n\n Returns:\n Original batch if batch is empty,\n or tensor depends on your data (if concatenate),\n or original batch (if not concatenate).\n \"\"\"\n if concatenate and len(batch) != 0:\n item = batch[0]\n batch_size = len(batch)\n if t.is_tensor(item):\n batch = [it.to(device) for it in batch]\n return t.cat(batch, dim=0).to(device)\n else:\n try:\n return t.tensor(batch, device=device).view(batch_size, -1)\n except Exception:\n raise ValueError(f\"Batch not concatenable: {batch}\")\n else:\n return batch\n\n def __reduce__(self):\n # for pickling\n return 
self.__class__, (self.buffer_size, self.buffer_device)\n", "from machin.model.nets.base import static_module_wrapper as smw\nfrom machin.frame.algorithms.maddpg import MADDPG\nfrom machin.utils.learning_rate import gen_learning_rate_func\nfrom machin.utils.logging import default_logger as logger\nfrom machin.utils.helper_classes import Counter\nfrom machin.utils.conf import Config\nfrom machin.env.utils.openai_gym import disable_view_window\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom copy import deepcopy\n\nimport pytest\nimport torch as t\nimport torch.nn as nn\n\nfrom .utils import Smooth\n\nfrom test.util_create_ma_env import create_env\nfrom test.util_fixtures import *\n\n\nclass Actor(nn.Module):\n def __init__(self, state_dim, action_dim, action_range):\n super().__init__()\n\n self.fc1 = nn.Linear(state_dim, 16)\n self.fc2 = nn.Linear(16, 16)\n self.fc3 = nn.Linear(16, action_dim)\n self.action_range = action_range\n\n def forward(self, state):\n a = t.relu(self.fc1(state))\n a = t.relu(self.fc2(a))\n a = t.tanh(self.fc3(a)) * self.action_range\n return a\n\n\nclass ActorDiscrete(nn.Module):\n def __init__(self, state_dim, action_dim):\n super().__init__()\n\n self.fc1 = nn.Linear(state_dim, 16)\n self.fc2 = nn.Linear(16, 16)\n self.fc3 = nn.Linear(16, action_dim)\n\n def forward(self, state):\n a = t.relu(self.fc1(state))\n a = t.relu(self.fc2(a))\n a = t.softmax(self.fc3(a), dim=1)\n return a\n\n\nclass Critic(nn.Module):\n def __init__(self, state_dim, action_dim):\n # This critic implementation is shared by the prey(DDPG) and\n # predators(MADDPG)\n # Note: For MADDPG\n # state_dim is the dimension of all states from all agents.\n # action_dim is the dimension of all actions from all agents.\n super().__init__()\n\n self.fc1 = nn.Linear(state_dim + action_dim, 16)\n self.fc2 = nn.Linear(16, 16)\n self.fc3 = nn.Linear(16, 1)\n\n def forward(self, state, action):\n state_action = t.cat([state, action], 1)\n q = t.relu(self.fc1(state_action))\n q = t.relu(self.fc2(q))\n q = self.fc3(q)\n return q\n\n\nclass TestMADDPG:\n # configs and definitions\n @pytest.fixture(scope=\"class\")\n def train_config(self):\n disable_view_window()\n c = Config()\n # the cooperative environment environment provided in\n # https://github.com/openai/multiagent-particle-envs\n c.env_name = \"simple_spread\"\n c.env = create_env(c.env_name)\n c.env.discrete_action_input = True\n c.agent_num = 3\n c.action_num = c.env.action_space[0].n\n c.observe_dim = c.env.observation_space[0].shape[0]\n # for contiguous tests\n c.test_action_dim = 5\n c.test_action_range = 1\n c.test_observe_dim = 5\n c.test_agent_num = 3\n c.max_episodes = 1000\n c.max_steps = 200\n c.replay_size = 100000\n # from https://github.com/wsjeon/maddpg-rllib/tree/master/plots\n # PROBLEM: I have no idea how they calculate the rewards\n # I cannot replicate their reward curve\n c.solved_reward = -15\n c.solved_repeat = 5\n return c\n\n @pytest.fixture(scope=\"function\")\n def maddpg(self, train_config, device, dtype):\n c = train_config\n # for simplicity, prey will be trained with predators,\n # Predator can get the observation of prey, same for prey.\n actor = smw(\n ActorDiscrete(c.observe_dim, c.action_num).type(dtype).to(device),\n device,\n device,\n )\n critic = smw(\n Critic(c.observe_dim * c.agent_num, c.action_num * c.agent_num)\n .type(dtype)\n .to(device),\n device,\n device,\n )\n # set visible indexes to [[0], [1], [2]] is equivalent to using DDPG\n maddpg = MADDPG(\n [deepcopy(actor) for _ in 
range(c.test_agent_num)],\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n t.optim.Adam,\n nn.MSELoss(reduction=\"sum\"),\n replay_device=\"cpu\",\n replay_size=c.replay_size,\n pool_type=\"thread\",\n )\n return maddpg\n\n @pytest.fixture(scope=\"function\")\n def maddpg_disc(self, train_config, device, dtype):\n c = train_config\n actor = smw(\n ActorDiscrete(c.test_observe_dim, c.test_action_dim).type(dtype).to(device),\n device,\n device,\n )\n critic = smw(\n Critic(\n c.test_observe_dim * c.test_agent_num,\n c.test_action_dim * c.test_agent_num,\n )\n .type(dtype)\n .to(device),\n device,\n device,\n )\n\n maddpg = MADDPG(\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n t.optim.Adam,\n nn.MSELoss(reduction=\"sum\"),\n replay_device=\"cpu\",\n replay_size=c.replay_size,\n )\n return maddpg\n\n @pytest.fixture(scope=\"function\")\n def maddpg_cont(self, train_config, device, dtype):\n c = train_config\n actor = smw(\n Actor(c.test_observe_dim, c.test_action_dim, c.test_action_range)\n .type(dtype)\n .to(device),\n device,\n device,\n )\n critic = smw(\n Critic(\n c.test_observe_dim * c.test_agent_num,\n c.test_action_dim * c.test_agent_num,\n )\n .type(dtype)\n .to(device),\n device,\n device,\n )\n\n maddpg = MADDPG(\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n t.optim.Adam,\n nn.MSELoss(reduction=\"sum\"),\n replay_device=\"cpu\",\n replay_size=c.replay_size,\n )\n return maddpg\n\n @pytest.fixture(scope=\"function\")\n def maddpg_vis(self, train_config, device, dtype, tmpdir):\n c = train_config\n tmp_dir = tmpdir.make_numbered_dir()\n actor = smw(\n Actor(c.test_observe_dim, c.test_action_dim, c.test_action_range)\n .type(dtype)\n .to(device),\n device,\n device,\n )\n critic = smw(\n Critic(\n c.test_observe_dim * c.test_agent_num,\n c.test_action_dim * c.test_agent_num,\n )\n .type(dtype)\n .to(device),\n device,\n device,\n )\n\n maddpg = MADDPG(\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n t.optim.Adam,\n nn.MSELoss(reduction=\"sum\"),\n replay_device=\"cpu\",\n replay_size=c.replay_size,\n visualize=True,\n visualize_dir=str(tmp_dir),\n )\n return maddpg\n\n @pytest.fixture(scope=\"function\")\n def maddpg_lr(self, train_config, device, dtype):\n c = train_config\n actor = smw(\n Actor(c.test_observe_dim, c.test_action_dim, c.test_action_range)\n .type(dtype)\n .to(device),\n device,\n device,\n )\n critic = smw(\n Critic(\n c.test_observe_dim * c.test_agent_num,\n c.test_action_dim * c.test_agent_num,\n )\n .type(dtype)\n .to(device),\n device,\n device,\n )\n lr_func = gen_learning_rate_func([(0, 1e-3), (200000, 3e-4)], logger=logger)\n with pytest.raises(TypeError, match=\"missing .+ positional argument\"):\n _ = MADDPG(\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n 
t.optim.Adam,\n nn.MSELoss(reduction=\"sum\"),\n replay_device=\"cpu\",\n replay_size=c.replay_size,\n lr_scheduler=LambdaLR,\n )\n maddpg = MADDPG(\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n t.optim.Adam,\n nn.MSELoss(reduction=\"sum\"),\n replay_device=\"cpu\",\n replay_size=c.replay_size,\n lr_scheduler=LambdaLR,\n lr_scheduler_args=(\n [(lr_func,)] * c.test_agent_num,\n [(lr_func,)] * c.test_agent_num,\n ),\n )\n return maddpg\n\n @pytest.fixture(scope=\"function\")\n def maddpg_train(self, train_config):\n c = train_config\n # for simplicity, prey will be trained with predators,\n # Predator can get the observation of prey, same for prey.\n actor = smw(ActorDiscrete(c.observe_dim, c.action_num), \"cpu\", \"cpu\")\n critic = smw(\n Critic(c.observe_dim * c.agent_num, c.action_num * c.agent_num),\n \"cpu\",\n \"cpu\",\n )\n # set visible indexes to [[0], [1], [2]] is equivalent to using DDPG\n maddpg = MADDPG(\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(actor) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n [deepcopy(critic) for _ in range(c.test_agent_num)],\n t.optim.Adam,\n nn.MSELoss(reduction=\"sum\"),\n replay_device=\"cpu\",\n replay_size=c.replay_size,\n pool_type=\"thread\",\n )\n return maddpg\n\n ########################################################################\n # Test for MADDPG contiguous domain acting\n ########################################################################\n def test_contiguous_act(self, train_config, maddpg_cont, dtype):\n c = train_config\n states = [\n {\"state\": t.zeros([1, c.test_observe_dim], dtype=dtype)}\n ] * c.test_agent_num\n maddpg_cont.act(states)\n maddpg_cont.act(states, use_target=True)\n maddpg_cont.act_with_noise(states, noise_param=(0, 1.0), mode=\"uniform\")\n maddpg_cont.act_with_noise(states, noise_param=(0, 1.0), mode=\"normal\")\n maddpg_cont.act_with_noise(\n states, noise_param=(0, 1.0, -1.0, 1.0), mode=\"clipped_normal\"\n )\n maddpg_cont.act_with_noise(states, noise_param={\"mu\": 0, \"sigma\": 1}, mode=\"ou\")\n with pytest.raises(ValueError, match=\"Unknown noise type\"):\n maddpg_cont.act_with_noise(\n states, noise_param=None, mode=\"some_unknown_noise\"\n )\n\n ########################################################################\n # Test for MADDPG discrete domain acting\n ########################################################################\n def test_discrete_act(self, train_config, maddpg_disc, dtype):\n c = train_config\n states = [\n {\"state\": t.zeros([1, c.test_observe_dim], dtype=dtype)}\n ] * c.test_agent_num\n maddpg_disc.act_discrete(states)\n maddpg_disc.act_discrete(states, use_target=True)\n maddpg_disc.act_discrete_with_noise(states)\n maddpg_disc.act_discrete_with_noise(states, use_target=True)\n\n ########################################################################\n # Test for MADDPG criticizing\n ########################################################################\n def test__criticize(self, train_config, maddpg_cont, dtype):\n c = train_config\n states = [\n {\"state\": t.zeros([1, c.test_observe_dim], dtype=dtype)}\n ] * c.test_agent_num\n actions = [\n {\"action\": t.zeros([1, c.test_action_dim], dtype=dtype)}\n ] * c.test_agent_num\n maddpg_cont._criticize(states, actions, 0)\n maddpg_cont._criticize(states, actions, 1, 
use_target=True)\n\n ########################################################################\n # Test for MADDPG storage\n ########################################################################\n def test_store(self, train_config, maddpg_cont, dtype):\n c = train_config\n old_state = state = t.zeros([1, c.test_observe_dim], dtype=dtype)\n action = t.zeros([1, c.test_action_dim], dtype=dtype)\n maddpg_cont.store_transitions(\n [\n {\n \"state\": {\"state\": old_state},\n \"action\": {\"action\": action},\n \"next_state\": {\"state\": state},\n \"reward\": 0,\n \"terminal\": False,\n }\n ]\n * c.test_agent_num\n )\n maddpg_cont.store_episodes(\n [\n [\n {\n \"state\": {\"state\": old_state},\n \"action\": {\"action\": action},\n \"next_state\": {\"state\": state},\n \"reward\": 0,\n \"terminal\": False,\n }\n ]\n ]\n * c.test_agent_num\n )\n\n ########################################################################\n # Test for MADDPG update\n ########################################################################\n def test_update(self, train_config, maddpg_cont, dtype):\n c = train_config\n old_state = state = t.zeros([1, c.test_observe_dim], dtype=dtype)\n action = t.zeros([1, c.test_action_dim], dtype=dtype)\n maddpg_cont.store_episodes(\n [\n [\n {\n \"state\": {\"state\": old_state},\n \"action\": {\"action\": action},\n \"next_state\": {\"state\": state},\n \"reward\": 0,\n \"terminal\": False,\n }\n ]\n ]\n * c.test_agent_num\n )\n maddpg_cont.update(\n update_value=True,\n update_policy=True,\n update_target=True,\n concatenate_samples=True,\n )\n\n def test_vis_update(self, train_config, maddpg_vis, dtype):\n c = train_config\n old_state = state = t.zeros([1, c.test_observe_dim], dtype=dtype)\n action = t.zeros([1, c.test_action_dim], dtype=dtype)\n maddpg_vis.store_episodes(\n [\n [\n {\n \"state\": {\"state\": old_state},\n \"action\": {\"action\": action},\n \"next_state\": {\"state\": state},\n \"reward\": 0,\n \"terminal\": False,\n }\n ]\n ]\n * c.test_agent_num\n )\n maddpg_vis.update(\n update_value=True,\n update_policy=True,\n update_target=True,\n concatenate_samples=True,\n )\n\n ########################################################################\n # Test for MADDPG save & load\n ########################################################################\n def test_save_load(self, train_config, maddpg_cont, tmpdir):\n save_dir = tmpdir.make_numbered_dir()\n maddpg_cont.save(\n model_dir=str(save_dir),\n network_map={\"critic_target\": \"critic_t\", \"actor_target\": \"actor_t\"},\n version=1000,\n )\n maddpg_cont.load(\n model_dir=str(save_dir),\n network_map={\"critic_target\": \"critic_t\", \"actor_target\": \"actor_t\"},\n version=1000,\n )\n\n ########################################################################\n # Test for MADDPG lr_scheduler\n ########################################################################\n def test_lr_scheduler(self, train_config, maddpg_lr):\n maddpg_lr.update_lr_scheduler()\n\n ########################################################################\n # Test for MADDPG config & init\n ########################################################################\n def test_config_init(self, train_config):\n c = train_config\n config = MADDPG.generate_config({})\n config[\"frame_config\"][\"models\"] = [\n [\"Actor\"] * c.test_agent_num,\n [\"Actor\"] * c.test_agent_num,\n [\"Critic\"] * c.test_agent_num,\n [\"Critic\"] * c.test_agent_num,\n ]\n config[\"frame_config\"][\"model_args\"] = [[()] * c.test_agent_num] * 4\n 
config[\"frame_config\"][\"model_kwargs\"] = (\n [\n [\n {\n \"state_dim\": c.test_observe_dim,\n \"action_dim\": c.test_action_dim,\n \"action_range\": c.test_action_range,\n }\n ]\n * c.test_agent_num\n ]\n * 2\n + [\n [\n {\n \"state_dim\": c.test_observe_dim * c.test_agent_num,\n \"action_dim\": c.test_action_dim * c.test_agent_num,\n }\n ]\n * c.test_agent_num\n ]\n * 2\n )\n\n maddpg = MADDPG.init_from_config(config)\n\n old_state = state = t.zeros([1, c.test_observe_dim], dtype=t.float32)\n action = t.zeros([1, c.test_action_dim], dtype=t.float32)\n maddpg.store_episodes(\n [\n [\n {\n \"state\": {\"state\": old_state},\n \"action\": {\"action\": action},\n \"next_state\": {\"state\": state},\n \"reward\": 0,\n \"terminal\": False,\n }\n ]\n ]\n * c.test_agent_num\n )\n maddpg.update()\n\n ########################################################################\n # Test for MADDPG full training.\n ########################################################################\n def test_full_train(self, train_config, maddpg_train):\n c = train_config\n\n # begin training\n episode, step = Counter(), Counter()\n\n # first for prey, second for pred\n smoother = Smooth()\n reward_fulfilled = Counter()\n terminal = False\n\n env = c.env\n while episode < c.max_episodes:\n episode.count()\n\n # batch size = 1\n total_reward = 0\n states = [t.tensor(st, dtype=t.float32) for st in env.reset()]\n\n while not terminal and step <= c.max_steps:\n step.count()\n with t.no_grad():\n old_states = states\n\n # agent model inference\n results = maddpg_train.act_discrete_with_noise(\n [{\"state\": st.unsqueeze(0)} for st in states]\n )\n actions = [int(r[0]) for r in results]\n action_probs = [r[1] for r in results]\n\n states, rewards, terminals, _ = env.step(actions)\n states = [t.tensor(st, dtype=t.float32) for st in states]\n\n total_reward += float(sum(rewards)) / c.agent_num\n\n maddpg_train.store_transitions(\n [\n {\n \"state\": {\"state\": ost.unsqueeze(0)},\n \"action\": {\"action\": act},\n \"next_state\": {\"state\": st.unsqueeze(0)},\n \"reward\": float(rew),\n \"terminal\": term or step == c.max_steps,\n }\n for ost, act, st, rew, term in zip(\n old_states, action_probs, states, rewards, terminals\n )\n ]\n )\n\n # update\n if episode > 5:\n for i in range(step.get()):\n maddpg_train.update()\n\n # total reward is divided by steps here, since:\n # \"Agents are rewarded based on minimum agent distance\n # to each landmark, penalized for collisions\"\n smoother.update(total_reward / step.get())\n logger.info(f\"Episode {episode} total steps={step}\")\n step.reset()\n terminal = False\n\n logger.info(f\"Episode {episode} total reward={smoother.value:.2f}\")\n\n if smoother.value > c.solved_reward and episode > 20:\n reward_fulfilled.count()\n if reward_fulfilled >= c.solved_repeat:\n logger.info(\"Environment solved!\")\n return\n else:\n reward_fulfilled.reset()\n\n pytest.fail(\"MADDPG Training failed.\")\n" ]
[ [ "torch.tensor", "torch.is_tensor", "torch.cat" ], [ "torch.zeros", "torch.cat", "torch.tensor", "torch.nn.Linear", "torch.no_grad", "torch.nn.MSELoss" ] ]
akshit-protonn/models
[ "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba", "38c8c6fe4144c93d6aadd19981c2b90570c29eba" ]
[ "official/nlp/modeling/networks/encoder_scaffold_test.py", "research/delf/delf/python/detect_to_retrieve/cluster_delf_features.py", "official/vision/beta/projects/yt8m/modeling/yt8m_model_test.py", "official/vision/beta/modeling/factory_test.py", "research/object_detection/utils/variables_helper.py", "official/nlp/data/create_pretraining_data_test.py", "official/nlp/modeling/networks/classification.py", "official/nlp/nhnet/optimizer.py", "official/vision/image_classification/callbacks.py", "official/nlp/bert/tf1_checkpoint_converter_lib.py", "official/vision/beta/evaluation/coco_evaluator.py", "official/vision/beta/modeling/backbones/efficientnet.py", "official/vision/beta/projects/movinet/export_saved_model.py", "official/vision/beta/serving/export_base.py", "official/nlp/continuous_finetune_lib_test.py", "official/nlp/keras_nlp/layers/self_attention_mask.py", "official/nlp/modeling/layers/transformer.py", "official/nlp/modeling/layers/mat_mul_with_margin_test.py", "research/object_detection/utils/dataset_util.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for EncoderScaffold network.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import\nfrom official.modeling import activations\nfrom official.nlp.modeling import layers\nfrom official.nlp.modeling.networks import encoder_scaffold\n\n\n# Test class that wraps a standard transformer layer. If this layer is called\n# at any point, the list passed to the config object will be filled with a\n# boolean 'True'. We register this class as a Keras serializable so we can\n# test serialization below.\[email protected]_keras_serializable(package=\"TestOnly\")\nclass ValidatedTransformerLayer(layers.Transformer):\n\n def __init__(self, call_list, call_class=None, **kwargs):\n super(ValidatedTransformerLayer, self).__init__(**kwargs)\n self.list = call_list\n self.call_class = call_class\n\n def call(self, inputs):\n self.list.append(True)\n return super(ValidatedTransformerLayer, self).call(inputs)\n\n def get_config(self):\n config = super(ValidatedTransformerLayer, self).get_config()\n config[\"call_list\"] = self.list\n config[\"call_class\"] = tf.keras.utils.get_registered_name(self.call_class)\n return config\n\n\[email protected]_keras_serializable(package=\"TestLayerOnly\")\nclass TestLayer(tf.keras.layers.Layer):\n pass\n\n\n# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. 
It\n# guarantees forward compatibility of this code for the V2 switchover.\n@keras_parameterized.run_all_keras_modes\nclass EncoderScaffoldLayerClassTest(keras_parameterized.TestCase):\n\n def tearDown(self):\n super(EncoderScaffoldLayerClassTest, self).tearDown()\n tf.keras.mixed_precision.set_global_policy(\"float32\")\n\n @parameterized.named_parameters(\n dict(testcase_name=\"only_final_output\", return_all_layer_outputs=False),\n dict(testcase_name=\"all_layer_outputs\", return_all_layer_outputs=True))\n def test_network_creation(self, return_all_layer_outputs):\n hidden_size = 32\n sequence_length = 21\n num_hidden_instances = 3\n embedding_cfg = {\n \"vocab_size\": 100,\n \"type_vocab_size\": 16,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n\n call_list = []\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"call_list\":\n call_list\n }\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=num_hidden_instances,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cls=ValidatedTransformerLayer,\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg,\n layer_norm_before_pooling=True,\n return_all_layer_outputs=return_all_layer_outputs)\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n output_data, pooled = test_network([word_ids, mask, type_ids])\n\n if return_all_layer_outputs:\n self.assertIsInstance(output_data, list)\n self.assertLen(output_data, num_hidden_instances)\n data = output_data[-1]\n else:\n data = output_data\n self.assertIsInstance(test_network.hidden_layers, list)\n self.assertLen(test_network.hidden_layers, num_hidden_instances)\n self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)\n\n expected_data_shape = [None, sequence_length, hidden_size]\n expected_pooled_shape = [None, hidden_size]\n self.assertAllEqual(expected_data_shape, data.shape.as_list())\n self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())\n\n # The default output dtype is float32.\n self.assertAllEqual(tf.float32, data.dtype)\n self.assertAllEqual(tf.float32, pooled.dtype)\n\n # If call_list[0] exists and is True, the passed layer class was\n # instantiated from the given config properly.\n self.assertNotEmpty(call_list)\n self.assertTrue(call_list[0], \"The passed layer class wasn't instantiated.\")\n\n self.assertTrue(hasattr(test_network, \"_output_layer_norm\"))\n\n def test_network_creation_with_float16_dtype(self):\n tf.keras.mixed_precision.set_global_policy(\"mixed_float16\")\n hidden_size = 32\n sequence_length = 21\n embedding_cfg = {\n \"vocab_size\": 100,\n \"type_vocab_size\": 16,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n hidden_cfg = {\n 
\"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg)\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask, type_ids])\n\n expected_data_shape = [None, sequence_length, hidden_size]\n expected_pooled_shape = [None, hidden_size]\n self.assertAllEqual(expected_data_shape, data.shape.as_list())\n self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())\n\n # If float_dtype is set to float16, the data output is float32 (from a layer\n # norm) and pool output should be float16.\n self.assertAllEqual(tf.float32, data.dtype)\n self.assertAllEqual(tf.float16, pooled.dtype)\n\n def test_network_invocation(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n num_types = 7\n embedding_cfg = {\n \"vocab_size\": vocab_size,\n \"type_vocab_size\": num_types,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg,\n dict_outputs=True)\n\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n outputs = test_network([word_ids, mask, type_ids])\n\n # Create a model based off of this network:\n model = tf.keras.Model([word_ids, mask, type_ids], outputs)\n\n # Invoke the model. 
We can't validate the output data here (the model is too\n # complex) but this will catch structural runtime errors.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n type_id_data = np.random.randint(\n num_types, size=(batch_size, sequence_length))\n preds = model.predict([word_id_data, mask_data, type_id_data])\n self.assertEqual(preds[\"pooled_output\"].shape, (3, hidden_size))\n\n # Creates a EncoderScaffold with max_sequence_length != sequence_length\n num_types = 7\n embedding_cfg = {\n \"vocab_size\": vocab_size,\n \"type_vocab_size\": num_types,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length * 2,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg)\n outputs = test_network([word_ids, mask, type_ids])\n model = tf.keras.Model([word_ids, mask, type_ids], outputs)\n _ = model.predict([word_id_data, mask_data, type_id_data])\n\n def test_serialize_deserialize(self):\n # Create a network object that sets all of its config options.\n hidden_size = 32\n sequence_length = 21\n embedding_cfg = {\n \"vocab_size\": 100,\n \"type_vocab_size\": 16,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n # Create a small EncoderScaffold for testing.\n network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg)\n\n # Create another network object from the first object's config.\n new_network = encoder_scaffold.EncoderScaffold.from_config(\n network.get_config())\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(network.get_config(), new_network.get_config())\n\n\nclass Embeddings(tf.keras.Model):\n\n def __init__(self, vocab_size, hidden_size):\n super().__init__()\n self.inputs = [\n tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name=\"input_word_ids\"),\n tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name=\"input_mask\")\n ]\n self.attention_mask = layers.SelfAttentionMask()\n self.embedding_layer = layers.OnDeviceEmbedding(\n vocab_size=vocab_size,\n embedding_width=hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),\n 
name=\"word_embeddings\")\n\n def call(self, inputs):\n word_ids, mask = inputs\n word_embeddings = self.embedding_layer(word_ids)\n return word_embeddings, self.attention_mask([word_embeddings, mask])\n\n\n@keras_parameterized.run_all_keras_modes\nclass EncoderScaffoldEmbeddingNetworkTest(keras_parameterized.TestCase):\n\n def test_network_invocation(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n\n # Build an embedding network to swap in for the default network. This one\n # will have 2 inputs (mask and word_ids) instead of 3, and won't use\n # positional embeddings.\n network = Embeddings(vocab_size, hidden_size)\n\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cls=network)\n\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask])\n\n # Create a model based off of this network:\n model = tf.keras.Model([word_ids, mask], [data, pooled])\n\n # Invoke the model. We can't validate the output data here (the model is too\n # complex) but this will catch structural runtime errors.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n _ = model.predict([word_id_data, mask_data])\n\n def test_serialize_deserialize(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n\n # Build an embedding network to swap in for the default network. 
This one\n # will have 2 inputs (mask and word_ids) instead of 3, and won't use\n # positional embeddings.\n\n word_ids = tf.keras.layers.Input(\n shape=(sequence_length,), dtype=tf.int32, name=\"input_word_ids\")\n mask = tf.keras.layers.Input(\n shape=(sequence_length,), dtype=tf.int32, name=\"input_mask\")\n embedding_layer = layers.OnDeviceEmbedding(\n vocab_size=vocab_size,\n embedding_width=hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),\n name=\"word_embeddings\")\n word_embeddings = embedding_layer(word_ids)\n attention_mask = layers.SelfAttentionMask()([word_embeddings, mask])\n network = tf.keras.Model([word_ids, mask],\n [word_embeddings, attention_mask])\n\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cls=network,\n embedding_data=embedding_layer.embeddings)\n\n # Create another network object from the first object's config.\n new_network = encoder_scaffold.EncoderScaffold.from_config(\n test_network.get_config())\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(test_network.get_config(), new_network.get_config())\n\n # Create a model based off of the old and new networks:\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n\n data, pooled = new_network([word_ids, mask])\n new_model = tf.keras.Model([word_ids, mask], [data, pooled])\n\n data, pooled = test_network([word_ids, mask])\n model = tf.keras.Model([word_ids, mask], [data, pooled])\n\n # Copy the weights between models.\n new_model.set_weights(model.get_weights())\n\n # Invoke the models.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n data, cls = model.predict([word_id_data, mask_data])\n new_data, new_cls = new_model.predict([word_id_data, mask_data])\n\n # The output should be equal.\n self.assertAllEqual(data, new_data)\n self.assertAllEqual(cls, new_cls)\n\n # We should not be able to get a reference to the embedding data.\n with self.assertRaisesRegex(RuntimeError, \".*does not have a reference.*\"):\n new_network.get_embedding_table()\n\n\n@keras_parameterized.run_all_keras_modes\nclass EncoderScaffoldHiddenInstanceTest(keras_parameterized.TestCase):\n\n def test_network_invocation(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n num_types = 7\n\n embedding_cfg = {\n \"vocab_size\": vocab_size,\n \"type_vocab_size\": num_types,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n\n call_list = []\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n 
\"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"call_list\":\n call_list\n }\n # Create a small EncoderScaffold for testing. This time, we pass an already-\n # instantiated layer object.\n\n xformer = ValidatedTransformerLayer(**hidden_cfg)\n\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cls=xformer,\n embedding_cfg=embedding_cfg)\n\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask, type_ids])\n\n # Create a model based off of this network:\n model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n\n # Invoke the model. We can't validate the output data here (the model is too\n # complex) but this will catch structural runtime errors.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n type_id_data = np.random.randint(\n num_types, size=(batch_size, sequence_length))\n _ = model.predict([word_id_data, mask_data, type_id_data])\n\n # If call_list[0] exists and is True, the passed layer class was\n # called as part of the graph creation.\n self.assertNotEmpty(call_list)\n self.assertTrue(call_list[0], \"The passed layer class wasn't instantiated.\")\n\n @parameterized.parameters(True, False)\n def test_serialize_deserialize(self, use_hidden_cls_instance):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n num_types = 7\n\n embedding_cfg = {\n \"vocab_size\": vocab_size,\n \"type_vocab_size\": num_types,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n\n call_list = []\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"call_list\":\n call_list,\n \"call_class\":\n TestLayer\n }\n # Create a small EncoderScaffold for testing. 
This time, we pass an already-\n # instantiated layer object.\n kwargs = dict(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n embedding_cfg=embedding_cfg)\n\n if use_hidden_cls_instance:\n xformer = ValidatedTransformerLayer(**hidden_cfg)\n test_network = encoder_scaffold.EncoderScaffold(\n hidden_cls=xformer, **kwargs)\n else:\n test_network = encoder_scaffold.EncoderScaffold(\n hidden_cls=ValidatedTransformerLayer, hidden_cfg=hidden_cfg, **kwargs)\n\n # Create another network object from the first object's config.\n new_network = encoder_scaffold.EncoderScaffold.from_config(\n test_network.get_config())\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(test_network.get_config(), new_network.get_config())\n\n # Create a model based off of the old and new networks:\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n\n data, pooled = new_network([word_ids, mask, type_ids])\n new_model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n\n data, pooled = test_network([word_ids, mask, type_ids])\n model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n\n # Copy the weights between models.\n new_model.set_weights(model.get_weights())\n\n # Invoke the models.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n type_id_data = np.random.randint(\n num_types, size=(batch_size, sequence_length))\n data, cls = model.predict([word_id_data, mask_data, type_id_data])\n new_data, new_cls = new_model.predict(\n [word_id_data, mask_data, type_id_data])\n\n # The output should be equal.\n self.assertAllEqual(data, new_data)\n self.assertAllEqual(cls, new_cls)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Lint as: python3\n# Copyright 2019 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Clusters DELF features using the K-means algorithm.\n\nAll DELF local feature descriptors for a given dataset's index images are loaded\nas the input.\n\nNote that:\n- we only use features extracted from whole images (no features from boxes are\n used).\n- the codebook should be trained on Paris images for Oxford retrieval\n experiments, and vice-versa.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nimport time\n\nfrom absl import app\nimport numpy as np\nimport tensorflow as tf\n\nfrom delf import feature_io\nfrom 
delf.python.datasets.revisited_op import dataset\n\ncmd_args = None\n\n# Extensions.\n_DELF_EXTENSION = '.delf'\n\n# Default DELF dimensionality.\n_DELF_DIM = 128\n\n# Pace to report log when collecting features.\n_STATUS_CHECK_ITERATIONS = 100\n\n\nclass _IteratorInitHook(tf.estimator.SessionRunHook):\n \"\"\"Hook to initialize data iterator after session is created.\"\"\"\n\n def __init__(self):\n super(_IteratorInitHook, self).__init__()\n self.iterator_initializer_fn = None\n\n def after_create_session(self, session, coord):\n \"\"\"Initialize the iterator after the session has been created.\"\"\"\n del coord\n self.iterator_initializer_fn(session)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise RuntimeError('Too many command-line arguments.')\n\n # Process output directory.\n if tf.io.gfile.exists(cmd_args.output_cluster_dir):\n raise RuntimeError(\n 'output_cluster_dir = %s already exists. This may indicate that a '\n 'previous run already wrote checkpoints in this directory, which would '\n 'lead to incorrect training. Please re-run this script by specifying an'\n ' inexisting directory.' % cmd_args.output_cluster_dir)\n else:\n tf.io.gfile.makedirs(cmd_args.output_cluster_dir)\n\n # Read list of index images from dataset file.\n print('Reading list of index images from dataset file...')\n _, index_list, _ = dataset.ReadDatasetFile(cmd_args.dataset_file_path)\n num_images = len(index_list)\n print('done! Found %d images' % num_images)\n\n # Loop over list of index images and collect DELF features.\n features_for_clustering = []\n start = time.clock()\n print('Starting to collect features from index images...')\n for i in range(num_images):\n if i > 0 and i % _STATUS_CHECK_ITERATIONS == 0:\n elapsed = (time.clock() - start)\n print('Processing index image %d out of %d, last %d '\n 'images took %f seconds' %\n (i, num_images, _STATUS_CHECK_ITERATIONS, elapsed))\n start = time.clock()\n\n features_filename = index_list[i] + _DELF_EXTENSION\n features_fullpath = os.path.join(cmd_args.features_dir, features_filename)\n _, _, features, _, _ = feature_io.ReadFromFile(features_fullpath)\n if features.size != 0:\n assert features.shape[1] == _DELF_DIM\n for feature in features:\n features_for_clustering.append(feature)\n\n features_for_clustering = np.array(features_for_clustering, dtype=np.float32)\n print('All features were loaded! 
There are %d features, each with %d '\n 'dimensions' %\n (features_for_clustering.shape[0], features_for_clustering.shape[1]))\n\n # Run K-means clustering.\n def _get_input_fn():\n \"\"\"Helper function to create input function and hook for training.\n\n Returns:\n input_fn: Input function for k-means Estimator training.\n init_hook: Hook used to load data during training.\n \"\"\"\n init_hook = _IteratorInitHook()\n\n def _input_fn():\n \"\"\"Produces tf.data.Dataset object for k-means training.\n\n Returns:\n Tensor with the data for training.\n \"\"\"\n features_placeholder = tf.compat.v1.placeholder(\n tf.float32, features_for_clustering.shape)\n delf_dataset = tf.data.Dataset.from_tensor_slices((features_placeholder))\n delf_dataset = delf_dataset.shuffle(1000).batch(\n features_for_clustering.shape[0])\n iterator = tf.compat.v1.data.make_initializable_iterator(delf_dataset)\n\n def _initializer_fn(sess):\n \"\"\"Initialize dataset iterator, feed in the data.\"\"\"\n sess.run(\n iterator.initializer,\n feed_dict={features_placeholder: features_for_clustering})\n\n init_hook.iterator_initializer_fn = _initializer_fn\n return iterator.get_next()\n\n return _input_fn, init_hook\n\n input_fn, init_hook = _get_input_fn()\n\n kmeans = tf.compat.v1.estimator.experimental.KMeans(\n num_clusters=cmd_args.num_clusters,\n model_dir=cmd_args.output_cluster_dir,\n use_mini_batch=False,\n )\n\n print('Starting K-means clustering...')\n start = time.time()\n for i in range(cmd_args.num_iterations):\n kmeans.train(input_fn, hooks=[init_hook])\n average_sum_squared_error = kmeans.evaluate(\n input_fn, hooks=[init_hook])['score'] / features_for_clustering.shape[0]\n elapsed = (time.time() - start)\n print('K-means iteration %d (out of %d) took %f seconds, '\n 'average-sum-of-squares: %f' %\n (i, cmd_args.num_iterations, elapsed, average_sum_squared_error))\n start = time.time()\n\n print('K-means clustering finished!')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.add_argument(\n '--dataset_file_path',\n type=str,\n default='/tmp/gnd_roxford5k.mat',\n help=\"\"\"\n Dataset file for Revisited Oxford or Paris dataset, in .mat format. The\n list of index images loaded from this file is used to collect local\n features, which are assumed to be in <image_name>.delf file format.\n \"\"\")\n parser.add_argument(\n '--features_dir',\n type=str,\n default='/tmp/features',\n help=\"\"\"\n Directory where DELF feature files are to be found.\n \"\"\")\n parser.add_argument(\n '--num_clusters',\n type=int,\n default=1024,\n help=\"\"\"\n Number of clusters to use.\n \"\"\")\n parser.add_argument(\n '--num_iterations',\n type=int,\n default=50,\n help=\"\"\"\n Number of iterations to use.\n \"\"\")\n parser.add_argument(\n '--output_cluster_dir',\n type=str,\n default='/tmp/cluster',\n help=\"\"\"\n Directory where clustering outputs are written to. This directory should\n not exist before running this script; it will be created during\n clustering.\n \"\"\")\n cmd_args, unparsed = parser.parse_known_args()\n app.run(main=main, argv=[sys.argv[0]] + unparsed)\n", "# Copyright 2021 The TensorFlow Authors. 
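The `_IteratorInitHook` pattern in the clustering script above generalizes to any `tf.compat.v1` Estimator that must be fed through a placeholder-backed iterator. A minimal sketch of the same idea with toy data (the toy array and cluster count are illustrative, not part of the script):

```python
import numpy as np
import tensorflow as tf

toy_features = np.random.rand(100, 8).astype(np.float32)
init_hook = _IteratorInitHook()  # reuses the hook class defined above


def toy_input_fn():
  # A placeholder keeps the (potentially large) array out of the graph proto;
  # the hook feeds it in once the session exists.
  placeholder = tf.compat.v1.placeholder(tf.float32, toy_features.shape)
  ds = tf.data.Dataset.from_tensor_slices(placeholder).batch(
      toy_features.shape[0])
  iterator = tf.compat.v1.data.make_initializable_iterator(ds)
  init_hook.iterator_initializer_fn = lambda sess: sess.run(
      iterator.initializer, feed_dict={placeholder: toy_features})
  return iterator.get_next()


kmeans = tf.compat.v1.estimator.experimental.KMeans(
    num_clusters=4, use_mini_batch=False)
kmeans.train(toy_input_fn, hooks=[init_hook])
```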
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for yt8m network.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom official.vision.beta.projects.yt8m.configs import yt8m as yt8m_cfg\nfrom official.vision.beta.projects.yt8m.modeling import yt8m_model\n\n\nclass YT8MNetworkTest(parameterized.TestCase, tf.test.TestCase):\n \"\"\"Class for testing yt8m network.\"\"\"\n\n # test_yt8m_network_creation arbitrary params\n @parameterized.parameters((32, 1152)) # 1152 = 1024 + 128\n def test_yt8m_network_creation(self, num_frames, feature_dims):\n \"\"\"Test for creation of a YT8M Model.\n\n Args:\n num_frames: number of frames.\n feature_dims: indicates total dimension size of the features.\n \"\"\"\n input_specs = tf.keras.layers.InputSpec(shape=[num_frames, feature_dims])\n\n num_classes = 3862\n model = yt8m_model.YT8MModel(\n input_params=yt8m_cfg.YT8MTask.model,\n num_frames=num_frames,\n num_classes=num_classes,\n input_specs=input_specs)\n\n # batch = 2 -> arbitrary value for test\n inputs = np.random.rand(2 * num_frames, feature_dims)\n logits = model(inputs)\n self.assertAllEqual([2, num_classes], logits.numpy().shape)\n\n def test_serialize_deserialize(self):\n model = yt8m_model.YT8MModel(input_params=yt8m_cfg.YT8MTask.model)\n\n config = model.get_config()\n new_model = yt8m_model.YT8MModel.from_config(config)\n\n # If the serialization was successful,\n # the new config should match the old.\n self.assertAllEqual(model.get_config(), new_model.get_config())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2021 The TensorFlow Authors. 
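The serialize/deserialize test above relies on the standard Keras config round trip: `get_config` must return a plain dict from which `from_config` (by default `cls(**config)`) can rebuild an equivalent object. A generic sketch of that pattern with a toy model (the model name and field are illustrative only):

```python
import tensorflow as tf


class TinyModel(tf.keras.Model):
  """Toy model used only to illustrate the config round trip."""

  def __init__(self, units=4, **kwargs):
    super().__init__(**kwargs)
    self._units = units
    self.dense = tf.keras.layers.Dense(units)

  def call(self, inputs):
    return self.dense(inputs)

  def get_config(self):
    return {'units': self._units}


model = TinyModel(units=8)
restored = TinyModel.from_config(model.get_config())
# If serialization is lossless, the two configs must match.
assert model.get_config() == restored.get_config()
```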
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for factory.py.\"\"\"\n\n# Import libraries\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom official.vision.beta.configs import backbones\nfrom official.vision.beta.configs import backbones_3d\nfrom official.vision.beta.configs import image_classification as classification_cfg\nfrom official.vision.beta.configs import maskrcnn as maskrcnn_cfg\nfrom official.vision.beta.configs import retinanet as retinanet_cfg\nfrom official.vision.beta.configs import video_classification as video_classification_cfg\nfrom official.vision.beta.modeling import factory\nfrom official.vision.beta.modeling import factory_3d\n\n\nclass ClassificationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(\n ('resnet', (224, 224), 5e-5),\n ('resnet', (224, 224), None),\n ('resnet', (None, None), 5e-5),\n ('resnet', (None, None), None),\n )\n def test_builder(self, backbone_type, input_size, weight_decay):\n num_classes = 2\n input_specs = tf.keras.layers.InputSpec(\n shape=[None, input_size[0], input_size[1], 3])\n model_config = classification_cfg.ImageClassificationModel(\n num_classes=num_classes,\n backbone=backbones.Backbone(type=backbone_type))\n l2_regularizer = (\n tf.keras.regularizers.l2(weight_decay) if weight_decay else None)\n _ = factory.build_classification_model(\n input_specs=input_specs,\n model_config=model_config,\n l2_regularizer=l2_regularizer)\n\n\nclass MaskRCNNBuilderTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(\n ('resnet', (640, 640)),\n ('resnet', (None, None)),\n )\n def test_builder(self, backbone_type, input_size):\n num_classes = 2\n input_specs = tf.keras.layers.InputSpec(\n shape=[None, input_size[0], input_size[1], 3])\n model_config = maskrcnn_cfg.MaskRCNN(\n num_classes=num_classes,\n backbone=backbones.Backbone(type=backbone_type))\n l2_regularizer = tf.keras.regularizers.l2(5e-5)\n _ = factory.build_maskrcnn(\n input_specs=input_specs,\n model_config=model_config,\n l2_regularizer=l2_regularizer)\n\n\nclass RetinaNetBuilderTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(\n ('resnet', (640, 640), False),\n ('resnet', (None, None), True),\n )\n def test_builder(self, backbone_type, input_size, has_att_heads):\n num_classes = 2\n input_specs = tf.keras.layers.InputSpec(\n shape=[None, input_size[0], input_size[1], 3])\n if has_att_heads:\n attribute_heads_config = [\n retinanet_cfg.AttributeHead(name='att1'),\n retinanet_cfg.AttributeHead(\n name='att2', type='classification', size=2),\n ]\n else:\n attribute_heads_config = None\n model_config = retinanet_cfg.RetinaNet(\n num_classes=num_classes,\n backbone=backbones.Backbone(type=backbone_type),\n head=retinanet_cfg.RetinaNetHead(\n attribute_heads=attribute_heads_config))\n l2_regularizer = tf.keras.regularizers.l2(5e-5)\n _ = factory.build_retinanet(\n input_specs=input_specs,\n 
model_config=model_config,\n l2_regularizer=l2_regularizer)\n if has_att_heads:\n self.assertEqual(model_config.head.attribute_heads[0].as_dict(),\n dict(name='att1', type='regression', size=1))\n self.assertEqual(model_config.head.attribute_heads[1].as_dict(),\n dict(name='att2', type='classification', size=2))\n\n\nclass VideoClassificationModelBuilderTest(parameterized.TestCase,\n tf.test.TestCase):\n\n @parameterized.parameters(\n ('resnet_3d', (8, 224, 224), 5e-5),\n ('resnet_3d', (None, None, None), 5e-5),\n )\n def test_builder(self, backbone_type, input_size, weight_decay):\n input_specs = tf.keras.layers.InputSpec(\n shape=[None, input_size[0], input_size[1], input_size[2], 3])\n model_config = video_classification_cfg.VideoClassificationModel(\n backbone=backbones_3d.Backbone3D(type=backbone_type))\n l2_regularizer = (\n tf.keras.regularizers.l2(weight_decay) if weight_decay else None)\n _ = factory_3d.build_video_classification_model(\n input_specs=input_specs,\n model_config=model_config,\n num_classes=2,\n l2_regularizer=l2_regularizer)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Helper functions for manipulating collections of variables during training.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport re\n\nimport tensorflow.compat.v1 as tf\nimport tf_slim as slim\n\nfrom tensorflow.python.ops import variables as tf_variables\n\n\n# Maps checkpoint types to variable name prefixes that are no longer\n# supported\nDETECTION_FEATURE_EXTRACTOR_MSG = \"\"\"\\\nThe checkpoint type 'detection' is not supported when it contains variable\nnames with 'feature_extractor'. Please download the new checkpoint file\nfrom model zoo.\n\"\"\"\n\nDEPRECATED_CHECKPOINT_MAP = {\n 'detection': ('feature_extractor', DETECTION_FEATURE_EXTRACTOR_MSG)\n}\n\n\n# TODO(derekjchow): Consider replacing with tf.contrib.filter_variables in\n# tensorflow/contrib/framework/python/ops/variables.py\ndef filter_variables(variables, filter_regex_list, invert=False):\n \"\"\"Filters out the variables matching the filter_regex.\n\n Filter out the variables whose name matches any of the regular\n expressions in filter_regex_list and returns the remaining variables.\n Optionally, if invert=True, the complement set is returned.\n\n Args:\n variables: a list of tensorflow variables.\n filter_regex_list: a list of string regular expressions.\n invert: (boolean). 
If True, returns the complement of the filter set; that\n is, all variables matching filter_regex are kept and all others discarded.\n\n Returns:\n a list of filtered variables.\n \"\"\"\n kept_vars = []\n variables_to_ignore_patterns = list([fre for fre in filter_regex_list if fre])\n for var in variables:\n add = True\n for pattern in variables_to_ignore_patterns:\n if re.match(pattern, var.op.name):\n add = False\n break\n if add != invert:\n kept_vars.append(var)\n return kept_vars\n\n\ndef multiply_gradients_matching_regex(grads_and_vars, regex_list, multiplier):\n \"\"\"Multiply gradients whose variable names match a regular expression.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n regex_list: A list of string regular expressions.\n multiplier: A (float) multiplier to apply to each gradient matching the\n regular expression.\n\n Returns:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n \"\"\"\n variables = [pair[1] for pair in grads_and_vars]\n matching_vars = filter_variables(variables, regex_list, invert=True)\n for var in matching_vars:\n logging.info('Applying multiplier %f to variable [%s]',\n multiplier, var.op.name)\n grad_multipliers = {var: float(multiplier) for var in matching_vars}\n return slim.learning.multiply_gradients(grads_and_vars,\n grad_multipliers)\n\n\ndef freeze_gradients_matching_regex(grads_and_vars, regex_list):\n \"\"\"Freeze gradients whose variable names match a regular expression.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n regex_list: A list of string regular expressions.\n\n Returns:\n grads_and_vars: A list of gradient to variable pairs (tuples) that do not\n contain the variables and gradients matching the regex.\n \"\"\"\n variables = [pair[1] for pair in grads_and_vars]\n matching_vars = filter_variables(variables, regex_list, invert=True)\n kept_grads_and_vars = [pair for pair in grads_and_vars\n if pair[1] not in matching_vars]\n for var in matching_vars:\n logging.info('Freezing variable [%s]', var.op.name)\n return kept_grads_and_vars\n\n\ndef get_variables_available_in_checkpoint(variables,\n checkpoint_path,\n include_global_step=True):\n \"\"\"Returns the subset of variables available in the checkpoint.\n\n Inspects given checkpoint and returns the subset of variables that are\n available in it.\n\n TODO(rathodv): force input and output to be a dictionary.\n\n Args:\n variables: a list or dictionary of variables to find in checkpoint.\n checkpoint_path: path to the checkpoint to restore variables from.\n include_global_step: whether to include `global_step` variable, if it\n exists. 
Default True.\n\n Returns:\n A list or dictionary of variables.\n Raises:\n ValueError: if `variables` is not a list or dict.\n \"\"\"\n if isinstance(variables, list):\n variable_names_map = {}\n for variable in variables:\n if isinstance(variable, tf_variables.PartitionedVariable):\n name = variable.name\n else:\n name = variable.op.name\n variable_names_map[name] = variable\n elif isinstance(variables, dict):\n variable_names_map = variables\n else:\n raise ValueError('`variables` is expected to be a list or dict.')\n ckpt_reader = tf.train.NewCheckpointReader(checkpoint_path)\n ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map()\n if not include_global_step:\n ckpt_vars_to_shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)\n vars_in_ckpt = {}\n for variable_name, variable in sorted(variable_names_map.items()):\n if variable_name in ckpt_vars_to_shape_map:\n if ckpt_vars_to_shape_map[variable_name] == variable.shape.as_list():\n vars_in_ckpt[variable_name] = variable\n else:\n logging.warning('Variable [%s] is available in checkpoint, but has an '\n 'incompatible shape with model variable. Checkpoint '\n 'shape: [%s], model variable shape: [%s]. This '\n 'variable will not be initialized from the checkpoint.',\n variable_name, ckpt_vars_to_shape_map[variable_name],\n variable.shape.as_list())\n else:\n logging.warning('Variable [%s] is not available in checkpoint',\n variable_name)\n if isinstance(variables, list):\n return list(vars_in_ckpt.values())\n return vars_in_ckpt\n\n\ndef get_global_variables_safely():\n \"\"\"If not executing eagerly, returns tf.global_variables().\n\n Raises a ValueError if eager execution is enabled,\n because the variables are not tracked when executing eagerly.\n\n If executing eagerly, use a Keras model's .variables property instead.\n\n Returns:\n The result of tf.global_variables()\n \"\"\"\n with tf.init_scope():\n if tf.executing_eagerly():\n raise ValueError(\"Global variables collection is not tracked when \"\n \"executing eagerly. Use a Keras model's `.variables` \"\n \"attribute instead.\")\n return tf.global_variables()\n\n\ndef ensure_checkpoint_supported(checkpoint_path, checkpoint_type, model_dir):\n \"\"\"Ensures that the given checkpoint can be properly loaded.\n\n Performs the following checks:\n 1. Raises an error if checkpoint_path and model_dir are the same.\n 2. Checks that checkpoint_path does not contain a deprecated checkpoint file\n by inspecting its variables.\n\n Args:\n checkpoint_path: str, path to checkpoint.\n checkpoint_type: str, denotes the type of checkpoint.\n model_dir: The model directory to store intermediate training checkpoints.\n\n Raises:\n RuntimeError: If\n 1. We detect a deprecated checkpoint file.\n 2. model_dir and checkpoint_path are in the same directory.\n \"\"\"\n variables = tf.train.list_variables(checkpoint_path)\n\n if checkpoint_type in DEPRECATED_CHECKPOINT_MAP:\n blocked_prefix, msg = DEPRECATED_CHECKPOINT_MAP[checkpoint_type]\n for var_name, _ in variables:\n if var_name.startswith(blocked_prefix):\n tf.logging.error('Found variable name - %s with prefix %s', var_name,\n blocked_prefix)\n raise RuntimeError(msg)\n\n checkpoint_path_dir = os.path.abspath(os.path.dirname(checkpoint_path))\n model_dir = os.path.abspath(model_dir)\n\n if model_dir == checkpoint_path_dir:\n raise RuntimeError(\n ('Checkpoint dir ({}) and model_dir ({}) cannot be the same.'.format(\n checkpoint_path_dir, model_dir) +\n (' Please set model_dir to a different path.')))\n", "# Copyright 2021 The TensorFlow Authors. 
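A sketch of how the helpers above are typically combined in a TF1-style fine-tuning setup: freeze one variable group entirely and down-weight the gradients of another. The variable names and regex prefixes below are illustrative only, not conventions of this module:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # the helpers assume graph-mode grads_and_vars

# Toy variables standing in for a backbone and a prediction head.
backbone_w = tf.Variable(3.0, name='FeatureExtractor/w')
head_w = tf.Variable(2.0, name='BoxPredictor/w')
loss = backbone_w ** 2 + head_w ** 2

opt = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
grads_and_vars = opt.compute_gradients(loss)

# Drop gradients for the frozen backbone ...
grads_and_vars = freeze_gradients_matching_regex(
    grads_and_vars, ['^FeatureExtractor/'])
# ... and halve the gradients flowing into the predictor head.
grads_and_vars = multiply_gradients_matching_regex(
    grads_and_vars, ['^BoxPredictor/'], multiplier=0.5)

train_op = opt.apply_gradients(grads_and_vars)
```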
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for official.nlp.data.create_pretraining_data.\"\"\"\nimport random\n\nimport tensorflow as tf\n\nfrom official.nlp.data import create_pretraining_data as cpd\n\n_VOCAB_WORDS = [\"vocab_1\", \"vocab_2\"]\n\n\nclass CreatePretrainingDataTest(tf.test.TestCase):\n\n def assertTokens(self, input_tokens, output_tokens, masked_positions,\n masked_labels):\n # Ensure the masked positions are unique.\n self.assertCountEqual(masked_positions, set(masked_positions))\n\n # Ensure we can reconstruct the input from the output.\n reconstructed_tokens = output_tokens\n for pos, label in zip(masked_positions, masked_labels):\n reconstructed_tokens[pos] = label\n self.assertEqual(input_tokens, reconstructed_tokens)\n\n # Ensure each label is valid.\n for pos, label in zip(masked_positions, masked_labels):\n output_token = output_tokens[pos]\n if (output_token == \"[MASK]\" or output_token in _VOCAB_WORDS or\n output_token == input_tokens[pos]):\n continue\n self.fail(\"invalid mask value: {}\".format(output_token))\n\n def test_wordpieces_to_grams(self):\n tests = [\n ([\"That\", \"cone\"], [(0, 1), (1, 2)]),\n ([\"That\", \"cone\", \"##s\"], [(0, 1), (1, 3)]),\n ([\"Swit\", \"##zer\", \"##land\"], [(0, 3)]),\n ([\"[CLS]\", \"Up\", \"##dog\"], [(1, 3)]),\n ([\"[CLS]\", \"Up\", \"##dog\", \"[SEP]\", \"Down\"], [(1, 3), (4, 5)]),\n ]\n for inp, expected in tests:\n output = cpd._wordpieces_to_grams(inp)\n self.assertEqual(expected, output)\n\n def test_window(self):\n input_list = [1, 2, 3, 4]\n window_outputs = [\n (1, [[1], [2], [3], [4]]),\n (2, [[1, 2], [2, 3], [3, 4]]),\n (3, [[1, 2, 3], [2, 3, 4]]),\n (4, [[1, 2, 3, 4]]),\n (5, []),\n ]\n for window, expected in window_outputs:\n output = cpd._window(input_list, window)\n self.assertEqual(expected, list(output))\n\n def test_create_masked_lm_predictions(self):\n tokens = [\"[CLS]\", \"a\", \"##a\", \"b\", \"##b\", \"c\", \"##c\", \"[SEP]\"]\n rng = random.Random(123)\n for _ in range(0, 5):\n output_tokens, masked_positions, masked_labels = (\n cpd.create_masked_lm_predictions(\n tokens=tokens,\n masked_lm_prob=1.0,\n max_predictions_per_seq=3,\n vocab_words=_VOCAB_WORDS,\n rng=rng,\n do_whole_word_mask=False,\n max_ngram_size=None))\n self.assertEqual(len(masked_positions), 3)\n self.assertEqual(len(masked_labels), 3)\n self.assertTokens(tokens, output_tokens, masked_positions, masked_labels)\n\n def test_create_masked_lm_predictions_whole_word(self):\n tokens = [\"[CLS]\", \"a\", \"##a\", \"b\", \"##b\", \"c\", \"##c\", \"[SEP]\"]\n rng = random.Random(345)\n for _ in range(0, 5):\n output_tokens, masked_positions, masked_labels = (\n cpd.create_masked_lm_predictions(\n tokens=tokens,\n masked_lm_prob=1.0,\n max_predictions_per_seq=3,\n vocab_words=_VOCAB_WORDS,\n rng=rng,\n do_whole_word_mask=True,\n max_ngram_size=None))\n # since we can't get exactly three tokens without breaking a word we\n # only take two.\n self.assertEqual(len(masked_positions), 2)\n 
self.assertEqual(len(masked_labels), 2)\n self.assertTokens(tokens, output_tokens, masked_positions, masked_labels)\n # ensure that we took an entire word.\n self.assertIn(masked_labels, [[\"a\", \"##a\"], [\"b\", \"##b\"], [\"c\", \"##c\"]])\n\n def test_create_masked_lm_predictions_ngram(self):\n tokens = [\"[CLS]\"] + [\"tok{}\".format(i) for i in range(0, 512)] + [\"[SEP]\"]\n rng = random.Random(345)\n for _ in range(0, 5):\n output_tokens, masked_positions, masked_labels = (\n cpd.create_masked_lm_predictions(\n tokens=tokens,\n masked_lm_prob=1.0,\n max_predictions_per_seq=76,\n vocab_words=_VOCAB_WORDS,\n rng=rng,\n do_whole_word_mask=True,\n max_ngram_size=3))\n self.assertEqual(len(masked_positions), 76)\n self.assertEqual(len(masked_labels), 76)\n self.assertTokens(tokens, output_tokens, masked_positions, masked_labels)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Classification and regression network.\"\"\"\n# pylint: disable=g-classes-have-attributes\nimport collections\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Text')\nclass Classification(tf.keras.Model):\n \"\"\"Classification network head for BERT modeling.\n\n This network implements a simple classifier head based on a dense layer. If\n num_classes is one, it can be considered as a regression problem.\n\n *Note* that the network is constructed by\n [Keras Functional API](https://keras.io/guides/functional_api/).\n\n Args:\n input_width: The innermost dimension of the input tensor to this network.\n num_classes: The number of classes that this network should classify to. If\n equal to 1, a regression problem is assumed.\n activation: The activation, if any, for the dense layer in this network.\n initializer: The initializer for the dense layer in this network. Defaults\n to a Glorot uniform initializer.\n output: The output style for this network. Can be either `logits` or\n `predictions`.\n \"\"\"\n\n def __init__(self,\n input_width,\n num_classes,\n initializer='glorot_uniform',\n output='logits',\n **kwargs):\n\n cls_output = tf.keras.layers.Input(\n shape=(input_width,), name='cls_output', dtype=tf.float32)\n\n logits = tf.keras.layers.Dense(\n num_classes,\n activation=None,\n kernel_initializer=initializer,\n name='predictions/transform/logits')(\n cls_output)\n\n if output == 'logits':\n output_tensors = logits\n elif output == 'predictions':\n policy = tf.keras.mixed_precision.global_policy()\n if policy.name == 'mixed_bfloat16':\n # b/158514794: bf16 is not stable with post-softmax cross-entropy.\n policy = tf.float32\n output_tensors = tf.keras.layers.Activation(\n tf.nn.log_softmax, dtype=policy)(\n logits)\n else:\n raise ValueError(\n ('Unknown `output` value \"%s\". 
`output` can be either \"logits\" or '\n '\"predictions\"') % output)\n\n super(Classification, self).__init__(\n inputs=[cls_output], outputs=output_tensors, **kwargs)\n\n # b/164516224\n # Once we've created the network using the Functional API, we call\n # super().__init__ as though we were invoking the Functional API Model\n # constructor, resulting in this object having all the properties of a model\n # created using the Functional API. Once super().__init__ is called, we\n # can assign attributes to `self` - note that all `self` assignments are\n # below this line.\n config_dict = {\n 'input_width': input_width,\n 'num_classes': num_classes,\n 'initializer': initializer,\n 'output': output,\n }\n # We are storing the config dict as a namedtuple here to ensure checkpoint\n # compatibility with an earlier version of this model which did not track\n # the config dict attribute. TF does not track immutable attrs which\n # do not contain Trackables, so by creating a config namedtuple instead of\n # a dict we avoid tracking it.\n config_cls = collections.namedtuple('Config', config_dict.keys())\n self._config = config_cls(**config_dict)\n self.logits = logits\n\n def get_config(self):\n return dict(self._config._asdict())\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Optimizer and learning rate scheduler.\"\"\"\n\nimport tensorflow as tf\n\nfrom official.modeling.hyperparams import params_dict\n\n\nclass LearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"Learning rate schedule.\"\"\"\n\n def __init__(self, initial_learning_rate, hidden_size, warmup_steps):\n \"\"\"Initialize configuration of the learning rate schedule.\n\n Args:\n initial_learning_rate: A float, the initial learning rate.\n hidden_size: An integer, the model dimension in the hidden layers.\n warmup_steps: An integer, the number of steps required for linear warmup.\n \"\"\"\n super(LearningRateSchedule, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.hidden_size = hidden_size\n self.warmup_steps = tf.cast(warmup_steps, tf.float32)\n\n def __call__(self, global_step):\n \"\"\"Calculate learning rate with linear warmup and rsqrt decay.\n\n Args:\n global_step: An integer, the current global step used for learning rate\n calculation.\n\n Returns:\n A float, the learning rate to be used for the current global step.\n \"\"\"\n with tf.name_scope('learning_rate_schedule'):\n global_step = tf.cast(global_step, tf.float32)\n learning_rate = self.initial_learning_rate\n learning_rate *= (self.hidden_size**-0.5)\n # Apply linear warmup\n learning_rate *= tf.minimum(1.0, global_step / self.warmup_steps)\n # Apply rsqrt decay\n learning_rate /= tf.sqrt(tf.maximum(global_step, self.warmup_steps))\n return learning_rate\n\n def get_config(self):\n \"\"\"Get the configuration of the learning rate 
schedule.\"\"\"\n return {\n 'initial_learning_rate': self.initial_learning_rate,\n 'hidden_size': self.hidden_size,\n 'warmup_steps': self.warmup_steps,\n }\n\n\ndef create_optimizer(params: params_dict.ParamsDict):\n \"\"\"Creates optimizer.\"\"\"\n lr_schedule = LearningRateSchedule(params.learning_rate, params.hidden_size,\n params.learning_rate_warmup_steps)\n return tf.keras.optimizers.Adam(\n learning_rate=lr_schedule,\n beta_1=params.adam_beta1,\n beta_2=params.adam_beta2,\n epsilon=params.adam_epsilon)\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Common modules for callbacks.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n# from __future__ import google_type_annotations\nfrom __future__ import print_function\n\nimport os\nfrom typing import Any, List, MutableMapping, Text\n\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.modeling import optimization\nfrom official.utils.misc import keras_utils\n\n\ndef get_callbacks(\n model_checkpoint: bool = True,\n include_tensorboard: bool = True,\n time_history: bool = True,\n track_lr: bool = True,\n write_model_weights: bool = True,\n apply_moving_average: bool = False,\n initial_step: int = 0,\n batch_size: int = 0,\n log_steps: int = 0,\n model_dir: str = None,\n backup_and_restore: bool = False) -> List[tf.keras.callbacks.Callback]:\n \"\"\"Get all callbacks.\"\"\"\n model_dir = model_dir or ''\n callbacks = []\n if model_checkpoint:\n ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')\n callbacks.append(\n tf.keras.callbacks.ModelCheckpoint(\n ckpt_full_path, save_weights_only=True, verbose=1))\n if backup_and_restore:\n backup_dir = os.path.join(model_dir, 'tmp')\n callbacks.append(\n tf.keras.callbacks.experimental.BackupAndRestore(backup_dir))\n if include_tensorboard:\n callbacks.append(\n CustomTensorBoard(\n log_dir=model_dir,\n track_lr=track_lr,\n initial_step=initial_step,\n write_images=write_model_weights,\n profile_batch=0))\n if time_history:\n callbacks.append(\n keras_utils.TimeHistory(\n batch_size,\n log_steps,\n logdir=model_dir if include_tensorboard else None))\n if apply_moving_average:\n # Save moving average model to a different file so that\n # we can resume training from a checkpoint\n ckpt_full_path = os.path.join(model_dir, 'average',\n 'model.ckpt-{epoch:04d}')\n callbacks.append(\n AverageModelCheckpoint(\n update_weights=False,\n filepath=ckpt_full_path,\n save_weights_only=True,\n verbose=1))\n callbacks.append(MovingAverageCallback())\n return callbacks\n\n\ndef get_scalar_from_tensor(t: tf.Tensor) -> int:\n \"\"\"Utility function to convert a Tensor to a scalar.\"\"\"\n t = tf.keras.backend.get_value(t)\n if callable(t):\n return t()\n else:\n return t\n\n\nclass CustomTensorBoard(tf.keras.callbacks.TensorBoard):\n \"\"\"A customized TensorBoard callback that tracks additional datapoints.\n\n Metrics tracked:\n - Global learning rate\n\n 
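Numerically, the `LearningRateSchedule` defined above warms up linearly to a peak of `initial_learning_rate * hidden_size**-0.5 / sqrt(warmup_steps)` and then decays as `1/sqrt(step)`. A quick sanity check of the formula (the constants here are arbitrary, not defaults of the module):

```python
schedule = LearningRateSchedule(
    initial_learning_rate=2.0, hidden_size=512, warmup_steps=1000)

peak = 2.0 * 512 ** -0.5 / 1000 ** 0.5
assert abs(float(schedule(500)) - peak / 2) < 1e-8   # mid-warmup: half peak
assert abs(float(schedule(1000)) - peak) < 1e-8      # end of warmup: peak
assert abs(float(schedule(4000)) - peak / 2) < 1e-8  # rsqrt decay: 1/sqrt(4)
```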
Attributes:\n log_dir: the path of the directory where to save the log files to be parsed\n by TensorBoard.\n track_lr: `bool`, whether or not to track the global learning rate.\n initial_step: the initial step, used for preemption recovery.\n **kwargs: Additional arguments for backwards compatibility. Possible key is\n `period`.\n \"\"\"\n\n # TODO(b/146499062): track params, flops, log lr, l2 loss,\n # classification loss\n\n def __init__(self,\n log_dir: str,\n track_lr: bool = False,\n initial_step: int = 0,\n **kwargs):\n super(CustomTensorBoard, self).__init__(log_dir=log_dir, **kwargs)\n self.step = initial_step\n self._track_lr = track_lr\n\n def on_batch_begin(self,\n epoch: int,\n logs: MutableMapping[str, Any] = None) -> None:\n self.step += 1\n if logs is None:\n logs = {}\n logs.update(self._calculate_metrics())\n super(CustomTensorBoard, self).on_batch_begin(epoch, logs)\n\n def on_epoch_begin(self,\n epoch: int,\n logs: MutableMapping[str, Any] = None) -> None:\n if logs is None:\n logs = {}\n metrics = self._calculate_metrics()\n logs.update(metrics)\n for k, v in metrics.items():\n logging.info('Current %s: %f', k, v)\n super(CustomTensorBoard, self).on_epoch_begin(epoch, logs)\n\n def on_epoch_end(self,\n epoch: int,\n logs: MutableMapping[str, Any] = None) -> None:\n if logs is None:\n logs = {}\n metrics = self._calculate_metrics()\n logs.update(metrics)\n super(CustomTensorBoard, self).on_epoch_end(epoch, logs)\n\n def _calculate_metrics(self) -> MutableMapping[str, Any]:\n logs = {}\n # TODO(b/149030439): disable LR reporting.\n # if self._track_lr:\n # logs['learning_rate'] = self._calculate_lr()\n return logs\n\n def _calculate_lr(self) -> int:\n \"\"\"Calculates the learning rate given the current step.\"\"\"\n return get_scalar_from_tensor(\n self._get_base_optimizer()._decayed_lr(var_dtype=tf.float32)) # pylint:disable=protected-access\n\n def _get_base_optimizer(self) -> tf.keras.optimizers.Optimizer:\n \"\"\"Get the base optimizer used by the current model.\"\"\"\n\n optimizer = self.model.optimizer\n\n # The optimizer might be wrapped by another class, so unwrap it\n while hasattr(optimizer, '_optimizer'):\n optimizer = optimizer._optimizer # pylint:disable=protected-access\n\n return optimizer\n\n\nclass MovingAverageCallback(tf.keras.callbacks.Callback):\n \"\"\"A Callback to be used with a `ExponentialMovingAverage` optimizer.\n\n Applies moving average weights to the model during validation time to test\n and predict on the averaged weights rather than the current model weights.\n Once training is complete, the model weights will be overwritten with the\n averaged weights (by default).\n\n Attributes:\n overwrite_weights_on_train_end: Whether to overwrite the current model\n weights with the averaged weights from the moving average optimizer.\n **kwargs: Any additional callback arguments.\n \"\"\"\n\n def __init__(self, overwrite_weights_on_train_end: bool = False, **kwargs):\n super(MovingAverageCallback, self).__init__(**kwargs)\n self.overwrite_weights_on_train_end = overwrite_weights_on_train_end\n\n def set_model(self, model: tf.keras.Model):\n super(MovingAverageCallback, self).set_model(model)\n assert isinstance(self.model.optimizer,\n optimization.ExponentialMovingAverage)\n self.model.optimizer.shadow_copy(self.model)\n\n def on_test_begin(self, logs: MutableMapping[Text, Any] = None):\n self.model.optimizer.swap_weights()\n\n def on_test_end(self, logs: MutableMapping[Text, Any] = None):\n self.model.optimizer.swap_weights()\n\n def 
on_train_end(self, logs: MutableMapping[Text, Any] = None):\n if self.overwrite_weights_on_train_end:\n self.model.optimizer.assign_average_vars(self.model.variables)\n\n\nclass AverageModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):\n \"\"\"Saves and, optionally, assigns the averaged weights.\n\n Taken from tfa.callbacks.AverageModelCheckpoint.\n\n Attributes:\n update_weights: If True, assign the moving average weights to the model, and\n save them. If False, keep the old non-averaged weights, but the saved\n model uses the average weights. See `tf.keras.callbacks.ModelCheckpoint`\n for the other args.\n \"\"\"\n\n def __init__(self,\n update_weights: bool,\n filepath: str,\n monitor: str = 'val_loss',\n verbose: int = 0,\n save_best_only: bool = False,\n save_weights_only: bool = False,\n mode: str = 'auto',\n save_freq: str = 'epoch',\n **kwargs):\n self.update_weights = update_weights\n super().__init__(filepath, monitor, verbose, save_best_only,\n save_weights_only, mode, save_freq, **kwargs)\n\n def set_model(self, model):\n if not isinstance(model.optimizer, optimization.ExponentialMovingAverage):\n raise TypeError('AverageModelCheckpoint is only used when training '\n 'with MovingAverage')\n return super().set_model(model)\n\n def _save_model(self, epoch, logs):\n assert isinstance(self.model.optimizer,\n optimization.ExponentialMovingAverage)\n\n if self.update_weights:\n self.model.optimizer.assign_average_vars(self.model.variables)\n return super()._save_model(epoch, logs)\n else:\n # Note: `model.get_weights()` gives us the weights (non-ref)\n # whereas `model.variables` returns references to the variables.\n non_avg_weights = self.model.get_weights()\n self.model.optimizer.assign_average_vars(self.model.variables)\n # result is currently None, since `super._save_model` doesn't\n # return anything, but this may change in the future.\n result = super()._save_model(epoch, logs)\n self.model.set_weights(non_avg_weights)\n return result\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Convert checkpoints created by Estimator (tf1) to be Keras compatible.\"\"\"\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf # TF 1.x\n\n# Mapping between old <=> new names. 
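A sketch of wiring the two classes above into `model.fit`. It assumes `model` was compiled with the `ExponentialMovingAverage` optimizer wrapper from `official.modeling.optimization` (both classes assert on that type); the datasets and checkpoint path are placeholders:

```python
# `model`, `train_ds`, and `val_ds` are placeholders for a compiled Keras
# model (using the ExponentialMovingAverage optimizer) and tf.data pipelines.
callbacks = [
    MovingAverageCallback(overwrite_weights_on_train_end=False),
    AverageModelCheckpoint(
        update_weights=False,  # keep live weights; save the averaged ones
        filepath='/tmp/average/model.ckpt-{epoch:04d}',
        save_weights_only=True,
        verbose=1),
]
model.fit(train_ds, validation_data=val_ds, epochs=5, callbacks=callbacks)
```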
The source pattern in original variable\n# name will be replaced by destination pattern.\nBERT_NAME_REPLACEMENTS = (\n (\"bert\", \"bert_model\"),\n (\"embeddings/word_embeddings\", \"word_embeddings/embeddings\"),\n (\"embeddings/token_type_embeddings\",\n \"embedding_postprocessor/type_embeddings\"),\n (\"embeddings/position_embeddings\",\n \"embedding_postprocessor/position_embeddings\"),\n (\"embeddings/LayerNorm\", \"embedding_postprocessor/layer_norm\"),\n (\"attention/self\", \"self_attention\"),\n (\"attention/output/dense\", \"self_attention_output\"),\n (\"attention/output/LayerNorm\", \"self_attention_layer_norm\"),\n (\"intermediate/dense\", \"intermediate\"),\n (\"output/dense\", \"output\"),\n (\"output/LayerNorm\", \"output_layer_norm\"),\n (\"pooler/dense\", \"pooler_transform\"),\n)\n\nBERT_V2_NAME_REPLACEMENTS = (\n (\"bert/\", \"\"),\n (\"encoder\", \"transformer\"),\n (\"embeddings/word_embeddings\", \"word_embeddings/embeddings\"),\n (\"embeddings/token_type_embeddings\", \"type_embeddings/embeddings\"),\n (\"embeddings/position_embeddings\", \"position_embedding/embeddings\"),\n (\"embeddings/LayerNorm\", \"embeddings/layer_norm\"),\n (\"attention/self\", \"self_attention\"),\n (\"attention/output/dense\", \"self_attention/attention_output\"),\n (\"attention/output/LayerNorm\", \"self_attention_layer_norm\"),\n (\"intermediate/dense\", \"intermediate\"),\n (\"output/dense\", \"output\"),\n (\"output/LayerNorm\", \"output_layer_norm\"),\n (\"pooler/dense\", \"pooler_transform\"),\n (\"cls/predictions\", \"bert/cls/predictions\"),\n (\"cls/predictions/output_bias\", \"cls/predictions/output_bias/bias\"),\n (\"cls/seq_relationship/output_bias\", \"predictions/transform/logits/bias\"),\n (\"cls/seq_relationship/output_weights\",\n \"predictions/transform/logits/kernel\"),\n)\n\nBERT_PERMUTATIONS = ()\n\nBERT_V2_PERMUTATIONS = ((\"cls/seq_relationship/output_weights\", (1, 0)),)\n\n\ndef _bert_name_replacement(var_name, name_replacements):\n \"\"\"Gets the variable name replacement.\"\"\"\n for src_pattern, tgt_pattern in name_replacements:\n if src_pattern in var_name:\n old_var_name = var_name\n var_name = var_name.replace(src_pattern, tgt_pattern)\n tf.logging.info(\"Converted: %s --> %s\", old_var_name, var_name)\n return var_name\n\n\ndef _has_exclude_patterns(name, exclude_patterns):\n \"\"\"Checks if a string contains substrings that match patterns to exclude.\"\"\"\n for p in exclude_patterns:\n if p in name:\n return True\n return False\n\n\ndef _get_permutation(name, permutations):\n \"\"\"Checks whether a variable requires transposition by pattern matching.\"\"\"\n for src_pattern, permutation in permutations:\n if src_pattern in name:\n tf.logging.info(\"Permuted: %s --> %s\", name, permutation)\n return permutation\n\n return None\n\n\ndef _get_new_shape(name, shape, num_heads):\n \"\"\"Checks whether a variable requires reshape by pattern matching.\"\"\"\n if \"self_attention/attention_output/kernel\" in name:\n return tuple([num_heads, shape[0] // num_heads, shape[1]])\n if \"self_attention/attention_output/bias\" in name:\n return shape\n\n patterns = [\n \"self_attention/query\", \"self_attention/value\", \"self_attention/key\"\n ]\n for pattern in patterns:\n if pattern in name:\n if \"kernel\" in name:\n return tuple([shape[0], num_heads, shape[1] // num_heads])\n if \"bias\" in name:\n return tuple([num_heads, shape[0] // num_heads])\n return None\n\n\ndef create_v2_checkpoint(model,\n src_checkpoint,\n output_path,\n checkpoint_model_name=\"model\"):\n 
\"\"\"Converts a name-based matched TF V1 checkpoint to TF V2 checkpoint.\"\"\"\n # Uses streaming-restore in eager mode to read V1 name-based checkpoints.\n model.load_weights(src_checkpoint).assert_existing_objects_matched()\n if hasattr(model, \"checkpoint_items\"):\n checkpoint_items = model.checkpoint_items\n else:\n checkpoint_items = {}\n\n checkpoint_items[checkpoint_model_name] = model\n checkpoint = tf.train.Checkpoint(**checkpoint_items)\n checkpoint.save(output_path)\n\n\ndef convert(checkpoint_from_path,\n checkpoint_to_path,\n num_heads,\n name_replacements,\n permutations,\n exclude_patterns=None):\n \"\"\"Migrates the names of variables within a checkpoint.\n\n Args:\n checkpoint_from_path: Path to source checkpoint to be read in.\n checkpoint_to_path: Path to checkpoint to be written out.\n num_heads: The number of heads of the model.\n name_replacements: A list of tuples of the form (match_str, replace_str)\n describing variable names to adjust.\n permutations: A list of tuples of the form (match_str, permutation)\n describing permutations to apply to given variables. Note that match_str\n should match the original variable name, not the replaced one.\n exclude_patterns: A list of string patterns to exclude variables from\n checkpoint conversion.\n\n Returns:\n A dictionary that maps the new variable names to the Variable objects.\n A dictionary that maps the old variable names to the new variable names.\n \"\"\"\n with tf.Graph().as_default():\n tf.logging.info(\"Reading checkpoint_from_path %s\", checkpoint_from_path)\n reader = tf.train.NewCheckpointReader(checkpoint_from_path)\n name_shape_map = reader.get_variable_to_shape_map()\n new_variable_map = {}\n conversion_map = {}\n for var_name in name_shape_map:\n if exclude_patterns and _has_exclude_patterns(var_name, exclude_patterns):\n continue\n # Get the original tensor data.\n tensor = reader.get_tensor(var_name)\n\n # Look up the new variable name, if any.\n new_var_name = _bert_name_replacement(var_name, name_replacements)\n\n # See if we need to reshape the underlying tensor.\n new_shape = None\n if num_heads > 0:\n new_shape = _get_new_shape(new_var_name, tensor.shape, num_heads)\n if new_shape:\n tf.logging.info(\"Variable %s has a shape change from %s to %s\",\n var_name, tensor.shape, new_shape)\n tensor = np.reshape(tensor, new_shape)\n\n # See if we need to permute the underlying tensor.\n permutation = _get_permutation(var_name, permutations)\n if permutation:\n tensor = np.transpose(tensor, permutation)\n\n # Create a new variable with the possibly-reshaped or transposed tensor.\n var = tf.Variable(tensor, name=var_name)\n\n # Save the variable into the new variable map.\n new_variable_map[new_var_name] = var\n\n # Keep a list of converted variables for sanity checking.\n if new_var_name != var_name:\n conversion_map[var_name] = new_var_name\n\n saver = tf.train.Saver(new_variable_map)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n tf.logging.info(\"Writing checkpoint_to_path %s\", checkpoint_to_path)\n saver.save(sess, checkpoint_to_path, write_meta_graph=False)\n\n tf.logging.info(\"Summary:\")\n tf.logging.info(\" Converted %d variable name(s).\", len(new_variable_map))\n tf.logging.info(\" Converted: %s\", str(conversion_map))\n", "# Copyright 2021 The TensorFlow Authors. 
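A sketch of driving `convert` with the v2 name maps defined above. The checkpoint paths are placeholders, `num_heads` must match the source model, and excluding the Adam slot variables is a common choice rather than a requirement of this module:

```python
convert(
    checkpoint_from_path='/tmp/bert_v1/bert_model.ckpt',
    checkpoint_to_path='/tmp/bert_keras/bert_model.ckpt',
    num_heads=12,  # must match the source BERT configuration
    name_replacements=BERT_V2_NAME_REPLACEMENTS,
    permutations=BERT_V2_PERMUTATIONS,
    exclude_patterns=['adam_m', 'adam_v'])  # skip optimizer slot variables
```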
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The COCO-style evaluator.\n\nThe following snippet demonstrates the use of interfaces:\n\n evaluator = COCOEvaluator(...)\n for _ in range(num_evals):\n for _ in range(num_batches_per_eval):\n groundtruths, predictions = predictor.predict(...) # pop a batch.\n evaluator.update_state(groundtruths, predictions)\n evaluator.result() # finish one full eval and reset states.\n\nSee also: https://github.com/cocodataset/cocoapi/\n\"\"\"\n\nimport atexit\nimport tempfile\n# Import libraries\nfrom absl import logging\nimport numpy as np\nfrom pycocotools import cocoeval\nimport six\nimport tensorflow as tf\n\nfrom official.vision.beta.evaluation import coco_utils\n\n\nclass COCOEvaluator(object):\n \"\"\"COCO evaluation metric class.\"\"\"\n\n def __init__(self,\n annotation_file,\n include_mask,\n need_rescale_bboxes=True,\n per_category_metrics=False):\n \"\"\"Constructs COCO evaluation class.\n\n The class provides the interface to COCO metrics_fn. The\n _update_op() takes detections from each image and pushes them to\n self.detections. The _evaluate() loads a JSON file in COCO annotation format\n as the groundtruths and runs COCO evaluation.\n\n Args:\n annotation_file: a JSON file that stores annotations of the eval dataset.\n If `annotation_file` is None, groundtruth annotations will be loaded\n from the dataloader.\n include_mask: a boolean to indicate whether or not to include the mask\n eval.\n need_rescale_bboxes: If true, bboxes in `predictions` will be rescaled back\n to absolute values (`image_info` is needed in this case).\n per_category_metrics: Whether to return per category metrics.\n \"\"\"\n if annotation_file:\n if annotation_file.startswith('gs://'):\n _, local_val_json = tempfile.mkstemp(suffix='.json')\n tf.io.gfile.remove(local_val_json)\n\n tf.io.gfile.copy(annotation_file, local_val_json)\n atexit.register(tf.io.gfile.remove, local_val_json)\n else:\n local_val_json = annotation_file\n self._coco_gt = coco_utils.COCOWrapper(\n eval_type=('mask' if include_mask else 'box'),\n annotation_file=local_val_json)\n self._annotation_file = annotation_file\n self._include_mask = include_mask\n self._per_category_metrics = per_category_metrics\n self._metric_names = [\n 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1', 'ARmax10',\n 'ARmax100', 'ARs', 'ARm', 'ARl'\n ]\n self._required_prediction_fields = [\n 'source_id', 'num_detections', 'detection_classes', 'detection_scores',\n 'detection_boxes'\n ]\n self._need_rescale_bboxes = need_rescale_bboxes\n if self._need_rescale_bboxes:\n self._required_prediction_fields.append('image_info')\n self._required_groundtruth_fields = [\n 'source_id', 'height', 'width', 'classes', 'boxes'\n ]\n if self._include_mask:\n mask_metric_names = ['mask_' + x for x in self._metric_names]\n self._metric_names.extend(mask_metric_names)\n self._required_prediction_fields.extend(['detection_masks'])\n self._required_groundtruth_fields.extend(['masks'])\n\n self.reset_states()\n\n 
@property\n def name(self):\n return 'coco_metric'\n\n def reset_states(self):\n \"\"\"Resets internal states for a fresh run.\"\"\"\n self._predictions = {}\n if not self._annotation_file:\n self._groundtruths = {}\n\n def result(self):\n \"\"\"Evaluates detection results and resets states.\"\"\"\n metric_dict = self.evaluate()\n # Cleans up the internal variables in order for a fresh eval next time.\n self.reset_states()\n return metric_dict\n\n def evaluate(self):\n \"\"\"Evaluates with detections from all images with COCO API.\n\n Returns:\n coco_metric: float numpy array with shape [24] representing the\n coco-style evaluation metrics (box and mask).\n \"\"\"\n if not self._annotation_file:\n logging.info('There is no annotation_file in COCOEvaluator.')\n gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(\n self._groundtruths)\n coco_gt = coco_utils.COCOWrapper(\n eval_type=('mask' if self._include_mask else 'box'),\n gt_dataset=gt_dataset)\n else:\n logging.info('Using annotation file: %s', self._annotation_file)\n coco_gt = self._coco_gt\n coco_predictions = coco_utils.convert_predictions_to_coco_annotations(\n self._predictions)\n coco_dt = coco_gt.loadRes(predictions=coco_predictions)\n image_ids = [ann['image_id'] for ann in coco_predictions]\n\n coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')\n coco_eval.params.imgIds = image_ids\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n coco_metrics = coco_eval.stats\n\n if self._include_mask:\n mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')\n mcoco_eval.params.imgIds = image_ids\n mcoco_eval.evaluate()\n mcoco_eval.accumulate()\n mcoco_eval.summarize()\n mask_coco_metrics = mcoco_eval.stats\n\n if self._include_mask:\n metrics = np.hstack((coco_metrics, mask_coco_metrics))\n else:\n metrics = coco_metrics\n\n metrics_dict = {}\n for i, name in enumerate(self._metric_names):\n metrics_dict[name] = metrics[i].astype(np.float32)\n\n # Adds metrics per category.\n if self._per_category_metrics:\n metrics_dict.update(self._retrieve_per_category_metrics(coco_eval))\n\n if self._include_mask:\n metrics_dict.update(self._retrieve_per_category_metrics(\n mcoco_eval, prefix='mask'))\n\n return metrics_dict\n\n def _retrieve_per_category_metrics(self, coco_eval, prefix=''):\n \"\"\"Retrieves per-category metrics and returns them in a dict.\n\n Args:\n coco_eval: a cocoeval.COCOeval object containing evaluation data.\n prefix: str, A string used to prefix metric names.\n\n Returns:\n metrics_dict: A dictionary with per category metrics.\n \"\"\"\n\n metrics_dict = {}\n if prefix:\n prefix = prefix + ' '\n\n if hasattr(coco_eval, 'category_stats'):\n for category_index, category_id in enumerate(coco_eval.params.catIds):\n if self._annotation_file:\n coco_category = self._coco_gt.cats[category_id]\n # if 'name' is available use it, otherwise use `id`\n category_display_name = coco_category.get('name', category_id)\n else:\n category_display_name = category_id\n\n metrics_dict[prefix + 'Precision mAP ByCategory/{}'.format(\n category_display_name\n )] = coco_eval.category_stats[0][category_index].astype(np.float32)\n metrics_dict[prefix + 'Precision mAP ByCategory@50IoU/{}'.format(\n category_display_name\n )] = coco_eval.category_stats[1][category_index].astype(np.float32)\n metrics_dict[prefix + 'Precision mAP ByCategory@75IoU/{}'.format(\n category_display_name\n )] = coco_eval.category_stats[2][category_index].astype(np.float32)\n metrics_dict[prefix + 'Precision mAP 
ByCategory (small) /{}'.format(\n category_display_name\n )] = coco_eval.category_stats[3][category_index].astype(np.float32)\n metrics_dict[prefix + 'Precision mAP ByCategory (medium) /{}'.format(\n category_display_name\n )] = coco_eval.category_stats[4][category_index].astype(np.float32)\n metrics_dict[prefix + 'Precision mAP ByCategory (large) /{}'.format(\n category_display_name\n )] = coco_eval.category_stats[5][category_index].astype(np.float32)\n metrics_dict[prefix + 'Recall AR@1 ByCategory/{}'.format(\n category_display_name\n )] = coco_eval.category_stats[6][category_index].astype(np.float32)\n metrics_dict[prefix + 'Recall AR@10 ByCategory/{}'.format(\n category_display_name\n )] = coco_eval.category_stats[7][category_index].astype(np.float32)\n metrics_dict[prefix + 'Recall AR@100 ByCategory/{}'.format(\n category_display_name\n )] = coco_eval.category_stats[8][category_index].astype(np.float32)\n metrics_dict[prefix + 'Recall AR (small) ByCategory/{}'.format(\n category_display_name\n )] = coco_eval.category_stats[9][category_index].astype(np.float32)\n metrics_dict[prefix + 'Recall AR (medium) ByCategory/{}'.format(\n category_display_name\n )] = coco_eval.category_stats[10][category_index].astype(np.float32)\n metrics_dict[prefix + 'Recall AR (large) ByCategory/{}'.format(\n category_display_name\n )] = coco_eval.category_stats[11][category_index].astype(np.float32)\n\n return metrics_dict\n\n def _process_predictions(self, predictions):\n image_scale = np.tile(predictions['image_info'][:, 2:3, :], (1, 1, 2))\n predictions['detection_boxes'] = (\n predictions['detection_boxes'].astype(np.float32))\n predictions['detection_boxes'] /= image_scale\n if 'detection_outer_boxes' in predictions:\n predictions['detection_outer_boxes'] = (\n predictions['detection_outer_boxes'].astype(np.float32))\n predictions['detection_outer_boxes'] /= image_scale\n\n def _convert_to_numpy(self, groundtruths, predictions):\n \"\"\"Converts tensors to numpy arrays.\"\"\"\n if groundtruths:\n labels = tf.nest.map_structure(lambda x: x.numpy(), groundtruths)\n numpy_groundtruths = {}\n for key, val in labels.items():\n if isinstance(val, tuple):\n val = np.concatenate(val)\n numpy_groundtruths[key] = val\n else:\n numpy_groundtruths = groundtruths\n\n if predictions:\n outputs = tf.nest.map_structure(lambda x: x.numpy(), predictions)\n numpy_predictions = {}\n for key, val in outputs.items():\n if isinstance(val, tuple):\n val = np.concatenate(val)\n numpy_predictions[key] = val\n else:\n numpy_predictions = predictions\n\n return numpy_groundtruths, numpy_predictions\n\n def update_state(self, groundtruths, predictions):\n \"\"\"Update and aggregate detection results and groundtruth data.\n\n Args:\n groundtruths: a dictionary of Tensors including the fields below.\n See also different parsers under `../dataloader` for more details.\n Required fields:\n - source_id: a numpy array of int or string of shape [batch_size].\n - height: a numpy array of int of shape [batch_size].\n - width: a numpy array of int of shape [batch_size].\n - num_detections: a numpy array of int of shape [batch_size].\n - boxes: a numpy array of float of shape [batch_size, K, 4].\n - classes: a numpy array of int of shape [batch_size, K].\n Optional fields:\n - is_crowds: a numpy array of int of shape [batch_size, K]. If the\n field is absent, it is assumed that this instance is not crowd.\n - areas: a numpy array of float of shape [batch_size, K]. 
If the\n field is absent, the area is calculated using either boxes or\n masks depending on which one is available.\n - masks: a numpy array of float of shape\n [batch_size, K, mask_height, mask_width],\n predictions: a dictionary of tensors including the fields below.\n See different parsers under `../dataloader` for more details.\n Required fields:\n - source_id: a numpy array of int or string of shape [batch_size].\n - image_info [if `need_rescale_bboxes` is True]: a numpy array of\n float of shape [batch_size, 4, 2].\n - num_detections: a numpy array of\n int of shape [batch_size].\n - detection_boxes: a numpy array of float of shape [batch_size, K, 4].\n - detection_classes: a numpy array of int of shape [batch_size, K].\n - detection_scores: a numpy array of float of shape [batch_size, K].\n Optional fields:\n - detection_masks: a numpy array of float of shape\n [batch_size, K, mask_height, mask_width].\n Raises:\n ValueError: if the required prediction or groundtruth fields are not\n present in the incoming `predictions` or `groundtruths`.\n \"\"\"\n groundtruths, predictions = self._convert_to_numpy(groundtruths,\n predictions)\n for k in self._required_prediction_fields:\n if k not in predictions:\n raise ValueError(\n 'Missing the required key `{}` in predictions!'.format(k))\n if self._need_rescale_bboxes:\n self._process_predictions(predictions)\n for k, v in six.iteritems(predictions):\n if k not in self._predictions:\n self._predictions[k] = [v]\n else:\n self._predictions[k].append(v)\n\n if not self._annotation_file:\n assert groundtruths\n for k in self._required_groundtruth_fields:\n if k not in groundtruths:\n raise ValueError(\n 'Missing the required key `{}` in groundtruths!'.format(k))\n for k, v in six.iteritems(groundtruths):\n if k not in self._groundtruths:\n self._groundtruths[k] = [v]\n else:\n self._groundtruths[k].append(v)\n", "# Copyright 2021 The TensorFlow Authors. 
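Continuing the interface snippet from the module docstring, a minimal sketch of one update/result cycle with only the required fields. All values are dummies that merely exercise the interface; shapes follow the `update_state` documentation above (batch_size=1, K=1):

```python
import tensorflow as tf

evaluator = COCOEvaluator(
    annotation_file=None, include_mask=False, need_rescale_bboxes=False)

groundtruths = {
    'source_id': tf.constant([1]),
    'height': tf.constant([640]),
    'width': tf.constant([640]),
    'num_detections': tf.constant([1]),
    'boxes': tf.constant([[[10., 10., 100., 100.]]]),
    'classes': tf.constant([[1]]),
}
predictions = {
    'source_id': tf.constant([1]),
    'num_detections': tf.constant([1]),
    'detection_boxes': tf.constant([[[10., 10., 100., 100.]]]),
    'detection_classes': tf.constant([[1]]),
    'detection_scores': tf.constant([[0.9]]),
}

evaluator.update_state(groundtruths, predictions)
metrics = evaluator.result()  # runs COCO evaluation and resets state
```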
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains definitions of EfficientNet Networks.\"\"\"\n\nimport math\nfrom typing import Any, List, Tuple\n\n# Import libraries\n\nimport tensorflow as tf\n\nfrom official.modeling import hyperparams\nfrom official.modeling import tf_utils\nfrom official.vision.beta.modeling.backbones import factory\nfrom official.vision.beta.modeling.layers import nn_blocks\nfrom official.vision.beta.modeling.layers import nn_layers\n\nlayers = tf.keras.layers\n\n# The fixed EfficientNet-B0 architecture discovered by NAS.\n# Each element represents a specification of a building block:\n# (block_fn, block_repeats, kernel_size, strides, expand_ratio, in_filters,\n# out_filters, is_output)\nEN_B0_BLOCK_SPECS = [\n ('mbconv', 1, 3, 1, 1, 32, 16, False),\n ('mbconv', 2, 3, 2, 6, 16, 24, True),\n ('mbconv', 2, 5, 2, 6, 24, 40, True),\n ('mbconv', 3, 3, 2, 6, 40, 80, False),\n ('mbconv', 3, 5, 1, 6, 80, 112, True),\n ('mbconv', 4, 5, 2, 6, 112, 192, False),\n ('mbconv', 1, 3, 1, 6, 192, 320, True),\n]\n\nSCALING_MAP = {\n 'b0': dict(width_scale=1.0, depth_scale=1.0),\n 'b1': dict(width_scale=1.0, depth_scale=1.1),\n 'b2': dict(width_scale=1.1, depth_scale=1.2),\n 'b3': dict(width_scale=1.2, depth_scale=1.4),\n 'b4': dict(width_scale=1.4, depth_scale=1.8),\n 'b5': dict(width_scale=1.6, depth_scale=2.2),\n 'b6': dict(width_scale=1.8, depth_scale=2.6),\n 'b7': dict(width_scale=2.0, depth_scale=3.1),\n}\n\n\nclass BlockSpec():\n \"\"\"A container class that specifies the block configuration for EfficientNet.\"\"\"\n\n def __init__(self, block_fn: str, block_repeats: int, kernel_size: int,\n strides: int, expand_ratio: float, in_filters: int,\n out_filters: int, is_output: bool, width_scale: float,\n depth_scale: float):\n self.block_fn = block_fn\n self.block_repeats = round_repeats(block_repeats, depth_scale)\n self.kernel_size = kernel_size\n self.strides = strides\n self.expand_ratio = expand_ratio\n self.in_filters = nn_layers.round_filters(in_filters, width_scale)\n self.out_filters = nn_layers.round_filters(out_filters, width_scale)\n self.is_output = is_output\n\n\ndef round_repeats(repeats: int, multiplier: float, skip: bool = False) -> int:\n \"\"\"Returns the rounded number of block repeats based on the depth multiplier.\"\"\"\n if skip or not multiplier:\n return repeats\n return int(math.ceil(multiplier * repeats))\n\n\ndef block_spec_decoder(specs: List[Tuple[Any, ...]], width_scale: float,\n depth_scale: float) -> List[BlockSpec]:\n \"\"\"Decodes and returns specs for a block.\"\"\"\n decoded_specs = []\n for s in specs:\n s = s + (\n width_scale,\n depth_scale,\n )\n decoded_specs.append(BlockSpec(*s))\n return decoded_specs\n\n\[email protected]_keras_serializable(package='Vision')\nclass EfficientNet(tf.keras.Model):\n \"\"\"Creates an EfficientNet family model.\n\n This implements the EfficientNet model from:\n Mingxing Tan, Quoc V. 
Le.\n EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.\n (https://arxiv.org/pdf/1905.11946)\n \"\"\"\n\n def __init__(self,\n model_id: str,\n input_specs: tf.keras.layers.InputSpec = layers.InputSpec(\n shape=[None, None, None, 3]),\n se_ratio: float = 0.0,\n stochastic_depth_drop_rate: float = 0.0,\n kernel_initializer: str = 'VarianceScaling',\n kernel_regularizer: tf.keras.regularizers.Regularizer = None,\n bias_regularizer: tf.keras.regularizers.Regularizer = None,\n activation: str = 'relu',\n use_sync_bn: bool = False,\n norm_momentum: float = 0.99,\n norm_epsilon: float = 0.001,\n **kwargs):\n \"\"\"Initializes an EfficientNet model.\n\n Args:\n model_id: A `str` of model ID of EfficientNet.\n input_specs: A `tf.keras.layers.InputSpec` of the input tensor.\n se_ratio: A `float` of squeeze and excitation ratio for inverted\n bottleneck blocks.\n stochastic_depth_drop_rate: A `float` of drop rate for drop connect layer.\n kernel_initializer: A `str` for kernel initializer of convolutional\n layers.\n kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for\n Conv2D. Default to None.\n bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.\n Default to None.\n activation: A `str` of name of the activation function.\n use_sync_bn: If True, use synchronized batch normalization.\n norm_momentum: A `float` of normalization momentum for the moving average.\n norm_epsilon: A `float` added to variance to avoid dividing by zero.\n **kwargs: Additional keyword arguments to be passed.\n \"\"\"\n self._model_id = model_id\n self._input_specs = input_specs\n self._se_ratio = se_ratio\n self._stochastic_depth_drop_rate = stochastic_depth_drop_rate\n self._use_sync_bn = use_sync_bn\n self._activation = activation\n self._kernel_initializer = kernel_initializer\n self._norm_momentum = norm_momentum\n self._norm_epsilon = norm_epsilon\n self._kernel_regularizer = kernel_regularizer\n self._bias_regularizer = bias_regularizer\n if use_sync_bn:\n self._norm = layers.experimental.SyncBatchNormalization\n else:\n self._norm = layers.BatchNormalization\n\n if tf.keras.backend.image_data_format() == 'channels_last':\n bn_axis = -1\n else:\n bn_axis = 1\n\n # Build EfficientNet.\n inputs = tf.keras.Input(shape=input_specs.shape[1:])\n width_scale = SCALING_MAP[model_id]['width_scale']\n depth_scale = SCALING_MAP[model_id]['depth_scale']\n\n # Build stem.\n x = layers.Conv2D(\n filters=nn_layers.round_filters(32, width_scale),\n kernel_size=3,\n strides=2,\n use_bias=False,\n padding='same',\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer)(\n inputs)\n x = self._norm(\n axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(\n x)\n x = tf_utils.get_activation(activation)(x)\n\n # Build intermediate blocks.\n endpoints = {}\n endpoint_level = 2\n decoded_specs = block_spec_decoder(EN_B0_BLOCK_SPECS, width_scale,\n depth_scale)\n\n for i, specs in enumerate(decoded_specs):\n x = self._block_group(\n inputs=x, specs=specs, name='block_group_{}'.format(i))\n if specs.is_output:\n endpoints[str(endpoint_level)] = x\n endpoint_level += 1\n\n # Build output specs for downstream tasks.\n self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}\n\n # Build the final conv for classification.\n x = layers.Conv2D(\n filters=nn_layers.round_filters(1280, width_scale),\n kernel_size=1,\n strides=1,\n use_bias=False,\n padding='same',\n 
kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer)(\n x)\n x = self._norm(\n axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(\n x)\n endpoints[str(endpoint_level)] = tf_utils.get_activation(activation)(x)\n\n super(EfficientNet, self).__init__(\n inputs=inputs, outputs=endpoints, **kwargs)\n\n def _block_group(self,\n inputs: tf.Tensor,\n specs: BlockSpec,\n name: str = 'block_group'):\n \"\"\"Creates one group of blocks for the EfficientNet model.\n\n Args:\n inputs: A `tf.Tensor` of size `[batch, channels, height, width]`.\n specs: The specifications for one inverted bottleneck block group.\n name: A `str` name for the block.\n\n Returns:\n The output `tf.Tensor` of the block layer.\n \"\"\"\n if specs.block_fn == 'mbconv':\n block_fn = nn_blocks.InvertedBottleneckBlock\n else:\n raise ValueError('Block func {} not supported.'.format(specs.block_fn))\n\n x = block_fn(\n in_filters=specs.in_filters,\n out_filters=specs.out_filters,\n expand_ratio=specs.expand_ratio,\n strides=specs.strides,\n kernel_size=specs.kernel_size,\n se_ratio=self._se_ratio,\n stochastic_depth_drop_rate=self._stochastic_depth_drop_rate,\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activation=self._activation,\n use_sync_bn=self._use_sync_bn,\n norm_momentum=self._norm_momentum,\n norm_epsilon=self._norm_epsilon)(\n inputs)\n\n for _ in range(1, specs.block_repeats):\n x = block_fn(\n in_filters=specs.out_filters, # Set 'in_filters' to 'out_filters'.\n out_filters=specs.out_filters,\n expand_ratio=specs.expand_ratio,\n strides=1, # Fix strides to 1.\n kernel_size=specs.kernel_size,\n se_ratio=self._se_ratio,\n stochastic_depth_drop_rate=self._stochastic_depth_drop_rate,\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activation=self._activation,\n use_sync_bn=self._use_sync_bn,\n norm_momentum=self._norm_momentum,\n norm_epsilon=self._norm_epsilon)(\n x)\n\n return tf.identity(x, name=name)\n\n def get_config(self):\n config_dict = {\n 'model_id': self._model_id,\n 'se_ratio': self._se_ratio,\n 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,\n 'kernel_initializer': self._kernel_initializer,\n 'kernel_regularizer': self._kernel_regularizer,\n 'bias_regularizer': self._bias_regularizer,\n 'activation': self._activation,\n 'use_sync_bn': self._use_sync_bn,\n 'norm_momentum': self._norm_momentum,\n 'norm_epsilon': self._norm_epsilon\n }\n return config_dict\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n @property\n def output_specs(self):\n \"\"\"A dict of {level: TensorShape} pairs for the model output.\"\"\"\n return self._output_specs\n\n\[email protected]_backbone_builder('efficientnet')\ndef build_efficientnet(\n input_specs: tf.keras.layers.InputSpec,\n backbone_config: hyperparams.Config,\n norm_activation_config: hyperparams.Config,\n l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:\n \"\"\"Builds EfficientNet backbone from a config.\"\"\"\n backbone_type = backbone_config.type\n backbone_cfg = backbone_config.get()\n assert backbone_type == 'efficientnet', (f'Inconsistent backbone type '\n f'{backbone_type}')\n\n return EfficientNet(\n model_id=backbone_cfg.model_id,\n input_specs=input_specs,\n 
stochastic_depth_drop_rate=backbone_cfg.stochastic_depth_drop_rate,\n se_ratio=backbone_cfg.se_ratio,\n activation=norm_activation_config.activation,\n use_sync_bn=norm_activation_config.use_sync_bn,\n norm_momentum=norm_activation_config.norm_momentum,\n norm_epsilon=norm_activation_config.norm_epsilon,\n kernel_regularizer=l2_regularizer)\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Exports models to tf.saved_model.\n\nExport example:\n\n```shell\npython3 export_saved_model.py \\\n --output_path=/tmp/movinet/ \\\n --model_id=a0 \\\n --causal=True \\\n --use_2plus1d=False \\\n --num_classes=600 \\\n --checkpoint_path=\"\"\n```\n\nTo use an exported saved_model in various applications:\n\n```python\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nsaved_model_path = ...\n\ninputs = tf.keras.layers.Input(\n shape=[None, None, None, 3],\n dtype=tf.float32)\n\nencoder = hub.KerasLayer(saved_model_path, trainable=True)\noutputs = encoder(inputs)\n\nmodel = tf.keras.Model(inputs, outputs)\n\nexample_input = tf.ones([1, 8, 172, 172, 3])\noutputs = model(example_input)\n```\n\"\"\"\n\nfrom typing import Sequence\n\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\n\nfrom official.vision.beta.projects.movinet.modeling import movinet\nfrom official.vision.beta.projects.movinet.modeling import movinet_model\n\nflags.DEFINE_string(\n 'output_path', '/tmp/movinet/',\n 'Path to save the exported saved_model file.')\nflags.DEFINE_string(\n 'model_id', 'a0', 'MoViNet model name.')\nflags.DEFINE_bool(\n 'causal', False, 'Run the model in causal mode.')\nflags.DEFINE_bool(\n 'use_2plus1d', False, 'Use (2+1)D features instead of 3D.')\nflags.DEFINE_integer(\n 'num_classes', 600, 'The number of classes for prediction.')\nflags.DEFINE_string(\n 'checkpoint_path', '',\n 'Checkpoint path to load. Leave blank for default initialization.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv: Sequence[str]) -> None:\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n # Use dimensions of 1 except the channels to export faster,\n # since we only really need the last dimension to build and get the output\n # states. These dimensions will be set to `None` once the model is built.\n input_shape = [1, 1, 1, 1, 3]\n\n backbone = movinet.Movinet(\n FLAGS.model_id, causal=FLAGS.causal, use_2plus1d=FLAGS.use_2plus1d)\n model = movinet_model.MovinetClassifier(\n backbone, num_classes=FLAGS.num_classes, output_states=FLAGS.causal)\n model.build(input_shape)\n\n if FLAGS.checkpoint_path:\n model.load_weights(FLAGS.checkpoint_path)\n\n if FLAGS.causal:\n # Call the model once to get the output states. Call again with `states`\n # input to ensure that the inputs with the `states` argument are built.\n _, states = model(dict(image=tf.ones(input_shape), states={}))\n _, states = model(dict(image=tf.ones(input_shape), states=states))\n\n input_spec = tf.TensorSpec(\n shape=[None, None, None, None, 3],\n dtype=tf.float32,\n name='inputs')\n\n state_specs = {}\n for name, state in states.items():\n shape = state.shape\n if len(state.shape) == 5:\n shape = [None, state.shape[1], None, None, state.shape[-1]]\n new_spec = tf.TensorSpec(shape=shape, dtype=state.dtype, name=name)\n state_specs[name] = new_spec\n\n specs = (input_spec, state_specs)\n\n # Define a tf.Module with custom signatures to allow it to accept\n # a state dict as an argument. We define it inline here because\n # we first need to determine the shape of the state tensors before\n # applying the `input_signature` argument to `tf.function`.\n class ExportStateModule(tf.Module):\n \"\"\"Module with state for exporting to saved_model.\"\"\"\n\n def __init__(self, model):\n self.model = model\n\n @tf.function(input_signature=[input_spec])\n def __call__(self, inputs):\n return self.model(dict(image=inputs, states={}))\n\n @tf.function(input_signature=[input_spec])\n def base(self, inputs):\n return self.model(dict(image=inputs, states={}))\n\n @tf.function(input_signature=specs)\n def stream(self, inputs, states):\n return self.model(dict(image=inputs, states=states))\n\n module = ExportStateModule(model)\n\n tf.saved_model.save(module, FLAGS.output_path)\n else:\n _ = model(tf.ones(input_shape))\n tf.keras.models.save_model(model, FLAGS.output_path)\n\n print(' ----- Done. Saved Model is saved at {}'.format(FLAGS.output_path))\n\n\nif __name__ == '__main__':\n app.run(main)\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Base class for model export.\"\"\"\n\nimport abc\nfrom typing import Dict, List, Mapping, Optional, Text\n\nimport tensorflow as tf\n\nfrom official.core import export_base\nfrom official.modeling.hyperparams import config_definitions as cfg\n\n\nclass ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):\n \"\"\"Base Export Module.\"\"\"\n\n def __init__(self,\n params: cfg.ExperimentConfig,\n *,\n batch_size: int,\n input_image_size: List[int],\n num_channels: int = 3,\n model: Optional[tf.keras.Model] = None):\n \"\"\"Initializes a module for export.\n\n Args:\n params: Experiment params.\n batch_size: The batch size of the model input. Can be `int` or None.\n input_image_size: List or Tuple of size of the input image. For a 2D image,\n it is [height, width].\n num_channels: The number of the image channels.\n model: A tf.keras.Model instance to be exported.\n \"\"\"\n self.params = params\n self._batch_size = batch_size\n self._input_image_size = input_image_size\n self._num_channels = num_channels\n if model is None:\n model = self._build_model() # pylint: disable=assignment-from-none\n super().__init__(params=params, model=model)\n\n def _decode_image(self, encoded_image_bytes: str) -> tf.Tensor:\n \"\"\"Decodes image bytes to an image tensor.\n\n Use `tf.image.decode_image` to decode an image if the input is expected to\n be a 2D image; otherwise use `tf.io.decode_raw` to convert the raw bytes to\n a tensor and reshape it to the desired shape.\n\n Args:\n encoded_image_bytes: An encoded image string to be decoded.\n\n Returns:\n A decoded image tensor.\n \"\"\"\n if len(self._input_image_size) == 2:\n # Decode an image if 2D input is expected.\n image_tensor = tf.image.decode_image(\n encoded_image_bytes, channels=self._num_channels)\n image_tensor.set_shape((None, None, self._num_channels))\n else:\n # Convert raw bytes into a tensor and reshape it, if not 2D input.\n image_tensor = tf.io.decode_raw(encoded_image_bytes, out_type=tf.uint8)\n image_tensor = tf.reshape(image_tensor,\n self._input_image_size + [self._num_channels])\n return image_tensor\n\n def _decode_tf_example(\n self, tf_example_string_tensor: tf.train.Example) -> tf.Tensor:\n \"\"\"Decodes a TF Example to an image tensor.\n\n Args:\n tf_example_string_tensor: A tf.train.Example of encoded image and other\n information.\n\n Returns:\n A decoded image tensor.\n \"\"\"\n keys_to_features = {'image/encoded': tf.io.FixedLenFeature((), tf.string)}\n parsed_tensors = tf.io.parse_single_example(\n serialized=tf_example_string_tensor, features=keys_to_features)\n image_tensor = self._decode_image(parsed_tensors['image/encoded'])\n return image_tensor\n\n def _build_model(self, **kwargs):\n \"\"\"Returns a model built from the params.\"\"\"\n return None\n\n @tf.function\n def inference_from_image_tensors(\n self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:\n return self.serve(inputs)\n\n @tf.function\n def inference_from_image_bytes(self, inputs: tf.Tensor):\n with tf.device('cpu:0'):\n images = tf.nest.map_structure(\n tf.identity,\n tf.map_fn(\n self._decode_image,\n elems=inputs,\n fn_output_signature=tf.TensorSpec(\n shape=[None] * len(self._input_image_size) +\n [self._num_channels],\n dtype=tf.uint8),\n parallel_iterations=32))\n images = tf.stack(images)\n return self.serve(images)\n\n @tf.function\n def inference_from_tf_example(self,\n inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:\n with tf.device('cpu:0'):\n images = tf.nest.map_structure(\n tf.identity,\n tf.map_fn(\n self._decode_tf_example,\n elems=inputs,\n # Height/width of the shape of input images is unspecified (None)\n # at the time of decoding the example, but the shape will\n # be adjusted to conform to the input layer of the model,\n # by _run_inference_on_image_tensors() below.\n fn_output_signature=tf.TensorSpec(\n shape=[None] * len(self._input_image_size) +\n [self._num_channels],\n dtype=tf.uint8),\n dtype=tf.uint8,\n parallel_iterations=32))\n images = tf.stack(images)\n return self.serve(images)\n\n def get_inference_signatures(self, function_keys: Dict[Text, Text]):\n \"\"\"Gets defined function signatures.\n\n Args:\n function_keys: A dictionary with keys as the function to create signature\n for and values as the signature keys when returned.\n\n Returns:\n A 
dictionary with key as signature key and value as concrete functions\n that can be used for tf.saved_model.save.\n \"\"\"\n signatures = {}\n for key, def_name in function_keys.items():\n if key == 'image_tensor':\n input_signature = tf.TensorSpec(\n shape=[self._batch_size] + [None] * len(self._input_image_size) +\n [self._num_channels],\n dtype=tf.uint8)\n signatures[\n def_name] = self.inference_from_image_tensors.get_concrete_function(\n input_signature)\n elif key == 'image_bytes':\n input_signature = tf.TensorSpec(\n shape=[self._batch_size], dtype=tf.string)\n signatures[\n def_name] = self.inference_from_image_bytes.get_concrete_function(\n input_signature)\n elif key == 'serve_examples' or key == 'tf_example':\n input_signature = tf.TensorSpec(\n shape=[self._batch_size], dtype=tf.string)\n signatures[\n def_name] = self.inference_from_tf_example.get_concrete_function(\n input_signature)\n else:\n raise ValueError('Unrecognized `input_type`')\n return signatures\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom absl import flags\nfrom absl.testing import flagsaver\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\n# pylint: disable=unused-import\nfrom official.common import registry_imports\n# pylint: enable=unused-import\nfrom official.common import flags as tfm_flags\nfrom official.core import task_factory\nfrom official.core import train_lib\nfrom official.core import train_utils\nfrom official.nlp import continuous_finetune_lib\n\nFLAGS = flags.FLAGS\n\ntfm_flags.define_flags()\n\n\nclass ContinuousFinetuneTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super().setUp()\n self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')\n\n def testContinuousFinetune(self):\n pretrain_steps = 1\n src_model_dir = self.get_temp_dir()\n flags_dict = dict(\n experiment='mock',\n mode='continuous_train_and_eval',\n model_dir=self._model_dir,\n params_override={\n 'task': {\n 'init_checkpoint': src_model_dir,\n },\n 'trainer': {\n 'continuous_eval_timeout': 1,\n 'steps_per_loop': 1,\n 'train_steps': 1,\n 'validation_steps': 1,\n 'best_checkpoint_export_subdir': 'best_ckpt',\n 'best_checkpoint_eval_metric': 'acc',\n 'optimizer_config': {\n 'optimizer': {\n 'type': 'sgd'\n },\n 'learning_rate': {\n 'type': 'constant'\n }\n }\n }\n })\n\n with flagsaver.flagsaver(**flags_dict):\n # Train and save some checkpoints.\n params = train_utils.parse_configuration(flags.FLAGS)\n distribution_strategy = tf.distribute.get_strategy()\n with distribution_strategy.scope():\n task = task_factory.get_task(params.task, logging_dir=src_model_dir)\n _ = train_lib.run_experiment(\n distribution_strategy=distribution_strategy,\n task=task,\n mode='train',\n params=params,\n model_dir=src_model_dir)\n\n params = train_utils.parse_configuration(FLAGS)\n eval_metrics = continuous_finetune_lib.run_continuous_finetune(\n FLAGS.mode,\n params,\n FLAGS.model_dir,\n run_post_eval=True,\n 
pretrain_steps=pretrain_steps)\n self.assertIn('best_acc', eval_metrics)\n\n self.assertFalse(\n tf.io.gfile.exists(os.path.join(FLAGS.model_dir, 'checkpoint')))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Keras layer that creates a self-attention mask.\"\"\"\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='keras_nlp')\nclass SelfAttentionMask(tf.keras.layers.Layer):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n inputs[0]: from_tensor: 2D or 3D Tensor of shape\n [batch_size, from_seq_length, ...].\n inputs[1]: to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n\n def call(self, inputs, to_mask):\n from_shape = tf.shape(inputs)\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = tf.shape(to_mask)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]),\n dtype=inputs.dtype)\n\n # We don't assume that `from_tensor` is a mask (although it could be). We\n # don't actually care if we attend *from* padding tokens (only *to* padding\n # tokens), so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=inputs.dtype)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Keras-based transformer block layer.\"\"\"\n# pylint: disable=g-classes-have-attributes\n\nimport gin\nimport tensorflow as tf\n\nfrom official.nlp import keras_nlp\nfrom official.nlp.modeling.layers import attention\nfrom official.nlp.modeling.layers import multi_channel_attention\nfrom official.nlp.modeling.layers.util import tf_function_if_eager\n\n\[email protected]_keras_serializable(package=\"Text\")\nclass Transformer(keras_nlp.layers.TransformerEncoderBlock):\n \"\"\"Transformer layer.\n\n This layer implements the Transformer from \"Attention Is All You Need\".\n (https://arxiv.org/abs/1706.03762).\n\n Args:\n num_attention_heads: Number of attention heads.\n intermediate_size: Size of the intermediate layer.\n intermediate_activation: Activation for the intermediate layer.\n dropout_rate: Dropout probability for the post-attention and output dropout.\n attention_dropout_rate: Dropout probability within the attention layer.\n output_range: the sequence output range, [0, output_range) by slicing the\n target sequence. `None` means the target sequence is not sliced.\n kernel_initializer: Initializer for dense layer kernels.\n bias_initializer: Initializer for dense layer biases.\n kernel_regularizer: Regularizer for dense layer kernels.\n bias_regularizer: Regularizer for dense layer biases.\n activity_regularizer: Regularizer for dense layer activity.\n kernel_constraint: Constraint for dense layer kernels.\n bias_constraint: Constraint for dense layer biases.\n use_bias: Whether to enable use_bias in attention layer. If set False,\n use_bias in attention layer is disabled.\n norm_first: Whether to normalize inputs to attention and intermediate dense\n layers. If set False, output of attention and intermediate dense layers is\n normalized.\n norm_epsilon: Epsilon value to initialize normalization layers.\n intermediate_dropout: Dropout probability for intermediate_dropout_layer.\n attention_initializer: Initializer for kernels of attention layers. 
If set\n `None`, attention layers use kernel_initializer as initializer for kernel.\n \"\"\"\n\n def __init__(self,\n num_attention_heads,\n intermediate_size,\n intermediate_activation,\n dropout_rate=0.0,\n attention_dropout_rate=0.0,\n output_range=None,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_bias=True,\n norm_first=False,\n norm_epsilon=1e-12,\n intermediate_dropout=0.0,\n attention_initializer=None,\n **kwargs):\n super().__init__(\n num_attention_heads=num_attention_heads,\n inner_dim=intermediate_size,\n inner_activation=intermediate_activation,\n output_dropout=dropout_rate,\n attention_dropout=attention_dropout_rate,\n output_range=output_range,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n use_bias=use_bias,\n norm_first=norm_first,\n norm_epsilon=norm_epsilon,\n inner_dropout=intermediate_dropout,\n attention_initializer=attention_initializer,\n **kwargs)\n\n\[email protected]_keras_serializable(package=\"Text\")\[email protected]\nclass CompiledTransformer(Transformer):\n\n @tf_function_if_eager(experimental_compile=True)\n def call(self, inputs):\n return super().call(inputs)\n\n\[email protected]_keras_serializable(package=\"Text\")\nclass TransformerDecoderBlock(tf.keras.layers.Layer):\n \"\"\"Single transformer layer for decoder.\n\n It has three sub-layers:\n (1) a multi-head self-attention mechanism.\n (2) an encoder-decoder attention.\n (3) a positionwise fully connected feed-forward network.\n\n Args:\n num_attention_heads: Number of attention heads.\n intermediate_size: Size of the intermediate layer.\n intermediate_activation: Activation for the intermediate layer.\n dropout_rate: Dropout probability for the post-attention and output dropout.\n attention_dropout_rate: Dropout probability within the attention layer.\n multi_channel_cross_attention: Whether to use `MultiChannelAttention` for\n cross-attention between target sequences and source sequences.\n kernel_initializer: Initializer for dense layer kernels.\n bias_initializer: Initializer for dense layer biases.\n kernel_regularizer: Regularizer for dense layer kernels.\n bias_regularizer: Regularizer for dense layer biases.\n activity_regularizer: Regularizer for dense layer activity.\n kernel_constraint: Constraint for dense layer kernels.\n bias_constraint: Constraint for dense layer biases.\n use_bias: Whether to enable use_bias in attention layer. If set False,\n use_bias in attention layer is disabled.\n norm_first: Whether to normalize inputs to attention and intermediate dense\n layers. If set False, output of attention and intermediate dense layers is\n normalized.\n norm_epsilon: Epsilon value to initialize normalization layers.\n intermediate_dropout: Dropout probability for intermediate_dropout_layer.\n attention_initializer: Initializer for kernels of attention layers. 
If set\n `None`, attention layers use kernel_initializer as initializer for kernel.\n \"\"\"\n\n def __init__(self,\n num_attention_heads,\n intermediate_size,\n intermediate_activation,\n dropout_rate=0.0,\n attention_dropout_rate=0.0,\n multi_channel_cross_attention=False,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_bias=True,\n norm_first=False,\n norm_epsilon=1e-12,\n intermediate_dropout=0.0,\n attention_initializer=None,\n **kwargs):\n super().__init__(**kwargs)\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.intermediate_activation = tf.keras.activations.get(\n intermediate_activation)\n self.dropout_rate = dropout_rate\n self.attention_dropout_rate = attention_dropout_rate\n self.multi_channel_cross_attention = multi_channel_cross_attention\n self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n self._bias_initializer = tf.keras.initializers.get(bias_initializer)\n self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)\n self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\n self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)\n self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)\n self._bias_constraint = tf.keras.constraints.get(bias_constraint)\n self._use_bias = use_bias\n self._norm_first = norm_first\n self._norm_epsilon = norm_epsilon\n self._intermediate_dropout = intermediate_dropout\n if attention_initializer:\n self._attention_initializer = tf.keras.initializers.get(\n attention_initializer)\n else:\n self._attention_initializer = self._kernel_initializer\n if self.multi_channel_cross_attention:\n self._cross_attention_cls = multi_channel_attention.MultiChannelAttention\n else:\n self._cross_attention_cls = attention.MultiHeadAttention\n\n def build(self, input_shape):\n target_tensor_shape = tf.TensorShape(input_shape[0])\n if len(target_tensor_shape.as_list()) != 3:\n raise ValueError(\"TransformerLayer expects a three-dimensional input of \"\n \"shape [batch, sequence, width].\")\n hidden_size = target_tensor_shape[2]\n if hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, self.num_attention_heads))\n self.attention_head_size = int(hidden_size) // self.num_attention_heads\n common_kwargs = dict(\n bias_initializer=self._bias_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activity_regularizer=self._activity_regularizer,\n kernel_constraint=self._kernel_constraint,\n bias_constraint=self._bias_constraint)\n # Self attention.\n self.self_attention = attention.CachedAttention(\n num_heads=self.num_attention_heads,\n key_dim=self.attention_head_size,\n dropout=self.attention_dropout_rate,\n use_bias=self._use_bias,\n kernel_initializer=self._attention_initializer,\n name=\"self_attention\",\n **common_kwargs)\n self.self_attention_output_dense = tf.keras.layers.experimental.EinsumDense(\n \"abc,cd->abd\",\n output_shape=(None, hidden_size),\n bias_axes=\"d\",\n kernel_initializer=self._kernel_initializer,\n name=\"output\",\n **common_kwargs)\n self.self_attention_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_rate)\n self.self_attention_layer_norm = (\n 
tf.keras.layers.LayerNormalization(\n name=\"self_attention_layer_norm\",\n axis=-1,\n epsilon=self._norm_epsilon,\n dtype=\"float32\"))\n # Encoder-decoder attention.\n self.encdec_attention = self._cross_attention_cls(\n num_heads=self.num_attention_heads,\n key_dim=self.attention_head_size,\n dropout=self.attention_dropout_rate,\n output_shape=hidden_size,\n use_bias=self._use_bias,\n kernel_initializer=self._attention_initializer,\n name=\"attention/encdec\",\n **common_kwargs)\n\n self.encdec_attention_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_rate)\n self.encdec_attention_layer_norm = (\n tf.keras.layers.LayerNormalization(\n name=\"attention/encdec_output_layer_norm\",\n axis=-1,\n epsilon=self._norm_epsilon,\n dtype=\"float32\"))\n\n # Feed-forward projection.\n self.intermediate_dense = tf.keras.layers.experimental.EinsumDense(\n \"abc,cd->abd\",\n output_shape=(None, self.intermediate_size),\n bias_axes=\"d\",\n kernel_initializer=self._kernel_initializer,\n name=\"intermediate\",\n **common_kwargs)\n self.intermediate_activation_layer = tf.keras.layers.Activation(\n self.intermediate_activation)\n self._intermediate_dropout_layer = tf.keras.layers.Dropout(\n rate=self._intermediate_dropout)\n self.output_dense = tf.keras.layers.experimental.EinsumDense(\n \"abc,cd->abd\",\n output_shape=(None, hidden_size),\n bias_axes=\"d\",\n kernel_initializer=self._kernel_initializer,\n name=\"output\",\n **common_kwargs)\n self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"output_layer_norm\", axis=-1,\n epsilon=self._norm_epsilon, dtype=\"float32\")\n super().build(input_shape)\n\n def get_config(self):\n config = {\n \"num_attention_heads\":\n self.num_attention_heads,\n \"intermediate_size\":\n self.intermediate_size,\n \"intermediate_activation\":\n self.intermediate_activation,\n \"dropout_rate\":\n self.dropout_rate,\n \"attention_dropout_rate\":\n self.attention_dropout_rate,\n \"multi_channel_cross_attention\":\n self.multi_channel_cross_attention,\n \"kernel_initializer\":\n tf.keras.initializers.serialize(self._kernel_initializer),\n \"bias_initializer\":\n tf.keras.initializers.serialize(self._bias_initializer),\n \"kernel_regularizer\":\n tf.keras.regularizers.serialize(self._kernel_regularizer),\n \"bias_regularizer\":\n tf.keras.regularizers.serialize(self._bias_regularizer),\n \"activity_regularizer\":\n tf.keras.regularizers.serialize(self._activity_regularizer),\n \"kernel_constraint\":\n tf.keras.constraints.serialize(self._kernel_constraint),\n \"bias_constraint\":\n tf.keras.constraints.serialize(self._bias_constraint),\n \"use_bias\":\n self._use_bias,\n \"norm_first\":\n self._norm_first,\n \"norm_epsilon\":\n self._norm_epsilon,\n \"intermediate_dropout\":\n self._intermediate_dropout,\n \"attention_initializer\":\n tf.keras.initializers.serialize(self._attention_initializer)\n }\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def common_layers_with_encoder(self):\n \"\"\"Gets layer objects that can make a Transformer encoder block.\"\"\"\n return [\n self.self_attention, self.self_attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_layer_norm\n ]\n\n def call(self, inputs, cache=None, decode_loop_step=None):\n if self.multi_channel_cross_attention:\n if len(inputs) != 5:\n raise ValueError(\n \"TransformerDecoderBlock must have 5 inputs, when it uses \"\n 
\"multi_channel_cross_attention. But it got: %d\" % len(inputs))\n elif len(inputs) != 4:\n raise ValueError(\n \"TransformerDecoderBlock must have 4 inputs, but it got: %d\" %\n len(inputs))\n input_tensor, memory, attention_mask, self_attention_mask = inputs[:4]\n source_tensor = input_tensor\n if self._norm_first:\n input_tensor = self.self_attention_layer_norm(input_tensor)\n self_attention_output, cache = self.self_attention(\n query=input_tensor,\n value=input_tensor,\n attention_mask=self_attention_mask,\n cache=cache,\n decode_loop_step=decode_loop_step)\n self_attention_output = self.self_attention_dropout(self_attention_output)\n if self._norm_first:\n self_attention_output = source_tensor + self_attention_output\n else:\n self_attention_output = self.self_attention_layer_norm(\n input_tensor + self_attention_output)\n if self._norm_first:\n source_self_attention_output = self_attention_output\n self_attention_output = self.encdec_attention_layer_norm(\n self_attention_output)\n cross_attn_inputs = dict(\n query=self_attention_output,\n value=memory,\n attention_mask=attention_mask)\n if self.multi_channel_cross_attention:\n # Accesses the 5-th input tensor for the doc-attention probabilities.\n cross_attn_inputs[\"context_attention_weights\"] = inputs[-1]\n attention_output = self.encdec_attention(**cross_attn_inputs)\n attention_output = self.encdec_attention_dropout(attention_output)\n if self._norm_first:\n attention_output = source_self_attention_output + attention_output\n else:\n attention_output = self.encdec_attention_layer_norm(\n self_attention_output + attention_output)\n if self._norm_first:\n source_attention_output = attention_output\n attention_output = self.output_layer_norm(attention_output)\n\n intermediate_output = self.intermediate_dense(attention_output)\n intermediate_output = self.intermediate_activation_layer(\n intermediate_output)\n intermediate_output = self._intermediate_dropout_layer(intermediate_output)\n layer_output = self.output_dense(intermediate_output)\n layer_output = self.output_dropout(layer_output)\n if self._norm_first:\n layer_output = source_attention_output + layer_output\n else:\n layer_output = self.output_layer_norm(layer_output + attention_output)\n return layer_output, cache\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for mat_mul_with_margin layer.\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import\nfrom official.nlp.modeling.layers import mat_mul_with_margin\n\n\nclass MatMulWithMarginTest(keras_parameterized.TestCase):\n\n def test_layer_invocation(self):\n \"\"\"Validate that the Keras object can be created and invoked.\"\"\"\n input_width = 512\n test_layer = mat_mul_with_margin.MatMulWithMargin()\n # Create a 2-dimensional input (the first dimension is implicit).\n left_encoded = tf.keras.Input(shape=(input_width,), dtype=tf.float32)\n right_encoded = tf.keras.Input(shape=(input_width,), dtype=tf.float32)\n left_logits, right_logits = test_layer(left_encoded, right_encoded)\n\n # Validate that the outputs are of the expected shape.\n expected_output_shape = [None, None]\n self.assertEqual(expected_output_shape, left_logits.shape.as_list())\n self.assertEqual(expected_output_shape, right_logits.shape.as_list())\n\n def test_serialize_deserialize(self):\n # Create a layer object that sets all of its config options.\n layer = mat_mul_with_margin.MatMulWithMargin()\n\n # Create another layer object from the first object's config.\n new_layer = mat_mul_with_margin.MatMulWithMargin.from_config(\n layer.get_config())\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(layer.get_config(), new_layer.get_config())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utility functions for creating TFRecord data sets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\n\ndef int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef int64_list_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef bytes_list_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\n\ndef float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\ndef float_list_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\ndef read_examples_list(path):\n \"\"\"Read list of training or validation examples.\n\n The file is assumed to contain a single example per line where the first\n token in the line is an identifier that allows us to find the image and\n annotation xml for that example.\n\n For example, the line:\n xyz 3\n would allow us to find files xyz.jpg and xyz.xml (the 3 would be ignored).\n\n Args:\n path: absolute path to examples list file.\n\n Returns:\n list of example identifiers (strings).\n \"\"\"\n with tf.gfile.GFile(path) as fid:\n lines = fid.readlines()\n return [line.strip().split(' ')[0] for line in lines]\n\n\ndef recursive_parse_xml_to_dict(xml):\n \"\"\"Recursively parses XML contents to python dict.\n\n We assume that `object` tags are the only ones that can appear\n multiple times at the same level of a tree.\n\n Args:\n xml: xml tree obtained by parsing XML file contents using lxml.etree\n\n Returns:\n Python dictionary holding XML contents.\n \"\"\"\n if not xml:\n return {xml.tag: xml.text}\n result = {}\n for child in xml:\n child_result = recursive_parse_xml_to_dict(child)\n if child.tag != 'object':\n result[child.tag] = child_result[child.tag]\n else:\n if child.tag not in result:\n result[child.tag] = []\n result[child.tag].append(child_result[child.tag])\n return {xml.tag: result}\n" ]
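The last file above documents `recursive_parse_xml_to_dict` as collecting repeated `object` tags into a list while mapping every other tag to a single entry. A minimal usage sketch of that behaviour, assuming the function is in scope; the XML snippet and variable names are illustrative, not taken from the repo:

```python
# Hypothetical input for recursive_parse_xml_to_dict (defined above).
from lxml import etree

xml_string = b"""<annotation>
  <filename>xyz.jpg</filename>
  <object><name>dog</name></object>
  <object><name>cat</name></object>
</annotation>"""

root = etree.fromstring(xml_string)
parsed = recursive_parse_xml_to_dict(root)
# parsed == {'annotation': {'filename': 'xyz.jpg',
#                           'object': [{'name': 'dog'}, {'name': 'cat'}]}}
```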
[ [ "tensorflow.keras.layers.Input", "tensorflow.keras.mixed_precision.set_global_policy", "tensorflow.keras.Input", "tensorflow.keras.utils.get_registered_name", "tensorflow.test.main", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.Model", "tensorflow.keras.initializers.TruncatedNormal", "numpy.random.randint" ], [ "tensorflow.io.gfile.exists", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.compat.v1.estimator.experimental.KMeans", "tensorflow.io.gfile.makedirs", "tensorflow.compat.v1.data.make_initializable_iterator", "tensorflow.compat.v1.placeholder", "numpy.array" ], [ "numpy.random.rand", "tensorflow.keras.layers.InputSpec", "tensorflow.test.main" ], [ "tensorflow.keras.regularizers.l2", "tensorflow.keras.layers.InputSpec", "tensorflow.test.main" ], [ "tensorflow.compat.v1.train.list_variables", "tensorflow.compat.v1.global_variables", "tensorflow.compat.v1.logging.error", "tensorflow.compat.v1.executing_eagerly", "tensorflow.compat.v1.train.NewCheckpointReader", "tensorflow.compat.v1.init_scope" ], [ "tensorflow.test.main" ], [ "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Dense", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.mixed_precision.global_policy", "tensorflow.keras.layers.Input" ], [ "tensorflow.maximum", "tensorflow.cast", "tensorflow.minimum", "tensorflow.keras.optimizers.Adam", "tensorflow.name_scope" ], [ "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.backend.get_value", "tensorflow.keras.callbacks.experimental.BackupAndRestore" ], [ "tensorflow.compat.v1.Variable", "numpy.reshape", "tensorflow.compat.v1.train.Checkpoint", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.logging.info", "numpy.transpose", "tensorflow.compat.v1.train.NewCheckpointReader", "tensorflow.compat.v1.train.Saver" ], [ "numpy.hstack", "numpy.tile", "numpy.concatenate", "tensorflow.io.gfile.remove", "tensorflow.io.gfile.copy" ], [ "tensorflow.identity", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.backend.image_data_format", "tensorflow.keras.Input" ], [ "tensorflow.keras.models.save_model", "tensorflow.ones", "tensorflow.saved_model.save", "tensorflow.function", "tensorflow.TensorSpec" ], [ "tensorflow.device", "tensorflow.stack", "tensorflow.reshape", "tensorflow.io.parse_single_example", "tensorflow.io.decode_raw", "tensorflow.io.FixedLenFeature", "tensorflow.image.decode_image", "tensorflow.TensorSpec" ], [ "tensorflow.distribute.get_strategy", "tensorflow.test.main" ], [ "tensorflow.reshape", "tensorflow.ones", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.shape" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.TensorShape", "tensorflow.keras.layers.Activation", "tensorflow.keras.constraints.get", "tensorflow.keras.constraints.serialize", "tensorflow.keras.regularizers.get", "tensorflow.keras.initializers.serialize", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.regularizers.serialize", "tensorflow.keras.layers.experimental.EinsumDense", "tensorflow.keras.layers.Dropout", "tensorflow.keras.activations.get", "tensorflow.keras.initializers.get" ], [ "tensorflow.test.main", "tensorflow.keras.Input" ], [ "tensorflow.compat.v1.gfile.GFile", "tensorflow.compat.v1.train.Int64List", "tensorflow.compat.v1.train.BytesList", "tensorflow.compat.v1.train.FloatList" ] ]
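Several of the classes in the record above pair `@tf.keras.utils.register_keras_serializable` with `get_config`/`from_config`, which is what lets Keras round-trip them through serialization. A minimal sketch of that pattern, assuming TF 2.x; the `Scale` layer and `Example` package name are hypothetical:

```python
import tensorflow as tf

@tf.keras.utils.register_keras_serializable(package='Example')
class Scale(tf.keras.layers.Layer):
    """Toy layer illustrating the get_config/from_config round trip."""

    def __init__(self, factor=2.0, **kwargs):
        super().__init__(**kwargs)
        self.factor = factor

    def call(self, inputs):
        return inputs * self.factor

    def get_config(self):
        # Extend the base config so deserialization can rebuild the layer.
        config = super().get_config()
        config.update({'factor': self.factor})
        return config

layer = Scale(factor=3.0)
restored = Scale.from_config(layer.get_config())
assert restored.factor == 3.0
```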
emcoglab/sensorimotor-distance-paper-2021
[ "94464bb391ea42ffad8bcef6b087c1343ecbe2c7" ]
[ "exclusivity_correlation.py" ]
[ "\"\"\"\n===========================\nComputes the correlation between pairwise distances and mean exclusivity ratings for randomly drawn pairs of norms.\n===========================\n\nDr. Cai Wingfield\n---------------------------\nEmbodied Cognition Lab\nDepartment of Psychology\nUniversity of Lancaster\[email protected]\ncaiwingfield.net\n---------------------------\n2022\n---------------------------\n\"\"\"\n\nfrom numpy import corrcoef, zeros\nfrom numpy.random import default_rng\n\nfrom linguistic_distributional_models.utils.logging import print_progress\nfrom linguistic_distributional_models.utils.maths import DistanceType, distance\nfrom sensorimotor_norms.sensorimotor_norms import SensorimotorNorms, DataColNames\n\nsn = SensorimotorNorms(use_breng_translation=False, verbose=True)\n\n\ndef exclusivity_correlation(n_draws: int, rng_seed=None):\n # numpy.random.seed() only seeds the legacy global RandomState and has no\n # effect on a Generator, so the seed is passed to default_rng() directly\n # to keep the draws reproducible.\n rng = default_rng(rng_seed)\n\n all_words = list(sn.iter_words())\n random_words = rng.choice(all_words, 2 * n_draws, replace=True)\n first_words = random_words[:n_draws]\n second_words = random_words[n_draws:]\n\n distances = zeros((n_draws,)) # Preallocate vectors to be correlated\n mean_exclusivities = zeros((n_draws,))\n for i in range(n_draws):\n w1, w2 = first_words[i], second_words[i]\n v1, v2 = sn.sensorimotor_vector_for_word(w1), sn.sensorimotor_vector_for_word(w2)\n e1, e2 = sn.stat_for_word(w1, DataColNames.exclusivity_sensorimotor), sn.stat_for_word(w2, DataColNames.exclusivity_sensorimotor)\n\n # For the pair\n distances[i] = distance(v1, v2, DistanceType.cosine) # vector distance\n mean_exclusivities[i] = (e1 + e2) / 2 # mean exclusivity\n\n print_progress(i + 1, n_draws)\n\n return corrcoef(distances, mean_exclusivities)\n\n\nif __name__ == \"__main__\":\n correlation = exclusivity_correlation(n_draws=10_000, rng_seed=451)\n print(correlation)\n" ]
[ [ "numpy.corrcoef", "numpy.zeros", "numpy.random.default_rng" ] ]
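A note on reading this record's output: `numpy.corrcoef` applied to two 1-D arrays returns the full 2x2 correlation matrix, so the correlation itself is the off-diagonal entry. A minimal sketch with made-up numbers, not results from the script above:

```python
from numpy import corrcoef

r_matrix = corrcoef([0.1, 0.4, 0.35, 0.8], [2.5, 3.1, 3.0, 4.2])
r = r_matrix[0, 1]  # Pearson r between the two input vectors.
```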
AnastasiaaSenina/openvino_training_extensions
[ "267425d64372dff5b9083dc0ca6abfc305a71449", "7d606a22143db0af97087709d63a2ec2aa02036c", "7d606a22143db0af97087709d63a2ec2aa02036c", "7d606a22143db0af97087709d63a2ec2aa02036c", "05cb9b30e8220445fcb27988926d88f330091c12", "267425d64372dff5b9083dc0ca6abfc305a71449" ]
[ "pytorch_toolkit/action_recognition/action_recognition/models/multi_frame_baseline.py", "pytorch_toolkit/face_recognition/model/backbones/se_resnext.py", "pytorch_toolkit/nncf/tests/sparsity/magnitude/test_algo.py", "pytorch_toolkit/face_recognition/train.py", "pytorch_toolkit/face_recognition/tests/test_models.py", "tensorflow_toolkit/text_detection/tools/export.py" ]
[ "from torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom ..utils import get_fine_tuning_parameters\nfrom .backbone import make_encoder\nfrom .modules import squash_dims, unsquash_dim\n\n\nclass MultiFrameBaseline(nn.Module):\n \"\"\"Simple baseline that runs a classifier on each frame independently and averages logits.\"\"\"\n\n def __init__(self, sample_duration, encoder='resnet34', n_classes=400, input_size=224, pretrained=True,\n input_channels=3):\n \"\"\"Average prediction over multiple frames\"\"\"\n super().__init__()\n\n # backbone\n encoder = make_encoder(encoder, input_size=input_size, input_channels=input_channels, pretrained=pretrained)\n self.resnet = encoder.features # name is kept for compatibility with older checkpoints\n self.last_feature_size = encoder.features_shape[1]\n self.fc = nn.Linear(encoder.features_shape[0], n_classes)\n self.dropout = nn.Dropout2d(0.5)\n\n self.sequence_size = sample_duration\n self.init_weights()\n\n def init_weights(self):\n \"\"\"Initialize the weights.\"\"\"\n self.fc.weight.data.normal_(0.0, 0.02)\n self.fc.bias.data.fill_(0)\n\n def forward(self, images):\n \"\"\"Extract the image feature vectors.\"\"\"\n # (B x T x C x H x W) -> (B*T x C x H x W)\n images = squash_dims(images, (0, 1))\n\n features = self.resnet(images)\n # features = self.dropout(features)\n\n features = F.avg_pool2d(features, self.last_feature_size) # (B*T) x C\n features = unsquash_dim(features, 0, (-1, self.sequence_size))\n ys = self.fc(features.squeeze(-1).squeeze(-1))\n\n return ys.mean(1)\n\n def trainable_parameters(self):\n param_groups = [\n ('trainable', {'re': r''}),\n ]\n\n return get_fine_tuning_parameters(self, param_groups)\n", "\"\"\"\n Copyright (c) 2018 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport math\nimport torch.nn as nn\n\nfrom model.blocks.se_resnext_blocks import SEBottleneckX\n\n\nclass SEResNeXt(nn.Module):\n def __init__(self, block, layers, cardinality=32, num_classes=1000, activation=nn.ReLU, head=False):\n super(SEResNeXt, self).__init__()\n self.cardinality = cardinality\n self.inplanes = 64\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1 = self._make_layer(block, 64, layers[0], activation=activation)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, activation=activation)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, activation=activation)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, activation=activation)\n self.avgpool = nn.Conv2d(512 * block.expansion, 512 * block.expansion, 7,\n groups=512 * block.expansion, bias=False)\n self.head = head\n if not self.head:\n self.output_channels = 512 * block.expansion\n else:\n self.fc = nn.Conv2d(512 * block.expansion, num_classes, 1, stride=1, padding=0, bias=False)\n self.output_channels = num_classes\n\n for m in 
self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1, activation=nn.ReLU):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, self.cardinality, stride, downsample, activation=activation))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, self.cardinality, activation=activation))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n if self.head:\n x = self.fc(x)\n\n return x\n\n def get_output_channels(self):\n return self.output_channels\n\n\ndef se_resnext50(**kwargs):\n model = SEResNeXt(SEBottleneckX, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef se_resnext101(**kwargs):\n model = SEResNeXt(SEBottleneckX, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef se_resnext152(**kwargs):\n model = SEResNeXt(SEBottleneckX, [3, 8, 36, 3], **kwargs)\n return model\n", "\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom copy import deepcopy\n\nimport pytest\nimport torch\nfrom pytest import approx\n\nfrom nncf.algo_selector import create_compression_algorithm\nfrom nncf.operations import UpdateWeight\nfrom nncf.sparsity.layers import BinaryMask\nfrom nncf.sparsity.magnitude.algo import MagnitudeSparsity\nfrom nncf.sparsity.magnitude.functions import normed_magnitude\nfrom nncf.utils import get_all_modules_by_type\nfrom tests.quantization.test_functions import check_equal\nfrom tests.sparsity.const.test_algo import ref_mask_2, ref_mask_1\nfrom tests.sparsity.magnitude.test_helpers import MagnitudeTestModel, get_basic_magnitude_sparsity_config\nfrom tests.test_helpers import BasicConvTestModel, get_empty_config\n\n\ndef test_can_create_magnitude_sparse_algo__with_defaults():\n model = MagnitudeTestModel()\n config = get_basic_magnitude_sparsity_config()\n config['compression']['params'] = \\\n {'schedule': 'multistep'}\n compression_algo = create_compression_algorithm(deepcopy(model), config)\n\n assert isinstance(compression_algo, MagnitudeSparsity)\n sparse_model = compression_algo.model\n assert compression_algo.sparsity_level == approx(0.1)\n assert len(list(sparse_model.modules())) == 11\n\n model_conv = get_all_modules_by_type(model, 'Conv2d')\n sparse_model_conv = get_all_modules_by_type(sparse_model, 'NNCFConv2d')\n assert len(model_conv) == 
len(sparse_model_conv)\n\n i = 0\n for module_name in model_conv:\n scope = module_name.split('/')\n scope[-1] = scope[-1].replace('Conv2d', 'NNCFConv2d')\n sparse_module_name = '/'.join(scope)\n assert sparse_module_name in sparse_model_conv\n\n store = []\n sparse_module = sparse_model_conv[sparse_module_name]\n ref_mask = torch.ones_like(sparse_module.weight) if i == 0 else ref_mask_2\n i += 1\n for op in sparse_module.pre_ops.values():\n if isinstance(op, UpdateWeight) and isinstance(op.operand, BinaryMask):\n assert compression_algo.threshold == approx(0.24, 0.1)\n assert torch.allclose(op.operand.binary_mask, ref_mask)\n assert isinstance(compression_algo.weight_importance, type(normed_magnitude))\n assert op.__class__.__name__ not in store\n store.append(op.__class__.__name__)\n\n\[email protected](\n ('weight_importance', 'sparsity_level', 'threshold'),\n (\n ('normed_abs', None, 0.219),\n ('abs', None, 9),\n ('normed_abs', 0.5, 0.243),\n ('abs', 0.5, 10),\n )\n)\ndef test_magnitude_sparse_algo_sets_threshold(weight_importance, sparsity_level, threshold):\n model = MagnitudeTestModel()\n config = get_basic_magnitude_sparsity_config()\n config['compression']['weight_importance'] = weight_importance\n config['compression']['params'] = {'schedule': 'multistep'}\n compression_algo = create_compression_algorithm(model, config)\n if sparsity_level:\n compression_algo.set_sparsity_level(sparsity_level)\n assert compression_algo.threshold == pytest.approx(threshold, 0.01)\n\n\ndef test_can_not_set_sparsity_more_than_one_for_magnitude_sparse_algo():\n model = MagnitudeTestModel()\n config = get_basic_magnitude_sparsity_config()\n compression_algo = create_compression_algorithm(model, config)\n with pytest.raises(AttributeError):\n compression_algo.set_sparsity_level(1)\n compression_algo.set_sparsity_level(1.2)\n\n\ndef test_can_not_create_magnitude_algo__without_steps():\n model = MagnitudeTestModel()\n config = get_basic_magnitude_sparsity_config()\n config['compression']['params'] = {'schedule': 'multistep', 'sparsity_levels': [0.1]}\n with pytest.raises(AttributeError):\n create_compression_algorithm(model, config)\n\n\ndef test_can_create_magnitude_algo__without_levels():\n model = MagnitudeTestModel()\n config = get_basic_magnitude_sparsity_config()\n config['compression']['params'] = {'schedule': 'multistep', 'steps': [1]}\n compression_algo = create_compression_algorithm(model, config)\n assert compression_algo.sparsity_level == approx(0.1)\n\n\ndef test_can_not_create_magnitude_algo__with_not_matched_steps_and_levels():\n model = MagnitudeTestModel()\n config = get_basic_magnitude_sparsity_config()\n config['compression']['params'] = {'schedule': 'multistep', 'sparsity_levels': [0.1], 'steps': [1, 2]}\n with pytest.raises(AttributeError):\n create_compression_algorithm(model, config)\n\n\ndef test_magnitude_algo_set_binary_mask_on_forward():\n model = MagnitudeTestModel()\n config = get_basic_magnitude_sparsity_config()\n config['compression']['weight_importance'] = 'abs'\n compression_algo = create_compression_algorithm(model, config)\n compression_algo.set_sparsity_level(0.3)\n model = compression_algo.model\n with torch.no_grad():\n model(torch.ones([1, 1, 10, 10]))\n\n op = model.conv1.pre_ops['0']\n check_equal(ref_mask_1, op.operand.binary_mask)\n\n op = model.conv2.pre_ops['0']\n check_equal(ref_mask_2, op.operand.binary_mask)\n\n\ndef test_magnitude_algo_binary_masks_are_applied():\n model = BasicConvTestModel()\n config = get_empty_config()\n 
config['compression']['algorithm'] = \"magnitude_sparsity\"\n compression_algo = create_compression_algorithm(model, config)\n compressed_model = compression_algo.model\n minfo_list = compression_algo.sparsified_module_info # type: List[SparseModuleInfo]\n minfo = minfo_list[0] # type: SparseModuleInfo\n\n minfo.operand.binary_mask = torch.ones_like(minfo.module.weight) # 1x1x2x2\n input_ = torch.ones(size=(1, 1, 5, 5))\n ref_output_1 = -4 * torch.ones(size=(2, 4, 4))\n output_1 = compressed_model(input_)\n assert torch.all(torch.eq(output_1, ref_output_1))\n\n minfo.operand.binary_mask[0][0][0][1] = 0\n minfo.operand.binary_mask[1][0][1][0] = 0\n ref_output_2 = - 3 * torch.ones_like(ref_output_1)\n output_2 = compressed_model(input_)\n assert torch.all(torch.eq(output_2, ref_output_2))\n\n minfo.operand.binary_mask[1][0][0][1] = 0\n ref_output_3 = ref_output_2.clone()\n ref_output_3[1] = -2 * torch.ones_like(ref_output_1[1])\n output_3 = compressed_model(input_)\n assert torch.all(torch.eq(output_3, ref_output_3))\n", "\"\"\"\n Copyright (c) 2018 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport argparse\nimport datetime\nimport os.path as osp\nimport os\nfrom pprint import pformat\n\nimport glog as log\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport torch.backends.cudnn as cudnn\nfrom torchvision import transforms as t\nfrom tensorboardX import SummaryWriter\n\nfrom nncf.config import Config\nfrom nncf.dynamic_graph import patch_torch_operators\nfrom nncf.algo_selector import create_compression_algorithm\nfrom datasets import LFW, VGGFace2, MSCeleb1M, IMDBFace, TrillionPairs\n\nfrom losses.am_softmax import AMSoftmaxLoss\nfrom losses.metric_losses import MetricLosses\nfrom evaluate_lfw import evaluate, compute_embeddings_lfw\n\nfrom utils.utils import load_model_state, save_model_cpu\nimport utils.augmentation as augm\nfrom utils.parser_yaml import ArgumentParserWithYaml\nfrom model.common import models_backbones\n\n\ndef train(args):\n \"\"\"Performs training of a face recognition network\"\"\"\n input_size = models_backbones[args.model]().get_input_res()\n if args.train_dataset == 'vgg':\n assert args.t_list\n dataset = VGGFace2(args.train, args.t_list, args.t_land)\n elif args.train_dataset == 'imdbface':\n dataset = IMDBFace(args.train, args.t_list)\n elif args.train_dataset == 'trp':\n dataset = TrillionPairs(args.train, args.t_list)\n else:\n dataset = MSCeleb1M(args.train, args.t_list)\n\n if dataset.have_landmarks:\n log.info('Use alignment for the train data')\n dataset.transform = t.Compose([augm.HorizontalFlipNumpy(p=.5),\n augm.CutOutWithPrior(p=0.05, max_area=0.1),\n augm.RandomRotationNumpy(10, p=.95),\n augm.ResizeNumpy(input_size),\n augm.BlurNumpy(k=5, p=.2),\n augm.NumpyToTensor(switch_rb=True)])\n else:\n dataset.transform = t.Compose([augm.ResizeNumpy(input_size),\n augm.HorizontalFlipNumpy(),\n augm.RandomRotationNumpy(10),\n augm.NumpyToTensor(switch_rb=True)])\n\n if args.weighted:\n train_weights = 
dataset.get_weights()\n train_weights = torch.DoubleTensor(train_weights)\n sampler = torch.utils.data.sampler.WeightedRandomSampler(train_weights, len(train_weights))\n train_loader = torch.utils.data.DataLoader(dataset, batch_size=args.train_batch_size,\n sampler=sampler, num_workers=3, pin_memory=False)\n else:\n train_loader = DataLoader(dataset, batch_size=args.train_batch_size, num_workers=4, shuffle=True)\n\n lfw = LFW(args.val, args.v_list, args.v_land)\n if lfw.use_landmarks:\n log.info('Use alignment for the test data')\n lfw.transform = t.Compose([augm.ResizeNumpy(input_size),\n augm.NumpyToTensor(switch_rb=True)])\n else:\n lfw.transform = t.Compose([augm.ResizeNumpy((160, 160)),\n augm.CenterCropNumpy(input_size),\n augm.NumpyToTensor(switch_rb=True)])\n\n log_path = './logs/{:%Y_%m_%d_%H_%M}_{}'.format(datetime.datetime.now(), args.snap_prefix)\n writer = SummaryWriter(log_path)\n\n if not osp.exists(args.snap_folder):\n os.mkdir(args.snap_folder)\n\n model = models_backbones[args.model](embedding_size=args.embed_size,\n num_classes=dataset.get_num_classes(), feature=False)\n\n set_dropout_fn = model.set_dropout_ratio\n\n compression_algo = None\n if args.snap_to_resume is not None:\n if args.compr_config:\n config = Config.from_json(args.compr_config)\n compression_algo = create_compression_algorithm(model, config)\n model = compression_algo.model\n\n log.info('Resuming snapshot ' + args.snap_to_resume + ' ...')\n model = load_model_state(model, args.snap_to_resume, args.devices[0], eval_state=False)\n model = torch.nn.DataParallel(model, device_ids=args.devices)\n else:\n model = torch.nn.DataParallel(model, device_ids=args.devices, output_device=args.devices[0])\n model.cuda()\n model.train()\n cudnn.benchmark = True\n\n if args.to_onnx is not None:\n if args.compr_config:\n compression_algo.export_model(args.to_onnx)\n else:\n model = model.eval().cpu()\n input_shape = tuple([1, 3] + list(input_size))\n with torch.no_grad():\n torch.onnx.export(model.module, torch.randn(input_shape), args.to_onnx, verbose=True)\n\n print(\"Saved to\", args.to_onnx)\n return\n\n log.info('Face Recognition model:')\n log.info(model)\n\n if args.mining_type == 'focal':\n softmax_criterion = AMSoftmaxLoss(gamma=args.gamma, m=args.m, margin_type=args.margin_type, s=args.s)\n else:\n softmax_criterion = AMSoftmaxLoss(t=args.t, m=0.35, margin_type=args.margin_type, s=args.s)\n aux_losses = MetricLosses(dataset.get_num_classes(), args.embed_size, writer)\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n if args.compr_config:\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [0, 2, 4, 6, 8])\n else:\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [3, 6, 9, 13])\n\n log.info('Epoch length: %d' % len(train_loader))\n for epoch_num in range(args.epoch_total_num):\n log.info('Epoch: %d' % epoch_num)\n scheduler.step()\n\n if epoch_num > 6 or args.compr_config:\n set_dropout_fn(0.)\n\n classification_correct = 0\n classification_total = 0\n\n for i, data in enumerate(train_loader, 0):\n iteration = epoch_num * len(train_loader) + i\n\n if iteration % args.val_step == 0:\n snapshot_name = osp.join(args.snap_folder, args.snap_prefix + '_{0}.pt'.format(iteration))\n if iteration > 0:\n log.info('Saving Snapshot: ' + snapshot_name)\n save_model_cpu(model, optimizer, snapshot_name, epoch_num)\n\n log.info('Evaluating Snapshot: ' + snapshot_name)\n model.eval()\n same_acc, diff_acc, all_acc, auc = evaluate(args, lfw, model, 
compute_embeddings_lfw,\n args.val_batch_size, verbose=False)\n\n model.train()\n\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n writer.add_scalar('Epoch', epoch_num, iteration)\n writer.add_scalar('Accuracy/Val_same_accuracy', same_acc, iteration)\n writer.add_scalar('Accuracy/Val_diff_accuracy', diff_acc, iteration)\n writer.add_scalar('Accuracy/Val_accuracy', all_acc, iteration)\n writer.add_scalar('Accuracy/AUC', auc, iteration)\n\n data, label = data['img'], data['label'].cuda()\n features, sm_outputs = model(data)\n\n optimizer.zero_grad()\n aux_losses.init_iteration()\n aux_loss, aux_log = aux_losses(features, label, epoch_num, iteration)\n loss_sm = softmax_criterion(sm_outputs, label)\n compr_loss = compression_algo.loss() if args.compr_config else 0\n loss = loss_sm + aux_loss + compr_loss\n loss.backward()\n aux_losses.end_iteration()\n optimizer.step()\n\n _, predicted = torch.max(sm_outputs.data, 1)\n classification_total += int(label.size(0))\n classification_correct += int(torch.sum(predicted.eq(label)))\n train_acc = float(classification_correct) / classification_total\n\n if i % 10 == 0:\n log.info('Iteration %d, Softmax loss: %.4f, Total loss: %.4f' % (iteration, loss_sm, loss) + aux_log)\n log.info('Learning rate: %f' % scheduler.get_lr()[0])\n writer.add_scalar('Loss/train_loss', loss, iteration)\n writer.add_scalar('Loss/softmax_loss', loss_sm, iteration)\n writer.add_scalar('Learning_rate', scheduler.get_lr()[0], iteration)\n writer.add_scalar('Accuracy/classification', train_acc, iteration)\n if args.compr_config and \"sparsity_level\" in compression_algo.statistics():\n log.info('Sparsity_level: %.4f' % compression_algo.statistics()[\"sparsity_level\"])\n writer.add_scalar('Sparsity_level', compression_algo.statistics()[\"sparsity_level\"], iteration)\n\n if args.compr_config:\n compression_algo.scheduler.step()\n\n if args.compr_config:\n compression_algo.scheduler.epoch_step()\n\n\ndef main():\n \"\"\"Creates a command line parser and starts training\"\"\"\n parser = ArgumentParserWithYaml(description='Training Face Recognition in PyTorch',\n fromfile_prefix_chars='@',\n epilog=\"Please, note that you can parse parameters from a yaml file if \\\n you add @<path_to_yaml_file> to command line\")\n\n #datasets configuration\n parser.add_argument('--train_dataset', choices=['vgg', 'ms1m', 'trp', 'imdbface'],\n type=str, default='vgg', help='Name of the train dataset.')\n parser.add_argument('--train_data_root', dest='train', required=True, type=str, help='Path to train data.')\n parser.add_argument('--train_list', dest='t_list', required=False, type=str, help='Path to train data image list.')\n parser.add_argument('--train_landmarks', default='', dest='t_land', required=False, type=str,\n help='Path to landmarks for the train images.')\n\n parser.add_argument('--val_data_root', dest='val', required=True, type=str, help='Path to val data.')\n parser.add_argument('--val_step', type=int, default=1000, help='Evaluate model each val_step during each epoch.')\n parser.add_argument('--val_list', dest='v_list', required=True, type=str, help='Path to test data image list.')\n parser.add_argument('--val_landmarks', dest='v_land', default='', required=False, type=str,\n help='Path to landmarks for test images.')\n\n #model configuration\n parser.add_argument('--model', choices=models_backbones.keys(), type=str, default='mobilenet', 
help='Model type.')\n parser.add_argument('--embed_size', type=int, default=256, help='Size of the face embedding.')\n\n #optimizer configuration\n parser.add_argument('--train_batch_size', type=int, default=170, help='Train batch size.')\n parser.add_argument('--epoch_total_num', type=int, default=30, help='Number of epochs to train.')\n parser.add_argument('--lr', type=float, default=0.4, help='Learning rate.')\n parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')\n parser.add_argument('--weight_decay', type=float, default=0.0001, help='Weight decay.')\n\n #loss configuration\n parser.add_argument('--mining_type', choices=['focal', 'sv'],\n type=str, default='sv', help='Hard mining method in loss.')\n parser.add_argument('--t', type=float, default=1.1, help='t in support vector softmax. See https://arxiv.org/abs/1812.11317 for details')\n parser.add_argument('--gamma', type=float, default=2., help='Gamma in focal loss. See https://arxiv.org/abs/1708.02002 for details')\n parser.add_argument('--m', type=float, default=0.35, help='Margin size for AMSoftmax.')\n parser.add_argument('--s', type=float, default=30., help='Scale for AMSoftmax.')\n parser.add_argument('--margin_type', choices=['cos', 'arc'],\n type=str, default='cos', help='Margin type for AMSoftmax loss.')\n\n #other parameters\n parser.add_argument('--devices', type=int, nargs='+', default=[0], help='CUDA devices to use.')\n parser.add_argument('--val_batch_size', type=int, default=20, help='Validation batch size.')\n parser.add_argument('--snap_folder', type=str, default='./snapshots/', help='Folder to save snapshots.')\n parser.add_argument('--snap_prefix', type=str, default='FaceReidNet', help='Prefix for snapshots.')\n parser.add_argument('--snap_to_resume', type=str, default=None, help='Snapshot to resume.')\n parser.add_argument('--weighted', action='store_true')\n parser.add_argument('-c', '--compr_config', help='Path to a file with compression parameters', required=False)\n parser.add_argument('--to-onnx', type=str, metavar='PATH', default=None, help='Export to ONNX model by given path')\n\n args = parser.parse_args()\n log.info('Arguments:\\n' + pformat(args.__dict__))\n\n if args.compr_config:\n patch_torch_operators()\n\n with torch.cuda.device(args.devices[0]):\n train(args)\n\n\nif __name__ == '__main__':\n main()\n", "\"\"\"\n Copyright (c) 2018 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\nimport os\nimport torch\n\nfrom model.common import models_backbones, models_landmarks\nfrom utils.utils import save_model_cpu, load_model_state\n\n\nclass BackbonesTests(unittest.TestCase):\n \"\"\"Tests for backbones\"\"\"\n def test_output_shape(self):\n \"\"\"Checks output shape\"\"\"\n embed_size = 256\n for model_type in models_backbones.values():\n model = model_type(embedding_size=embed_size, feature=True).eval()\n batch = torch.Tensor(1, 3, *model.get_input_res()).uniform_()\n output = model(batch)\n self.assertEqual(list(output.shape), list((1, embed_size, 1, 
1)))\n\n def test_save_load_snap(self):\n \"\"\"Checks an ability to save and load model correctly\"\"\"\n embed_size = 256\n snap_name = os.path.join(os.getcwd(), 'test_snap.pt')\n for model_type in models_backbones.values():\n model = model_type(embedding_size=embed_size, feature=True).eval()\n batch = torch.Tensor(1, 3, *model.get_input_res()).uniform_()\n output = model(batch)\n save_model_cpu(model, None, snap_name, 0, write_solverstate=False)\n\n model_loaded = model_type(embedding_size=embed_size, feature=True)\n load_model_state(model_loaded, snap_name, -1, eval_state=True)\n\n output_loaded = model_loaded(batch)\n\n self.assertEqual(torch.norm(output - output_loaded), 0)\n\n\nclass LandnetTests(unittest.TestCase):\n \"\"\"Tests for landmark regressor\"\"\"\n def test_output_shape(self):\n \"\"\"Checks output shape\"\"\"\n model = models_landmarks['landnet']().eval()\n batch = torch.Tensor(1, 3, *model.get_input_res())\n output = model(batch)\n self.assertEqual(list(output.shape), list((1, 10, 1, 1)))\n\n def test_save_load_snap(self):\n \"\"\"Checks an ability to save and load model correctly\"\"\"\n snap_name = os.path.join(os.getcwd(), 'test_snap.pt')\n model = models_landmarks['landnet']().eval()\n batch = torch.Tensor(1, 3, *model.get_input_res()).uniform_()\n output = model(batch)\n save_model_cpu(model, None, snap_name, 0, write_solverstate=False)\n\n model_loaded = models_landmarks['landnet']()\n load_model_state(model_loaded, snap_name, -1, eval_state=True)\n\n output_loaded = model_loaded(batch)\n\n self.assertEqual(torch.norm(output - output_loaded), 0)\n\nif __name__ == '__main__':\n unittest.main()\n", "#!/usr/bin/env python3\n#\n# Copyright (C) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\n\"\"\" This module performs freezing of text detection neural network. \"\"\"\n\nimport argparse\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.python.tools.freeze_graph import freeze_graph\n\nfrom text_detection.model import pixel_link_model\nfrom text_detection.common import load_config\n\n\ntf.compat.v1.disable_v2_behavior()\n\n\ndef arg_parser():\n \"\"\" Returns argument parser. \"\"\"\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--weights', help='Path to trained weights.')\n parser.add_argument('--resolution', nargs=2, type=int, default=(1280, 768))\n parser.add_argument('--config', required=True, help='Path to training configuration file.')\n parser.add_argument('--output_dir', default=None, help='Output Directory')\n return parser\n\ndef print_flops(graph):\n \"\"\" Prints information about FLOPs. 
\"\"\"\n\n with graph.as_default():\n flops = tf.compat.v1.profiler.profile(\n graph, options=tf.compat.v1.profiler.ProfileOptionBuilder.float_operation())\n print('')\n if flops.total_float_ops > 10 ** 9:\n print('Operations number: {} GFlops'.format(flops.total_float_ops / 10 ** 9))\n elif flops.total_float_ops > 10 ** 6:\n print('Operations number: {} MFlops'.format(flops.total_float_ops / 10 ** 6))\n elif flops.total_float_ops > 10 ** 3:\n print('Operations number: {} KFlops'.format(flops.total_float_ops / 10 ** 3))\n\n return flops\n\n\ndef load_frozen_graph(frozen_graph_filename):\n \"\"\" Loads and returns frozen graph. \"\"\"\n\n with tf.io.gfile.GFile(frozen_graph_filename, \"rb\") as file:\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(file.read())\n\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(graph_def, name='')\n\n return graph\n\n\ndef freeze(args, config):\n \"\"\" Exports model to TF 1.x saved_model (simple_save) and freezes graph. \"\"\"\n\n input_tensor = tf.compat.v1.placeholder(dtype=tf.float32,\n shape=[1] + list(args.resolution[::-1]) + [3])\n model = pixel_link_model(tf.keras.Input(tensor=input_tensor), config=config)\n segm_logits, link_logits = model(input_tensor, training=False)\n link_logits = tf.reshape(link_logits, link_logits.shape.as_list()[0:3] +\n [config['num_neighbours'] * 2])\n\n export_folder = args.output_dir if args.output_dir else os.path.join(os.path.dirname(args.weights), 'export')\n\n with tf.compat.v1.Session() as sess:\n model.load_weights(args.weights)\n\n tf.compat.v1.saved_model.simple_save(sess, export_folder,\n inputs={'input': input_tensor},\n outputs={segm_logits.name[:-2]: segm_logits,\n link_logits.name[:-2]: link_logits})\n\n frozen_graph_path = os.path.join(export_folder, 'frozen_graph.pb')\n\n output_node_names = (segm_logits.name[:-2], link_logits.name[:-2])\n freeze_graph(\n input_graph=None,\n input_saver='',\n input_binary=True,\n input_checkpoint='',\n output_node_names=','.join(output_node_names),\n restore_op_name='save/restore_all',\n filename_tensor_name='save/Const:0',\n output_graph=frozen_graph_path,\n clear_devices=True,\n initializer_nodes='',\n input_meta_graph=None,\n input_saved_model_dir=export_folder,\n )\n\n graph = load_frozen_graph(frozen_graph_path)\n print_flops(graph)\n\n print('')\n print('Output tensor names for using in InferenceEngine:')\n print(' model/link_logits_/add')\n print(' model/segm_logits/add')\n print('')\n print('Run model_optimizer to get IR: mo.py --input_model {} --framework tf'.format(\n frozen_graph_path))\n\n\ndef main():\n \"\"\" Main function. \"\"\"\n args = arg_parser().parse_args()\n config = load_config(args.config)\n freeze(args, config)\n\n\nif __name__ == '__main__':\n main()\n" ]
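Below is a minimal, shape-level sketch of the per-frame logit averaging that MultiFrameBaseline in the code list above implements. TinyMultiFrameBaseline and its single-convolution "backbone" are hypothetical stand-ins for the repository's ResNet encoder, not its API; only the squash/average/unsquash flow is taken from the source.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyMultiFrameBaseline(nn.Module):
    # Per-frame 2D backbone -> global average pool -> linear head,
    # then a mean over the time axis, mirroring MultiFrameBaseline.forward.
    def __init__(self, n_classes=400):
        super().__init__()
        self.backbone = nn.Conv2d(3, 8, 3, padding=1)  # hypothetical stand-in for the ResNet
        self.fc = nn.Linear(8, n_classes)

    def forward(self, images):                 # (B, T, C, H, W)
        b, t = images.shape[:2]
        x = images.flatten(0, 1)               # like squash_dims over (0, 1): (B*T, C, H, W)
        x = F.adaptive_avg_pool2d(self.backbone(x), 1).flatten(1)
        ys = self.fc(x).view(b, t, -1)         # like unsquash_dim: back to (B, T, n_classes)
        return ys.mean(1)                      # average the per-frame logits

out = TinyMultiFrameBaseline()(torch.randn(2, 5, 3, 32, 32))
assert out.shape == (2, 400)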
[ [ "torch.nn.Linear", "torch.nn.functional.avg_pool2d", "torch.nn.Dropout2d" ], [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "torch.ones", "torch.eq", "torch.no_grad", "torch.allclose", "torch.ones_like" ], [ "torch.optim.lr_scheduler.MultiStepLR", "torch.max", "torch.randn", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.device", "torch.nn.DataParallel", "torch.DoubleTensor" ], [ "torch.norm" ], [ "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.keras.Input", "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.io.gfile.GFile", "tensorflow.compat.v1.saved_model.simple_save", "tensorflow.compat.v1.profiler.ProfileOptionBuilder.float_operation", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.GraphDef" ] ]
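The magnitude-sparsity tests in the record above check that a binary mask, thresholded on weight importance, zeroes out the smallest convolution weights. Here is a minimal sketch of that idea, assuming a normalized-magnitude importance of |w| / ||w||_2 (analogous to normed_magnitude); magnitude_binary_mask is a hypothetical helper, not NNCF's API.

import torch

def magnitude_binary_mask(weight, sparsity_level):
    # Assumed importance: |w| normalized by the L2 norm of the weight tensor.
    importance = weight.abs() / weight.norm()
    # Threshold chosen so roughly `sparsity_level` of the weights fall below it.
    threshold = torch.quantile(importance.flatten(), sparsity_level)
    return (importance > threshold).float()

w = torch.randn(2, 1, 2, 2)
mask = magnitude_binary_mask(w, sparsity_level=0.5)
sparse_w = w * mask  # the masked weight is what the convolution effectively uses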
KazukiOnodera/Microsoft-Malware-Prediction
[ "103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80", "103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80", "103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80", "103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80" ]
[ "py/trash/005-2_agg_each_lgb_1.py", "py/trash/010_share_in_OsVer.py", "py/trash/016_share_in_AvSig_v.py", "py/004_countEncoding_each.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 5 22:33:48 2019\n\n@author: Kazuki\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os, gc\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport sys\nsys.path.append(f'/home/{os.environ.get(\"USER\")}/PythonLibrary')\nimport lgbextension as ex\nimport lightgbm as lgb\nfrom multiprocessing import cpu_count\n\nfrom sklearn.metrics import roc_auc_score\n\nimport utils , utils_cat\nutils.start(__file__)\n#==============================================================================\n\nSEED = np.random.randint(9999)\nprint('SEED:', SEED)\n\n\nDROP = [\n# 'f002_EngineVersion', 'f002_AvSigVersion', 'f002_AppVersion',\n# \n# 'f003_AvSigVersion', 'f003_OsBuildLab', 'f003_Census_OSVersion',\n# 'f003_date_min', 'f003_date_max'\n ]\n\nNFOLD = 5\n\nLOOP = 1\n\nparam = {\n 'objective': 'binary',\n 'metric': 'auc',\n \n 'learning_rate': 0.05,\n 'max_depth': -1,\n 'num_leaves': 2**6 -1,\n 'max_bin': 127,\n \n 'min_child_weight': 10,\n 'min_data_in_leaf': 150,\n 'reg_lambda': 0.5, # L2 regularization term on weights.\n 'reg_alpha': 0.5, # L1 regularization term on weights.\n \n 'colsample_bytree': 0.9,\n 'subsample': 0.7,\n# 'nthread': 32,\n 'nthread': cpu_count(),\n 'bagging_freq': 1,\n 'verbose':-1,\n }\n\nNROUND = 500\nESR = 50\nVERBOSE_EVAL = 25\n\nTRAIN_TH = 0.6\nVALID_TH = 0.8\n\noutpath_tr = '../data/train_f005_1.f'\noutpath_te = '../data/test_f005_1.f'\n\n# =============================================================================\n# load\n# =============================================================================\n\nfiles_tr = sorted(glob('../data/f005/train_f005*.f'))[20:40]\n\n\n[print(i,f) for i,f in enumerate(files_tr)]\n\nX_train = pd.concat([\n pd.read_feather(f).sample(frac=0.5, random_state=SEED) for f in tqdm(files_tr, mininterval=60)\n ], axis=1)\ny_train = utils.load_target().sample(frac=0.5, random_state=SEED)['HasDetections']\n\nif len(DROP)>0:\n X_train.drop(DROP, axis=1, inplace=True)\n\n\n#adv = pd.read_csv('../data/oof_802_adv.py.csv').iloc[:8921483].oof\n#adv_th = adv.quantile(VALID_TH)\n#\n#X_valid = X_train[adv>adv.quantile(VALID_TH)]\n#y_valid = y_train[adv>adv.quantile(VALID_TH)]\n#\n#X_train = X_train[adv<=adv.quantile(TRAIN_TH)]\n#y_train = y_train[adv<=adv.quantile(TRAIN_TH)]\n\nif X_train.columns.duplicated().sum()>0:\n raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')\nprint('no dup :) ')\nprint(f'X_train.shape {X_train.shape}')\n#print(f'X_valid.shape {X_valid.shape}')\n\ngc.collect()\n\nCAT = list( set(X_train.columns)&set(utils_cat.ALL))\nprint(f'CAT: {CAT}')\n\n\n# =============================================================================\n# hold out\n# =============================================================================\n\n\ndtrain = lgb.Dataset(X_train, y_train.values, \n categorical_feature=CAT, \n free_raw_data=False)\n\n#dvalid = lgb.Dataset(X_valid, y_valid.values, \n# categorical_feature=CAT, \n# free_raw_data=False)\ngc.collect()\n\n\n\n\nmodel = lgb.train(params=param, train_set=dtrain, num_boost_round=NROUND, \n# valid_sets=[dtrain, dvalid], \n# valid_names=['train','valid'], \n# feval=ex.eval_auc,\n categorical_feature=CAT, \n# early_stopping_rounds=ESR,\n verbose_eval=VERBOSE_EVAL)\n\n\n\nimp = ex.getImp(model)\nimp['split'] /= imp['split'].max()\nimp['gain'] /= imp['gain'].max()\nimp['total'] = imp['split'] + imp['gain']\nimp.sort_values('total', ascending=False, inplace=True)\nimp.reset_index(drop=True, 
inplace=True)\n\n\nimp.to_csv(f'LOG/imp_{__file__}.csv', index=False)\n\n\n# =============================================================================\n# \n# =============================================================================\n\nimp = pd.read_csv('LOG/imp_005-2_agg_each_lgb_1.py.csv')\nCOL = imp.head(30).feature.tolist()\n\nX_train = pd.concat([\n pd.read_feather(f) for f in tqdm(files_tr, mininterval=60)\n ], axis=1)[COL]\n\nX_train.to_feather(outpath_tr)\n\n\n\nfiles_te = sorted(glob('../data/f005/test_f005*.f'))[20:40]\n\nX_test = pd.concat([\n pd.read_feather(f) for f in tqdm(files_te, mininterval=60)\n ], axis=1)[COL]\n\nX_test.to_feather(outpath_te)\n\n\n#==============================================================================\nutils.end(__file__)\n#utils.stop_instance()\n\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 23 08:05:56 2019\n\n@author: Kazuki\n\nshare in Census_OSVersion\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport os\nimport utils\nutils.start(__file__)\n#==============================================================================\n\n\nPREF = 'f010'\n\ncategorical_w_version = [\n 'Census_OSUILocaleIdentifier',\n 'AVProductsInstalled',\n 'Census_FirmwareVersionIdentifier',\n 'Wdft_IsGamer',\n 'Census_ThresholdOptIn',\n 'RtpStateBitfield',\n 'Census_IsSecureBootEnabled',\n 'AVProductsEnabled',\n 'HasTpm',\n 'IsProtected',\n 'Census_PrimaryDiskTypeName',\n 'PuaMode',\n 'DefaultBrowsersIdentifier',\n 'IsSxsPassiveMode',\n 'OrganizationIdentifier',\n 'Census_IsAlwaysOnAlwaysConnectedCapable',\n 'ProductName',\n 'GeoNameIdentifier',\n 'Census_IsVirtualDevice',\n 'Census_PowerPlatformRoleName',\n 'Census_IsTouchEnabled',\n 'Census_OSSkuName',\n 'OsPlatformSubRelease',\n 'Census_FlightRing',\n 'Census_OSEdition',\n 'Census_IsPortableOperatingSystem',\n 'Firewall',\n 'OsBuildLab',\n 'Census_DeviceFamily',\n 'Census_IsPenCapable',\n 'SMode',\n 'Platform',\n 'Census_IsFlightingInternal',\n 'Census_OEMNameIdentifier',\n 'Census_InternalBatteryType',\n 'OsBuild',\n 'Census_HasOpticalDiskDrive',\n 'Census_IsWIMBootEnabled',\n 'Census_OSBuildRevision',\n 'CityIdentifier',\n 'IeVerIdentifier',\n 'Census_ProcessorClass',\n 'OsSuite',\n 'Census_IsFlightsDisabled',\n 'Census_ChassisTypeName',\n 'LocaleEnglishNameIdentifier',\n 'Census_OSArchitecture',\n 'CountryIdentifier',\n 'Census_OSInstallLanguageIdentifier',\n 'Census_OSInstallTypeName',\n 'Census_OSBuildNumber',\n 'AutoSampleOptIn',\n 'OsVer',\n 'SkuEdition',\n 'UacLuaenable',\n 'Census_OEMModelIdentifier',\n 'Census_OSBranch',\n 'Processor',\n 'Census_ProcessorModelIdentifier',\n 'Census_ActivationChannel',\n 'IsBeta',\n 'Census_MDC2FormFactor',\n 'Census_OSWUAutoUpdateOptionsName',\n 'AVProductStatesIdentifier',\n 'Census_GenuineStateName',\n 'Census_FirmwareManufacturerIdentifier',\n 'Wdft_RegionIdentifier',\n 'Census_ProcessorManufacturerIdentifier', \n 'OsBuildLab_major',\n 'OsBuildLab_minor',\n 'OsBuildLab_build',\n 'OsBuildLab_architecture',\n \n \n 'EngineVersion',\n 'AppVersion',\n 'AvSigVersion',\n 'Census_OSVersion',\n \n ]\n\ncat = 'OsVer'\n\ndef fe(input_path, output_path):\n \"\"\"\n input_path = '../data/train.f'\n output_path = '../data/train_{PREF}.f'\n \"\"\"\n \n print('loading...', input_path)\n base = pd.read_feather(input_path)[categorical_w_version]\n \n feature = pd.DataFrame(index=base.index)\n \n for c in tqdm(categorical_w_version):\n if cat == c:\n continue\n col = [cat, c]\n df = base.groupby(col).size() / 
base.groupby(cat).size()\n df = df.reset_index()\n \n feature[f'{PREF}_{c}-in-{cat}'] = pd.merge(base[col], df, on=col, how='left')[0]\n \n print('writing...', output_path)\n feature.to_feather(output_path)\n \n return\n\n\n#tr = pd.read_feather('../data/train.f')[categorical_wo_version+['Census_OSVersion']]\n#te = pd.read_feather('../data/test.f')[categorical_wo_version+['Census_OSVersion']]\n\nfe('../data/train.f', f'../data/train_{PREF}.f')\nfe('../data/test.f', f'../data/test_{PREF}.f')\n\n\n#==============================================================================\nutils.end(__file__)\n#utils.stop_instance()\n\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 27 06:40:16 2019\n\n@author: Kazuki\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport os\nimport utils\nutils.start(__file__)\n#==============================================================================\n\n\nPREF = 'f016'\n\ncategorical_w_version = [\n 'Census_OSUILocaleIdentifier',\n 'AVProductsInstalled',\n 'Census_FirmwareVersionIdentifier',\n 'Wdft_IsGamer',\n 'Census_ThresholdOptIn',\n 'RtpStateBitfield',\n 'Census_IsSecureBootEnabled',\n 'AVProductsEnabled',\n 'HasTpm',\n 'IsProtected',\n 'Census_PrimaryDiskTypeName',\n 'PuaMode',\n 'DefaultBrowsersIdentifier',\n 'IsSxsPassiveMode',\n 'OrganizationIdentifier',\n 'Census_IsAlwaysOnAlwaysConnectedCapable',\n 'ProductName',\n 'GeoNameIdentifier',\n 'Census_IsVirtualDevice',\n 'Census_PowerPlatformRoleName',\n 'Census_IsTouchEnabled',\n 'Census_OSSkuName',\n 'OsPlatformSubRelease',\n 'Census_FlightRing',\n 'Census_OSEdition',\n 'Census_IsPortableOperatingSystem',\n 'Firewall',\n 'OsBuildLab',\n 'Census_DeviceFamily',\n 'Census_IsPenCapable',\n 'SMode',\n 'Platform',\n 'Census_IsFlightingInternal',\n 'Census_OEMNameIdentifier',\n 'Census_InternalBatteryType',\n 'OsBuild',\n 'Census_HasOpticalDiskDrive',\n 'Census_IsWIMBootEnabled',\n 'Census_OSBuildRevision',\n 'CityIdentifier',\n 'IeVerIdentifier',\n 'Census_ProcessorClass',\n 'OsSuite',\n 'Census_IsFlightsDisabled',\n 'Census_ChassisTypeName',\n 'LocaleEnglishNameIdentifier',\n 'Census_OSArchitecture',\n 'CountryIdentifier',\n 'Census_OSInstallLanguageIdentifier',\n 'Census_OSInstallTypeName',\n 'Census_OSBuildNumber',\n 'AutoSampleOptIn',\n 'OsVer',\n 'SkuEdition',\n 'UacLuaenable',\n 'Census_OEMModelIdentifier',\n 'Census_OSBranch',\n 'Processor',\n 'Census_ProcessorModelIdentifier',\n 'Census_ActivationChannel',\n 'IsBeta',\n 'Census_MDC2FormFactor',\n 'Census_OSWUAutoUpdateOptionsName',\n 'AVProductStatesIdentifier',\n 'Census_GenuineStateName',\n 'Census_FirmwareManufacturerIdentifier',\n 'Wdft_RegionIdentifier',\n 'Census_ProcessorManufacturerIdentifier', \n 'OsBuildLab_major',\n 'OsBuildLab_minor',\n 'OsBuildLab_build',\n 'OsBuildLab_architecture',\n \n \n 'EngineVersion',\n 'AppVersion',\n 'AvSigVersion',\n 'Census_OSVersion',\n \n ]\n\ncat = 'AvSigVersion'\n\ndef fe(input_path, output_path):\n \"\"\"\n input_path = '../data/train.f'\n output_path = '../data/train_{PREF}.f'\n \"\"\"\n \n print('loading...', input_path)\n base = pd.read_feather(input_path)[categorical_w_version]\n \n base[cat] = base[cat].map(lambda x: '.'.join(x.split('.')[:-2]) )\n \n feature = pd.DataFrame(index=base.index)\n \n for c in tqdm(categorical_w_version):\n if cat == c:\n continue\n col = [cat, c]\n df = base.groupby(col).size() / base.groupby(cat).size()\n df = df.reset_index()\n \n feature[f'{PREF}_{c}-in-{cat}'] = pd.merge(base[col], df, on=col, how='left')[0]\n 
\n print('writing...', output_path)\n feature.to_feather(output_path)\n \n return\n\n\n#tr = pd.read_feather('../data/train.f')[categorical_wo_version+['Census_OSVersion']]\n#te = pd.read_feather('../data/test.f')[categorical_wo_version+['Census_OSVersion']]\n\nfe('../data/train.f', f'../data/train_{PREF}.f')\nfe('../data/test.f', f'../data/test_{PREF}.f')\n\n\n#==============================================================================\nutils.end(__file__)\n#utils.stop_instance()\n\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 30 13:53:08 2019\n\n@author: kazuki.onodera\n\ncount category\n\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport gc, os\nfrom glob import glob\n\nfrom multiprocessing import cpu_count, Pool\n#from sklearn.preprocessing import LabelEncoder\n\nimport utils\n\nPREF = 'f004'\n\n\ncol_cat = [\n 'ProductName', # cardinality(6, 6), same distribuyion\n 'EngineVersion', # maybe time series\n 'AppVersion', # maybe time series\n 'AvSigVersion', # maybe time series\n 'DefaultBrowsersIdentifier', # cardinality(1730, 1548)\n 'AVProductStatesIdentifier', # cardinality(28970, 23492)\n 'CountryIdentifier', # cardinality(222, 222)\n 'CityIdentifier', # cardinality(107366, 105817)\n 'OrganizationIdentifier', # cardinality(49, 50)\n 'GeoNameIdentifier', # cardinality(292, 289)\n 'LocaleEnglishNameIdentifier', # cardinality(276, 278)\n 'Platform', # cardinality(4, 4)\n 'Processor', # cardinality(3, 3)\n 'OsVer', # cardinality(58, 44) maybe time series\n 'OsPlatformSubRelease', # cardinality(58, 44)\n 'OsBuildLab', # cardinality(663, 673)\n 'SkuEdition', # cardinality(8, 8)\n 'IeVerIdentifier', # cardinality(303, 294) maybe time series\n 'SmartScreen', # cardinality(21, 21)\n 'Census_MDC2FormFactor', # cardinality(13, 14)\n 'Census_DeviceFamily', # cardinality(3, 3)\n 'Census_OEMNameIdentifier', # cardinality(3832, 3685)\n 'Census_OEMModelIdentifier', # cardinality(175365, 167776)\n 'Census_ProcessorManufacturerIdentifier', # cardinality(7, 7)\n 'Census_ProcessorModelIdentifier', # cardinality(3428, 3438)\n 'Census_ProcessorClass', # cardinality(3, 3)\n 'Census_PrimaryDiskTypeName', # cardinality(4, 4)\n 'Census_ChassisTypeName', # cardinality(52, 48)\n 'Census_PowerPlatformRoleName', # cardinality(10, 10)\n 'Census_InternalBatteryType', # cardinality(78, 63)\n 'Census_OSVersion', # cardinality(469, 475)\n 'Census_OSArchitecture', # cardinality(3, 3)\n 'Census_OSBranch', # cardinality(32, 29)\n 'Census_OSEdition', # cardinality(33, 36)\n 'Census_OSSkuName', # cardinality(30, 31)\n 'Census_OSInstallTypeName', # cardinality(9, 9)\n 'Census_OSInstallLanguageIdentifier', # cardinality(39, 39)\n 'Census_OSUILocaleIdentifier', # cardinality(147, 139)\n 'Census_OSWUAutoUpdateOptionsName', # cardinality(6, 6)\n 'Census_GenuineStateName', # cardinality(5, 5)\n 'Census_ActivationChannel', # cardinality(6, 6)\n 'Census_FlightRing', # cardinality(10, 11)\n 'Census_FirmwareManufacturerIdentifier', # cardinality(712, 722)\n 'Census_FirmwareVersionIdentifier', # cardinality(50494, 49811)\n 'Wdft_RegionIdentifier', # cardinality(15, 15)\n \n 'OsBuild', # cardinality(58, 44)\n 'OsSuite', # cardinality(58, 44)\n \n 'OsBuildLab_major',\n 'OsBuildLab_minor',\n 'OsBuildLab_build',\n 'OsBuildLab_architecture',\n ]\n\ndef frequency_encoding(variable, is_train=True, normalize=False):\n \"\"\"\n https://www.kaggle.com/fabiendaniel/detecting-malwares-with-lgbm\n \"\"\"\n \n if is_train:\n t = tr[variable].value_counts(normalize=normalize).reset_index()\n else:\n t 
= te[variable].value_counts(normalize=normalize).reset_index()\n \n return t.to_dict()\n\ndef multi(args):\n \n c, outpath_tr, outpath_te = args\n \n tr_f = pd.DataFrame(index=tr.index)\n te_f = pd.DataFrame(index=te.index)\n \n # count train, test\n di_tr = frequency_encoding(c, True, False)\n di_te = frequency_encoding(c, False, False)\n tr_f[c+'_each'] = tr[c].map(lambda x: di_tr.get(x, np.nan)).rank(pct=True)\n te_f[c+'_each'] = te[c].map(lambda x: di_te.get(x, np.nan)).rank(pct=True)\n \n # count train, test\n# di_tr = frequency_encoding(c, True, True)\n# di_te = frequency_encoding(c, False, True)\n# tr_f[c+'_each_norm'] = tr[c].map(lambda x: di_tr.get(x, np.nan))\n# te_f[c+'_each_norm'] = te[c].map(lambda x: di_te.get(x, np.nan))\n \n \n utils.reduce_mem_usage(tr_f)\n utils.reduce_mem_usage(te_f)\n \n # output\n tr_f.add_prefix(PREF+'_').to_feather(outpath_tr)\n te_f.add_prefix(PREF+'_').to_feather(outpath_te)\n \n return\n\n# =============================================================================\n# main\n# =============================================================================\nif __name__ == \"__main__\":\n utils.start(__file__)\n \n tr = pd.read_feather('../data/train.f')[col_cat]\n te = pd.read_feather('../data/test.f')[col_cat]\n \n trte = pd.concat([tr, te], ignore_index=True)\n \n os.system(f'rm ../data/tmp_*_{PREF}*')\n argss = []\n for i,c in enumerate(col_cat):\n argss.append([c, f'../data/tmp_tr_{PREF}_{c}.f', f'../data/tmp_te_{PREF}_{c}.f'])\n \n pool = Pool( 10 )\n pool.map(multi, argss)\n pool.close()\n \n del tr, te, trte; gc.collect()\n \n # train\n df = pd.concat([pd.read_feather(f) for f in sorted(glob(f'../data/tmp_tr_{PREF}*'))], axis=1)\n df.to_feather(f'../data/train_{PREF}.f')\n del df; gc.collect()\n \n # test\n df = pd.concat([pd.read_feather(f) for f in sorted(glob(f'../data/tmp_te_{PREF}*'))], axis=1)\n df.to_feather(f'../data/test_{PREF}.f')\n \n os.system(f'rm ../data/tmp_*_{PREF}*')\n \n utils.end(__file__)\n" ]
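The f010/f016 scripts in the code list above build "share in group" features via base.groupby([cat, c]).size() / base.groupby(cat).size(). A small self-contained sketch of that ratio on toy data follows; the source keeps the unnamed 0 column from reset_index(), while this sketch names it 'share' for readability.

import pandas as pd

# Toy frame standing in for train.f; column names follow the scripts above.
base = pd.DataFrame({'OsVer': ['10', '10', '10', '8'],
                     'Processor': ['x64', 'x64', 'x86', 'x64']})

cat, c = 'OsVer', 'Processor'
# Share of each (OsVer, Processor) pair within its OsVer group; pandas aligns
# the division on the shared OsVer index level.
share = base.groupby([cat, c]).size() / base.groupby(cat).size()
share = share.reset_index(name='share')
feature = base.merge(share, on=[cat, c], how='left')['share']
# Rows with OsVer='10', Processor='x64' get 2/3: the share of that Processor
# value within its OsVer group, i.e. the f010/f016 "-in-" feature.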
[ [ "pandas.read_csv", "pandas.read_feather", "numpy.random.randint" ], [ "pandas.merge", "pandas.read_feather", "pandas.DataFrame" ], [ "pandas.merge", "pandas.read_feather", "pandas.DataFrame" ], [ "pandas.concat", "pandas.read_feather", "pandas.DataFrame" ] ]
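A compact sketch of what the f004 count encoding computes (frequency_encoding plus the rank(pct=True) step in multi). This simplified version builds the category-to-count mapping directly with value_counts().to_dict() rather than through reset_index(), so it illustrates the intended feature, not the source's exact dictionary layout.

import pandas as pd

tr = pd.DataFrame({'Platform': ['win10', 'win10', 'win10', 'win8', 'win7']})

# Map each category to its in-frame count, then take a percentile rank so the
# encoding stays comparable between train and test frequency scales.
counts = tr['Platform'].value_counts().to_dict()
tr['f004_Platform_each'] = tr['Platform'].map(counts).rank(pct=True)
# win10 rows share the highest rank; win7/win8 share the lowest.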
jdavidrcamacho/tedi
[ "f963e781e0a3c7be3df338a85a08ab974b6b8019", "f963e781e0a3c7be3df338a85a08ab974b6b8019" ]
[ "tedi/kernels.py", "tedi/utils.py" ]
[ "\"\"\"\nCovariance functions\n\"\"\"\nimport numpy as np\n#because it makes life easier down the line\npi, exp, sine, cosine, sqrt = np.pi, np.exp, np.sin, np.cos, np.sqrt\n__all__ = ['Constant', 'WhiteNoise', 'SquaredExponential' , 'Periodic', \n 'QuasiPeriodic', 'RationalQuadratic', 'Cosine', 'Exponential',\n 'Matern32', 'Matern52', 'RQP']\n\n\nclass kernel(object):\n \"\"\"\n Definition the kernels that will be used. To simplify my life all the\n kernels defined are the sum of kernel + white noise\n \"\"\"\n def __init__(self, *args):\n \"\"\" Puts all kernel arguments in an array pars. \"\"\"\n self.pars = np.array(args, dtype=float)\n def __call__(self, r):\n \"\"\" r = t - t' \"\"\"\n raise NotImplementedError\n def __repr__(self):\n \"\"\" Representation of each kernel instance \"\"\"\n return \"{0}({1})\".format(self.__class__.__name__,\n \", \".join(map(str, self.pars)))\n def __add__(self, b):\n return Sum(self, b)\n def __radd__(self, b):\n return self.__add__(b)\n\n def __mul__(self, b):\n return Multiplication(self, b)\n def __rmul__(self, b):\n return self.__mul__(b)\n\n\nclass _operator(kernel):\n \"\"\" To allow operations between two kernels \"\"\"\n def __init__(self, k1, k2):\n self.k1 = k1\n self.k2 = k2\n self.kerneltype = 'complex'\n @property\n def pars(self):\n return np.append(self.k1.pars, self.k2.pars)\n\n\nclass Sum(_operator):\n \"\"\" To allow the sum of kernels \"\"\"\n def __repr__(self):\n return \"{0} + {1}\".format(self.k1, self.k2)\n def __call__(self, r):\n return self.k1(r) + self.k2(r)\n\n\nclass Multiplication(_operator):\n \"\"\" To allow the multiplication of kernels \"\"\"\n def __repr__(self):\n return \"{0} * {1}\".format(self.k1, self.k2)\n def __call__(self, r):\n return self.k1(r) * self.k2(r)\n\n\n##### Constant kernel #########################################################\nclass Constant(kernel):\n \"\"\"\n This kernel returns its constant argument c \n \n Parameters\n ----------\n c: float\n Constant\n \"\"\"\n def __init__(self, c):\n super(Constant, self).__init__(c)\n self.c = c\n self.params_number = 1 #number of hyperparameters\n def __call__(self, r):\n return self.c**2 * np.ones_like(r)\n\n\n##### White noise kernel ######################################################\nclass WhiteNoise(kernel):\n \"\"\"\n Definition of the white noise kernel.\n \n Parameters\n ----------\n wn: float\n White noise amplitude\n \"\"\"\n def __init__(self, wn):\n super(WhiteNoise, self).__init__(wn)\n self.wn = wn\n self.type = 'stationary'\n self.derivatives = 1 #number of derivatives in this kernel\n self.params_number = 1 #number of hyperparameters\n def __call__(self, r):\n# return self.wn**2 * np.identity(len(r))\n return self.wn**2 * np.diag(np.diag(np.ones_like(r)))\n\n\n##### Squared exponential kernel ##############################################\nclass SquaredExponential(kernel):\n \"\"\"\n Squared Exponential kernel, also known as radial basis function or RBF \n kernel in other works.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell: float\n Length-scale\n \"\"\"\n def __init__(self, amplitude, ell):\n super(SquaredExponential, self).__init__(amplitude, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 * exp(-0.5 * r**2 / self.ell**2)\n\n\n##### Periodic kernel #########################################################\nclass Periodic(kernel):\n \"\"\"\n Definition of the periodic kernel.\n \n Parameters\n ----------\n 
amplitude: float\n Amplitude of the kernel\n ell: float\n Lenght scale\n P: float\n Period\n \"\"\"\n def __init__(self, amplitude, P, ell):\n super(Periodic, self).__init__(amplitude, P, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.P = P\n self.params_number = 3 #number of hyperparameters\n def __call__(self, r):\n return self.amplitude**2*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell**2)\n\n\n##### Quasi periodic kernel ###################################################\nclass QuasiPeriodic(kernel):\n \"\"\"\n This kernel is the product between the periodic and the squared \n exponential kernels. \n It is commonly known as the quasi-periodic kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell_e: float\n Evolutionary time scale\n ell_p: float\n Length scale of the periodic component\n P: float\n Kernel periodicity\n \"\"\"\n def __init__(self, amplitude, ell_e, P, ell_p):\n super(QuasiPeriodic, self).__init__(amplitude, ell_e, P, ell_p)\n self.amplitude = amplitude\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 4\n def __call__(self, r):\n return self.amplitude**2 *exp(- 2*sine(pi*np.abs(r)/self.P)**2 \\\n /self.ell_p**2 - r**2/(2*self.ell_e**2))\n\n\n##### Rational quadratic kernel ###############################################\nclass RationalQuadratic(kernel):\n \"\"\"\n Definition of the rational quadratic kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n alpha: float\n Amplitude of large and small scale variations\n ell: float\n Characteristic lenght scale to define the kernel \"smoothness\"\n \"\"\"\n def __init__(self, amplitude, alpha, ell):\n super(RationalQuadratic, self).__init__(amplitude, alpha, ell)\n self.amplitude = amplitude\n self.alpha = alpha\n self.ell = ell\n self.params_number = 3\n def __call__(self, r):\n return self.amplitude**2*(1+0.5*r**2/(self.alpha*self.ell**2))**(-self.alpha)\n\n\n##### Cosine kernel ###########################################################\nclass Cosine(kernel):\n \"\"\"\n Definition of the cosine kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n P: float\n Period\n \"\"\"\n def __init__(self, amplitude, P):\n super(Cosine, self).__init__(amplitude, P)\n self.amplitude = amplitude\n self.P = P\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 * cosine(2*pi*np.abs(r) / self.P)\n\n\n##### Exponential kernel ######################################################\nclass Exponential(kernel):\n \"\"\"\n Definition of the exponential kernel. This kernel arises when setting v=1/2\n in the matern family of kernels\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell: float\n Characteristic lenght scale\n \"\"\"\n def __init__(self, amplitude, ell):\n super(Exponential, self).__init__(amplitude, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 * exp(- np.abs(r)/self.ell)\n\n\n##### Matern 3/2 kernel #######################################################\nclass Matern32(kernel):\n \"\"\"\n Definition of the Matern 3/2 kernel. 
This kernel arise when setting v=3/2 \n in the matern family of kernels\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell: float\n Characteristic lenght scale\n \"\"\"\n def __init__(self, amplitude, ell):\n super(Matern32, self).__init__(amplitude, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 *(1 + np.sqrt(3)*np.abs(r)/self.ell) \\\n *np.exp(-np.sqrt(3)*np.abs(r) / self.ell)\n\n\n#### Matern 5/2 kernel ########################################################\nclass Matern52(kernel):\n \"\"\"\n Definition of the Matern 5/2 kernel. This kernel arise when setting v=5/2 \n in the matern family of kernels\n\n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell: float\n Characteristic lenght scale\n \"\"\"\n def __init__(self, amplitude, ell):\n super(Matern52, self).__init__(amplitude, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 * (1 + (3*np.sqrt(5)*self.ell*np.abs(r) \\\n +5*np.abs(r)**2)/(3*self.ell**2) ) \\\n *exp(-np.sqrt(5.0)*np.abs(r)/self.ell)\n\n\n##### RQP kernel ##############################################################\nclass RQP(kernel):\n \"\"\"\n Definition of the product between the periodic kernel and the rational \n quadratic kernel that we called RQP kernel.\n \n Info: Tests show that, if alpha goes to infinity, the RQP tends to the quasi\n periodic kernel, if alpha goes to zero it tends to the periodic kernel.\n There is a goldilocks region of alpha where this kernel is much better \n than the quasi periodic kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell_e and ell_p: float\n Aperiodic and periodic lenght scales\n alpha: float\n alpha of the rational quadratic kernel\n P: float\n Periodic repetitions of the kernel\n \"\"\"\n def __init__(self, amplitude, alpha, ell_e, P, ell_p):\n super(RQP, self).__init__(amplitude, alpha, ell_e, P, ell_p)\n self.amplitude = amplitude\n self.alpha = alpha\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 5\n def __call__(self, r):\n a = exp(- 2*sine(pi*np.abs(r)/self.P)**2 / self.ell_p**2)\n b = (1+ r**2/ (2*self.alpha*self.ell_e**2))#**self.alpha\n return self.amplitude**2 * a / (np.sign(b) * (np.abs(b)) ** self.alpha)\n\n\n##### Paciorek's kernel #######################################################\nclass Paciorek(kernel):\n \"\"\"\n Definition of the modified Paciorek's kernel (stationary version). 
\n \n Parameters\n ----------\n amplitude: float\n Amplitude/amplitude of the kernel\n ell_1: float\n First lenght scale\n ell_2: float\n Second lenght scale\n \"\"\"\n def __init__(self, amplitude, ell_1, ell_2):\n super(Paciorek, self).__init__(amplitude, ell_1, ell_2)\n self.amplitude = amplitude\n self.ell_1 = ell_1\n self.ell_2 = ell_2\n self.params_number = 3\n def __call__(self, r):\n a = sqrt(2*self.ell_1*self.ell_2 / (self.ell_1**2+self.ell_2**2))\n b = exp(-2*r*r / (self.ell_1**2+self.ell_2**2))\n return self.amplitude**2 * a *b\n\n\n###############################################################################\nclass PiecewiseSE(kernel):\n \"\"\"\n Product of the Squared Exponential and Piecewice kernels\n \n Parameters\n ----------\n eta1: float\n Amplitude of the kernel\n eta2: float\n Aperiodic lenght scale\n eta3: float\n Periodic repetitions of the kernel\n \"\"\"\n def __init__(self, eta1, eta2, eta3):\n super(PiecewiseSE, self).__init__(eta1, eta2, eta3)\n self.eta1 = eta1\n self.eta2 = eta2\n self.eta3 = eta3\n self.params_number = 3\n def __call__(self, r):\n SE_term = self.eta1**2 * exp(-0.5 * r**2 / self.eta2**2)\n r = r/(0.5*self.eta3)\n piecewise = (3*np.abs(r) +1) * (1 - np.abs(r))**3\n piecewise = np.where(np.abs(r)>1, 0, piecewise)\n k = SE_term*piecewise\n return k\n\n\n###############################################################################\nclass PiecewiseRQ(kernel):\n \"\"\"\n Product of the Rational Quadratic and Piecewice kernels\n \n Parameters\n ----------\n eta1: float\n Amplitude of the kernel\n alpha: float\n alpha of the rational quadratic kernel\n eta2: float\n Aperiodic lenght scale\n eta3: float\n Periodic repetitions of the kernel\n \"\"\"\n def __init__(self, eta1, alpha, eta2, eta3):\n super(PiecewiseRQ, self).__init__(eta1, alpha, eta2, eta3)\n self.eta1 = eta1\n self.alpha = alpha\n self.eta2 = eta2\n self.eta3 = eta3\n self.params_number = 3\n def __call__(self, r):\n RQ_term = self.eta1**2 * (1+0.5*r**2/(self.alpha*self.eta2**2))**(-self.alpha)\n r = r/(0.5*self.eta3)\n piecewise = (3*np.abs(r) +1) * (1 - np.abs(r))**3\n piecewise = np.where(np.abs(r)>1, 0, piecewise)\n k = RQ_term*piecewise\n return k\n\n\n##### New periodic kernel ######################################################\nclass NewPeriodic(kernel):\n \"\"\"\n Definition of a new periodic kernel derived from mapping the rational \n quadratic kernel to the 2D space u(x) = (cos x, sin x)\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n alpha2: float\n Alpha parameter of the rational quadratic mapping\n P: float\n Period\n l: float\n Periodic lenght scale\n \"\"\"\n def __init__(self, amplitude, alpha2, P, l):\n super(NewPeriodic, self).__init__(amplitude, alpha2, P, l)\n self.amplitude = amplitude\n self.alpha2 = alpha2\n self.P = P\n self.l = l\n self.params_number = 4\n def __call__(self, r):\n a = (1 + 2*sine(pi*np.abs(r)/self.P)**2/(self.alpha2*self.l**2))**(-self.alpha2)\n return self.amplitude**2 * a\n\n\n##### New periodic kernel ######################################################\nclass QuasiNewPeriodic(kernel):\n \"\"\"\n Definition of a new quasi-periodic kernel. 
Derived from mapping the rational\n quadratic kernel to the 2D space u(x) = (cos x, sin x) and multiplying it by\n a squared exponential kernel\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n alpha2: float\n Alpha parameter of the rational quadratic mapping\n ell_e: float\n Aperiodic lenght scale\n P: float\n Period\n ell_p: float\n Periodic lenght scale\n \"\"\"\n def __init__(self, amplitude, alpha2, ell_e, P, ell_p):\n super(QuasiNewPeriodic, self).__init__(amplitude, alpha2, ell_e, P, ell_p)\n self.amplitude = amplitude\n self.alpha2 = alpha2\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 5 #number of hyperparameters\n def __call__(self, r):\n a = (1 + 2*sine(pi*np.abs(r)/self.P)**2/(self.alpha2*self.ell_p**2))**(-self.alpha2)\n b = exp(-0.5 * r**2 / self.ell_e**2)\n return self.amplitude**2 * a * b\n\n\nclass NewRQP(kernel):\n \"\"\"\n Definition of a new quasi-periodic kernel. Derived from mapping the rational\n quadratic kernel to the 2D space u(x) = (cos x, sin x) and multiplying it by\n a rational quadratic kernel\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n alpha1: float\n Alpha parameter of the rational quadratic kernel\n ell_e: float\n Aperiodic lenght scale\n P: float\n Period\n ell_p: float\n Periodic lenght scale\n alpha2: float\n Another alpha parameter from the mapping \n \"\"\"\n def __init__(self, amplitude, alpha1, alpha2, ell_e, P, ell_p):\n super(NewRQP, self).__init__(amplitude, alpha1, alpha2,\n ell_e, P, ell_p)\n self.amplitude = amplitude\n self.alpha1 = alpha1\n self.alpha2 = alpha2\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 5 #number of hyperparameters\n def __call__(self, r):\n a = (1 + 2*sine(pi*np.abs(r)/self.P)**2/(self.alpha2*self.ell_p**2))**(-self.alpha2)\n b = (1+ 0.5*r**2/ (self.alpha1*self.ell_e**2))**(-self.alpha1)\n return self.amplitude**2 * a * b\n\n\n##### New periodic kernel ######################################################\nclass HarmonicPeriodic(kernel):\n \"\"\"\n Definition of a periodic kernel that models a periodic signal\n with a N number of harmonics\n \n Parameters\n ----------\n N: int\n Number of harmonics\n amplitude: float\n Amplitude of the kernel\n P: float\n Period\n ell: float\n Periodic lenght scale\n \"\"\"\n def __init__(self, N, amplitude, P, ell):\n super(HarmonicPeriodic, self).__init__(N, amplitude, P, ell)\n self.N = N\n self.amplitude = amplitude\n self.ell = ell\n self.P = P\n self.params_number = 4 #number of hyperparameters\n def __call__(self, r, s):\n # r = np.abs(r)\n # s = np.abs(s)\n first = sine((self.N+0.5)*2*pi*r/self.P) / 2*sine(pi*r/self.P)\n second = sine((self.N+0.5)*2*pi*s/self.P) / 2*sine(pi*s/self.P)\n firstPart = (first - second)**2\n first = 0.5/np.tan(pi*r/self.P)\n second = cosine((self.N+0.5)*2*pi*r/self.P) / 2*sine(pi*r/self.P)\n third = 0.5/np.tan(pi*s/self.P)\n fourth = cosine((self.N+0.5)*2*pi*s/self.P) / 2*sine(pi*s/self.P)\n secondPart = (first-second-third+fourth)**2\n return self.amplitude**2*exp(-0.5*(firstPart + secondPart)/self.ell**2)\n\n\n##### New quasi-periodic kernel ################################################\nclass QuasiHarmonicPeriodic(kernel):\n \"\"\"\n Definition of a quasi-periodic kernel that models a periodic signals \n with a N number of harmonics\n \n Parameters\n ----------\n N: int\n Number of harmonics\n amplitude: float\n Amplitude of the kernel\n ell_e: float\n Aperiodic lenght scale\n P: float\n Period\n ell_p: float\n Periodic 
length scale\n \"\"\"\n def __init__(self, N, amplitude, ell_e, P, ell_p):\n super(QuasiHarmonicPeriodic, self).__init__(N, amplitude, ell_e, P, ell_p)\n self.N = N\n self.amplitude = amplitude\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 5 #number of hyperparameters\n def __call__(self, r, s):\n ## Dirichlet-kernel terms: sin((N+1/2)x) / (2 sin(x/2)), with x = 2*pi*r/P\n first = sine((self.N+0.5)*2*pi*r/self.P) / (2*sine(pi*r/self.P))\n second = sine((self.N+0.5)*2*pi*s/self.P) / (2*sine(pi*s/self.P))\n firstPart = (first - second)**2\n first = 0.5/np.tan(pi*r/self.P)\n second = cosine((self.N+0.5)*2*pi*r/self.P) / (2*sine(pi*r/self.P))\n third = 0.5/np.tan(pi*s/self.P)\n fourth = cosine((self.N+0.5)*2*pi*s/self.P) / (2*sine(pi*s/self.P))\n secondPart = (first-second-third+fourth)**2\n a = exp(-0.5*(firstPart + secondPart)/self.ell_p**2)\n b = exp(-0.5 * (r-s)**2 / self.ell_e**2)\n return self.amplitude**2 * a * b\n\n\n##### New quasi-periodic kernel ################################################\nclass CosPeriodic(kernel):\n \"\"\"\n Periodic kernel derived by mapping the squared exponential kernel into the\n 2D space u(t) = [cos(t + phi), sin(t + phi)]\n \n SPOILER ALERT: If you do the math the phi terms disappear \n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n P: float\n Period\n ell: float\n Periodic length scale\n \"\"\"\n def __init__(self, amplitude, P, ell):\n super(CosPeriodic, self).__init__(amplitude, P, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.P = P\n self.params_number = 3 #number of hyperparameters\n def __call__(self, r):\n return self.amplitude**2*exp(-2*cosine(pi*np.abs(r)/self.P)**2/self.ell**2)\n\n\n##### New quasi-periodic kernel ################################################\nclass QuasiCosPeriodic(kernel):\n \"\"\"\n This kernel is the product between the CosPeriodic kernel \n and the squared exponential kernel; it is just another quasi-periodic \n kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell_e: float\n Evolutionary time scale\n ell_p: float\n Length scale of the periodic component\n P: float\n Kernel periodicity\n \"\"\"\n def __init__(self, amplitude, ell_e, P, ell_p):\n super(QuasiCosPeriodic, self).__init__(amplitude, ell_e, P, ell_p)\n self.amplitude = amplitude\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 4\n def __call__(self, r):\n return self.amplitude**2 *exp(- 2*cosine(pi*np.abs(r)/self.P)**2 \\\n /self.ell_p**2 - r**2/(2*self.ell_e**2))\n\n\n### END\n\n##### New periodic kernel ######################################################\nclass unknown(kernel):\n \"\"\"\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n P: float\n Period\n ell: float\n Periodic length scale\n phi: float\n Phase\n \"\"\"\n def __init__(self, amplitude, P, ell, phi):\n super(unknown, self).__init__(amplitude, P, ell, phi)\n self.amplitude = amplitude\n self.ell = ell\n self.P = P\n self.phi = phi\n self.params_number = 4 #number of hyperparameters\n def __call__(self, r, s):\n # r = np.abs(r)\n # s = np.abs(s)\n first = sine(2*pi*r/self.P - self.phi) - sine(2*pi*s/self.P - self.phi)\n second = sine(2*pi*r/self.P + self.phi) - sine(2*pi*s/self.P - self.phi)\n firstPart = first**2 + second**2\n return self.amplitude**2*exp(-0.5*(firstPart)/self.ell**2)\n", "\"\"\"\nA collection of useful functions\n\"\"\"\nfrom scipy.stats import invgamma\nfrom scipy.optimize import minimize\nimport numpy as np\n\n\n##### Semi amplitude calculation ##############################################\ndef semi_amplitude(period, Mplanet, Mstar, ecc):\n \"\"\"\n Calculates the semi-amplitude (K) caused by a planet 
with a given period \n and mass Mplanet, around a star of mass Mstar, with an eccentricity ecc.\n \n Parameters\n ----------\n period: float\n Period in years\n Mplanet: float\n Planet's mass in Jupiter masses; technically it is M.sin i\n Mstar: float\n Star mass in Solar masses\n ecc: float\n Eccentricity\n \n Returns\n -------\n : float\n Semi-amplitude K\n \"\"\"\n per = float(np.power(1/period, 1/3))\n Pmass = Mplanet / 1\n Smass = float(np.power(1/Mstar, 2/3))\n Ecc = 1 / np.sqrt(1 - ecc**2)\n return 28.435 * per * Pmass * Smass * Ecc\n\n\n##### Minimum mass calculation #################################################\ndef minimum_mass(P, K, ecc, Mstar):\n \"\"\"\n Calculates the minimum mass (M sin i) of a planet with a given period P,\n semi-amplitude K, and eccentricity ecc.\n Note: M planet << M star\n \n Parameters\n ----------\n P: float\n Period (days)\n K: float\n Semi-amplitude (m/s)\n ecc: float\n Eccentricity\n Mstar: float\n Star mass (Solar masses)\n \n Returns\n -------\n Msini: array\n Minimum mass defined as np.array([Jupiter masses, Earth masses])\n \"\"\"\n #Jmass = 317.8 *Emass\n Msini = 4.919e-3 *K *np.sqrt(1 - ecc**2) *np.cbrt(P) *np.cbrt(Mstar)**2\n return np.array([Msini, Msini *317.8])\n\n\n\n##### Keplerian function ######################################################\ndef keplerian(P=365, K=.1, e=0, w=np.pi, T=0, phi=None, gamma=0, t=None):\n \"\"\"\n keplerian() simulates the radial velocity signal of a planet in a \n Keplerian orbit around a star.\n \n Parameters\n ----------\n P: float\n Period in days\n K: float\n RV amplitude\n e: float\n Eccentricity\n w: float\n Longitude of the periastron\n T: float\n Zero phase\n phi: float\n Orbital phase\n gamma: float\n Constant system RV\n t: array\n Time of measurements\n \n Returns\n -------\n t: array\n Time of measurements\n RV: array\n RV signal generated\n \"\"\"\n if t is None:\n print('\\n TEMPORAL ERROR, time is nowhere to be found \\n')\n return 0, 0\n #mean anomaly\n if phi is None:\n mean_anom = [2*np.pi*(x1-T)/P for x1 in t]\n else:\n T = t[0] - (P*phi)/(2.*np.pi)\n mean_anom = [2*np.pi*(x1-T)/P for x1 in t]\n #eccentric anomaly -> E0=M + e*sin(M) + 0.5*(e**2)*sin(2*M)\n E0 = [x + e*np.sin(x) + 0.5*(e**2)*np.sin(2*x) for x in mean_anom]\n #mean anomaly -> M0=E0 - e*sin(E0)\n M0 = [x - e*np.sin(x) for x in E0]\n i = 0\n while i < 1000:\n #[x + y for x, y in zip(first, second)]\n calc_aux = [x2-y for x2,y in zip(mean_anom, M0)] \n E1 = [x3 + y/(1-e*np.cos(x3)) for x3,y in zip(E0, calc_aux)]\n M1 = [x4 - e*np.sin(x4) for x4 in E0] \n i += 1\n E0 = E1\n M0 = M1\n nu = [2*np.arctan(np.sqrt((1+e)/(1-e))*np.tan(x5/2)) for x5 in E0]\n RV = [ gamma + K*(e*np.cos(w)+np.cos(w+x6)) for x6 in nu]\n RV = [x for x in RV] #m/s \n return t, RV\n\n\n##### Phase-folding function ##################################################\ndef phase_folding(t, y, yerr, period):\n \"\"\"\n phase_folding() allows the phase folding (duh...) 
of given data\n according to a given period\n \n Parameters\n ----------\n t: array\n Time array\n y: array\n Measurements array\n yerr: array\n Measurement errors array\n period: float\n Period to fold the data\n \n Returns\n -------\n phase: array\n Phase\n folded_y: array\n Sorted measurements according to the phase\n folded_yerr: array\n Sorted errors according to the phase\n \"\"\"\n #divide the time by the period to convert to phase\n foldtimes = t / period\n #remove the whole number part of the phase\n foldtimes = foldtimes % 1\n if yerr is None:\n yerr = 0 * y\n #sort everything\n phase, folded_y, folded_yerr = zip(*sorted(zip(foldtimes, y, yerr)))\n return phase, folded_y, folded_yerr\n\n\n##### inverse gamma distribution ##############################################\nf = lambda x, lims: \\\n (np.array([invgamma(a=x[0], scale=x[1]).cdf(lims[0]) - 0.01,\n invgamma(a=x[0], scale=x[1]).sf(lims[1]) - 0.01])**2).sum()\n\ndef invGamma(lower, upper, x0=[1, 5], showit=False):\n \"\"\"\n Arguments\n ---------\n lower, upper : float\n The lower and upper limits between which we want 98% of the probability\n x0 : list, length 2\n Initial guesses for the parameters of the inverse gamma (a and scale)\n showit : bool\n Make a plot\n \"\"\"\n limits = [lower, upper]\n result = minimize(f, x0=x0, args=limits, method='L-BFGS-B',\n bounds=[(0, None), (0, None)], tol=1e-10)\n a, b = result.x\n if showit:\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(1, 1, constrained_layout=True)\n d = invgamma(a=a, scale=b)\n x = np.linspace(0.2*limits[0], 2*limits[1], 1000)\n ax.plot(x, d.pdf(x))\n ax.vlines(limits, 0, d.pdf(x).max())\n plt.show()\n return invgamma(a=a, scale=b)\n\n\n##### RMS ######################################################################\ndef rms(array):\n \"\"\" Root mean square of array\n Parameters\n ----------\n array: array\n Measurements\n \n Returns\n -------\n rms: float\n Root mean square of the array\n \"\"\"\n mu = np.average(array)\n rms = np.sqrt(np.sum((array - mu)**2) / array.size)\n return rms\n\ndef wrms(array, weights):\n \"\"\" Weighted root mean square of array, given weights \n \n Parameters\n ----------\n array: array\n Measurements\n weights: array\n weights = 1 / errors**2\n To add jitter do 1 / (errors**2 + jitter**2)\n \n Returns\n -------\n rms: float\n Weighted root mean square of the array\n \"\"\"\n mu = np.average(array, weights=weights)\n rms = np.sqrt(np.sum(weights * (array - mu)**2) / np.sum(weights)) \n return rms\n\n### END\n" ]
[ [ "numpy.ones_like", "numpy.abs", "numpy.sqrt", "numpy.tan", "numpy.sign", "numpy.append", "numpy.array" ], [ "scipy.stats.invgamma", "numpy.sqrt", "numpy.linspace", "numpy.power", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.sin", "numpy.cbrt", "numpy.tan", "scipy.optimize.minimize", "numpy.average", "numpy.array", "numpy.sum", "matplotlib.pyplot.show" ] ]
aynetdia/flair
[ "7e0958423ceb9744a87b0c27fd66f7be4caf0d99" ]
[ "flair/embeddings/document.py" ]
[ "from abc import abstractmethod\nimport logging\nfrom typing import List, Union\n\nimport torch\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom transformers import AutoTokenizer, AutoConfig, AutoModel, CONFIG_MAPPING, PreTrainedTokenizer\n\nimport flair\nfrom flair.data import Sentence\nfrom flair.embeddings.base import Embeddings, ScalarMix\nfrom flair.embeddings.token import TokenEmbeddings, StackedEmbeddings, FlairEmbeddings\nfrom flair.nn import LockedDropout, WordDropout\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nlog = logging.getLogger(\"flair\")\n\n\nclass DocumentEmbeddings(Embeddings):\n \"\"\"Abstract base class for all document-level embeddings. Every new type of document embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n def embedding_type(self) -> str:\n return \"sentence-level\"\n\n\nclass TransformerDocumentEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n model: str = \"bert-base-uncased\",\n fine_tune: bool = True,\n batch_size: int = 1,\n layers: str = \"-1\",\n layer_mean: bool = False,\n **kwargs\n ):\n \"\"\"\n Bidirectional transformer embeddings of words from various transformer architectures.\n :param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for\n options)\n :param fine_tune: If True, allows transformers to be fine-tuned during training\n :param batch_size: How many sentence to push through transformer at once. Set to 1 by default since transformer\n models tend to be huge.\n :param layers: string indicating which layers to take for embedding (-1 is topmost layer)\n :param layer_mean: If True, uses a scalar mix of layers as embedding\n \"\"\"\n super().__init__()\n\n # temporary fix to disable tokenizer parallelism warning\n # (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)\n import os\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\n # load tokenizer and transformer model\n self.tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained(model, **kwargs)\n if not 'config' in kwargs:\n config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)\n self.model = AutoModel.from_pretrained(model, config=config, **kwargs)\n else:\n self.model = AutoModel.from_pretrained(None, **kwargs)\n\n # model name\n self.name = 'transformer-document-' + str(model)\n self.base_model_name = str(model)\n\n # when initializing, embeddings are in eval mode by default\n self.model.eval()\n self.model.to(flair.device)\n\n # embedding parameters\n if layers == 'all':\n # send mini-token through to check how many layers the model has\n hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[-1]\n self.layer_indexes = [int(x) for x in range(len(hidden_states))]\n else:\n self.layer_indexes = [int(x) for x in layers.split(\",\")]\n\n self.layer_mean = layer_mean\n self.fine_tune = fine_tune\n self.static_embeddings = not self.fine_tune\n self.batch_size = batch_size\n\n # check whether CLS is at beginning or end\n self.initial_cls_token: bool = self._has_initial_cls_token(tokenizer=self.tokenizer)\n\n @staticmethod\n def _has_initial_cls_token(tokenizer: PreTrainedTokenizer) -> bool:\n # most models have CLS token as last token (GPT-1, GPT-2, TransfoXL, XLNet, XLM), but BERT is initial\n tokens = tokenizer.encode('a')\n 
initial_cls_token: bool = False\n if tokens[0] == tokenizer.cls_token_id: initial_cls_token = True\n return initial_cls_token\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n \"\"\"Add embeddings to all words in a list of sentences.\"\"\"\n\n # using list comprehension\n sentence_batches = [sentences[i * self.batch_size:(i + 1) * self.batch_size]\n for i in range((len(sentences) + self.batch_size - 1) // self.batch_size)]\n\n for batch in sentence_batches:\n self._add_embeddings_to_sentences(batch)\n\n return sentences\n\n def _add_embeddings_to_sentences(self, sentences: List[Sentence]):\n \"\"\"Extract sentence embedding from CLS token or similar and add to Sentence object.\"\"\"\n\n # gradients are enabled if fine-tuning is enabled\n gradient_context = torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()\n\n with gradient_context:\n\n # first, subtokenize each sentence and find out into how many subtokens each token was divided\n subtokenized_sentences = []\n\n # subtokenize sentences\n for sentence in sentences:\n # tokenize and truncate to max subtokens (TODO: check better truncation strategies)\n subtokenized_sentence = self.tokenizer.encode(sentence.to_tokenized_string(),\n add_special_tokens=True,\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n )\n\n subtokenized_sentences.append(\n torch.tensor(subtokenized_sentence, dtype=torch.long, device=flair.device))\n\n # find longest sentence in batch\n longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))\n\n # initialize batch tensors and mask\n input_ids = torch.zeros(\n [len(sentences), longest_sequence_in_batch],\n dtype=torch.long,\n device=flair.device,\n )\n mask = torch.zeros(\n [len(sentences), longest_sequence_in_batch],\n dtype=torch.long,\n device=flair.device,\n )\n for s_id, sentence in enumerate(subtokenized_sentences):\n sequence_length = len(sentence)\n input_ids[s_id][:sequence_length] = sentence\n mask[s_id][:sequence_length] = torch.ones(sequence_length)\n\n # put encoded batch through transformer model to get all hidden states of all encoder layers\n hidden_states = self.model(input_ids, attention_mask=mask)[-1] if len(sentences) > 1 \\\n else self.model(input_ids)[-1]\n\n # iterate over all subtokenized sentences\n for sentence_idx, (sentence, subtokens) in enumerate(zip(sentences, subtokenized_sentences)):\n\n index_of_CLS_token = 0 if self.initial_cls_token else len(subtokens) - 1\n\n cls_embeddings_all_layers: List[torch.FloatTensor] = \\\n [hidden_states[layer][sentence_idx][index_of_CLS_token] for layer in self.layer_indexes]\n\n # use scalar mix of embeddings if so selected\n if self.layer_mean:\n sm = ScalarMix(mixture_size=len(cls_embeddings_all_layers))\n sm_embeddings = sm(cls_embeddings_all_layers)\n\n cls_embeddings_all_layers = [sm_embeddings]\n\n # set the extracted embedding for the token\n sentence.set_embedding(self.name, torch.cat(cls_embeddings_all_layers))\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n return (\n len(self.layer_indexes) * self.model.config.hidden_size\n if not self.layer_mean\n else self.model.config.hidden_size\n )\n\n def __getstate__(self):\n # special handling for serializing transformer models\n config_state_dict = self.model.config.__dict__\n model_state_dict = self.model.state_dict()\n\n if not hasattr(self, \"base_model_name\"): self.base_model_name = 
self.name.split('transformer-document-')[-1]\n\n # serialize the transformer models and the constructor arguments (but nothing else)\n model_state = {\n \"config_state_dict\": config_state_dict,\n \"model_state_dict\": model_state_dict,\n \"embedding_length_internal\": self.embedding_length,\n\n \"base_model_name\": self.base_model_name,\n \"fine_tune\": self.fine_tune,\n \"batch_size\": self.batch_size,\n \"layer_indexes\": self.layer_indexes,\n \"layer_mean\": self.layer_mean,\n }\n\n return model_state\n\n def __setstate__(self, d):\n self.__dict__ = d\n\n # necessary for reverse compatibility with Flair <= 0.7\n if 'use_scalar_mix' in self.__dict__.keys():\n self.__dict__['layer_mean'] = d['use_scalar_mix']\n\n # special handling for deserializing transformer models\n if \"config_state_dict\" in d:\n\n # load transformer model\n config_class = CONFIG_MAPPING[d[\"config_state_dict\"][\"model_type\"]]\n loaded_config = config_class.from_dict(d[\"config_state_dict\"])\n\n # constructor arguments\n layers = ','.join([str(idx) for idx in self.__dict__['layer_indexes']])\n\n # re-initialize transformer word embeddings with constructor arguments\n embedding = TransformerDocumentEmbeddings(\n model=self.__dict__['base_model_name'],\n fine_tune=self.__dict__['fine_tune'],\n batch_size=self.__dict__['batch_size'],\n layers=layers,\n layer_mean=self.__dict__['layer_mean'],\n\n config=loaded_config,\n state_dict=d[\"model_state_dict\"],\n )\n\n # I have no idea why this is necessary, but otherwise it doesn't work\n for key in embedding.__dict__.keys():\n self.__dict__[key] = embedding.__dict__[key]\n\n else:\n model_name = self.__dict__['name'].split('transformer-document-')[-1]\n # reload tokenizer to get around serialization issues\n try:\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n except:\n pass\n self.tokenizer = tokenizer\n\n\nclass DocumentPoolEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n fine_tune_mode: str = \"none\",\n pooling: str = \"mean\",\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n :param embeddings: a list of token embeddings\n :param fine_tune_mode: if set to \"linear\" a trainable layer is added, if set to\n \"nonlinear\", a nonlinearity is added as well. 
Set this to make the pooling trainable.\n :param pooling: a string which can be any value from ['mean', 'max', 'min']\n \"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n self.__embedding_length = self.embeddings.embedding_length\n\n # optional fine-tuning on top of embedding layer\n self.fine_tune_mode = fine_tune_mode\n if self.fine_tune_mode in [\"nonlinear\", \"linear\"]:\n self.embedding_flex = torch.nn.Linear(\n self.embedding_length, self.embedding_length, bias=False\n )\n self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))\n\n if self.fine_tune_mode in [\"nonlinear\"]:\n self.embedding_flex_nonlinear = torch.nn.ReLU(self.embedding_length)\n self.embedding_flex_nonlinear_map = torch.nn.Linear(\n self.embedding_length, self.embedding_length\n )\n\n self.to(flair.device)\n\n if pooling not in ['min', 'max', 'mean']:\n raise ValueError(f\"Pooling operation for {pooling!r} is not defined\")\n\n self.pooling = pooling\n self.name: str = f\"document_{self.pooling}\"\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates\n only if embeddings are non-static.\"\"\"\n\n # if only one sentence is passed, convert to list of sentence\n if isinstance(sentences, Sentence):\n sentences = [sentences]\n\n self.embeddings.embed(sentences)\n\n for sentence in sentences:\n word_embeddings = []\n for token in sentence.tokens:\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)\n\n if self.fine_tune_mode in [\"nonlinear\", \"linear\"]:\n word_embeddings = self.embedding_flex(word_embeddings)\n\n if self.fine_tune_mode in [\"nonlinear\"]:\n word_embeddings = self.embedding_flex_nonlinear(word_embeddings)\n word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)\n\n if self.pooling == \"mean\":\n pooled_embedding = torch.mean(word_embeddings, 0)\n elif self.pooling == \"max\":\n pooled_embedding, _ = torch.max(word_embeddings, 0)\n elif self.pooling == \"min\":\n pooled_embedding, _ = torch.min(word_embeddings, 0)\n\n sentence.set_embedding(self.name, pooled_embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n def extra_repr(self):\n return f\"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}\"\n\n\nclass DocumentTFIDFEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n train_dataset,\n **vectorizer_params,\n ):\n \"\"\"The constructor for DocumentTFIDFEmbeddings.\n :param train_dataset: the train dataset which will be used to construct vectorizer\n :param vectorizer_params: parameters given to Scikit-learn's TfidfVectorizer constructor\n \"\"\"\n super().__init__()\n\n import numpy as np\n self.vectorizer = TfidfVectorizer(dtype=np.float32, **vectorizer_params)\n self.vectorizer.fit([s.to_original_text() for s in train_dataset])\n \n self.__embedding_length: int = len(self.vectorizer.vocabulary_)\n\n self.to(flair.device)\n\n self.name: str = f\"document_tfidf\"\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to every sentence in the given list of sentences.\"\"\"\n\n # if only one sentence 
is passed, convert to list of sentence\n if isinstance(sentences, Sentence):\n sentences = [sentences]\n\n raw_sentences = [s.to_original_text() for s in sentences]\n tfidf_vectors = torch.from_numpy(self.vectorizer.transform(raw_sentences).A)\n \n for sentence_id, sentence in enumerate(sentences):\n sentence.set_embedding(self.name, tfidf_vectors[sentence_id])\n \n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n\nclass DocumentRNNEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n hidden_size=128,\n rnn_layers=1,\n reproject_words: bool = True,\n reproject_words_dimension: int = None,\n bidirectional: bool = False,\n dropout: float = 0.5,\n word_dropout: float = 0.0,\n locked_dropout: float = 0.0,\n rnn_type=\"GRU\",\n fine_tune: bool = True,\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n :param embeddings: a list of token embeddings\n :param hidden_size: the number of hidden states in the rnn\n :param rnn_layers: the number of layers for the rnn\n :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear\n layer before putting them into the rnn or not\n :param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output\n dimension as before will be taken.\n :param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not\n :param dropout: the dropout value to be used\n :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used\n :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used\n :param rnn_type: 'GRU' or 'LSTM'\n \"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n\n self.rnn_type = rnn_type\n\n self.reproject_words = reproject_words\n self.bidirectional = bidirectional\n\n self.length_of_all_token_embeddings: int = self.embeddings.embedding_length\n\n self.static_embeddings = False if fine_tune else True\n\n self.__embedding_length: int = hidden_size\n if self.bidirectional:\n self.__embedding_length *= 4\n\n self.embeddings_dimension: int = self.length_of_all_token_embeddings\n if self.reproject_words and reproject_words_dimension is not None:\n self.embeddings_dimension = reproject_words_dimension\n\n self.word_reprojection_map = torch.nn.Linear(\n self.length_of_all_token_embeddings, self.embeddings_dimension\n )\n\n # bidirectional RNN on top of embedding layer\n if rnn_type == \"LSTM\":\n self.rnn = torch.nn.LSTM(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n batch_first=True,\n )\n else:\n self.rnn = torch.nn.GRU(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n batch_first=True,\n )\n\n self.name = \"document_\" + self.rnn._get_name()\n\n # dropouts\n self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None\n self.locked_dropout = (\n LockedDropout(locked_dropout) if locked_dropout > 0.0 else None\n )\n self.word_dropout = WordDropout(word_dropout) if word_dropout > 0.0 else None\n\n torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)\n\n self.to(flair.device)\n\n self.eval()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to all sentences in the given 
list of sentences. If embeddings are already added, update\n only if embeddings are non-static.\"\"\"\n\n # TODO: remove in future versions\n if not hasattr(self, \"locked_dropout\"):\n self.locked_dropout = None\n if not hasattr(self, \"word_dropout\"):\n self.word_dropout = None\n\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n self.rnn.zero_grad()\n\n # embed words in the sentence\n self.embeddings.embed(sentences)\n\n lengths: List[int] = [len(sentence.tokens) for sentence in sentences]\n longest_token_sequence_in_batch: int = max(lengths)\n\n pre_allocated_zero_tensor = torch.zeros(\n self.embeddings.embedding_length * longest_token_sequence_in_batch,\n dtype=torch.float,\n device=flair.device,\n )\n\n all_embs: List[torch.Tensor] = list()\n for sentence in sentences:\n all_embs += [\n emb for token in sentence for emb in token.get_each_embedding()\n ]\n nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)\n\n if nb_padding_tokens > 0:\n t = pre_allocated_zero_tensor[\n : self.embeddings.embedding_length * nb_padding_tokens\n ]\n all_embs.append(t)\n\n sentence_tensor = torch.cat(all_embs).view(\n [\n len(sentences),\n longest_token_sequence_in_batch,\n self.embeddings.embedding_length,\n ]\n )\n\n # before-RNN dropout\n if self.dropout:\n sentence_tensor = self.dropout(sentence_tensor)\n if self.locked_dropout:\n sentence_tensor = self.locked_dropout(sentence_tensor)\n if self.word_dropout:\n sentence_tensor = self.word_dropout(sentence_tensor)\n\n # reproject if set\n if self.reproject_words:\n sentence_tensor = self.word_reprojection_map(sentence_tensor)\n\n # push through RNN\n packed = pack_padded_sequence(\n sentence_tensor, lengths, enforce_sorted=False, batch_first=True\n )\n rnn_out, hidden = self.rnn(packed)\n outputs, output_lengths = pad_packed_sequence(rnn_out, batch_first=True)\n\n # after-RNN dropout\n if self.dropout:\n outputs = self.dropout(outputs)\n if self.locked_dropout:\n outputs = self.locked_dropout(outputs)\n\n # extract embeddings from RNN\n for sentence_no, length in enumerate(lengths):\n last_rep = outputs[sentence_no, length - 1]\n\n embedding = last_rep\n if self.bidirectional:\n first_rep = outputs[sentence_no, 0]\n embedding = torch.cat([first_rep, last_rep], 0)\n\n if self.static_embeddings:\n embedding = embedding.detach()\n\n sentence = sentences[sentence_no]\n sentence.set_embedding(self.name, embedding)\n\n def _apply(self, fn):\n\n # models that were serialized using torch versions older than 1.4.0 lack the _flat_weights_names attribute\n # check if this is the case and if so, set it\n for child_module in self.children():\n if isinstance(child_module, torch.nn.RNNBase) and not hasattr(child_module, \"_flat_weights_names\"):\n _flat_weights_names = []\n\n if child_module.__dict__[\"bidirectional\"]:\n num_direction = 2\n else:\n num_direction = 1\n for layer in range(child_module.__dict__[\"num_layers\"]):\n for direction in range(num_direction):\n suffix = \"_reverse\" if direction == 1 else \"\"\n param_names = [\"weight_ih_l{}{}\", \"weight_hh_l{}{}\"]\n if child_module.__dict__[\"bias\"]:\n param_names += [\"bias_ih_l{}{}\", \"bias_hh_l{}{}\"]\n param_names = [\n x.format(layer, suffix) for x in param_names\n ]\n _flat_weights_names.extend(param_names)\n\n setattr(child_module, \"_flat_weights_names\",\n _flat_weights_names)\n\n child_module._apply(fn)\n\n\nclass DocumentLMEmbeddings(DocumentEmbeddings):\n def __init__(self, flair_embeddings: List[FlairEmbeddings]):\n super().__init__()\n\n self.embeddings = 
flair_embeddings\n self.name = \"document_lm\"\n\n # IMPORTANT: add embeddings as torch modules\n for i, embedding in enumerate(flair_embeddings):\n self.add_module(\"lm_embedding_{}\".format(i), embedding)\n if not embedding.static_embeddings:\n self.static_embeddings = False\n\n self._embedding_length: int = sum(\n embedding.embedding_length for embedding in flair_embeddings\n )\n\n @property\n def embedding_length(self) -> int:\n return self._embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n # iterate over sentences\n for sentence in sentences:\n sentence: Sentence = sentence\n\n # if its a forward LM, take last state\n if embedding.is_forward_lm:\n sentence.set_embedding(\n embedding.name,\n sentence[len(sentence) - 1]._embeddings[embedding.name],\n )\n else:\n sentence.set_embedding(\n embedding.name, sentence[0]._embeddings[embedding.name]\n )\n\n return sentences\n\n\nclass SentenceTransformerDocumentEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n model: str = \"bert-base-nli-mean-tokens\",\n batch_size: int = 1,\n convert_to_numpy: bool = False,\n ):\n \"\"\"\n :param model: string name of models from SentencesTransformer Class\n :param name: string name of embedding type which will be set to Sentence object\n :param batch_size: int number of sentences to processed in one batch\n :param convert_to_numpy: bool whether the encode() returns a numpy array or PyTorch tensor\n \"\"\"\n super().__init__()\n\n try:\n from sentence_transformers import SentenceTransformer\n except ModuleNotFoundError:\n log.warning(\"-\" * 100)\n log.warning('ATTENTION! The library \"sentence-transformers\" is not installed!')\n log.warning(\n 'To use Sentence Transformers, please first install with \"pip install sentence-transformers\"'\n )\n log.warning(\"-\" * 100)\n pass\n\n self.model = SentenceTransformer(model)\n self.name = 'sentence-transformers-' + str(model)\n self.batch_size = batch_size\n self.convert_to_numpy = convert_to_numpy\n self.static_embeddings = True\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n sentence_batches = [sentences[i * self.batch_size:(i + 1) * self.batch_size]\n for i in range((len(sentences) + self.batch_size - 1) // self.batch_size)]\n\n for batch in sentence_batches:\n self._add_embeddings_to_sentences(batch)\n\n return sentences\n\n def _add_embeddings_to_sentences(self, sentences: List[Sentence]):\n\n # convert to plain strings, embedded in a list for the encode function\n sentences_plain_text = [sentence.to_plain_string() for sentence in sentences]\n\n embeddings = self.model.encode(sentences_plain_text, convert_to_numpy=self.convert_to_numpy)\n for sentence, embedding in zip(sentences, embeddings):\n sentence.set_embedding(self.name, embedding)\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n return self.model.get_sentence_embedding_dimension()\n" ]
[ [ "torch.mean", "torch.max", "torch.zeros", "torch.cat", "torch.nn.GRU", "torch.nn.utils.rnn.pad_packed_sequence", "torch.no_grad", "torch.nn.Dropout", "torch.ones", "torch.eye", "torch.nn.utils.rnn.pack_padded_sequence", "torch.tensor", "sklearn.feature_extraction.text.TfidfVectorizer", "torch.enable_grad", "torch.min", "torch.nn.Linear", "torch.nn.LSTM", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU" ] ]
Penn-TopGuNN/TopGuNN
[ "e736e467f1991a33c5ee54407665cbd9fef1e521" ]
[ "code/embed_and_filter.py" ]
[ "import numpy as np\nimport torch\nfrom transformers import BertTokenizer, BertModel\nfrom torch.utils.data import DataLoader \nimport util\nfrom util import MaskableList\nfrom collections import defaultdict, Counter\nfrom sentence_transformers import SentenceTransformer\nimport spacy\nimport time\nimport itertools\nfrom itertools import islice\nimport os\nimport argparse\nfrom sklearn.preprocessing import normalize\nfrom sqlitedict import SqliteDict\nimport ast\nimport pickle as pkl\nimport sqlite3\n\nnlp = spacy.load(\"en_core_web_lg\", disable=[\"ner\"]) ## you only need the parser and tagger\n## device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") ##.to(device)\n## NOTE: once debugging is ironed-out remove all print statements, csv file, and time study files, for AWS\n\n'''usage: (if you use embed_and_filter_job_launcher.py) \npython3 -u code/embed_and_filter_job_launcher.py \\\nyou must change the command line arguments inside the embed_and_filter_job_launcher.py file\n'''\n\n'''usage: (if you use embed_and_filter.sh)\npython3 -u code/embed_and_filter.py \\\n-job_id $i \\\n-outDir 'betatest/out/' \\\n-dataDir 'betatest/data/' \\\n-NUM_JOBS 2 \\\n-NUM_GPUS 2 \\\n-PROC_PER_GPU 1 \\\n-gpu_ids 0 1 \\\n-batch_size 175 \\\n-clip_len 225 \\\n-job_slices \"job_slices.pkl\" \\\n-query_sentences 'betatest/data/query_sentences.txt' \\\n-sentences_dict 'sentences.db' \\\n-trace_dict 'trace.db' \\\n-spacy_toks_dict 'spacy_toks.db' \\\n-spacy_pos_dict 'spacy_pos.db' \\\n-spacy_deps_dict 'spacy_deps.db' \\\n--BERT \\\n--MEAN \\\n> 'betatest/out/embed_and_filter_job'$i'.stdout' 2>&1\nalternative: | tee betatest/out/stdout_job_array.txt) 3>&1 1>&2 2>&3 | tee betatest/out/stderr_job_array.txt\n'''\n\n'''global argparser'''\ntotal_nword_embeddings, nskipped, time_elapsed_embedding, time_elapsed_filtering = 0, 0, 0, 0\nbert_tokenizer, bert_model = None, None\nparser = argparse.ArgumentParser(description='Processing list of files...')\nparser.add_argument('-outDir', required=True, help='Directory where all outfiles will be written to. Example: out/')\nparser.add_argument('-dataDir', required=True, help='Directory where all data files are located. Example: data/')\nparser.add_argument('-job_id', required=True, help='job_id responsible for x-partition of the amalgams.')\n# parser.add_argument('-NUM_JOBS', type=int, required=True, help='example: 5 (should match npartitions==NUM_GPUS)')\nparser.add_argument('-batch_size', type=int, required=True, help='example: 400 (400 sentences in each batch)')\nparser.add_argument('-clip_len', type=int, required=True, help='number of sentences to batch')\n#parser.add_argument('-NUM_GPUS', type=int, required=True, help='number of GPUs')\n#parser.add_argument('-PROC_PER_GPU', type=int, required=True, help='number of processes per GPU')\nparser.add_argument('-gpu_id', type=int, required=True, help='list gpu_ids available separated by white space, i.e. - 3 4 5 16')\nparser.add_argument('-job_slices', type=str, required=True, help=\"the job slices file output from create_amalgams.py. Example: 'job_slices.pkl'\")\nparser.add_argument('-query_sentences', type=str, required=True, help=\"query sentences filename. Example: 'query_sentences.txt'\")\nparser.add_argument('-sentences_dict', required=True, help=\"sqlite db filename. Example: 'sentences_dict.db'\")\nparser.add_argument('-trace_dict', required=True, help=\"sqlite db filename. Example: 'trace_dict.db'\")\nparser.add_argument('-spacy_toks_dict', required=True, help=\"sqlite db filename. 
Example: 'spacy_toks_dict.db'\")\nparser.add_argument('-spacy_pos_dict', required=True, help=\"sqlite db filename. Example: 'spacy_pos_dict.db'\")\nparser.add_argument('-spacy_deps_dict', required=True, help=\"sqlite db filename. Example: 'spacy_deps_dict.db'\")\nparser.add_argument('--BERT', action='store_false', dest='SBERT_flag', required=False, help='Enable BERT as the model')\nparser.add_argument('--MEAN', action='store_false', dest='HEAD_flag', required=False, help='Calculates embeddings using the mean of the subword units')\nparser.add_argument('--SBERT', action='store_true', dest='SBERT_flag', required=False, help='Enable SBERT as the model')\nparser.add_argument('--HEAD', action='store_true', dest='HEAD_flag', required=False, help='Calculates embedding using only the headword embedding of the subword unit')\nargs = parser.parse_args()\n\n'''global variables'''\n## load job partition file\njob_slices = util.pickle_load(args.outDir+args.job_slices)\nprint('\\nlen(job_slices): {}'.format(len(job_slices)))\n\n\n #################################################\n ########## Embed and Filter ############\n #################################################\n\n \ndef embed_sentences(round_id, sentence_batch, trace_batch, spacy_toks, spacy_pos, spacy_deps): ## , bert_tokenizer, bert_model, SBERT_flag, HEAD_flag\n ''' Takes in a batch of sentences and generates BERT embeddings for them. \n Args:\n Returns:\n Note:\n remove bert_tokenizer, bert_model, SBERT_flag, HEAD_flag from method signature when not running multiprocessing\n make sure SBERT_flag, and HEAD_flag are added back in\n '''\n global time_elapsed_embedding, time_elapsed_filtering\n global bert_tokenizer, bert_model, args\n start_embed_time = time.time()\n\n cur_words, cur_embeds = [], []\n content_tags = ['ADJ', 'ADV', 'NOUN', 'VERB']\n aux_tags = ['aux', 'auxpass', 'poss', 'possessive', 'cop', 'punct']\n\n ## tensor board, web ui (pytorch)\n ## perform lowercasing of all the sentences for embeddings\n sent_iter=iter(sentence_batch)\n lowercased_sentence_batch = [sent.lower() for sent in sent_iter]\n \n if args.SBERT_flag:\n\n return bert_model.encode([sentence])[0]\n\n else:\n\n ##pytorch logging library\n\n # try:\n\n ## batched encoding is a dict with keys = dict_keys(['input_ids', 'token_type_ids', 'attention_mask'])\n NER_encoded_batch = [bert_tokenizer.batch_encode_plus(tok) for tok in spacy_toks] ## bert_NER_toks \n # encoded_batch = bert_tokenizer.batch_encode_plus(lowercased_sentence_batch) ## regular bert_toks \n \n ## We want BERT to process our examples all at once (as one lowercased_sentence_batch).\n ## For that reason, we need to pad all lists to the same size, so we can represent the input as one 2-d array.\n padded_batch = bert_tokenizer.batch_encode_plus(lowercased_sentence_batch, pad_to_max_length=True)\n\n ## Grab indices and attn masks from the padded lowercased_sentence_batch.\n ## We need to tell BERT to ignore (mask) the padding we've added when it's processed as input.\n padded_input_ids, attention_masks = np.array(padded_batch['input_ids']), np.array(padded_batch['attention_mask'])\n\n NER_iter = iter(NER_encoded_batch)\n bert_NER_toks = [[bert_tokenizer.convert_ids_to_tokens(NER_unit)[1:-1] for NER_unit in cur_dict['input_ids']] for cur_dict in NER_iter]\n\n padded_tinput_ids = torch.tensor(padded_input_ids).cuda() ##batched padded_input_ids converted to torch tensors\n attention_masks = torch.tensor(attention_masks).cuda() ##batched attention_masks converted to torch tensors\n \n # 
print('padded_tinput_ids.size()[1] ', padded_tinput_ids.size())\n\n if padded_tinput_ids.size()[1] > args.clip_len:\n print('\\n\\nclipping sentences round {} '.format(round_id))\n # print('\\nclipped sentences: ', sentence_batch)\n # print('\\nbert_NER_toks: ', bert_NER_toks)\n # print(' after change round {} - type(padded_tinput_ids) and size: {} {} '.format(i, type(padded_tinput_ids), padded_tinput_ids.size()))\n # bert_NER_toks = [NER_unit[:args.clip_len] for NER_unit in bert_NER_toks]\n print('before padded_tinput_ids.size: ', padded_tinput_ids.size())\n padded_batch = bert_tokenizer.batch_encode_plus(lowercased_sentence_batch, max_length=args.clip_len, pad_to_max_length=True)\n padded_input_ids, attention_masks = np.array(padded_batch['input_ids']), np.array(padded_batch['attention_mask'])\n print('padded_input_ids.dtype, attention_masks.dtype: ', padded_input_ids.dtype, attention_masks.dtype)\n padded_tinput_ids = torch.tensor(padded_input_ids).cuda() ##batched padded_input_ids converted to torch tensors\n attention_masks = torch.tensor(attention_masks).cuda() ##batched attention_masks converted to torch tensors\n print('after padded_tinput_ids.size: ', padded_tinput_ids.size())\n print('---end clipped sentences---')\n print('\\n\\n')\n\n # print('after having been clipped - padded_tinput_ids.size: ', padded_tinput_ids.size())\n try:\n with torch.no_grad():\n embeds = bert_model(padded_tinput_ids, attention_mask=attention_masks)\n except RuntimeError:\n print('\\n\\nLine 143 CUDA out of memory. ')\n print('padded_tinput_ids.size: ', padded_tinput_ids.size())\n return -1\n\n ## Saves relevant word embeddings from the padding (removing [CLS] and [SEP] tokens)\n ## for each sentence, where the last token resides\n mask_iter = iter(np.array(attention_masks.cpu()))\n relevant_ids = np.array([[i,len(arr)-1-list(arr[::-1]).index(1)] for i, arr in enumerate(mask_iter)])\n ## changes [SEP] tokens attention to 0\n attention_masks[relevant_ids[:,0], relevant_ids[:,1]]=0 ## temp[:,0] return 0th col for all rows, temp[:,1]] return 1st col for all rows. 
Change corresponding [row, col] in arrays to 0\n ## changes [CLS] tokens attention to 0\n attention_masks[:,0]=0\n\n ## attention masks to be applied to relevant embeddings within each torch tensor\n mask_iter, embeds_iter = iter(attention_masks), iter(embeds[0]) \n relevant_embeds = [MaskableList(sublist)[submask] for sublist, submask in zip(embeds_iter, mask_iter)]\n\n ## reflects the bert_NER full-token words (not bert's subword units)\n pos_iter, dep_iter = iter(spacy_pos), iter(spacy_deps)\n relevant_annotations_mask = [(np.in1d(cur_pos,content_tags)) & (~np.in1d(cur_dep,aux_tags)) for cur_pos, cur_dep in zip(pos_iter,dep_iter)]\n\n embed_time = time.time() - start_embed_time\n time_elapsed_embedding += embed_time\n\n start_filter_time = time.time()\n\n if args.HEAD_flag:\n ## use only embedding of the full-token word for each subword unit\n\n for i in range(len(bert_NER_toks)):\n end_index,j,k=0,0,0\n while(j<len(relevant_embeds[i])):\n end_index=end_index+len(bert_NER_toks[i][k])\n if relevant_annotations_mask[i][k]:\n cur_words.append((k,spacy_toks[i][k],(trace_batch[i][0], int(trace_batch[i][1]))))\n ## stack, mean, and numpy 'em\n temp = torch.mean(torch.stack(relevant_embeds[i][j:j+1]),0).cpu().numpy()\n cur_embeds.append(temp)\n j,k=end_index,k+1 \n\n else:\n # use mean of subwords units to calculate embeddings\n try: \n for i in range(len(bert_NER_toks)):\n end_index,j,k=0,0,0 \n while(j<len(relevant_embeds[i])):\n end_index=end_index+len(bert_NER_toks[i][k])\n # if (round_id > 799 and round_id < 803) or (round_id > 984 and round_id < 988):\n # print('i {}, k {}, len(bert_NER_toks[i]) {}, bert_NER_toks[i][k] {}'.format(i, k, len(bert_NER_toks[i]), bert_NER_toks[i][k]))\n # print('bert_NER_toks[i]: ', bert_NER_toks[i])\n if relevant_annotations_mask[i][k]:\n cur_words.append((k,spacy_toks[i][k],(trace_batch[i][0], int(trace_batch[i][1]))))\n ## stack, mean, and numpy 'em\n temp = torch.mean(torch.stack(relevant_embeds[i][j:end_index]),0).cpu().numpy() ##is this end_index or end_index+1\n cur_embeds.append(temp)\n j,k=end_index,k+1 \n except IndexError as e:\n print('\\n\\n---IndexError: list index out of range!---')\n print(e)\n print('round_id: ', round_id)\n print('i, k:', i, k)\n print('len(sentence_batch), len(trace_batch[0]): ', len(sentence_batch), len(trace_batch[0]))\n print('len(bert_NER_toks)', len(bert_NER_toks))\n print('len(bert_NER_toks[i]): ', len(bert_NER_toks[i]))\n # print('\\nbert_NER_toks[i]: ', bert_NER_toks[i])\n # print('\\nbert_NER_toks', bert_NER_toks)\n print('--end current error--\\n\\n')\n\n filter_time = (time.time() - start_filter_time)\n time_elapsed_filtering += filter_time\n\n # print('round %d Time elapsed filtering content words:\\t%s' % (round_id, time.strftime(\"%H:%M:%S\", time.gmtime(filter_time))))\n # except AttributeError:\n # print('\\n\\n---AttributeError----NoneType object has no attribute batch_encode_plus!')\n # print('spacy_toks: ')\n # print(spacy_toks)\n # print('trace_batch: ')\n # print(trace_batch)\n # print('sentence_batch: ')\n # print(sentence_batch)\n # print('print(list(sentence_batch)):')\n # print(list(sentence_batch))\n # print('---end of line---\\n\\n')\n\n if round_id % 100 == 0:\n print('finished batch {}. 
len(words): {} len(embeds): {}'.format(round_id, len(cur_words), len(cur_embeds)))\n\n return cur_words, cur_embeds\n\ndef embed_all_batches(batched_sentences, batched_trace_info, batched_spacy_toks, batched_spacy_pos, batched_spacy_deps):\n '''Iterates through giga_dict and batches sentences to send of embed_all_sentences().\n Args:\n Returns:\n Note:\n '''\n\n global args, total_nword_embeddings\n\n words, word_embeds = [], []\n\n batch_iter, trace_iter, spacy_toks_iter, spacy_pos_iter, spacy_deps_iter = iter(batched_sentences), iter(batched_trace_info), iter(batched_spacy_toks), iter(batched_spacy_pos), iter(batched_spacy_deps)\n\n for round_id, (sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) in enumerate(zip(batch_iter, trace_iter, spacy_toks_iter, spacy_pos_iter, spacy_deps_iter)):\n \n if round_id % 100 == 0:\n print('\\nprocessing embedding {}... percentage processed {}'.format(round_id, (round_id/len(batched_sentences))*100))\n\n cur_words, cur_embeds = embed_sentences(round_id, sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) ## each batch is of size batch_size (see global var)\n\n words.extend(cur_words)\n word_embeds.extend(cur_embeds)\n\n total_nword_embeddings += len(cur_embeds)\n\n return words, word_embeds\n\n \ndef handle_batches(cur_sentences, cur_trace, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps, words_dict, word_embeds_fname):\n\n global args, job_slices, time_elapsed_embedding, time_elapsed_filtering\n\n embed_time, filtering_time = 0, 0\n batch_size, outDir = args.batch_size, args.outDir\n print('size of batch: ', batch_size)\n\n ## Reads in gigaword file\n # sentences, trace = read_file(gigaword_fname)\n print('len(sentences), len(trace), len(cur_spacy_toks), len(cur_spacy_pos), len(cur_spacy_deps): ', len(cur_sentences), len(cur_trace), len(cur_spacy_toks), len(cur_spacy_pos), len(cur_spacy_deps))\n\n ## use pytorch library DataLoader to batch sentences and nlp annotations\n batched_sentences = DataLoader(cur_sentences, batch_size=batch_size)\n batched_trace_info = DataLoader(cur_trace, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_toks = DataLoader(cur_spacy_toks, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_pos = DataLoader(cur_spacy_pos, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_deps = DataLoader(cur_spacy_deps, batch_size=batch_size, collate_fn=custom_collate) \n \n print('DataLoader (batch_size %d): %d %d %d %d %d' %(batch_size, len(batched_sentences), len(batched_trace_info), len(batched_spacy_toks), len(batched_spacy_pos), len(batched_spacy_deps)))\n\n ## Embeds sentences from all batches\n words, word_embeds = embed_all_batches(batched_sentences, batched_trace_info, batched_spacy_toks, batched_spacy_pos, batched_spacy_deps) \n\n print('these lengths should match: len(words): {}, len(word_embeds): {}, total_nword_embeds_check: {} '.format(len(words), len(word_embeds), total_nword_embeddings))\n\n word_dict_start = time.time()\n words_iter = iter(words)\n idx_iter = range(len(words))\n words_dict.update([(idx,word) for idx,word in zip(idx_iter,words_iter)])\n words_dict.commit()\n words_dict.close()\n word_dict_time = time.time() - word_dict_start\n\n ## memmap word_embeds\n memmap_start = time.time()\n fp = np.memmap(word_embeds_fname, dtype='float32', mode='w+', shape=(len(word_embeds),768))\n fp[:] = word_embeds[:]\n del fp\n memmap_time = time.time() - memmap_start\n\n words_dict_fname = 
str(words_dict)[str(words_dict).index(\"(\")+1:str(words_dict).index(\")\")]\n \n ## write shapes of each word_embedding job to a file to create word index later\n with open(args.outDir+'shapes.txt','a') as fout:\n fout.write(word_embeds_fname+' '+str(len(word_embeds))+'\\n')\n fout.write(words_dict_fname+' '+str(len(words))+'\\n')\n fout.close()\n\n # print stats for sanity check\n print('\\n---stats---:')\n print('total time embeddings docs: %s' % (time.strftime(\"%H:%M:%S\", time.gmtime(time_elapsed_embedding))))\n print('total time filtering content words: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(time_elapsed_filtering))))\n print('total time creating word_sqlite_dict: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(word_dict_time))))\n print('total elapsed copying word_embeds to memmap: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(memmap_time))))\n\ndef create_query_matrix():\n\n print('creating query matrix...')\n global args\n\n ## xq files (query data)\n xq_fname = args.outDir+'xq.dat' ## mmep query word embeddings\n # qsents_fname = args.outDir+'qsents.pkl' ## sentences_dict\n # qwords_fname = args.outDir+'qwords.pkl' ## qwords_dict\n qsentences_dict, qwords_dict = SqliteDict(args.outDir+'qsentences.db'), SqliteDict(args.outDir+'qwords.db')\n\n batch_size = args.batch_size\n print('batch_size for query_matrix: ', batch_size)\n xq, q_words, q_sentences, q_trace = [], [], [], [] \n\n ## use len(query sentences as the batch_size)\n ## read in query sentences\n with open(args.query_sentences, 'r') as fin:\n for sent_id, line in enumerate(fin.read().splitlines()):\n q_sentences.append(line.strip())\n q_trace.append((args.query_sentences, sent_id))\n\n print('len(q_sentences) and len(q_trace): ', len(q_sentences), len(q_trace))\n\n spacy_docs = list(nlp.pipe(q_sentences)) ##no nead to clip len for spacy toks for the query matrix\n spacy_toks = [[tok.text for tok in doc] for doc in spacy_docs]\n spacy_pos = [[tok.pos_ for tok in doc] for doc in spacy_docs]\n spacy_deps = [[tok.dep_ for tok in doc] for doc in spacy_docs]\n\n ## use pytorch library DataLoader to batch sentences and helper func batchify to batch spacy annotations\n batched_q_sentences = DataLoader(q_sentences, batch_size=batch_size)\n batched_q_trace_info = DataLoader(q_trace, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_toks = DataLoader(spacy_toks, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_pos = DataLoader(spacy_pos, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_deps = DataLoader(spacy_deps, batch_size=batch_size, collate_fn=custom_collate)\n\n print('DataLoader (batch_size %d): %d %d %d %d %d' %(batch_size, len(batched_q_sentences), len(batched_q_trace_info), len(batched_spacy_toks), len(batched_spacy_pos), len(batched_spacy_deps)))\n\n for round_id, (sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) in enumerate(zip(batched_q_sentences, batched_q_trace_info, batched_spacy_toks, batched_spacy_pos, batched_spacy_deps)):\n \n cur_words, cur_embeds = embed_sentences(round_id, sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) ## each batch is of size batch_size (see global var)\n\n\n q_words.extend(cur_words)\n xq.extend([normalize([embed])[0] for embed in cur_embeds])\n\n print('xq.shape: ', len(xq), len(xq[0]))\n\n qwords_dict_fname = str(qwords_dict)[str(qwords_dict).index(\"(\")+1:str(qwords_dict).index(\")\")]\n\n with open(args.outDir+'shapes.txt','a') as fout:\n fout.write(xq_fname+' 
'+str(len(xq))+'\\n')\n fout.write(qwords_dict_fname+' '+str(len(q_words))+'\\n')\n fout.close()\n\n ## memmap qword_embeds\n fp = np.memmap(xq_fname, dtype='float32', mode='w+', shape=(len(xq),768))\n fp[:] = xq[:]\n del fp\n\n qsentences_dict.update([(idx,sent) for idx,sent in enumerate(q_sentences)])\n qwords_dict.update([(idx,qword) for idx,qword in enumerate(q_words)])\n\n qsentences_dict.commit()\n qwords_dict.commit()\n\n qsentences_dict.close()\n qwords_dict.close()\n\n print('finished processing query matrix...')\n\n # return xq_fname, qsents_fname, qwords_fname\n\n\n\n #################################################\n ########### HELPER FUNCTIONS #########\n #################################################\n\ndef get_partition_slice(it, size, section):\n '''I could change this to only return the start and end index of each subarray instead of all the indices for that partition\n '''\n it = iter(it)\n return list(iter(lambda: tuple(islice(it, size)), ()))[section]\n\ndef get_slice(it, size):\n '''I could change this to only return the start and end index of each subarray instead of all the indices for that partition\n '''\n it = iter(it)\n return list(iter(lambda: tuple(islice(it, size)), ()))\n\ndef custom_collate(x):\n return x \n\ndef batchify(sentences, batch_size):\n\n batched_items, this_batch, = [], []\n for cur_item in islice(sentences,None,None):\n this_batch.append(cur_item)\n if len(this_batch) == batch_size:\n batched_items.append(this_batch)\n this_batch = []\n if len(this_batch) > 0:\n batched_items.append(this_batch)\n\n return batched_items\n\n\ndef fast_read_from_sqlite_dict(sqlite_dict, start_index, end_index):\n\n sqlite_dict_db = sqlite3.connect(sqlite_dict)\n sqlite_dict_db_cursor = sqlite_dict_db.cursor()\n sqlite_dict_db_cursor.execute(\"SELECT value FROM unnamed WHERE CAST(key as INTEGER) >= ? AND CAST(key as INTEGER) <= ?;\", (start_index, end_index))\n \n return [pkl.loads(x) for x in itertools.chain.from_iterable(sqlite_dict_db_cursor.fetchall())]\n\n\n# import itertools\n# trace_iter_1, trace_iter_2 = itertools.tee(trace_iter)\n# cur_trace_data = [(value, key) for key, value in zip(trace_iter_1, fast_read_from_sqlite_dict(trace_data, trace_iter_2))]\n\n## do sanity check in ipython on loading these dictionaries and reading in using fast read, find out how to do cur_trace\n## be careful about the indexing, b/c it looks like whatever is indexed in fast read includes the end index, whereas in trace_iter = list(range(start, end)) end does not. So you might need to do +1 or -1\n\n #################################################\n ########### Main #########\n ################################################# \n\n\ndef main(cur_sentences, cur_trace, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps):\n\n global args \n\n print('did you make it here?')\n\n ## xb files \n words_dict = SqliteDict(args.outDir+'words_job'+args.job_id+'.db')\n word_embeds_fname = args.outDir+'word_embeds_job'+args.job_id+'.dat'\n\n print('\\nprocessing files for job {}...'.format(args.job_id))\n\n start = time.time()\n\n ## Generates words and respective word_embeds for each partition of the sentence index \n ## and outputs them to outfolders to combine later for creating annoy index\n print('handling batches for job %s...' 
% (args.job_id))\n handle_batches(cur_sentences, cur_trace, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps, words_dict, word_embeds_fname)\n\n handle_batches_time = time.time()-start\n\n print('time handling batches: %s' % (time.strftime(\"%H:%M:%S\", time.gmtime(handle_batches_time))))\n\n print('finished job {}'.format(args.job_id))\n\n\nif __name__ == '__main__':\n\n main_begin = time.time()\n\n print('---argparser---:')\n for arg in vars(args):\n print(arg, '\\t', getattr(args, arg), '\\t', type(arg))\n\n # run processing on GPU <gpu_id>\n cuda_idx = args.gpu_id\n\n with torch.cuda.device(cuda_idx):\n\n ## initialize bert_tokenizer and bert_model as global variable for all jobs\n if args.SBERT_flag:\n print('loading SBERT')\n ## Loads SBERT\n bert_tokenizer = None\n bert_model = SentenceTransformer('bert-base-nli-mean-tokens') ## model = SentenceTransformer('bert-base-nli-stsb-mean-tokens')\n bert_model = bert_model.cuda()\n else:\n print('loading regular BERT')\n ## Loads BERT-base uncased\n ## BERT-Base, Uncased: 12-layer, 768-hidden, 12-heads, 110M parameters\n bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n bert_model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True, output_attentions=True)\n bert_model = bert_model.cuda()\n # bert_model = apex.amp.initialize(bert_model, opt_level=\"O2\").to(device)\n\n if int(args.job_id) == 1:\n print('\\nOnly processing query matrix during job {}: '.format(args.job_id))\n create_query_matrix()\n\n # print(\"l\\nen(sent_data): {}, len(trace_data): {}, len(spacy_toks): {} len(spacy_pos): {} len(spacy_deps): {}\".format(len(sent_data), len(trace_data), len(spacy_toks), len(spacy_pos), len(spacy_deps)))\n\n ## get correct partition for this job\n start_index, end_index = job_slices[int(args.job_id)-1] \n print('\\njob {} - start index: {} end index: {} len(cur_partition): {}'.format(args.job_id, start_index, end_index, end_index-start_index))\n\n start = time.time()\n cur_sent_data = fast_read_from_sqlite_dict(args.outDir+args.sentences_dict, start_index, end_index)\n trace_iter = iter(list(range(start_index, end_index+1)))\n cur_trace_data = [(value, key) for key, value in zip(trace_iter, fast_read_from_sqlite_dict(args.outDir+args.trace_dict, start_index, end_index))]\n cur_spacy_toks = fast_read_from_sqlite_dict(args.outDir+args.spacy_toks_dict, start_index, end_index)\n cur_spacy_pos = fast_read_from_sqlite_dict(args.outDir+args.spacy_pos_dict, start_index, end_index)\n cur_spacy_deps = fast_read_from_sqlite_dict(args.outDir+args.spacy_deps_dict, start_index, end_index)\n retrieve_time = time.time() - start\n\n print('total elapsed time retrieving the current partition: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(retrieve_time))))\n\n\n print(\"\\nlen(cur_sent_data): {}, len(cur_trace_data): {}\".format(len(cur_sent_data), len(cur_trace_data)))\n print(\"len(cur_spacy_toks): {} len(cur_spacy_pos): {} len(cur_spacy_deps): {}\".format(len(cur_spacy_toks), len(cur_spacy_pos), len(cur_spacy_deps)))\n\n main(cur_sent_data, cur_trace_data, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps)\n\n main_end = time.time() - main_begin\n print('total time inside main: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(main_end))))\n\n # ## start job on partition of the sentence index\n # split_size = int(len(sent_data)/args.NUM_JOBS)\n # cur_partition = get_slice(list(range(len(sent_data))), split_size, (int(args.job_id)-1))\n # print('job {} - start index {} end index {}'.format(args.job_id, cur_partition[0], 
cur_partition[-1]))\n # if len(cur_partition)>=2:\n # i, j = cur_partition[0], cur_partition[-1]\n # main(sent_data[i:j+1], trace_data[i:j+1], spacy_toks[i:j+1], spacy_pos[i:j+1], spacy_deps[i:j+1])\n # else:\n # i = cur_partition[0]\n # main(sent_data[i:], trace_data[i:], spacy_toks[i:], spacy_pos[i:], spacy_deps[i:])\n\n\n'''\n## To run this file:\n## Create virtual environment on nlpgrid\npython3 -m venv <path_for_virtual_environment>\n\n## Reno example:\npython3 -m venv ~/venv3/ ##becca's venv: giga\n\n## Activate virtual environment\nsource <path_for_virtual_environment>/bin/activate\n\n## Reno example:\nsource ~/venv3/bin/activate\ne.g. ~/giga/bin/activate ##becca's venv: giga\n\n## Install necessary packages\npip install nltk\npip install numpy\npip install tqdm\npip install torch==1.4.0\npip install transformers\npip install annoy\npip install faiss-gpu (see section on installing faiss for more info)\npip install sklearn\npip install -U sentence-transformers\npip install ndjson\npip install spacy\npython3 -m spacy download en_core_web_lg\n\n## confirm torch version\npython3\n>>>import torch\n>>>print(torch.__version__) # should be 1.4.0\n\n## installing faiss\n to check which cuda version you have on nlpgrid\n cat /usr/local/cuda/version.txt\n for CPU version\n conda install faiss-cpu -c pytorch\n for GPU version\n conda install faiss-gpu -c pytorch\n conda install faiss-gpu cudatoolkit=8.0 -c pytorch # For CUDA8\n conda install faiss-gpu cudatoolkit=9.0 -c pytorch # For CUDA9\n conda install faiss-gpu cudatoolkit=10.0 -c pytorch # For CUDA10\n for nlpgrid gpus\n pip install faiss-gpu\n## confirm faiss\n python3\n >>>import faiss\n >>>import numpy as np\n## confirm annoy\n python3\n >>>import annoy\n >>>from annoy import AnnoyIndex\n\n'''\n" ]
[ [ "numpy.in1d", "torch.utils.data.DataLoader", "torch.tensor", "sklearn.preprocessing.normalize", "torch.no_grad", "torch.cuda.device", "numpy.array", "torch.stack" ] ]
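For context on the record above: a minimal, self-contained sketch of the range-read pattern used by its fast_read_from_sqlite_dict helper. The file name demo.db and the toy payloads are hypothetical; the sketch assumes only the unnamed(key, value) table layout that sqlitedict creates, with stringified integer keys and pickled values.

import pickle as pkl
import sqlite3

# Build a tiny table with the same `unnamed(key, value)` layout sqlitedict uses.
db = sqlite3.connect('demo.db')
db.execute('CREATE TABLE IF NOT EXISTS unnamed (key TEXT PRIMARY KEY, value BLOB)')
db.executemany('INSERT OR REPLACE INTO unnamed VALUES (?, ?)',
               [(str(i), pkl.dumps('sentence %d' % i)) for i in range(10)])
db.commit()

# Range read, mirroring fast_read_from_sqlite_dict: the CAST makes the text
# keys compare numerically, and both bounds are inclusive.
rows = db.execute(
    'SELECT value FROM unnamed WHERE CAST(key as INTEGER) >= ? AND CAST(key as INTEGER) <= ?;',
    (3, 6)).fetchall()
print([pkl.loads(v) for (v,) in rows])  # ['sentence 3', ..., 'sentence 6']
db.close()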
dantaslab/resfams_update
[ "982091818a299d316811fe98c7656762be7284fb" ]
[ "Analysis/Precision-Recall_Analysis/scripts/add_tp_seqs.py" ]
[ "import sys\nimport pandas as pd\nimport csv\nimport argparse\n\n\ndef main(argv):\n args = parse_arguments(argv)\n out = args.out_path\n file1 = args.file1\n file2 = args.file2\n file3 = args.file3\n\n ddf1 = addSeqs(file1, file2)\n ddf2 = removeSeqs(ddf1, file3)\n\n with open(out, 'w+') as output:\n for row in ddf2:\n print(\"\\t\".join(row))\n output.write(\"\\t\".join(row)+\"\\n\")\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser(\n prog = 'add_tp_seqs.py',\n description = 'A program to map two files (csv or txt) to each other')\n parser.add_argument(\n '-f1', '--file1',\n help = 'Enter first file.',\n required = True\n )\n parser.add_argument(\n '-f2', '--file2',\n help = 'Enter fplist file.',\n required = True\n )\n parser.add_argument(\n '-f3', '--file3',\n help = 'Enter nhlist file.',\n required = True\n )\n parser.add_argument(\n '-o', '--outpath',\n dest = 'out_path',\n help = 'Enter path to dropped seqs file'\n )\n\n return parser.parse_args()\n\n\ndef addSeqs(file1, file2):\n df1 = pd.read_table(file1, sep=\"\\t\", names=['seq_name','dbID'])\n\n df2 = pd.read_table(file2, sep=\"\\t\", skiprows=2, usecols=[0,2], names=['seq_name','dbID'])\n\n # Collapse each sequence's IDs into a de-duplicated, pipe-joined string.\n ddf = pd.concat([df1,df2])\n ddf = ddf.groupby('seq_name')['dbID'].apply(list).map(set).str.join('|')\n ddf = ddf.reset_index()\n print(ddf.head())\n\n return ddf\n\n\ndef removeSeqs(ddf1, file3):\n\n data = ddf1.values.tolist()\n\n # Skip the two header lines, then collect the nhlist rows.\n nhlist = []\n with open(file3) as f3:\n reader = csv.reader(f3, delimiter='\\t')\n next(reader, None)\n next(reader, None)\n for row in reader:\n nhlist.append(row)\n\n ddf2 = []\n for row in data:\n if row[1] is not None:\n rfids = str(row[1]).split(\"|\")\n else:\n rfids = []\n\n for seq in nhlist:\n rf_id = seq[2]\n\n # Drop the matching ID without mutating rfids mid-iteration.\n if row[0] == seq[0] and rf_id in rfids:\n rfids.remove(rf_id)\n\n array = [row[0], \"|\".join(rfids)]\n ddf2.append(array)\n\n return ddf2\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" ]
[ [ "pandas.read_table", "pandas.concat" ] ]
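A toy illustration of the merge performed by addSeqs in the file above: two (seq_name, dbID) tables are concatenated, then each sequence's IDs are collapsed into one de-duplicated, pipe-joined field. The frames df1 and df2 here are made up.

import pandas as pd

df1 = pd.DataFrame({'seq_name': ['s1', 's2'], 'dbID': ['RF0001', 'RF0002']})
df2 = pd.DataFrame({'seq_name': ['s1', 's3'], 'dbID': ['RF0001', 'RF0003']})

ddf = pd.concat([df1, df2])
# apply(list) gathers each sequence's IDs, map(set) drops duplicates, and
# str.join('|') renders them as a single pipe-separated string.
ddf = ddf.groupby('seq_name')['dbID'].apply(list).map(set).str.join('|').reset_index()
print(ddf)  # s1 -> RF0001, s2 -> RF0002, s3 -> RF0003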
mendezr/MetPy
[ "0c75c14ac4af360b06ed7c4735b17709caef2449" ]
[ "metpy/io/tests/test_io_tools.py" ]
[ "# Copyright (c) 2016 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Test the `io.tools` module.\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom metpy.io._tools import hexdump, UnitLinker\nfrom metpy.io.cdm import Dataset\nfrom metpy.testing import assert_array_equal, ignore_deprecation\nfrom metpy.units import units\n\n\[email protected]()\n@ignore_deprecation\ndef test_var():\n \"\"\"Fixture to create a dataset and variable for tests.\"\"\"\n ds = Dataset()\n ds.createDimension('x', 5)\n var = ds.createVariable('data', 'f4', ('x',), 5)\n var[:] = np.arange(5)\n return var\n\n\ndef test_unit_linker(test_var):\n \"\"\"Test that UnitLinker successfully adds units.\"\"\"\n test_var.units = 'meters'\n new_var = UnitLinker(test_var)\n assert_array_equal(new_var[:], np.arange(5) * units.m)\n\n\ndef test_unit_linker_get_units(test_var):\n \"\"\"Test that we can get the units from UnitLinker.\"\"\"\n test_var.units = 'knots'\n new_var = UnitLinker(test_var)\n assert new_var.units == units('knots')\n\n\ndef test_unit_linker_missing(test_var):\n \"\"\"Test that UnitLinker works with missing units.\"\"\"\n new_var = UnitLinker(test_var)\n assert_array_equal(new_var[:], np.arange(5))\n\n\ndef test_unit_linker_bad(test_var):\n \"\"\"Test that UnitLinker ignores bad unit strings.\"\"\"\n test_var.units = 'badunit'\n new_var = UnitLinker(test_var)\n assert_array_equal(new_var[:], np.arange(5))\n\n\ndef test_unit_override(test_var):\n \"\"\"Test that we can override a variable's bad unit string.\"\"\"\n test_var.units = 'C'\n new_var = UnitLinker(test_var)\n new_var.units = 'degC'\n assert_array_equal(new_var[:], np.arange(5) * units.degC)\n\n\ndef test_unit_override_obj(test_var):\n \"\"\"Test that we can override with an object.\"\"\"\n test_var.units = 'C'\n new_var = UnitLinker(test_var)\n new_var.units = units.degC\n assert_array_equal(new_var[:], np.arange(5) * units.degC)\n\n\ndef test_attribute_forwarding(test_var):\n \"\"\"Test that we are properly able to access attributes from the variable.\"\"\"\n test_var.att = 'abc'\n new_var = UnitLinker(test_var)\n assert new_var.att == test_var.att\n\n\ndef test_hexdump():\n \"\"\"Test hexdump tool.\"\"\"\n data = bytearray([77, 101, 116, 80, 121])\n assert hexdump(data, 4, width=8) == '4D657450 79------ 0 0 MetPy'\n" ]
[ [ "numpy.arange" ] ]
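To make the expectation in test_hexdump above easier to verify by hand, here is a small stand-alone reconstruction of its hex and ASCII columns. It is illustrative only, not metpy's implementation; the two '0 0' fields in the expected string are offset columns, which this sketch omits.

data = bytearray([77, 101, 116, 80, 121])  # b'MetPy'

# Group the five bytes into 4-byte words, hex-encode, and pad with dashes.
words = [data[i:i + 4].hex().upper().ljust(8, '-') for i in range(0, 8, 4)]
ascii_column = ''.join(chr(b) for b in data)
print(' '.join(words), ascii_column)  # 4D657450 79------ MetPy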
mcx/open_spiel
[ "062cbfc07621343e7d77209cb421ba690328142b", "062cbfc07621343e7d77209cb421ba690328142b", "062cbfc07621343e7d77209cb421ba690328142b", "062cbfc07621343e7d77209cb421ba690328142b", "062cbfc07621343e7d77209cb421ba690328142b", "062cbfc07621343e7d77209cb421ba690328142b" ]
[ "open_spiel/python/algorithms/double_oracle_test.py", "open_spiel/python/egt/dynamics_test.py", "open_spiel/python/mfg/algorithms/mirror_descent.py", "open_spiel/python/environments/catch.py", "open_spiel/python/examples/bridge_supervised_learning.py", "open_spiel/python/pytorch/rcfr.py" ]
[ "# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for open_spiel.python.algorithms.double_oracle.\"\"\"\n\nfrom absl.testing import absltest\nimport numpy as np\n\nfrom open_spiel.python.algorithms import double_oracle\nimport pyspiel\n\n\nclass DoubleOracleTest(absltest.TestCase):\n\n def test_rock_paper_scissors(self):\n game = pyspiel.load_matrix_game(\"matrix_rps\")\n solver = double_oracle.DoubleOracleSolver(game)\n solution, iteration, value = solver.solve(initial_strategies=[[0], [0]])\n np.testing.assert_allclose(solution[0], np.ones(3)/3.)\n np.testing.assert_allclose(solution[1], np.ones(3)/3.)\n self.assertEqual(iteration, 3)\n self.assertAlmostEqual(value, 0.0)\n\n def test_single_step(self):\n game = pyspiel.load_matrix_game(\"matrix_rps\")\n solver = double_oracle.DoubleOracleSolver(game)\n solver.subgame_strategies = [[0], [0]]\n best_response, best_response_utility = solver.step()\n self.assertListEqual(best_response, [1, 1])\n self.assertListEqual(best_response_utility, [1.0, 1.0])\n\n def test_kuhn_poker(self):\n game = pyspiel.extensive_to_matrix_game(pyspiel.load_game(\"kuhn_poker\"))\n solver = double_oracle.DoubleOracleSolver(game)\n solution, iteration, value = solver.solve(initial_strategies=[[0], [0]])\n\n # check if solution is Nash\n exp_utilty = solution[0] @ solver.payoffs @ solution[1]\n self.assertAlmostEqual(max(solver.payoffs[0] @ solution[1]), exp_utilty[0])\n self.assertAlmostEqual(max(solution[0] @ solver.payoffs[1]), exp_utilty[1])\n\n self.assertEqual(iteration, 8)\n self.assertAlmostEqual(value, 0.0)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n", "# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for open_spiel.python.egt.dynamics.\"\"\"\n\nimport math\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as np\n\nfrom open_spiel.python.egt import dynamics\nfrom open_spiel.python.egt.utils import game_payoffs_array\nimport pyspiel\n\n\ndef _sum_j_x_j_ln_x_j_over_x_i(x):\n r\"\"\"Computes \\sum_j x_j ln(x_j / x_i).\"\"\"\n # By having a = x.reshape([1, -1]) and b = x.reshape([-1, 1]), we can use\n # broadcasting and have:\n # (a / b)[i, j] = x_j / x_i\n # thus giving:\n # \\sum_j x_j * log(x_j/ x_i) = sum(a * ln (a/b), axis=1)\n\n a = x.reshape([1, -1])\n b = x.reshape([-1, 1])\n\n return np.sum(a * np.log(np.divide(a, b)), axis=1)\n\n\ndef _q_learning_dynamics(composition, payoff, 
temperature):\n r\"\"\"An equivalent implementation of `dynamics.boltzmannq`.\"\"\"\n return 1 / temperature * dynamics.replicator(composition, payoff) + (\n composition * _sum_j_x_j_ln_x_j_over_x_i(composition))\n\n\nclass _InternalTest(absltest.TestCase):\n\n def test__sum_j_x_j_ln_x_j_over_x_i(self):\n # This tests a sub-function of `_q_learning_dynamics` to ensure its\n # internals are correct.\n x = np.asarray([1., 2., 3.])\n\n # We use 2 different formula to check we have the correct result.\n expected = [sum([x_j * math.log(x_j / x_i) for x_j in x]) for x_i in x]\n\n log = math.log\n expected_0 = 1. * log(1 / 1.) + 2 * log(2 / 1.) + 3 * log(3 / 1.)\n expected_1 = 1. * log(1 / 2.) + 2 * log(2 / 2.) + 3 * log(3 / 2.)\n expected_2 = 1. * log(1 / 3.) + 2 * log(2 / 3.) + 3 * log(3 / 3.)\n\n expected_2 = np.asarray([expected_0, expected_1, expected_2])\n np.testing.assert_array_equal(expected, expected_2)\n\n np.testing.assert_array_equal(expected, _sum_j_x_j_ln_x_j_over_x_i(x))\n\n\nclass DynamicsTest(parameterized.TestCase):\n\n def test_boltzmann_q(self):\n\n x = np.asarray([1 / 2, 1 / 2])\n payoff = np.asarray([[1, 0], [0, 1]], dtype=np.float32)\n temperature = 1\n\n np.testing.assert_array_equal(\n dynamics.boltzmannq(x, payoff, temperature),\n _q_learning_dynamics(x, payoff, temperature))\n\n def test_rd_rps_pure_fixed_points(self):\n game = pyspiel.load_matrix_game('matrix_rps')\n payoff_matrix = game_payoffs_array(game)\n rd = dynamics.replicator\n dyn = dynamics.SinglePopulationDynamics(payoff_matrix, rd)\n x = np.eye(3)\n np.testing.assert_allclose(dyn(x[0]), np.zeros((3,)))\n np.testing.assert_allclose(dyn(x[1]), np.zeros((3,)))\n np.testing.assert_allclose(dyn(x[2]), np.zeros((3,)))\n\n @parameterized.parameters(dynamics.replicator, dynamics.boltzmannq,\n dynamics.qpg)\n def test_dynamics_rps_mixed_fixed_point(self, func):\n game = pyspiel.load_matrix_game('matrix_rps')\n payoff_matrix = game_payoffs_array(game)\n dyn = dynamics.SinglePopulationDynamics(payoff_matrix, func)\n x = np.ones(shape=(3,)) / 3.\n np.testing.assert_allclose(dyn(x), np.zeros((3,)), atol=1e-15)\n\n def test_multi_population_rps(self):\n game = pyspiel.load_matrix_game('matrix_rps')\n payoff_matrix = game_payoffs_array(game)\n rd = dynamics.replicator\n dyn = dynamics.MultiPopulationDynamics(payoff_matrix, [rd] * 2)\n x = np.concatenate([np.ones(k) / float(k) for k in payoff_matrix.shape[1:]])\n np.testing.assert_allclose(dyn(x), np.zeros((6,)), atol=1e-15)\n\n def test_multi_population_three_populations(self):\n payoff_matrix = np.arange(3 * 2 * 3 * 4).reshape(3, 2, 3, 4)\n rd = dynamics.replicator\n dyn = dynamics.MultiPopulationDynamics(payoff_matrix, [rd] * 3)\n x = np.concatenate([np.ones(k) / float(k) for k in payoff_matrix.shape[1:]])\n self.assertEqual(dyn(x).shape, (9,))\n\n def test_multi_population_four_populations(self):\n payoff_matrix = np.zeros((4, 2, 2, 2, 2))\n payoff_matrix[:, 0, 0, 0, 0] = np.ones((4,))\n rd = dynamics.replicator\n dyn = dynamics.MultiPopulationDynamics(payoff_matrix, [rd] * 4)\n x = np.concatenate([np.ones(k) / float(k) for k in payoff_matrix.shape[1:]])\n avg_fitness = 1. 
/ float(2**4) # if all players play uniform random\n dx = dyn(x)\n np.testing.assert_allclose(dx[::2], np.ones((4,)) * avg_fitness / 2.)\n np.testing.assert_allclose(dx[1::2], np.ones((4,)) * (-avg_fitness) / 2.)\n\n def test_time_average(self):\n n, k = 10, 3\n traj = np.ones(shape=(n, k))\n time_avg = dynamics.time_average(traj)\n np.testing.assert_allclose(time_avg, np.ones(shape=(n, k)))\n\n traj[1::2] = -1. * traj[1::2]\n time_avg = dynamics.time_average(traj)\n np.testing.assert_allclose(time_avg[-1], np.zeros(shape=(k,)))\n np.testing.assert_allclose(time_avg[-2],\n 1. / (n - 1.) * np.ones(shape=(k,)))\n\n\nif __name__ == '__main__':\n absltest.main()\n", "# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Mirror Descent (https://arxiv.org/pdf/2103.00623.pdf).\"\"\"\nfrom typing import Optional\n\nimport numpy as np\n\nfrom open_spiel.python import policy as policy_std\nfrom open_spiel.python.mfg import value\nfrom open_spiel.python.mfg.algorithms import distribution\nimport pyspiel\n\n\ndef softmax_projection(logits):\n max_l = max(logits)\n exp_l = [np.exp(l - max_l) for l in logits]\n norm_exp = sum(exp_l)\n return [l / norm_exp for l in exp_l]\n\n\nclass ProjectedPolicy(policy_std.Policy):\n \"\"\"Project values on the policy simplex.\"\"\"\n\n def __init__(self, game, player_ids,\n cumulative_state_value: value.ValueFunction):\n \"\"\"Initializes the projected policy.\n\n Args:\n game: The game to analyze.\n player_ids: list of player ids for which this policy applies; each should\n be in the range 0..game.num_players()-1.\n cumulative_state_value: The cumulative state value to project.\n \"\"\"\n super(ProjectedPolicy, self).__init__(game, player_ids)\n self._cumulative_state_value = cumulative_state_value\n\n def cumulative_value(self, state, action=None):\n if action is None:\n return self._cumulative_state_value(\n state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID))\n else:\n new_state = state.child(action)\n return state.rewards()[0] + self._cumulative_state_value(\n new_state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID))\n\n def action_probabilities(self, state, player_id=None):\n action_logit = [(a, self.cumulative_value(state, action=a))\n for a in state.legal_actions()]\n action, logit = zip(*action_logit)\n prob = softmax_projection(logit)\n action_prob = zip(action, prob)\n return dict(action_prob)\n\n\nclass MirrorDescent(object):\n \"\"\"The mirror descent algorithm.\"\"\"\n\n def __init__(self,\n game,\n state_value: Optional[value.ValueFunction] = None,\n lr=0.01,\n root_state=None):\n \"\"\"Initializes mirror descent.\n\n Args:\n game: The game,\n state_value: A state value function. Default to TabularValueFunction.\n lr: The learning rate of mirror descent,\n root_state: The state of the game at which to start. 
If `None`, the game\n root state is used.\n \"\"\"\n self._game = game\n if root_state is None:\n self._root_states = game.new_initial_states()\n else:\n self._root_states = [root_state]\n self._policy = policy_std.UniformRandomPolicy(game)\n self._distribution = distribution.DistributionPolicy(game, self._policy)\n self._md_step = 0\n self._lr = lr\n\n self._state_value = (\n state_value if state_value else value.TabularValueFunction(game))\n self._cumulative_state_value = value.TabularValueFunction(game)\n\n def eval_state(self, state, learning_rate):\n \"\"\"Evaluate the value of a state and update the cumulative sum.\"\"\"\n state_str = state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)\n if self._state_value.has(state_str):\n return self._state_value(state_str)\n elif state.is_terminal():\n self._state_value.set_value(\n state_str,\n state.rewards()[state.mean_field_population()])\n self._cumulative_state_value.add_value(\n state_str, learning_rate * self._state_value(state_str))\n return self._state_value(state_str)\n elif state.current_player() == pyspiel.PlayerId.CHANCE:\n self._state_value.set_value(state_str, 0.0)\n for action, prob in state.chance_outcomes():\n new_state = state.child(action)\n self._state_value.add_value(\n state_str, prob * self.eval_state(new_state, learning_rate))\n self._cumulative_state_value.add_value(\n state_str, learning_rate * self._state_value(state_str))\n return self._state_value(state_str)\n elif state.current_player() == pyspiel.PlayerId.MEAN_FIELD:\n dist_to_register = state.distribution_support()\n dist = [\n self._distribution.value_str(str_state, 0.0)\n for str_state in dist_to_register\n ]\n new_state = state.clone()\n new_state.update_distribution(dist)\n self._state_value.set_value(\n state_str,\n state.rewards()[state.mean_field_population()] +\n self.eval_state(new_state, learning_rate))\n self._cumulative_state_value.add_value(\n state_str, learning_rate * self._state_value(state_str))\n return self._state_value(state_str)\n else:\n assert int(state.current_player()) >= 0, \"The player id should be >= 0\"\n v = 0.0\n for action, prob in self._policy.action_probabilities(state).items():\n new_state = state.child(action)\n v += prob * self.eval_state(new_state, learning_rate)\n self._state_value.set_value(\n state_str,\n state.rewards()[state.mean_field_population()] + v)\n self._cumulative_state_value.add_value(\n state_str, learning_rate * self._state_value(state_str))\n return self._state_value(state_str)\n\n def iteration(self, learning_rate=None):\n \"\"\"an iteration of Mirror Descent.\"\"\"\n self._md_step += 1\n # TODO(sertan): Fix me.\n self._state_value = value.TabularValueFunction(self._game)\n for state in self._root_states:\n self.eval_state(state, learning_rate if learning_rate else self._lr)\n self._policy = ProjectedPolicy(self._game,\n list(range(self._game.num_players())),\n self._cumulative_state_value)\n self._distribution = distribution.DistributionPolicy(\n self._game, self._policy)\n\n def get_policy(self):\n return self._policy\n\n @property\n def distribution(self) -> distribution.DistributionPolicy:\n return self._distribution\n", "# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Catch reinforcement learning environment.\"\"\"\n\nimport collections\nimport numpy as np\n\nfrom open_spiel.python import rl_environment\n\n# Actions\nNOOP = 0\nLEFT = 1\nRIGHT = 2\n\n_Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n\n\nclass Environment(object):\n \"\"\"A catch reinforcement learning environment.\n\n The implementation considers illegal actions: trying to move the paddle in the\n wall direction when next to a wall will incur in an invalid action and an\n error will be purposely raised.\n \"\"\"\n\n def __init__(self, discount=1.0, width=5, height=10, seed=None):\n self._rng = np.random.RandomState(seed)\n self._width = width\n self._height = height\n self._should_reset = True\n self._num_actions = 3\n\n # Discount returned at non-initial steps.\n self._discounts = [discount] * self.num_players\n\n def reset(self):\n \"\"\"Resets the environment.\"\"\"\n self._should_reset = False\n self._ball_pos = _Point(x=self._rng.randint(0, self._width - 1), y=0)\n self._paddle_pos = _Point(\n x=self._rng.randint(0, self._width - 1), y=self._height - 1)\n\n legal_actions = [NOOP]\n if self._paddle_pos.x > 0:\n legal_actions.append(LEFT)\n if self._paddle_pos.x < self._width - 1:\n legal_actions.append(RIGHT)\n\n observations = {\n \"info_state\": [self._get_observation()],\n \"legal_actions\": [legal_actions],\n \"current_player\": 0,\n }\n\n return rl_environment.TimeStep(\n observations=observations,\n rewards=None,\n discounts=None,\n step_type=rl_environment.StepType.FIRST)\n\n def step(self, actions):\n \"\"\"Updates the environment according to `actions` and returns a `TimeStep`.\n\n Args:\n actions: A singleton list with an integer, or an integer, representing the\n action the agent took.\n\n Returns:\n A `rl_environment.TimeStep` namedtuple containing:\n observation: singleton list of dicts containing player observations,\n each corresponding to `observation_spec()`.\n reward: singleton list containing the reward at this timestep, or None\n if step_type is `rl_environment.StepType.FIRST`.\n discount: singleton list containing the discount in the range [0, 1], or\n None if step_type is `rl_environment.StepType.FIRST`.\n step_type: A `rl_environment.StepType` value.\n \"\"\"\n if self._should_reset:\n return self.reset()\n\n if isinstance(actions, list):\n action = actions[0]\n elif isinstance(actions, int):\n action = actions\n else:\n raise ValueError(\"Action not supported.\", actions)\n\n # Update paddle position\n x, y = self._paddle_pos.x, self._paddle_pos.y\n if action == LEFT:\n x -= 1\n elif action == RIGHT:\n x += 1\n elif action != NOOP:\n raise ValueError(\"unrecognized action \", action)\n\n assert 0 <= x < self._width, (\n \"Illegal action detected ({}), new state: ({},{})\".format(action, x, y))\n self._paddle_pos = _Point(x, y)\n\n # Update ball position\n x, y = self._ball_pos.x, self._ball_pos.y\n if y == self._height - 1:\n done = True\n reward = 1.0 if x == self._paddle_pos.x else -1.0\n else:\n done = False\n y += 1\n reward = 0.0\n self._ball_pos = _Point(x, y)\n\n # Return observation\n step_type = (\n rl_environment.StepType.LAST if done else rl_environment.StepType.MID)\n self._should_reset = step_type == rl_environment.StepType.LAST\n\n legal_actions = [NOOP]\n if self._paddle_pos.x > 0:\n 
legal_actions.append(LEFT)\n if self._paddle_pos.x < self._width - 1:\n legal_actions.append(RIGHT)\n\n observations = {\n \"info_state\": [self._get_observation()],\n \"legal_actions\": [legal_actions],\n \"current_player\": 0,\n }\n\n return rl_environment.TimeStep(\n observations=observations,\n rewards=[reward],\n discounts=self._discounts,\n step_type=step_type)\n\n def _get_observation(self):\n board = np.zeros((self._height, self._width), dtype=np.float32)\n board[self._ball_pos.y, self._ball_pos.x] = 1.0\n board[self._paddle_pos.y, self._paddle_pos.x] = 1.0\n return board.flatten()\n\n def observation_spec(self):\n \"\"\"Defines the observation provided by the environment.\n\n Each dict member will contain its expected structure and shape.\n\n Returns:\n A specification dict describing the observation fields and shapes.\n \"\"\"\n return dict(\n info_state=tuple([self._height * self._width]),\n legal_actions=(self._num_actions,),\n current_player=(),\n )\n\n def action_spec(self):\n \"\"\"Defines action specifications.\n\n Specifications include action boundaries and their data type.\n\n Returns:\n A specification dict containing action properties.\n \"\"\"\n return dict(num_actions=self._num_actions, min=0, max=2, dtype=int)\n\n @property\n def num_players(self):\n return 1\n\n @property\n def is_turn_based(self):\n return False\n", "# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Train a policy net on bridge bidding based on a dataset of trajectories.\n\nSuitable data for training, generated by WBridge5, may be downloaded from:\nhttps://console.cloud.google.com/storage/browser/openspiel-data/bridge\n\"\"\"\n\nimport os\nimport pickle\nfrom typing import Any, Tuple\n\nfrom absl import app\nfrom absl import flags\n\nimport haiku as hk\nimport jax\nfrom jax import numpy as jnp\nimport numpy as np\nimport optax\n\nimport pyspiel\n\nOptState = Any\nParams = Any\n\nFLAGS = flags.FLAGS\nGAME = pyspiel.load_game('bridge(use_double_dummy_result=false)')\nNUM_ACTIONS = 38\nMIN_ACTION = 52\nNUM_CARDS = 52\nNUM_PLAYERS = 4\nTOP_K_ACTIONS = 5 # How many alternative actions to display\n\nflags.DEFINE_integer('iterations', 100000, 'Number of iterations')\nflags.DEFINE_string('data_path', None, 'Location for data')\nflags.DEFINE_integer('eval_every', 10000, 'How often to evaluate the policy')\nflags.DEFINE_integer('num_examples', 3,\n 'How many examples to print per evaluation')\nflags.DEFINE_integer('train_batch', 128, 'Batch size for training step')\nflags.DEFINE_integer('eval_batch', 10000, 'Batch size when evaluating')\nflags.DEFINE_integer('rng_seed', 42, 'Seed for initial network weights')\nflags.DEFINE_string('save_path', None, 'Location for saved networks')\n\n\ndef _no_play_trajectory(line: str):\n \"\"\"Returns the deal and bidding actions only given a text trajectory.\"\"\"\n actions = [int(x) for x in line.split(' ')]\n # Usually a trajectory is NUM_CARDS chance events for the deal, plus one\n # action for every bid of the 
auction, plus NUM_CARDS actions for the play\n # phase. Exceptionally, if all NUM_PLAYERS players Pass, there is no play\n # phase and the trajectory is just of length NUM_CARDS + NUM_PLAYERS.\n if len(actions) == NUM_CARDS + NUM_PLAYERS:\n return tuple(actions)\n else:\n return tuple(actions[:-NUM_CARDS])\n\n\ndef make_dataset(file: str):\n \"\"\"Creates dataset as a generator of single examples.\"\"\"\n all_trajectories = [_no_play_trajectory(line) for line in open(file)]\n while True:\n np.random.shuffle(all_trajectories)\n for trajectory in all_trajectories:\n action_index = np.random.randint(52, len(trajectory))\n state = GAME.new_initial_state()\n for action in trajectory[:action_index]:\n state.apply_action(action)\n yield (state.observation_tensor(), trajectory[action_index] - MIN_ACTION)\n\n\ndef batch(dataset, batch_size: int):\n \"\"\"Creates a batched dataset from a one-at-a-time dataset.\"\"\"\n observations = np.zeros([batch_size] + GAME.observation_tensor_shape(),\n np.float32)\n labels = np.zeros(batch_size, dtype=np.int32)\n while True:\n for batch_index in range(batch_size):\n observations[batch_index], labels[batch_index] = next(dataset)\n yield observations, labels\n\n\ndef one_hot(x, k):\n \"\"\"Returns a one-hot encoding of `x` of size `k`.\"\"\"\n return jnp.array(x[..., jnp.newaxis] == jnp.arange(k), dtype=np.float32)\n\n\ndef net_fn(x):\n \"\"\"Haiku module for our network.\"\"\"\n net = hk.Sequential([\n hk.Linear(1024),\n jax.nn.relu,\n hk.Linear(1024),\n jax.nn.relu,\n hk.Linear(1024),\n jax.nn.relu,\n hk.Linear(1024),\n jax.nn.relu,\n hk.Linear(NUM_ACTIONS),\n jax.nn.log_softmax,\n ])\n return net(x)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n # Make the network.\n net = hk.without_apply_rng(hk.transform(net_fn))\n\n # Make the optimiser.\n opt = optax.adam(1e-4)\n\n @jax.jit\n def loss(\n params: Params,\n inputs: np.ndarray,\n targets: np.ndarray,\n ) -> jnp.DeviceArray:\n \"\"\"Cross-entropy loss.\"\"\"\n assert targets.dtype == np.int32\n log_probs = net.apply(params, inputs)\n return -jnp.mean(one_hot(targets, NUM_ACTIONS) * log_probs)\n\n @jax.jit\n def accuracy(\n params: Params,\n inputs: np.ndarray,\n targets: np.ndarray,\n ) -> jnp.DeviceArray:\n \"\"\"Classification accuracy.\"\"\"\n predictions = net.apply(params, inputs)\n return jnp.mean(jnp.argmax(predictions, axis=-1) == targets)\n\n @jax.jit\n def update(\n params: Params,\n opt_state: OptState,\n inputs: np.ndarray,\n targets: np.ndarray,\n ) -> Tuple[Params, OptState]:\n \"\"\"Learning rule (stochastic gradient descent).\"\"\"\n _, gradient = jax.value_and_grad(loss)(params, inputs, targets)\n updates, opt_state = opt.update(gradient, opt_state)\n new_params = optax.apply_updates(params, updates)\n return new_params, opt_state\n\n def output_samples(params: Params, max_samples: int):\n \"\"\"Output some cases where the policy disagrees with the dataset action.\"\"\"\n if max_samples == 0:\n return\n count = 0\n with open(os.path.join(FLAGS.data_path, 'test.txt')) as f:\n lines = list(f)\n np.random.shuffle(lines)\n for line in lines:\n state = GAME.new_initial_state()\n actions = _no_play_trajectory(line)\n for action in actions:\n if not state.is_chance_node():\n observation = np.array(state.observation_tensor(), np.float32)\n policy = np.exp(net.apply(params, observation))\n probs_actions = [(p, a + MIN_ACTION) for a, p in enumerate(policy)]\n pred = max(probs_actions)[1]\n if pred != action:\n print(state)\n for p, a in 
reversed(sorted(probs_actions)[-TOP_K_ACTIONS:]):\n print('{:7} {:.2f}'.format(state.action_to_string(a), p))\n print('Ground truth {}\\n'.format(state.action_to_string(action)))\n count += 1\n break\n state.apply_action(action)\n if count >= max_samples:\n return\n\n # Make datasets.\n if FLAGS.data_path is None:\n raise app.UsageError(\n 'Please generate your own supervised training data or download from '\n 'https://console.cloud.google.com/storage/browser/openspiel-data/bridge'\n ' and supply the local location as --data_path')\n train = batch(\n make_dataset(os.path.join(FLAGS.data_path, 'train.txt')),\n FLAGS.train_batch)\n test = batch(\n make_dataset(os.path.join(FLAGS.data_path, 'test.txt')), FLAGS.eval_batch)\n\n # Initialize network and optimiser.\n rng = jax.random.PRNGKey(FLAGS.rng_seed) # seed used for network weights\n inputs, unused_targets = next(train)\n params = net.init(rng, inputs)\n opt_state = opt.init(params)\n\n # Train/eval loop.\n for step in range(FLAGS.iterations):\n # Do SGD on a batch of training examples.\n inputs, targets = next(train)\n params, opt_state = update(params, opt_state, inputs, targets)\n\n # Periodically evaluate classification accuracy on the test set.\n if (1 + step) % FLAGS.eval_every == 0:\n inputs, targets = next(test)\n test_accuracy = accuracy(params, inputs, targets)\n print(f'After {1+step} steps, test accuracy: {test_accuracy}.')\n if FLAGS.save_path:\n filename = os.path.join(FLAGS.save_path, f'params-{1 + step}.pkl')\n with open(filename, 'wb') as pkl_file:\n pickle.dump(params, pkl_file)\n output_samples(params, FLAGS.num_examples)\n\n\nif __name__ == '__main__':\n app.run(main)\n", "# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Regression counterfactual regret minimization (RCFR) [Waugh et al., 2015; Morrill, 2016].\n\nIn contrast to (tabular) counterfactual regret minimization (CFR)\n[Zinkevich et al., 2007], RCFR replaces the table of regrets that generate the\ncurrent policy profile with a profile of regression models. The average\npolicy is still tracked exactly with a full game-size table. The exploitability\nof the average policy in zero-sum games decreases as the model accuracy and\nthe number of iterations increase [Waugh et al., 2015; Morrill, 2016]. As long\nas the regression model errors decrease across iterations, the average policy\nconverges toward a Nash equilibrium in zero-sum games.\n\n# References\n\nDustin Morrill. Using Regret Estimation to Solve Games Compactly.\n M.Sc. thesis, Computing Science Department, University of Alberta,\n Apr 1, 2016, Edmonton Alberta, Canada.\nKevin Waugh, Dustin Morrill, J. Andrew Bagnell, and Michael Bowling.\n Solving Games with Functional Regret Estimation. At the Twenty-Ninth AAAI\n Conference on Artificial Intelligence, January 25-29, 2015, Austin Texas,\n USA. 
Pages 2138-2145.\nMartin Zinkevich, Michael Johanson, Michael Bowling, and Carmelo Piccione.\n Regret Minimization in Games with Incomplete Information.\n At Advances in Neural Information Processing Systems 20 (NeurIPS). 2007.\n\"\"\"\n\nimport numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\ndef tensor_to_matrix(tensor):\n \"\"\"Converts `tensor` to a matrix (a rank-2 tensor) or raises an exception.\n\n Args:\n tensor: The tensor to convert.\n\n Returns:\n A PyTorch matrix (rank-2 `torch.Tensor`).\n\n Raises:\n ValueError: If `tensor` cannot be trivially converted to a matrix, i.e.\n `tensor` has a rank > 2.\n \"\"\"\n tensor = torch.Tensor(tensor)\n rank = tensor.ndim\n # rank = len(list(tensor.shape))\n if rank > 2:\n raise ValueError(\n (\"Tensor {} cannot be converted into a matrix as it is rank \"\n \"{} > 2.\").format(tensor, rank))\n elif rank < 2:\n num_columns = 1 if rank == 0 else tensor.shape[0]\n tensor = torch.reshape(tensor, [1, num_columns])\n return tensor\n\n\ndef with_one_hot_action_features(state_features, legal_actions,\n num_distinct_actions):\n \"\"\"Constructs features for each sequence by extending state features.\n\n Sequences features are constructed by concatenating one-hot features\n indicating each action to the information state features and stacking them.\n\n Args:\n state_features: The features for the information state alone. Must be a\n `torch.Tensor` with a rank less than or equal to (if batched) 2.\n legal_actions: The list of legal actions in this state. Determines the\n number of rows in the returned feature matrix.\n num_distinct_actions: The number of globally distinct actions in the game.\n Determines the length of the action feature vector concatenated onto the\n state features.\n\n Returns:\n A `torch.Tensor` feature matrix with one row for each sequence and # state\n features plus `num_distinct_actions`-columns.\n\n Raises:\n ValueError: If `state_features` has a rank > 2.\n \"\"\"\n state_features = tensor_to_matrix(state_features)\n with_action_features = []\n for action in legal_actions:\n action_features = F.one_hot(\n torch.tensor([action]), num_classes=num_distinct_actions)\n all_features = torch.cat([state_features, action_features], axis=1)\n with_action_features.append(all_features)\n return torch.cat(with_action_features, axis=0)\n\n\ndef sequence_features(state, num_distinct_actions):\n \"\"\"The sequence features at `state`.\n\n Features are constructed by concatenating `state`'s normalized feature\n vector with one-hot vectors indicating each action (see\n `with_one_hot_action_features`).\n\n Args:\n state: An OpenSpiel `State`.\n num_distinct_actions: The number of globally distinct actions in `state`'s\n game.\n\n Returns:\n A `torch.Tensor` feature matrix with one row for each sequence.\n \"\"\"\n return with_one_hot_action_features(state.information_state_tensor(),\n state.legal_actions(),\n num_distinct_actions)\n\n\ndef num_features(game):\n \"\"\"Returns the number of features returned by `sequence_features`.\n\n Args:\n game: An OpenSpiel `Game`.\n \"\"\"\n return game.information_state_tensor_size() + game.num_distinct_actions()\n\n\nclass RootStateWrapper(object):\n \"\"\"Analyzes the subgame at a given root state.\n\n It enumerates features for each player sequence, creates a mapping between\n information states to sequence index offsets, and caches terminal values\n in a dictionary with history string keys.\n\n Properties:\n root: An OpenSpiel `State`.\n sequence_features: A `list` 
of sequence feature matrices, one for each\n player. This list uses depth-first, information state-major ordering, so\n sequences are grouped by information state. I.e. the first legal action\n in the first state has index 0, the second action in the same information\n state has index 1, the third action will have index 2, and so on.\n Sequences in the next information state descendant of the first action\n will begin indexing its sequences at the number of legal actions in the\n ancestor information state.\n num_player_sequences: The number of sequences for each player.\n info_state_to_sequence_idx: A `dict` mapping each information state string\n to the `sequence_features` index of the first sequence in the\n corresponding information state.\n terminal_values: A `dict` mapping history strings to terminal values for\n each player.\n \"\"\"\n\n def __init__(self, state):\n self.root = state\n self._num_distinct_actions = len(state.legal_actions_mask(0))\n\n self.sequence_features = [[] for _ in range(state.num_players())]\n self.num_player_sequences = [0] * state.num_players()\n self.info_state_to_sequence_idx = {}\n self.terminal_values = {}\n self._walk_descendants(state)\n self.sequence_features = [\n torch.cat(rows, axis=0) for rows in self.sequence_features\n ]\n\n def _walk_descendants(self, state):\n \"\"\"Records information about `state` and its descendants.\"\"\"\n if state.is_terminal():\n self.terminal_values[state.history_str()] = np.array(state.returns())\n return\n\n elif state.is_chance_node():\n for action, _ in state.chance_outcomes():\n self._walk_descendants(state.child(action))\n return\n\n player = state.current_player()\n info_state = state.information_state_string(player)\n actions = state.legal_actions()\n\n if info_state not in self.info_state_to_sequence_idx:\n n = self.num_player_sequences[player]\n self.info_state_to_sequence_idx[info_state] = n\n self.sequence_features[player].append(\n sequence_features(state, self._num_distinct_actions))\n self.num_player_sequences[player] += len(actions)\n\n for action in actions:\n self._walk_descendants(state.child(action))\n\n def sequence_weights_to_policy(self, sequence_weights, state):\n \"\"\"Returns a behavioral policy at `state` from sequence weights.\n\n Args:\n sequence_weights: An array of non-negative weights, one for each of\n `state.current_player()`'s sequences in `state`'s game.\n state: An OpenSpiel `State` that represents an information state in an\n alternating-move game.\n\n Returns:\n A `np.array<double>` probability distribution representing the policy in\n `state` encoded by `sequence_weights`.
Weights corresponding to actions\n in `state` are normalized by their sum.\n\n Raises:\n ValueError: If there are too few sequence weights at `state`.\n \"\"\"\n info_state = state.information_state_string()\n sequence_offset = self.info_state_to_sequence_idx[info_state]\n actions = state.legal_actions()\n\n sequence_idx_end = sequence_offset + len(actions)\n weights = sequence_weights[sequence_offset:sequence_idx_end]\n\n if len(weights) < len(actions):\n raise ValueError(\n (\"Invalid policy: Policy {player} at sequence offset \"\n \"{sequence_offset} has only {policy_len} elements but there \"\n \"are {num_actions} legal actions.\").format(\n player=state.current_player(),\n sequence_offset=sequence_offset,\n policy_len=len(weights),\n num_actions=len(actions)))\n return normalized_by_sum(weights)\n\n def sequence_weights_to_policy_fn(self, player_sequence_weights):\n \"\"\"Returns a policy function based on sequence weights for each player.\n\n Args:\n player_sequence_weights: A list of weight arrays, one for each player.\n Each array should have a weight for each of that player's sequences in\n `state`'s game.\n\n Returns:\n A `State` -> `np.array<double>` function. The output of this function is\n a probability distribution that represents the policy at the given\n `State` encoded by `player_sequence_weights` according to\n `sequence_weights_to_policy`.\n \"\"\"\n\n def policy_fn(state):\n player = state.current_player()\n return self.sequence_weights_to_policy(player_sequence_weights[player],\n state)\n\n return policy_fn\n\n def sequence_weights_to_tabular_profile(self, player_sequence_weights):\n \"\"\"Returns the tabular profile-form of `player_sequence_weights`.\"\"\"\n return sequence_weights_to_tabular_profile(\n self.root, self.sequence_weights_to_policy_fn(player_sequence_weights))\n\n def counterfactual_regrets_and_reach_weights(self, regret_player,\n reach_weight_player,\n *sequence_weights):\n \"\"\"Returns counterfactual regrets and reach weights as a tuple.\n\n Args:\n regret_player: The player for whom counterfactual regrets are computed.\n reach_weight_player: The player for whom reach weights are computed.\n *sequence_weights: A list of non-negative sequence weights for each player\n determining the policy profile. 
Behavioral policies are generated by\n normalizing sequence weights corresponding to actions in each\n information state by their sum.\n\n Returns:\n The counterfactual regrets and reach weights as an `np.array`-`np.array`\n tuple.\n\n Raises:\n ValueError: If there are too few sequence weights at any information state\n for any player.\n \"\"\"\n num_players = len(sequence_weights)\n regrets = np.zeros(self.num_player_sequences[regret_player])\n reach_weights = np.zeros(self.num_player_sequences[reach_weight_player])\n\n def _walk_descendants(state, reach_probabilities, chance_reach_probability):\n \"\"\"Compute `state`'s counterfactual regrets and reach weights.\n\n Args:\n state: An OpenSpiel `State`.\n reach_probabilities: The probability that each player plays to reach\n `state`'s history.\n chance_reach_probability: The probability that all chance outcomes in\n `state`'s history occur.\n\n Returns:\n The counterfactual value of `state`'s history.\n Raises:\n ValueError if there are too few sequence weights at any information\n state for any player.\n \"\"\"\n\n if state.is_terminal():\n player_reach = (\n np.prod(reach_probabilities[:regret_player]) *\n np.prod(reach_probabilities[regret_player + 1:]))\n\n counterfactual_reach_prob = player_reach * chance_reach_probability\n u = self.terminal_values[state.history_str()]\n return u[regret_player] * counterfactual_reach_prob\n\n elif state.is_chance_node():\n v = 0.0\n for action, action_prob in state.chance_outcomes():\n v += _walk_descendants(\n state.child(action), reach_probabilities,\n chance_reach_probability * action_prob)\n return v\n\n player = state.current_player()\n info_state = state.information_state_string(player)\n sequence_idx_offset = self.info_state_to_sequence_idx[info_state]\n actions = state.legal_actions(player)\n\n sequence_idx_end = sequence_idx_offset + len(actions)\n my_sequence_weights = sequence_weights[player][\n sequence_idx_offset:sequence_idx_end]\n\n if len(my_sequence_weights) < len(actions):\n raise ValueError(\n (\"Invalid policy: Policy {player} at sequence offset \"\n \"{sequence_idx_offset} has only {policy_len} elements but there \"\n \"are {num_actions} legal actions.\").format(\n player=player,\n sequence_idx_offset=sequence_idx_offset,\n policy_len=len(my_sequence_weights),\n num_actions=len(actions)))\n\n policy = normalized_by_sum(my_sequence_weights)\n action_values = np.zeros(len(actions))\n state_value = 0.0\n\n is_reach_weight_player_node = player == reach_weight_player\n is_regret_player_node = player == regret_player\n\n reach_prob = reach_probabilities[player]\n for action_idx, action in enumerate(actions):\n action_prob = policy[action_idx]\n next_reach_prob = reach_prob * action_prob\n\n if is_reach_weight_player_node:\n reach_weight_player_plays_down_this_line = next_reach_prob > 0\n if not reach_weight_player_plays_down_this_line:\n continue\n sequence_idx = sequence_idx_offset + action_idx\n reach_weights[sequence_idx] += next_reach_prob\n\n reach_probabilities[player] = next_reach_prob\n\n action_value = _walk_descendants(\n state.child(action), reach_probabilities, chance_reach_probability)\n\n if is_regret_player_node:\n state_value = state_value + action_prob * action_value\n else:\n state_value = state_value + action_value\n action_values[action_idx] = action_value\n\n reach_probabilities[player] = reach_prob\n\n if is_regret_player_node:\n regrets[sequence_idx_offset:sequence_idx_end] += (\n action_values - state_value)\n return state_value\n\n # End of 
_walk_descendants\n\n _walk_descendants(self.root, np.ones(num_players), 1.0)\n return regrets, reach_weights\n\n\ndef normalized_by_sum(v, axis=0, mutate=False):\n \"\"\"Divides each element of `v` along `axis` by the sum of `v` along `axis`.\n\n Assumes `v` is non-negative. Sets of `v` elements along `axis` that sum to\n zero are normalized to `1 / v.shape[axis]` (a uniform distribution).\n\n Args:\n v: Non-negative array of values.\n axis: An integer axis.\n mutate: Whether or not to store the result in `v`.\n\n Returns:\n The normalized array.\n \"\"\"\n v = np.asarray(v)\n denominator = v.sum(axis=axis, keepdims=True)\n denominator_is_zero = denominator == 0\n\n # Every element of `denominator_is_zero` that is true corresponds to a\n # set of elements in `v` along `axis` that are all zero. By setting these\n # denominators to `v.shape[axis]` and adding 1 to each of the corresponding\n # elements in `v`, these elements are normalized to `1 / v.shape[axis]`\n # (a uniform distribution).\n denominator += v.shape[axis] * denominator_is_zero\n if mutate:\n v += denominator_is_zero\n v /= denominator\n else:\n v = (v + denominator_is_zero) / denominator\n return v\n\n\ndef relu(v):\n \"\"\"Returns the element-wise maximum between `v` and 0.\"\"\"\n return np.maximum(v, 0)\n\n\ndef _descendant_states(state, depth_limit, depth, include_terminals,\n include_chance_states):\n \"\"\"Recursive descendant state generator.\n\n Decision states are always yielded.\n\n Args:\n state: The current state.\n depth_limit: The descendant depth limit. Zero will ensure only\n `initial_state` is generated and negative numbers specify the absence of a\n limit.\n depth: The current descendant depth.\n include_terminals: Whether or not to include terminal states.\n include_chance_states: Whether or not to include chance states.\n\n Yields:\n `State`, a state that is `initial_state` or one of its descendants.\n \"\"\"\n if state.is_terminal():\n if include_terminals:\n yield state\n return\n\n if depth > depth_limit >= 0:\n return\n\n if not state.is_chance_node() or include_chance_states:\n yield state\n\n for action in state.legal_actions():\n state_for_search = state.child(action)\n for substate in _descendant_states(state_for_search, depth_limit, depth + 1,\n include_terminals,\n include_chance_states):\n yield substate\n\n\ndef all_states(initial_state,\n depth_limit=-1,\n include_terminals=False,\n include_chance_states=False):\n \"\"\"Generates states from `initial_state`.\n\n Generates the set of states that includes only the `initial_state` and its\n descendants that satisfy the inclusion criteria specified by the remaining\n parameters. Decision states are always included.\n\n Args:\n initial_state: The initial state from which to generate states.\n depth_limit: The descendant depth limit. Zero will ensure only\n `initial_state` is generated and negative numbers specify the absence of a\n limit. Defaults to no limit.\n include_terminals: Whether or not to include terminal states. Defaults to\n `False`.\n include_chance_states: Whether or not to include chance states. 
Defaults to\n `False`.\n\n Returns:\n A generator that yields the `initial_state` and its descendants that\n satisfy the inclusion criteria specified by the remaining parameters.\n \"\"\"\n return _descendant_states(\n state=initial_state,\n depth_limit=depth_limit,\n depth=0,\n include_terminals=include_terminals,\n include_chance_states=include_chance_states)\n\n\ndef sequence_weights_to_tabular_profile(root, policy_fn):\n \"\"\"Returns the `dict` of `list`s of action-prob pairs-form of `policy_fn`.\"\"\"\n tabular_policy = {}\n players = list(range(root.num_players()))\n for state in all_states(root):\n for player in players:\n legal_actions = state.legal_actions(player)\n if len(legal_actions) < 1:\n continue\n info_state = state.information_state_string(player)\n if info_state in tabular_policy:\n continue\n my_policy = policy_fn(state)\n tabular_policy[info_state] = list(zip(legal_actions, my_policy))\n return tabular_policy\n\n\ndef feedforward_evaluate(layers,\n x,\n use_skip_connections=False,\n hidden_are_factored=False,\n hidden_activation=nn.ReLU):\n \"\"\"Evaluates `layers` as a feedforward neural network on `x`.\n\n Args:\n layers: The neural network layers (`torch.Tensor` -> `torch.Tensor`\n callables).\n x: The array-like input to evaluate. Must be trivially convertible to a\n matrix (tensor rank <= 2).\n use_skip_connections: Whether or not to use skip connections between layers.\n If the layer input has too few features to be added to the layer output,\n then the end of input is padded with zeros. If it has too many features,\n then the input is truncated.\n hidden_are_factored: Whether or not hidden logical layers are factored into\n two separate linear transformations stored as adjacent elements of\n `layers`.\n hidden_activation: the activation function following the hidden layers.\n\n Returns:\n The `torch.Tensor` evaluation result.\n\n Raises:\n ValueError: If `x` has a rank greater than 2.\n \"\"\"\n x = tensor_to_matrix(x)\n i = 0\n while i < len(layers) - 1:\n if isinstance(layers[i], hidden_activation):\n x = layers[i](x)\n i += 1\n continue\n y = layers[i](x)\n i += 1\n if hidden_are_factored:\n y = layers[i](y)\n i += 1\n if use_skip_connections:\n my_num_features = x.shape[1]\n padding = y.shape[1] - my_num_features\n if padding > 0:\n zeros = torch.zeros([x.shape[0], padding])\n x = torch.cat([x, zeros], axis=1)\n elif padding < 0:\n x = x[0:x.shape[0], 0:y.shape[1]]\n y = x + y\n x = y\n return layers[-1](x)\n\n\nclass DeepRcfrModel(nn.Module):\n \"\"\"A flexible deep feedforward RCFR model class.\n\n Properties:\n layers: The `torch.nn.Module` layers describing this model.\n \"\"\"\n\n def __init__(self,\n game,\n num_hidden_units,\n num_hidden_layers=1,\n num_hidden_factors=0,\n hidden_activation=nn.ReLU,\n use_skip_connections=False,\n regularizer=None):\n \"\"\"Creates a new `DeepRcfrModel`.\n\n Args:\n game: The OpenSpiel game being solved.\n num_hidden_units: The number of units in each hidden layer.\n num_hidden_layers: The number of hidden layers. Defaults to 1.\n num_hidden_factors: The number of hidden factors or the matrix rank of the\n layer. If greater than zero, hidden layers will be split into two\n separate linear transformations, the first with\n `num_hidden_factors`-columns and the second with\n `num_hidden_units`-columns. The result is that the logical hidden layer\n is a rank-`num_hidden_factors` matrix instead of a rank-`num_hidden_units`\n matrix. When `num_hidden_factors < num_hidden_units`, this effectively\n implements weight sharing. Defaults to 0.\n hidden_activation: The activation function to apply over hidden layers.\n Defaults to `torch.nn.ReLU`.\n use_skip_connections: Whether or not to apply skip connections (layer\n output = layer(x) + x) on hidden layers. Zero padding or truncation is\n used to match the number of columns on layer inputs and outputs.\n regularizer: A regularizer to apply to each layer. Defaults to `None`.\n \"\"\"\n super(DeepRcfrModel, self).__init__()\n self._use_skip_connections = use_skip_connections\n self._hidden_are_factored = num_hidden_factors > 0\n self._hidden_activation = hidden_activation\n input_rank = game.information_state_tensor_shape(\n )[0] + game.new_initial_state().num_distinct_actions()\n\n self.layers = []\n # Track the running input width so stacked hidden layers compose: the\n # first hidden layer reads the raw features, later ones read\n # `num_hidden_units` features.\n layer_input_size = input_rank\n for _ in range(num_hidden_layers):\n if self._hidden_are_factored:\n self.layers.append(\n nn.Linear(layer_input_size, num_hidden_factors, bias=True))\n\n self.layers.append(\n nn.Linear(\n num_hidden_factors if self._hidden_are_factored else layer_input_size,\n num_hidden_units,\n bias=True))\n if hidden_activation:\n self.layers.append(hidden_activation())\n layer_input_size = num_hidden_units\n\n self.layers.append(nn.Linear(num_hidden_units, 1, bias=True))\n\n self.layers = nn.ModuleList(self.layers)\n # Construct variables for all layers by exercising the network.\n x = torch.zeros([1, num_features(game)])\n for layer in self.layers:\n x = layer(x)\n\n def __call__(self, x):\n \"\"\"Evaluates this model on `x`.\"\"\"\n return feedforward_evaluate(\n layers=self.layers,\n x=x,\n use_skip_connections=self._use_skip_connections,\n hidden_are_factored=self._hidden_are_factored,\n hidden_activation=self._hidden_activation)\n\n\nclass _RcfrSolver(object):\n \"\"\"An abstract RCFR solver class.\n\n Requires that subclasses implement `evaluate_and_update_policy`.\n \"\"\"\n\n def __init__(self, game, models, truncate_negative=False):\n \"\"\"Creates a new `_RcfrSolver`.\n\n Args:\n game: An OpenSpiel `Game`.\n models: Current policy models (optimizable array-like -> `torch.Tensor`\n callables) for both players.\n truncate_negative: Whether or not to truncate negative (approximate)\n cumulative regrets to zero to implement RCFR+.
Defaults to `False`.\n \"\"\"\n self._game = game\n self._models = models\n self._truncate_negative = truncate_negative\n self._root_wrapper = RootStateWrapper(game.new_initial_state())\n\n self._cumulative_seq_probs = [\n np.zeros(n) for n in self._root_wrapper.num_player_sequences\n ]\n\n def _sequence_weights(self, player=None):\n \"\"\"Returns regret-like weights for each sequence as an `np.array`.\n\n Negative weights are truncated to zero.\n\n Args:\n player: The player to compute weights for, or both if `player` is `None`.\n Defaults to `None`.\n \"\"\"\n if player is None:\n return [\n self._sequence_weights(player)\n for player in range(self._game.num_players())\n ]\n else:\n tensor = F.relu(\n torch.squeeze(self._models[player](\n self._root_wrapper.sequence_features[player])))\n return tensor.detach().numpy()\n\n def evaluate_and_update_policy(self, train_fn):\n \"\"\"Performs a single step of policy evaluation and policy improvement.\n\n Args:\n train_fn: A (model, `torch.data.Dataset`) function that trains the given\n regression model to accurately reproduce the x to y mapping given x-y\n data.\n\n Raises:\n NotImplementedError: If not overridden by child class.\n \"\"\"\n raise NotImplementedError()\n\n def current_policy(self):\n \"\"\"Returns the current policy profile.\n\n Returns:\n A `dict<info state, list<Action, probability>>` that maps info state\n strings to `Action`-probability pairs describing each player's policy.\n \"\"\"\n return self._root_wrapper.sequence_weights_to_tabular_profile(\n self._sequence_weights())\n\n def average_policy(self):\n \"\"\"Returns the average of all policies iterated.\n\n This average policy converges toward a Nash policy as the number of\n iterations increases as long as the regret prediction error decreases\n continually [Morrill, 2016].\n\n The policy is computed using the accumulated policy probabilities computed\n using `evaluate_and_update_policy`.\n\n Returns:\n A `dict<info state, list<Action, probability>>` that maps info state\n strings to (Action, probability) pairs describing each player's policy.\n \"\"\"\n return self._root_wrapper.sequence_weights_to_tabular_profile(\n self._cumulative_seq_probs)\n\n def _previous_player(self, player):\n \"\"\"The previous player in the turn ordering.\"\"\"\n return player - 1 if player > 0 else self._game.num_players() - 1\n\n def _average_policy_update_player(self, regret_player):\n \"\"\"The player for whom the average policy should be updated.\"\"\"\n return self._previous_player(regret_player)\n\n\nclass RcfrSolver(_RcfrSolver):\n \"\"\"RCFR with an effectively infinite regret data buffer.\n\n Exact or bootstrapped cumulative regrets are stored as if an infinitely\n large data buffer. The average strategy is updated and stored in a full\n game-size table. Reproduces the RCFR versions used in experiments by\n Waugh et al. 
[2015] and Morrill [2016] except that this class does not\n restrict the user to regression tree models.\n \"\"\"\n\n def __init__(self, game, models, bootstrap=None, truncate_negative=False):\n self._bootstrap = bootstrap\n super(RcfrSolver, self).__init__(\n game, models, truncate_negative=truncate_negative)\n\n self._regret_targets = [\n np.zeros(n) for n in self._root_wrapper.num_player_sequences\n ]\n\n def evaluate_and_update_policy(self, train_fn):\n \"\"\"Performs a single step of policy evaluation and policy improvement.\n\n Args:\n train_fn: A (model, `torch.data.Dataset`) function that trains the given\n regression model to accurately reproduce the x to y mapping given x-y\n data.\n \"\"\"\n sequence_weights = self._sequence_weights()\n player_seq_features = self._root_wrapper.sequence_features\n for regret_player in range(self._game.num_players()):\n seq_prob_player = self._average_policy_update_player(regret_player)\n\n regrets, seq_probs = (\n self._root_wrapper.counterfactual_regrets_and_reach_weights(\n regret_player, seq_prob_player, *sequence_weights))\n\n if self._bootstrap:\n self._regret_targets[regret_player][:] = sequence_weights[regret_player]\n if self._truncate_negative:\n regrets = np.maximum(-relu(self._regret_targets[regret_player]),\n regrets)\n\n self._regret_targets[regret_player] += regrets\n self._cumulative_seq_probs[seq_prob_player] += seq_probs\n\n targets = torch.unsqueeze(\n torch.Tensor(self._regret_targets[regret_player]), axis=1)\n data = torch.utils.data.TensorDataset(player_seq_features[regret_player],\n targets)\n\n regret_player_model = self._models[regret_player]\n train_fn(regret_player_model, data)\n sequence_weights[regret_player] = self._sequence_weights(regret_player)\n\n\nclass ReservoirBuffer(object):\n \"\"\"A generic reservoir buffer data structure.\n\n After every insertion, its contents represents a `size`-size uniform\n random sample from the stream of candidates that have been encountered.\n \"\"\"\n\n def __init__(self, size):\n self.size = size\n self.num_elements = 0\n self._buffer = np.full([size], None, dtype=object)\n self._num_candidates = 0\n\n @property\n def buffer(self):\n return self._buffer[:self.num_elements]\n\n def insert(self, candidate):\n \"\"\"Consider this `candidate` for inclusion in this sampling buffer.\"\"\"\n self._num_candidates += 1\n if self.num_elements < self.size:\n self._buffer[self.num_elements] = candidate\n self.num_elements += 1\n return\n idx = np.random.choice(self._num_candidates)\n if idx < self.size:\n self._buffer[idx] = candidate\n\n def insert_all(self, candidates):\n \"\"\"Consider all `candidates` for inclusion in this sampling buffer.\"\"\"\n for candidate in candidates:\n self.insert(candidate)\n\n def num_available_spaces(self):\n \"\"\"The number of freely available spaces in this buffer.\"\"\"\n return self.size - self.num_elements\n\n\nclass ReservoirRcfrSolver(_RcfrSolver):\n \"\"\"RCFR with a reservoir buffer for storing regret data.\n\n The average strategy is updated and stored in a full game-size table.\n \"\"\"\n\n def __init__(self, game, models, buffer_size, truncate_negative=False):\n self._buffer_size = buffer_size\n super(ReservoirRcfrSolver, self).__init__(\n game, models, truncate_negative=truncate_negative)\n self._reservoirs = [\n ReservoirBuffer(self._buffer_size) for _ in range(game.num_players())\n ]\n\n def evaluate_and_update_policy(self, train_fn):\n \"\"\"Performs a single step of policy evaluation and policy improvement.\n\n Args:\n train_fn: A (model, 
`torch.data.Dataset`) function that trains the given\n regression model to accurately reproduce the x to y mapping given x-y\n data.\n \"\"\"\n sequence_weights = self._sequence_weights()\n player_seq_features = self._root_wrapper.sequence_features\n for regret_player in range(self._game.num_players()):\n seq_prob_player = self._average_policy_update_player(regret_player)\n\n regrets, seq_probs = (\n self._root_wrapper.counterfactual_regrets_and_reach_weights(\n regret_player, seq_prob_player, *sequence_weights))\n\n if self._truncate_negative:\n regrets = np.maximum(-relu(sequence_weights[regret_player]), regrets)\n\n next_data = list(\n zip(player_seq_features[regret_player],\n torch.unsqueeze(torch.Tensor(regrets), axis=1)))\n\n self._reservoirs[regret_player].insert_all(next_data)\n\n self._cumulative_seq_probs[seq_prob_player] += seq_probs\n\n my_buffer = list(\n torch.stack(a) for a in zip(*self._reservoirs[regret_player].buffer))\n\n data = torch.utils.data.TensorDataset(*my_buffer)\n\n regret_player_model = self._models[regret_player]\n train_fn(regret_player_model, data)\n sequence_weights[regret_player] = self._sequence_weights(regret_player)\n" ]
[ [ "numpy.ones" ], [ "numpy.asarray", "numpy.arange", "numpy.eye", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.zeros", "numpy.divide" ], [ "numpy.exp" ], [ "numpy.random.RandomState", "numpy.zeros" ], [ "numpy.zeros", "numpy.random.shuffle" ], [ "numpy.maximum", "torch.Tensor", "torch.cat", "numpy.asarray", "numpy.random.choice", "torch.nn.ModuleList", "torch.reshape", "torch.utils.data.TensorDataset", "torch.zeros", "numpy.full", "torch.tensor", "numpy.ones", "torch.nn.Linear", "numpy.prod", "torch.stack", "numpy.zeros" ] ]
nan-wang/PaddleOCR
[ "09604c38e42591c240771edbbff43a6dd7ebf592", "09604c38e42591c240771edbbff43a6dd7ebf592" ]
[ "tools/infer_table.py", "ppocr/data/imaug/iaa_augment.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport os\nimport sys\nimport json\n\n__dir__ = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(__dir__)\nsys.path.append(os.path.abspath(os.path.join(__dir__, '..')))\n\nos.environ[\"FLAGS_allocator_strategy\"] = 'auto_growth'\n\nimport paddle\nfrom paddle.jit import to_static\n\nfrom ppocr.data import create_operators, transform\nfrom ppocr.modeling.architectures import build_model\nfrom ppocr.postprocess import build_post_process\nfrom ppocr.utils.save_load import init_model\nfrom ppocr.utils.utility import get_image_file_list\nimport tools.program as program\nimport cv2\n\ndef main(config, device, logger, vdl_writer):\n global_config = config['Global']\n\n # build post process\n post_process_class = build_post_process(config['PostProcess'],\n global_config)\n\n # build model\n if hasattr(post_process_class, 'character'):\n config['Architecture'][\"Head\"]['out_channels'] = len(\n getattr(post_process_class, 'character'))\n\n model = build_model(config['Architecture'])\n\n init_model(config, model, logger)\n\n # create data ops\n transforms = []\n use_padding = False\n for op in config['Eval']['dataset']['transforms']:\n op_name = list(op)[0]\n if 'Label' in op_name:\n continue\n if op_name == 'KeepKeys':\n op[op_name]['keep_keys'] = ['image']\n if op_name == \"ResizeTableImage\":\n use_padding = True\n padding_max_len = op['ResizeTableImage']['max_len']\n transforms.append(op)\n\n global_config['infer_mode'] = True\n ops = create_operators(transforms, global_config)\n\n model.eval()\n for file in get_image_file_list(config['Global']['infer_img']):\n logger.info(\"infer_img: {}\".format(file))\n with open(file, 'rb') as f:\n img = f.read()\n data = {'image': img}\n batch = transform(data, ops)\n images = np.expand_dims(batch[0], axis=0)\n images = paddle.to_tensor(images)\n preds = model(images)\n post_result = post_process_class(preds)\n res_html_code = post_result['res_html_code']\n res_loc = post_result['res_loc']\n img = cv2.imread(file)\n imgh, imgw = img.shape[0:2]\n res_loc_final = []\n for rno in range(len(res_loc[0])):\n x0, y0, x1, y1 = res_loc[0][rno]\n left = max(int(imgw * x0), 0)\n top = max(int(imgh * y0), 0)\n right = min(int(imgw * x1), imgw - 1)\n bottom = min(int(imgh * y1), imgh - 1)\n cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)\n res_loc_final.append([left, top, right, bottom])\n res_loc_str = json.dumps(res_loc_final)\n logger.info(\"result: {}, {}\".format(res_html_code, res_loc_final))\n logger.info(\"success!\")\n\n\nif __name__ == '__main__':\n config, device, logger, vdl_writer = program.preprocess()\n main(config, device, logger, vdl_writer)\n\n", "# copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis code is adapted from:\nhttps://github.com/WenmuZhou/DBNet.pytorch/blob/master/data_loader/modules/iaa_augment.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport imgaug\nimport imgaug.augmenters as iaa\n\n\nclass AugmenterBuilder(object):\n def __init__(self):\n pass\n\n def build(self, args, root=True):\n if args is None or len(args) == 0:\n return None\n elif isinstance(args, list):\n if root:\n sequence = [self.build(value, root=False) for value in args]\n return iaa.Sequential(sequence)\n else:\n return getattr(iaa, args[0])(\n *[self.to_tuple_if_list(a) for a in args[1:]])\n elif isinstance(args, dict):\n cls = getattr(iaa, args['type'])\n return cls(**{\n k: self.to_tuple_if_list(v)\n for k, v in args['args'].items()\n })\n else:\n raise RuntimeError('unknown augmenter arg: ' + str(args))\n\n def to_tuple_if_list(self, obj):\n if isinstance(obj, list):\n return tuple(obj)\n return obj\n\n\nclass IaaAugment():\n def __init__(self, augmenter_args=None, **kwargs):\n if augmenter_args is None:\n augmenter_args = [{\n 'type': 'Fliplr',\n 'args': {\n 'p': 0.5\n }\n }, {\n 'type': 'Affine',\n 'args': {\n 'rotate': [-10, 10]\n }\n }, {\n 'type': 'Resize',\n 'args': {\n 'size': [0.5, 3]\n }\n }]\n self.augmenter = AugmenterBuilder().build(augmenter_args)\n\n def __call__(self, data):\n image = data['image']\n shape = image.shape\n\n if self.augmenter:\n aug = self.augmenter.to_deterministic()\n data['image'] = aug.augment_image(image)\n data = self.may_augment_annotation(aug, data, shape)\n return data\n\n def may_augment_annotation(self, aug, data, shape):\n if aug is None:\n return data\n\n line_polys = []\n for poly in data['polys']:\n new_poly = self.may_augment_poly(aug, shape, poly)\n line_polys.append(new_poly)\n data['polys'] = np.array(line_polys)\n return data\n\n def may_augment_poly(self, aug, img_shape, poly):\n keypoints = [imgaug.Keypoint(p[0], p[1]) for p in poly]\n keypoints = aug.augment_keypoints(\n [imgaug.KeypointsOnImage(\n keypoints, shape=img_shape)])[0].keypoints\n poly = [(p.x, p.y) for p in keypoints]\n return poly\n" ]
[ [ "numpy.expand_dims" ], [ "numpy.array" ] ]
allisonchen23/Pensieve-PPO2
[ "c4add70d3e1e28a6d2e90f2571ca53d1d35647e1" ]
[ "src/a2c.py" ]
[ "import tflearn\nimport math\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport time\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\nFEATURE_NUM = 128\nEPS = 1e-4\nGAMMA = 0.99\n\n\nclass Network():\n def CreateNetwork(self, inputs):\n with tf.variable_scope('actor'):\n split_0 = tflearn.fully_connected(\n inputs[:, 0:1, -1], FEATURE_NUM, activation='relu')\n split_1 = tflearn.fully_connected(\n inputs[:, 1:2, -1], FEATURE_NUM, activation='relu')\n split_2 = tflearn.conv_1d(\n inputs[:, 2:3, :], FEATURE_NUM, 4, activation='relu')\n split_3 = tflearn.conv_1d(\n inputs[:, 3:4, :], FEATURE_NUM, 4, activation='relu')\n split_4 = tflearn.conv_1d(\n inputs[:, 4:5, :self.a_dim], FEATURE_NUM, 4, activation='relu')\n split_5 = tflearn.fully_connected(\n inputs[:, 5:6, -1], FEATURE_NUM, activation='relu')\n\n split_2_flat = tflearn.flatten(split_2)\n split_3_flat = tflearn.flatten(split_3)\n split_4_flat = tflearn.flatten(split_4)\n\n merge_net = tflearn.merge(\n [split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')\n\n net = tflearn.fully_connected(\n merge_net, FEATURE_NUM, activation='relu')\n\n pi = tflearn.fully_connected(net, self.a_dim, activation='softmax')\n value = tflearn.fully_connected(net, 1, activation='linear')\n return pi, value\n\n def get_network_params(self):\n return self.sess.run(self.network_params)\n\n def set_network_params(self, input_network_params):\n self.sess.run(self.set_network_params_op, feed_dict={\n i: d for i, d in zip(self.input_network_params, input_network_params)\n })\n\n def __init__(self, sess, state_dim, action_dim, learning_rate):\n self.quality = 0\n self.s_dim = state_dim\n self.a_dim = action_dim\n self.lr_rate = learning_rate\n self.sess = sess\n self.outputs = tf.placeholder(tf.float32, [None, 1])\n self.inputs = tf.placeholder(\n tf.float32, [None, self.s_dim[0], self.s_dim[1]])\n self.acts = tf.placeholder(tf.float32, [None, self.a_dim])\n self.entropy_weight = tf.placeholder(tf.float32)\n self.pi, self.val = self.CreateNetwork(inputs=self.inputs)\n self.real_out = tf.clip_by_value(self.pi, EPS, 1. 
- EPS)\n self.log_prob = tf.log(tf.reduce_sum(tf.multiply(\n self.real_out, self.acts), reduction_indices=1, keepdims=True))\n self.entropy = tf.multiply(self.real_out, tf.log(self.real_out))\n\n # Get all network parameters\n self.network_params = \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor')\n\n # Set all network parameters\n self.input_network_params = []\n for param in self.network_params:\n self.input_network_params.append(\n tf.placeholder(tf.float32, shape=param.get_shape()))\n self.set_network_params_op = []\n for idx, param in enumerate(self.input_network_params):\n self.set_network_params_op.append(\n self.network_params[idx].assign(param))\n\n self.loss = tflearn.mean_square(self.val, self.outputs) \\\n - tf.reduce_mean(self.log_prob * tf.stop_gradient(self.outputs - self.val)) \\\n + self.entropy_weight * tf.reduce_mean(self.entropy)\n\n self.optimize = tf.train.AdamOptimizer(\n self.lr_rate).minimize(self.loss)\n\n def predict(self, input):\n action = self.sess.run(self.real_out, feed_dict={\n self.inputs: input\n })\n return action[0]\n\n def get_entropy(self, step):\n if step < 20000:\n return 5.\n elif step < 50000:\n return 3.\n elif step < 70000:\n return 1.\n elif step < 90000:\n return 0.5\n elif step < 120000:\n return 0.3\n else:\n return 0.1\n\n def train(self, s_batch, a_batch, v_batch, epoch):\n # print s_batch.shape, a_batch.shape, v_batch.shape\n # s_batch, a_batch, v_batch = tflearn.data_utils.shuffle(\n # s_batch, a_batch, v_batch)\n self.sess.run(self.optimize, feed_dict={\n self.inputs: s_batch,\n self.acts: a_batch,\n self.outputs: v_batch,\n self.entropy_weight: self.get_entropy(epoch)\n })\n\n def compute_v(self, s_batch, a_batch, r_batch, terminal):\n ba_size = len(s_batch)\n\n R_batch = np.zeros([len(r_batch), 1])\n\n if terminal:\n R_batch[-1, 0] = 0 # terminal state\n else:\n v_batch = self.sess.run(self.val, feed_dict={\n self.inputs: s_batch\n })\n R_batch[-1, 0] = v_batch[-1, 0] # boot strap from last state\n for t in reversed(range(ba_size - 1)):\n R_batch[t, 0] = r_batch[t] + GAMMA * R_batch[t + 1, 0]\n\n return list(R_batch)\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.multiply", "tensorflow.reduce_mean", "tensorflow.get_collection", "tensorflow.placeholder", "tensorflow.stop_gradient", "tensorflow.log", "tensorflow.train.AdamOptimizer", "tensorflow.variable_scope" ] ]
coryschwartz/nebula-crawler
[ "34ebe1109a5117949b4f285891a065adcc0bae08", "34ebe1109a5117949b4f285891a065adcc0bae08", "34ebe1109a5117949b4f285891a065adcc0bae08" ]
[ "analysis/mixed/plot_churn.py", "analysis/report/plot_crawl.py", "analysis/mixed/plot_geo_dangle.py" ]
[ "import psycopg2\nimport toml\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom lib import node_time, node_uptime\n\nconfig = toml.load(\"./db.toml\")['psql']\nconn = psycopg2.connect(\n host=config['host'],\n port=config['port'],\n database=config['database'],\n user=config['user'],\n password=config['password'],\n)\n\nstart, end = node_time.get_time_range(conn)\nuptimes = [i for i in node_uptime.get_node_uptime(conn, start, end) if i]\n\n# Plotting cdf of uptimes, code adapted from ../churn/cdf.py\nhist_values, bin_edges = np.histogram(\n uptimes, bins=len(uptimes), density=True\n)\n\n# Since we provided an integer to the bins parameter above, the edges are of equal width.\n# This means the width between the first two elements is equal for all edges.\nedge_width = bin_edges[1] - bin_edges[0]\n\n# Integrate over the histogram\ncumsum = np.cumsum(hist_values) * edge_width\n\n# build plot\nplt.plot(bin_edges[1:], cumsum, label=\"All sessions\")\n\n# Presentation logic\nplt.rc('font', size=8)\nplt.gca().xaxis.set_major_formatter(lambda x, pos=None: x / 3600)\nplt.xlabel(\"Hours\")\nplt.ylabel(\"Percentage of online peers\")\nplt.tight_layout()\nplt.xticks(np.arange(0, max(bin_edges[1:]), 3 * 60 * 60))\nplt.xlim(-60 * 60, 24 * 60 * 60)\nplt.grid(True)\nplt.legend()\nplt.title(\"Session cdf from %s to %s\" % (start.replace(microsecond=0), end.replace(microsecond=0)))\n\n# Finalize\nplt.show()\n", "import pandas as pd\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nimport matplotlib.dates as md\nimport lib_plot\nfrom lib_db import DBClient\nfrom lib_fmt import thousands_ticker_formatter\n\n\ndef main(db_client: DBClient):\n sns.set_theme()\n\n results = db_client.get_crawls()\n\n results_df = pd.DataFrame(results, columns=[\"started_at\", \"crawled_peers\", \"dialable_peers\", \"undialable_peers\"])\n results_df['started_at'] = pd.to_datetime(results_df['started_at'], unit='s')\n results_df[\"percentage_dialable\"] = 100 * results_df[\"dialable_peers\"] / results_df[\"crawled_peers\"]\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))\n\n sns.lineplot(ax=ax1, x=results_df[\"started_at\"], y=results_df[\"crawled_peers\"])\n sns.lineplot(ax=ax1, x=results_df[\"started_at\"], y=results_df[\"dialable_peers\"])\n sns.lineplot(ax=ax1, x=results_df[\"started_at\"], y=results_df[\"undialable_peers\"])\n\n ax1.legend(loc='lower right', labels=[\"Total\", \"Dialable\", \"Undialable\"])\n\n ax1.xaxis.set_major_formatter(md.DateFormatter('%Y-%m-%d'))\n ax1.set_ylim(0)\n\n ax1.set_xlabel(\"Time (CEST)\")\n ax1.set_ylabel(\"Count\")\n\n ax1.get_yaxis().set_major_formatter(thousands_ticker_formatter)\n\n sns.lineplot(ax=ax2, x=results_df[\"started_at\"], y=results_df[\"percentage_dialable\"])\n ax2.xaxis.set_major_formatter(md.DateFormatter('%Y-%m-%d'))\n ax2.set_ylim(0, 100)\n ax2.set_xlabel(\"Time (CEST)\")\n ax2.set_ylabel(\"Dialable Peers in %\")\n\n plt.tight_layout()\n lib_plot.savefig(\"crawl-overview\")\n plt.show()\n\n\nif __name__ == '__main__':\n db_client = DBClient()\n main(db_client)\n", "import psycopg2\nimport toml\nimport matplotlib.pyplot as plt\nfrom lib import node_time, node_classification, node_geolocation\n\n\nconfig = toml.load(\"./db.toml\")['psql']\nconn = psycopg2.connect(\n host=config['host'],\n port=config['port'],\n database=config['database'],\n user=config['user'],\n password=config['password'],\n)\n\nstart, end = node_time.get_time_range(conn)\ndangle = node_classification.get_dangling_nodes(conn, start, end)\nlocations = 
node_geolocation.get_geolocation(conn, dangle)\n\ncounts = dict()\ntotal = 0\nfor _, location in locations.items():\n if location in counts:\n counts[location] += 1\n else:\n counts[location] = 1\n total += 1\ncountsTrim = {\"others\": 0}\nfor key, val in counts.items():\n if val / total < 0.01:\n countsTrim[\"others\"] += val\n else:\n countsTrim[key] = val\n# assign the sorted result so the ordering actually takes effect in the pie chart\ncountsTrim = {k: v for k, v in sorted(countsTrim.items(), key=lambda item: item[1])}\n\n# Plot\nplt.rc('font', size=8)\nplt.pie(countsTrim.values(), labels=countsTrim.keys(), autopct=\"%.1f%%\")\nplt.title(\"Dangling nodes geolocation info from %s to %s\" % (start.replace(microsecond=0), end.replace(microsecond=0)))\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.rc", "numpy.cumsum", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "matplotlib.dates.DateFormatter", "matplotlib.pyplot.tight_layout", "pandas.to_datetime", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.rc" ] ]
bendevera/DS-Unit-3-Sprint-2-SQL-and-Databases
[ "24290da3bdbfaaadfa87c23f6f4196e2220360ab" ]
[ "module2-sql-for-analysis/insert_titantic.py" ]
[ "import pandas as pd \nimport psycopg2\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\n\n# read in our data\ndf = pd.read_csv('./titanic.csv')\nprint(f\"DF shape: {df.shape}\")\n\n# create connection to db we want to move the data to\nconn = psycopg2.connect(\n host=os.getenv('DB_HOST'), \n dbname=os.getenv('DB_USER'), \n user=os.getenv('DB_USER'), \n password=os.getenv('DB_PASSWORD')\n)\ncur = conn.cursor()\n\n# ensure the table is fresh by dropping if exists and creating from scratch\nquery = \"select exists(select * from information_schema.tables where table_name='titantic')\"\ncur.execute(query)\n\nif cur.fetchone()[0]:\n print(\"dropping table...\")\n query = \"DROP TABLE titantic;\"\n cur.execute(query)\n\nprint(\"creating table...\")\nquery = \"\"\"\nCREATE TABLE titantic (\n id SERIAL PRIMARY KEY,\n survived BOOLEAN,\n class TEXT,\n name TEXT,\n sex TEXT,\n age INTEGER,\n siblings BOOLEAN,\n parents BOOLEAN,\n fare REAL\n)\n\"\"\"\ncur.execute(query)\n\ndef get_name(name):\n return name.replace(\"'\", \"\")\n\ndef get_row(row):\n return (bool(row[0]), row[1], get_name(row[2]), row[3], row[4], bool(row[5]), bool(row[6]), row[7])\n\n# for each row in the csv, add a row to the postgres db\nprint(\"adding rows...\")\nfor row in df.values:\n query = \"INSERT INTO titantic (survived, class, name, sex, age, siblings, parents, fare) VALUES \" + str(get_row(row)) + \";\"\n cur.execute(query)\n\nquery = \"SELECT * FROM titantic\"\ncur.execute(query)\nrows = cur.fetchall()\nprint(f\"Num rows: {len(rows)}\")\nconn.commit()\ncur.close()" ]
[ [ "pandas.read_csv" ] ]
cclauss/h2o4gpu
[ "9885416deb3285f5d0f33023d6c07373ac4fc0b7" ]
[ "src/interface_py/h2o4gpu/util/import_data.py" ]
[ "#- * - encoding : utf - 8 - * -\n\"\"\"\n:copyright: 2017-2018 H2O.ai, Inc.\n:license: Apache License Version 2.0 (see LICENSE for details)\n\"\"\"\n\n\ndef import_data(data_path,\n use_pandas=False,\n intercept=True,\n valid_fraction=0.2,\n classification=True):\n \"\"\"Import Data for H2O GPU Edition\n\n This function will read in data and prepare it for H2O4GPU's GLM solver.\n\n Note, the data is assumed to be all numeric,i.e.,\n categoricals are one hot encoded, etc.\n\n :param data_path : str\n A path to a dataset (The dataset needs to be all numeric)\n :param use_pandas : bool\n Indicate if Pandas should be used to parse\n :param intercept : bool\n Indicate if intercept term is needed\n :param valid_fraction : float\n Percentage of dataset reserved for a validation set\n :param classification : bool\n Classification problem?\n :returns\n If valid_fraction > 0 it will return the following:\n train_x: numpy array of train input variables\n train_y: numpy array of y variable\n valid_x: numpy array of valid input variables\n valid_y: numpy array of valid y variable\n family : string that would either be \"logistic\" if classification is set\n to True, otherwise \"elasticnet\"\n If valid_fraction == 0 it will return the following:\n train_x: numpy array of train input variables\n train_y: numpy array of y variable\n family : string that would either be \"logistic\" if classification is set\n to True, otherwise \"elasticnet\"\n \"\"\"\n #Can import data using pandas or feather.\n use_pandas = use_pandas\n\n data_file = data_path # If importing using pandas\n\n if use_pandas:\n print(\"Reading Data with Pandas\")\n import pandas as pd\n data = pd.read_csv(data_file)\n else:\n print(\"Reading Data with Feather\")\n import feather\n data = feather.read_dataframe(data_file)\n print(data.shape)\n import numpy as np\n data_x = np.array(\n data.iloc[:, :data.shape[1] - 1],\n dtype='float32',\n order='C',\n copy=False)\n data_y = np.array(\n data.iloc[:, data.shape[1] - 1], dtype='float32', order='C', copy=False)\n\n #Setup train / validation set split\n #(assuming form of mxn where m = row count and n = col count)\n morig = data_x.shape[0]\n norig = data_x.shape[1]\n print(\"Original m=%d n=%d\" % (morig, norig))\n import sys\n sys.stdout.flush()\n\n #Do train / valid split\n if valid_fraction > 0:\n valid_fraction = valid_fraction\n HO = int(valid_fraction * morig)\n H = morig - HO\n print(\"Size of Train rows=%d & valid rows=%d\" % (H, HO))\n sys.stdout.flush()\n train_x = data_x[0:H, :]\n train_y = data_y[0:H]\n valid_x = data_x[H:morig, :]\n valid_y = data_y[H:morig]\n print(\"Size of Train cols=%d valid cols=%d\" % (train_x.shape[1],\n valid_x.shape[1]))\n else:\n train_x = data_x\n train_y = data_y\n\n\n#Using intercept\n if intercept:\n train_x = np.hstack(\n [train_x,\n np.ones((train_x.shape[0], 1), dtype=train_x.dtype)])\n if valid_fraction > 0:\n valid_x = np.hstack(\n [valid_x,\n np.ones((valid_x.shape[0], 1), dtype=valid_x.dtype)])\n print(\"Size of Train cols=%d & valid cols=%d after adding \"\n \"intercept column\" % (train_x.shape[1], valid_x.shape[1]))\n else:\n print(\"Size of Train cols=%d after adding intercept column\" %\n (train_x.shape[1]))\n\n if classification:\n family = \"logistic\"\n else:\n family = \"elasticnet\"\n if valid_fraction > 0:\n return train_x, train_y, valid_x, valid_y, family\n\n return train_x, train_y, family\n" ]
[ [ "numpy.array", "pandas.read_csv", "numpy.ones" ] ]
geez0219/ARC
[ "f2176f0d442d4a2d6028f0770b1efc1a9ae982b8", "f2176f0d442d4a2d6028f0770b1efc1a9ae982b8", "f2176f0d442d4a2d6028f0770b1efc1a9ae982b8" ]
[ "source/meta_compare/language_modeling/sls_language_modeling.py", "source/normal_compare/image_classification/base_lr.py", "source/normal_compare/language_modeling/base_lr.py" ]
[ "import os\n\nimport fastestimator as fe\nimport numpy as np\nimport sls\nimport torch\nimport torch.nn as nn\nimport wget\nfrom fastestimator.op.numpyop import NumpyOp\nfrom fastestimator.op.tensorop import TensorOp\nfrom fastestimator.op.tensorop.loss import CrossEntropy\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\n\n\ndef get_ptb(folder_path, seq_length=64):\n file_names = [\"ptb.train.txt\", \"ptb.valid.txt\", \"ptb.test.txt\"]\n urls = [\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt',\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt',\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt'\n ]\n # Read text\n texts = []\n for file_name, url in zip(file_names, urls):\n text = []\n file_path = os.path.join(folder_path, file_name)\n if not os.path.exists(file_path):\n wget.download(url, out=folder_path)\n with open(file_path, 'r') as f:\n for line in f:\n text.extend(line.split() + ['<eos>'])\n texts.append(text)\n # Build dictionary from training data\n vocab = sorted(set(texts[0]))\n word2idx = {u: i for i, u in enumerate(vocab)}\n #convert word to index and split the sequences and discard the last incomplete sequence\n data = [[word2idx[word] for word in text[:-(len(text) % seq_length)]] for text in texts]\n train_data, eval_data, test_data = [np.array(d).reshape(-1, seq_length) for d in data]\n return train_data, eval_data, test_data\n\n\nclass CreateInputAndTarget(NumpyOp):\n def forward(self, data, state):\n return data[:-1], data[1:]\n\n\nclass DimesionAdjust(TensorOp):\n def forward(self, data, state):\n x, y = data\n return x.T, y.T.reshape(-1)\n\n\nclass Perplexity(fe.trace.Trace):\n def on_epoch_end(self, data):\n ce = data[\"ce\"]\n data.write_with_log(self.outputs[0], np.exp(ce))\n\n\nclass BuildModel(nn.Module):\n def __init__(self, vocab_size=10000, embedding_dim=300, rnn_units=600):\n super().__init__()\n self.embed_layer = nn.Embedding(vocab_size, embedding_dim)\n self.lstm_layer = nn.LSTM(embedding_dim, rnn_units)\n self.dropout = nn.Dropout(0.5)\n self.fc = nn.Linear(rnn_units, vocab_size)\n\n nn.init.xavier_uniform_(self.lstm_layer.weight_ih_l0.data)\n nn.init.xavier_uniform_(self.lstm_layer.weight_hh_l0.data)\n\n def forward(self, x):\n x = self.embed_layer(x)\n x, _ = self.lstm_layer(x)\n x = x.view(x.size(0) * x.size(1), x.size(2))\n x = self.dropout(x)\n x = self.fc(x)\n return x\n\n\nclass DummpyUpdate(UpdateOp):\n def forward(self, data, state):\n pass\n\n\nclass SGDLinesSearch(fe.op.tensorop.TensorOp):\n def __init__(self, model, opt, loss_op, inputs, outputs, mode=\"train\"):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.model = model\n self.opt = opt\n self.loss_op = loss_op\n\n def forward(self, data, state):\n x, y = data\n closure = lambda: self.loss_op.forward((self.model(x), y), state=state)\n self.opt.zero_grad()\n loss = self.opt.step(closure=closure)\n return loss\n\n\nclass PrintLR(fe.trace.Trace):\n def __init__(self, opt):\n super().__init__(mode=\"train\")\n self.opt = opt\n\n def on_batch_end(self, data):\n if self.system.global_step % self.system.log_steps == 0 or self.system.global_step == 1:\n data.write_with_log(\"model_lr\", float(self.opt.state['step_size']))\n\n\ndef get_estimator(data_dir, epochs=98, batch_size=128, seq_length=20, vocab_size=10000):\n train_data, _, test_data = get_ptb(folder_path=data_dir, seq_length=seq_length + 1)\n pipeline = fe.Pipeline(train_data=fe.dataset.NumpyDataset(data={\"x\": 
train_data}),\n eval_data=fe.dataset.NumpyDataset(data={\"x\": test_data}),\n batch_size=batch_size,\n ops=CreateInputAndTarget(inputs=\"x\", outputs=(\"x\", \"y\")),\n drop_last=True)\n # step 2\n model = fe.build(model_fn=lambda: BuildModel(vocab_size, embedding_dim=300, rnn_units=600), optimizer_fn=\"sgd\")\n opt = sls.Sls(model.parameters())\n network = fe.Network(ops=[\n DimesionAdjust(inputs=(\"x\", \"y\"), outputs=(\"x\", \"y\")),\n ModelOp(model=model, inputs=\"x\", outputs=\"y_pred\", mode=None),\n SGDLinesSearch(model=model,\n opt=opt,\n loss_op=CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\", form=\"sparse\", from_logits=True),\n inputs=(\"x\", \"y\"),\n outputs=\"ce\"),\n CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\", form=\"sparse\", from_logits=True, mode=\"eval\"),\n DummpyUpdate(model=model, loss_name=\"ce\")\n ])\n # step 3\n traces = [Perplexity(inputs=\"ce\", outputs=\"perplexity\", mode=\"eval\"), PrintLR(opt=opt)]\n estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)\n return estimator\n", "import fastestimator as fe\nimport tensorflow as tf\nfrom fastestimator.op.numpyop.meta import Sometimes\nfrom fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop\nfrom fastestimator.op.numpyop.univariate import CoarseDropout, Normalize\nfrom fastestimator.op.tensorop.loss import CrossEntropy\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.trace.metric import Accuracy\nfrom tensorflow.python.keras import layers\n\n\ndef residual(x, num_channel):\n x = layers.Conv2D(num_channel, 3, padding='same')(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n x = layers.Conv2D(num_channel, 3, padding='same')(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n return x\n\n\ndef my_model():\n # prep layers\n inp = layers.Input(shape=(32, 32, 3))\n x = layers.Conv2D(64, 3, padding='same')(inp)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n # layer1\n x = layers.Conv2D(128, 3, padding='same')(x)\n x = layers.MaxPool2D()(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n x = layers.Add()([x, residual(x, 128)])\n # layer2\n x = layers.Conv2D(256, 3, padding='same')(x)\n x = layers.MaxPool2D()(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n # layer3\n x = layers.Conv2D(512, 3, padding='same')(x)\n x = layers.MaxPool2D()(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n x = layers.Add()([x, residual(x, 512)])\n # layers4\n x = layers.GlobalMaxPool2D()(x)\n x = layers.Flatten()(x)\n x = layers.Dense(10)(x)\n x = layers.Activation('softmax', dtype='float32')(x)\n model = tf.keras.Model(inputs=inp, outputs=x)\n return model\n\n\ndef get_estimator(init_lr, epochs=30, batch_size=128):\n # step 1\n train_data, eval_data = fe.dataset.data.cifar10.load_data()\n\n pipeline = fe.Pipeline(\n train_data=train_data,\n eval_data=eval_data,\n batch_size=batch_size,\n ops=[\n Normalize(inputs=\"x\", outputs=\"x\", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),\n PadIfNeeded(min_height=40, min_width=40, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n RandomCrop(32, 32, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n Sometimes(HorizontalFlip(image_in=\"x\", image_out=\"x\", mode=\"train\")),\n CoarseDropout(inputs=\"x\", outputs=\"x\", 
mode=\"train\", max_holes=1)\n ])\n # step 2\n model = fe.build(model_fn=my_model, optimizer_fn=lambda: tf.optimizers.Adam(init_lr)) # 1e-2, 1e-3, 1e-4\n network = fe.Network(ops=[\n ModelOp(model=model, inputs=\"x\", outputs=\"y_pred\"),\n CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\"),\n UpdateOp(model=model, loss_name=\"ce\")\n ])\n # step 3\n traces = [Accuracy(true_key=\"y\", pred_key=\"y_pred\")]\n estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)\n return estimator\n", "import os\n\nimport fastestimator as fe\nimport numpy as np\nimport tensorflow as tf\nimport wget\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.schedule import EpochScheduler\nfrom fastestimator.trace.adapt import LRScheduler\n\n\ndef get_ptb(folder_path, seq_length=64):\n file_names = [\"ptb.train.txt\", \"ptb.valid.txt\", \"ptb.test.txt\"]\n urls = [\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt',\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt',\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt'\n ]\n # Read text\n texts = []\n for file_name, url in zip(file_names, urls):\n text = []\n file_path = os.path.join(folder_path, file_name)\n if not os.path.exists(file_path):\n wget.download(url, out=folder_path)\n with open(file_path, 'r') as f:\n for line in f:\n text.extend(line.split() + ['<eos>'])\n texts.append(text)\n # Build dictionary from training data\n vocab = sorted(set(texts[0]))\n word2idx = {u: i for i, u in enumerate(vocab)}\n #convert word to index and split the sequences and discard the last incomplete sequence\n data = [[word2idx[word] for word in text[:-(len(text) % seq_length)]] for text in texts]\n train_data, eval_data, test_data = [np.array(d).reshape(-1, seq_length) for d in data]\n return train_data, eval_data, test_data\n\n\nclass CreateInputAndTarget(fe.op.numpyop.NumpyOp):\n def forward(self, data, state):\n x = data\n return x[:-1], x[1:]\n\n\nclass SparseCrossEntropy(fe.op.tensorop.TensorOp):\n def forward(self, data, state):\n y_pred, y = data\n loss = tf.keras.losses.sparse_categorical_crossentropy(y, y_pred, from_logits=True)\n return tf.reduce_mean(loss)\n\n\nclass Perplexity(fe.trace.Trace):\n def on_epoch_end(self, data):\n ce = data[\"ce\"]\n data.write_with_log(self.outputs[0], np.exp(ce))\n\n\ndef build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),\n tf.keras.layers.LSTM(rnn_units, return_sequences=True, recurrent_initializer='glorot_uniform'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(vocab_size)\n ])\n return model\n\n\ndef get_estimator(init_lr, data_dir, seq_length=20, batch_size=128, vocab_size=10000, epochs=98):\n train_data, _, test_data = get_ptb(folder_path=data_dir, seq_length=seq_length + 1)\n pipeline = fe.Pipeline(train_data=fe.dataset.NumpyDataset(data={\"x\": train_data}),\n eval_data=fe.dataset.NumpyDataset(data={\"x\": test_data}),\n batch_size=batch_size,\n ops=CreateInputAndTarget(inputs=\"x\", outputs=(\"x\", \"y\")),\n drop_last=True)\n # step 2\n model = fe.build(model_fn=lambda: build_model(vocab_size, embedding_dim=300, rnn_units=600, batch_size=batch_size),\n optimizer_fn=lambda: tf.optimizers.SGD(init_lr, momentum=0.9)) #1.0, 0.1, 0.01\n network = fe.Network(ops=[\n ModelOp(model=model, inputs=\"x\", outputs=\"y_pred\"),\n 
SparseCrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\"),\n UpdateOp(model=model, loss_name=\"ce\")\n ])\n # step 3\n traces = [Perplexity(inputs=\"ce\", outputs=\"perplexity\", mode=\"eval\")]\n estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)\n return estimator\n" ]
[ [ "torch.nn.Dropout", "torch.nn.LSTM", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.init.xavier_uniform_", "numpy.array", "numpy.exp" ], [ "tensorflow.python.keras.layers.BatchNormalization", "tensorflow.python.keras.layers.Add", "tensorflow.python.keras.layers.Activation", "tensorflow.python.keras.layers.Flatten", "tensorflow.python.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.python.keras.layers.LeakyReLU", "tensorflow.python.keras.layers.Conv2D", "tensorflow.python.keras.layers.MaxPool2D", "tensorflow.optimizers.Adam", "tensorflow.python.keras.layers.Input", "tensorflow.python.keras.layers.GlobalMaxPool2D" ], [ "tensorflow.keras.layers.Embedding", "tensorflow.reduce_mean", "tensorflow.keras.layers.Dense", "tensorflow.keras.losses.sparse_categorical_crossentropy", "tensorflow.keras.layers.LSTM", "tensorflow.optimizers.SGD", "tensorflow.keras.layers.Dropout", "numpy.array", "numpy.exp" ] ]
longyearxuk/sokrg
[ "001fcf8275eb158765de4e99e0d442b1712aa061" ]
[ "visualize_normal_score_transfers.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom numpy.random import randn\nfrom krg_utils import transform_normal_scores\n\nr = randn(10000)\nslip_sc = pd.read_csv('slip_nscore_transform_table.csv')\n\nslip = transform_normal_scores(r, slip_sc)\n\navg_slip = slip_sc['x'].sum() / len(slip_sc['x'])\navg_score = slip_sc['nscore'].sum() / len(slip_sc['nscore'])\nprint(avg_slip, slip.mean())\n\n# visualize\nfig, ax = plt.subplots()\nax.plot(slip_sc['x'], slip_sc['nscore'])\nax.axvline(x=avg_slip, linestyle='--', color='black')\nplt.show()" ]
[ [ "pandas.read_csv", "numpy.random.randn", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
rajayalamanchili/image_caption_generator
[ "0d1b8a3262b3dcf9329c4685d9f4026bdf7274db" ]
[ "src/visualization/visualize.py" ]
[ "from src.data.datasets import FlickrDataset\nfrom src.config import config\n\nimport matplotlib.pyplot as plt\nimport torch\nfrom PIL import Image\n\ndef display_img_FlickrDataset(dataset, index=0, predicted_caption=None):\n \n image = Image.open(dataset.images_directory / dataset.image_ids[index])\n caption_txt = \"\\n\".join(dataset.img_caption_dict[dataset.image_ids[index]])\n \n fig = plt.figure(figsize=(30, 12))\n ax = fig.add_subplot(1, 2, 1)\n ax.imshow(image)\n ax.axis(\"off\")\n ax = fig.add_subplot(1, 2, 2)\n ax.text(0,0.1,\"Actual:\", fontsize=15, verticalalignment=\"top\", weight=\"bold\")\n ax.text(0,0.15,caption_txt, fontsize=15, verticalalignment=\"top\", weight=\"bold\")\n ax.text(0,0.4,\"Predicted:\", fontsize=15, verticalalignment=\"top\", weight=\"bold\")\n # show the predicted caption when one is supplied; otherwise fall back to the actual captions\n ax.text(0,0.45,predicted_caption if predicted_caption else caption_txt, fontsize=15, verticalalignment=\"top\", weight=\"bold\")\n ax.axis(\"off\")\n ax.invert_yaxis()\n \nif __name__ == \"__main__\":\n\n training_dataset = FlickrDataset(file_name=config.CAPTIONS_TRAIN_FILE, dtype=\"train\")\n \n display_img_FlickrDataset(training_dataset, 100)\n " ]
[ [ "matplotlib.pyplot.figure" ] ]
fhmjones/ocgy-dv-fjversion
[ "176a47d28daabc93821f37decb38fff320491885" ]
[ "dashdir/parse-csv.py" ]
[ "# any work with the data file\n# make a nicer csv to pull from\nimport pandas as pd\nimport gsw\n\n\n# all of the parameters from the full data: 'Longitude [degrees_east]', 'Latitude [degrees_north]',\n# 'PRESSURE [dbar]', 'DEPTH [m]', 'CTDTMP [deg C]', 'CTDSAL', 'SALINITY_D_CONC_BOTTLE', 'SALINITY_D_CONC_PUMP',\n# 'SALINITY_D_CONC_FISH', 'SALINITY_D_CONC_UWAY', 'NITRATE_D_CONC_BOTTLE [umol/kg]', 'NITRATE_D_CONC_PUMP [umol/kg]',\n# 'NITRATE_D_CONC_FISH [umol/kg]', 'NITRATE_D_CONC_UWAY [umol/kg]', 'NITRATE_LL_D_CONC_BOTTLE [umol/kg]',\n# 'NITRATE_LL_D_CONC_FISH [umol/kg]', 'NO2+NO3_D_CONC_BOTTLE [umol/kg]', 'NO2+NO3_D_CONC_FISH [umol/kg]',\n# 'Fe_D_CONC_BOTTLE [nmol/kg]', 'Fe_D_CONC_FISH [nmol/kg]', 'Fe_II_D_CONC_BOTTLE [nmol/kg]', 'Fe_II_D_CONC_FISH [nmol/kg]',\n# 'Fe_S_CONC_BOTTLE [nmol/kg]', 'Fe_S_CONC_FISH [nmol/kg]'\n\ndef average_data(cruise_data):\n # from https://stackoverflow.com/questions/48830324/pandas-average-columns-with-same-value-in-other-columns\n cruise_data = cruise_data.groupby(['Latitude', 'Longitude', 'Station', 'Depth'], as_index=False).mean()\n return cruise_data\n\n\n# removes specifically empty iron data.\ndef remove_empty_data(cruise_data):\n grouped_data = cruise_data.groupby(['Latitude', 'Longitude', 'Station'])\n for name, group in grouped_data:\n if (group['Iron'].isna().values.all()):\n cruise_data = cruise_data.drop(grouped_data.get_group(name).index)\n return cruise_data\n\n\n# add data for [Nitrate] : [Fe] or [Nitrate]/[Fe]\ndef get_nitrate(cruise_data, index, row):\n current_depth = row['Depth']\n min = None\n max = None\n if row['Depth'] <= 100:\n min, max = current_depth - 5, current_depth + 5\n elif row['Depth'] > 100:\n min, max = current_depth - 10, current_depth + 10\n\n avg_nitrate = cruise_data['Nitrate'][((cruise_data.Depth <= max) & (cruise_data.Depth >= min))].mean()\n return avg_nitrate\n\n\ndef add_ratio_data(cruise_data):\n ratio = []\n\n for index, row in cruise_data.iterrows():\n if row['Iron'] is None:\n ratio.append(None)\n else:\n nitrate = get_nitrate(cruise_data, index, row)\n ratio.append(nitrate / row['Iron'])\n\n cruise_data['Ratio'] = ratio\n\n''' \ndef add_ratio_data(cruise_data):\n nit = cruise_data['Nitrate']\n iron = cruise_data['Iron']\n ratio = nit / iron\n\n cruise_data['Ratio'] = ratio\n'''\n\ndef add_density_data(cruise_data):\n #from: http://www.teos-10.org/pubs/gsw/html/gsw_sigma0.html\n practical_salinity = cruise_data['Salinity']\n pressure = cruise_data['Pressure']\n longitude = cruise_data['Longitude']\n latitude = cruise_data['Latitude']\n absolute_salinity = gsw.SA_from_SP(practical_salinity, pressure, longitude, latitude)\n temperature = cruise_data['Temperature']\n sigma0 = gsw.sigma0(absolute_salinity, temperature)\n\n cruise_data['Density'] = sigma0\n\n'''\ndef add_test_density_data(cruise_data):\n practical_salinity = cruise_data['Salinity']\n temperature = cruise_data['Temperature']\n sigma0 = gsw.sigma0(practical_salinity, temperature)\n\n cruise_data['Density'] = sigma0\n'''\n\n# read in original data\nGA03_data = pd.read_csv(\"./data/GA03w.csv\")\nGIPY05_data = pd.read_csv(\"./data/GIPY05e.csv\")\nGP02_data = pd.read_csv(\"./data/GP02w.csv\")\nGIPY04_data = pd.read_csv(\"./data/GIPY04.csv\")\n#TEST_data = pd.read_csv(\"./data/TestCruise.csv\")\n\nheaders = ['Station', 'Latitude', 'Longitude', 'Depth', 'Temperature', 'Salinity', 'Nitrate', 'Iron', 'Pressure']\n\n'''\n#make TEST dataframe\ndata = [TEST_data['Station'], TEST_data['Latitude [degrees_north]'], TEST_data['Longitude [degrees_east]'],\n 
TEST_data['DEPTH [m]'], TEST_data['CTDTMP [deg C]'], TEST_data['CTDSAL'], TEST_data['NO2+NO3_D_CONC_BOTTLE [umol/kg]'],\n TEST_data['Fe_D_CONC_BOTTLE [nmol/kg]'], TEST_data['PRESSURE [dbar]']]\nTEST = pd.concat(data, axis=1, keys=headers)\nadd_density_data(TEST)\nTEST.to_csv('TEST_filtered.csv', index=False)\n'''\n\n\n# make GA03 dataframe and csv\ndata = [GA03_data['Station'], GA03_data['Latitude [degrees_north]'], GA03_data['Longitude [degrees_east]'],\n GA03_data['DEPTH [m]'], GA03_data['CTDTMP [deg C]'], GA03_data['CTDSAL'], GA03_data['NITRATE_D_CONC_BOTTLE [umol/kg]'],\n GA03_data['Fe_D_CONC_BOTTLE [nmol/kg]'], GA03_data['PRESSURE [dbar]']]\nGA03 = pd.concat(data, axis=1, keys=headers)\n# remove unwanted lons and lats\nGA03 = GA03[((GA03.Longitude <= 360 - 60) & (GA03.Longitude >= 360 - 65)) | (GA03.Longitude >= 360 - 25)]\n#GA03 = average_data(GA03)\nadd_ratio_data(GA03)\nadd_density_data(GA03)\nGA03 = remove_empty_data(GA03)\nGA03 = GA03[(GA03.Depth <= 500)]\n\nstations = []\npositions = []\nfor i in range(len(GA03)):\n station = GA03['Station'].values[i]\n lat = GA03['Latitude'].values[i]\n lon = GA03['Longitude'].values[i]\n if len(positions) == 0 or [lat, lon] != positions[-1]:\n positions.append([lat, lon])\n stations.append(station)\n# print(stations)\nfor i in [4]: # choosing specific profiles\n GA03 = GA03.drop(GA03[(GA03.Latitude == positions[i][0]) & (GA03.Longitude == positions[i][1])].index)\nGA03.to_csv('./data/GA03_filtered.csv', index=False)\n\n# make GIPY05 dataframe and csv\ndata = [GIPY05_data['Station'], GIPY05_data['Latitude [degrees_north]'], GIPY05_data['Longitude [degrees_east]'],\n GIPY05_data['DEPTH [m]'],\n GIPY05_data['CTDTMP [deg C]'], GIPY05_data['CTDSAL'], GIPY05_data['NO2+NO3_D_CONC_BOTTLE [umol/kg]'],\n GIPY05_data['Fe_D_CONC_BOTTLE [nmol/kg]'], GIPY05_data['PRESSURE [dbar]']]\nGIPY05 = pd.concat(data, axis=1, keys=headers)\n# remove unwanted lons and lats\nGIPY05 = GIPY05[(GIPY05.Latitude >= -45) | (GIPY05.Latitude <= -65)]\n#GIPY05 = average_data(GIPY05)\nadd_ratio_data(GIPY05)\nadd_density_data(GIPY05)\nGIPY05 = remove_empty_data(GIPY05)\nGIPY05 = GIPY05[(GIPY05.Depth <= 500)]\n\npositions = []\nstations = []\nfor i in range(len(GIPY05)):\n station = GIPY05['Station'].values[i]\n lat = GIPY05['Latitude'].values[i]\n lon = GIPY05['Longitude'].values[i]\n if len(positions) == 0 or [lat, lon] != positions[-1]:\n positions.append([lat, lon])\n stations.append(station)\n# print(stations)\n# for i in []: #choosing specific profiles\n# GIPY05 = GIPY05.drop(GIPY05[(GIPY05.Latitude == positions[i][0]) & (GIPY05.Longitude == positions[i][1])].index)\nGIPY05.to_csv('./data/GIPY05_filtered.csv', index=False)\n\n# make GP02 dataframe and csv\ndata = [GP02_data['Station'], GP02_data['Latitude [degrees_north]'], GP02_data['Longitude [degrees_east]'],\n GP02_data['DEPTH [m]'],\n GP02_data['CTDTMP [deg C]'], GP02_data['CTDSAL'], GP02_data['NO2+NO3_D_CONC_BOTTLE [umol/kg]'],\n GP02_data['Fe_D_CONC_BOTTLE [nmol/kg]'], GP02_data['PRESSURE [dbar]']]\nGP02 = pd.concat(data, axis=1, keys=headers)\n# remove unwanted lons and lats\nGP02 = GP02[(GP02.Longitude <= 155) | (GP02.Longitude >= 180)]\n# GP02 = average_data(GP02)\nadd_ratio_data(GP02)\nadd_density_data(GP02)\nGP02 = remove_empty_data(GP02)\nGP02 = GP02[(GP02.Depth <= 500)]\n\npositions = []\nstations = []\nfor i in range(len(GP02)):\n station = GP02['Station'].values[i]\n lat = GP02['Latitude'].values[i]\n lon = GP02['Longitude'].values[i]\n if len(positions) == 0 or [lat, lon] != positions[-1]:\n 
positions.append([lat, lon])\n stations.append(station)\n# print(stations)\n# for i in []: #choosing specific profiles\n# GP02 = GP02.drop(GP02[(GP02.Latitude == positions[i][0]) & (GP02.Longitude == positions[i][1])].index)\nGP02.to_csv('./data/GP02_filtered.csv', index=False)\n\n# make GIPY04 dataframe and csv\ndata = [GIPY04_data['Station'], GIPY04_data['Latitude [degrees_north]'], GIPY04_data['Longitude [degrees_east]'],\n GIPY04_data['DEPTH [m]'],\n GIPY04_data['CTDTMP [deg C]'], GIPY04_data['CTDSAL'], GIPY04_data['NITRATE_D_CONC_BOTTLE [umol/kg]'],\n GIPY04_data['Fe_D_CONC_BOTTLE [nmol/kg]'], GIPY04_data['PRESSURE [dbar]']]\nGIPY04 = pd.concat(data, axis=1, keys=headers)\n# remove unwanted lons and lats\nGIPY04 = GIPY04[(GIPY04.Latitude >= -45)]\n# GIPY04 = average_data(GIPY04)\nadd_ratio_data(GIPY04)\nadd_density_data(GIPY04)\nGIPY04 = remove_empty_data(GIPY04)\nGIPY04 = GIPY04[(GIPY04.Depth <= 500)]\n\npositions = []\nstations = []\nfor i in range(len(GIPY04)):\n station = GIPY04['Station'].values[i]\n lat = GIPY04['Latitude'].values[i]\n lon = GIPY04['Longitude'].values[i]\n if len(positions) == 0 or [lat, lon] != positions[-1]:\n positions.append([lat, lon])\n stations.append(station)\n# print(stations)\nfor i in [0, 2, 4]: # choosing specific profiles\n GIPY04 = GIPY04.drop(GIPY04[(GIPY04.Latitude == positions[i][0]) & (GIPY04.Longitude == positions[i][1])].index)\nGIPY04.to_csv('./data/GIPY04_filtered.csv', index=False)\n" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
agtoever/twixtbot-ui
[ "366d7bef33fdbaa260ea8b3330fa9ab29ad05f03" ]
[ "src/plot.py" ]
[ "\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport constants as ct\n\n\nclass ThreeBarPlot():\n\n def __init__(self, canvas, bar_color):\n self.bar_color = bar_color\n self.prepare(canvas)\n\n def update(self, values=None, xmax=None):\n\n # clear the subplot\n ax1 = self.sub_plot\n ax1.clear()\n ax1.invert_yaxis()\n\n if values is not None:\n ind = np.arange(3)\n labels = [str(m).upper() for m in values[\"moves\"]]\n if \"Y\" in values:\n ax1.set_xlim(xmin=0, xmax=xmax)\n ax1.barh(ind, values[\"Y\"],\n color=self.bar_color, tick_label=labels)\n offset = xmax * 0.02\n for i, v in enumerate(values[\"Y\"]):\n ax1.text(v + offset, i + 0.27, str(v),\n color=ct.PLOT_LABEL_COLOR,\n fontfamily=ct.PLOT_LABEL_FONT[0],\n fontsize=ct.PLOT_LABEL_FONT[1])\n\n plt.subplots_adjust(left=None, bottom=None,\n right=None, top=None, wspace=0, hspace=0)\n self.agg.draw()\n\n def prepare(self, canvas):\n\n fig, ax1 = plt.subplots(figsize=(2.4, 0.7))\n\n ax1.tick_params(axis='x', which='major', labelcolor=\"black\", top=False,\n labeltop=False, labelbottom=False, bottom=False)\n ax1.tick_params(axis='y', which='major', labelcolor=\"black\",\n labelleft=True, labelsize=8, pad=.8)\n\n ax1.spines['bottom'].set_visible(False)\n ax1.spines['top'].set_visible(False)\n ax1.spines['right'].set_visible(False)\n\n agg = FigureCanvasTkAgg(fig, canvas)\n agg.get_tk_widget().pack()\n\n self.sub_plot = ax1\n self.agg = agg\n\n\nclass EvalHistPlot():\n def __init__(self, canvas, stgs):\n self.sub_plot = None\n self.agg = None\n self.prepare(canvas)\n self.stgs = stgs\n\n def sc_to_color(self, sc):\n if sc > 0:\n return self.stgs.get(ct.K_COLOR[1])\n return self.stgs.get(ct.K_COLOR[2])\n\n def update(self, values=None):\n # clear the subplot\n ax1 = self.sub_plot\n ax1.clear()\n\n if values is not None:\n ax1.bar(values.keys(), values.values(),\n color=list(map(self.sc_to_color, values.values())))\n\n xmax = max(10, len(values))\n plt.xlim(-1, xmax)\n plt.xticks(np.arange(0, xmax, xmax // 6))\n plt.ylim([-1, 1])\n\n plt.subplots_adjust(left=None, bottom=0.3,\n right=None, top=0.9, wspace=0, hspace=0)\n self.agg.draw()\n\n def prepare(self, canvas):\n fig, ax1 = plt.subplots(figsize=(2.4, 0.7))\n\n ax1.tick_params(axis='x', which='major', labelcolor=\"black\",\n labelsize=8, pad=.8, top=False, bottom=False)\n ax1.tick_params(axis='y', which='major', labelcolor=\"black\",\n labelsize=8, pad=.8)\n ax1.autoscale(True, axis='x', tight=True)\n\n ax1.spines['bottom'].set_visible(False)\n ax1.spines['top'].set_visible(False)\n ax1.spines['right'].set_visible(False)\n\n agg = FigureCanvasTkAgg(fig, canvas)\n agg.get_tk_widget().pack()\n\n self.sub_plot = ax1\n self.agg = agg\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplots_adjust", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
hanouticelina/reinforcement-learning
[ "c7c6765486ea9546bbd8ce75e6032a408a1410cf", "c7c6765486ea9546bbd8ce75e6032a408a1410cf", "c7c6765486ea9546bbd8ce75e6032a408a1410cf" ]
[ "TME 8. DDPG/utils.py", "TME 1. Bandits/bandits.py", "TME 11. MADDPG/utils.py" ]
[ "import time\nimport subprocess\nfrom collections import namedtuple,defaultdict\nimport logging\nimport json\nimport os\nimport yaml\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\nimport threading\nimport numpy as np\nimport gym\nfrom collections import deque\nimport random\nimport torch.autograd\nfrom torch.autograd import Variable\n\nclass OUNoise(object):\n def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):\n self.mu = mu\n self.theta = theta\n self.sigma = max_sigma\n self.max_sigma = max_sigma\n self.min_sigma = min_sigma\n self.decay_period = decay_period\n self.action_dim = action_space.shape[0]\n self.low = action_space.low\n self.high = action_space.high\n self.reset()\n \n def reset(self):\n self.state = np.ones(self.action_dim) * self.mu\n \n def evolve_state(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)\n self.state = x + dx\n return self.state\n \n def get_action(self, action, t=0):\n ou_state = self.evolve_state()\n self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)\n return np.clip(action + ou_state, self.low, self.high)\n\n\n\ndef loadTensorBoard(outdir):\n t = threading.Thread(target=launchTensorBoard, args=([outdir]))\n t.start()\n\ndef launchTensorBoard(tensorBoardPath):\n print('tensorboard --logdir=' + tensorBoardPath)\n ret=os.system('tensorboard --logdir=' + tensorBoardPath)\n if ret!=0:\n syspath = os.path.dirname(sys.executable)\n print(os.path.dirname(sys.executable))\n ret = os.system(syspath+\"/\"+'tensorboard --logdir=' + tensorBoardPath)\n return\n\nclass Orn_Uhlen:\n def __init__(self, n_actions, mu=0, theta=0.15, sigma=0.2):\n self.n_actions = n_actions\n self.X = np.ones(n_actions) * mu\n self.mu = mu\n self.sigma = sigma\n self.theta = theta\n\n def reset(self):\n self.X = np.ones(self.n_actions) * self.mu\n\n def sample(self):\n dX = self.theta * (self.mu - self.X)\n dX += self.sigma * np.random.randn(self.n_actions)\n self.X += dX\n return torch.FloatTensor(self.X)\n\nclass FeatureExtractor(object):\n def __init__(self):\n super().__init__()\n\n def getFeatures(self,obs):\n pass\n\nclass NothingToDo(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n ob=env.reset()\n self.outSize=len(ob)\n\n def getFeatures(self,obs):\n return obs\n\n###### Pour Gridworld #############################\"\n\nclass MapFromDumpExtractor(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n outSize = env.start_grid_map.reshape(1, -1).shape[1]\n self.outSize=outSize\n\n def getFeatures(self, obs):\n #prs(obs)\n return obs.reshape(1,-1)\n\nclass MapFromDumpExtractor2(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n outSize=env.start_grid_map.reshape(1, -1).shape[1]\n self.outSize=outSize*3\n\n def getFeatures(self, obs):\n state=np.zeros((3,np.shape(obs)[0],np.shape(obs)[1]))\n state[0]=np.where(obs == 2,1,state[0])\n state[1] = np.where(obs == 4, 1, state[1])\n state[2] = np.where(obs == 6, 1, state[2])\n return state.reshape(1,-1)\n\n\n\n\nclass DistsFromStates(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n self.outSize=16\n\n def getFeatures(self, obs):\n #prs(obs)\n #x=np.loads(obs)\n x=obs\n #print(x)\n astate = list(map(\n lambda x: x[0] if len(x) > 0 else None,\n np.where(x == 2)\n ))\n astate=np.array(astate)\n a3=np.where(x == 3)\n d3=np.array([0])\n if len(a3[0])>0:\n astate3 = 
np.concatenate(a3).reshape(2,-1).T\n d3=np.power(astate-astate3,2).sum(1).min().reshape(1)\n\n #d3 = np.array(d3).reshape(1)\n a4 = np.where(x == 4)\n d4 = np.array([0])\n if len(a4[0]) > 0:\n astate4 = np.concatenate(a4).reshape(2,-1).T\n d4 = np.power(astate - astate4, 2).sum(1).min().reshape(1)\n #d4 = np.array(d4)\n a5 = np.where(x == 5)\n d5 = np.array([0])\n #prs(a5)\n if len(a5[0]) > 0:\n astate5 = np.concatenate(a5).reshape(2,-1).T\n d5 = np.power(astate - astate5, 2).sum(1).min().reshape(1)\n #d5 = np.array(d5)\n a6 = np.where(x == 6)\n d6 = np.array([0])\n if len(a6[0]) > 0:\n astate6 = np.concatenate(a6).reshape(2,-1).T\n d6 = np.power(astate - astate6, 2).sum(1).min().reshape(1)\n #d6=np.array(d6)\n\n #prs(\"::\",d3,d4,d5,d6)\n ret=np.concatenate((d3,d4,d5,d6)).reshape(1,-1)\n ret=np.dot(ret.T,ret)\n return ret.reshape(1,-1)\n\n#######################################################################################\n\n\n\n\n\n\n# class Qfunction(nn.Module):\n# def __init__(self):\n# super(Qfunction,self).__init__()\n#\n# def setcuda(self, device):\n#\n# #FeatureExtractor.floatTensor = torch.cuda.FloatTensor(1, device=device)\n# #FeatureExtractor.longTensor = torch.cuda.LongTensor(1, device=device)\n# self.cuda(device=device)\n\n\n\n\n\n\nclass convMDP(nn.Module):\n def __init__(self, inSize, outSize, layers=[], convs=None, finalActivation=None, batchNorm=False,init_batchNorm=False,activation=torch.tanh):\n super(convMDP, self).__init__()\n #print(inSize,outSize)\n\n self.inSize=inSize\n self.outSize=outSize\n self.batchNorm=batchNorm\n self.init_batchNorm = init_batchNorm\n self.activation=activation\n\n self.convs=None\n if convs is not None:\n self.convs = nn.ModuleList([])\n for x in convs:\n self.convs.append(nn.Conv2d(x[0], x[1], x[2], stride=x[3]))\n inSize = np.sqrt(inSize / x[0])\n inSize=((inSize-x[2])/x[3])+1\n inSize=inSize*inSize*x[1]\n #print(inSize)\n\n self.layers = nn.ModuleList([])\n self.bn = nn.ModuleList([])\n i=0\n if batchNorm or init_batchNorm:\n self.bn.append(nn.BatchNorm1d(num_features=inSize))\n for x in layers:\n self.layers.append(nn.Linear(inSize, x))\n if batchNorm:\n self.bn.append(nn.BatchNorm1d(num_features=x))\n\n #nn.init.xavier_uniform_(self.layers[i].weight)\n nn.init.normal_(self.layers[i].weight.data, 0.0, 0.02)\n nn.init.normal_(self.layers[i].bias.data,0.0,0.02)\n i+=1\n inSize = x\n self.layers.append(nn.Linear(inSize, outSize))\n\n #nn.init.uniform_(self.layers[-1].weight)\n nn.init.normal_(self.layers[-1].weight.data, 0.0, 0.02)\n nn.init.normal_(self.layers[-1].bias.data, 0.0, 0.02)\n self.finalActivation=finalActivation\n\n def setcuda(self, device):\n self.cuda(device=device)\n\n\n\n def forward(self, x):\n #print(\"d\", x.size(),self.inSize)\n x=x.view(-1,self.inSize)\n\n if self.convs is not None:\n\n n=x.size()[0]\n i=0\n for c in self.convs:\n if i==0:\n w=np.sqrt(x.size()[1])\n x=x.view(n,c.in_channels,w,w)\n x=c(x)\n x=self.activation(x)\n i+=1\n x=x.view(n,-1)\n\n #print(x.size())\n if self.batchNorm or self.init_batchNorm:\n x=self.bn[0](x)\n x = self.layers[0](x)\n\n\n for i in range(1, len(self.layers)):\n x = self.activation(x)\n #if self.drop is not None:\n # x = nn.drop(x)\n if self.batchNorm:\n x = self.bn[i](x)\n x = self.layers[i](x)\n\n\n if self.finalActivation is not None:\n x=self.finalActivation(x)\n #print(\"f\",x.size())\n return x\n\nclass NN(nn.Module):\n def __init__(self, inSize, outSize, layers=[]):\n super(NN, self).__init__()\n self.layers = nn.ModuleList([])\n for x in layers:\n 
self.layers.append(nn.Linear(inSize, x))\n inSize = x\n self.layers.append(nn.Linear(inSize, outSize))\n\n def setcuda(self, device):\n self.cuda(device=device)\n\n def forward(self, x):\n x = self.layers[0](x)\n for i in range(1, len(self.layers)):\n x = torch.tanh(x)\n x = self.layers[i](x)\n\n return x\n\n\nclass Critic(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Critic, self).__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, output_size)\n\n def forward(self, state, action):\n \"\"\"\n Params state and actions are torch tensors\n \"\"\"\n x = torch.cat([state, action], 1)\n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n\n return x\n\nclass Actor(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, learning_rate = 3e-4):\n super(Actor, self).__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, output_size)\n \n def forward(self, state):\n \"\"\"\n Param state is a torch tensor\n \"\"\"\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = torch.tanh(self.linear3(x))\n\n return x\n\nclass LogMe(dict):\n def __init__(self,writer,term=True):\n self.writer = writer\n self.dic = defaultdict(list)\n self.term = term\n def write(self,i):\n if len(self.dic)==0: return\n s=f\"Epoch {i} : \"\n for k,v in self.dic.items():\n self.writer.add_scalar(k,sum(v)*1./len(v),i)\n s+=f\"{k}:{sum(v)*1./len(v)} -- \"\n self.dic.clear()\n if self.term: logging.info(s)\n def update(self,l):\n for k,v in l:\n self.add(k,v)\n def direct_write(self,k,v,i):\n self.writer.add_scalar(k,v,i)\n def add(self,k,v):\n self.dic[k].append(v)\n\ndef save_src(path):\n current_dir = os.getcwd()\n package_dir = current_dir.split('RL', 1)[0]\n #path = os.path.abspath(path)\n os.chdir(package_dir)\n #print(package_dir)\n src_files = subprocess.Popen(('find', 'RL', '-name', '*.py', '-o', '-name', '*.yaml'),\n stdout=subprocess.PIPE)\n #print(package_dir,path)\n #path=os.path.abspath(path)\n\n\n #print(str(src_files))\n\n subprocess.check_output(('tar', '-zcf', path+\"/arch.tar\", '-T', '-'), stdin=src_files.stdout, stderr=subprocess.STDOUT)\n src_files.wait()\n os.chdir(current_dir)\n\n\n\ndef prs(*args):\n st = \"\"\n for s in args:\n st += str(s)\n print(st)\n\n\nclass DotDict(dict):\n \"\"\"dot.notation access to dictionary attributes (Thomas Robert)\"\"\"\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n\ndef load_yaml(path):\n with open(path, 'r') as stream:\n opt = yaml.load(stream,Loader=yaml.Loader)\n return DotDict(opt)\n\ndef write_yaml(file,dotdict):\n d=dict(dotdict)\n with open(file, 'w', encoding='utf8') as outfile:\n yaml.dump(d, outfile, default_flow_style=False, allow_unicode=True)\n\n\nclass EpsilonGreedyDecay:\n def __init__(self, epsilon, eta, epsilon_min):\n self.eta = eta\n self.epsilon=epsilon\n self.epsilon_min=epsilon_min\n def act(self, episode, q_values):\n decay = self.epsilon / (1 + (self.eta * episode))\n if decay<self.epsilon_min:\n decay=self.epsilon_min\n if np.random.random() > decay:\n _,action = torch.max(q_values,0) # we take the action that maximize the q_value\n return action.item()\n return np.random.randint(len(q_values))", "import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom random import *\nimport math\n\n# 
Data preprocessing\n# Each line contains id:context:reward, so the Data class parses and stores the data\nclass Data:\n    def __init__(self, FILENAME):\n        self.reward=[] # holds all the reward arrays\n        self.context=[] # holds the context arrays\n        with open(FILENAME) as f:\n            for line in f.readlines():\n                parts = line.split(':')\n                parts[1] = parts[1].split(';')\n                parts[2] = parts[2].split(';')\n                self.context.append(np.array([float(i) for i in parts[1]]))\n                self.reward.append(np.array([float(i) for i in parts[2]]))\n        self.reward=np.array(self.reward)\n        self.context=np.array(self.context)\n    \n    def __iter__(self):\n        # yields one (context, reward) tuple at a time\n        for i in range(len(self.reward)):\n            yield self.context[i], self.reward[i]\n    \n    \nclass RandomStrategy(): \n    def __init__(self):\n        pass\n    def step(self,context,reward):\n        # plays one round, returns the index of the chosen advertiser\n        rand_idx = np.random.randint(low = 0, high = len(reward))\n        return rand_idx\n    \n    \n    \n# Plays the StaticBest strategy at every round\nclass StaticBestStrategy():\n    def __init__(self, data):\n        mu = np.array(list(data))[:,1] # overall mean reward of each advertiser\n        self.best = np.argmax(np.sum(mu, axis = 0)) # index of the advertiser with the best overall mean\n    def step(self,context,reward):\n        # plays one round, returns the index of the chosen advertiser\n        return self.best\n    \nclass OptimaleStrategy :\n    def __init__(self):\n        pass\n\n    def step(self,context,reward): \n        # plays one round, returns the index of the chosen advertiser\n        return np.argmax(np.array(reward))\n    \n    \n# Abstract class defining the skeleton of every strategy\nclass Bandit:\n    def __init__(self):\n        pass\n    def step(self,context,reward): \n        # plays one round, returns the index of the chosen advertiser\n        action = self.action(context)\n        self.update(reward,action)\n        return action\n    def action(self,context):\n        pass\n    def update(self,reward,action):\n        pass\n\n# Abstract class for strategies that count how often each advertiser was chosen in the past\nclass CountBased(Bandit):\n    def __init__(self):\n        pass\n    def update(self,reward,action):\n        # updates the running mean reward of the chosen advertiser\n        self.mu[action]=(self.mu[action]*self.nb[action]+reward[action])/(self.nb[action]+1) # mu = ((mu*s) + reward)/(s+1) where mu is the previous mean and s the number of times this advertiser was used\n        self.nb[action]+=1\n    \n# Plays the UCB strategy at every round\nclass UCB(CountBased):\n    def __init__(self,data):\n        #initialization with the first 10 (context,reward) tuples; for tuple i we use advertiser i \n        self.mu = np.stack(np.array(list(data))[:10,1]).diagonal()\n        # number of times each advertiser has been used\n        self.nb=np.ones((list(data)[0][1].shape[0]))\n        self.mu.flags.writeable = True\n    def action(self,context):\n        # picks the action according to the strategy \n        return np.argmax(self.mu+np.power(2*np.log(np.sum(self.nb))/self.nb,1/2))\n    \n    \n# Plays the epsilon-greedy strategy at every round\nclass E_Greedy(CountBased):\n    def __init__(self,data,e=0.1):\n        #initialization with the first 10 (context,reward) tuples; for tuple i we use advertiser i \n        self.mu = np.stack(np.array(list(data))[:10,1]).diagonal()\n        # number of times each advertiser has been used\n        self.nb=np.ones((list(data)[0][1].shape[0]))\n        # exploration rate epsilon\n        self.e=e\n        self.mu.flags.writeable = True\n    def action(self,context):\n        # picks the action according to the strategy \n        a = random()\n        if(a<self.e):\n            return np.random.randint(low = 0, high = len(context))\n        return np.argmax(self.mu)\n    \n# Plays the LinUCB strategy at every round\nclass LinUCB(Bandit):\n    \n    def __init__(self,data,alpha=0.2):\n        # number of advertisers\n        self.nb=list(data)[0][1].shape[0]\n        # exploration coefficient\n        self.alpha=alpha\n        # dimension of the context\n        self.d =list(data)[0][0].shape[0]\n        self.A=[np.identity(self.d)]*self.nb\n        self.b=np.zeros((self.nb,self.d))\n\n    def step(self,context,reward):\n        # plays one round (picks the action according to the strategy and updates the parameters), returns the index of the chosen advertiser\n        context=context.reshape((self.d,1))\n        action = self.action(context)\n        self.update(action,reward,context)\n        return action\n    \n    def action(self,context):\n        # picks the action according to the strategy \n        val=np.zeros(self.nb)\n        for i in range(self.nb):\n            teta=np.dot(np.linalg.inv(self.A[i]),self.b[i]).reshape((self.d,1))\n            p=np.dot(teta.T,context)+self.alpha*np.sqrt(np.dot(np.dot(context.T,np.linalg.inv(self.A[i])),context)) \n            val[i]=p \n        return np.random.choice(np.where(val == val.max())[0])\n    \n    def update(self, action,reward,context):\n        self.A[action]+=np.dot(context,context.T)\n        self.b[action]+=reward[action]*context[:,0]", "import time\nimport subprocess\nfrom collections import namedtuple,defaultdict\nimport logging\nimport json\nimport os\nimport yaml\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\nimport threading\nimport gym\nfrom collections import deque\nimport random\nimport torch.autograd\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport copy\n\n\n\nclass CriticNetwork(nn.Module):\n\n    def __init__(self, state_dim, action_dim):\n\n        super(CriticNetwork, self).__init__()\n        self.state_fc = nn.Linear(state_dim, 64)\n        self.fc1 = nn.Linear(action_dim+64, 128)\n        self.fc2 = nn.Linear(128, 64)\n        self.fc3 = nn.Linear(64, 1)\n        self.reset_parameters()\n\n    def reset_parameters(self):\n\n        self.state_fc.weight.data.uniform_(*hidden_init(self.state_fc))\n        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n        self.fc3.weight.data.uniform_(-3e-3, 3e-3)\n\n    def forward(self, state, action):\n\n        state, action = state.squeeze(), action.squeeze()\n        x = F.relu(self.state_fc(state))\n        x = torch.cat((x, action), dim=1)\n        x = F.relu(self.fc1(x))\n        x = F.relu(self.fc2(x))\n        return self.fc3(x)\n\ndef hidden_init(layer):\n    fan_in = layer.weight.data.size()[0]\n    lim = 1. 
/ np.sqrt(fan_in)\n return (-lim, lim)\n\nclass ActorNetwork(nn.Module):\n\n def __init__(self, state_dim, action_dim):\n\n\n super(ActorNetwork, self).__init__()\n \n self.fc1 = nn.Linear(state_dim, 64)\n \n self.fc2 = nn.Linear(64, 128)\n \n self.fc3 = nn.Linear(128, action_dim)\n self.reset_parameters()\n\n def reset_parameters(self):\n \"\"\"\n Initialize parameters\n \"\"\"\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(-3e-3, 3e-3)\n\n def forward(self, x):\n \"\"\"\n Maps a state to actions\n \"\"\"\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n return torch.tanh(self.fc3(x))\n\nclass OUNoise(object):\n def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):\n self.mu = mu\n self.theta = theta\n self.sigma = max_sigma\n self.max_sigma = max_sigma\n self.min_sigma = min_sigma\n self.decay_period = decay_period\n self.action_dim = action_space.shape[0]\n self.low = action_space.low\n self.high = action_space.high\n self.reset()\n \n def reset(self):\n self.state = np.ones(self.action_dim) * self.mu\n \n def evolve_state(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)\n self.state = x + dx\n return self.state\n \n def get_action(self, action, t=0):\n ou_state = self.evolve_state()\n self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)\n return np.clip(action + ou_state, self.low, self.high)\n\n\n\ndef loadTensorBoard(outdir):\n t = threading.Thread(target=launchTensorBoard, args=([outdir]))\n t.start()\n\ndef launchTensorBoard(tensorBoardPath):\n print('tensorboard --logdir=' + tensorBoardPath)\n ret=os.system('tensorboard --logdir=' + tensorBoardPath)\n if ret!=0:\n syspath = os.path.dirname(sys.executable)\n print(os.path.dirname(sys.executable))\n ret = os.system(syspath+\"/\"+'tensorboard --logdir=' + tensorBoardPath)\n return\n\nclass Orn_Uhlen:\n def __init__(self, n_actions, mu=0, theta=0.15, sigma=0.2):\n self.n_actions = n_actions\n self.X = np.ones(n_actions) * mu\n self.mu = mu\n self.sigma = sigma\n self.theta = theta\n\n def reset(self):\n self.X = np.ones(self.n_actions) * self.mu\n\n def sample(self):\n dX = self.theta * (self.mu - self.X)\n dX += self.sigma * np.random.randn(self.n_actions)\n self.X += dX\n return torch.FloatTensor(self.X)\n\nclass FeatureExtractor(object):\n def __init__(self):\n super().__init__()\n\n def getFeatures(self,obs):\n pass\n\nclass NothingToDo(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n ob=env.reset()\n self.outSize=len(ob)\n\n def getFeatures(self,obs):\n return obs\n\n###### Pour Gridworld #############################\"\n\nclass MapFromDumpExtractor(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n outSize = env.start_grid_map.reshape(1, -1).shape[1]\n self.outSize=outSize\n\n def getFeatures(self, obs):\n #prs(obs)\n return obs.reshape(1,-1)\n\nclass MapFromDumpExtractor2(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n outSize=env.start_grid_map.reshape(1, -1).shape[1]\n self.outSize=outSize*3\n\n def getFeatures(self, obs):\n state=np.zeros((3,np.shape(obs)[0],np.shape(obs)[1]))\n state[0]=np.where(obs == 2,1,state[0])\n state[1] = np.where(obs == 4, 1, state[1])\n state[2] = np.where(obs == 6, 1, state[2])\n return state.reshape(1,-1)\n\n\n\n\nclass DistsFromStates(FeatureExtractor):\n def __init__(self,env):\n 
super().__init__()\n self.outSize=16\n\n def getFeatures(self, obs):\n #prs(obs)\n #x=np.loads(obs)\n x=obs\n #print(x)\n astate = list(map(\n lambda x: x[0] if len(x) > 0 else None,\n np.where(x == 2)\n ))\n astate=np.array(astate)\n a3=np.where(x == 3)\n d3=np.array([0])\n if len(a3[0])>0:\n astate3 = np.concatenate(a3).reshape(2,-1).T\n d3=np.power(astate-astate3,2).sum(1).min().reshape(1)\n\n #d3 = np.array(d3).reshape(1)\n a4 = np.where(x == 4)\n d4 = np.array([0])\n if len(a4[0]) > 0:\n astate4 = np.concatenate(a4).reshape(2,-1).T\n d4 = np.power(astate - astate4, 2).sum(1).min().reshape(1)\n #d4 = np.array(d4)\n a5 = np.where(x == 5)\n d5 = np.array([0])\n #prs(a5)\n if len(a5[0]) > 0:\n astate5 = np.concatenate(a5).reshape(2,-1).T\n d5 = np.power(astate - astate5, 2).sum(1).min().reshape(1)\n #d5 = np.array(d5)\n a6 = np.where(x == 6)\n d6 = np.array([0])\n if len(a6[0]) > 0:\n astate6 = np.concatenate(a6).reshape(2,-1).T\n d6 = np.power(astate - astate6, 2).sum(1).min().reshape(1)\n #d6=np.array(d6)\n\n #prs(\"::\",d3,d4,d5,d6)\n ret=np.concatenate((d3,d4,d5,d6)).reshape(1,-1)\n ret=np.dot(ret.T,ret)\n return ret.reshape(1,-1)\n\n\n\n\n\n\n\n\nclass convMDP(nn.Module):\n def __init__(self, inSize, outSize, layers=[], convs=None, finalActivation=None, batchNorm=False,init_batchNorm=False,activation=torch.tanh):\n super(convMDP, self).__init__()\n #print(inSize,outSize)\n\n self.inSize=inSize\n self.outSize=outSize\n self.batchNorm=batchNorm\n self.init_batchNorm = init_batchNorm\n self.activation=activation\n\n self.convs=None\n if convs is not None:\n self.convs = nn.ModuleList([])\n for x in convs:\n self.convs.append(nn.Conv2d(x[0], x[1], x[2], stride=x[3]))\n inSize = np.sqrt(inSize / x[0])\n inSize=((inSize-x[2])/x[3])+1\n inSize=inSize*inSize*x[1]\n #print(inSize)\n\n self.layers = nn.ModuleList([])\n self.bn = nn.ModuleList([])\n i=0\n if batchNorm or init_batchNorm:\n self.bn.append(nn.BatchNorm1d(num_features=inSize))\n for x in layers:\n self.layers.append(nn.Linear(inSize, x))\n if batchNorm:\n self.bn.append(nn.BatchNorm1d(num_features=x))\n\n #nn.init.xavier_uniform_(self.layers[i].weight)\n nn.init.normal_(self.layers[i].weight.data, 0.0, 0.02)\n nn.init.normal_(self.layers[i].bias.data,0.0,0.02)\n i+=1\n inSize = x\n self.layers.append(nn.Linear(inSize, outSize))\n\n #nn.init.uniform_(self.layers[-1].weight)\n nn.init.normal_(self.layers[-1].weight.data, 0.0, 0.02)\n nn.init.normal_(self.layers[-1].bias.data, 0.0, 0.02)\n self.finalActivation=finalActivation\n\n\n\n\n\n def forward(self, x):\n #print(\"d\", x.size(),self.inSize)\n x=x.view(-1,self.inSize)\n\n if self.convs is not None:\n\n n=x.size()[0]\n i=0\n for c in self.convs:\n if i==0:\n w=np.sqrt(x.size()[1])\n x=x.view(n,c.in_channels,w,w)\n x=c(x)\n x=self.activation(x)\n i+=1\n x=x.view(n,-1)\n\n #print(x.size())\n if self.batchNorm or self.init_batchNorm:\n x=self.bn[0](x)\n x = self.layers[0](x)\n\n\n for i in range(1, len(self.layers)):\n x = self.activation(x)\n #if self.drop is not None:\n # x = nn.drop(x)\n if self.batchNorm:\n x = self.bn[i](x)\n x = self.layers[i](x)\n\n\n if self.finalActivation is not None:\n x=self.finalActivation(x)\n #print(\"f\",x.size())\n return x\n\nclass NN(nn.Module):\n def __init__(self, inSize, outSize, layers=[]):\n super(NN, self).__init__()\n self.layers = nn.ModuleList([])\n for x in layers:\n self.layers.append(nn.Linear(inSize, x))\n inSize = x\n self.layers.append(nn.Linear(inSize, outSize))\n\n\n\n def forward(self, x):\n x = self.layers[0](x)\n for i in 
range(1, len(self.layers)):\n x = torch.tanh(x)\n x = self.layers[i](x)\n\n return x\n\n\nclass Critic(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Critic, self).__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, output_size)\n\n def forward(self, state, action):\n \"\"\"\n Params state and actions are torch tensors\n \"\"\"\n x = torch.cat([state, action], 1)\n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n\n return x\n\nclass Actor(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, learning_rate = 3e-4):\n super(Actor, self).__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, output_size)\n \n def forward(self, state):\n \"\"\"\n Param state is a torch tensor\n \"\"\"\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = torch.tanh(self.linear3(x))\n\n return x\n\nclass LogMe(dict):\n def __init__(self,writer,term=True):\n self.writer = writer\n self.dic = defaultdict(list)\n self.term = term\n def write(self,i):\n if len(self.dic)==0: return\n s=f\"Epoch {i} : \"\n for k,v in self.dic.items():\n self.writer.add_scalar(k,sum(v)*1./len(v),i)\n s+=f\"{k}:{sum(v)*1./len(v)} -- \"\n self.dic.clear()\n if self.term: logging.info(s)\n def update(self,l):\n for k,v in l:\n self.add(k,v)\n def direct_write(self,k,v,i):\n self.writer.add_scalar(k,v,i)\n def add(self,k,v):\n self.dic[k].append(v)\n\ndef save_src(path):\n current_dir = os.getcwd()\n package_dir = current_dir.split('RL', 1)[0]\n #path = os.path.abspath(path)\n os.chdir(package_dir)\n #print(package_dir)\n src_files = subprocess.Popen(('find', 'RL', '-name', '*.py', '-o', '-name', '*.yaml'),\n stdout=subprocess.PIPE)\n #print(package_dir,path)\n #path=os.path.abspath(path)\n\n\n #print(str(src_files))\n\n subprocess.check_output(('tar', '-zcf', path+\"/arch.tar\", '-T', '-'), stdin=src_files.stdout, stderr=subprocess.STDOUT)\n src_files.wait()\n os.chdir(current_dir)\n\n\ndef draw(scores, path=\"fig.png\", title=\"Performance\", xlabel=\"Episode #\", ylabel=\"Score\"):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.title(title)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.savefig(path) \n\n\ndef prs(*args):\n st = \"\"\n for s in args:\n st += str(s)\n print(st)\n\n\nclass DotDict(dict):\n \"\"\"dot.notation access to dictionary attributes (Thomas Robert)\"\"\"\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n\ndef load_yaml(path):\n with open(path, 'r') as stream:\n opt = yaml.load(stream,Loader=yaml.Loader)\n return DotDict(opt)\n\ndef write_yaml(file,dotdict):\n d=dict(dotdict)\n with open(file, 'w', encoding='utf8') as outfile:\n yaml.dump(d, outfile, default_flow_style=False, allow_unicode=True)\n\n\nclass EpsilonGreedyDecay:\n def __init__(self, epsilon, eta, epsilon_min):\n self.eta = eta\n self.epsilon=epsilon\n self.epsilon_min=epsilon_min\n def act(self, episode, q_values):\n decay = self.epsilon / (1 + (self.eta * episode))\n if decay<self.epsilon_min:\n decay=self.epsilon_min\n if np.random.random() > decay:\n _,action = torch.max(q_values,0) # we take the action that maximize the q_value\n return action.item()\n return np.random.randint(len(q_values))\n\n\n\n\nclass DDPGAgent:\n \n def __init__(self,\n state_dim,\n 
action_dim,\n lr_actor = 1e-4,\n lr_critic = 1e-4,\n lr_decay = .95,\n replay_buff_size = 10000,\n gamma = .9,\n batch_size = 128,\n random_seed = 42,\n soft_update_tau = 1e-3\n ):\n\n self.lr_actor = lr_actor\n self.gamma = gamma\n self.lr_critic = lr_critic\n self.lr_decay = lr_decay\n self.tau = soft_update_tau\n self.actor_local = ActorNetwork(state_dim, action_dim)\n self.actor_target = ActorNetwork(state_dim, action_dim)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=self.lr_actor) \n self.critic_local = CriticNetwork(state_dim, action_dim)\n self.critic_target = CriticNetwork(state_dim, action_dim)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=self.lr_critic) \n self.noise = OUNoise(action_dim, random_seed)\n self.memory = ReplayBuffer(action_dim, replay_buff_size, batch_size, random_seed)\n \n \n def update_model(self, state, action, reward, next_state, done):\n self.memory.add(state, action, reward, next_state, done)\n if not self.memory.is_ready():\n return\n \n experiences = self.memory.sample()\n states, actions, rewards, next_states, dones = experiences\n actions_next = self.actor_target(next_states)\n Q_targets_next = self.critic_target(next_states, actions_next)\n Q_targets = rewards + (self.gamma * Q_targets_next * (1 - dones)).detach()\n Q_expected = self.critic_local(states, actions)\n y = Q_targets.mean().item()\n critic_loss = F.smooth_l1_loss(Q_expected, Q_targets)\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n actions_pred = self.actor_local(states)\n actor_loss = -self.critic_local(states, actions_pred).mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n self.soft_update(self.critic_local, self.critic_target, self.tau)\n self.soft_update(self.actor_local, self.actor_target, self.tau) \n \n def act(self, state, noise_t=0.0):\n if random.random()<0.1:\n v=random.random()\n return np.array([v,1-v])\n if len(np.shape(state)) == 1:\n state = state.reshape(1,-1)\n state = torch.from_numpy(state).float()\n self.actor_local.eval()\n with torch.no_grad():\n action = self.actor_local(state).cpu().data.numpy()\n self.actor_local.train()\n action += self.noise.sample() * noise_t\n return np.clip(action, -1, 1).squeeze()\n \n def reset(self):\n self.noise.reset()\n \n def soft_update(self, local_model, target_model, tau):\n\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n \n\nclass OUNoise:\n\n def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):\n\n self.mu = mu * np.ones(size)\n self.theta = theta\n self.sigma = sigma\n self.seed = random.seed(seed)\n self.reset()\n\n def reset(self):\n\n self.state = copy.copy(self.mu)\n\n def sample(self):\n\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state\n\nclass ReplayBuffer:\n\n\n def __init__(self, action_size, buffer_size, batch_size, seed):\n\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n \n def add(self, state, action, reward, next_state, done):\n\n e = self.experience(state, action, reward, next_state, done)\n 
self.memory.append(e)\n \n def sample(self):\n \n experiences = random.sample(self.memory, k=self.batch_size)\n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float()\n actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float()\n rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float()\n next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float()\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float()\n\n return (states, actions, rewards, next_states, dones)\n \n def is_ready(self):\n return len(self.memory) > self.batch_size\n\n def __len__(self):\n\n return len(self.memory)\n" ]
[ [ "numpy.dot", "torch.max", "numpy.sqrt", "torch.cat", "torch.tanh", "numpy.concatenate", "torch.FloatTensor", "numpy.random.randn", "numpy.where", "numpy.clip", "torch.nn.BatchNorm1d", "numpy.power", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.init.normal_", "numpy.array", "numpy.random.random", "numpy.ones", "numpy.shape" ], [ "numpy.dot", "numpy.linalg.inv", "numpy.argmax", "numpy.identity", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.dot", "numpy.sqrt", "torch.max", "torch.cat", "torch.tanh", "numpy.concatenate", "torch.FloatTensor", "numpy.random.randn", "torch.nn.functional.smooth_l1_loss", "torch.no_grad", "numpy.where", "numpy.clip", "torch.from_numpy", "torch.nn.BatchNorm1d", "numpy.power", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.init.normal_", "numpy.array", "numpy.random.random", "numpy.ones", "numpy.shape", "numpy.vstack" ] ]
dhrubokarmaker/RoeBot
[ "fbd86c6c2e5930b0ec41be1b6001ad182cb8e49c" ]
[ "main.py" ]
[ "import nltk \nfrom nltk.stem.lancaster import LancasterStemmer\nfrom nltk.tokenize import word_tokenize\nfrom tensorflow.python.ops.gen_array_ops import expand_dims_eager_fallback\n\nstemmer = LancasterStemmer()\n\nimport numpy\nimport tflearn\nimport random\nimport json\nimport tensorflow as tf\nimport pickle\nimport discord\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nwith open(\"intents.json\") as file:\n data = json.load(file)\ntry:\n with open(\"data.pickle\",\"rb\") as f:\n words,labels,training,output = pickle.load(f)\nexcept:\n words = []\n labels = []\n docs_x = []\n docs_y = []\n\n for intent in data[\"intents\"]:\n for pattern in intent[\"patterns\"]:\n wrds = nltk.word_tokenize(pattern)\n words.extend(wrds)\n docs_x.append(wrds)\n docs_y.append(intent[\"tag\"])\n\n if intent[\"tag\"] not in labels:\n labels.append(intent[\"tag\"])\n\n words = [stemmer.stem(w.lower()) for w in words if w != \"?\"]\n words = sorted(list(set(words)))\n\n labels = sorted(labels)\n\n training = []\n output = []\n\n out_empty = [0 for _ in range(len(labels))]\n\n\n for x,doc in enumerate(docs_x):\n bag = []\n wrds = [stemmer.stem(w) for w in doc]\n for w in words:\n if w in wrds:\n bag.append(1)\n else:\n bag.append(0)\n \n output_row = out_empty[:]\n\n output_row[labels.index(docs_y[x])] = 1\n\n training.append(bag)\n output.append(output_row)\n\n training = numpy.array(training)\n output = numpy.array(output)\n\n with open(\"data.pickle\",\"wb\") as f:\n pickle.dump((words,labels,training,output),f)\n\n\ntf.compat.v1.reset_default_graph()\nnet = tflearn.input_data(shape=[None,len(training[0])])\nnet = tflearn.fully_connected(net,8)\nnet = tflearn.fully_connected(net,8)\nnet = tflearn.fully_connected(net,len(output[0]),activation=\"softmax\")\nnet = tflearn.regression(net)\nmodel = tflearn.DNN(net)\n\ntry:\n model.load(\"model.tflearn\")\nexcept:\n model.fit(training,output,n_epoch=1000,batch_size=8,show_metric=True)\n model.save(\"model.tflearn\") \n \n \ndef bag_of_words(s,words):\n bag = [0 for _ in range(len(words))]\n s_words = nltk.word_tokenize(s)\n s_words = [stemmer.stem(word.lower()) for word in s_words]\n\n for s_word in s_words:\n for i,w in enumerate(words):\n if w == s_word:\n bag[i] = 1\n\n return numpy.array(bag)\n\ndef chat(message):\n result = model.predict([bag_of_words(message,words)])[0]\n result_index = numpy.argmax(result)\n if result[result_index] > 0.80:\n tag = labels[result_index]\n for intent in data[\"intents\"]:\n if intent[\"tag\"] == tag:\n responses = intent[\"responses\"]\n \n return random.choice(responses)\n else:\n return \"Didn't get that :(\"\n\n\nclient = discord.Client()\n\[email protected]\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n if message.content.startswith('$roe'):\n text = message.content.lstrip('$roe')\n await message.channel.send(chat(text))\n \nclient.run(os.getenv('TOKEN'))\n\n\n" ]
[ [ "numpy.array", "tensorflow.compat.v1.reset_default_graph", "numpy.argmax" ] ]
joegle/hrv-biofeedback
[ "08152889798d41bd9246c4550174377bf3eaa8f1" ]
[ "python-heart/examples/record.py" ]
[ "#!/usr/bin/env python2\nfrom __future__ import print_function\nimport heart\nimport datetime\nimport time\nimport sys\nimport numpy as np\nimport argparse\nimport os\nimport stat\n\nclass recorder(heart.Heart_Monitor):\n \"\"\"Command line tool that records the Arduino heart beat data into timestamped file\"\"\"\n\n def __init__(self, args):\n heart.Heart_Monitor.__init__(self,args.source)\n now = datetime.datetime.now()\n \n start_time = now.strftime('%Y-%m-%d-%H:%M:%S')\n stat_mode = os.stat(args.source).st_mode\n if stat.S_ISREG(stat_mode) or args.test:\n print(\"TESTING Not writing data to anywhere\")\n self.datafile = open(\"/dev/null\",\"w\")\n else:\n self.datafile = open(start_time+\".txt\",\"w\")\n print(\"Writing data to '%s'\" % (self.datafile.name))\n \n #self.datafile.write(\"# %s\"%(start_time))\n self.datafile.write(\"# R wave intervals in milliseconds per line\\n\")\n if args.message:\n self.log(\"annotation: \" + args.message)\n\n def fifteen(self):\n # Fifteen minute mark\n print(\"$\",end='')\n \n def log(self, message):\n self.datafile.write(\"# %s\\n\"%(message))\n \n def start(self):\n \"\"\"Begins the infinite loop of detecting heart beats\"\"\"\n sys.stderr.write(\"Starting monitor (Control-C to quit)\\n\")\n self.datafile.write(\"# start time: %s, %s\\n\"%(datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'),time.time()))\n while True:\n self.listen_for_beat()\n\n def on_beat(self):\n #self.stream.write(chr(self.beat_time%255))\n print(self.beat_time, file=self.datafile)\n char = \".\"\n \n if np.sum(self.RR_intervals)/60000 >= 15:\n char = '$'\n \n print(char, end=\"\")\n sys.stdout.flush()\n\n def session_summary(self):\n print(\"\\n= Session Summary =\")\n \n print(\"File: {0}\".format(self.datafile.name))\n print(\"Beats: {0:>6}\".format(len(self.RR_intervals)))\n print(\"Time: {0:>7} minutes\".format(round(np.sum(self.RR_intervals)/60000,2)))\n print(\"Mean: {0:>7}\".format(round(np.average(self.RR_intervals), 2)))\n print(\"STD: {0:>8}\".format(round(np.std(self.RR_intervals), 2)))\n print(\"BPM: {0:>8}\".format(round(60000/np.average(self.RR_intervals), 2)))\n\n def on_quit(self):\n self.datafile.write(\"# end time: %s, %s\\n\"%(datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'),time.time()))\n sys.stderr.write(\"Quitting monitor\\n\") \n self.session_summary()\n self.datafile.close()\n\nguide = \"\"\"# Examples\n./record.py sample.txt\n./record.py /dev/ttyUSB0\n./record.py /dev/usbmodem1411\n\"\"\"\n\nparser = argparse.ArgumentParser(description='Record heart beat intervals',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=guide)\n\nparser.add_argument('-m','--message', help='Log a message')\nparser.add_argument('-t','--test', help=\"Test run\", action='store_true')\nparser.add_argument('source',help=\"Serial device or test data file (/dev/ttyUSB0, /dev/tty.usbmodem1411, sample.txt)\")\n\nargs = parser.parse_args()\n\n\ndef main():\n session = recorder(args)\n session.start()\n\nif __name__ == \"__main__\":\n main()\n\n" ]
[ [ "numpy.std", "numpy.average", "numpy.sum" ] ]
jasonleeinf/nmtlab
[ "122b70cc226d9ce17ad106a3bd3a5318bd3b359f", "122b70cc226d9ce17ad106a3bd3a5318bd3b359f" ]
[ "nmtlab/trainers/hvd_utils.py", "nmtlab/modules/multihead_attention.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport torch\nimport horovod.torch as hvd\n\n\ndef broadcast_optimizer_state(optimizer, root_rank):\n \"\"\"\n This function is copied from the newest horovod version.\n But the newest version has to be compiled with gcc7\n \"\"\"\n if isinstance(optimizer, torch.optim.LBFGS):\n # TODO(travis): L-BFGS cannot be easily supported without serializing\n # the entire state_dict, as its structure is deeply nested and contains\n # None type parameter values\n raise ValueError('cannot broadcast torch.optim.LBFGS state')\n\n state_dict = optimizer.state_dict()\n\n # Newly created optimizers will not have their state initialized, so\n # do that initialization here\n if len(state_dict['state']) == 0:\n for group in optimizer.param_groups:\n for p in group['params']:\n p.grad = torch.autograd.Variable(\n p.data.new(p.size()).zero_())\n optimizer.step()\n state_dict = optimizer.state_dict()\n\n params = []\n callbacks = {}\n occurrences = collections.defaultdict(int)\n\n # Some optimizer parameters may be represented as scalars instead of\n # tensors. In such cases, we need to wrap the scalar in a tensor, then\n # broadcast, then update the appropriate value in the state_dict with the\n # new unwrapped scalar value via a callback.\n def _create_callback(pid, name, t, p):\n def _from_tensor():\n state_dict['state'][pid][name] = t(p.numpy()[0])\n return _from_tensor\n\n # Groups are unordered, but their params will be distinct\n for group in state_dict['param_groups']:\n # The params list here is ordered by the layers in the model\n for pid in group['params']:\n if pid not in state_dict['state']:\n continue\n param_state = state_dict['state'][pid]\n for name, p in param_state.items():\n # Some parameter names may appear more than once, in which\n # case we ensure they have a unique identifier defined by\n # their order\n occurrences[name] += 1\n key = '%s.%d' % (str(name), occurrences[name])\n\n if not torch.is_tensor(p):\n # Wrap the scalar in a FloatTensor, and remember its type\n # so we can cast it back after unwrapping\n t = type(p)\n p = torch.Tensor([p])\n callbacks[key] = _create_callback(pid, name, t, p)\n\n params.append((key, p))\n\n # Synchronized broadcast of all parameters\n hvd.broadcast_parameters(params, root_rank)\n\n # Post-broadcast clenaup for non-tensor parameters\n for key, p in params:\n if key in callbacks:\n callbacks[key]()\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\n\nfrom nmtlab.modules.kv_attention import KeyValAttention\n\n\nclass MultiHeadAttention(nn.Module):\n \"\"\"The implementation of multi-head attention.\n \n Following the original description in the transformer paper.\n \"\"\"\n\n _RELATIVE_POS_CLIP = 100\n \n def __init__(self, out_size, num_head=8, hidden_size=None, additive=False, dropout_ratio=0, relative_pos=False):\n super(MultiHeadAttention, self).__init__()\n if hidden_size is None:\n hidden_size = out_size\n self._num_head = num_head\n self._hidden_size = hidden_size\n self._out_size = out_size\n self._additive = additive\n if relative_pos:\n self.relative_posmatrix = nn.Embedding(self._RELATIVE_POS_CLIP * 2 + 1, hidden_size)\n else:\n self.relative_posmatrix = None\n self._attention = 
KeyValAttention(scaling=True, dropout_ratio=dropout_ratio, )\n if additive:\n # Taken from RNMT+ paper\n raise NotImplementedError\n else:\n self.linear_Q = nn.Linear(out_size, hidden_size)\n self.linear_K = nn.Linear(out_size, hidden_size)\n self.linear_V = nn.Linear(out_size, hidden_size)\n self.linear_O = nn.Linear(hidden_size, out_size)\n \n def forward_2d(self, query, keys, values, mask=None):\n \"\"\"Compute attention for 2-dimensional queries (batch x hidden).\n \"\"\"\n query = query.unsqueeze(1) # (B, 1, H)\n context_vectors, weights = self.forward_3d(query, keys, values, mask=mask)\n context_vectors = context_vectors.squeeze(1)\n weights = weights.squeeze(1)\n return context_vectors, weights\n \n def forward_3d(self, query, keys, values, mask=None):\n \"\"\"Compute attention for 3-dimensional input (batch x step x hidden).\n \"\"\"\n B = query.shape[0]\n head_dim = self._hidden_size // self._num_head\n transformed_query = self.linear_Q(query)\n if self.relative_posmatrix is not None:\n TQ = query.shape[1]\n TK = keys.shape[1]\n #pos = torch.arange(TK).repeat(TQ, 1)\n #relpos = pos - torch.arange(TQ)[:, None]\n relpos = torch.arange(TK)[None, :] - torch.arange(TQ)[:, None]\n relpos = torch.clamp(relpos, -self._RELATIVE_POS_CLIP, self._RELATIVE_POS_CLIP)\n relpos += self._RELATIVE_POS_CLIP\n if torch.cuda.is_available():\n relpos = relpos.cuda()\n relpos_embed = self.relative_posmatrix(relpos)\n relpos_logits = (transformed_query.unsqueeze(-2) * relpos_embed.unsqueeze(0)).sum(-1)\n relpos_logits = relpos_logits.unsqueeze(1)\n else:\n relpos_logits = None\n query = transformed_query.view(B, -1, self._num_head, head_dim).transpose(1, 2) # (B, 4, TQ, H)\n keys = self.linear_K(keys).view(keys.shape[0], -1, self._num_head, head_dim).transpose(1, 2)\n values = self.linear_V(values).view(values.shape[0], -1, self._num_head, head_dim).transpose(1, 2)\n if mask is not None and mask.dim() < keys.dim():\n mask = mask.unsqueeze(1)\n context_vectors, weights = self._attention(query, keys, values, mask=mask, additional_logits=relpos_logits) # (B, 4, TQ, H)\n context_vectors = context_vectors.transpose(1, 2).contiguous().view(B, -1, self._num_head * head_dim) # (B, TQ, H)\n context_vectors = self.linear_O(context_vectors)\n return context_vectors, weights\n \n def forward(self, query, keys, values, mask=None):\n \"\"\"Compute the context vector with key value attention.\n \n Returns:\n context vector and attention weights.\n \"\"\"\n if query.dim() == 2:\n return self.forward_2d(query, keys, values, mask)\n elif query.dim() == 3:\n return self.forward_3d(query, keys, values, mask)\n else:\n raise NotImplementedError\n\n" ]
[ [ "torch.is_tensor", "torch.Tensor" ], [ "torch.nn.Embedding", "torch.nn.Linear", "torch.cuda.is_available", "torch.arange", "torch.clamp" ] ]
YuehChuan/nnom
[ "68af27a0631244f2bb78cd4e4f2da916f122991a" ]
[ "examples/keyword_spotting/model/mfcc.py" ]
[ "\n\nfrom python_speech_features import mfcc\nimport scipy.io.wavfile as wav\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nimport os\nimport random\n\ndef load_noise(path='dat/_background_noise_/'):\n noise = []\n files = os.listdir(path)\n for f in files:\n filename = f\n if ('wav' not in filename):\n continue\n f = os.path.join(path, f)\n (rate, sig) = wav.read(f)\n noise.append(sig)\n return noise\n\ndef generate_mfcc(sig, rate, sig_len, noise=None, noise_weight=0.1, winlen=0.03125, winstep=0.03125/2, numcep=13, nfilt=26, nfft=512, lowfreq=20, highfreq=4000, winfunc=np.hanning, ceplifter=0, preemph=0.97):\n if(len(sig) != sig_len):\n if(len(sig)< sig_len):\n sig = np.pad(sig, (0, sig_len - len(sig)), 'constant')\n if(len(sig) >sig_len):\n sig = sig[0:sig_len]\n # i dont know, 'tensorflow' normalization\n sig = sig.astype('float') / 32768\n\n if(noise is not None):\n noise = noise[random.randint(0, len(noise)-1)] # pick a noise\n start = random.randint(0, len(noise)-sig_len) # pick a sequence\n noise = noise[start:start+sig_len]\n noise = noise.astype('float')/32768\n sig = sig * (1-noise_weight) + noise * noise_weight\n #wav.write('noise_test.wav', rate, sig)\n mfcc_feat = mfcc(sig, rate, winlen=winlen, winstep=winstep, numcep=numcep, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,\n highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)\n mfcc_feat = mfcc_feat.astype('float32')\n return mfcc_feat\n\ndef merge_mfcc_file(input_path='dat/', mix_noise=True, sig_len=16000, winlen=0.03125, winstep=0.03125/2, numcep=13, nfilt=26, nfft=512,\n lowfreq=20, highfreq=4000, winfunc=np.hanning, ceplifter=0, preemph=0.97):\n\n train_data = []\n test_data = []\n validate_data = []\n train_lable = []\n test_label = []\n validate_label =[]\n\n if mix_noise:\n noise = load_noise()\n else:\n noise = None\n\n with open(input_path + 'testing_list.txt', 'r') as f:\n test_list = f.read()\n with open(input_path + 'validation_list.txt', 'r') as f:\n validate_list = f.read()\n\n files = os.listdir(input_path)\n for fi in files:\n fi_d = os.path.join(input_path, fi)\n # folders of each cmd\n if os.path.isdir(fi_d):\n label = fi_d.split('/')[1] # get the label from the dir\n print(label)\n # noise in training\n if 'noise' in label:\n for f in os.listdir(fi_d):\n filename = f\n if('wav' not in filename):\n continue\n f = os.path.join(fi_d, f)\n (rate, sig) = wav.read(f)\n for i in range(0, len(sig), sig_len):\n data = generate_mfcc(sig[i:i+sig_len], rate, sig_len, winlen=winlen, winstep=winstep, numcep=numcep,\n nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,\n highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)\n data = np.array(data) # ?? no idea why this works\n train_data.append(data)\n train_lable.append('noise')\n\n continue\n # dataset\n for f in os.listdir(fi_d):\n filename = f\n f = os.path.join(fi_d, f)\n (rate, sig) = wav.read(f)\n data = generate_mfcc(sig, rate, sig_len, noise=noise, winlen=winlen, winstep=winstep, numcep=numcep, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,\n highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)\n data = np.array(data) # ?? 
no idea why this works\n\n                # split dataset into train, test, validate\n                if filename in test_list:\n                    test_data.append(data)\n                    test_label.append(label)\n                elif filename in validate_list:\n                    validate_data.append(data)\n                    validate_label.append(label)\n                else:\n                    train_data.append(data)\n                    train_lable.append(label)\n\n    # finalize\n    train_data = np.array(train_data)\n    test_data = np.array(test_data)\n    validate_data = np.array(validate_data)\n\n    return (train_data, train_lable), (test_data, test_label), (validate_data, validate_label)\n\n\nif __name__ == \"__main__\":\n\n    # test\n    (x_train, y_train), (x_test, y_test), (x_val, y_val) = merge_mfcc_file()\n\n    np.save('train_data.npy', x_train)\n    np.save('train_label.npy', y_train)\n    np.save('test_data.npy', x_test)\n    np.save('test_label.npy', y_test)\n    np.save('val_data.npy', x_val)\n    np.save('val_label.npy', y_val)\n\n    print('x_train shape:', x_train.shape, 'max', x_train.max(), 'min', x_train.min())\n\n    mfcc_feat = x_train[3948]\n    mfcc_feat = np.swapaxes(mfcc_feat, 0, 1)\n    fig, ax = plt.subplots()\n    cax = ax.imshow(mfcc_feat, interpolation='nearest', origin='lower', aspect='auto')\n    ax.set_title('MFCC')\n    plt.show()\n" ]
[ [ "numpy.swapaxes", "matplotlib.pyplot.subplots", "numpy.save", "numpy.array", "scipy.io.wavfile.read", "matplotlib.pyplot.show" ] ]
brjdenis/qaserver
[ "93a4c3272cf38199e7ef67d1285a9ffacef46883" ]
[ "pyqaserver/picketfence_module.py" ]
[ "import sys\nimport os\nimport tempfile\nfrom multiprocessing import Pool\nimport datetime\nimport numpy as np\nimport matplotlib.style\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib.figure import Figure\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n# To revert back to matplotlib 1.0 style\nmatplotlib.style.use('classic')\n\nfrom pylinac.core.profile import SingleProfile as PylinacSingleProfile\n\nparent_module = sys.modules['.'.join(__name__.split('.')[:-1]) or '__main__']\nif __name__ == '__main__' or parent_module.__name__ == '__main__':\n #sys.path.append(os.path.abspath(os.path.realpath(\"python_packages\")))\n import config\n from python_packages.bottlepy.bottle import Bottle, request, TEMPLATE_PATH, template, redirect, response\n import general_functions\n import RestToolbox_modified as RestToolbox\n from python_packages import mpld3\nelse:\n from . import config\n from .python_packages.bottlepy.bottle import Bottle, request, TEMPLATE_PATH, template, redirect, response\n from . import general_functions\n from . import RestToolbox_modified as RestToolbox\n from .python_packages import mpld3\n\nCUR_DIR = os.path.realpath(os.path.dirname(__file__))\n\n# Path to Bottle templates\nTEMPLATE_PATH.insert(0, os.path.join(CUR_DIR, 'views'))\n\n# Url to some mpld3 library\nD3_URL = config.D3_URL\nMPLD3_URL = config.MPLD3_URL\n\nPI = np.pi\n\n# MLC type for PicketFence analysis:\nLEAF_TYPE = [\"Varian_120\", \"Varian_120HD\", \"Varian_80\", \"Elekta_80\", \"Elekta_160\"]\n\n# Here starts the bottle server\npf_app = Bottle()\n\n@pf_app.route('/picket_fence', method=\"POST\")\ndef picket_fence():\n\n displayname = request.forms.hidden_displayname\n username = request.get_cookie(\"account\", secret=config.SECRET_KEY)\n if not username:\n redirect(\"/login\")\n try:\n variables = general_functions.Read_from_dcm_database()\n variables[\"displayname\"] = displayname\n except ConnectionError:\n return template(\"error_template\", {\"error_message\": \"Orthanc is refusing connection.\"})\n variables[\"LEAF_TYPE\"] = LEAF_TYPE\n response.set_cookie(\"account\", username, secret=config.SECRET_KEY, samesite=\"lax\")\n return template(\"picket_fence\", variables)\n\ndef picket_fence_helperf_catch_error(args):\n try:\n return picket_fence_helperf(args)\n except Exception as e:\n return template(\"error_template\", {\"error_message\": str(e)})\n\ndef picket_fence_helperf(args):\n '''This function is used in order to prevent memory problems'''\n temp_folder = args[\"temp_folder\"]\n file_path = args[\"file_path\"]\n clip_box = args[\"clip_box\"]\n py_filter = args[\"py_filter\"]\n num_pickets = args[\"num_pickets\"]\n sag = args[\"sag\"]\n mlc = args[\"mlc\"]\n invert = args[\"invert\"]\n orientation = args[\"orientation\"]\n w = args[\"w\"]\n imgdescription = args[\"imgdescription\"]\n station = args[\"station\"]\n displayname = args[\"displayname\"]\n acquisition_datetime = args[\"acquisition_datetime\"]\n general_functions.set_configuration(args[\"config\"]) # Transfer to this process\n \n # Chose module:\n if mlc in [\"Varian_80\", \"Elekta_80\", \"Elekta_160\"]:\n use_original_pylinac = \"False\"\n else:\n use_original_pylinac = \"True\"\n \n # Collect data for \"save results\"\n dicomenergy = general_functions.get_energy_from_imgdescription(imgdescription)\n user_machine, user_energy = general_functions.get_user_machine_and_energy(station, dicomenergy)\n machines_and_energies = general_functions.get_machines_and_energies(general_functions.get_treatmentunits_picketfence())\n 
tolerances = general_functions.get_tolerance_user_machine_picketfence(user_machine)  # If user_machine has specific tolerance\n    if not tolerances:\n        action_tolerance, tolerance, generate_pdf_report = general_functions.get_settings_picketfence()\n    else:\n        action_tolerance, tolerance, generate_pdf_report = tolerances[0]\n\n    tolerance = float(tolerance)\n    action_tol = float(action_tolerance)\n    \n    save_results = {\n                    \"user_machine\": user_machine,\n                    \"user_energy\": user_energy,\n                    \"machines_and_energies\": machines_and_energies,\n                    \"displayname\": displayname\n                    }\n\n    # Import either original pylinac module or the modified module\n    if use_original_pylinac == \"True\":\n        from pylinac import PicketFence as PicketFence # Original pylinac analysis\n    else:\n        if __name__ == '__main__' or parent_module.__name__ == '__main__':\n            from python_packages.pylinac.picketfence_modified import PicketFence as PicketFence\n        else:\n            from .python_packages.pylinac.picketfence_modified import PicketFence as PicketFence\n\n    try:\n        pf = PicketFence(file_path, filter=py_filter)\n    except Exception as e:\n        return template(\"error_template\", {\"error_message\": \"Module PicketFence cannot calculate. \"+str(e)})\n\n    # Here we force pixels to background outside of box:\n    if clip_box != 0:\n        try:\n            pf.image.check_inversion_by_histogram(percentiles=[4, 50, 96]) # Check inversion otherwise this might not work\n            general_functions.clip_around_image(pf.image, clip_box)\n        except Exception as e:\n            return template(\"error_template\", {\"error_message\": \"Unable to apply clipbox. \"+str(e)})\n\n    # Now invert if needed\n    if invert:\n        try:\n            pf.image.invert()\n        except Exception as e:\n            return template(\"error_template\", {\"error_message\": \"Unable to invert the image. \"+str(e)})\n    \n    # Now analyze\n    try:\n        if use_original_pylinac == \"True\":\n            hdmlc = True if mlc==\"Varian_120HD\" else False\n            pf.analyze(tolerance=tolerance, action_tolerance=action_tol, hdmlc=hdmlc, sag_adjustment=float(sag), num_pickets=num_pickets,\n                       orientation=orientation)\n        else:\n            pf.analyze(tolerance=tolerance, action_tolerance=action_tol, mlc_type=mlc, sag_adjustment=float(sag), num_pickets=num_pickets,\n                       orientation=orientation)\n    except Exception as e:\n        return template(\"error_template\", {\"error_message\": \"Picket fence module cannot analyze. \"+str(e)})\n    \n    # Added an if clause to tell if num of mlc's are not the same on all pickets:\n\n    num_mlcs = len(pf.pickets[0].mlc_meas)\n    for p in pf.pickets:\n        if len(p.mlc_meas) != num_mlcs:\n            return template(\"error_template\", {\"error_message\": \"Not all pickets have the same number of leaves. \"+\n                                               \"Probably your image is too skewed. Rotate your collimator a bit \"+\n                                               \"and try again. 
Use the jaws perpendicular to MLCs to set the right \"+\n \"collimator angle.\"})\n error_array = np.array([])\n max_error = []\n max_error_leaf = []\n passed_tol = []\n picket_offsets = []\n picket_nr = pf.num_pickets\n for k in pf.pickets.pickets:\n error_array = np.concatenate((error_array, k.error_array))\n max_error.append(k.max_error)\n max_err_leaf_ind = np.argmax(k.error_array)\n\n max_error_leaf.append(max_err_leaf_ind)\n passed_tol.append(\"Passed\" if k.passed else \"Failed\")\n picket_offsets.append(k.dist2cax)\n\n # Plot images\n if pf.settings.orientation == \"Left-Right\":\n fig_pf = Figure(figsize=(9, 10), tight_layout={\"w_pad\":0})\n else:\n fig_pf = Figure(figsize=(9.5, 7), tight_layout={\"w_pad\":0})\n\n img_ax = fig_pf.add_subplot(1,1,1)\n img_ax.imshow(pf.image.array, cmap=matplotlib.cm.gray, interpolation=\"none\", aspect=\"equal\", origin='upper')\n\n # Taken from pylinac: leaf_error_subplot:\n tol_line_height = [pf.settings.tolerance, pf.settings.tolerance]\n tol_line_width = [0, max(pf.image.shape)]\n # make the new axis\n divider = make_axes_locatable(img_ax)\n if pf.settings.orientation == 'Up-Down':\n axtop = divider.append_axes('right', 1.75, pad=0.2, sharey=img_ax)\n else:\n axtop = divider.append_axes('bottom', 1.75, pad=0.5, sharex=img_ax)\n\n # get leaf positions, errors, standard deviation, and leaf numbers\n pos, vals, err, leaf_nums = pf.pickets.error_hist()\n\n # Changed leaf_nums to sequential numbers:\n leaf_nums = list(np.arange(0, len(leaf_nums), 1))\n\n # plot the leaf errors as a bar plot\n if pf.settings.orientation == 'Up-Down':\n axtop.barh(pos, vals, xerr=err, height=pf.pickets[0].sample_width * 2, alpha=0.4, align='center')\n # plot the tolerance line(s)\n axtop.plot(tol_line_height, tol_line_width, 'r-', linewidth=3)\n if pf.settings.action_tolerance is not None:\n tol_line_height_action = [pf.settings.action_tolerance, pf.settings.action_tolerance]\n tol_line_width_action = [0, max(pf.image.shape)]\n axtop.plot(tol_line_height_action, tol_line_width_action, 'y-', linewidth=3)\n\n # reset xlims to comfortably include the max error or tolerance value\n axtop.set_xlim([0, max(max(vals), pf.settings.tolerance) + 0.1])\n else:\n axtop.bar(pos, vals, yerr=err, width=pf.pickets[0].sample_width * 2, alpha=0.4, align='center')\n axtop.plot(tol_line_width, tol_line_height, 'r-', linewidth=3)\n if pf.settings.action_tolerance is not None:\n tol_line_height_action = [pf.settings.action_tolerance, pf.settings.action_tolerance]\n tol_line_width_action = [0, max(pf.image.shape)]\n axtop.plot(tol_line_width_action, tol_line_height_action, 'y-', linewidth=3)\n axtop.set_ylim([0, max(max(vals), pf.settings.tolerance) + 0.1])\n\n # add formatting to axis\n axtop.grid(True)\n axtop.set_title(\"Average Error (mm)\")\n\n # add tooltips if interactive\n # Copied this from previous version of pylinac\n interactive = True\n if interactive:\n if pf.settings.orientation == 'Up-Down':\n labels = [['Leaf pair: {0} <br> Avg Error: {1:3.3f} mm <br> Stdev: {2:3.3f} mm'.format(leaf_num, err, std)]\n for leaf_num, err, std in zip(leaf_nums, vals, err)]\n voffset = 0\n hoffset = 20\n else:\n labels = [['Leaf pair: {0}, Avg Error: {1:3.3f} mm, Stdev: {2:3.3f} mm'.format(leaf_num, err, std)]\n for leaf_num, err, std in zip(leaf_nums, vals, err)]\n\n if pf.settings.orientation == 'Up-Down':\n for num, patch in enumerate(axtop.axes.patches):\n ttip = mpld3.plugins.PointHTMLTooltip(patch, labels[num], voffset=voffset, hoffset=hoffset)\n mpld3.plugins.connect(fig_pf, ttip)\n 
mpld3.plugins.connect(fig_pf, mpld3.plugins.MousePosition(fontsize=14))\n else:\n for num, patch in enumerate(axtop.axes.patches):\n ttip = mpld3.plugins.PointLabelTooltip(patch, labels[num], location='top left')\n mpld3.plugins.connect(fig_pf, ttip)\n mpld3.plugins.connect(fig_pf, mpld3.plugins.MousePosition(fontsize=14))\n\n for p_num, picket in enumerate(pf.pickets):\n picket.add_guards_to_axes(img_ax.axes)\n for idx, mlc_meas in enumerate(picket.mlc_meas):\n mlc_meas.plot2axes(img_ax.axes, width=1.5)\n\n # plot CAX\n img_ax.plot(pf.settings.image_center.x, pf.settings.image_center.y, 'r+', ms=12, markeredgewidth=3)\n\n # tighten up the plot view\n img_ax.set_xlim([0, pf.image.shape[1]])\n img_ax.set_ylim([pf.image.shape[0], 0])\n img_ax.axis('off')\n img_ax.set_xticks([])\n img_ax.set_yticks([])\n \n # Histogram of all errors and average profile plot\n upper_bound = pf.settings.tolerance\n upper_outliers = np.sum(error_array.flatten()>=upper_bound)\n fig_pf2 = Figure(figsize=(10, 4), tight_layout={\"w_pad\":2})\n ax2 = fig_pf2.add_subplot(1,2,1)\n ax3 = fig_pf2.add_subplot(1,2,2)\n n, bins = np.histogram(error_array.flatten(), density=False, bins=10, range=(0, upper_bound))\n ax2.bar(bins[0:-1], n, width=np.diff(bins)[0], facecolor='green', alpha=0.75)\n ax2.bar([upper_bound,upper_bound*1.1], upper_outliers, width=0.1*upper_bound, facecolor='red', alpha=0.75)\n ax2.plot([pf.settings.action_tolerance,pf.settings.action_tolerance], [0,max(n)/2] , color=\"orange\")\n ax2.annotate(\"Action Tol.\", (pf.settings.action_tolerance, 1.05*max(n)/2), color='black',\n fontsize=6, ha='center', va='bottom')\n ax2.plot([pf.settings.tolerance,pf.settings.tolerance], [0,max(n)/2] , color=\"darkred\")\n ax2.annotate(\"Tol.\", (pf.settings.tolerance, 1.05*max(n)/2), color='black',\n fontsize=6, ha='center', va='bottom')\n\n # Plot mean inplane profile and calculate FWHM:\n mlc_mean_profile = pf.pickets.image_mlc_inplane_mean_profile\n ax3.plot(mlc_mean_profile.values, \"b-\")\n picket_fwhm = []\n fwhm_mean = 0\n try:\n peaks = mlc_mean_profile.find_peaks(max_number=picket_nr, min_distance=0.02, threshold=0.5)\n peaks = np.sort(peaks)\n ax3.plot(peaks, mlc_mean_profile[peaks], \"ro\")\n\n separation = int(np.mean(np.diff(peaks))/3)\n mmpd = 1/pf.image.dpmm\n # Get valleys\n valleys = []\n for p in np.arange(0, len(peaks)-1, 1):\n prof_partial = mlc_mean_profile[peaks[p]: peaks[p+1]]\n valleys.append(peaks[p]+np.argmin(prof_partial))\n edge_points = [peaks[0]-separation] + valleys + [peaks[-1]+separation]\n ax3.plot(edge_points, mlc_mean_profile[edge_points], \"yo\")\n\n for k in np.arange(0, len(edge_points)-1, 1):\n pr = PylinacSingleProfile(mlc_mean_profile[edge_points[k]:edge_points[k+1]])\n left = pr[0]\n right = pr[-1]\n amplitude = mlc_mean_profile[peaks[k]]\n if left < right:\n x = 100*((amplitude-left)*0.5 +left-right)/(amplitude-right)\n a = pr._penumbra_point(x=50, side=\"left\", interpolate=True)\n b = pr._penumbra_point(x=x, side=\"right\", interpolate=True)\n else:\n x = 100*((amplitude-right)*0.5 +right-left)/(amplitude-left)\n a = pr._penumbra_point(x=x, side=\"left\", interpolate=True)\n b = pr._penumbra_point(x=50, side=\"right\", interpolate=True)\n left_point = edge_points[k]+a\n right_point = edge_points[k]+b\n ax3.plot([left_point, right_point], [np.interp(left_point, np.arange(0, len(mlc_mean_profile.values), 1), mlc_mean_profile.values),\n np.interp(right_point, np.arange(0, len(mlc_mean_profile.values), 1), mlc_mean_profile.values)], \"-k\", alpha=0.5)\n 
picket_fwhm.append(np.abs(a-b)*mmpd)\n \n fwhm_mean = np.mean(picket_fwhm)\n except:\n picket_fwhm = [np.nan]*picket_nr\n fwhm_mean = np.nan\n if len(picket_fwhm) != picket_nr:\n fwhm_mean = np.mean(picket_fwhm)\n picket_fwhm = [np.nan]*picket_nr\n\n ax2.set_xlim([-0.025, pf.settings.tolerance*1.15])\n ax3.set_xlim([0, pf.image.shape[1]])\n ax2.set_title(\"Leaf error\")\n ax3.set_title(\"MLC mean profile\")\n ax2.set_xlabel(\"Error [mm]\")\n ax2.set_ylabel(\"Counts\")\n ax3.set_xlabel(\"Pixel\")\n ax3.set_ylabel(\"Grey value\")\n\n passed = \"Passed\" if pf.passed else \"Failed\"\n\n script = mpld3.fig_to_html(fig_pf, d3_url=D3_URL, mpld3_url=MPLD3_URL)\n script2 = mpld3.fig_to_html(fig_pf2, d3_url=D3_URL, mpld3_url=MPLD3_URL)\n variables = {\n \"script\": script,\n \"script2\": script2,\n \"passed\": passed,\n \"max_error\": max_error,\n \"max_error_leaf\": max_error_leaf,\n \"passed_tol\": passed_tol,\n \"picket_nr\": picket_nr,\n \"tolerance\": pf.settings.tolerance,\n \"perc_passing\": pf.percent_passing,\n \"max_error_all\": pf.max_error,\n \"max_error_picket_all\": pf.max_error_picket,\n \"max_error_leaf_all\": pf.max_error_leaf,\n \"median_error\": pf.abs_median_error,\n \"spacing\": pf.pickets.mean_spacing,\n \"picket_offsets\": picket_offsets,\n \"fwhm_mean\": fwhm_mean,\n \"picket_fwhm\": picket_fwhm,\n \"pdf_report_enable\": generate_pdf_report,\n \"save_results\": save_results,\n \"acquisition_datetime\": acquisition_datetime\n }\n\n # Generate pylinac report:\n if generate_pdf_report == \"True\":\n pdf_file = tempfile.NamedTemporaryFile(delete=False, prefix=\"PicketFence_\", suffix=\".pdf\", dir=config.PDF_REPORT_FOLDER)\n metadata = RestToolbox.GetInstances(config.ORTHANC_URL, [w])\n try:\n patient = metadata[0][\"PatientName\"]\n except:\n patient = \"\"\n try:\n stationname = metadata[0][\"StationName\"]\n except:\n stationname = \"\"\n try:\n date_time = RestToolbox.get_datetime(metadata[0])\n date_var = datetime.datetime.strptime(date_time[0], \"%Y%m%d\").strftime(\"%d/%m/%Y\")\n except:\n date_var = \"\"\n pf.publish_pdf(pdf_file, notes=[\"Date = \"+date_var, \"Patient = \"+patient, \"Station = \"+stationname])\n\n variables[\"pdf_report_filename\"] = os.path.basename(pdf_file.name)\n #gc.collect()\n\n general_functions.delete_files_in_subfolders([temp_folder]) # Delete image\n return template(\"picket_fence_results\", variables)\n\n@pf_app.route('/picket_fence_calculate/<w>', method=\"POST\")\ndef picket_fence_calculate(w):\n # w is the image, m is the mlc type\n \n temp_folder, file_path = RestToolbox.GetSingleDcm(config.ORTHANC_URL, w)\n clip_box = float(request.forms.hidden_clipbox)*10.0\n py_filter = int(request.forms.hidden_filter)\n py_filter = None if py_filter==0 else py_filter\n num_pickets = int(request.forms.hidden_peaks)\n num_pickets = None if num_pickets==0 else num_pickets\n sag = float(request.forms.hidden_sag)\n mlc = request.forms.hidden_mlc\n invert = True if request.forms.hidden_invert==\"true\" else False\n orientation = request.forms.hidden_orientation\n orientation = None if orientation==\"Automatic\" else orientation\n imgdescription = request.forms.hidden_imgdescription\n station = request.forms.hidden_station\n displayname = request.forms.hidden_displayname\n acquisition_datetime = request.forms.hidden_datetime\n\n args = {\"temp_folder\": temp_folder, \"file_path\": file_path, \"clip_box\": clip_box, \"py_filter\":py_filter,\n \"num_pickets\":num_pickets, \"sag\": sag, \"mlc\":mlc, \"invert\":invert, \"orientation\":orientation,\n \"w\":w, 
\"imgdescription\": imgdescription,\"station\": station, \"displayname\": displayname,\n \"acquisition_datetime\": acquisition_datetime, \"config\": general_functions.get_configuration()}\n p = Pool(1)\n data = p.map(picket_fence_helperf_catch_error, [args])\n p.close()\n p.join()\n return data\n" ]
[ [ "numpy.abs", "matplotlib.figure.Figure", "matplotlib.style.use", "matplotlib.use", "numpy.sort", "numpy.concatenate", "numpy.argmax", "numpy.mean", "numpy.diff", "numpy.argmin", "numpy.array" ] ]
lgeiger/tensorboard
[ "6b012202689ae3c55e27c3690455e47f8d18c54d", "6b012202689ae3c55e27c3690455e47f8d18c54d" ]
[ "tensorboard/loader.py", "tensorboard/plugins/scalar/summary_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TensorBoard data ingestion module.\n\nWARNING: This module is currently EXPERIMENTAL.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport functools\nimport inspect\nimport locale\nimport logging\nimport os\nimport re\nimport sys\nimport threading\nimport time\nimport types # pylint: disable=unused-import\n\nimport six\nimport tensorflow as tf\n\nfrom tensorboard import db\nfrom tensorboard import util\n\n\nclass Record(collections.namedtuple('Record', ('record', 'offset'))):\n \"\"\"Value class for a record returned by RecordReader.\n\n Fields:\n record: The byte string record that was read.\n offset: The byte offset in the file *after* this record was read.\n\n :type record: str\n :type offset: int\n \"\"\"\n __slots__ = () # Enforces use of only tuple fields.\n\n\[email protected]\[email protected]_2_unicode_compatible\nclass RecordReader(object):\n \"\"\"Pythonic veneer around PyRecordReader.\"\"\"\n\n def __init__(self, path, start_offset=0):\n \"\"\"Creates new instance.\n\n Args:\n path: Path of file. This can be on a remote file system if the\n TensorFlow build supports it.\n start_offset: Byte offset to seek in file once it's opened.\n\n :type path: str\n :type start_offset: int\n \"\"\"\n self.path = tf.compat.as_text(path)\n self._offset = start_offset\n self._size = -1\n self._reader = None # type: tf.pywrap_tensorflow.PyRecordReader\n self._is_closed = False\n self._lock = threading.Lock()\n\n def get_size(self):\n \"\"\"Returns byte length of file.\n\n This is guaranteed to return a number greater than or equal to the\n offset of the last record returned by get_next_record().\n\n This method can be called after the instance has been closed.\n\n Raises:\n IOError: If file has shrunk from last read offset, or start\n offset, or last read size.\n\n :rtype: int\n \"\"\"\n size = tf.gfile.Stat(self.path).length\n minimum = max(self._offset, self._size)\n if size < minimum:\n raise IOError('File shrunk: %d < %d: %s' % (size, minimum, self.path))\n self._size = size\n return size\n\n def get_next_record(self):\n \"\"\"Reads record from file.\n\n Returns:\n A Record or None if no more were available.\n\n Raises:\n IOError: On open or read error, or if close was called.\n tf.errors.DataLossError: If corruption was encountered in the\n records file.\n\n :rtype: Record\n \"\"\"\n if self._is_closed:\n raise IOError('%s is closed' % self)\n if self._reader is None:\n self._reader = self._open()\n try:\n if not inspect.getargspec(self._reader.GetNext).args[1:]: # pylint: disable=deprecated-method\n self._reader.GetNext()\n else:\n # GetNext() expects a status argument on TF <= 1.7\n with tf.errors.raise_exception_on_not_ok_status() as status:\n self._reader.GetNext(status)\n except tf.errors.OutOfRangeError:\n # We ignore partial read exceptions, 
because a record may be truncated.\n # PyRecordReader holds the offset prior to the failed read, so retrying\n # will succeed.\n return None\n self._offset = self._reader.offset()\n return Record(self._reader.record(), self._offset)\n\n def close(self):\n \"\"\"Closes record reader if open.\n\n Further reads are not permitted after this method is called.\n \"\"\"\n if self._is_closed:\n return\n if self._reader is not None:\n self._reader.Close()\n self._is_closed = True\n self._reader = None\n\n def _open(self):\n with tf.errors.raise_exception_on_not_ok_status() as status:\n return tf.pywrap_tensorflow.PyRecordReader_New(\n tf.resource_loader.readahead_file_path(tf.compat.as_bytes(self.path)),\n self._offset, tf.compat.as_bytes(''), status)\n\n def __str__(self):\n return u'RecordReader{%s}' % self.path\n\n\[email protected]\[email protected]_2_unicode_compatible\nclass BufferedRecordReader(object):\n \"\"\"Wrapper around RecordReader that does threaded read-ahead.\n\n This class implements the same interface as RecordReader. It prevents\n remote file systems from devastating loader performance. It does not\n degrade throughput on local file systems.\n\n The thread is spawned when the first read operation happens. The\n thread will diligently try to buffer records in the background. Its\n goal is to sleep as much as possible without blocking read operations.\n\n This class is thread safe. It can be used from multiple threads\n without any need for external synchronization.\n \"\"\"\n\n READ_AHEAD_AGGRESSION = 2.3 # Does full replenish when ~40% full.\n READ_AHEAD_BYTES = 16 * 1024 * 1024\n STAT_INTERVAL_SECONDS = 4.0\n\n def __init__(self, path,\n start_offset=0,\n read_ahead=READ_AHEAD_BYTES,\n stat_interval=STAT_INTERVAL_SECONDS,\n clock=time.time,\n record_reader_factory=RecordReader):\n \"\"\"Creates new instance.\n\n The i/o thread is not started until the first read happens.\n\n Args:\n path: Path of file. This can be on a remote file system if the\n TensorFlow build supports it.\n start_offset: Byte offset to seek in file once it's opened.\n read_ahead: The number of record bytes to buffer into memory\n before the thread starts blocking. This value must be >0 and\n the default is BufferedRecordReader.READ_AHEAD_BYTES.\n stat_interval: A float with the minimum number of seconds between\n stat calls, to determine the file size. 
If this is 0.0 then\n the thread will stat after every re-buffer, but never be\n woken up in order to stat.\n clock: Function returning a float with the number of seconds\n since the UNIX epoch in zulu time.\n record_reader_factory: The RecordReader constructor, which can be\n changed for testing.\n\n :type path: str\n :type start_offset: int\n :type read_ahead: int\n :type clock: () -> float\n :type record_reader_factory: (str, int) -> RecordReader\n \"\"\"\n self.path = tf.compat.as_text(path)\n self._read_ahead = read_ahead\n self._stat_interval = stat_interval\n self._clock = clock\n self._is_closed = False\n self._has_reached_end = False\n self._offset = 0\n self._size = -1\n self._last_stat = 0.0\n self._buffered = 0\n self._reader = record_reader_factory(self.path, start_offset)\n self._records = collections.deque() # type: collections.deque[Record]\n self._read_exception = \\\n None # type: tuple[BaseException, BaseException, types.TracebackType]\n self._close_exception = \\\n None # type: tuple[BaseException, BaseException, types.TracebackType]\n self._lock = threading.Lock()\n self._wake_up_producer = threading.Condition(self._lock)\n self._wake_up_consumers = threading.Condition(self._lock)\n self._thread = threading.Thread(target=self._run,\n name=_shorten_event_log_path(self.path))\n\n def get_size(self):\n \"\"\"Returns byte length of file.\n\n This is guaranteed to return a number greater than or equal to the\n offset of the last record returned by get_next_record().\n\n In the average case, this method will not block. However, if the\n i/o thread has not yet computed this value, then this method will\n block on a stat call.\n\n This method can be called after the instance has been closed.\n\n Returns:\n The byte length of file, which might increase over time, but is\n guaranteed to never decrease. It's also guaranteed that it will\n be greater than or equal to the offset field of any Record.\n\n :rtype: int\n \"\"\"\n with self._lock:\n if self._should_stat():\n self._stat()\n return self._size\n\n def get_next_record(self):\n \"\"\"Reads one record.\n\n When this method is first called, it will spawn the thread and\n block until a record is read. Once the thread starts, it will queue\n up records which can be read without blocking. The exception is\n when we reach the end of the file, in which case each repeated call\n will be synchronous. There is no background polling. If new data is\n appended to the file, new records won't be buffered until this\n method is invoked again. The caller should take care to meter calls\n to this method once it reaches the end of file, lest they impact\n performance.\n\n Returns:\n A Record object, or None if there are no more records available\n at the moment.\n\n Raises:\n IOError: If this instance has been closed.\n tf.errors.DataLossError: If corruption was encountered in the\n records file.\n Exception: To propagate any exceptions that may have been thrown\n by the read operation in the other thread. 
If an exception is\n thrown, then all subsequent calls to this method will rethrow\n that same exception.\n\n :rtype: Record\n \"\"\"\n with self._lock:\n if self._is_closed:\n raise IOError('%s is closed' % self)\n if not self._thread.is_alive():\n self._thread.start()\n else:\n record = self._get_record()\n if record is not None:\n if self._should_wakeup():\n self._wake_up_producer.notify()\n return record\n self._has_reached_end = False\n self._wake_up_producer.notify()\n while not (self._read_exception or\n self._has_reached_end or\n self._records):\n self._wake_up_consumers.wait()\n return self._get_record()\n\n def close(self):\n \"\"\"Closes event log reader if open.\n\n If the i/o thread is running, this method blocks until it has been\n shut down.\n\n Further reads are not permitted after this method is called.\n\n Raises:\n Exception: To propagate any exceptions that may have been thrown\n by the close operation in the other thread. If an exception\n is thrown, then all subsequent calls to this method will\n rethrow that same exception.\n \"\"\"\n with self._lock:\n if not self._is_closed:\n self._is_closed = True\n if not self._thread.is_alive():\n self._reader = None\n return\n self._wake_up_producer.notify()\n while self._reader is not None:\n self._wake_up_consumers.wait()\n if self._close_exception is not None:\n six.reraise(*self._close_exception)\n\n def _get_record(self):\n if self._read_exception is not None:\n six.reraise(*self._read_exception)\n if not self._records:\n return None\n record = self._records.popleft()\n self._buffered -= len(record.record)\n return record\n\n @util.guarded_by('_lock')\n def _should_wakeup(self):\n return (self._is_closed or\n self._read_exception is None and\n (self._should_rebuffer() or\n (self._stat_interval and self._should_stat())))\n\n @util.guarded_by('_lock')\n def _should_rebuffer(self):\n return (not self._has_reached_end and\n (float(self._buffered) <\n self._read_ahead / BufferedRecordReader.READ_AHEAD_AGGRESSION))\n\n @util.guarded_by('_lock')\n def _should_stat(self):\n return (self._read_exception is None and\n (self._offset > self._size or\n self._last_stat <= self._clock() - self._stat_interval))\n\n @util.guarded_by('_lock')\n def _stat(self):\n try:\n now = self._clock()\n self._size = self._reader.get_size()\n self._last_stat = now\n except Exception as e: # pylint: disable=broad-except\n tf.logging.debug('Stat failed: %s', e)\n self._read_exception = sys.exc_info()\n\n def _run(self):\n while True:\n with self._lock:\n while not self._should_wakeup():\n self._wake_up_producer.wait()\n if self._is_closed:\n try:\n self._reader.close()\n tf.logging.debug('Closed')\n except Exception as e: # pylint: disable=broad-except\n self._close_exception = sys.exc_info()\n tf.logging.debug('Close failed: %s', e)\n self._reader = None\n self._wake_up_consumers.notify_all()\n return\n if self._buffered >= self._read_ahead:\n tf.logging.debug('Waking up to stat')\n self._stat()\n continue\n # Calculate a good amount of data to read outside the lock.\n # The less we have buffered, the less re-buffering we'll do.\n # We want to minimize wait time in the other thread. 
See the\n # following contour plot: https://goo.gl/HTBcCU\n x = float(self._buffered)\n y = BufferedRecordReader.READ_AHEAD_AGGRESSION\n c = float(self._read_ahead)\n want = int(min(c - x, y/c * x**y + 1))\n # Perform re-buffering outside lock.\n self._rebuffer(want)\n\n def _rebuffer(self, want):\n tf.logging.debug('Waking up to read %s bytes', _localize_int(want))\n records = []\n read_exception = self._read_exception\n if read_exception is None:\n try:\n while want > 0:\n record = self._reader.get_next_record()\n if record is None:\n break\n self._offset = record.offset\n records.append(record)\n want -= len(record.record)\n except Exception as e: # pylint: disable=broad-except\n tf.logging.debug('Read failed: %s', e)\n read_exception = sys.exc_info()\n with self._lock:\n self._read_exception = read_exception\n if self._should_stat():\n self._stat()\n if not self._read_exception:\n if not records:\n self._has_reached_end = True\n else:\n for record in records:\n self._records.append(record)\n self._buffered += len(record.record)\n self._wake_up_consumers.notify_all()\n\n def __str__(self):\n return u'BufferedRecordReader{%s}' % self.path\n\n\nclass RateCounter(object):\n \"\"\"Utility class for tracking how much a number increases each second.\n\n The rate is calculated by averaging of samples within a time window,\n which weights recent samples more strongly.\n \"\"\"\n\n def __init__(self, window, clock=time.time):\n \"\"\"Creates new instance.\n\n Args:\n window: The maximum number of seconds across which rate is\n averaged. In practice, the rate might be averaged over a time\n period greater than window if set_value is being called less\n frequently than window.\n clock: Function returning a float with the number of seconds\n since the UNIX epoch in zulu time.\n\n :type window: float\n :type clock: () -> float\n \"\"\"\n self._window = window\n self._clock = clock\n self._points = collections.deque()\n self._last_value = None # type: float\n self._last_time = None # type: float\n\n def get_rate(self):\n \"\"\"Determines rate of increase in value per second averaged over window.\n\n Returns:\n An integer representing the rate or None if not enough\n information has been collected yet.\n\n :rtype: int\n \"\"\"\n points = []\n total_elapsed = 0.0\n total_weight = 0.0\n for rate, elapsed, _ in self._points:\n weight = 1.0 / (total_elapsed + 1) * elapsed\n total_elapsed += elapsed\n total_weight += weight\n points.append((rate, weight))\n if not total_weight:\n return 0\n return int(sum(w / total_weight * r for r, w in points))\n\n def set_value(self, value):\n \"\"\"Sets number state.\n\n This method adds a delta between value and the value of the last\n time this method was called. 
Therefore the first invocation does\n not add a delta.\n\n Raises:\n ValueError: If value is less than the last value.\n\n :type value: float\n \"\"\"\n value = float(value)\n now = self._clock()\n if self._last_value is None:\n self._last_value = value\n self._last_time = now\n return\n if value < self._last_value:\n raise ValueError('%f < %f' % (value, self._last_value))\n delta = value - self._last_value\n elapsed = now - self._last_time\n if not elapsed:\n return\n self._points.appendleft((delta / elapsed, elapsed, now))\n self._last_time = now\n self._last_value = value\n self._remove_old_points()\n\n def bump(self):\n \"\"\"Makes time since last set_value count for nothing.\"\"\"\n self._last_time = self._clock()\n\n def _remove_old_points(self):\n threshold = self._clock() - self._window\n while self._points:\n r, e, t = self._points.pop()\n if t > threshold:\n self._points.append((r, e, t))\n break\n\n\[email protected]\nclass Progress(object):\n \"\"\"Terminal UI for displaying job progress in terms of bytes.\n\n On teletypes, this class will display a nice ephemeral unicode\n progress bar. Otherwise it just emits periodic log messages.\n\n This class keeps track of the rate at which input is processed, as\n well as the rate it grows. These values are represented to the user\n using the DELTA and NABLA symbols.\n\n An alarm is displayed if the consumption rate falls behind the\n production rate. In order for this to be calculated properly, the\n sleep method of this class should be used rather than time.sleep.\n \"\"\"\n\n BAR_INTERVAL_SECONDS = 0.25\n BAR_LOGGER = logging.getLogger('tensorflow' + util.LogHandler.EPHEMERAL)\n BAR_WIDTH = 45\n BLOCK_DARK = u'\\u2593'\n BLOCK_LIGHT = u'\\u2591'\n DELTA = u'\\u2206'\n LOG_INTERVAL_SECONDS = 5.0\n NABLA = u'\\u2207'\n RATE_WINDOW = 20.0\n\n def __init__(self, clock=time.time,\n sleep=time.sleep,\n log_callback=tf.logging.info,\n bar_callback=BAR_LOGGER.info,\n rate_counter_factory=RateCounter):\n \"\"\"Creates new instance.\n\n Args:\n clock: Function returning a float with the number of seconds\n since the UNIX epoch in zulu time.\n sleep: Injected time.sleep function.\n log_callback: Callback for emitting normal log records.\n bar_callback: Callback for emitting ephemeral bar records.\n rate_counter_factory: Constructor to RateCounter, which can be\n swapped out for testing.\n\n :type clock: () -> float\n :type sleep: (float) -> None\n :type rate_counter_factory: (float) -> RateCounter\n \"\"\"\n self._clock = clock\n self._sleep = sleep\n self._log_callback = log_callback\n self._bar_callback = bar_callback\n self._initialized = False\n self._offset = 0\n self._size = 0\n self._last_log_time = 0.0\n self._last_bar_time = 0.0\n self._last_log_offset = -1\n self._last_bar_offset = -1\n self._rate_offset = rate_counter_factory(Progress.RATE_WINDOW)\n self._rate_size = rate_counter_factory(Progress.RATE_WINDOW)\n\n def set_progress(self, offset, size):\n \"\"\"Updates the progress bar state.\n\n This method will cause progress information to be occasionally\n written out.\n\n Args:\n offset: The number of bytes processed so far.\n size: The total number of bytes. 
This is allowed to increase or\n decrease, but it must remain at least offset.\n\n Raises:\n ValueError: If offset is greater than size, or offset or size\n decreased from the last invocation.\n\n :type offset: int\n :type size: int\n \"\"\"\n if offset > size:\n raise ValueError('offset (%d) can not exceed size (%d)' % (offset, size))\n self._rate_offset.set_value(offset)\n self._rate_size.set_value(size)\n self._offset = offset\n self._size = size\n now = self._clock()\n if not self._initialized:\n self._last_log_time = now\n self._last_bar_time = now\n self._initialized = True\n return\n elapsed = now - self._last_log_time\n if elapsed >= Progress.LOG_INTERVAL_SECONDS:\n self._last_log_time = now\n self._show_log()\n elapsed = now - self._last_bar_time\n if elapsed >= Progress.BAR_INTERVAL_SECONDS:\n self._last_bar_time = now\n self._show_bar()\n\n def close(self):\n \"\"\"Forces progress to be written to log.\n\n This method exists because we don't want the progress bar to say\n something like 98% once the file is done loading.\n \"\"\"\n self._show_log(can_stall=False)\n self._show_bar(can_stall=False)\n # Instructs util.LogHandler to clear the ephemeral logging state.\n self._bar_callback('')\n\n def sleep(self, seconds):\n \"\"\"Sleeps for a given number of seconds.\n\n Time spent sleeping in this method does not have a detrimental\n impact on the consumption rate.\n\n :type seconds: float\n \"\"\"\n self._sleep(seconds)\n self._rate_offset.bump()\n\n def _show_log(self, can_stall=True):\n is_stalled = can_stall and self._offset == self._last_log_offset\n self._last_log_offset = self._offset\n self._log_callback('Loaded %s', self._get_message(is_stalled))\n\n def _show_bar(self, can_stall=True):\n is_stalled = can_stall and self._offset == self._last_bar_offset\n self._last_bar_offset = self._offset\n sofar = int(self._get_fraction() * Progress.BAR_WIDTH)\n bar = (Progress.BLOCK_DARK * sofar +\n Progress.BLOCK_LIGHT * (Progress.BAR_WIDTH - sofar))\n self._bar_callback(u'%s %s ', bar, self._get_message(is_stalled))\n\n def _get_message(self, is_stalled):\n rate_offset = self._rate_offset.get_rate() # summary processing speed\n rate_size = self._rate_size.get_rate() # summary production speed\n message = u'%d%% of %s%s%s' % (\n int(self._get_fraction() * 100.0),\n _localize_int(self._size),\n self._get_rate_suffix(Progress.DELTA, rate_offset),\n self._get_rate_suffix(Progress.NABLA, rate_size))\n if rate_offset and rate_size and rate_offset < rate_size:\n # If TensorFlow is writing summaries to disk faster than we can\n # insert them into the database, that's kind of problematic.\n message += u' ' + self._make_red(u'[meltdown]')\n elif is_stalled:\n message += u' %s[stalled]%s' % (util.Ansi.BOLD, util.Ansi.RESET)\n return message\n\n def _get_fraction(self):\n if not self._size:\n return 0.0\n else:\n return float(self._offset) / self._size\n\n def _get_rate_suffix(self, symbol, rate):\n if not rate:\n return u''\n return u' %s %sB/s' % (symbol, _localize_int(rate))\n\n def _make_red(self, text):\n return (util.Ansi.BOLD +\n util.Ansi.RED +\n (util.Ansi.FLIP if self._offset % 2 == 0 else u'') +\n text +\n util.Ansi.RESET)\n\n\[email protected]\[email protected]_ordering\[email protected]_2_unicode_compatible\nclass EventLogReader(object):\n \"\"\"Helper class for reading from event log files.\n\n This class is a wrapper around BufferedRecordReader that operates on\n record files containing tf.Event protocol buffers.\n\n Fields:\n rowid: An integer primary key in EventLogs table, or 0 
if unknown.\n path: A string with the path of the event log on the local or\n remote file system.\n timestamp: An integer of the number of seconds since the UNIX epoch\n in UTC according to hostname at the time when the event log\n file was created.\n hostname: A string with the FQDN of the machine that wrote this\n event log file.\n \"\"\"\n\n def __init__(self, path,\n start_offset=0,\n record_reader_factory=BufferedRecordReader):\n \"\"\"Creates new instance.\n\n Args:\n path: Path of event log file.\n start_offset: Byte offset to seek in file once it's opened.\n record_reader_factory: A reference to the constructor of a class\n that implements the same interface as RecordReader.\n\n :type path: str\n :type record_reader_factory: (str, int) -> RecordReader\n \"\"\"\n self.rowid = 0\n self.path = tf.compat.as_text(path)\n m = _EVENT_LOG_PATH_PATTERN.search(self.path)\n if not m:\n raise ValueError('Bad event log path: ' + self.path)\n self.timestamp = int(m.group('timestamp'))\n self.hostname = m.group('hostname')\n self._offset = start_offset\n self._reader_factory = record_reader_factory\n self._reader = self._reader_factory(self.path, start_offset)\n self._key = (os.path.dirname(self.path), self.timestamp, self.hostname)\n\n def get_next_event(self):\n \"\"\"Reads an event proto from the file.\n\n Returns:\n A tf.Event or None if no more records exist in the file. Please\n note that the file remains open for subsequent reads in case more\n are appended later.\n\n :rtype: tf.Event\n \"\"\"\n record = self._reader.get_next_record()\n if record is None:\n return None\n event = tf.Event()\n event.ParseFromString(record.record)\n self._offset = record.offset\n return event\n\n def set_offset(self, offset):\n \"\"\"Sets byte offset in file.\n\n :type offset: int\n \"\"\"\n if offset == self._offset:\n return\n self._reader.close()\n self._reader = self._reader_factory(self.path, offset)\n self._offset = offset\n\n def get_offset(self):\n \"\"\"Returns current byte offset in file.\n\n :rtype: int\n \"\"\"\n return self._offset\n\n def get_size(self):\n \"\"\"Returns byte length of file.\n\n :rtype: int\n \"\"\"\n return self._reader.get_size()\n\n def save_progress(self, db_conn):\n \"\"\"Saves current offset to DB.\n\n The rowid property must be set beforehand.\n\n :type db_conn: db.Connection\n \"\"\"\n with contextlib.closing(db_conn.cursor()) as c:\n c.execute(\n 'UPDATE EventLogs SET offset = ? WHERE rowid = ? 
AND offset < ?',\n (self._offset, self.rowid, self._offset))\n\n def close(self):\n \"\"\"Closes event log reader if open.\n\n Further i/o is not permitted after this method is called.\n \"\"\"\n if self._reader is not None:\n self._reader.close()\n self._reader = None\n\n def __hash__(self):\n return hash(self._key)\n\n def __eq__(self, other):\n return self._key == other._key\n\n def __lt__(self, other):\n return self._key < other._key\n\n def __str__(self):\n offset = self.get_offset()\n if offset:\n return u'EventLogReader{path=%s, offset=%d}' % (self.path, offset)\n else:\n return u'EventLogReader{%s}' % self.path\n\n\[email protected]\[email protected]_ordering\[email protected]_2_unicode_compatible\nclass RunReader(object):\n \"\"\"Utility for loading event logs into the DB.\n\n This class merges the chain of event log files into one meaningful\n stream of events, ordered by step or timestamp.\n\n Fields:\n rowid: The primary key of the corresponding row in Runs.\n name: Display name of this run.\n \"\"\"\n\n def __init__(self, rowid, name):\n \"\"\"Creates new instance.\n\n Args:\n rowid: Primary key of run in `Runs` table, which should already\n be inserted. This is a bit-packed int made by db.RUN_ROWID.\n name: Display name of run.\n\n :type rowid: int\n :type name: str\n \"\"\"\n self.rowid = db.RUN_ROWID.check(rowid)\n self.run_id = db.RUN_ROWID.parse(rowid)[1]\n self.name = tf.compat.as_text(name)\n self._mark = -1\n self._logs = [] # type: list[EventLogReader]\n self._index = 0\n self._entombed_progress = 0\n self._saved_events = \\\n collections.deque() # type: collections.deque[tf.Event]\n self._prepended_events = \\\n collections.deque() # type: collections.deque[tf.Event]\n\n def add_event_log(self, db_conn, log):\n \"\"\"Adds event log to run loader.\n\n Event logs must be added monotonically, based on the timestamp in\n the filename. Please note that calling this method could cause a\n current batch of reads to fast forward.\n\n Args:\n db_conn: A PEP 249 Connection object.\n log: An EventLogReader instance.\n\n Returns:\n True if log was actually added.\n\n :type db_conn: db.Connection\n :type log: EventLogReader\n :rtype: bool\n \"\"\"\n if self._logs and log <= self._logs[-1]:\n return False\n with contextlib.closing(db_conn.cursor()) as c:\n c.execute(\n 'SELECT rowid, offset FROM EventLogs WHERE run_id = ? 
AND path = ?',\n (self.run_id, log.path))\n row = c.fetchone()\n if row:\n log.rowid = row[0]\n log.set_offset(row[1])\n else:\n event_log_id = db.EVENT_LOG_ID.generate()\n log.rowid = db.EVENT_LOG_ROWID.create(self.run_id, event_log_id)\n c.execute(\n ('INSERT INTO EventLogs (rowid, run_id, path, offset)'\n ' VALUES (?, ?, ?, 0)'),\n (log.rowid, self.run_id, log.path))\n tf.logging.debug('Adding %s', log)\n self._logs.append(log)\n # Skip over event logs we've already read.\n if log.get_offset() > 0 and not self._prepended_events:\n self._index = len(self._logs) - 1\n self._cleanup()\n return True\n\n def get_next_event(self):\n \"\"\"Returns next tf.Event from event logs or None if stalled.\n\n :rtype: tf.Event\n \"\"\"\n event = None\n if self._prepended_events:\n event = self._prepended_events.popleft()\n elif self._index < len(self._logs):\n while True:\n log = self._logs[self._index]\n event = log.get_next_event()\n if event is not None:\n break\n if self._index == len(self._logs) - 1:\n break\n self._index += 1\n self._cleanup()\n if event is not None and self._mark != -1:\n self._saved_events.append(event)\n return event\n\n def mark_peek_reset(self):\n \"\"\"Returns next event without advancing.\n\n Note: This method sets the mark to the current position.\n\n :rtype: tf.Event\n \"\"\"\n self.mark()\n result = self.get_next_event()\n self.reset()\n return result\n\n def get_offset(self):\n \"\"\"Returns number of bytes read across all event log files.\n\n :rtype: int\n \"\"\"\n if self._mark != -1:\n return self._mark\n return self._get_offset()\n\n def _get_offset(self):\n return sum(el.get_offset() for el in self._logs) + self._entombed_progress\n\n def get_size(self):\n \"\"\"Returns sum of byte lengths of event log files.\n\n :rtype: int\n \"\"\"\n return sum(el.get_size() for el in self._logs) + self._entombed_progress\n\n def save_progress(self, db_conn):\n \"\"\"Saves current offsets of all open event logs to DB.\n\n This should be called after the mark has been advanced.\n\n :type db_conn: db.Connection\n \"\"\"\n n = 0\n while self._index >= n < len(self._logs):\n self._logs[n].save_progress(db_conn)\n n += 1\n\n def mark(self):\n \"\"\"Marks current position in file so reset() can be called.\"\"\"\n if self._prepended_events:\n raise ValueError('mark() offsets must be monotonic')\n self._mark = self._get_offset()\n self._saved_events.clear()\n\n def reset(self):\n \"\"\"Resets read state to where mark() was called.\"\"\"\n if self._mark == -1:\n return\n self._prepended_events.extend(self._saved_events)\n self._saved_events.clear()\n\n def close(self):\n \"\"\"Closes all event log readers.\n\n This method may be called multiple times, but further operations\n are not permitted.\n\n Raises:\n Exception: To propagate the most recent exception thrown by the\n EventLogReader close method. Suppressed exceptions are\n logged.\n \"\"\"\n util.close_all(self._logs)\n self._index = len(self._logs)\n self._mark = -1\n self._prepended_events.clear()\n self._saved_events.clear()\n\n def _cleanup(self):\n # Last event log has to be preserved so we can continue enforcing\n # monotonicity. 
We entomb offset because that also has to be\n # monotonic, but the size does not.\n if 0 < self._index < len(self._logs):\n deleted = self._logs[:self._index]\n self._logs = self._logs[self._index:]\n self._index = 0\n self._entombed_progress += sum(l.get_offset() for l in deleted)\n util.close_all(deleted)\n\n def _skip_to_event_log(self, i):\n should_mark = self._mark != -1 and i > self._index\n self._index = i\n if should_mark:\n self._prepended_events.clear()\n self.mark()\n\n def __hash__(self):\n return hash(self.rowid)\n\n def __eq__(self, other):\n return self.rowid == other.rowid\n\n def __lt__(self, other):\n return self.rowid < other.rowid\n\n def __str__(self):\n offset = self.get_offset()\n if offset:\n return u'RunReader{name=%s, offset=%d}' % (self.name, offset)\n else:\n return u'RunReader{%s}' % self.name\n\n\ndef _get_basename(path):\n \"\"\"Gets base name of path.\n\n This is the same as os.path.basename, however it may potentially do\n i/o to handle a few edge cases, which would otherwise cause the\n result to be less meaningful, e.g. \".\" and \"..\".\n\n :type path: str\n :rtype: str\n \"\"\"\n return os.path.basename(os.path.normpath(os.path.join(_get_cwd(), path)))\n\n\ndef _get_cwd():\n \"\"\"Returns current directory and try not to expand symlinks.\n\n :rtype: str\n \"\"\"\n result = os.environ.get('PWD')\n if not result:\n result = os.getcwd()\n return result\n\n\ndef get_event_logs(directory):\n \"\"\"Walks directory tree for EventLogReader files.\n\n Args:\n directory: Path of directory.\n\n Returns:\n List of EventLogReader objects, ordered by directory name and\n timestamp.\n\n :type directory: str\n :rtype: list[EventLogReader]\n \"\"\"\n logs = []\n for dirname, _, filenames in tf.gfile.Walk(directory):\n for filename in filenames:\n if is_event_log_file(filename):\n logs.append(EventLogReader(os.path.join(dirname, filename)))\n logs.sort()\n return logs\n\n\n_EVENT_LOG_PATH_PATTERN = re.compile(\n r'\\.tfevents\\.(?P<timestamp>\\d+).(?P<hostname>[-.0-9A-Za-z]+)$')\n\n\ndef is_event_log_file(path):\n \"\"\"Returns True if path appears to be an event log file.\n\n :type path: str\n :rtype: bool\n \"\"\"\n return bool(_EVENT_LOG_PATH_PATTERN.search(path))\n\n\n_SHORTEN_EVENT_LOG_PATH_PATTERN = re.compile(r'(?:[^/\\\\]+[/\\\\])?(?:[^/\\\\]+)$')\n\n\ndef _shorten_event_log_path(path):\n \"\"\"Makes an event log path more human readable.\n\n Returns:\n Path containing only basename and the first parent directory name,\n if there is one.\n\n :type path: str\n :rtype: str\n \"\"\"\n m = _SHORTEN_EVENT_LOG_PATH_PATTERN.search(path)\n return m.group(0) if m else None\n\n\ndef _localize_int(n):\n \"\"\"Adds locale specific thousands group separators.\n\n :type n: int\n :rtype: str\n \"\"\"\n return locale.format('%d', n, grouping=True)\n", "# -*- coding: utf-8 -*-\n# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the scalar plugin summary generation functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\nimport tensorflow as tf\n\nfrom tensorboard.plugins.scalar import metadata\nfrom tensorboard.plugins.scalar import summary\n\n\nclass SummaryTest(tf.test.TestCase):\n\n def pb_via_op(self, summary_op, feed_dict=None):\n with tf.Session() as sess:\n actual_pbtxt = sess.run(summary_op, feed_dict=feed_dict or {})\n actual_proto = tf.Summary()\n actual_proto.ParseFromString(actual_pbtxt)\n return actual_proto\n\n def normalize_summary_pb(self, pb):\n \"\"\"Pass `pb`'s `TensorProto` through a marshalling roundtrip.\n\n `TensorProto`s can be equal in value even if they are not identical\n in representation, because data can be stored in either the\n `tensor_content` field or the `${dtype}_value` field. This\n normalization ensures a canonical form, and should be used before\n comparing two `Summary`s for equality.\n \"\"\"\n result = tf.Summary()\n result.MergeFrom(pb)\n for value in result.value:\n if value.HasField('tensor'):\n new_tensor = tf.make_tensor_proto(tf.make_ndarray(value.tensor))\n value.ClearField('tensor')\n value.tensor.MergeFrom(new_tensor)\n return result\n\n def compute_and_check_summary_pb(self, name, data,\n display_name=None, description=None,\n data_tensor=None, feed_dict=None):\n \"\"\"Use both `op` and `pb` to get a summary, asserting equality.\n\n Returns:\n a `Summary` protocol buffer\n \"\"\"\n if data_tensor is None:\n data_tensor = tf.constant(data)\n op = summary.op(\n name, data, display_name=display_name, description=description)\n pb = self.normalize_summary_pb(summary.pb(\n name, data, display_name=display_name, description=description))\n pb_via_op = self.normalize_summary_pb(\n self.pb_via_op(op, feed_dict=feed_dict))\n self.assertProtoEquals(pb, pb_via_op)\n return pb\n\n def test_metadata(self):\n pb = self.compute_and_check_summary_pb('a', 1.13)\n summary_metadata = pb.value[0].metadata\n plugin_data = summary_metadata.plugin_data\n self.assertEqual(summary_metadata.display_name, 'a')\n self.assertEqual(summary_metadata.summary_description, '')\n self.assertEqual(plugin_data.plugin_name, metadata.PLUGIN_NAME)\n content = summary_metadata.plugin_data.content\n # There's no content, so successfully parsing is fine.\n metadata.parse_plugin_metadata(content)\n\n def test_explicit_display_name_and_description(self):\n display_name = '\"A\"'\n description = 'The first letter of the alphabet.'\n pb = self.compute_and_check_summary_pb('a', 1.13,\n display_name=display_name,\n description=description)\n summary_metadata = pb.value[0].metadata\n self.assertEqual(summary_metadata.display_name, display_name)\n self.assertEqual(summary_metadata.summary_description, description)\n plugin_data = 
summary_metadata.plugin_data\n self.assertEqual(plugin_data.plugin_name, metadata.PLUGIN_NAME)\n content = summary_metadata.plugin_data.content\n # There's no content, so successfully parsing is fine.\n metadata.parse_plugin_metadata(content)\n\n def test_float_value(self):\n pb = self.compute_and_check_summary_pb('a', 1.13)\n value = tf.make_ndarray(pb.value[0].tensor).item()\n self.assertEqual(float, type(value))\n self.assertNear(1.13, value, 1e-6)\n\n def test_int_value(self):\n # ints should be valid, but converted to floats.\n pb = self.compute_and_check_summary_pb('a', 113)\n value = tf.make_ndarray(pb.value[0].tensor).item()\n self.assertEqual(float, type(value))\n self.assertNear(113.0, value, 1e-6)\n\n def test_bool_value(self):\n # bools should be valid, but converted to floats.\n pb = self.compute_and_check_summary_pb('a', True)\n value = tf.make_ndarray(pb.value[0].tensor).item()\n self.assertEqual(float, type(value))\n self.assertEqual(1.0, value)\n\n def test_string_value_in_op(self):\n with six.assertRaisesRegex(self, Exception, r'Cast str.*float'):\n with tf.Session() as sess:\n sess.run(summary.op('a', tf.constant(\"113\")))\n\n def test_string_value_in_pb(self):\n with six.assertRaisesRegex(self, ValueError, r'Cast str.*float'):\n summary.pb('a', np.array(\"113\"))\n\n def test_requires_rank_0_in_op(self):\n with six.assertRaisesRegex(self, Exception, r'Expected scalar shape'):\n with tf.Session() as sess:\n sess.run(summary.op('a', tf.constant([1, 1, 3])))\n\n def test_requires_rank_0_in_pb(self):\n with six.assertRaisesRegex(self, ValueError, r'Expected scalar shape'):\n summary.pb('a', np.array([1, 1, 3]))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.gfile.Walk", "tensorflow.logging.debug", "tensorflow.gfile.Stat", "tensorflow.compat.as_bytes", "tensorflow.errors.raise_exception_on_not_ok_status", "tensorflow.Event", "tensorflow.compat.as_text" ], [ "tensorflow.constant", "tensorflow.test.main", "tensorflow.Session", "tensorflow.Summary", "tensorflow.make_ndarray", "numpy.array" ] ]
jw9730/clova-speech-hackathon
[ "72cc2e31b0ec18a6486ddc746835a472bf6577fe" ]
[ "test.py" ]
[ "import wavio\nimport torch\nimport numpy as np\nfrom specaugment import spec_augment_pytorch, melscale_pytorch\nimport matplotlib.pyplot as plt\n\nPAD = 0\nN_FFT = 512\nSAMPLE_RATE = 16000\n\ndef trim(data, threshold_attack=0.01, threshold_release=0.05, attack_margin=5000, release_margin=5000):\n data_size = len(data)\n cut_head = 0\n cut_tail = data_size\n\n plt.subplot(5,1,1)\n plt.plot(data)\n\n # Square\n w = np.power(np.divide(data, np.max(data)), 2)\n\n plt.subplot(5,1,2)\n plt.plot(w)\n\n # Gaussian kernel\n sig = 20000\n time = np.linspace(-40000, 40000)\n kernel = np.exp(-np.square(time)/2/sig/sig)\n\n # Smooth and normalize\n w = np.convolve(w, kernel, mode='same')\n w = np.divide(w, np.max(w))\n\n plt.subplot(5,1,3)\n plt.plot(w)\n\n\n # Detect crop sites\n for sample in range(data_size):\n sample_num = sample\n sample_amp = w[sample_num]\n if sample_amp > threshold_attack:\n cut_head = np.max([sample_num - attack_margin, 0])\n break\n\n for sample in range(data_size):\n sample_num = data_size-sample-1\n sample_amp = w[sample_num]\n if sample_amp > threshold_release:\n cut_tail = np.min([sample_num + release_margin, data_size])\n break\n\n print(cut_head)\n print(cut_tail)\n plt.subplot(5,1,4)\n plt.plot(data[cut_head:cut_tail])\n\n data_copy = data[cut_head:cut_tail]\n del w, time, kernel, data\n\n plt.subplot(5,1,5)\n plt.plot(data_copy)\n #plt.show()\n\n return data_copy\n\n\ndef get_spectrogram_feature(filepath, train_mode=False):\n (rate, width, sig) = wavio.readwav(filepath)\n wavio.writewav24(\"test.wav\", rate=rate, data=sig)\n sig = sig.ravel()\n sig = trim(sig)\n\n stft = torch.stft(torch.FloatTensor(sig),\n N_FFT,\n hop_length=int(0.01*SAMPLE_RATE),\n win_length=int(0.030*SAMPLE_RATE),\n window=torch.hamming_window(int(0.030*SAMPLE_RATE)),\n center=False,\n normalized=False,\n onesided=True)\n\n stft = (stft[:,:,0].pow(2) + stft[:,:,1].pow(2)).pow(0.5)\n\n amag = stft.clone().detach()\n\n amag = amag.view(-1, amag.shape[0], amag.shape[1]) # reshape spectrogram shape to [batch_size, time, frequency]\n mel = melscale_pytorch.mel_scale(amag, sample_rate=SAMPLE_RATE, n_mels=N_FFT//2+1) # melspec with same shape\n\n plt.subplot(1,2,1)\n plt.imshow(mel.transpose(1,2).squeeze(), cmap='jet')\n\n p = 1 # always augment\n randp = np.random.uniform(0, 1)\n do_aug = p > randp\n if do_aug & train_mode: # apply augment\n print(\"augment image\")\n mel = spec_augment_pytorch.spec_augment(mel, time_warping_para=80, frequency_masking_para=54,\n time_masking_para=50, frequency_mask_num=1, time_mask_num=1)\n feat = mel.view(mel.shape[1], mel.shape[2]) # squeeze back to [frequency, time]\n feat = feat.transpose(0, 1).clone().detach()\n\n plt.subplot(1,2,2)\n plt.imshow(feat, cmap='jet')\n plt.show() # display it\n\n del stft, amag, mel\n return feat\n\n\nfilepath = \"./sample_dataset/train/train_data/wav_007.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)\n\nfilepath = \"./sample_dataset/train/train_data/wav_002.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)\n\nfilepath = \"./sample_dataset/train/train_data/wav_006.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)\n\nfilepath = \"./sample_dataset/train/train_data/wav_016.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)\n\nfilepath = \"./sample_dataset/train/train_data/wav_040.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)" ]
[ [ "numpy.convolve", "numpy.square", "matplotlib.pyplot.imshow", "numpy.linspace", "numpy.min", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.subplot", "torch.FloatTensor", "numpy.random.uniform", "matplotlib.pyplot.show" ] ]
risteon/tf_quat2rot
[ "f308cab83e552300c274b733dd6cc5609269feb4" ]
[ "tf_quat2rot/check.py" ]
[ "# -*- coding: utf-8 -*-\n\n__author__ = \"\"\"Christoph Rist\"\"\"\n__email__ = \"[email protected]\"\n\nimport tensorflow as tf\n\n\ndef assert_normalized_quaternion(quaternion: tf.Tensor):\n with tf.control_dependencies(\n [\n tf.debugging.assert_near(\n tf.ones_like(quaternion[..., 0]),\n tf.linalg.norm(quaternion, axis=-1),\n message=\"Input quaternions are not normalized.\",\n )\n ]\n ):\n return tf.identity(quaternion)\n\n\ndef assert_valid_rotation(rotation_matrix: tf.Tensor):\n r = rotation_matrix\n with tf.control_dependencies(\n [\n tf.debugging.assert_near(\n tf.ones_like(rotation_matrix[..., 0, 0]),\n tf.linalg.det(rotation_matrix),\n message=\"Invalid rotation matrix.\",\n ),\n tf.debugging.assert_near(\n tf.linalg.matmul(r, r, transpose_a=True),\n tf.eye(3, batch_shape=tf.shape(r)[:-2], dtype=r.dtype),\n message=\"Invalid rotation matrix.\",\n ),\n ]\n ):\n return tf.identity(r)\n" ]
[ [ "tensorflow.shape", "tensorflow.ones_like", "tensorflow.identity", "tensorflow.linalg.matmul", "tensorflow.linalg.det", "tensorflow.linalg.norm" ] ]
MichaelXcc/seldon-core
[ "e304ba28b9ef14bbda4f357bb145db2732a9e0a5" ]
[ "testing/scripts/test_benchmark.py" ]
[ "import json\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nfrom google.protobuf import json_format\n\nfrom seldon_e2e_utils import post_comment_in_pr, run_benchmark_and_capture_results\n\n\[email protected]\[email protected](\"argo_worfklows\")\ndef test_service_orchestrator():\n\n sort_by = [\"apiType\", \"disableOrchestrator\"]\n\n data_size = 1_000\n data = [100.0] * data_size\n\n data_tensor = {\"data\": {\"tensor\": {\"values\": data, \"shape\": [1, data_size]}}}\n\n df = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n disable_orchestrator_list=[\"false\", \"true\"],\n image_list=[\"seldonio/seldontest_predict:1.10.0-dev\"],\n benchmark_data=data_tensor,\n )\n df = df.sort_values(sort_by)\n\n result_body = \"# Benchmark results - Testing Service Orchestrator\\n\\n\"\n\n orch_mean = all(\n (\n df[df[\"disableOrchestrator\"] == \"false\"][\"mean\"].values\n - df[df[\"disableOrchestrator\"] == \"true\"][\"mean\"].values\n )\n < 3\n )\n result_body += f\"* Orch added mean latency under 4ms: {orch_mean}\\n\"\n orch_nth = all(\n (\n df[df[\"disableOrchestrator\"] == \"false\"][\"95th\"].values\n - df[df[\"disableOrchestrator\"] == \"true\"][\"95th\"].values\n )\n < 5\n )\n result_body += f\"* Orch added 95th latency under 5ms: {orch_nth}\\n\"\n orch_nth = all(\n (\n df[df[\"disableOrchestrator\"] == \"false\"][\"99th\"].values\n - df[df[\"disableOrchestrator\"] == \"true\"][\"99th\"].values\n )\n < 10\n )\n result_body += f\"* Orch added 99th latency under 10ms: {orch_nth}\\n\"\n\n # We have to set no errors to 1 as the tools for some reason have 1 as base\n no_err = all(df[\"errors\"] <= 1)\n result_body += f\"* No errors: {no_err}\\n\"\n\n result_body += \"\\n### Results table\\n\\n\"\n result_body += str(df.to_markdown())\n post_comment_in_pr(result_body)\n\n assert orch_mean\n assert orch_nth\n\n\[email protected]\[email protected](\"argo_worfklows\")\ndef test_workers_performance():\n\n sort_by = [\"apiType\", \"serverWorkers\"]\n\n data_size = 10\n data = [100.0] * data_size\n\n data_tensor = {\"data\": {\"tensor\": {\"values\": data, \"shape\": [1, data_size]}}}\n\n df = run_benchmark_and_capture_results(\n api_type_list=[\"grpc\", \"rest\"],\n server_workers_list=[\"1\", \"5\", \"10\"],\n benchmark_concurrency_list=[\"10\", \"100\", \"1000\"],\n parallelism=\"1\",\n requests_cpu_list=[\"4000Mi\"],\n limits_cpu_list=[\"4000Mi\"],\n image_list=[\"seldonio/seldontest_predict:1.10.0-dev\"],\n benchmark_data=data_tensor,\n )\n df = df.sort_values(sort_by)\n\n result_body = \"# Benchmark results - Testing Workers Performance\\n\\n\"\n\n result_body += \"\\n### Results table\\n\\n\"\n result_body += str(df.to_markdown())\n post_comment_in_pr(result_body)\n\n\[email protected]\[email protected](\"argo_worfklows\")\ndef test_python_wrapper_v1_vs_v2_iris():\n\n sort_by = [\"concurrency\", \"apiType\"]\n benchmark_concurrency_list = [\"1\", \"50\", \"150\"]\n\n result_body = \"\"\n result_body += \"\\n# Benchmark Results - Python Wrapper V1 vs V2\\n\\n\"\n\n # Using single worker as fastapi also uses single worker\n df_pywrapper = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n protocol=\"seldon\",\n server_list=[\"SKLEARN_SERVER\"],\n benchmark_concurrency_list=benchmark_concurrency_list,\n model_uri_list=[\"gs://seldon-models/v1.12.0-dev/sklearn/iris\"],\n benchmark_data={\"data\": {\"ndarray\": [[1, 2, 3, 4]]}},\n )\n df_pywrapper = df_pywrapper.sort_values(sort_by)\n\n conc_idx = df_pywrapper[\"concurrency\"] == 1\n # 
Python V1 Wrapper Validations\n # Ensure base mean performance latency below 10 ms\n v1_latency_mean = all((df_pywrapper[conc_idx][\"mean\"] < 10))\n result_body += f\"* V1 base mean performance latency under 10ms: {v1_latency_mean}\\n\"\n # Ensure 99th percentiles are not spiking above 15ms\n v1_latency_nth = all(df_pywrapper[conc_idx][\"99th\"] < 10)\n result_body += f\"* V1 base 99th performance latenc under 10ms: {v1_latency_nth}\\n\"\n # Ensure throughput is above 180 rps for REST\n v1_rps_rest = all(\n df_pywrapper[(df_pywrapper[\"apiType\"] == \"rest\") & conc_idx][\n \"throughputAchieved\"\n ]\n > 180\n )\n result_body += f\"* V1 base throughput above 180rps: {v1_rps_rest}\\n\"\n # Ensure throughput is above 250 rps for GRPC\n v1_rps_grpc = all(\n df_pywrapper[(df_pywrapper[\"apiType\"] == \"grpc\") & conc_idx][\n \"throughputAchieved\"\n ]\n > 250\n )\n result_body += f\"* V1 base throughput above 250rps: {v1_rps_grpc}\\n\"\n # Validate latenc added by adding service orchestrator is lower than 4ms\n\n # TODO: Validate equivallent of parallel workers in MLServer\n df_mlserver = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n model_name=\"classifier\",\n protocol=\"kfserving\",\n server_list=[\"SKLEARN_SERVER\"],\n model_uri_list=[\"gs://seldon-models/sklearn/iris-0.23.2/lr_model\"],\n benchmark_concurrency_list=benchmark_concurrency_list,\n benchmark_data={\n \"inputs\": [\n {\n \"name\": \"predict\",\n \"datatype\": \"FP32\",\n \"shape\": [1, 4],\n \"data\": [[1, 2, 3, 4]],\n }\n ]\n },\n benchmark_grpc_data_override={\n \"model_name\": \"classifier\",\n \"inputs\": [\n {\n \"name\": \"predict\",\n \"datatype\": \"FP32\",\n \"shape\": [1, 4],\n \"contents\": {\"fp32_contents\": [1, 2, 3, 4]},\n }\n ],\n },\n )\n # First we sort the dataframes to ensure they are compared correctly\n df_mlserver = df_mlserver.sort_values(sort_by)\n\n # Python V1 Wrapper Validations\n\n conc_idx = df_mlserver[\"concurrency\"] == 1\n # Ensure all mean performance latency below 5 ms\n v2_latency_mean = all(df_mlserver[conc_idx][\"mean\"] < 5)\n result_body += f\"* V2 mean performance latency under 5ms: {v2_latency_mean}\\n\"\n # Ensure 99th percentiles are not spiking above 15ms\n v2_latency_nth = all(df_mlserver[conc_idx][\"99th\"] < 10)\n result_body += f\"* V2 99th performance latenc under 10ms: {v2_latency_nth}\\n\"\n # Ensure throughput is above 180 rps for REST\n v2_rps_rest = all(\n df_mlserver[(df_mlserver[\"apiType\"] == \"rest\") & conc_idx][\"throughputAchieved\"]\n > 250\n )\n result_body += f\"* V2 REST throughput above 250rps: {v2_rps_rest}\\n\"\n # Ensure throughput is above 250 rps for GRPC\n v2_rps_grpc = all(\n df_mlserver[(df_mlserver[\"apiType\"] == \"grpc\") & conc_idx][\"throughputAchieved\"]\n > 250\n )\n result_body += f\"* V2 throughput above 300rps: {v2_rps_grpc}\\n\"\n\n result_body += \"\\n### Python V1 Wrapper Results table\\n\\n\"\n result_body += str(df_pywrapper.to_markdown())\n result_body += \"\\n\\n\\n### Python V2 MLServer Results table\\n\\n\"\n result_body += str(df_mlserver.to_markdown())\n\n post_comment_in_pr(result_body)\n\n assert v1_latency_mean\n assert v1_latency_nth\n assert v1_rps_rest\n assert v1_rps_grpc\n assert v2_latency_mean\n assert v2_latency_nth\n assert v2_rps_rest\n assert v2_rps_grpc\n\n\[email protected]\[email protected](\"argo_worfklows\")\ndef test_v1_seldon_data_types():\n\n sort_by = [\"concurrency\", \"apiType\"]\n\n # 10000 element array\n data_size = 10_000\n data = [100.0] * data_size\n\n 
benchmark_concurrency_list = [\"1\", \"50\", \"150\"]\n\n image_list = [\"seldonio/seldontest_predict:1.10.0-dev\"]\n\n data_ndarray = {\"data\": {\"ndarray\": data}}\n data_tensor = {\"data\": {\"tensor\": {\"values\": data, \"shape\": [1, data_size]}}}\n\n array = np.array(data)\n tftensor_proto = tf.make_tensor_proto(array)\n tftensor_json_str = json_format.MessageToJson(tftensor_proto)\n tftensor_dict = json.loads(tftensor_json_str)\n data_tftensor = {\"data\": {\"tftensor\": tftensor_dict}}\n\n df_ndarray = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n image_list=image_list,\n benchmark_concurrency_list=benchmark_concurrency_list,\n benchmark_data=data_ndarray,\n )\n df_ndarray = df_ndarray.sort_values(sort_by)\n\n df_tensor = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n image_list=image_list,\n benchmark_concurrency_list=benchmark_concurrency_list,\n benchmark_data=data_tensor,\n )\n df_tensor = df_tensor.sort_values(sort_by)\n\n df_tftensor = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n image_list=image_list,\n benchmark_concurrency_list=benchmark_concurrency_list,\n benchmark_data=data_tftensor,\n )\n df_tftensor = df_tftensor.sort_values(sort_by)\n\n result_body = \"# Benchmark results - Testing Seldon V1 Data Types\\n\\n\"\n\n result_body += \"\\n### Results for NDArray\\n\\n\"\n result_body += str(df_ndarray.to_markdown())\n result_body += \"\\n### Results for Tensor\\n\\n\"\n result_body += str(df_tensor.to_markdown())\n result_body += \"\\n### Results for TFTensor\\n\\n\"\n result_body += str(df_tftensor.to_markdown())\n post_comment_in_pr(result_body)\n" ]
[ [ "numpy.array", "tensorflow.make_tensor_proto" ] ]
MATHplus-Young-Academy/P2-Cardiac-Motion
[ "844995e8e5760f981c425d13c0bd7f2f3bb8baec" ]
[ "NN_segmentation/tst_dataset.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 3 17:32:08 2020\n\n@author: apramanik\n\"\"\"\n\n\n\nimport numpy as np\nimport SimpleITK as sitk \nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport matplotlib.pyplot as plt\n\n\n\n#%% Functions\ndef normalize_img(img):\n img = img.copy().astype(np.float32)\n img -= np.mean(img)\n img /= np.std(img)\n return img\n\ndef crop_img(img):\n sizex = 144\n sizey = 144\n sizez = 8\n img = img.copy()\n sh = img.shape\n midptx = int(sh[2]/2)\n midpty = int(sh[3]/2)\n if sh[1]<8:\n residue=8-sh[1]\n a=np.zeros((sh[0],int(residue),144,144),dtype=np.float32)\n img=img[:,:,midptx-int(sizex/2):midptx+int(sizex/2),midpty-int(sizey/2):midpty+int(sizey/2)]\n img=np.concatenate((img,a),axis=1)\n else:\n midptz = int(sh[1]/2)\n img = img[:,midptz-int(sizez/2):midptz+int(sizez/2),midptx-int(sizex/2):midptx+int(sizex/2),midpty-int(sizey/2):midpty+int(sizey/2)]\n return img\n \ndef crop_label(img):\n sizex = 144\n sizey = 144\n sizez = 8\n img = img.copy()\n sh = img.shape\n midptx = int(sh[1]/2)\n midpty = int(sh[2]/2)\n if sh[0]<8:\n residue=8-sh[0]\n a=np.zeros((int(residue),144,144),dtype=np.float32)\n img=img[:,midptx-int(sizex/2):midptx+int(sizex/2),midpty-int(sizey/2):midpty+int(sizey/2)]\n img=np.concatenate((img,a),axis=0)\n else:\n midptz = int(sh[0]/2)\n img = img[midptz-int(sizez/2):midptz+int(sizez/2),midptx-int(sizex/2):midptx+int(sizex/2),midpty-int(sizey/2):midpty+int(sizey/2)]\n return img\n \ndef crop_img_paper(img):\n sizez = 8\n img = img.copy()\n sh = img.shape\n if sh[1]<8:\n residue=8-sh[1]\n a=np.zeros((sh[0],int(residue),sh[2],sh[3]),dtype=np.float32)\n img=np.concatenate((img,a),axis=1)\n else:\n midptz = int(sh[1]/2)\n img = img[:,midptz-int(sizez/2):midptz+int(sizez/2),:,:]\n return img\n \ndef crop_label_paper(img):\n sizez = 8\n img = img.copy()\n sh = img.shape\n if sh[0]<8:\n residue=8-sh[0]\n a=np.zeros((int(residue),sh[1],sh[2]),dtype=np.float32)\n img=np.concatenate((img,a),axis=0)\n else:\n midptz = int(sh[0]/2)\n img = img[midptz-int(sizez/2):midptz+int(sizez/2),:,:]\n return img\n\n#%%paths\n\n\n#%%dataset preparation\n\nclass cardiacdata_old(Dataset):\n\n def __init__(self, img_dir = \"./Datasets/patient005/patient005_4d.nii.gz\", label_dir = r'./Datasets/patient005/patient005_frame01_gt.nii.gz'): \n IMG_DIR = \"./Datasets\"\n \n ptnum=str(5).zfill(3) \n img_dir = IMG_DIR + '/patient'+ptnum+'/patient'+ptnum+'_4d.nii.gz'\n dummy_img = sitk.GetArrayFromImage(sitk.ReadImage(img_dir))\n dummy_img = crop_img(dummy_img)\n \n file = open(IMG_DIR + '/patient'+ptnum+'/'+\"Info.cfg\",\"r\")\n es=int(file.read().split(\"\\n\")[1].split(\":\")[1])\n es_str=str(es).zfill(2)\n gt_dir_es = IMG_DIR + '/patient'+ptnum+'/patient'+ptnum+'_frame'+es_str+'_gt.nii.gz'\n es_label = sitk.GetArrayFromImage(sitk.ReadImage(gt_dir_es))\n es_label = crop_label(es_label)\n\n file = open(IMG_DIR + '/patient'+ptnum+'/'+\"Info.cfg\",\"r\")\n ed=int(file.read().split(\"\\n\")[0].split(\":\")[1])\n ed_str=str(ed).zfill(2)\n gt_dir_ed = IMG_DIR + '/patient'+ptnum+'/patient'+ptnum+'_frame'+ed_str+'_gt.nii.gz'\n ed_label = sitk.GetArrayFromImage(sitk.ReadImage(gt_dir_ed))\n ed_label = crop_label(ed_label)\n\n a = dummy_img[ed-1:ed]\n b = dummy_img[es-1:es]\n dummy_img = np.concatenate((a,b),axis=0)\n dummy_img = normalize_img(dummy_img)\n\n ed_label = np.expand_dims(ed_label,axis=0)\n es_label = np.expand_dims(es_label,axis=0)\n dummy_gt = np.concatenate((ed_label,es_label),axis=0)\n\n \n self.img = 
np.expand_dims(np.reshape(dummy_img,[dummy_img.shape[0]*dummy_img.shape[1],dummy_img.shape[2],dummy_img.shape[3]]),axis=0) \n self.gt = np.expand_dims(np.reshape(dummy_gt,[dummy_gt.shape[0]*dummy_gt.shape[1],dummy_gt.shape[2],dummy_gt.shape[3]]),axis=0)\n self.len = self.img.shape[0]\n \n return\n \n def __len__(self):\n return self.len\n \n def __getitem__(self, i):\n \n img = self.img[i]\n gt = self.gt[i]\n \n img = torch.from_numpy(img.astype(np.float32)).unsqueeze(0)\n gt = torch.from_numpy(gt.astype(np.float32)).long()\n\n return img,gt\n \nclass cardiacdata(Dataset):\n\n def __init__(self, img_dir = \"./Datasets/patient005/patient005_4d.nii.gz\", label_dir = r'./Datasets/patient005/patient005_frame01_gt.nii.gz'): \n dummy_img = sitk.GetArrayFromImage(sitk.ReadImage(img_dir))\n dummy_img = np.squeeze(dummy_img)\n # print(dummy_img.shape)\n dummy_img = crop_img(dummy_img)\n # print(dummy_img.shape)\n dummy_img = normalize_img(dummy_img)\n \n if not label_dir is None:\n dummy_gt = sitk.GetArrayFromImage(sitk.ReadImage(label_dir))\n # print(dummy_gt.shape)\n dummy_gt = np.squeeze(dummy_gt)\n dummy_gt = crop_img(dummy_gt)\n \n \n \n self.img = np.expand_dims(np.reshape(dummy_img,[dummy_img.shape[0]*dummy_img.shape[1],dummy_img.shape[2],dummy_img.shape[3]]),axis=0) \n if not label_dir is None:\n self.gt = np.expand_dims(np.reshape(dummy_gt,[dummy_gt.shape[0]*dummy_gt.shape[1],dummy_gt.shape[2],dummy_gt.shape[3]]),axis=0)\n else:\n self.gt = np.zeros(self.img.shape)\n self.len = self.img.shape[0]\n \n return\n \n def __len__(self):\n return self.len\n \n def __getitem__(self, i):\n \n img = self.img[i]\n gt = self.gt[i]\n \n img = torch.from_numpy(img.astype(np.float32)).unsqueeze(0)\n gt = torch.from_numpy(gt.astype(np.float32)).long()\n\n return img,gt\n \n \nif __name__ == \"__main__\":\n dataset = cardiacdata()\n loader = DataLoader(dataset, shuffle=False, batch_size=1)\n count=0\n for step, (img, gt) in enumerate(loader):\n count=count+1\n print('img shape is:', img.shape)\n print('gt shape is:', gt.shape)\n fig, axes = plt.subplots(1,2)\n pos = axes[0].imshow(img[0,0,2,])\n pos = axes[1].imshow(gt[0,2,])\n plt.show()\n #break\n " ]
[ [ "numpy.expand_dims", "numpy.reshape", "numpy.squeeze", "torch.utils.data.DataLoader", "matplotlib.pyplot.subplots", "numpy.concatenate", "numpy.std", "numpy.mean", "matplotlib.pyplot.show", "numpy.zeros" ] ]
AndreaCossu/ContinualLearning_RecurrentNetworks
[ "8cbc247f1f660f7acb94868696d128e538ad72f4", "8cbc247f1f660f7acb94868696d128e538ad72f4" ]
[ "clutils/datasets/QuickDraw.py", "clutils/models/utils.py" ]
[ "import torch\nimport numpy as np\nimport os\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom .utils import collate_sequences\n\n\nNORMALIZER = { # (mu, std) per class computed on the concatenation of both features (discarding the binary feature)\n 'hot dog': (1.3554527691145501, 55.15028414343622),\n 'palm tree': (-0.7063322505461493, 49.02700047706162),\n 'moon': (0.8036297226693486, 41.345375756324735),\n 'envelope': (4.900210171097034, 69.4196392054246),\n 'dumbbell': (2.0407119932197504, 47.695996108391235),\n 'microwave': (2.9699868328411974, 66.93104801889736),\n 'onion': (1.2284401861051968, 45.21653229074296),\n 'nail': (1.6172277953943177, 62.21706638258232),\n 'paper clip': (0.6436449511025123, 33.32139677497804),\n 'soccer ball': (0.6708907017116656, 39.52546034271634),\n 'drill': (1.1185769827821401, 48.722276882610934),\n 'telephone': (1.3498681969396034, 40.76261400934935),\n 'airplane': (1.0251489388319772, 53.19602656498733),\n 'dishwasher': (2.2191710394266084, 61.508456849155735),\n 'chair': (4.016188671169509, 62.53028260788498),\n 'grass': (3.376122464598659, 69.31003265138725),\n 'rhinoceros': (1.2215264458448767, 48.80834840225656),\n 'octopus': (0.8146148966002359, 40.89244147955804),\n 'cloud': (0.35621641218733063, 29.409365110585483),\n 'bicycle': (0.46389958129146036, 48.99489500128756),\n 'swan': (1.049680167563788, 42.94216649535794),\n 'picture frame': (1.5065118700885085, 63.253773000519175),\n 'shorts': (1.9229859470161206, 52.22414095061445),\n 'flying saucer': (1.5281540318557478, 55.091686319872025),\n 'basketball': (1.6894072481767088, 57.72410547176846),\n 'harp': (2.906289433329914, 78.44568624724737),\n 'beard': (0.9775846803866044, 40.10763763299041),\n 'binoculars': (0.808846681416587, 47.034367710374035),\n 'tiger': (0.9438875155470355, 50.66921493109194),\n 'book': (2.5133103399080188, 65.60820901501357),\n 'scissors': (0.6647841622339, 40.07199951607956),\n 'raccoon': (0.7915126973835728, 43.36239169880799),\n 'peanut': (0.5509739234166906, 30.524261515788336),\n 'wheel': (0.7686023820927692, 47.607169012136815),\n 'trombone': (1.0112428613309314, 52.411164111718705),\n 'diamond': (2.395434500084604, 59.73484161759297),\n 'parachute': (2.056040072916103, 60.77205525434674),\n 'tractor': (0.5855071624918279, 50.5522849539403),\n 'windmill': (0.24800006974356498, 52.12209721342569),\n 'alarm clock': (0.391438978240927, 41.44493991046053),\n 'clarinet': (1.2795783017970905, 49.905620294236705),\n 'spider': (0.6505395210719399, 51.18743252881025),\n 'violin': (0.8724565090414226, 52.533813964768754),\n 'clock': (1.6654012441543409, 37.33134444355261),\n 'tent': (3.3092329281631137, 79.47572994387069),\n 'belt': (2.3132169051670886, 71.13105919924993),\n 'map': (2.6555302484638714, 61.11029370697819),\n 'toe': (0.5471757615653022, 43.27192861865762),\n 'bread': (1.3686935317654665, 47.6839114556787),\n 'kangaroo': (0.6918159454175237, 35.99155678225584),\n 'camera': (1.4527253130110975, 49.336211227235296),\n 'duck': (1.598900790833744, 41.45077993986563),\n 'lipstick': (0.41066758960159977, 41.786372987299615),\n 'snowman': (0.14670998509400804, 31.624590642386174),\n 'pickup truck': (1.6892820330935685, 54.644954488199524),\n 'radio': (1.1157698308056494, 56.49502963911298),\n 'truck': (1.814018865712332, 55.76992610437815),\n 'train': (1.470463028668502, 77.63271694640828),\n 'teapot': (0.9014336302825292, 37.48169241933444),\n 'tree': (0.13337780967798976, 42.97342154517355),\n 'hourglass': (2.39448903480218, 52.622226862393084),\n 
'eyeglasses': (1.379818588483046, 52.57994649640675),\n 'church': (0.9630982672082059, 69.99862099910592),\n 'submarine': (0.9138035290673335, 56.77613283220326),\n 'couch': (2.283752727373058, 68.77383224311272),\n 'umbrella': (0.5170775226020248, 47.83678678400117),\n 'whale': (0.3497782843722267, 52.43513503159438),\n 'cooler': (2.19778540728888, 61.74770130955316),\n 'sword': (0.5910085971920176, 48.46440617862079),\n 'table': (4.542251159698462, 87.48848948789511),\n 'skull': (0.6570416475324352, 36.02607871443743),\n 'house': (2.654399100012597, 66.81448281800678),\n 'blackberry': (0.3455386008050086, 29.1600574174796),\n 'bush': (0.7558370448198207, 41.04289142315455),\n 'giraffe': (1.4011522715876905, 46.32335477059355),\n 'rainbow': (4.702348561309779, 82.07143165031478),\n 'yoga': (1.1423119096294918, 50.79902795898255),\n 'mailbox': (0.3511077577743624, 55.61495444057362),\n 'wristwatch': (1.0924273980760375, 49.96303380973288),\n 'The Eiffel Tower': (1.2008260944995623, 73.04798400687072),\n 'syringe': (1.6277984132013836, 56.22798342770764),\n 'bulldozer': (1.060340370028316, 50.385030079706375),\n 'door': (3.36173249421909, 66.2933191994613),\n 'zebra': (1.132649710524639, 52.459089632246396),\n 'beach': (2.263430578427388, 106.91064036288513),\n 'crown': (0.7879512551564102, 50.206077053610386),\n 'screwdriver': (1.1442268550285573, 46.07164154904856),\n 'bear': (0.9395651847291722, 36.986932048274426),\n 'sink': (0.9422909049727696, 47.54959424164138),\n 'teddy-bear': (0.7913359738933313, 33.36477019938705),\n 'square': (1.3275239412907043, 65.89863242901156),\n 'cruise ship': (2.15931097599974, 78.36615495337965),\n 'waterslide': (3.4486527614397833, 83.83125777723943),\n 'elbow': (4.092940205383508, 61.758770494053785),\n 'stereo': (1.8269654619223092, 58.475208066649714),\n 'sweater': (1.067301637554828, 44.59577281486724),\n 'bandage': (1.4032828202796717, 49.86169830574158),\n 'bat': (0.8121797269039484, 37.69212883824029),\n 'The Mona Lisa': (1.6676774598611082, 58.162407907625514),\n 'sea turtle': (0.7386565039500725, 46.86228560536563),\n 'butterfly': (0.4342721164650034, 37.484845221008726),\n 'mosquito': (0.6493471316616555, 40.10938957349605),\n 'tennis racquet': (0.015468185574502526, 62.24923783294656),\n 'tornado': (0.7822439181013964, 45.2077352961338),\n 'computer': (3.0811423630717107, 60.20403781306317),\n 'bridge': (3.679091358194862, 120.07641800536442),\n 'toothbrush': (1.2915788907996562, 53.22105425492547),\n 'baseball bat': (0.410479892106175, 39.02003924116569),\n 'bench': (4.462592927663926, 85.6302327587279),\n 'finger': (-0.6637118888775841, 49.09874846625699),\n 'canoe': (2.9733556427417493, 68.6835039501244),\n 'baseball': (1.6959011615443051, 45.45130310748645),\n 'circle': (-0.39852915378672893, 31.77419572565473),\n 'banana': (1.3562427804512358, 42.94349204337924),\n 'bathtub': (2.3570421946852544, 65.3192157735626),\n 'axe': (1.0293065652442999, 54.84964062528346),\n 'lantern': (1.1476541043730428, 53.67189040723054),\n 'birthday cake': (0.15146259492252578, 55.89568012892327),\n 'castle': (1.804799071214271, 63.20589225473029),\n 'wine bottle': (1.034291851931799, 44.04598147244387),\n 'ant': (0.9303194592264448, 34.43552547266363),\n 'The Great Wall of China': (4.285709330181438, 131.23951199298298),\n 'bee': (0.43095116254566934, 40.56855963179127),\n 'apple': (-0.44780125973592305, 30.208033668691396),\n 'arm': (1.7757119621507091, 46.692967793920644),\n 'asparagus': (-0.2421902384249924, 48.97218720603324),\n 'angel': 
(0.5489444327750316, 41.66381961171915),\n 'cup': (2.673919370605991, 43.54406248924784),\n 'carrot': (0.6945175048056408, 46.104020850556616),\n 'bucket': (1.7396654767172537, 48.828570427954205),\n 'animal migration': (2.6285542168388782, 61.28180224245095),\n 'cell phone': (1.9267526020713346, 49.38973568488984),\n 'van': (1.9173825658872794, 54.8721828825201),\n 'dolphin': (0.9714616061928398, 42.83044052150523),\n 'bowtie': (1.168151585565935, 37.61503592501492),\n 'campfire': (0.2534712087647997, 42.286814756524535),\n 'ceiling fan': (1.0603067359693852, 40.52738328774831),\n 'boomerang': (0.5759666273292099, 39.78957492087158),\n 'aircraft carrier': (1.5172469688772912, 78.7478229402662),\n 'cactus': (-0.1281463623029328, 42.27573114632624),\n 'cake': (0.31108565857076187, 56.322115673526696),\n 'anvil': (1.471075424663743, 48.99321880248113),\n 'toothpaste': (1.8461911264030182, 51.53740072123023),\n 'swing set': (3.971684529151281, 98.99892200987023),\n 'feather': (-0.42952206263561854, 53.55639949373167),\n 'flashlight': (1.9317251715822668, 62.79624045193533),\n 'garden hose': (1.5452934595615202, 53.713569777275175),\n 'camel': (1.5165348305653266, 35.07846843003865),\n 'calculator': (1.991161645112966, 50.142844727554575),\n 'diving board': (1.7300484119947224, 75.61560569527323),\n 'chandelier': (1.991040877029286, 50.65396442677625),\n 'helmet': (1.9722019205320098, 45.87749730234627),\n 'squirrel': (0.729042851521045, 35.3641639039348),\n 'ambulance': (1.0598312283596059, 55.875842882074),\n 'bottlecap': (1.5970585109209756, 40.01592713375047),\n 'hospital': (1.7313904919786411, 72.37806984815816),\n 'coffee cup': (1.32151623967879, 41.665383540075005),\n 'watermelon': (1.8482342559051477, 59.31958622930048),\n 'dresser': (2.396722754292761, 79.1225545952145),\n 'bed': (2.588378888585306, 78.08870505568636),\n 'bird': (1.5906829218142842, 41.059856184169284),\n 'cookie': (0.7048879723978447, 34.29958258051739),\n 'underwear': (3.027964069514147, 54.355597943207094),\n 'drums': (1.1572575727426198, 54.68602043565278),\n 'cat': (0.9684180598296738, 43.19493215282525),\n 'calendar': (2.495118096854286, 82.1800159400022),\n 'bracelet': (0.4661401948292038, 31.127130949231766),\n 'eraser': (2.3490401085702235, 56.643670114244784),\n 'dog': (0.8907946320439043, 38.505287852990726),\n 'barn': (2.2770830828592583, 77.75086163641558),\n 'spoon': (0.5421543550003102, 37.016180276322515),\n 'sun': (-0.2008690561101928, 57.798300559005334),\n 'toilet': (1.291036016063847, 40.288417166228925),\n 'backpack': (1.3079276772602353, 46.33461078978928),\n 'trumpet': (1.233316766684717, 47.840050217395266),\n 'frying pan': (1.1317137754492954, 42.27197781360748),\n 'blueberry': (0.3428165650102726, 29.923143234478975),\n 'toaster': (1.3159036268033921, 59.46381954781093),\n 'floor lamp': (-1.4045719348973986, 52.73112796615196),\n 'crocodile': (1.2462846638010021, 51.83360295588419),\n 'police car': (0.6314716475098945, 51.402397657264785),\n 'cow': (0.6487350495428166, 44.82200063524666),\n 'basket': (1.781348034990179, 61.40405101602184),\n 'cello': (1.4380096549620986, 59.481368251629206),\n 'golf club': (2.935274820103259, 47.944997493610416),\n 'school bus': (1.3202131289388477, 61.70753264839142),\n 'hockey puck': (0.725588239742589, 48.55963543134594),\n 'fence': (3.8660243770815614, 92.36222788620427),\n 'donut': (0.431402194475543, 32.222374599013726),\n 'goatee': (1.2211961416317247, 39.81077215140121),\n 'traffic light': (1.269260032432163, 44.30942006032888),\n 'hamburger': 
(1.4103828007350085, 49.04022894395681),\n 'ear': (1.9563928536834947, 34.3747704500531),\n 'compass': (0.8636275744036599, 38.906947603746346),\n 'broccoli': (-0.08805269427735608, 30.880695648320078),\n 'skyscraper': (1.3477313197584702, 87.73974365488579),\n 'fan': (0.5595090068208328, 42.26975493031441),\n 'hot air balloon': (1.0010255829235684, 45.01559229242698),\n 'mountain': (5.349497596465423, 69.73739652862577),\n 'fork': (0.21995268515715857, 43.66291957421616),\n 'face': (1.1847102417517064, 41.81747854722619),\n 'crab': (0.5500211063457824, 48.30558365265961),\n 'ice cream': (0.5645385757011395, 41.72357855932428),\n 'foot': (1.6352285029716924, 40.86466847411941),\n 'hat': (2.1269765754849357, 53.181061994837336),\n 'candle': (-0.9566338163648833, 46.30537462785261),\n 'flip flops': (1.1195172002513105, 45.28787295602699),\n 'hammer': (0.40690889202283986, 45.31354440860368),\n 'guitar': (0.9118308447368665, 58.627968076179016),\n 'brain': (0.5667801625156502, 39.94893006675094),\n 'stove': (1.2695451153311437, 56.13115551721316),\n 'headphones': (1.7442579010033754, 38.05663003234409),\n 'flamingo': (1.3329066566304946, 44.20478550977875),\n 'flower': (0.014788800722293086, 28.686447255310085),\n 'bus': (1.5110163683563511, 65.58525727312637),\n 'hot tub': (0.9262199087425361, 63.37602990315963),\n 'elephant': (1.0286360401485168, 42.29328387209706),\n 'fire hydrant': (0.4353600099500871, 48.49174159770318),\n 'laptop': (2.5420362830209355, 63.093568635534155),\n 'leaf': (-0.07888685459428697, 51.531397540382116),\n 'potato': (0.7248796777877287, 36.04373128693473),\n 'hockey stick': (2.560198275283893, 47.75516557446046),\n 'lighter': (-0.10645657100081249, 38.600148168238576),\n 'hexagon': (2.7288170353096675, 50.79748328406929),\n 'garden': (0.881398058547382, 59.301002560708866),\n 'marker': (1.4847281646438588, 50.021490600998504),\n 'keyboard': (2.8496015722739236, 81.38936435354776),\n 'camouflage': (0.8524647599170719, 65.65432278791238),\n 'knee': (5.3541853695693575, 60.225209719801974),\n 'sheep': (1.2468686657122494, 35.19467195151128),\n 'microphone': (0.3006266208385552, 46.783442715555715),\n 'mushroom': (0.28405131561550195, 40.671965829362236),\n 'light bulb': (0.3093205629583717, 35.25819445171456),\n 'hand': (0.7429242999868996, 34.70475212985948),\n 'key': (0.7406380633244096, 34.13758650534517),\n 'house plant': (-0.4396176672108764, 40.515632771810296),\n 'eye': (0.8606006296728399, 44.889207403048424),\n 'matches': (0.3485948924638904, 47.42024782911991),\n 'broom': (2.9233557704577193, 49.52062851559808),\n 'knife': (1.4292202706092547, 51.01808033723662),\n 'crayon': (1.467668727844571, 51.82316360295973),\n 'ocean': (7.872452229036218, 89.99111246191521),\n 'dragon': (0.8266093687680877, 49.41364315921484),\n 'leg': (5.117580228531927, 54.01711580361819),\n 'horse': (0.9246973774561026, 48.65827974249926),\n 'zigzag': (9.770917367360767, 61.744673036996616),\n 'car': (1.1106827823007763, 47.60058589694208),\n 'grapes': (0.6046526027097275, 27.16306317679192),\n 'lightning': (4.090431090680993, 57.03172069825947),\n 'moustache': (1.7875824399413591, 37.731677498630745),\n 'mouth': (2.76090978291076, 57.20373539326289),\n 'vase': (0.5528729482101566, 36.996243257356014),\n 'fish': (0.8878609523273818, 44.34932077221152),\n 'string bean': (1.346485501392611, 54.7312484146683),\n 'lighthouse': (0.4274423658693314, 75.81546755799378),\n 'ladder': (5.90632648589332, 110.16555003310557),\n 'television': (1.3151946885305383, 62.90537952277926),\n 
'helicopter': (0.7111156159770702, 56.6546344981718),\n 'pillow': (2.0992806701392936, 55.274535278488294),\n 'pencil': (2.0345830706124053, 62.90446034037889),\n 'rollerskates': (2.0053135688983006, 39.31457668947572),\n 'jail': (5.661515872939487, 115.47255551897983),\n 'mermaid': (0.3187352763659362, 39.8221589482459),\n 'jacket': (2.0929497013270537, 50.6087533539712),\n 'megaphone': (1.8135468059177202, 54.66219701027781),\n 'nose': (4.435118108240006, 36.01419720778613),\n 'pants': (1.4927018991320877, 55.47801110072461),\n 'octagon': (2.0144474110553916, 49.61164954802588),\n 'pizza': (0.9106006910867426, 49.75334623210151),\n 'passport': (2.09209268126368, 53.80930291521799),\n 'pool': (2.06494328488252, 67.72608882496336),\n 'motorbike': (0.4038001637130562, 46.94203574972685),\n 'snake': (1.5154800788642753, 49.350623204522535),\n 'pond': (0.7752730687547197, 47.62409950756826),\n 'frog': (0.8874821595962438, 39.61840650901404),\n 'pig': (0.47576581658267675, 39.5924494951546),\n 'penguin': (1.0164857569517498, 40.88730060878002),\n 'cannon': (0.8927868329478531, 53.019935221920896),\n 'parrot': (1.6070485082553567, 43.38710309821747),\n 'lobster': (0.5829596663716866, 42.78511651754868),\n 'saw': (1.6178343188617499, 43.19552103419441),\n 'strawberry': (0.6209739512011668, 32.08453043500838),\n 'firetruck': (1.125536302973774, 65.91057171556372),\n 'speedboat': (2.0848389958987257, 76.42986457816829),\n 'popsicle': (0.4813981088599971, 42.49229183387311),\n 'hurricane': (0.7079895622313991, 61.715710494552816),\n 'see saw': (1.8249850934378673, 70.89383197689017),\n 'saxophone': (0.9072383454069756, 36.470803735437904),\n 'mug': (2.5296236017401257, 42.26283334121334),\n 'piano': (2.6469411517060806, 73.27448246359215),\n 'mouse': (0.8020204927469491, 43.836228689128035),\n 'power outlet': (2.071476276483809, 46.822370189887785),\n 'hedgehog': (0.4703510415238984, 45.92192258266138),\n 'oven': (1.8548425634903463, 62.43067850281194),\n 'shoe': (1.297356215372919, 41.93847714957883),\n 'rifle': (2.5223233995449474, 60.73555429659974),\n 'roller coaster': (2.6065332991832584, 86.95567387367467),\n 'peas': (0.7749159834453123, 42.94847025647788),\n 'lion': (0.4463371384240275, 34.510210963204415),\n 'rake': (3.442498762575747, 57.38005406297777),\n 'postcard': (3.700937086574, 69.8261672011201),\n 'sock': (1.9223557134218592, 43.2687682421636),\n 'purse': (1.6872172724499956, 48.85082993380252),\n 'sleeping bag': (1.2484033851490541, 52.138238912603775),\n 'skateboard': (2.4819607493229663, 53.19362309156825),\n 'necklace': (2.392666309866489, 41.3064841582455),\n 'stairs': (5.195938168639603, 47.15470516213574),\n 'lollipop': (0.10920444361594842, 38.89025105370695),\n 'snowflake': (2.3415722082063986, 68.96721342968107),\n 'rabbit': (0.9078200152038035, 34.75862482451542),\n 'owl': (1.2457620241823235, 42.73803624793326),\n 'shovel': (1.970015817486029, 45.419236670608626),\n 'pear': (-0.45220059964010495, 30.843347488001527),\n 'remote control': (1.1358869210694837, 44.83511889242796),\n 'star': (0.3626996748657054, 52.65011227641426),\n 'scorpion': (0.4161827603069684, 38.88321413686467),\n 'washing machine': (1.5518183007862645, 51.91417194144562),\n 'monkey': (0.9748166731813579, 38.60787650590758),\n 'pineapple': (0.562007915664679, 43.7000843939721),\n 'sandwich': (1.6847535599541337, 57.542891294247035),\n 'shark': (1.272828952833183, 49.334895742299615),\n 'sailboat': (1.309450897368411, 66.09322028103158),\n 'steak': (0.8908929135892587, 46.82398060648129),\n 
'stethoscope': (2.300526882061146, 43.63511505624273),\n 'wine glass': (2.1753360642422095, 42.95333738304328),\n 'smiley face': (1.4208837631558537, 43.864342591767816),\n 'streetlight': (-1.4343035375659503, 57.70810758721286),\n 'squiggle': (5.131557337201909, 48.02532522224354),\n 'stop sign': (1.3327274061718097, 42.78360537094287),\n 'line': (40.59167311123959, 112.02341955570965),\n 'pliers': (0.796279030471497, 45.67250007650481),\n 'paint can': (1.3512234721466652, 47.35796924253278),\n 'panda': (0.5475608600999033, 33.69643785103632),\n 'paintbrush': (0.20347385695100456, 47.341806442823824),\n 't-shirt': (0.9831120778329658, 42.21114938247829),\n 'fireplace': (1.3117628588460688, 61.01045131707993),\n 'river': (5.610367142280469, 117.56790294876312),\n 'snorkel': (1.2366543753832537, 43.709326082973476),\n 'rain': (3.6461954118834403, 61.31247784406768),\n 'triangle': (1.1218274781431306, 64.34926695455631),\n 'suitcase': (1.9098774305372213, 57.805580971303506),\n 'stitches': (4.142179481399166, 79.85573072340479),\n 'tooth': (0.7350361072423909, 34.97655580063578),\n 'snail': (0.3764966115255877, 34.91367713422217),\n 'spreadsheet': (4.333452826793876, 134.8852997594341)\n}\n\nclass QuickDrawDataset(torch.utils.data.Dataset):\n def __init__(self, data_dict, normalizers, task_vector=None):\n\n self.data_dict = data_dict\n self.normalizers = normalizers\n self.task_vector = task_vector\n\n self.patterns_per_class = [len(v) for k,v in self.data_dict.items()]\n\n self.min_class_id = min(list(self.data_dict.keys()))\n\n def __getitem__(self, idx):\n # select class based on idx\n class_id = None\n curr_idx = idx\n ppc = [0] + self.patterns_per_class\n\n for i in range(1, len(ppc)):\n if curr_idx < ppc[i]:\n class_id = self.min_class_id + (i - 1)\n break\n elif curr_idx == ppc[i]:\n curr_idx -= ppc[i]\n class_id = self.min_class_id + i\n break\n else:\n curr_idx -= ppc[i]\n\n if class_id is None:\n raise IndexError('Out of range when indexing QuickDraw!')\n\n # normalize\n x_cur = torch.from_numpy(self.data_dict[class_id][curr_idx]).float() #/ self.normalizers[class_id][1]\n y_cur = torch.tensor(class_id).long()\n\n if self.task_vector is not None:\n x_cur = torch.cat((self.task_vector.unsqueeze(0).repeat(x_cur.size(0),1), x_cur), dim=1)\n\n return x_cur, y_cur\n\n def __len__(self):\n return sum(self.patterns_per_class)\n\n\nclass CLQuickDraw():\n def __init__(self, root, train_batch_size, test_batch_size,\n len_task_vector=0, task_vector_at_test=False):\n\n self.root = root\n self.train_batch_size = train_batch_size\n self.test_batch_size = test_batch_size\n\n self.len_task_vector = len_task_vector\n self.task_vector_at_test = task_vector_at_test\n\n self.dataloaders = []\n\n self.current_class_id = 0\n\n def _load_data(self, classes):\n train_dict, test_dict, normalizer = {}, {}, {}\n for classname in classes:\n feature = np.load(os.path.join(self.root, f\"{classname}.npz\"), encoding='latin1', allow_pickle=True)\n train, test = feature['train'], feature['test'] # discard feature['valid'] because we don't need it\n train_dict[self.current_class_id] = train\n test_dict[self.current_class_id] = test\n normalizer[self.current_class_id] = NORMALIZER[classname]\n self.current_class_id += 1\n\n return train_dict, test_dict, normalizer\n\n def get_task_loaders(self, classes=None, task_id=None):\n\n if classes is not None:\n train, test, normalizer = self._load_data(classes)\n\n if self.len_task_vector > 0:\n task_vector = torch.zeros(self.len_task_vector).float()\n 
task_vector[len(self.dataloaders)] = 1.\n else:\n task_vector = None\n\n train_dataset = QuickDrawDataset(train, normalizer,\n task_vector=task_vector)\n test_dataset = QuickDrawDataset(test, normalizer,\n task_vector=task_vector if self.task_vector_at_test else None)\n\n train_batch_size = len(train_dataset) if self.train_batch_size == 0 else self.train_batch_size\n test_batch_size = len(test_dataset) if self.test_batch_size == 0 else self.test_batch_size\n\n train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, drop_last=True,\n collate_fn=collate_sequences)\n test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=False, drop_last=True,\n collate_fn=collate_sequences)\n\n self.dataloaders.append([train_loader, test_loader])\n\n return train_loader, test_loader\n\n elif task_id is not None:\n return self.dataloaders[task_id]", "import torch\nimport torch.nn as nn\n\n\ndef expand_output_layer(layer, n_units):\n \"\"\"\n Expand layer wiht n_units more.\n layer can be either a Linear layer or a (weight, bias) tuple of Parameters.\n\n Return a new torch.nn.Linear\n \"\"\"\n\n if isinstance(layer, tuple):\n weight = layer[0]\n bias = layer[1]\n elif isinstance(layer, nn.Linear):\n weight = layer.weight\n bias = layer.bias\n else:\n raise ValueError(f\"layer must be torch.nn.Linear or tuple of Parameters. Got {type(layer)}.\")\n\n with torch.no_grad():\n # recompute output size\n old_output_size = weight.size(0)\n hidden_size = weight.size(1)\n new_output_size = old_output_size + n_units\n\n # copy old output layer into new one\n new_layer = nn.Linear(hidden_size, new_output_size, bias=True).to(weight.device)\n new_layer.weight.data[:old_output_size, :] = weight.clone()\n new_layer.bias.data[:old_output_size] = bias.clone()\n\n return new_layer\n\n\ndef sequence_to_flat(x):\n n_dims = len(x.size())\n\n if n_dims > 2:\n return x.view(x.size(0), -1)\n\n return x\n\n\ndef init_weights(model, initw=None):\n if initw is None:\n def initw(m):\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0.01)\n model.apply(initw)\n\n\ndef zero_weight(m):\n nn.init.constant_(m.weight, 0.)\n nn.init.constant_(m.bias, 0.)\n\n\ndef compute_conv_out_shape(Win, Hin, padding, dilation, kernel_size, stride):\n return (\n int(((Win + 2*padding - dilation * (kernel_size - 1) - 1) / stride) + 1),\n int(((Hin + 2*padding - dilation * (kernel_size - 1) - 1) / stride) + 1)\n )\n\ndef compute_conv_out_shape_1d(window_size, padding, dilation, kernel_size, stride):\n return int(((window_size + 2*padding - dilation * (kernel_size - 1) - 1) / stride) + 1)" ]
[ [ "torch.zeros", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.tensor" ], [ "torch.nn.init.constant_", "torch.nn.Linear", "torch.no_grad", "torch.nn.init.xavier_uniform_" ] ]
zawecha1/arXiv2020-RIFE
[ "8eb622a150bd3bf0e773033cbba4728e64340ba1" ]
[ "dataset.py" ]
[ "import cv2\nimport ast\nimport torch\nimport numpy as np\nimport random\nfrom torch.utils.data import DataLoader, Dataset\n\ncv2.setNumThreads(1)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nclass VimeoDataset(Dataset):\n def __init__(self, dataset_name, batch_size=32):\n self.batch_size = batch_size\n self.dataset_name = dataset_name\n self.load_data()\n self.h = 256\n self.w = 448\n self.data_root = 'vimeo_triplet'\n self.image_root = os.path.join(self.data_root, 'sequences')\n train_fn = os.path.join(self.data_root, 'tri_trainlist.txt')\n test_fn = os.path.join(self.data_root, 'tri_testlist.txt')\n with open(train_fn, 'r') as f:\n self.trainlist = f.read().splitlines()\n with open(test_fn, 'r') as f:\n self.testlist = f.read().splitlines() \n\n def __len__(self):\n return len(self.meta_data)\n\n def load_data(self):\n if self.dataset_name == 'train':\n self.meta_data = self.trainlist\n else:\n self.meta_data = self.testlist\n\n def aug(self, img0, gt, img1, h, w):\n ih, iw, _ = img0.shape\n x = np.random.randint(0, ih - h + 1)\n y = np.random.randint(0, iw - w + 1)\n img0 = img0[x:x+h, y:y+w, :]\n img1 = img1[x:x+h, y:y+w, :]\n gt = gt[x:x+h, y:y+w, :]\n return img0, gt, img1\n\n def getimg(self, index):\n imgpath = self.meta_data[index]\n imgpaths = [imgpath + '/im1.png', imgpath + '/im2.png', imgpath + '/im3.png']\n\n # Load images\n img0 = cv2.imread(imgpaths[0])\n gt = cv2.imread(imgpaths[1])\n img1 = cv2.imread(imgpaths[2])\n return img0, gt, img1\n \n def __getitem__(self, index): \n img0, gt, img1 = self.getimg(index)\n if self.dataset_name == 'train':\n img0, gt, img1 = self.aug(img0, gt, img1, 224, 224)\n if random.uniform(0, 1) < 0.5:\n img0 = img0[:, :, ::-1]\n img1 = img1[:, :, ::-1]\n gt = gt[:, :, ::-1]\n if random.uniform(0, 1) < 0.5:\n img0 = img0[::-1]\n img1 = img1[::-1]\n gt = gt[::-1]\n if random.uniform(0, 1) < 0.5:\n img0 = img0[:, ::-1]\n img1 = img1[:, ::-1]\n gt = gt[:, ::-1]\n if random.uniform(0, 1) < 0.5:\n tmp = img1\n img1 = img0\n img0 = tmp\n img0 = torch.from_numpy(img0.copy()).permute(2, 0, 1)\n img1 = torch.from_numpy(img1.copy()).permute(2, 0, 1)\n gt = torch.from_numpy(gt.copy()).permute(2, 0, 1)\n return torch.cat((img0, img1, gt), 0)\n" ]
[ [ "torch.cat", "torch.cuda.is_available", "numpy.random.randint" ] ]
vessemer/LungCancerDetection
[ "b1810c608896406abf8964298c0dd9ccf4816008" ]
[ "Scripts/extract_features.py" ]
[ "import sys\nsys.path.append('../')\nsys.path.append('../support/')\nfrom scipy.ndimage.measurements import label\nfrom scipy.ndimage import interpolation\nfrom time import time\nfrom glob import glob\nimport timeit\nfrom os.path import join, basename, isfile\nfrom tqdm import tqdm\nfrom paths import *\nfrom ct_reader import *\nimport dicom\nfrom scipy.misc import imresize\nfrom multiprocessing import Pool\nimport pickle\nfrom paths import *\nfrom scipy.ndimage import morphology\n# import seaborn as sns\nimport pandas as pd\nfrom numpy import *\n\n\n# In[2]:\n\ndef read_ct(path, ret_xy_spacing=False):\n patient = read_ct_scan(path)\n image = get_pixels_hu(patient)\n image[image == image[0,0,0]] = 0\n \n if ret_xy_spacing:\n try:\n return image, patient[0].PixelSpacing[0]\n except AttributeError:\n return image, scan.GetSpacing()[0]\n \n return image\n\n\n# In[3]:\n\ndef label_nodules(enhanced):\n isolated = enhanced.copy()\n isolated[(isolated == 4)\n |(isolated == 2)\n |(isolated == 6)] = 0\n isolated, _ = label(isolated)\n\n vascular = enhanced.copy()\n vascular[(vascular == 1)\n |(vascular == 2)\n |(vascular == 3)] = 0\n vascular, _ = label(vascular)\n\n plural = enhanced.copy()\n plural[(plural == 1)\n |(plural == 4)\n |(plural == 5)] = 0\n plural, _ = label(plural)\n return isolated, vascular, plural\n\n\n# In[4]:\n\ndef mask_features(mask,sp_mask):\n volumes = bincount(mask.flatten())\n zone_volumes = bincount(sp_mask.flatten())\n ans = dict()\n for i in range(16):\n try:\n ans['volume' + str(i)] = volumes[i]\n except:\n ans['volume' + str(i)] = 0 \n for i in range(7):\n ans['z_volume' + str(i)] = zone_volumes[i]\n ans['l//r'] = volumes[1] / volumes[2] if(volumes[2]) else 0.0\n ans['lungoverlap//l'] = volumes[3] / volumes[1] if(volumes[1]) else 0.0\n ans['br_overlap//l'] = volumes[5] / volumes[1] if(volumes[1]) else 0.0\n ans['br_overlap//r'] = volumes[6] / volumes[2] if(volumes[2]) else 0.0\n ans['tr_overlap//l'] = volumes[9] / volumes[1] if(volumes[1]) else 0.0\n ans['tr_overlap//r'] = volumes[10] / volumes[2] if(volumes[2]) else 0.0\n ans['br_tr_overlap//tr'] = volumes[12] / volumes[8] if(volumes[8]) else 0.0\n ans['z_volume_1//2'] = zone_volumes[1] / zone_volumes[2]\n ans['z_volume_2//3'] = zone_volumes[2] / zone_volumes[3]\n ans['z_volume_4//5'] = zone_volumes[4] / zone_volumes[5]\n ans['z_volume_5//6'] = zone_volumes[5] / zone_volumes[6]\n return ans\n\n\n# In[5]:\n\ndef if_left(mask):\n return in1d(mask,[1,3,5,7,9,11,13,15]).reshape(mask.shape)\n \ndef if_right(mask):\n return in1d(mask,[2,3,6,7,10,11,14,15]).reshape(mask.shape)\n\ndef split_mask(mask):\n mn1 = where(if_left(mask))[0].min()\n mx1 = where(if_left(mask))[0].max()\n mn2 = where(if_right(mask))[0].min()\n mx2 = where(if_right(mask))[0].max()\n height1 = int((mx1-mn1)/3.0)\n height2 = int((mx2-mn2)/3.0)\n mask_zones = zeros(mask.shape)\n mask_zones[mn1:mn1+height1,:,:] = 1 \n mask_zones[mn1+height1:mn1+2*height1,:,:] = 2\n mask_zones[mn1+2*height1:mx1,:,:] = 3\n mask_l = if_left(mask)*mask_zones\n mask_zones = zeros(mask.shape)\n mask_zones[mn2:mn2+height2,:,:] = 4\n mask_zones[mn2+height2:mn2+2*height2,:,:] = 5\n mask_zones[mn2+2*height2:mx2,:,:] = 6\n return (mask_l + if_right(mask) * mask_zones).astype('int8')\n\n\n# In[6]:\n\ndef merge(enhanced, mask):\n return 8 * mask + enhanced\n\n\n# In[7]:\n\ndef collect_stats(enhanced,mask,sp_mask):\n prev_time = time()\n l_enhanced = enhanced * if_left(mask)\n r_enhanced = enhanced * if_right(mask)\n \n \n# print('split_mask ',time()-prev_time)\n# prev_time = time()\n 
\n enh_areas = bincount(enhanced.flatten())[1:]\n enh_l_areas = bincount(l_enhanced.flatten())[1:]\n enh_r_areas = bincount(r_enhanced.flatten())[1:]\n \n enh_areas_zones = list()\n for i in range(1,7):\n enh_areas_zones.append(bincount((enhanced * (sp_mask == i)).flatten())[1:])\n# enh_l_areas = concatenate((enh_areas_zones[1][enh_areas_zones[1]>0],\n# enh_areas_zones[2][enh_areas_zones[2]>0],\n# enh_areas_zones[0][enh_areas_zones[0]>0]))\n# enh_r_areas = concatenate((enh_areas_zones[4][enh_areas_zones[4]>0],\n# enh_areas_zones[5][enh_areas_zones[5]>0],\n# enh_areas_zones[3][enh_areas_zones[3]>0]))\n# enh_areas = concatenate((enh_l_areas,enh_r_areas))\n# print('bincounts ',time()-prev_time)\n# prev_time = time()\n \n if not enh_areas.shape[0]:\n max_areas = dict()\n for i in range(5):\n max_areas['max'+str(i)] = 0\n max_areas['max_l'+str(i)] = 0\n max_areas['max_r'+str(i)] = 0\n zone_feats = dict()\n for i in range(6):\n zone_feats['amoun_z' + str(i+1)] = 0\n zone_feats['sumarea_z' + str(i+1)] = 0\n enh_comps_after_dil = dict()\n for i in range(20):\n enh_comps_after_dil['comps_dil'+str(i)] = 0\n enh_comps_after_dil['comps_dil_l'+str(i)] = 0\n enh_comps_after_dil['comps_dil_r'+str(i)] = 0\n ans = dict((('areas', 0), ('amoun', 0), \n ('mean', 0), ('std', 0), ('median', 0), \n ('mean_not_min', 0), \n ('median_not_min', 0), \n ('modes', [0] * 9)))\n ans.update(max_areas)\n ans.update(enh_comps_after_dil)\n ans.update(mask_features(mask,sp_mask))\n ans.update(zone_feats)\n return ans\n \n enh_amoun = enh_areas[enh_areas > 0].shape[0]\n enh_amoun_l = enh_l_areas[enh_l_areas > 0].shape[0]\n enh_amoun_r = enh_r_areas[enh_r_areas > 0].shape[0]\n enh_amoun_zones = [x[x > 0].shape[0] for x in enh_areas_zones]\n enh_area_sum_zones = [x[x > 0].sum() for x in enh_areas_zones]\n \n zone_feats = dict()\n for i in range(6):\n zone_feats['amoun_z' + str(i+1)] = enh_amoun_zones[i]\n zone_feats['sumarea_z' + str(i+1)] = enh_area_sum_zones[i]\n \n enh_mean = mean(enh_areas)\n enh_std = std(enh_areas)\n enh_sort_areas = sorted(enh_areas[enh_areas > 0],reverse=True)\n enh_sort_areas_l = sorted(enh_l_areas[enh_l_areas > 0],reverse=True)\n enh_sort_areas_r = sorted(enh_r_areas[enh_r_areas > 0],reverse=True)\n max_areas = dict()\n for i in range(5):\n try:\n max_areas['max'+str(i)] = enh_sort_areas[i]\n except:\n max_areas['max'+str(i)] = 0 \n try:\n max_areas['max_l'+str(i)] = enh_sort_areas_l[i]\n except:\n max_areas['max_l'+str(i)] = 0 \n try:\n max_areas['max_r'+str(i)] = enh_sort_areas_r[i]\n except:\n max_areas['max_l'+str(i)] = 0\n \n enh_median = median(enh_areas)\n enh_mean_not_min = enh_areas[enh_areas != enh_areas.min()].mean()\n enh_median_not_min = median(enh_areas[enh_areas != enh_areas.min()])\n modes = [2, 3, 4, 5, 6, 9, 12, 19, 37, 1e7]\n enh_modes = [sum((enh_areas >= modes[i - 1]) \n & (modes[i] > enh_areas))\n for i in range(1, len(modes))]\n \n# print('stats ',time()-prev_time)\n# prev_time = time()\n \n img = enhanced.copy()\n enh_comps_after_dil = dict()\n iter_num = 1\n for i in range(iter_num):\n labeled,label_num = label(img)\n enh_comps_after_dil['comps_dil'+str(i)] = label_num\n enh_comps_after_dil['comps_dil_l'+str(i)] = len(unique(labeled*if_left(mask)))\n enh_comps_after_dil['comps_dil_r'+str(i)] = len(unique(labeled*if_right(mask)))\n img = morphology.binary_dilation(img,structure=ones((5,5,5)))\n labeled,label_num = label(img)\n enh_comps_after_dil['comps_dil'+str(iter_num)] = label_num\n enh_comps_after_dil['comps_dil_l'+str(iter_num)] = len(unique(labeled*if_left(mask)))\n 
enh_comps_after_dil['comps_dil_r'+str(iter_num)] = len(unique(labeled*if_right(mask)))\n\n# print('dil ',time()-prev_time)\n# prev_time = time()\n \n \n ans = dict((('areas', sum(enh_areas)), ('amoun', enh_amoun), \n ('mean', enh_mean), ('std', enh_std), ('median', enh_median), \n ('mean_not_min', enh_mean_not_min), \n ('median_not_min', enh_median_not_min),\n ('modes', enh_modes)))\n ans.update(max_areas)\n ans.update(enh_comps_after_dil)\n ans.update(mask_features(mask,sp_mask))\n ans.update(zone_feats)\n\n# print('mask_feats ',time()-prev_time)\n# prev_time = time()\n \n return ans\n\n\n# In[9]:\n\ndef operate(path):\n try:\n enhanced = load(join(PATH['STAGE_ENHANCED'], \n path + '.npy'))\n mask = load(join(PATH['STAGE_MASKS'], \n path + '.npy'))\n\n zoomfactor = [w / float(f) for w, f in zip(enhanced.shape, mask.shape)]\n mask = interpolation.zoom(mask, zoom=zoomfactor, order = 0, mode = 'nearest')\n isolated, vascular, plural = label_nodules(enhanced)\n sp_mask = split_mask(mask)\n save(join(PATH['STAGE_MASKS'], path), merge(enhanced,mask))\n return (path, collect_stats(isolated,mask,sp_mask)),\\\n (path, collect_stats(vascular,mask,sp_mask)),\\\n (path, collect_stats(plural,mask,sp_mask))\n except:\n pass\n return ((path, None), (path, None), (path, None))\n\n\n# In[ ]:\n\npatients = set([basename(path)[:32] for path in glob(join(PATH['STAGE_ENHANCED'], '*'))])\npatients = patients.difference(pickle.load(open(join(PATH['STAGE_MASKS'], 'still_erroneus_ncrash'), 'rb')))\nstats = list()\nCPU = 8\n#print('Start. ', len(patients))\nwith Pool(CPU) as pool:\n stats = pool.map(operate, list(patients))\n \n#print('Done.')\npath = join(PATH['STAGE_MASKS'], 'DATAFRAMES')\npickle.dump(stats, open(join(path, 'merged_stats_100'), 'wb'))\n\n" ]
[ [ "scipy.ndimage.interpolation.zoom", "scipy.ndimage.measurements.label" ] ]
takkii/Pylean
[ "d51595e2788e946d9a2492bbe7131e4ada19062f" ]
[ "analyze/ruby-dic3_ana.py" ]
[ "from os.path import expanduser\n\nimport dask.dataframe as dd\nimport os\nimport pandas as pd\nfrom pandas import DataFrame\n\n# ------------------------------- KEYWORD -------------------------------------------------------------------------\n\n\nhome = expanduser(\"~\")\n\nd1 = os.path.expanduser(\"~/.cache/dein/repos/github.com/takkii/go_straight/dict/\")\nd2 = os.path.expanduser(\"~/.config/nvim/.cache/dein/repos/github.com/takkii/go_straight/dict/\")\nd3 = os.path.expanduser(\"~/.config/nvim/repos/github.com/takkii/go_straight/dict/\")\n\nif os.path.isdir(d1):\n ruby_method = open(os.path.expanduser(\n \"~/.cache/dein/repos/github.com/takkii/go_straight/dict/ruby_dict\"))\nelif os.path.isdir(d1):\n ruby_method = open(os.path.expanduser(\n \"~/.config/nvim/repos/github.com/takkii/go_straight/dict/ruby_dict\"))\nelif os.path.isdir(d3):\n ruby_method = open(os.path.expanduser(\n \"~/.config/nvim/.cache/dein/repos/github.com/takkii/go_straight/dict/ruby_dict\"))\nelse:\n print(\"Please, Check the path of go_straight.\")\n\nindex_ruby = ruby_method.readlines()\nSeri = pd.Series(index_ruby)\nsort_ruby = Seri.sort_index()\ndata_ruby = DataFrame(sort_ruby)\nddf = dd.from_pandas(data=data_ruby, npartitions=1)\ndata = ddf.compute()\nprint(data)\nruby_method.close()\n\n# ------------------------------- KEYWORD -------------------------------------------------------------------------\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
zilbermanor/functions
[ "a1ef1411089314b8a264a70077a64ea77ccc0558" ]
[ "sklearn_classifier/sklearn_classifier.py" ]
[ "import json\nimport os\nfrom importlib import import_module\nfrom inspect import getfullargspec, FullArgSpec\nfrom cloudpickle import dump, load\nimport itertools\n\nimport sklearn\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.utils.testing import all_estimators\nfrom sklearn.datasets import make_classification\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nfrom typing import Union, List, Any, Optional\nfrom mlrun.execution import MLClientCtx\nfrom mlrun.datastore import DataItem\nfrom mlrun.artifacts import PlotArtifact\n\nskversion = sklearn.__version__\n\nimport warnings\n\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\n\n\ndef _gcf_clear(plt):\n \"\"\"Utility to clear matplotlib figure\n\n Run this inside every plot method before calling any matplotlib\n methods\n\n :param plot: matloblib figure object\n \"\"\"\n plt.cla()\n plt.clf()\n plt.close()\n\n\ndef _create_class(pkg_class: str):\n \"\"\"Create a class from a package.module.class string\n\n :param pkg_class: full class location,\n e.g. \"sklearn.model_selection.GroupKFold\"\n \"\"\"\n splits = pkg_class.split(\".\")\n clfclass = splits[-1]\n pkg_module = splits[:-1]\n class_ = getattr(import_module(\".\".join(pkg_module)), clfclass)\n return class_\n\ndef _create_function(pkg_func: list):\n \"\"\"Create a function from a package.module.function string\n\n :param pkg_func: full function location,\n e.g. \"sklearn.feature_selection.f_classif\"\n \"\"\"\n splits = pkg_func.split(\".\")\n pkg_module = \".\".join(splits[:-1])\n cb_fname = splits[-1]\n pkg_module = __import__(pkg_module, fromlist=[cb_fname])\n function_ = getattr(pkg_module, cb_fname)\n return function_\n\ndef get_model_configs(\n my_models: Union[str, List[str]],\n class_key = \"CLASS\",\n fit_key = \"FIT\",\n meta_key = \"META\",\n) -> Union[dict, List[dict]]:\n \"\"\"build sklearn model configuration parameters\n \n Take (full) class name of an scikit-learn model \n and retrieve its `class` and `fit` parameters and\n their default values.\n \n Also returns some useful metadata values for the class\n \"\"\"\n # get a list of all sklearn estimators\n estimators = all_estimators()\n def _get_estimator(pkg_class):\n \"\"\"find a specific class in a list of sklearn estimators\"\"\"\n my_class = pkg_class.split('.')[-1]\n return list(filter(lambda x: x[0] == my_class, estimators))[0]\n\n # find estimators corresponding to my_models list\n my_estimators = []\n my_models = [my_models] if isinstance(my_models, str) else my_models\n for model in my_models:\n estimator_name, estimator_class = _get_estimator(model)\n my_estimators.append((estimator_name, estimator_class))\n\n # get class and fit specs\n estimator_specs = []\n for an_estimator in my_estimators:\n estimator_specs.append((an_estimator[0], # model only name\n getfullargspec(an_estimator[1]), # class params\n getfullargspec(an_estimator[1].fit), # fit params\n an_estimator[1])) # package.module.model\n\n model_configs = []\n\n for estimator in estimator_specs:\n model_json = {class_key: {}, fit_key: {}}\n fit_params = {}\n\n for i, key in enumerate(model_json.keys()):\n f = estimator[i+1]\n args_paired = []\n defs_paired = []\n\n # reverse the args since there are fewer defaults than args\n args = f.args\n args.reverse()\n n_args = len(args)\n\n defs = f.defaults\n if defs is None:\n defs 
= [defs]\n            defs = list(defs)\n            defs.reverse()\n            n_defs = len(defs)\n\n            n_smallest = min(n_args, n_defs)\n            n_largest = max(n_args, n_defs)\n\n            # build 2 lists that can be concatenated\n            for ix in range(n_smallest):\n                if args[ix] != \"self\":\n                    args_paired.append(args[ix])\n                    defs_paired.append(defs[ix])\n\n            for ix in range(n_smallest, n_largest):\n                if ix != 0 and args[ix] != \"self\":\n                    args_paired.append(args[ix])\n                    defs_paired.append(None)\n            # concatenate lists into appropriate structure\n            model_json[key] = dict(zip(reversed(args_paired), reversed(defs_paired)))\n\n        model_json[meta_key] = {}\n        model_json[meta_key]['sklearn_version'] = skversion\n        model_json[meta_key]['class'] = '.'.join([estimator[3].__module__, estimator[0]])\n        model_configs.append(model_json)\n    if len(model_configs) == 1:\n        # do we want to log this modified model as an artifact?\n        return model_configs[0]\n    else:\n        # do we want to log this modified model as an artifact?\n        return model_configs\n\ndef update_model_config(\n    config: dict,\n    new_class: dict,\n    new_fit: dict,\n    class_key: str = \"CLASS\",\n    fit_key: str = \"FIT\"\n):\n    \"\"\"Update model config json\n\n    This function is essential since there are modifications in class\n    and fit params that must be made (callbacks are a good example, without\n    which there is no training history available)\n\n    TODO: currently a model config contains 2 keys, but this will likely\n    expand to include other functions beyond class and fit. So need to expand\n    this to a list of Tuple(str, dict), where `str` corresponds to a key\n    in the model config and `dict` contains the params and their new values.\n\n    :param config: original model definition containing 2 keys, CLASS and FIT\n    :param new_class: new class key-values\n    :param new_fit: new fit key-values\n    \"\"\"\n    config[class_key].update(new_class)\n    config[fit_key].update(new_fit)\n\n    return config\n\ndef train_model(\n    context: MLClientCtx,\n    model_pkg_class: str,\n    data_key: Union[DataItem, str],\n    sample: int,\n    label_column: str,\n    model_key: str = \"model\",\n    test_size: float = 0.05,\n    train_val_split: float = 0.75,\n    test_set_key: str = \"test_set\",\n    rng: int = 1,\n    models_dir: str = \"models\",\n    plots_dir: str = \"plots\",\n    score_method: str = \"micro\",\n    class_params_updates: Union[DataItem, dict] = {},\n    fit_params_updates: Union[DataItem, dict] = {},\n) -> None:\n    \"\"\"train a classifier.\n\n    :param context: the function context\n    :param model_pkg_class: the model to train, e.g, 'sklearn.neural_networks.MLPClassifier'\n    :param data_key: (\"raw\") name of raw data file\n    :param sample: Selects the first n rows, or select a sample\n                   starting from the first. 
If negative <-1, select\n a random sample\n :param label_column: ground-truth (y) labels\n :param model_key: ('model') name of model in artifact store,\n points to a directory\n :param test_size: (0.05) test set size\n :param train_val_split: (0.75) Once the test set has been removed the\n training set gets this proportion.\n :param test_set_key: store the test data set under this key in the\n artifact store\n :param rng: (1) sklearn rng seed\n :param models_dir: models subfolder on artifact path\n :param plots_dir: plot subfolder on artifact path\n :param score_method: for multiclass classification\n :param class_updates: update these scikit-learn classifier params,\n input as a dict\n :param fit_updates: update scikit-learn fit parameters, input as\n a dict.\n \"\"\"\n # extract file name from DataItem\n srcfilepath = str(data_key)\n \n # TODO: this should be part of data's metadata dealt with in another step get a data set, sample, etc...\n # get all data or a sample\n if (sample == -1) or (sample >= 1):\n # get all rows, or contiguous sample starting at row 1.\n raw = pq.read_table(srcfilepath).to_pandas().dropna()\n labels = raw.pop(label_column)\n raw = raw.iloc[:sample, :]\n labels = labels.iloc[:sample]\n else:\n # grab a random sample\n raw = pq.read_table(srcfilepath).to_pandas().dropna().sample(sample * -1)\n labels = raw.pop(label_column)\n\n # TODO: this should be part of data's metadata dealt with in another step\n context.header = raw.columns.values\n \n # TODO: all of this should be part of a spitter component that does cv too, dealt with in another step\n # make a hot encode copy of labels before the split\n yb = label_binarize(labels, classes=list(range(raw.shape[1])))\n # double split to generate 3 data sets: train, validation and test\n # with xtest,ytest set aside\n x, xtest, y, ytest = train_test_split(np.concatenate([raw, yb], axis=0), labels, test_size=test_size, random_state=rng)\n xtrain, xvalid, ytrain, yvalid = train_test_split(x, y, train_size=train_val_split, random_state=rng)\n # extract the hot_encoded labels\n ytrainb = xtrain[:, -yb.shape[1]:].copy()\n xtrain = xtrain[:, :-yb.shape[1]].copy()\n # extract the hot_encoded labels\n yvalidb = xvalid[:, -yb.shape[1]:].copy()\n xvalid = xvalid[:, :-yb.shape[1]].copy()\n # extract the hot_encoded labels\n ytestb = xtest[:, -yb.shape[1]:].copy()\n xtest = xtest[:, :-yb.shape[1]].copy() \n # set-aside test_set\n test_set = pd.concat(\n [pd.DataFrame(data=xtest, columns=context.header),\n pd.DataFrame(data=ytest, columns=[label_column]),\n pd.DataFrame(data=ytestb, columns=[label_column])],\n axis=1,)\n filepath = os.path.join(base_path, test_set_key + \".pqt\")\n test_set.to_parquet(filepath, index=False)\n context.log_artifact(test_set_key, local_path=test_set_key + \".pqt\")\n\n # load the model config\n model_config = get_model_configs(model_pkg_class)\n # get update params if any\n if isinstance(class_params_updates, DataItem):\n class_params_updates = json.loads(class_params_updates.get())\n if isinstance(fit_params_updates, DataItem):\n fit_params_updates = json.loads(fit_params_updates.get())\n # update the parameters \n # add data to fit params\n fit_params_updates.update({'X': xtrain,'y': ytrain})\n model_config = update_model_config(model_config, class_params_update, fit_params_updates)\n\n # create class and fit\n ClassifierClass = _create_class(model_config[\"META\"][\"class\"])\n model = ClassifierClass(**class_params)\n model.fit(**fit_params)\n\n # save model\n filepath = os.path.join(base_path, 
f\"{models_dir}/{model_key}.pkl\")\n dump(model, open(filepath, \"wb\"))\n context.log_artifact(model_key, local_path=models_dir)\n\n # compute validation metrics\n ypred = model.predict(xvalid)\n y_score = model.predict_proba(xvalid)\n\n average_precision = average_precision_score(yvalidb,\n y_score,\n average=score_method)\n\n context.log_result(f\"accuracy\", float(model.score(xvalid, yvalid)))\n context.log_result(f\"rocauc\", roc_auc_score(yvalidb, y_score))\n context.log_result(f\"f1_score\", f1_score(yvalid, ypred,\n average=score_method))\n context.log_result(f\"avg_precscore\", average_precision)\n\n # validation plots\n \n plot_roc(context, yvalidb, y_score)\n plot_confusion_matrix(context, yvalid, ypred, key=\"confusion\", fmt=\"png\")\n\ndef plot_roc(\n context,\n y_labels,\n y_probs,\n key=\"roc\",\n plots_dir: str = \"plots\",\n fmt=\"png\",\n x_label: str = \"false positive rate\",\n y_label: str = \"true positive rate\",\n title: str = \"roc curve\",\n legend_loc: str = \"best\"\n):\n \"\"\"plot roc curves\n \n TODO: add averaging method (as string) that was used to create probs, \n display in legend\n \n :param context: the function context\n :param y_labels: ground truth labels, hot encoded for multiclass \n :param y_probs: model prediction probabilities\n :param key: (\"roc\") key of plot in artifact store\n :param plots_dir: (\"plots\") destination folder relative path to artifact path\n :param fmt: (\"png\") plot format\n :param x_label: (\"false positive rate\") x-axis labels\n :param y_label: (\"true positive rate\") y-axis labels\n :param title: (\"roc curve\") title of plot\n :param legend_loc: (\"best\") location of plot legend\n \"\"\"\n # don't bother if this doesn't work\n assert y_probs.shape == y_labels.shape\n \n # clear matplotlib current figure\n _gcf_clear(plt)\n \n # data accummulators by class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n \n # draw 45 degree line\n plt.plot([0, 1], [0, 1], \"k--\")\n \n # labelling\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.title(title)\n plt.legend(loc=legend_loc)\n \n # single ROC or mutliple\n for i in range(y_labels.shape[1]):\n fpr[i], tpr[i], _ = metrics.roc_curve(y_labels[:, i], y_probs[:, i], pos_label=1)\n roc_auc[i] = metrics.auc(fpr[i], tpr[i])\n plt.plot(fpr[i], tpr[i], label=f\"class {i}\")\n\n fname = f\"{plots_dir}/{key}.{fmt}\"\n plt.savefig(os.path.join(context.artifact_path, fname))\n context.log_artifact(PlotArtifact(key, body=plt.gcf()), local_path=fname)\n \n\ndef plot_confusion_matrix(\n context: MLClientCtx,\n labels,\n predictions,\n key: str = \"confusion_matrix\",\n plots_dir: str = \"plots\",\n colormap: str = \"Blues\",\n fmt: str = \"png\",\n sample_weight=None\n):\n \"\"\"Create a confusion matrix.\n Plot and save a confusion matrix using test data from a\n modelline step.\n \n See https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html\n \n TODO: fix label alignment\n TODO: consider using another packaged version\n TODO: refactor to take params dict for plot options\n\n :param context: function context\n :param labels: validation data ground-truth labels\n :param predictions: validation data predictions\n :param key: str\n :param plots_dir: relative path of plots in artifact store\n :param colormap: colourmap for confusion matrix\n :param fmt: plot format\n :param sample_weight: sample weights\n \"\"\"\n _gcf_clear(plt)\n \n cm = metrics.confusion_matrix(labels, predictions, sample_weight=None)\n sns.heatmap(cm, annot=True, cmap=colormap, 
square=True)\n\n fig = plt.gcf()\n fname = f\"{plots_dir}/{key}.{fmt}\"\n fig.savefig(os.path.join(context.artifact_path, fname))\n context.log_artifact(PlotArtifact(key, body=fig), local_path=fname)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "sklearn.metrics.auc", "matplotlib.pyplot.cla", "sklearn.model_selection.train_test_split", "sklearn.utils.testing.all_estimators", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.plot", "matplotlib.pyplot.gcf", "numpy.concatenate", "matplotlib.pyplot.clf", "sklearn.metrics.roc_curve", "pandas.DataFrame", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
ByungKwanLee/AdversarialMemory
[ "d4cccfec4d370f975dffc826346b1a1a28916444" ]
[ "train/train_trade.py" ]
[ "#!/usr/bin/env python\n\n# numpy package\nimport numpy as np\n\n# torch package\nimport torch\nimport torchvision\nfrom torch.nn.functional import cross_entropy, softmax, log_softmax\n\n# basic package\nimport os\nimport sys\nsys.path.append('.')\nimport argparse\nfrom tqdm import tqdm\nfrom datetime import datetime\n\n# custom package\nfrom loader.argument_print import argument_print\nfrom loader.loader import dataset_loader, network_loader, attack_loader\n\n# cudnn enable\ntorch.backends.cudnn.benchmark = True\ntorch.backends.cudnn.enabled = True\n\n# argument parser\nparser = argparse.ArgumentParser(description='Joint Adversarial Defense')\nparser.add_argument('--lr', default=0.01, type=float, help='learning rate')\nparser.add_argument('--steps', default=10, type=int, help='adv. steps')\nparser.add_argument('--eps', required=True, type=float, help='max norm')\nparser.add_argument('--dataset', required=True, type=str, help='dataset name')\nparser.add_argument('--network', required=True, type=str, help='network name')\nparser.add_argument('--data_root', required=True, type=str, help='path to dataset')\nparser.add_argument('--epoch', default=60, type=int, help='epoch number')\nparser.add_argument('--attack', default='pgd', type=str, help='attack type')\nparser.add_argument('--save_dir', default='./experiment', type=str, help='save directory')\nargs = parser.parse_args()\n\n# loading dataset, network, and attack\ntrainloader, testloader = dataset_loader(args)\nnet = network_loader(args, mean=args.mean, std=args.std).cuda()\nattack = attack_loader(args, net)\n\n# Adam Optimizer with KL divergence, and Scheduling Learning rate\noptimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-2)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.2)\n\n# Setting checkpoint date time\ndate_time = datetime.today().strftime(\"%m%d%H%M\")\n\n# checkpoint_name\ncheckpoint_name = 'TRADE_'+args.network+'_'+args.dataset+'_'+date_time+'.pth'\n\n# argument print\nargument_print(args, checkpoint_name)\n\n\ndef train():\n\n # Modeling Adversarial Loss\n for epoch in range(args.epoch):\n\n # train environment\n net.train()\n\n print('\\n\\n[TRADE/Epoch] : {}'.format(epoch+1))\n\n total_cross_loss = 0\n correct = 0\n total = 0\n\n for batch_idx, (inputs, targets) in enumerate(tqdm(trainloader)):\n\n # dataloader parsing and generate adversarial examples\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # learning network parameters\n optimizer.zero_grad()\n adv_x = attack(inputs, targets) if args.eps != 0 else inputs\n cross_entropy_loss = cross_entropy(net(adv_x), targets)-(softmax(net(inputs).detach(),dim=1)*log_softmax(net(adv_x),dim=1)).sum(dim=1).mean()\n cross_entropy_loss.backward()\n optimizer.step()\n\n # validation\n pred = torch.max(net(adv_x).detach(), dim=1)[1]\n correct += torch.sum(pred.eq(targets)).item()\n total += targets.numel()\n\n # logging two types loss and total loss\n total_cross_loss += cross_entropy_loss.item()\n\n if batch_idx % 50 == 0 and batch_idx != 0:\n print('[TRADE/Train] Iter: {}, Acc: {:.3f}, CE: {:.3f}'.format(\n batch_idx, # Iter\n 100.*correct / total, # Acc\n total_cross_loss / (batch_idx+1) # CrossEntropy\n )\n )\n\n # Scheduling learning rate by stepLR\n scheduler.step()\n\n # Adversarial validation\n adversarial_test()\n\n # Save checkpoint file\n torch.save({\n 'epoch': epoch+1,\n 'model_state_dict': net.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'total_cross_entropy_loss' : 
total_cross_loss / (batch_idx+1)\n }, os.path.join(args.save_dir,checkpoint_name))\n\n # argument print\n argument_print(args, checkpoint_name)\n\n\ndef adversarial_test():\n\n correct = 0\n total = 0\n\n print('\\n\\n[TRADE/Test] Under Testing ... Wait PLZ')\n for batch_idx, (inputs, targets) in enumerate(tqdm(testloader)):\n\n # dataloader parsing and generate adversarial examples\n inputs, targets = inputs.cuda(), targets.cuda()\n adv_x = attack(inputs, targets) if args.eps != 0 else inputs\n\n # Evaluation\n outputs = net(adv_x).detach()\n\n # Test\n pred = torch.max(outputs, dim=1)[1]\n correct += torch.sum(pred.eq(targets)).item()\n total += targets.numel()\n\n print('[TRADE/Test] Acc: {:.3f}'.format(100.*correct / total))\n\n\nif __name__ == \"__main__\":\n train()" ]
[ [ "torch.max", "torch.optim.lr_scheduler.StepLR" ] ]
ShenDezhou/CAIL
[ "c4cfa98ab4ecedbce34a7a5a186830486047540c", "c4cfa98ab4ecedbce34a7a5a186830486047540c", "c4cfa98ab4ecedbce34a7a5a186830486047540c", "c4cfa98ab4ecedbce34a7a5a186830486047540c", "c4cfa98ab4ecedbce34a7a5a186830486047540c", "c4cfa98ab4ecedbce34a7a5a186830486047540c", "c4cfa98ab4ecedbce34a7a5a186830486047540c" ]
[ "CAIL2021/slsb/selftest.py", "CAIL2020/lawsplit/torch_server.py", "CAIL2020/cocr/torchocr/networks/backbones/DetResNetvd.py", "CAIL2020/cocr/torchocr/utils/vis.py", "CAIL2020/sfzyza/model.py", "CAIL2020/cocr/torchocr/utils/ckpt.py", "CAIL2020/zwfc/torch_server.py" ]
[ "import json\n\nimport pandas\nimport urllib3\nfrom classmerge import match\nfrom dataclean import cleanall\n\ndf = pandas.read_csv(\"dataset/valid-phase1.csv\")\nhttp = urllib3.PoolManager()\ncorrect = 0\nfor index, row in df.iterrows():\n label = row[0]\n title = row[1].replace(\".doc\",\"\").replace(\".docx\",\"\")\n content = cleanall(row[2])\n url = \"http://192.168.0.161:58080/z?1={}&2={}\".format(title, content)\n print(url)\n if len(url) > 9999:\n url = url[:9999]\n result = http.request('GET', url)\n result = json.loads(result.data)\n print(label, result['answer'][0])\n df.at[index, 'type1'] = result['answer'][0]\n df.at[index, 'title'] = title\n df.at[index, 'content'] = content\n if match(result['answer'][0], label):\n correct +=1\ndf.to_csv(\"eval/test-bert.csv\", index=False)\nprint('ACCURACY:{}%'.format(correct*100.0/len(df)))", "import argparse\nimport logging\nimport time\n\nimport falcon\n\nfrom falcon_cors import CORS\nimport json\nimport waitress\n\nimport re\n\nimport pandas\nlogging.basicConfig(level=logging.INFO, format='%(asctime)-18s %(message)s')\nlogger = logging.getLogger()\ncors_allow_all = CORS(allow_all_origins=True,\n allow_origins_list=['*'],\n allow_all_headers=True,\n allow_all_methods=True,\n allow_credentials_all_origins=True\n )\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '-p', '--port', default=58004,\n help='falcon server port')\n# parser.add_argument(\n# '-c', '--config_file', default='config/bert_config_l.json',\n# help='model config file')\nargs = parser.parse_args()\n# model_config=args.config_file\n#\n# MODEL_MAP = {\n# 'bert': BertForClassification,\n# 'cnn': CharCNN\n# }\n\n\nclass TorchResource:\n\n def __init__(self):\n logger.info(\"...\")\n\n self.rule = ' +第([^条]{1,7})条 (.*)'\n self.chapter = '第[一二三四五六七八九十]{1,3}分?[章编]'\n self.pattern = re.compile(self.rule)\n self.chapter_pattern = re.compile(self.chapter)\n\n self.FORMAL_DIGIT = \"零一二三四五六七八九\"\n self.math_digit = \"0123456789\"\n logger.info(\"###\")\n\n def format2digit(self, word):\n trans = \"\"\n if word.startswith('十'):\n trans += '1'\n\n for c in word:\n if c in self.FORMAL_DIGIT:\n trans += self.math_digit[self.FORMAL_DIGIT.index(c)]\n if c == '千' and not word.endswith('千'):\n if '百' not in word and '十' not in word:\n trans += \"0\"\n if word.endswith(c):\n if c == \"十\":\n trans += '0'\n if c == \"百\":\n trans += '00'\n if c == \"千\":\n trans += '000'\n return trans\n\n def split(self, content):\n # logger.info('1:{}, 2:{}'.format(title, content))\n\n df = pandas.DataFrame()\n f = content.split('\\n')\n buffer = []\n digit = 0\n for line in f:\n match = re.search(self.pattern, line)\n if match:\n # output\n article_digit = self.format2digit(match.group(1))\n if digit:\n tup = (str(int(article_digit) - 1), r\"\\n\".join(buffer))\n buffer = []\n dic = dict(zip(('id', 'desc'), tup))\n df = df.append(dic, ignore_index=True)\n buffer.append(line.strip())\n digit += 1\n else:\n if self.chapter_pattern.search(line):\n context = line.strip().split(' ')[-1]\n else:\n buffer.append(line.strip())\n # last\n if buffer:\n tup = (article_digit, r\"\\n\".join(buffer))\n dic = dict(zip(('id', 'desc'), tup))\n df = df.append(dic, ignore_index=True)\n filename = \"data/{}.csv\".format(time.time())\n df.to_csv(filename, columns=['id', 'desc'], index=False)\n tuple = {'id':df['id'].to_list(), 'desc':df['desc'].to_list()}\n return tuple\n\n\n def on_get(self, req, resp):\n logger.info(\"...\")\n resp.set_header('Access-Control-Allow-Origin', '*')\n 
resp.set_header('Access-Control-Allow-Methods', '*')\n resp.set_header('Access-Control-Allow-Headers', '*')\n resp.set_header('Access-Control-Allow-Credentials','true')\n # title = req.get_param('1', True)\n content = req.get_param('1', True)\n # clean_title = shortenlines(title)\n # clean_content = cleanall(content)\n resp.media = self.split(content)\n logger.info(\"###\")\n\n\n def on_post(self, req, resp):\n \"\"\"Handles POST requests\"\"\"\n resp.set_header('Access-Control-Allow-Origin', '*')\n resp.set_header('Access-Control-Allow-Methods', '*')\n resp.set_header('Access-Control-Allow-Headers', '*')\n resp.set_header('Access-Control-Allow-Credentials', 'true')\n resp.set_header(\"Cache-Control\", \"no-cache\")\n data = req.stream.read(req.content_length)\n jsondata = json.loads(data)\n # clean_title = shortenlines(jsondata['title'])\n # clean_content = self.split((jsondata['content'])\n resp.media = self.split(jsondata['content'])\n\nif __name__==\"__main__\":\n api = falcon.API(middleware=[cors_allow_all.middleware])\n api.req_options.auto_parse_form_urlencoded = True\n api.add_route('/z', TorchResource())\n waitress.serve(api, port=args.port, threads=48, url_scheme='http')\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nfrom collections import OrderedDict\nimport os\nimport torch\nfrom torch import nn\n\nfrom torchocr.networks.CommonModules import HSwish\n\n\nclass ConvBNACT(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, act=None):\n super().__init__()\n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,\n stride=stride, padding=padding, groups=groups,\n bias=False)\n self.bn = nn.BatchNorm2d(out_channels)\n if act == 'relu':\n self.act = nn.ReLU()\n elif act == 'hard_swish':\n self.act = HSwish()\n elif act is None:\n self.act = None\n\n def load_3rd_state_dict(self, _3rd_name, _state, _name_prefix):\n to_load_state_dict = OrderedDict()\n if _3rd_name == 'paddle':\n to_load_state_dict['conv.weight'] = torch.Tensor(_state[f'{_name_prefix}_weights'])\n if _name_prefix == 'conv1':\n bn_name = f'bn_{_name_prefix}'\n else:\n bn_name = f'bn{_name_prefix[3:]}'\n to_load_state_dict['bn.weight'] = torch.Tensor(_state[f'{bn_name}_scale'])\n to_load_state_dict['bn.bias'] = torch.Tensor(_state[f'{bn_name}_offset'])\n to_load_state_dict['bn.running_mean'] = torch.Tensor(_state[f'{bn_name}_mean'])\n to_load_state_dict['bn.running_var'] = torch.Tensor(_state[f'{bn_name}_variance'])\n self.load_state_dict(to_load_state_dict)\n else:\n pass\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n if self.act is not None:\n x = self.act(x)\n return x\n\n\nclass ConvBNACTWithPool(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, groups=1, act=None):\n super().__init__()\n self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True)\n\n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1,\n padding=(kernel_size - 1) // 2,\n groups=groups,\n bias=False)\n self.bn = nn.BatchNorm2d(out_channels)\n if act is None:\n self.act = None\n else:\n self.act = nn.ReLU()\n\n def load_3rd_state_dict(self, _3rd_name, _state, _name_prefix):\n to_load_state_dict = OrderedDict()\n if _3rd_name == 'paddle':\n to_load_state_dict['conv.weight'] = torch.Tensor(_state[f'{_name_prefix}_weights'])\n if _name_prefix == 'conv1':\n bn_name 
= f'bn_{_name_prefix}'\n else:\n bn_name = f'bn{_name_prefix[3:]}'\n to_load_state_dict['bn.weight'] = torch.Tensor(_state[f'{bn_name}_scale'])\n to_load_state_dict['bn.bias'] = torch.Tensor(_state[f'{bn_name}_offset'])\n to_load_state_dict['bn.running_mean'] = torch.Tensor(_state[f'{bn_name}_mean'])\n to_load_state_dict['bn.running_var'] = torch.Tensor(_state[f'{bn_name}_variance'])\n self.load_state_dict(to_load_state_dict)\n else:\n pass\n\n def forward(self, x):\n x = self.pool(x)\n x = self.conv(x)\n x = self.bn(x)\n if self.act is not None:\n x = self.act(x)\n return x\n\n\nclass ShortCut(nn.Module):\n def __init__(self, in_channels, out_channels, stride, name, if_first=False):\n super().__init__()\n assert name is not None, 'shortcut must have name'\n\n self.name = name\n if in_channels != out_channels or stride != 1:\n if if_first:\n self.conv = ConvBNACT(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride,\n padding=0, groups=1, act=None)\n else:\n self.conv = ConvBNACTWithPool(in_channels=in_channels, out_channels=out_channels, kernel_size=1,\n groups=1, act=None)\n elif if_first:\n self.conv = ConvBNACT(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride,\n padding=0, groups=1, act=None)\n else:\n self.conv = None\n\n def load_3rd_state_dict(self, _3rd_name, _state):\n if _3rd_name == 'paddle':\n if self.conv:\n self.conv.load_3rd_state_dict(_3rd_name, _state, self.name)\n else:\n pass\n\n def forward(self, x):\n if self.conv is not None:\n x = self.conv(x)\n return x\n\n\nclass BottleneckBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride, if_first, name):\n super().__init__()\n assert name is not None, 'bottleneck must have name'\n self.name = name\n self.conv0 = ConvBNACT(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0,\n groups=1, act='relu')\n self.conv1 = ConvBNACT(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=stride,\n padding=1, groups=1, act='relu')\n self.conv2 = ConvBNACT(in_channels=out_channels, out_channels=out_channels * 4, kernel_size=1, stride=1,\n padding=0, groups=1, act=None)\n self.shortcut = ShortCut(in_channels=in_channels, out_channels=out_channels * 4, stride=stride,\n if_first=if_first, name=f'{name}_branch1')\n self.relu = nn.ReLU()\n self.output_channels = out_channels * 4\n\n def load_3rd_state_dict(self, _3rd_name, _state):\n self.conv0.load_3rd_state_dict(_3rd_name, _state, f'{self.name}_branch2a')\n self.conv1.load_3rd_state_dict(_3rd_name, _state, f'{self.name}_branch2b')\n self.conv2.load_3rd_state_dict(_3rd_name, _state, f'{self.name}_branch2c')\n self.shortcut.load_3rd_state_dict(_3rd_name, _state)\n\n def forward(self, x):\n y = self.conv0(x)\n y = self.conv1(y)\n y = self.conv2(y)\n y = y + self.shortcut(x)\n return self.relu(y)\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride, if_first, name):\n super().__init__()\n assert name is not None, 'block must have name'\n self.name = name\n\n self.conv0 = ConvBNACT(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride,\n padding=1, groups=1, act='relu')\n self.conv1 = ConvBNACT(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1,\n groups=1, act=None)\n self.shortcut = ShortCut(in_channels=in_channels, out_channels=out_channels, stride=stride,\n name=f'{name}_branch1', if_first=if_first, )\n self.relu = nn.ReLU()\n self.output_channels = 
out_channels\n\n def load_3rd_state_dict(self, _3rd_name, _state):\n if _3rd_name == 'paddle':\n self.conv0.load_3rd_state_dict(_3rd_name, _state, f'{self.name}_branch2a')\n self.conv1.load_3rd_state_dict(_3rd_name, _state, f'{self.name}_branch2b')\n self.shortcut.load_3rd_state_dict(_3rd_name, _state)\n else:\n pass\n\n def forward(self, x):\n y = self.conv0(x)\n y = self.conv1(y)\n y = y + self.shortcut(x)\n return self.relu(y)\n\n\nclass ResNet(nn.Module):\n def __init__(self, in_channels, layers, pretrained=True, **kwargs):\n \"\"\"\n the Resnet backbone network for detection module.\n Args:\n params(dict): the super parameters for network build\n \"\"\"\n super().__init__()\n supported_layers = {\n 18: {'depth': [2, 2, 2, 2], 'block_class': BasicBlock},\n 34: {'depth': [3, 4, 6, 3], 'block_class': BasicBlock},\n 50: {'depth': [3, 4, 6, 3], 'block_class': BottleneckBlock},\n 101: {'depth': [3, 4, 23, 3], 'block_class': BottleneckBlock},\n 152: {'depth': [3, 8, 36, 3], 'block_class': BottleneckBlock},\n 200: {'depth': [3, 12, 48, 3], 'block_class': BottleneckBlock}\n }\n assert layers in supported_layers, \\\n \"supported layers are {} but input layer is {}\".format(supported_layers, layers)\n depth = supported_layers[layers]['depth']\n block_class = supported_layers[layers]['block_class']\n\n num_filters = [64, 128, 256, 512]\n self.conv1 = nn.Sequential(\n ConvBNACT(in_channels=in_channels, out_channels=32, kernel_size=3, stride=2, padding=1, act='relu'),\n ConvBNACT(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1, act='relu'),\n ConvBNACT(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1, act='relu')\n )\n self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.stages = nn.ModuleList()\n self.out_channels = []\n in_ch = 64\n for block_index in range(len(depth)):\n block_list = []\n for i in range(depth[block_index]):\n if layers >= 50:\n if layers in [101, 152, 200] and block_index == 2:\n if i == 0:\n conv_name = \"res\" + str(block_index + 2) + \"a\"\n else:\n conv_name = \"res\" + str(block_index + 2) + \"b\" + str(i)\n else:\n conv_name = \"res\" + str(block_index + 2) + chr(97 + i)\n else:\n conv_name = f'res{str(block_index + 2)}{chr(97 + i)}'\n block_list.append(block_class(in_channels=in_ch, out_channels=num_filters[block_index],\n stride=2 if i == 0 and block_index != 0 else 1,\n if_first=block_index == i == 0, name=conv_name))\n in_ch = block_list[-1].output_channels\n self.out_channels.append(in_ch)\n self.stages.append(nn.Sequential(*block_list))\n if pretrained:\n ckpt_path = f'./weights/resnet{layers}_vd.pth'\n logger = logging.getLogger('torchocr')\n if os.path.exists(ckpt_path):\n logger.info('load imagenet weights')\n self.load_state_dict(torch.load(ckpt_path))\n else:\n logger.info(f'{ckpt_path} not exists')\n\n def load_3rd_state_dict(self, _3rd_name, _state):\n if _3rd_name == 'paddle':\n for m_conv_index, m_conv in enumerate(self.conv1, 1):\n m_conv.load_3rd_state_dict(_3rd_name, _state, f'conv1_{m_conv_index}')\n for m_stage in self.stages:\n for m_block in m_stage:\n m_block.load_3rd_state_dict(_3rd_name, _state)\n else:\n pass\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.pool1(x)\n out = []\n for stage in self.stages:\n x = stage(x)\n out.append(x)\n return out\n", "# -*- coding: utf-8 -*-\n# @Time : 2020/6/18 10:34\n# @Author : THU\nimport math\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\n\n\ndef draw_ocr_box_txt(image, boxes, txts = None):\n if 
isinstance(image,np.ndarray):\n image = Image.fromarray(image)\n h, w = image.height, image.width\n img_left = image.copy()\n img_right = Image.new('RGB', (w, h), (255, 255, 255))\n\n import random\n # 每次使用相同的随机种子 ,可以保证两次颜色一致\n random.seed(0)\n draw_left = ImageDraw.Draw(img_left)\n draw_right = ImageDraw.Draw(img_right)\n for i,box in enumerate(boxes):\n color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n draw_left.polygon(box, fill=color)\n draw_right.polygon([box[0][0], box[0][1],\n box[1][0], box[1][1],\n box[2][0], box[2][1],\n box[3][0], box[3][1]], outline=color)\n if txts is not None:\n txt = str(txts[i])\n box_height = math.sqrt((box[0][0] - box[3][0]) ** 2 + (box[0][1] - box[3][1]) ** 2)\n box_width = math.sqrt((box[0][0] - box[1][0]) ** 2 + (box[0][1] - box[1][1]) ** 2)\n if box_height > 2 * box_width:\n font_size = max(int(box_width * 0.9), 10)\n font = ImageFont.truetype(\"./doc/2.ttf\", font_size, encoding=\"utf-8\")\n cur_y = box[0][1]\n for c in txt:\n char_size = font.getsize(c)\n draw_right.text((box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)\n cur_y += char_size[1]\n else:\n font_size = max(int(box_height * 0.8), 10)\n font = ImageFont.truetype(\"./doc/2.ttf\", font_size, encoding=\"utf-8\")\n draw_right.text([box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)\n img_left = Image.blend(image, img_left, 0.5)\n if txts is not None:\n img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))\n img_show.paste(img_left, (0, 0, w, h))\n img_show.paste(img_right, (w, 0, w * 2, h))\n else:\n img_show = np.array(img_left)\n return np.array(img_show)\n\n\n\ndef show_img(imgs: np.ndarray, title='img'):\n from matplotlib import pyplot as plt\n color = (len(imgs.shape) == 3 and imgs.shape[-1] == 3)\n imgs = np.expand_dims(imgs, axis=0)\n for i, img in enumerate(imgs):\n plt.figure()\n plt.title('{}_{}'.format(title, i))\n plt.imshow(img, cmap=None if color else 'gray')\n\n\ndef draw_bbox(img_path, result, color=(255, 0, 0), thickness=2):\n import cv2\n if isinstance(img_path, str):\n img_path = cv2.imread(img_path)\n # img_path = cv2.cvtColor(img_path, cv2.COLOR_BGR2RGB)\n img_path = img_path.copy()\n for point in result:\n point = point.astype(int)\n cv2.polylines(img_path, [point], True, color, thickness)\n return img_path", "\"\"\"BERT and RNN model for sentence pair classification.\n\nAuthor: Tsinghuaboy ([email protected])\n\nUsed for SMP-CAIL2020-Argmine.\n\"\"\"\nimport torch\n\nfrom torch import nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, PackedSequence\nfrom transformers import BertModel\nfrom transformers import AutoModel\nimport torch.nn.functional as F\nfrom sfzyza.capsnetx import PrimaryCaps, FCCaps, FlattenCaps\n\nclass BertForClassification(nn.Module):\n \"\"\"BERT with simple linear model.\"\"\"\n def __init__(self, config):\n \"\"\"Initialize the model with config dict.\n\n Args:\n config: python dict must contains the attributes below:\n config.bert_model_path: pretrained model path or model type\n e.g. 'bert-base-chinese'\n config.hidden_size: The same as BERT model, usually 768\n config.num_classes: int, e.g. 
2\n config.dropout: float between 0 and 1\n \"\"\"\n super().__init__()\n self.bert = BertModel.from_pretrained(config.bert_model_path)\n for param in self.bert.parameters():\n param.requires_grad = True\n self.dropout = nn.Dropout(config.dropout)\n self.linear = nn.Linear(4, config.num_classes)\n self.num_classes = config.num_classes\n\n self.dim_capsule = config.dim_capsule\n self.num_compressed_capsule = config.num_compressed_capsule\n self.ngram_size = [2, 4, 8]\n self.convs_doc = nn.ModuleList([nn.Conv1d(config.max_seq_len, 32, K, stride=2) for K in self.ngram_size])\n torch.nn.init.xavier_uniform_(self.convs_doc[0].weight)\n torch.nn.init.xavier_uniform_(self.convs_doc[1].weight)\n torch.nn.init.xavier_uniform_(self.convs_doc[2].weight)\n\n self.primary_capsules_doc = PrimaryCaps(num_capsules=self.dim_capsule, in_channels=32, out_channels=32,\n kernel_size=1, stride=1)\n\n self.flatten_capsules = FlattenCaps()\n\n if config.hidden_size == 768:\n self.W_doc = nn.Parameter(torch.FloatTensor(147328, self.num_compressed_capsule))\n else:#1024\n self.W_doc = nn.Parameter(torch.FloatTensor(196480, self.num_compressed_capsule))\n torch.nn.init.xavier_uniform_(self.W_doc)\n\n self.fc_capsules_doc_child = FCCaps(config, output_capsule_num=config.num_classes,\n input_capsule_num=self.num_compressed_capsule,\n in_channels=self.dim_capsule, out_channels=self.dim_capsule)\n\n\n def compression(self, poses, W):\n poses = torch.matmul(poses.permute(0, 2, 1), W).permute(0, 2, 1)\n activations = torch.sqrt((poses ** 2).sum(2))\n return poses, activations\n\n\n\n def forward(self, input_ids, attention_mask, token_type_ids):\n \"\"\"Forward inputs and get logits.\n\n Args:\n input_ids: (batch_size, max_seq_len)\n attention_mask: (batch_size, max_seq_len)\n token_type_ids: (batch_size, max_seq_len)\n\n Returns:\n logits: (batch_size, num_classes)\n \"\"\"\n batch_size = input_ids.shape[0]\n hiddens = self.bert(input_ids=input_ids, attention_mask=attention_mask,token_type_ids=token_type_ids,\n output_hidden_states=True)[2]\n hidden_state = torch.cat([*hiddens[-3:], hiddens[0]], dim=2)\n # bert_output[0]: (batch_size, sequence_length, hidden_size)\n # bert_output[1]: (batch_size, hidden_size)\n #hidden_state = hidden_state.mean(1)\n #hidden_state = self.dropout(hidden_state)\n #logits = self.linear(hidden_state).view(batch_size, self.num_classes)\n #logits = torch.sigmoid(logits)\n # logits: (batch_size, num_classes)\n nets_doc_l = []\n for i in range(len(self.ngram_size)):\n nets = self.convs_doc[i](hidden_state)\n nets_doc_l.append(nets)\n nets_doc = torch.cat((nets_doc_l[0], nets_doc_l[1], nets_doc_l[2]), 2)\n poses_doc, activations_doc = self.primary_capsules_doc(nets_doc)\n poses, activations = self.flatten_capsules(poses_doc, activations_doc)\n poses, activations = self.compression(poses, self.W_doc)\n poses, logits = self.fc_capsules_doc_child(poses, activations, range(4))#4 types in total.\n\n logits = self.linear(logits.view(batch_size,-1)).view(batch_size, self.num_classes)\n return logits\n\nclass BertXForClassification(nn.Module):\n \"\"\"BERT with simple linear model.\"\"\"\n def __init__(self, config):\n \"\"\"Initialize the model with config dict.\n\n Args:\n config: python dict must contains the attributes below:\n config.bert_model_path: pretrained model path or model type\n e.g. 'bert-base-chinese'\n config.hidden_size: The same as BERT model, usually 768\n config.num_classes: int, e.g. 
2\n config.dropout: float between 0 and 1\n \"\"\"\n super().__init__()\n self.bert = BertModel.from_pretrained(config.bert_model_path)\n for param in self.bert.parameters():\n param.requires_grad = True\n\n # data(b, 512, 768) -> conv(b, 511,767) -> bn -> mp(b, 4, 6)\n self.conv_module = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(128, 128), stride=(128, 128), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 255,255) -> bn -> mp(b, 4, 4)\n self.conv_module2 = nn.Sequential(\n nn.Conv2d(1,1, kernel_size=(2,3), stride=(2,3),padding=(0,0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(64, 64), stride=(64, 64),padding=(1,1))\n )\n # data(b, 512, 768) -> conv(b, 169, 192) -> bn -> mp(b, 5, 6)\n self.conv_module3 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(3, 4), stride=(3, 4), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(32, 32), stride=(32, 32), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 127, 127) -> bn -> mp(b, 4, 4)\n self.conv_module4 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(4, 6), stride=(4, 6), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(32, 32), stride=(32, 32), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 101, 108) -> bn -> mp(b, 6, 6)\n self.conv_module5 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(5, 7), stride=(5, 7), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(16, 16), stride=(16, 16), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 84, 84) -> bn -> mp(b, 5, 5)\n self.conv_module6 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(6, 9), stride=(6, 9), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(16, 16), stride=(16, 16), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 72, 75) -> bn -> mp(b, 9, 9)\n self.conv_module7 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(7, 10), stride=(7, 10), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(8, 8), stride=(8, 8), padding=(1, 1))\n )\n #cnn feature map has a total number of 228 dimensions.\n self.dropout = nn.Dropout(config.dropout)\n self.linear = nn.Linear(config.hidden_size+228, config.num_classes)\n self.bn = nn.BatchNorm1d(config.num_classes)\n self.num_classes = config.num_classes\n\n def forward(self, input_ids, attention_mask, token_type_ids):\n \"\"\"Forward inputs and get logits.\n\n Args:\n input_ids: (batch_size, max_seq_len)\n attention_mask: (batch_size, max_seq_len)\n token_type_ids: (batch_size, max_seq_len)\n\n Returns:\n logits: (batch_size, num_classes)\n \"\"\"\n batch_size = input_ids.shape[0]\n bert_output = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n # encoder_hidden_states=False\n )\n # bert_output[0]: (batch_size, sequence_length, hidden_size)\n encoded_output = bert_output[0]\n # encoded_output[0]: (batch_size, 1, sequence_length, hidden_size)\n encoded_output = encoded_output.view(batch_size, 1, encoded_output.shape[1], -1)\n cnn_feats = []\n cnn_feats.append(self.conv_module(encoded_output))\n cnn_feats.append(self.conv_module2(encoded_output))\n cnn_feats.append(self.conv_module3(encoded_output))\n cnn_feats.append(self.conv_module4(encoded_output))\n cnn_feats.append(self.conv_module5(encoded_output))\n cnn_feats.append(self.conv_module6(encoded_output))\n cnn_feats.append(self.conv_module7(encoded_output))\n for index in 
range(len(cnn_feats)):\n cnn_feats[index] = cnn_feats[index].reshape((batch_size, -1))\n con_cnn_feats = torch.cat(cnn_feats, dim=1)\n\n # bert_output[1]: (batch_size, hidden_size)\n pooled_output = bert_output[1]\n # 228 + 768 ->\n pooled_output = torch.cat([con_cnn_feats, pooled_output], dim=1)\n pooled_output = self.dropout(pooled_output)\n logits = self.linear(pooled_output).view(batch_size, self.num_classes)\n logits = self.bn(logits)\n logits = nn.functional.softmax(logits, dim=-1)\n # logits: (batch_size, num_classes)\n return logits\n\nclass BertYForClassification(nn.Module):\n \"\"\"BERT with simple linear model.\"\"\"\n def __init__(self, config):\n \"\"\"Initialize the model with config dict.\n\n Args:\n config: python dict must contains the attributes below:\n config.bert_model_path: pretrained model path or model type\n e.g. 'bert-base-chinese'\n config.hidden_size: The same as BERT model, usually 768\n config.num_classes: int, e.g. 2\n config.dropout: float between 0 and 1\n \"\"\"\n super().__init__()\n self.bert = BertModel.from_pretrained(config.bert_model_path)\n for param in self.bert.parameters():\n param.requires_grad = True\n\n # data(b, 512, 768) -> conv(b, 511,767) -> bn -> mp(b, 4, 6)\n self.conv_module = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(128, 128), stride=(128, 128), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 255,255) -> bn -> mp(b, 4, 4)\n self.conv_module2 = nn.Sequential(\n nn.Conv2d(1,1, kernel_size=(2,3), stride=(2,3),padding=(0,0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(64, 64), stride=(64, 64),padding=(1,1))\n )\n # data(b, 512, 768) -> conv(b, 169, 192) -> bn -> mp(b, 5, 6)\n self.conv_module3 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(3, 4), stride=(3, 4), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(32, 32), stride=(32, 32), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 127, 127) -> bn -> mp(b, 4, 4)\n self.conv_module4 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(4, 6), stride=(4, 6), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(32, 32), stride=(32, 32), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 101, 108) -> bn -> mp(b, 6, 6)\n self.conv_module5 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(5, 7), stride=(5, 7), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(16, 16), stride=(16, 16), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 84, 84) -> bn -> mp(b, 5, 5)\n self.conv_module6 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(6, 9), stride=(6, 9), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(16, 16), stride=(16, 16), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 72, 75) -> bn -> mp(b, 9, 9)\n self.conv_module7 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(7, 10), stride=(7, 10), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(8, 8), stride=(8, 8), padding=(1, 1))\n )\n self.conv_module8 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(8, 12), stride=(8, 12), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(8, 8), stride=(8, 8), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 255,255) -> bn -> mp(b, 4, 4)\n self.conv_module9 = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(9, 13), stride=(9, 13), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n 
nn.MaxPool2d(kernel_size=(8, 8), stride=(8, 8), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 169, 192) -> bn -> mp(b, 5, 6)\n self.conv_moduleA = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(10, 15), stride=(10, 15), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(4, 4), stride=(4, 4), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 127, 127) -> bn -> mp(b, 4, 4)\n self.conv_moduleB = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(11, 16), stride=(11, 16), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(4, 4), stride=(4, 4), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 101, 108) -> bn -> mp(b, 6, 6)\n self.conv_moduleC = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(12, 18), stride=(12, 18), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 84, 84) -> bn -> mp(b, 5, 5)\n self.conv_moduleD = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(13, 19), stride=(13, 19), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=(1, 1))\n )\n # data(b, 512, 768) -> conv(b, 72, 75) -> bn -> mp(b, 9, 9)\n self.conv_moduleE = nn.Sequential(\n nn.Conv2d(1, 1, kernel_size=(14, 21), stride=(14, 21), padding=(0, 0)),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=(1, 1))\n )\n\n #cnn feature map has a total number of 228 dimensions.\n self.dropout = nn.Dropout(config.dropout)\n # 1-7: 228; 8-14: 1691\n self.linear = nn.Linear(config.hidden_size + 1005, config.num_classes)\n #self.bn = nn.BatchNorm1d(config.num_classes)\n self.num_classes = config.num_classes\n\n def forward(self, input_ids, attention_mask, token_type_ids):\n \"\"\"Forward inputs and get logits.\n\n Args:\n input_ids: (batch_size, max_seq_len)\n attention_mask: (batch_size, max_seq_len)\n token_type_ids: (batch_size, max_seq_len)\n\n Returns:\n logits: (batch_size, num_classes)\n \"\"\"\n batch_size = input_ids.shape[0]\n bert_output = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n # encoder_hidden_states=False\n )\n # bert_output[0]: (batch_size, sequence_length, hidden_size)\n encoded_output = bert_output[0]\n # encoded_output[0]: (batch_size, 1, sequence_length, hidden_size)\n encoded_output = encoded_output.view(batch_size, 1, encoded_output.shape[1], -1)\n cnn_feats = []\n cnn_feats.append(self.conv_module(encoded_output))\n cnn_feats.append(self.conv_module2(encoded_output))\n cnn_feats.append(self.conv_module3(encoded_output))\n cnn_feats.append(self.conv_module4(encoded_output))\n cnn_feats.append(self.conv_module5(encoded_output))\n cnn_feats.append(self.conv_module6(encoded_output))\n cnn_feats.append(self.conv_module7(encoded_output))\n cnn_feats.append(self.conv_module8(encoded_output))\n cnn_feats.append(self.conv_module9(encoded_output))\n cnn_feats.append(self.conv_moduleA(encoded_output))\n cnn_feats.append(self.conv_moduleB(encoded_output))\n cnn_feats.append(self.conv_moduleC(encoded_output))\n cnn_feats.append(self.conv_moduleD(encoded_output))\n cnn_feats.append(self.conv_moduleE(encoded_output))\n for index in range(len(cnn_feats)):\n cnn_feats[index] = cnn_feats[index].reshape((batch_size, -1))\n con_cnn_feats = torch.cat(cnn_feats, dim=1)\n\n # bert_output[1]: (batch_size, hidden_size)\n pooled_output = bert_output[1]\n # 228 + 768 ->\n pooled_output = torch.cat([con_cnn_feats, pooled_output], 
dim=1)\n pooled_output = self.dropout(pooled_output)\n logits = self.linear(pooled_output).view(batch_size, self.num_classes)\n #logits = self.bn(logits)\n logits = nn.functional.softmax(logits, dim=-1)\n # logits: (batch_size, num_classes)\n return logits\n#A Hierarchical Multi-grained Transformer-based Document Summarization Method\nclass BertXLForClassification(nn.Module):\n \"\"\"BERT with simple linear model.\"\"\"\n def __init__(self, config):\n \"\"\"Initialize the model with config dict.\n\n Args:\n config: python dict must contains the attributes below:\n config.bert_model_path: pretrained model path or model type\n e.g. 'bert-base-chinese'\n config.hidden_size: The same as BERT model, usually 768\n config.num_classes: int, e.g. 2\n config.dropout: float between 0 and 1\n \"\"\"\n super().__init__()\n if 'xl' in config.model_type:\n self.bert = AutoModel.from_pretrained(config.bert_model_path)\n else:\n self.bert = BertModel.from_pretrained(config.bert_model_path)\n\n for param in self.bert.parameters():\n param.requires_grad = True\n\n #cnn feature map has a total number of 228 dimensions.\n self.dropout = nn.Dropout(config.dropout)\n # 1-7: 228; 8-14: 1691\n self.linear = nn.Linear(config.hidden_size, config.num_classes)\n # self.linear_last = nn.Linear(config.max_seq_len, config.num_classes)\n #self.bn = nn.BatchNorm1d(config.num_classes)\n self.num_classes = config.num_classes\n\n def forward(self, input_ids, attention_mask, token_type_ids):\n \"\"\"Forward inputs and get logits.\n\n Args:\n input_ids: (batch_size, max_seq_len)\n attention_mask: (batch_size, max_seq_len)\n token_type_ids: (batch_size, max_seq_len)\n\n Returns:\n logits: (batch_size, num_classes)\n \"\"\"\n batch_size = input_ids.shape[0]\n bert_output = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n # encoder_hidden_states=False\n )\n\n encoded_output = bert_output[0]\n\n\n encoded_output = torch.mean(encoded_output, dim=1)\n pooled_output = self.dropout(encoded_output)\n\n # logits = logits.squeeze(dim=2)\n logits = self.linear(pooled_output)\n #logits = self.bn(logits)\n logits = nn.functional.softmax(logits, dim=-1)\n # logits: (batch_size, num_classes)\n return logits\n\n\nclass BertXLCForClassification(nn.Module):\n \"\"\"BERT with simple linear model.\"\"\"\n def __init__(self, config):\n \"\"\"Initialize the model with config dict.\n\n Args:\n config: python dict must contains the attributes below:\n config.bert_model_path: pretrained model path or model type\n e.g. 'bert-base-chinese'\n config.hidden_size: The same as BERT model, usually 768\n config.num_classes: int, e.g. 
2\n config.dropout: float between 0 and 1\n \"\"\"\n super().__init__()\n if 'xl' in config.model_type:\n self.bert = AutoModel.from_pretrained(config.bert_model_path)\n else:\n self.bert = BertModel.from_pretrained(config.bert_model_path)\n\n for param in self.bert.parameters():\n param.requires_grad = True\n self.dropout = nn.Dropout(config.dropout)\n self.linear = nn.Linear(4, config.num_classes)\n self.num_classes = config.num_classes\n\n self.dim_capsule = config.dim_capsule\n self.num_compressed_capsule = config.num_compressed_capsule\n self.ngram_size = [2, 4, 8]\n self.convs_doc = nn.ModuleList([nn.Conv1d(config.max_seq_len, 32, K, stride=2) for K in self.ngram_size])\n torch.nn.init.xavier_uniform_(self.convs_doc[0].weight)\n torch.nn.init.xavier_uniform_(self.convs_doc[1].weight)\n torch.nn.init.xavier_uniform_(self.convs_doc[2].weight)\n\n self.primary_capsules_doc = PrimaryCaps(num_capsules=self.dim_capsule, in_channels=32, out_channels=32,\n kernel_size=1, stride=1)\n\n self.flatten_capsules = FlattenCaps()\n\n if config.hidden_size == 768:\n self.W_doc = nn.Parameter(torch.FloatTensor(147328, self.num_compressed_capsule))\n else:#1024\n self.W_doc = nn.Parameter(torch.FloatTensor(196480, self.num_compressed_capsule))\n torch.nn.init.xavier_uniform_(self.W_doc)\n\n self.fc_capsules_doc_child = FCCaps(config, output_capsule_num=config.num_classes,\n input_capsule_num=self.num_compressed_capsule,\n in_channels=self.dim_capsule, out_channels=self.dim_capsule)\n\n\n def compression(self, poses, W):\n poses = torch.matmul(poses.permute(0, 2, 1), W).permute(0, 2, 1)\n activations = torch.sqrt((poses ** 2).sum(2))\n return poses, activations\n\n\n def forward(self, input_ids, attention_mask, token_type_ids):\n \"\"\"Forward inputs and get logits.\n\n Args:\n input_ids: (batch_size, max_seq_len)\n attention_mask: (batch_size, max_seq_len)\n token_type_ids: (batch_size, max_seq_len)\n\n Returns:\n logits: (batch_size, num_classes)\n \"\"\"\n batch_size = input_ids.shape[0]\n hiddens = self.bert(input_ids=input_ids, attention_mask=attention_mask,token_type_ids=token_type_ids,\n output_hidden_states=True)[1]\n hidden_state = torch.cat([*hiddens[-3:], hiddens[0]], dim=2)\n # bert_output[0]: (batch_size, sequence_length, hidden_size)\n # bert_output[1]: (batch_size, hidden_size)\n nets_doc_l = []\n for i in range(len(self.ngram_size)):\n nets = self.convs_doc[i](hidden_state)\n nets_doc_l.append(nets)\n nets_doc = torch.cat((nets_doc_l[0], nets_doc_l[1], nets_doc_l[2]), 2)\n poses_doc, activations_doc = self.primary_capsules_doc(nets_doc)\n poses, activations = self.flatten_capsules(poses_doc, activations_doc)\n poses, activations = self.compression(poses, self.W_doc)\n poses, logits = self.fc_capsules_doc_child(poses, activations, range(4))#4 types in total.\n\n logits = self.linear(logits.view(batch_size,-1)).view(batch_size, self.num_classes)\n return logits\n\n\nclass RnnForSentencePairClassification(nn.Module):\n \"\"\"Unidirectional GRU model for sentences pair classification.\n 2 sentences use the same encoder and concat to a linear model.\n \"\"\"\n def __init__(self, config):\n \"\"\"Initialize the model with config dict.\n\n Args:\n config: python dict must contains the attributes below:\n config.vocab_size: vocab size\n config.hidden_size: RNN hidden size and embedding dim\n config.num_classes: int, e.g. 
2\n config.dropout: float between 0 and 1\n \"\"\"\n super().__init__()\n self.embedding = nn.Embedding(\n config.vocab_size, config.hidden_size, padding_idx=0)\n self.rnn = nn.GRU(\n config.hidden_size, hidden_size=config.hidden_size,\n bidirectional=False, batch_first=True)\n self.linear = nn.Linear(config.hidden_size * 2, config.num_classes)\n self.dropout = nn.Dropout(config.dropout)\n self.num_classes = config.num_classes\n\n def forward(self, s1_ids, s2_ids, s1_lengths, s2_lengths):\n \"\"\"Forward inputs and get logits.\n\n Args:\n s1_ids: (batch_size, max_seq_len)\n s2_ids: (batch_size, max_seq_len)\n s1_lengths: (batch_size)\n s2_lengths: (batch_size)\n\n Returns:\n logits: (batch_size, num_classes)\n \"\"\"\n batch_size = s1_ids.shape[0]\n # ids: (batch_size, max_seq_len)\n s1_embed = self.embedding(s1_ids)\n s2_embed = self.embedding(s2_ids)\n # embed: (batch_size, max_seq_len, hidden_size)\n s1_packed: PackedSequence = pack_padded_sequence(\n s1_embed, s1_lengths, batch_first=True, enforce_sorted=False)\n s2_packed: PackedSequence = pack_padded_sequence(\n s2_embed, s2_lengths, batch_first=True, enforce_sorted=False)\n # packed: (sum(lengths), hidden_size)\n self.rnn.flatten_parameters()\n _, s1_hidden = self.rnn(s1_packed)\n _, s2_hidden = self.rnn(s2_packed)\n s1_hidden = s1_hidden.view(batch_size, -1)\n s2_hidden = s2_hidden.view(batch_size, -1)\n hidden = torch.cat([s1_hidden, s2_hidden], dim=-1)\n hidden = self.linear(hidden).view(-1, self.num_classes)\n hidden = self.dropout(hidden)\n logits = nn.functional.softmax(hidden, dim=-1)\n # logits: (batch_size, num_classes)\n return logits\n\n\nclass LogisticRegression(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.embedding = nn.Embedding(\n config.vocab_size, config.hidden_size, padding_idx=0)\n self.dropout = nn.Dropout(config.dropout)\n self.fc1 = nn.Linear(config.vocab_size, config.num_classes)\n\n def forward(self, s1_ids, s2_ids, s1_lengths, s2_lengths, **kwargs):\n batch_size = s1_ids.shape[0]\n s1_embed = self.embedding(s1_ids)\n s2_embed = self.embedding(s2_ids)\n # embed: (batch_size, max_seq_len, hidden_size)\n # s1_packed: PackedSequence = pack_padded_sequence(\n # s1_embed, s1_lengths, batch_first=True, enforce_sorted=False)\n # s2_packed: PackedSequence = pack_padded_sequence(\n # s2_embed, s2_lengths, batch_first=True, enforce_sorted=False)\n # _, s1_hidden = self.rnn(s1_packed)\n # _, s2_hidden = self.rnn(s2_packed)\n s1_hidden = s1_embed.view(batch_size, -1)\n s2_hidden = s2_embed.view(batch_size, -1)\n hidden = torch.cat([s1_hidden, s2_hidden], dim=-1)\n\n # x = torch.squeeze(hidden) # (batch, vocab_size)\n x = self.dropout(hidden)\n logit = self.fc1(x) # (batch, target_size)\n return logit\n\n\n\nclass CharCNN(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n # self.is_cuda_enabled = config.cuda\n\n num_conv_filters = config.num_conv_filters\n output_channel = config.output_channel\n hidden_size = config.hidden_size\n target_class = config.num_classes\n input_channel = config.hidden_size\n\n self.embedding = nn.Embedding(\n config.vocab_size, config.hidden_size, padding_idx=0)\n\n self.conv1 = nn.Conv1d(input_channel, num_conv_filters, kernel_size=7)\n self.conv2 = nn.Conv1d(num_conv_filters, num_conv_filters, kernel_size=7)\n self.conv3 = nn.Conv1d(num_conv_filters, num_conv_filters, kernel_size=3)\n self.conv4 = nn.Conv1d(num_conv_filters, num_conv_filters, kernel_size=3)\n self.conv5 = nn.Conv1d(num_conv_filters, num_conv_filters, kernel_size=3)\n self.conv6 = 
nn.Conv1d(num_conv_filters, output_channel, kernel_size=3)\n\n self.dropout = nn.Dropout(config.dropout)\n self.fc1 = nn.Linear(output_channel, hidden_size)\n self.fc2 = nn.Linear(hidden_size, hidden_size)\n self.fc3 = nn.Linear(hidden_size, target_class)\n\n def forward(self, s1_ids, s2_ids, s1_lengths, s2_lengths):\n batch_size = s1_ids.shape[0]\n # ids: (batch_size, max_seq_len)\n s1_embed = self.embedding(s1_ids)\n s2_embed = self.embedding(s2_ids)\n\n embed = torch.cat([s1_embed, s2_embed], dim=1)\n # embed: (batch_size, max_seq_len, hidden_size)\n # s1_packed: PackedSequence = pack_padded_sequence(\n # s1_embed, s1_lengths, batch_first=True, enforce_sorted=False)\n if torch.cuda.is_available():\n x = embed.transpose(1, 2).type(torch.cuda.FloatTensor)\n # x = embed.transpose(1, 2).type(torch.FloatTensor)\n else:\n x = embed.transpose(1, 2).type(torch.FloatTensor)\n\n x = F.max_pool1d(F.relu(self.conv1(x)), 3)\n x = F.max_pool1d(F.relu(self.conv2(x)), 3)\n x = F.relu(self.conv3(x))\n x = F.relu(self.conv4(x))\n x = F.relu(self.conv5(x))\n x = F.relu(self.conv6(x))\n\n x = F.max_pool1d(x, x.size(2)).squeeze(2)\n x = F.relu(self.fc1(x.view(x.size(0), -1)))\n x = self.dropout(x)\n x = F.relu(self.fc2(x))\n x = self.dropout(x)\n return self.fc3(x)", "# -*- coding: utf-8 -*-\n# @Time : 2020/6/16 8:56\n# @Author : THU\nimport os\nimport torch\n\n\ndef load_checkpoint(_model, resume_from, to_use_device, _optimizers=None, third_name=None):\n \"\"\"\n 加载预训练模型\n Args:\n _model: 模型\n resume_from: 预训练模型路径\n to_use_device: 设备\n _optimizers: 如果不为None,则表明采用模型的训练参数\n third_name: 第三方预训练模型的名称\n\n Returns:\n\n \"\"\"\n global_state = {}\n if not third_name:\n state = torch.load(resume_from, map_location=to_use_device)\n _model.load_state_dict(state['state_dict'])\n if _optimizers is not None:\n _optimizers.load_state_dict(state['optimizer'])\n if 'global_state' in state:\n global_state = state['global_state']\n\n elif third_name == 'paddle':\n import paddle.fluid as fluid\n paddle_model = fluid.io.load_program_state(resume_from)\n _model.load_3rd_state_dict(third_name, paddle_model)\n return _model, _optimizers, global_state\n\n\ndef save_checkpoint(checkpoint_path, model, _optimizers, logger, cfg, **kwargs):\n state = {\n 'state_dict': model.state_dict(),\n 'optimizer': _optimizers.state_dict(),\n 'cfg': cfg}\n state.update(kwargs)\n torch.save(state, checkpoint_path)\n logger.info('models saved to %s' % checkpoint_path)\n\n\ndef save_checkpoint_logic(total_loss, total_num, min_loss, net, solver, epoch, rec_train_options, logger):\n \"\"\"\n 根据配置文件保存模型\n Args:\n total_loss:\n total_num:\n min_loss:\n net:\n epoch:\n rec_train_options:\n logger:\n Returns:\n\n \"\"\"\n # operation for model save as parameter ckpt_save_type is HighestAcc\n if rec_train_options['ckpt_save_type'] == 'HighestAcc':\n loss_mean = sum([total_loss[idx] * total_num[idx] for idx in range(len(total_loss))]) / sum(total_num)\n if loss_mean < min_loss:\n min_loss = loss_mean\n save_checkpoint(os.path.join(rec_train_options['checkpoint_save_dir'], 'epoch_' + str(epoch) + '.pth'), net,\n solver, epoch, logger)\n\n else:\n if epoch % rec_train_options['ckpt_save_epoch'] == 0:\n save_checkpoint(os.path.join(rec_train_options['checkpoint_save_dir'], 'epoch_' + str(epoch) + '.pth'), net,\n solver, epoch, logger)\n return min_loss\n", "import argparse\nimport itertools\nimport logging\nimport os\nimport time\nfrom types import SimpleNamespace\nimport falcon\nimport pandas\nimport torch\nfrom falcon_cors import CORS\nimport waitress\nimport 
numpy as np\n\nimport json\nimport re\nfrom torch.utils.data import DataLoader\n\nfrom data import Data\nfrom evaluate import evaluate, handy_tool, calculate_accuracy_f1\nfrom model import BERNet, BERXLNet, NERNet, NERWNet\nfrom utils import load_torch_model\n\n\nMODEL_MAP = {\n 'bert': BERNet,\n 'bertxl': BERXLNet,\n 'rnn': NERNet,\n 'rnnkv': NERWNet\n}\n\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)-18s %(message)s')\nlogger = logging.getLogger()\ncors_allow_all = CORS(allow_all_origins=True,\n allow_origins_list=['*'],\n allow_all_headers=True,\n allow_all_methods=True,\n allow_credentials_all_origins=True\n )\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '-p', '--port', default=58081,\n help='falcon server port')\nparser.add_argument(\n '-c', '--config_file', default='config/bert_config-xl.json',\n help='model config file')\nargs = parser.parse_args()\nmodel_config=args.config_file\n\n\ndef result_to_json(string, tags):\n item = {\"string\": string, \"entities\": []}\n entity_name = \"\"\n entity_start = 0\n idx = 0\n i = -1\n zipped = zip(string, tags)\n listzip = list(zipped)\n last = len(listzip)\n for char, tag in listzip:\n i += 1\n if tag == 3:\n item[\"entities\"].append({\"word\": char, \"start\": idx, \"end\": idx+1, \"type\":'s'})\n elif tag == 0:\n entity_name += char\n entity_start = idx\n elif tag == 1:\n if (entity_name != \"\") and (i == last):\n entity_name += char\n item[\"entities\"].append({\"word\": entity_name, \"start\": entity_start, \"end\": idx + 1, \"type\": 'bms'})\n entity_name = \"\"\n else:\n entity_name += char\n elif tag == 2: # or i == len(zipped)\n entity_name += char\n item[\"entities\"].append({\"word\": entity_name, \"start\": entity_start, \"end\": idx + 1, \"type\": 'bms'})\n entity_name = \"\"\n else:\n entity_name = \"\"\n entity_start = idx\n idx += 1\n return item\n\n\n\nclass TorchResource:\n\n def __init__(self):\n logger.info(\"...\")\n # 0. Load config\n with open(model_config) as fin:\n self.config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))\n if torch.cuda.is_available():\n self.device = torch.device('cuda')\n else:\n self.device = torch.device('cpu')\n # 1. Load data\n self.data = Data(vocab_file=os.path.join(self.config.model_path, 'vocab.txt'),\n max_seq_len=self.config.max_seq_len,\n model_type=self.config.model_type, config=self.config)\n\n # 2. 
Load model\n self.model = MODEL_MAP[self.config.model_type](self.config)\n self.model = load_torch_model(\n self.model, model_path=os.path.join(self.config.model_path, 'model.bin'))\n self.model.to(self.device)\n logger.info(\"###\")\n\n def flatten(self, ll):\n return list(itertools.chain(*ll))\n\n def cleanall(self, content):\n return content.replace(\" \", \"\", 10**10)\n\n def split(self, content):\n line = re.findall('(.*?(?:[\\n ]|.$))', content)\n sublines = []\n for l in line:\n if len(l) > self.config.max_seq_len:\n ll = re.findall('(.*?(?:[。,]|.$))', l)\n sublines.extend(ll)\n else:\n sublines.append(l)\n sublines = [l for l in sublines if len(l.strip())> 0]\n return sublines\n\n def bert_classification(self, content):\n logger.info('1:{}'.format( content))\n lines = self.split(content)\n rows = []\n for line in lines:\n rows.append( {'content': line})\n df = pandas.DataFrame(rows)\n filename = \"data/{}.csv\".format(time.time())\n df.to_csv(filename, index=False, columns=['content'])\n test_set, sc_list, label_list, row_list = self.data.load_file(filename, train=False)\n\n # token_list = []\n # for line in sc_list:\n # tokens = self.data.tokenizer.convert_ids_to_tokens(line)\n # token_list.append(tokens)\n\n data_loader_test = DataLoader(\n test_set, batch_size=self.config.batch_size, shuffle=False)\n # Evaluate\n answer_list, length_list = evaluate(self.model, data_loader_test, self.device, isTest=True)\n mod_tokens_list = handy_tool(row_list, length_list)\n result = [result_to_json(t, s) for t, s in zip(mod_tokens_list, answer_list)]\n entities = [item['entities'] for item in result]\n entities = self.flatten(entities)\n\n return {\"data\": entities}\n\n def on_get(self, req, resp):\n logger.info(\"...\")\n resp.set_header('Access-Control-Allow-Origin', '*')\n resp.set_header('Access-Control-Allow-Methods', '*')\n resp.set_header('Access-Control-Allow-Headers', '*')\n resp.set_header('Access-Control-Allow-Credentials','true')\n content = req.get_param('text', True)\n # clean_content =\n #clean_content = self.cleanall(content)\n resp.media = self.bert_classification(content)\n logger.info(\"###\")\n\n\n def on_post(self, req, resp):\n \"\"\"Handles POST requests\"\"\"\n resp.set_header('Access-Control-Allow-Origin', '*')\n resp.set_header('Access-Control-Allow-Methods', '*')\n resp.set_header('Access-Control-Allow-Headers', '*')\n resp.set_header('Access-Control-Allow-Credentials', 'true')\n resp.set_header(\"Cache-Control\", \"no-cache\")\n data = req.stream.read(req.content_length)\n data = data.decode('utf-8')\n # regex = re.compile(r'\\\\(?![/u\"])')\n # data = regex.sub(r\"\\\\\", data)\n jsondata = json.loads(data)\n # clean_title = shortenlines(jsondata['1'])\n # clean_content = cleanall(jsondata['2'])\n content = jsondata['text']\n # clean_content = self.cleanall(content)\n resp.media = self.bert_classification(content)\n logger.info(\"###\")\n\nif __name__==\"__main__\":\n api = falcon.API(middleware=[cors_allow_all.middleware])\n api.req_options.auto_parse_form_urlencoded = True\n api.add_route('/z', TorchResource())\n waitress.serve(api, port=args.port, threads=48, url_scheme='http')\n" ]
[ [ "pandas.read_csv" ], [ "pandas.DataFrame" ], [ "torch.nn.Sequential", "torch.Tensor", "torch.load", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "matplotlib.pyplot.imshow", "numpy.array", "numpy.expand_dims", "matplotlib.pyplot.figure" ], [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.mean", "torch.cat", "torch.nn.GRU", "torch.nn.Conv2d", "torch.nn.Embedding", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.Linear", "torch.nn.Conv1d", "torch.nn.MaxPool2d", "torch.FloatTensor", "torch.cuda.is_available", "torch.nn.init.xavier_uniform_", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "torch.load", "torch.save" ], [ "torch.device", "torch.utils.data.DataLoader", "torch.cuda.is_available", "pandas.DataFrame" ] ]
natetsang/open-rl
[ "426723d0d6759672ce77e02afeb55cbeb68fcfb0" ]
[ "openrl/algorithms/imitation/imitation_learning.py" ]
[ "import gym\nimport time\nimport pickle\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom typing import Callable, Union, Tuple, List\nfrom models.models import actor_fc_discrete_network, actor_critic_fc_discrete_network\nfrom algorithms.imitation.utils import plot_training_results\nfrom util.replay_buffer import ReplayBuffer\n\n\n# Set up\nGAMMA = 0.99\nLEARNING_RATE = 0.0001\n\n\nclass ImitationAgent:\n def __init__(self,\n environment: gym.Env,\n model_fn: Callable[..., tf.keras.Model],\n optimizer: tf.keras.optimizers,\n run_dagger: bool,\n expert_policy,\n expert_data_path,\n replay_buffer: ReplayBuffer,\n model_kwargs: dict = None,\n train_kwargs: dict = None,\n save_dir: str = None) -> None:\n # Env vars\n self.env = environment\n self.state_dims = model_kwargs.get('state_dims')\n self.num_actions = model_kwargs.get('num_actions')\n\n num_hidden_layers = model_kwargs.get(\"num_hidden_layers\")\n hidden_size = model_kwargs.get(\"hidden_size\")\n\n # Algorithm\n self.run_dagger = run_dagger\n\n # Expert\n self.expert_policy = expert_policy\n self.expert_data = ImitationAgent.load_expert_data(expert_data_path)\n\n # Actor model\n self.model = model_fn(state_dims=self.state_dims,\n num_actions=self.num_actions,\n num_hidden_layers=num_hidden_layers,\n hidden_size=hidden_size)\n\n self.optimizer = optimizer\n self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) # Discrete action space only\n\n # Replay buffer\n self.replay_buffer = replay_buffer\n\n # Training vars\n self.cur_episode = 0\n self.total_steps = 0\n self.max_ep_len = train_kwargs.get(\"max_ep_len\")\n self.batch_size = train_kwargs.get(\"batch_size\") # Batch size of data collection from buffer\n self.train_batch_size = train_kwargs.get('train_batch_size') # Batch size for training models\n self.eval_batch_size = train_kwargs.get('eval_batch_size') # Batch size for eval\n self.num_agent_train_steps_per_iter = train_kwargs.get('num_agent_train_steps_per_iter') # Grad updates per run\n\n # Save directories\n self.save_dir = save_dir\n\n def save_models(self) -> None:\n self.model.save(self.save_dir)\n\n def load_models(self) -> tf.keras.Model:\n self.model = tf.keras.models.load_model(self.save_dir)\n return self.model\n\n @staticmethod\n def load_expert_data(path):\n with open(path, 'rb') as f:\n expert_data = pickle.load(f)\n return expert_data\n\n def sample_random_trajectory(self) -> Tuple[List[Tuple], Union[int, float]]:\n \"\"\"\n Sample 1 trajectory.\n\n :param max_path_length: the maximum number of steps to take in the trajectory\n :param random: whether or not to sample actions randomly or using MPC\n :return:\n \"\"\"\n state = tf.expand_dims(tf.convert_to_tensor(self.env.reset()), 0)\n num_steps = 0\n total_rewards = 0\n transitions = [] # transition tuples (s,a,r,s',d)\n while True:\n num_steps += 1\n action_prob = self.model(state)\n action = np.random.choice(self.num_actions, p=np.squeeze(action_prob))\n next_state, reward, done, _ = self.env.step(action)\n next_state = tf.reshape(next_state, [1, self.state_dims])\n\n total_rewards += reward\n\n if done or num_steps > self.max_ep_len:\n transitions.append((state, action, reward, next_state, 1))\n break\n\n transitions.append((state, action, reward, next_state, 0))\n state = next_state\n\n return transitions, total_rewards\n\n def sample_n_trajectories(self) -> Tuple[List, List, int]:\n \"\"\"\n Sample `self.batch_size` trajectories. Each trajectory should be no longer than\n `max_path_length` steps/transitions. 
Note that transitions are different than trajectories!\n A transition is a tuple (s,a,r,s',d) and a trajectory is made up of 1 to `max_path_length` transitions.\n\n :param batch_size: The number of transitions to sample.\n :param max_path_length: The maximum steps/transitions per trajectory\n :param random: Boolean to indicate whether or not to sample actions randomly or via MPC\n :return:\n \"\"\"\n num_steps_this_batch = 0\n trajectory_rewards = []\n transitions = []\n while num_steps_this_batch < self.batch_size:\n traj, rews = self.sample_random_trajectory()\n num_steps_this_batch += len(traj)\n trajectory_rewards.append(rews)\n # Note that we're extending, not appending, because we don't care about trajectories, we care about\n # the transitions. If we appended, it would be ([[(tran 1), (tran 2)], ..., [(tran n), (tran n+1)]],\n # where each sublist is a trajectory. But by extending, it's instead ([(tran 1), ..., (tran n)]\n transitions.extend(traj)\n return transitions, trajectory_rewards, num_steps_this_batch\n\n def relabel_actions_with_expert(self, transitions: List[Tuple]) -> List[Tuple]:\n \"\"\"\n Given a batch of transition tuples, query the Expert Policy and update the action based on\n the Expert. This is the key difference between vanilla behavioral cloning and DAgger. This\n step is equivalent to asking a human expert to label our dataset with the correct actions.\n \"\"\"\n updated_transitions = []\n for transition in transitions:\n state, action, reward, next_state, done = transition\n action_prob, _ = self.expert_policy(state)\n expert_action = np.argmax(np.squeeze(action_prob))\n updated_transitions.append((state, expert_action, reward, next_state, done))\n return updated_transitions\n\n def train_episode(self) -> List:\n # Step 1: Sample trajectories\n if self.cur_episode == 0:\n # Load expert_data\n transitions = self.expert_data\n else:\n # Or sample trajectories using current policy\n transitions, _, _ = self.sample_n_trajectories()\n\n # Step 2: For DAgger only, ask expert policy to label data with actions\n if self.run_dagger and self.cur_episode > 0:\n transitions = self.relabel_actions_with_expert(transitions)\n\n # Step 3: Store the sampled transitions in the replay buffer\n self.replay_buffer.store_transitions_batch(transitions)\n\n # Step 4: Train model!\n losses = []\n for train_step in range(self.num_agent_train_steps_per_iter):\n # Sample a random batch of data from the replay buffer\n states, actions, _, _, _ = self.replay_buffer.sample(batch_size=self.train_batch_size)\n\n with tf.GradientTape() as tape:\n action_prob = self.model(states)\n loss = self.loss_fn(actions, action_prob)\n grads = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))\n\n losses.append(loss)\n self.cur_episode += 1\n return losses\n\n def run_agent(self, render=False) -> Tuple[float, int]:\n total_reward, total_steps = 0, 0\n state = self.env.reset()\n done = False\n\n while not done:\n if render:\n self.env.render()\n\n # Select action\n action_prob = self.model(tf.expand_dims(state, axis=0))\n action = np.argmax(np.squeeze(action_prob))\n\n # Interact with environment\n state, reward, done, _ = self.env.step(action)\n\n # Bookkeeping\n total_reward += reward\n total_steps += 1\n return total_reward, total_steps\n\n\ndef main() -> None:\n # Check input params\n if args.run_dagger:\n assert args.epochs > 1, \"DAgger needs more than 1 iteration of training, where each iter \" \\\n \"we query the expert and train\"\n else:\n assert args.epochs == 1, \"Vanilla behavior cloning collects expert data only once and does traditional \" \\\n \"supervised learning on that dataset.\"\n\n # Create environment\n env = gym.make(args.env)\n\n # Set seeds\n if args.seed:\n np.random.seed(args.seed)\n tf.random.set_seed(args.seed)\n env.seed(args.seed)\n\n # Create helper vars for model creation\n _state_dims = len(env.observation_space.high)\n _action_dims = 1\n _num_actions = env.action_space.n\n\n # Create Replay Buffer\n buffer = ReplayBuffer(state_dims=_state_dims, action_dims=_action_dims)\n\n # Instantiate optimizer\n opt = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)\n\n # Instantiate expert policy from file\n # TODO >> I think it's a bit cleaner to load the entire model instead of just the weights\n # but I'm getting a TF error that I think was fixed in a later version. I should probably\n # try updating the version and seeing if it fixes itself.\n expert = actor_critic_fc_discrete_network(state_dims=_state_dims,\n num_actions=_num_actions,\n num_hidden_layers=2,\n hidden_size=128)\n expert.load_weights(args.expert_policy_file)\n\n # Create agent\n agent = ImitationAgent(environment=env,\n model_fn=actor_fc_discrete_network,\n optimizer=opt,\n replay_buffer=buffer,\n run_dagger=args.run_dagger,\n expert_policy=expert,\n expert_data_path=args.expert_data,\n model_kwargs=dict(state_dims=_state_dims,\n num_actions=_num_actions,\n num_hidden_layers=2,\n hidden_size=256),\n train_kwargs=dict(max_ep_len=args.max_ep_len,\n batch_size=args.batch_size,\n train_batch_size=args.train_batch_size,\n eval_batch_size=args.eval_batch_size,\n num_agent_train_steps_per_iter=args.num_agent_train_steps_per_iter)\n )\n\n # Run training\n ep_mean_rewards_history, ep_max_rewards_history, ep_min_rewards_history = [], [], []\n ep_mean_loss_history, ep_max_loss_history, ep_min_loss_history = [], [], []\n ep_steps_history = []\n ep_wallclock_history = []\n start = time.time()\n for e in range(args.epochs):\n # Run one episode\n ep_loss = agent.train_episode()\n ep_rew, ep_steps = agent.run_agent()\n\n # Prepare for logging\n mean_ep_rew, max_ep_rew, min_ep_rew, std_ep_rew = np.mean(ep_rew), np.max(ep_rew), np.min(ep_rew), np.std(ep_rew)\n mean_ep_loss, max_ep_loss, min_ep_loss = np.mean(ep_loss), np.max(ep_loss), np.min(ep_loss)\n ep_wallclock_history.append(time.time() - start)\n\n ep_mean_rewards_history.append(mean_ep_rew)\n ep_max_rewards_history.append(max_ep_rew)\n ep_min_rewards_history.append(min_ep_rew)\n\n ep_mean_loss_history.append(mean_ep_loss)\n ep_max_loss_history.append(max_ep_loss)\n ep_min_loss_history.append(min_ep_loss)\n\n ep_steps_history.append(ep_steps)\n\n template = \"EPISODE {} | mean ep reward: {:.2f} - max ep reward: {:.2f}\" \\\n \" - min ep reward: {:.2f} - std ep reward: {:.2f} - mean ep loss {:.2f}\"\n print(template.format(e, mean_ep_rew, max_ep_rew, min_ep_rew, std_ep_rew, mean_ep_loss))\n\n # Now that we've completed training, let's plot the results\n print(f\"Training time elapsed (sec): {round(time.time() - start, 2)}\")\n\n # Let's evaluate the performance of the trained agent\n print(\"Beginning evaluation of trained agent!\")\n eval_rew = []\n for i in range(50):\n ep_rew, ep_steps = agent.run_agent()\n eval_rew.append(ep_rew)\n print(f\"Evaluation rewards: mean - {np.mean(eval_rew)} | min - {np.min(eval_rew)} | max - {np.max(eval_rew)}\")\n\n # Plot summary of results\n plot_training_results(mean_rewards_history=ep_mean_rewards_history,\n max_rew_history=ep_max_rewards_history,\n min_rew_history=ep_min_rewards_history,\n mean_loss_history=ep_mean_loss_history,\n max_loss_history=ep_max_loss_history,\n min_loss_history=ep_min_loss_history,\n steps_history=ep_steps_history,\n wallclock_history=ep_wallclock_history,\n save_dir=\"./results.png\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--seed\", type=int, default=1)\n parser.add_argument(\"--env\", type=str, default=\"CartPole-v0\")\n parser.add_argument('--expert_policy_file', type=str, default='./checkpoints/expert_model_weights')\n parser.add_argument('--expert_data', type=str, default='expert_data.pkl')\n # parser.add_argument(\"--run_dagger\", action=\"store_false\")\n parser.add_argument(\"--run_dagger\", type=bool, default=False)\n parser.add_argument(\"--epochs\", type=int, default=1)\n parser.add_argument('--max_ep_len', type=int, default=100) # max trajectory length\n\n parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=20) # number of grad updates per iter\n parser.add_argument('--batch_size', type=int, default=1000) # num steps/transitions to sample for itr 1+\n parser.add_argument('--train_batch_size', type=int, default=512) # training batch size per model\n parser.add_argument('--eval_batch_size', type=int, default=400) # steps collected per eval iteration\n args = parser.parse_args()\n\n main()\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.random.seed", "numpy.min", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "numpy.squeeze", "tensorflow.reshape", "tensorflow.expand_dims", "numpy.max", "numpy.std", "tensorflow.keras.optimizers.Adam", "numpy.mean", "tensorflow.random.set_seed", "tensorflow.GradientTape" ] ]
solazu/FinRL-Library
[ "6cfe00933c16fc8a74efc9fb3d9cfa1b3bf296ea" ]
[ "finrl/commands/data_commands.py" ]
[ "import logging\nimport sys\nimport yfinance\nimport pandas as pd\nimport yfinance as yf\nimport os\n\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom typing import Any, Dict, List\n\n\nfrom finrl.config import TimeRange, setup_utils_configuration\nfrom finrl.data.converter import convert_ohlcv_format, convert_trades_format\nfrom finrl.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data,\n refresh_backtest_trades_data)\nfrom finrl.exceptions import OperationalException\nfrom finrl.exchange import timeframe_to_minutes\nfrom finrl.resolvers import ExchangeResolver\nfrom finrl.state import RunMode\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef start_download_cryptodata(args: Dict[str, Any]) -> None:\n \"\"\"\n Parameters:\n ARGS_DOWNLOAD_DATA = {'config': ['config.json'], 'datadir': None, \n 'user_data_dir': None, 'pairs': None, 'pairs_file': None, \n 'days': 160, 'timerange': None, \n 'download_trades': False, 'exchange': 'binance', \n 'timeframes': ['1d'], 'erase': False, \n 'dataformat_ohlcv': None, 'dataformat_trades': None}\n \n Returns:\n Json files in user_data/data/exchange/*.json\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n if 'days' in config and 'timerange' in config:\n raise OperationalException(\"--days and --timerange are mutually exclusive. \"\n \"You can only specify one or the other.\")\n timerange = TimeRange()\n if 'days' in config:\n time_since = (datetime.now() - timedelta(days=config['days'])).strftime(\"%Y%m%d\")\n timerange = TimeRange.parse_timerange(f'{time_since}-')\n\n if 'timerange' in config:\n timerange = timerange.parse_timerange(config['timerange'])\n\n # Remove stake-currency to skip checks which are not relevant for datadownload\n config['stake_currency'] = ''\n\n if 'pairs' not in config:\n raise OperationalException(\n \"Downloading data requires a list of pairs. \"\n \"Please check the documentation on how to configure this.\")\n\n logger.info(f\"About to download pairs: {config['pairs']}, \"\n f\"intervals: {config['timeframes']} to {config['datadir']}\")\n\n pairs_not_available: List[str] = []\n\n # Init exchange\n exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)\n # Manual validations of relevant settings\n exchange.validate_pairs(config['pairs'])\n for timeframe in config['timeframes']:\n exchange.validate_timeframes(timeframe)\n\n try:\n\n if config.get('download_trades'):\n pairs_not_available = refresh_backtest_trades_data(\n exchange, pairs=config['pairs'], datadir=config['datadir'],\n timerange=timerange, erase=bool(config.get('erase')),\n data_format=config['dataformat_trades'])\n\n # Convert downloaded trade data to different timeframes\n convert_trades_to_ohlcv(\n pairs=config['pairs'], timeframes=config['timeframes'],\n datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),\n data_format_ohlcv=config['dataformat_ohlcv'],\n data_format_trades=config['dataformat_trades'],\n )\n else:\n pairs_not_available = refresh_backtest_ohlcv_data(\n exchange, pairs=config['pairs'], timeframes=config['timeframes'],\n datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),\n data_format=config['dataformat_ohlcv'])\n\n except KeyboardInterrupt:\n sys.exit(\"Interrupt received, aborting ...\")\n\n finally:\n if pairs_not_available:\n logger.info(f\"Pairs [{','.join(pairs_not_available)}] not available \"\n f\"on exchange {exchange.name}.\")\n\ndef start_download_stockdata(args: Dict[str, Any]) -> None:\n \"\"\"Fetches data from Yahoo API\n Parameters\n ----------\n ticker_list, timerange, \n Returns\n -------\n Json of data\n \"\"\"\n args[\"exchange\"] = \"yahoo\"\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n \n\n if 'days' in config and 'timerange' in config:\n raise OperationalException(\"--days and --timerange are mutually exclusive. \"\n \"You can only specify one or the other.\")\n\n config[\"datadir\"] = \"user_data/data/yahoo\"\n\n timerange = TimeRange()\n if 'days' in config:\n time_since = (datetime.now() - timedelta(days=config['days'])).strftime(\"%Y%m%d\")\n timerange = TimeRange.parse_timerange(f'{time_since}-')\n start = datetime.fromtimestamp(timerange.startts).strftime(\"%Y-%m-%d\")\n end = datetime.now().strftime(\"%Y-%m-%d\")\n\n if 'timerange' in config:\n timerange = timerange.parse_timerange(config['timerange'])\n start = datetime.fromtimestamp(timerange.startts).strftime(\"%Y-%m-%d\")\n end = datetime.fromtimestamp(timerange.stopts).strftime(\"%Y-%m-%d\")\n try:\n data_df = pd.DataFrame()\n for tic in config['ticker_list']:\n temp_df = yf.download(tic, start=start, end=end)\n temp_df.columns = [\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"adjcp\",\n \"volume\",\n ]\n temp_df[\"close\"] = temp_df[\"adjcp\"]\n temp_df = temp_df.drop([\"adjcp\"], axis=1)\n temp_df.to_json(f'{os.getcwd()}/{config[\"datadir\"]}/{tic}.json')\n except KeyboardInterrupt:\n sys.exit(\"Interrupt received, aborting ...\")\n\n\n\n\n\ndef start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:\n \"\"\"\n Convert data from one format to another\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n if ohlcv:\n convert_ohlcv_format(config,\n convert_from=args['format_from'], convert_to=args['format_to'],\n erase=args['erase'])\n else:\n convert_trades_format(config,\n convert_from=args['format_from'], convert_to=args['format_to'],\n erase=args['erase'])\n\n\ndef start_list_data(args: Dict[str, Any]) -> None:\n \"\"\"\n List available backtest data\n \"\"\"\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n from tabulate import tabulate\n\n from freqtrade.data.history.idatahandler import get_datahandler\n dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv'])\n\n paircombs = dhc.ohlcv_get_available_data(config['datadir'])\n\n if args['pairs']:\n paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]\n\n print(f\"Found {len(paircombs)} pair / timeframe combinations.\")\n groupedpair = defaultdict(list)\n for pair, timeframe in sorted(paircombs, key=lambda x: (x[0], timeframe_to_minutes(x[1]))):\n groupedpair[pair].append(timeframe)\n\n if groupedpair:\n print(tabulate([(pair, ', '.join(timeframes)) for pair, timeframes in groupedpair.items()],\n headers=(\"Pair\", \"Timeframe\"),\n tablefmt='psql', stralign='right'))\n" ]
[ [ "pandas.DataFrame" ] ]
megodoonch/birdsong
[ "582e7ddecf6c9c1b75f17418097f7bcbf6784d31", "582e7ddecf6c9c1b75f17418097f7bcbf6784d31" ]
[ "surface/misc.py", "markhov/bigrams.py" ]
[ "import numpy as np\n\n# Generate some n number of colours, hopefully maximally different\n\n\n# source: http://stackoverflow.com/questions/470690/how-to-automatically-generate-n-distinct-colors\nimport colorsys\n\ndef get_colors(num_colors):\n colors=[]\n for i in np.arange(0., 360., 360. / num_colors):\n hue = i/360.\n lightness = (20 + np.random.rand() * 10)/100.\n saturation = (90 + np.random.rand() * 10)/100.\n colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))\n return colors\n\n\n\n\ndef cohens_d(x, y):\n lx = len(x)- 1\n ly = len(y)- 1\n md = abs(np.mean(x) - np.mean(y)) ## mean difference (numerator)\n csd = lx * np.var(x) + ly * np.var(y)\n csd = csd/(lx + ly)\n csd = np.sqrt(csd) ## common sd computation\n cd = md/csd ## cohen's d\n return cd\n\n\n\n\n\n\n\ndef get_freqs(lst):\n # Return a list of pairs that correspond to counts of \n # elements: (a,n) means that a appeared n times in the list.\n # The list is ordered by a, whatever order that variable has.\n counts = {}\n for l in lst:\n counts[l] = counts.get(l,0)+1\n\n # Convert to a list of pairs\n pairs = counts.items()\n\n # Order by the first element of each pair\n # pairs = pairs.sort(cmp=lambda (x,na),(y,nb): cmp(x,y))\n\n return pairs\n", "\"\"\"turning corpus into bigram chain\"\"\"\n\nimport numpy as np\nimport random\n\n\nf=open('../corpus/cath8.txt','r')\ncorpus = f.readlines()\nf.close()\ncorpus = [line.rstrip('\\n') for line in corpus]\n\n\n\nbis = {'[':{}}\nfor s in corpus:\n s=['[']+s.split(' ')\n for i in range(1,len(s)):\n bis[s[i-1]]=bis.get(s[i-1],{})\n bis[s[i-1]][s[i]]=bis[s[i-1]].get(s[i],0)+1\n\nfor lhs in bis:\n tot=float(sum(bis[lhs].values()))\n for rhs in bis[lhs]:\n bis[lhs][rhs]=bis[lhs][rhs]/tot\n\n\ndef bis_log(bigrams):\n for a in bigrams:\n for b in bigrams[a]:\n bigrams[a][b]=np.log(bigrams[a][b])\n return bigrams\n\nbigrams = bis_log(bis)\n\ndef bis_random(bigrams):\n \"\"\"\n gives bigrams random probabilities that sum to 1 for each lhs\n\n Arguments\n bigrams : bigram markhov chain as a dict of dicts\n\n Returns\n bigrams with probs replaced with random (log) probs\n \"\"\"\n for a in bigrams:\n n=len(bigrams[a])\n print (n)\n r = [random.random() for i in range(n)]\n s = sum(r)\n r = [ i/s for i in r ]\n i=0\n print (r[i])\n for b in bigrams[a]:\n bigrams[a][b] = np.log(r[i])\n i+=1\n return bigrams\n" ]
[ [ "numpy.sqrt", "numpy.arange", "numpy.mean", "numpy.random.rand", "numpy.var" ], [ "numpy.log" ] ]
saidineshpola/Knowledge-Distillation-Toolkit
[ "b05ebc28ae1385c9caa1c4c1c93db2d67356e85f", "b05ebc28ae1385c9caa1c4c1c93db2d67356e85f", "b05ebc28ae1385c9caa1c4c1c93db2d67356e85f", "b05ebc28ae1385c9caa1c4c1c93db2d67356e85f", "b05ebc28ae1385c9caa1c4c1c93db2d67356e85f", "b05ebc28ae1385c9caa1c4c1c93db2d67356e85f", "b05ebc28ae1385c9caa1c4c1c93db2d67356e85f" ]
[ "utils/fairseq_mod/fairseq_mod/criterions/cross_entropy.py", "examples/resnet_compression_demo/train_teacher_net.py", "utils/fairseq_mod/tests/test_concat_dataset.py", "utils/fairseq_mod/fairseq_mod/models/wav2vec/teacher_wav2vec2.py", "utils/fairseq_mod/examples/byte_level_bpe/gru_transformer.py", "utils/fairseq_mod/fairseq_mod/models/transformer.py", "utils/fairseq_mod/examples/speech_recognition/criterions/ASG_loss.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom dataclasses import dataclass\n\nimport torch.nn.functional as F\nfrom fairseq_mod import metrics, utils\nfrom fairseq_mod.criterions import FairseqCriterion, register_criterion\nfrom fairseq_mod.dataclass import FairseqDataclass\nfrom omegaconf import II\n\n\n@dataclass\nclass CrossEntropyCriterionConfig(FairseqDataclass):\n sentence_avg: bool = II(\"params.optimization.sentence_avg\")\n\n\n@register_criterion(\"cross_entropy\", dataclass=CrossEntropyCriterionConfig)\nclass CrossEntropyCriterion(FairseqCriterion):\n def __init__(self, task, sentence_avg):\n super().__init__(task)\n self.sentence_avg = sentence_avg\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n net_output = model(**sample[\"net_input\"])\n loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)\n sample_size = (\n sample[\"target\"].size(0) if self.sentence_avg else sample[\"ntokens\"]\n )\n logging_output = {\n \"loss\": loss.data,\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"target\"].size(0),\n \"sample_size\": sample_size,\n }\n return loss, sample_size, logging_output\n\n def compute_loss(self, model, net_output, sample, reduce=True):\n lprobs = model.get_normalized_probs(net_output, log_probs=True)\n lprobs = lprobs.view(-1, lprobs.size(-1))\n target = model.get_targets(sample, net_output).view(-1)\n loss = F.nll_loss(\n lprobs,\n target,\n ignore_index=self.padding_idx,\n reduction=\"sum\" if reduce else \"none\",\n )\n return loss, loss\n\n @staticmethod\n def reduce_metrics(logging_outputs) -> None:\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n loss_sum = sum(log.get(\"loss\", 0) for log in logging_outputs)\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n\n metrics.log_scalar(\n \"loss\", loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n if sample_size != ntokens:\n metrics.log_scalar(\n \"nll_loss\", loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_derived(\n \"ppl\", lambda meters: utils.get_perplexity(meters[\"nll_loss\"].avg)\n )\n else:\n metrics.log_derived(\n \"ppl\", lambda meters: utils.get_perplexity(meters[\"loss\"].avg)\n )\n\n @staticmethod\n def logging_outputs_can_be_summed() -> bool:\n \"\"\"\n Whether the logging outputs returned by `forward` can be summed\n across workers prior to calling `reduce_metrics`. 
Setting this\n to True will improves distributed training speed.\n \"\"\"\n return True\n", "from tqdm import tqdm\n\nimport torch\nfrom torch import nn, optim\n\nimport torchvision\nfrom torchvision.models.resnet import ResNet, BasicBlock\nfrom torchvision import datasets, transforms\n\nfrom inference_pipeline import inference_pipeline\n\nclass TeacherModel(ResNet):\n def __init__(self):\n super(TeacherModel, self).__init__(BasicBlock, [3, 4, 6, 3], num_classes=10) #ResNet34\n self.conv1 = torch.nn.Conv2d(1, 64,\n kernel_size=(7, 7),\n stride=(2, 2),\n padding=(3, 3), bias=False)\n\ndef train(model, train_loader, optimizer, loss_function, device):\n total_loss = 0\n model.train()\n progress = tqdm(enumerate(train_loader), desc=\"Train Loss: \", total=len(train_loader))\n for i, data in progress:\n X, y = data[0].to(device), data[1].to(device)\n optimizer.zero_grad()\n outputs = model(X)\n loss = loss_function(outputs, y)\n loss.backward()\n optimizer.step()\n current_loss = loss.item()\n total_loss += current_loss\n progress.set_description(\"Train Loss: {:.4f}\".format(total_loss/(i+1)))\n return model\n\ndef main():\n total_epoch = 5\n device = torch.device(\"cuda\")\n\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n train_kwargs = {'batch_size': 64, 'num_workers': 4}\n test_kwargs = {'batch_size': 1000, 'num_workers': 4}\n train_dataset = datasets.MNIST('./data', train=True, download=False, transform=transform)\n test_dataset = datasets.MNIST('./data', train=False, transform=transform)\n train_loader = torch.utils.data.DataLoader(train_dataset,**train_kwargs)\n test_loader = torch.utils.data.DataLoader(test_dataset, **test_kwargs)\n\n model = TeacherModel().to(device)\n optimizer = optim.Adadelta(model.parameters())\n loss_function = nn.CrossEntropyLoss()\n inference_pipeline_example = inference_pipeline(device)\n\n for epoch in range(total_epoch):\n model = train(model, train_loader, optimizer, loss_function, device)\n result = inference_pipeline_example.run_inference_pipeline(model, test_loader)\n val_acc = result[\"inference_result\"]\n print(f\"epoch {epoch}, validation accuracy = {val_acc} \\n\")\n torch.save(model.state_dict(), \"./saved_model/resnet34_teacher.pt\")\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport unittest\n\nimport torch\nfrom fairseq_mod.data import LanguagePairDataset, TokenBlockDataset\nfrom fairseq_mod.data.concat_dataset import ConcatDataset\nfrom tests.test_train import mock_dict\n\n\nclass TestConcatDataset(unittest.TestCase):\n def setUp(self):\n d = mock_dict()\n tokens_1 = torch.LongTensor([1]).view(1, -1)\n tokens_ds1 = TokenBlockDataset(\n tokens_1,\n sizes=[tokens_1.size(-1)],\n block_size=1,\n pad=0,\n eos=1,\n include_targets=False,\n )\n self.dataset_1 = LanguagePairDataset(\n tokens_ds1, tokens_ds1.sizes, d, shuffle=False\n )\n tokens_2 = torch.LongTensor([2]).view(1, -1)\n tokens_ds2 = TokenBlockDataset(\n tokens_2,\n sizes=[tokens_2.size(-1)],\n block_size=1,\n pad=0,\n eos=1,\n include_targets=False,\n )\n self.dataset_2 = LanguagePairDataset(\n tokens_ds2, tokens_ds2.sizes, d, shuffle=False\n )\n\n def test_concat_dataset_basics(self):\n d = ConcatDataset(\n [self.dataset_1, self.dataset_2]\n )\n assert(len(d) == 2)\n assert(d[0]['source'][0] == 1)\n assert(d[1]['source'][0] == 2)\n\n d = ConcatDataset(\n [self.dataset_1, self.dataset_2], sample_ratios=[1, 2]\n )\n assert(len(d) == 3)\n assert(d[0]['source'][0] == 1)\n assert(d[1]['source'][0] == 2)\n assert(d[2]['source'][0] == 2)\n\n d = ConcatDataset(\n [self.dataset_1, self.dataset_2], sample_ratios=[2, 1]\n )\n assert(len(d) == 3)\n assert(d[0]['source'][0] == 1)\n assert(d[1]['source'][0] == 1)\n assert(d[2]['source'][0] == 2)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport math\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom typing import List, Tuple\n\nfrom fairseq_mod import utils\nfrom fairseq_mod.data.data_utils import compute_mask_indices\nfrom fairseq_mod.models import BaseFairseqModel, register_model, register_model_architecture\nfrom fairseq_mod.modules import (\n Fp32GroupNorm,\n Fp32LayerNorm,\n GradMultiply,\n GumbelVectorQuantizer,\n LayerNorm,\n MultiheadAttention,\n SamePad,\n TransposeLast,\n)\nfrom fairseq_mod.modules.transformer_sentence_encoder import init_bert_params\nfrom fairseq_mod.utils import buffered_arange\n\n\n@register_model(\"teacher_wav2vec2\")\nclass TeacherWav2Vec2Model(BaseFairseqModel):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n\n parser.add_argument(\n \"--extractor-mode\",\n choices=[\"default\", \"layer_norm\"],\n help=\"mode for feature extractor. 
default has a single group norm with d groups in the first conv block, whereas layer_norm has layer norms in every block (meant to use with --normalize)\",\n )\n\n parser.add_argument(\n \"--encoder-layers\",\n type=int,\n metavar=\"L\",\n help=\"num encoder layers in the transformer\",\n )\n parser.add_argument(\n \"--encoder-embed-dim\",\n type=int,\n metavar=\"H\",\n help=\"encoder embedding dimension\",\n )\n parser.add_argument(\n \"--encoder-ffn-embed-dim\",\n type=int,\n metavar=\"F\",\n help=\"encoder embedding dimension for FFN\",\n )\n parser.add_argument(\n \"--encoder-attention-heads\",\n type=int,\n metavar=\"A\",\n help=\"num encoder attention heads\",\n )\n parser.add_argument(\n \"--activation-fn\",\n choices=utils.get_available_activation_fns(),\n help=\"activation function to use\",\n )\n\n parser.add_argument(\n \"--dropout\",\n type=float,\n metavar=\"D\",\n help=\"dropout probability for the transformer\",\n )\n\n parser.add_argument(\n \"--attention-dropout\",\n type=float,\n metavar=\"D\",\n help=\"dropout probability for attention weights\",\n )\n\n parser.add_argument(\n \"--activation-dropout\",\n type=float,\n metavar=\"D\",\n help=\"dropout probability after activation in FFN\",\n )\n\n parser.add_argument(\n \"--final-dim\",\n type=int,\n metavar=\"D\",\n help=\"project final representations and targets to this many dimensions\",\n )\n\n parser.add_argument(\n \"--layer-norm-first\",\n action=\"store_true\",\n help=\"apply layernorm first in the transformer\",\n )\n\n parser.add_argument(\n \"--encoder-layerdrop\",\n type=float,\n help=\"probability of dropping a tarnsformer layer\",\n )\n\n parser.add_argument(\n \"--conv-feature-layers\",\n type=str,\n metavar=\"EXPR\",\n help=\"convolutional feature extraction layers [(dim, kernel_size, stride), ...]\",\n )\n\n parser.add_argument(\n \"--logit-temp\", type=float, help=\"temperature to divide logits by\"\n )\n\n parser.add_argument(\n \"--quantize-targets\", action=\"store_true\", help=\"use quantized targets\"\n )\n\n parser.add_argument(\n \"--quantize-input\", action=\"store_true\", help=\"use quantized inputs\"\n )\n\n parser.add_argument(\n \"--same-quantizer\",\n action=\"store_true\",\n help=\"use same quantizer for inputs and targets\",\n )\n\n parser.add_argument(\n \"--feature-grad-mult\",\n type=float,\n help=\"multiply feature extractor var grads by this\",\n )\n\n parser.add_argument(\n \"--latent-vars\",\n type=int,\n metavar=\"N\",\n help=\"number of latent variables V in each group of the codebook\",\n )\n\n parser.add_argument(\n \"--latent-groups\",\n type=int,\n metavar=\"N\",\n help=\"number of groups G of latent variables in the codebook\",\n )\n\n parser.add_argument(\n \"--latent-dim\",\n type=int,\n metavar=\"N\",\n help=\"if set, uses this dimensionality for latent variables. 
otherwise uses final_dim / latent_groups\",\n )\n\n parser.add_argument(\"--mask-length\", type=int, help=\"mask length\")\n\n parser.add_argument(\n \"--mask-prob\", type=float, help=\"probability of replacing a token with mask\"\n )\n\n parser.add_argument(\n \"--mask-selection\",\n type=str,\n choices=[\"static\", \"uniform\", \"normal\", \"poisson\"],\n help=\"how to choose masks\",\n )\n\n parser.add_argument(\n \"--mask-other\",\n type=float,\n help=\"secondary mask argument (used for more complex distributions), see help in compute_mask_indices\",\n )\n\n parser.add_argument(\n \"--no-mask-overlap\",\n action=\"store_true\",\n help=\"whether to allow masks to overlap\",\n )\n\n parser.add_argument(\n \"--mask-min-space\",\n type=int,\n help=\"min space between spans (if no overlap is enabled)\",\n )\n\n parser.add_argument(\n \"--mask-channel-length\",\n type=int,\n help=\"repeat the mask indices multiple times\",\n )\n\n parser.add_argument(\n \"--mask-channel-prob\",\n type=float,\n help=\"probability of replacing a token with mask\",\n )\n\n parser.add_argument(\n \"--mask-channel-selection\",\n type=str,\n choices=[\"static\", \"uniform\", \"normal\", \"poisson\"],\n help=\"how to choose masks\",\n )\n\n parser.add_argument(\n \"--mask-channel-other\",\n type=float,\n help=\"secondary mask argument (used for more complex distributions), see help in compute_mask_indices\",\n )\n\n parser.add_argument(\n \"--no-mask-channel-overlap\",\n action=\"store_true\",\n help=\"whether to allow masks to overlap\",\n )\n\n parser.add_argument(\n \"--mask-channel-min-space\",\n type=int,\n help=\"min space between spans (if no overlap is enabled)\",\n )\n\n parser.add_argument(\n \"--dropout-input\",\n type=float,\n metavar=\"D\",\n help=\"dropout to apply to the input (after feat extr)\",\n )\n\n parser.add_argument(\n \"--dropout-features\",\n type=float,\n metavar=\"D\",\n help=\"dropout to apply to the features (after feat extr)\",\n )\n\n parser.add_argument(\n \"--num-negatives\", type=int, metavar=\"N\", help=\"number of negative examples\"\n )\n\n parser.add_argument(\n \"--negatives-from-everywhere\",\n action=\"store_true\",\n help=\"sample negatives from everywhere, not just masked states\",\n )\n\n parser.add_argument(\n \"--cross-sample-negatives\",\n type=int,\n metavar=\"N\",\n help=\"num of cross sampled negatives\",\n )\n\n parser.add_argument(\n \"--codebook-negatives\",\n type=int,\n metavar=\"N\",\n help=\"num of codebook sampled negatives\",\n )\n\n parser.add_argument(\n \"--conv-pos\",\n type=int,\n metavar=\"N\",\n help=\"number of filters for convolutional positional embeddings\",\n )\n\n parser.add_argument(\n \"--conv-pos-groups\",\n type=int,\n metavar=\"N\",\n help=\"number of groups for convolutional positional embedding\",\n )\n\n parser.add_argument(\n \"--latent-temp\",\n type=str,\n metavar=\"D\",\n help=\"temperature for latent variable sampling. 
can be tuple of 3 values (start, end, decay)\",\n )\n\n parser.add_argument(\n \"--target-glu\", action=\"store_true\", help=\"adds projection + glu to targets\"\n )\n\n parser.add_argument(\n \"--conv-bias\", action=\"store_true\", help=\"include bias in conv encoder\"\n )\n\n def __init__(self, args):\n super().__init__()\n self.args = args\n\n feature_enc_layers = eval(args.conv_feature_layers)\n self.embed = feature_enc_layers[-1][0]\n\n self.feature_extractor = ConvFeatureExtractionModel(\n conv_layers=feature_enc_layers,\n dropout=0.0,\n mode=args.extractor_mode,\n conv_bias=args.conv_bias,\n )\n\n self.post_extract_proj = (\n nn.Linear(self.embed, args.encoder_embed_dim)\n if self.embed != args.encoder_embed_dim and not args.quantize_input\n else None\n )\n\n self.mask_prob = args.mask_prob\n self.mask_selection = args.mask_selection\n self.mask_other = args.mask_other\n self.mask_length = args.mask_length\n self.no_mask_overlap = args.no_mask_overlap\n self.mask_min_space = args.mask_min_space\n\n self.mask_channel_prob = args.mask_channel_prob\n self.mask_channel_selection = args.mask_channel_selection\n self.mask_channel_other = args.mask_channel_other\n self.mask_channel_length = args.mask_channel_length\n self.no_mask_channel_overlap = args.no_mask_channel_overlap\n self.mask_channel_min_space = args.mask_channel_min_space\n\n self.dropout_input = nn.Dropout(args.dropout_input)\n self.dropout_features = nn.Dropout(args.dropout_features)\n\n self.feature_grad_mult = args.feature_grad_mult\n\n self.quantizer = None\n self.input_quantizer = None\n\n self.n_negatives = args.num_negatives\n self.cross_sample_negatives = args.cross_sample_negatives\n self.codebook_negatives = args.codebook_negatives\n self.negatives_from_everywhere = args.negatives_from_everywhere\n\n self.logit_temp = args.logit_temp\n\n final_dim = args.final_dim if args.final_dim > 0 else args.encoder_embed_dim\n\n if args.quantize_targets:\n vq_dim = args.latent_dim if args.latent_dim > 0 else final_dim\n self.quantizer = GumbelVectorQuantizer(\n dim=self.embed,\n num_vars=args.latent_vars,\n temp=eval(args.latent_temp),\n groups=args.latent_groups,\n combine_groups=False,\n vq_dim=vq_dim,\n time_first=True,\n )\n self.project_q = nn.Linear(vq_dim, final_dim)\n else:\n self.project_q = nn.Linear(self.embed, final_dim)\n\n if args.quantize_input:\n if args.same_quantizer and self.quantizer is not None:\n vq_dim = final_dim\n self.input_quantizer = self.quantizer\n else:\n vq_dim = (\n args.latent_dim if args.latent_dim > 0 else args.encoder_embed_dim\n )\n self.input_quantizer = GumbelVectorQuantizer(\n dim=self.embed,\n num_vars=args.latent_vars,\n temp=eval(args.latent_temp),\n groups=args.latent_groups,\n combine_groups=False,\n vq_dim=vq_dim,\n time_first=True,\n )\n self.project_inp = nn.Linear(vq_dim, args.encoder_embed_dim)\n\n self.mask_emb = nn.Parameter(\n torch.FloatTensor(args.encoder_embed_dim).uniform_()\n )\n\n self.encoder = TransformerEncoder(args)\n self.layer_norm = LayerNorm(self.embed)\n\n self.target_glu = None\n if args.target_glu:\n self.target_glu = nn.Sequential(\n nn.Linear(final_dim, final_dim * 2), nn.GLU()\n )\n\n self.final_proj = nn.Linear(args.encoder_embed_dim, final_dim)\n\n def upgrade_state_dict_named(self, state_dict, name):\n super().upgrade_state_dict_named(state_dict, name)\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n return state_dict\n\n @classmethod\n def build_model(cls, args, task=None):\n \"\"\"Build a new model instance.\"\"\"\n\n 
# make sure all arguments are present\n base_architecture(args)\n\n return cls(args)\n\n def apply_mask(self, x, padding_mask, mask_indices):\n #B, T, C = x.shape\n x[mask_indices] = self.mask_emb\n\n #if self.mask_channel_prob > 0:\n # mask_channel_indices = compute_mask_indices(\n # (B, C),\n # None,\n # self.mask_channel_prob,\n # self.mask_channel_length,\n # self.mask_channel_selection,\n # self.mask_channel_other,\n # no_overlap=self.no_mask_channel_overlap,\n # min_space=self.mask_channel_min_space,\n # )\n # mask_channel_indices = (\n # torch.from_numpy(mask_channel_indices)\n # .to(x.device)\n # .unsqueeze(1)\n # .expand(-1, T, -1)\n # )\n # x[mask_channel_indices] = 0\n\n return x\n\n def sample_negatives(self, y, num):\n\n if self.n_negatives == 0 and self.cross_sample_negatives == 0:\n return y.new(0)\n\n bsz, tsz, fsz = y.shape\n y = y.view(-1, fsz) # BTC => (BxT)C\n\n cross_high = tsz * bsz\n high = tsz\n with torch.no_grad():\n assert high > 1, f\"{bsz,tsz,fsz}\"\n\n if self.n_negatives > 0:\n tszs = (\n buffered_arange(num)\n .unsqueeze(-1)\n .expand(-1, self.n_negatives)\n .flatten()\n )\n\n neg_idxs = torch.randint(\n low=0, high=high - 1, size=(bsz, self.n_negatives * num)\n )\n neg_idxs[neg_idxs >= tszs] += 1\n\n if self.cross_sample_negatives > 0:\n tszs = (\n buffered_arange(num)\n .unsqueeze(-1)\n .expand(-1, self.cross_sample_negatives)\n .flatten()\n )\n\n cross_neg_idxs = torch.randint(\n low=0,\n high=cross_high - 1,\n size=(bsz, self.cross_sample_negatives * num),\n )\n cross_neg_idxs[cross_neg_idxs >= tszs] += 1\n\n if self.n_negatives > 0:\n for i in range(1, bsz):\n neg_idxs[i] += i * high\n else:\n neg_idxs = cross_neg_idxs\n\n if self.cross_sample_negatives > 0 and self.n_negatives > 0:\n neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)\n\n negs = y[neg_idxs.view(-1)]\n negs = negs.view(\n bsz, num, self.n_negatives + self.cross_sample_negatives, fsz\n ).permute(\n 2, 0, 1, 3\n ) # to NxBxTxC\n return negs, neg_idxs\n\n def compute_preds(self, x, y, negatives):\n\n neg_is_pos = (y == negatives).all(-1)\n y = y.unsqueeze(0)\n targets = torch.cat([y, negatives], dim=0)\n\n logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)\n\n logits /= self.logit_temp\n\n if neg_is_pos.any():\n logits[1:][neg_is_pos] = float(\"-inf\")\n\n return logits\n\n def forward(self, source, padding_mask=None, mask=True, features_only=False, temperature=1):\n\n if self.feature_grad_mult > 0:\n features = self.feature_extractor(source)\n if self.feature_grad_mult != 1.0:\n features = GradMultiply.apply(features, self.feature_grad_mult)\n else:\n with torch.no_grad():\n features = self.feature_extractor(source)\n\n features_pen = features.float().pow(2).mean()\n\n features = features.transpose(1, 2)\n features = self.layer_norm(features)\n unmasked_features = features.clone()\n\n if padding_mask is not None:\n extra = padding_mask.size(1) % features.size(1)\n if extra > 0:\n padding_mask = padding_mask[:, :-extra]\n padding_mask = padding_mask.view(padding_mask.size(0), features.size(1), -1)\n padding_mask = padding_mask.all(-1)\n\n if self.post_extract_proj is not None:\n features = self.post_extract_proj(features)\n\n features = self.dropout_input(features)\n unmasked_features = self.dropout_features(unmasked_features)\n\n num_vars = None\n code_ppl = None\n prob_ppl = None\n curr_temp = None\n\n if self.input_quantizer:\n q = self.input_quantizer(features, produce_targets=False)\n features = q[\"x\"]\n num_vars = q[\"num_vars\"]\n code_ppl = 
q[\"code_perplexity\"]\n prob_ppl = q[\"prob_perplexity\"]\n curr_temp = q[\"temp\"]\n features = self.project_inp(features)\n\n x = features\n y = unmasked_features\n\n x = self.encoder(x, padding_mask=padding_mask)\n logits = self.get_logits_after_proj(x)\n token_prob = self.get_normalized_probs(x, log_probs=False, temperature=temperature)\n\n return {\"x\": x, \"padding_mask\": padding_mask, \"logits\": logits, \"prob\":token_prob}\n\n def quantize(self, x):\n assert self.quantizer is not None\n x = self.feature_extractor(x)\n x = x.transpose(1, 2)\n x = self.layer_norm(x)\n return self.quantizer.forward_idx(x)\n\n def extract_features(self, source, padding_mask, mask=False):\n res = self.forward(source, padding_mask, mask=mask, features_only=True)\n return res[\"x\"], res[\"padding_mask\"]\n\n def get_logits(self, net_output):\n logits = net_output[\"x\"]\n logits = logits.transpose(0, 2)\n logits = logits.reshape(-1, logits.size(-1))\n return logits\n\n def get_targets(self, sample, net_output, expand_steps=True):\n x = net_output[\"x\"]\n return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)\n\n def get_extra_losses(self, net_output):\n pen = []\n\n if \"prob_perplexity\" in net_output:\n pen.append(\n (net_output[\"num_vars\"] - net_output[\"prob_perplexity\"])\n / net_output[\"num_vars\"]\n )\n\n if \"features_pen\" in net_output:\n pen.append(net_output[\"features_pen\"])\n\n return pen\n\n def remove_pretraining_modules(self):\n self.quantizer = None\n self.project_q = None\n self.target_glu = None\n self.final_proj = None\n\n def set_mask_emb(self, mask_emb):\n self.mask_emb = mask_emb\n\n def init_proj_layer_to_decoder(self, proj_layer_weight, proj_layer_bias):\n # Initialize the projection layer which outputs probability distributions over tokens.\n # These probability distributions will be passed to the decoder.\n # self.proj_to_decoder is the same proj layer as in wav2vec_asr.Wav2VecEncoder\n tgt_dict_len, encoder_embed_dim = proj_layer_weight.shape\n self.proj_to_decoder = nn.Linear(encoder_embed_dim, tgt_dict_len)\n self.proj_to_decoder.weight = torch.nn.Parameter(proj_layer_weight.float())\n self.proj_to_decoder.bias = torch.nn.Parameter(proj_layer_bias.float())\n return\n\n def get_logits_after_proj(self, encoder_last_layer):\n \"\"\"\n Take final layer of the encoder and project to M tokens\n\n Arguments:\n encoder_last_layer: the final layer of the encoder (transformer layers)\n Returns:\n logits: logits of the probability distribution over M tokens\n \"\"\"\n\n encoder_last_layer = encoder_last_layer.transpose(0, 1)\n logits = self.proj_to_decoder(encoder_last_layer)\n return logits\n\n def get_normalized_probs(self, encoder_last_layer, log_probs=True, temperature=1):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n logits = self.get_logits_after_proj(encoder_last_layer)\n logits = logits / temperature\n if log_probs:\n return utils.log_softmax(logits.float(), dim=-1)\n else:\n return utils.softmax(logits.float(), dim=-1)\n\n @classmethod\n def create_teacher_model(cls, fairseq_pretrained_model_path, target_dict):\n \"\"\"\n Teacher model is the pre-trained wav2vec 2.0 model from fairseq.\n Args:\n fairseq_pretrained_model_path (str): path to the pre-trained model\n Return a teacher model\n \"\"\"\n w2v = torch.load(fairseq_pretrained_model_path)\n w2v[\"args\"].w2v_args.arch = 'teacher_wav2vec2'\n teacher_model = cls.build_model(w2v[\"args\"].w2v_args, target_dict)\n teacher_model_state = teacher_model.state_dict()\n for name, 
param in w2v[\"model\"].items():\n name_as_list = name.split('.')[2:] # Need to get rid of 'w2v_encoder.w2v_model' in the name\n name_in_t_model = '.'.join(name_as_list)\n if name_in_t_model in teacher_model_state:\n teacher_model_state[name_in_t_model].copy_(param)\n else:\n print(name + ' is not in teacher model state_dict')\n print('Finished loading weights into the teacher model')\n return teacher_model\n\nclass ConvFeatureExtractionModel(nn.Module):\n def __init__(\n self,\n conv_layers: List[Tuple[int, int, int]],\n dropout: float = 0.0,\n mode: str = \"default\",\n conv_bias: bool = False,\n ):\n super().__init__()\n\n assert mode in {\"default\", \"layer_norm\"}\n\n def block(\n n_in,\n n_out,\n k,\n stride,\n is_layer_norm=False,\n is_group_norm=False,\n conv_bias=False,\n ):\n def make_conv():\n conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)\n nn.init.kaiming_normal_(conv.weight)\n return conv\n\n assert (\n is_layer_norm and is_group_norm\n ) == False, \"layer norm and group norm are exclusive\"\n\n if is_layer_norm:\n return nn.Sequential(\n make_conv(),\n nn.Dropout(p=dropout),\n nn.Sequential(\n TransposeLast(),\n Fp32LayerNorm(dim, elementwise_affine=True),\n TransposeLast(),\n ),\n nn.GELU(),\n )\n elif is_group_norm:\n return nn.Sequential(\n make_conv(),\n nn.Dropout(p=dropout),\n Fp32GroupNorm(dim, dim, affine=True),\n nn.GELU(),\n )\n else:\n return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())\n\n in_d = 1\n self.conv_layers = nn.ModuleList()\n for i, cl in enumerate(conv_layers):\n assert len(cl) == 3, \"invalid conv definition: \" + str(cl)\n (dim, k, stride) = cl\n\n self.conv_layers.append(\n block(\n in_d,\n dim,\n k,\n stride,\n is_layer_norm=mode == \"layer_norm\",\n is_group_norm=mode == \"default\" and i == 0,\n conv_bias=conv_bias,\n )\n )\n in_d = dim\n\n def forward(self, x):\n\n # BxT -> BxCxT\n x = x.unsqueeze(1)\n\n for conv in self.conv_layers:\n x = conv(x)\n\n return x\n\n\nclass TransformerEncoder(nn.Module):\n def __init__(self, args):\n super().__init__()\n\n self.dropout = args.dropout\n self.embedding_dim = args.encoder_embed_dim\n\n self.pos_conv = nn.Conv1d(\n self.embedding_dim,\n self.embedding_dim,\n kernel_size=args.conv_pos,\n padding=args.conv_pos // 2,\n groups=args.conv_pos_groups,\n )\n dropout = 0\n std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))\n nn.init.normal_(self.pos_conv.weight, mean=0, std=std)\n nn.init.constant_(self.pos_conv.bias, 0)\n\n self.pos_conv = nn.utils.weight_norm(self.pos_conv, name=\"weight\", dim=2)\n self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())\n\n self.layers = nn.ModuleList(\n [\n TransformerSentenceEncoderLayer(\n embedding_dim=self.embedding_dim,\n ffn_embedding_dim=args.encoder_ffn_embed_dim,\n num_attention_heads=args.encoder_attention_heads,\n dropout=self.dropout,\n attention_dropout=args.attention_dropout,\n activation_dropout=args.activation_dropout,\n activation_fn=args.activation_fn,\n layer_norm_first=args.layer_norm_first,\n )\n for _ in range(args.encoder_layers)\n ]\n )\n\n self.layer_norm_first = args.layer_norm_first\n self.layer_norm = LayerNorm(self.embedding_dim)\n self.layerdrop = args.encoder_layerdrop\n\n self.apply(init_bert_params)\n\n def forward(self, x, padding_mask=None):\n x = self.extract_features(x, padding_mask)\n\n if self.layer_norm_first:\n x = self.layer_norm(x)\n\n return x\n\n def extract_features(self, x, padding_mask=None):\n\n if padding_mask is not None:\n x[padding_mask] 
= 0\n\n x_conv = self.pos_conv(x.transpose(1, 2))\n x_conv = x_conv.transpose(1, 2)\n x += x_conv\n\n if not self.layer_norm_first:\n x = self.layer_norm(x)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n layer_results = []\n for i, layer in enumerate(self.layers):\n dropout_probability = np.random.random()\n if not self.training or (dropout_probability > self.layerdrop):\n x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)\n layer_results.append(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n return x\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the encoder.\"\"\"\n return self.args.max_positions\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n return state_dict\n\n\nclass TransformerSentenceEncoderLayer(nn.Module):\n \"\"\"\n Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained\n models.\n \"\"\"\n\n def __init__(\n self,\n embedding_dim: float = 768,\n ffn_embedding_dim: float = 3072,\n num_attention_heads: float = 8,\n dropout: float = 0.1,\n attention_dropout: float = 0.1,\n activation_dropout: float = 0.1,\n activation_fn: str = \"relu\",\n layer_norm_first: bool = False,\n ) -> None:\n\n super().__init__()\n # Initialize parameters\n self.embedding_dim = embedding_dim\n self.dropout = dropout\n self.activation_dropout = activation_dropout\n\n # Initialize blocks\n self.activation_fn = utils.get_activation_fn(activation_fn)\n self.self_attn = MultiheadAttention(\n self.embedding_dim,\n num_attention_heads,\n dropout=attention_dropout,\n self_attention=True,\n )\n\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(self.activation_dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.layer_norm_first = layer_norm_first\n\n # layer norm associated with the self attention layer\n self.self_attn_layer_norm = LayerNorm(self.embedding_dim)\n self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)\n self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)\n\n # layer norm associated with the position wise feed-forward NN\n self.final_layer_norm = LayerNorm(self.embedding_dim)\n\n def forward(\n self,\n x: torch.Tensor,\n self_attn_mask: torch.Tensor = None,\n self_attn_padding_mask: torch.Tensor = None,\n need_weights: bool = False,\n att_args=None,\n ):\n \"\"\"\n LayerNorm is applied either before or after the self-attention/ffn\n modules similar to the original Transformer imlementation.\n \"\"\"\n residual = x\n\n if self.layer_norm_first:\n x = self.self_attn_layer_norm(x)\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = self.dropout1(x)\n x = residual + x\n\n residual = x\n x = self.final_layer_norm(x)\n x = self.activation_fn(self.fc1(x))\n x = self.dropout2(x)\n x = self.fc2(x)\n x = self.dropout3(x)\n x = residual + x\n else:\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=self_attn_padding_mask,\n need_weights=need_weights,\n )\n\n x = self.dropout1(x)\n x = residual + x\n\n x = self.self_attn_layer_norm(x)\n\n residual = x\n x = self.activation_fn(self.fc1(x))\n x = self.dropout2(x)\n x = self.fc2(x)\n x = self.dropout3(x)\n x = residual + x\n x = self.final_layer_norm(x)\n\n return x, attn\n\n\n@register_model_architecture(\"teacher_wav2vec2\", \"teacher_wav2vec2\")\ndef 
base_architecture(args):\n args.extractor_mode = getattr(args, \"extractor_mode\", \"default\")\n\n args.encoder_layers = getattr(args, \"encoder_layers\", 12)\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 768)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 3072)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 12)\n\n args.activation_fn = getattr(args, \"activation_fn\", \"gelu\")\n\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.1)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.0)\n\n args.final_dim = getattr(args, \"final_dim\", 0)\n\n args.layer_norm_first = getattr(args, \"layer_norm_first\", False)\n args.encoder_layerdrop = getattr(args, \"encoder_layerdrop\", 0.0)\n\n conv_feature_layers = \"[(512, 10, 5)]\"\n conv_feature_layers += \" + [(512, 8, 4)]\"\n conv_feature_layers += \" + [(512, 4, 2)] * 3\"\n conv_feature_layers += \" + [(512, 1, 1)]\"\n args.conv_feature_layers = getattr(args, \"conv_feature_layers\", conv_feature_layers)\n\n args.logit_temp = getattr(args, \"logit_temp\", 0.1)\n\n args.quantize_targets = getattr(args, \"quantize_targets\", False)\n args.quantize_input = getattr(args, \"quantize_input\", False)\n args.same_quantizer = getattr(args, \"same_quantizer\", False)\n\n args.feature_grad_mult = getattr(args, \"feature_grad_mult\", 1.0)\n\n args.latent_vars = getattr(args, \"latent_vars\", 320)\n args.latent_groups = getattr(args, \"latent_groups\", 2)\n args.latent_dim = getattr(args, \"latent_dim\", 0)\n\n args.mask_length = getattr(args, \"mask_length\", 10)\n args.mask_prob = getattr(args, \"mask_prob\", 0.65)\n args.mask_selection = getattr(args, \"mask_selection\", \"static\")\n args.mask_other = getattr(args, \"mask_other\", 0)\n args.no_mask_overlap = getattr(args, \"no_mask_overlap\", False)\n args.mask_min_space = getattr(args, \"mask_min_space\", 1)\n\n args.mask_channel_length = getattr(args, \"mask_channel_length\", 10)\n args.mask_channel_prob = getattr(args, \"mask_channel_prob\", 0)\n args.mask_channel_selection = getattr(args, \"mask_channel_selection\", \"static\")\n args.mask_channel_other = getattr(args, \"mask_channel_other\", 0)\n args.no_mask_channel_overlap = getattr(args, \"no_mask_channel_overlap\", False)\n args.mask_channel_min_space = getattr(args, \"mask_channel_min_space\", 1)\n\n args.dropout_input = getattr(args, \"dropout_input\", 0)\n args.dropout_features = getattr(args, \"dropout_features\", 0)\n\n args.num_negatives = getattr(args, \"num_negatives\", 100)\n args.negatives_from_everywhere = getattr(args, \"negatives_from_everywhere\", False)\n args.cross_sample_negatives = getattr(args, \"cross_sample_negatives\", 0)\n args.codebook_negatives = getattr(args, \"codebook_negatives\", 0)\n\n args.conv_pos = getattr(args, \"conv_pos\", 128)\n args.conv_pos_groups = getattr(args, \"conv_pos_groups\", 16)\n\n args.latent_temp = getattr(args, \"latent_temp\", \"(2,0.5,0.999995)\")\n\n args.target_glu = getattr(args, \"target_glu\", False)\n\n args.conv_bias = getattr(args, \"conv_bias\", False)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fairseq_mod.models import register_model, register_model_architecture\nfrom fairseq_mod.models.transformer import TransformerModel, TransformerEncoder\n\n\n@register_model(\"gru_transformer\")\nclass GRUTransformerModel(TransformerModel):\n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens):\n return GRUTransformerEncoder(args, src_dict, embed_tokens)\n\n\nclass GRUTransformerEncoder(TransformerEncoder):\n def __init__(self, args, dictionary, embed_tokens):\n super().__init__(args, dictionary, embed_tokens)\n self.emb_ctx = nn.GRU(input_size=embed_tokens.embedding_dim,\n hidden_size=embed_tokens.embedding_dim // 2,\n num_layers=1, bidirectional=True)\n\n def forward_embedding(self, src_tokens):\n # embed tokens and positions\n x = embed = self.embed_scale * self.embed_tokens(src_tokens)\n if self.embed_positions is not None:\n x = embed + self.embed_positions(src_tokens)\n\n # contextualize embeddings\n x = x.transpose(0, 1)\n x = self.dropout_module(x)\n x, _ = self.emb_ctx.forward(x)\n x = x.transpose(0, 1)\n\n if self.layernorm_embedding is not None:\n x = self.layernorm_embedding(x)\n x = self.dropout_module(x)\n return x, embed\n\n\n@register_model_architecture(\"gru_transformer\", \"gru_transformer\")\ndef gru_transformer_base_architecture(args):\n args.encoder_embed_path = getattr(args, \"encoder_embed_path\", None)\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 2048)\n args.encoder_layers = getattr(args, \"encoder_layers\", 6)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 8)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", False)\n args.encoder_learned_pos = getattr(args, \"encoder_learned_pos\", False)\n args.decoder_embed_path = getattr(args, \"decoder_embed_path\", None)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(\n args, \"decoder_ffn_embed_dim\", args.encoder_ffn_embed_dim\n )\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 8)\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", False)\n args.decoder_learned_pos = getattr(args, \"decoder_learned_pos\", False)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.0)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.0)\n args.activation_fn = getattr(args, \"activation_fn\", \"relu\")\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.adaptive_softmax_cutoff = getattr(args, \"adaptive_softmax_cutoff\", None)\n args.adaptive_softmax_dropout = getattr(args, \"adaptive_softmax_dropout\", 0)\n args.share_decoder_input_output_embed = getattr(\n args, \"share_decoder_input_output_embed\", False\n )\n args.share_all_embeddings = getattr(args, \"share_all_embeddings\", False)\n args.no_token_positional_embeddings = getattr(\n args, \"no_token_positional_embeddings\", False\n )\n args.adaptive_input = getattr(args, \"adaptive_input\", False)\n args.no_cross_attention = getattr(args, \"no_cross_attention\", False)\n args.cross_self_attention = getattr(args, \"cross_self_attention\", False)\n args.layer_wise_attention = getattr(args, 
\"layer_wise_attention\", False)\n\n args.decoder_output_dim = getattr(\n args, \"decoder_output_dim\", args.decoder_embed_dim\n )\n args.decoder_input_dim = getattr(args, \"decoder_input_dim\", args.decoder_embed_dim)\n\n args.no_scale_embedding = getattr(args, \"no_scale_embedding\", False)\n args.layernorm_embedding = getattr(args, \"layernorm_embedding\", False)\n\n\n@register_model_architecture(\"gru_transformer\", \"gru_transformer_big\")\ndef gru_transformer_big(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 1024)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 4096)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 16)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", False)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", 1024)\n args.decoder_ffn_embed_dim = getattr(args, \"decoder_ffn_embed_dim\", 4096)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 16)\n args.dropout = getattr(args, \"dropout\", 0.3)\n gru_transformer_base_architecture(args)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom fairseq_mod import utils\nfrom fairseq_mod.models import (\n FairseqEncoder,\n FairseqEncoderDecoderModel,\n FairseqIncrementalDecoder,\n register_model,\n register_model_architecture,\n)\nfrom fairseq_mod.models.fairseq_encoder import EncoderOut\nfrom fairseq_mod.modules import (\n AdaptiveSoftmax,\n FairseqDropout,\n LayerDropModuleList,\n LayerNorm,\n PositionalEmbedding,\n SinusoidalPositionalEmbedding,\n TransformerDecoderLayer,\n TransformerEncoderLayer,\n)\nfrom fairseq_mod.modules.quant_noise import quant_noise as apply_quant_noise_\nfrom torch import Tensor\n\nDEFAULT_MAX_SOURCE_POSITIONS = 1024\nDEFAULT_MAX_TARGET_POSITIONS = 1024\n\n\n@register_model(\"transformer\")\nclass TransformerModel(FairseqEncoderDecoderModel):\n \"\"\"\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\n <https://arxiv.org/abs/1706.03762>`_.\n\n Args:\n encoder (TransformerEncoder): the encoder\n decoder (TransformerDecoder): the decoder\n\n The Transformer model provides the following named architectures and\n command-line arguments:\n\n .. 
argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n\n @classmethod\n def hub_models(cls):\n # fmt: off\n\n def moses_subword(path):\n return {\n 'path': path,\n 'tokenizer': 'moses',\n 'bpe': 'subword_nmt',\n }\n\n def moses_fastbpe(path):\n return {\n 'path': path,\n 'tokenizer': 'moses',\n 'bpe': 'fastbpe',\n }\n\n return {\n 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),\n 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',\n 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),\n 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),\n 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),\n 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),\n 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),\n 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),\n 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),\n 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),\n 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),\n }\n # fmt: on\n\n def __init__(self, args, encoder, decoder):\n super().__init__(encoder, decoder)\n self.args = args\n self.supports_align_args = True\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--activation-fn',\n choices=utils.get_available_activation_fns(),\n help='activation function to use')\n parser.add_argument('--dropout', type=float, metavar='D',\n help='dropout probability')\n parser.add_argument('--attention-dropout', type=float, metavar='D',\n help='dropout probability for attention weights')\n parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',\n help='dropout probability after activation in FFN.')\n parser.add_argument('--encoder-embed-path', type=str, metavar='STR',\n help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension for FFN')\n parser.add_argument('--encoder-layers', type=int, metavar='N',\n help='num encoder layers')\n parser.add_argument('--encoder-attention-heads', type=int, metavar='N',\n help='num encoder attention heads')\n parser.add_argument('--encoder-normalize-before', action='store_true',\n help='apply layernorm before each encoder block')\n parser.add_argument('--encoder-learned-pos', action='store_true',\n help='use learned positional embeddings in the encoder')\n parser.add_argument('--decoder-embed-path', type=str, metavar='STR',\n help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\n 
help='decoder embedding dimension')\n parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension for FFN')\n parser.add_argument('--decoder-layers', type=int, metavar='N',\n help='num decoder layers')\n parser.add_argument('--decoder-attention-heads', type=int, metavar='N',\n help='num decoder attention heads')\n parser.add_argument('--decoder-learned-pos', action='store_true',\n help='use learned positional embeddings in the decoder')\n parser.add_argument('--decoder-normalize-before', action='store_true',\n help='apply layernorm before each decoder block')\n parser.add_argument('--decoder-output-dim', type=int, metavar='N',\n help='decoder output dimension (extra linear layer '\n 'if different from decoder embed dim')\n parser.add_argument('--share-decoder-input-output-embed', action='store_true',\n help='share decoder input and output embeddings')\n parser.add_argument('--share-all-embeddings', action='store_true',\n help='share encoder, decoder and output embeddings'\n ' (requires shared dictionary and embed dim)')\n parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',\n help='if set, disables positional embeddings (outside self attention)')\n parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',\n help='comma separated list of adaptive softmax cutoff points. '\n 'Must be used with adaptive_loss criterion'),\n parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',\n help='sets adaptive softmax dropout for the tail projections')\n parser.add_argument('--layernorm-embedding', action='store_true',\n help='add layernorm to embedding')\n parser.add_argument('--no-scale-embedding', action='store_true',\n help='if True, dont scale embeddings')\n # args for \"Cross+Self-Attention for Transformer Models\" (Peitz et al., 2019)\n parser.add_argument('--no-cross-attention', default=False, action='store_true',\n help='do not perform cross-attention')\n parser.add_argument('--cross-self-attention', default=False, action='store_true',\n help='perform cross+self-attention')\n # args for \"Reducing Transformer Depth on Demand with Structured Dropout\" (Fan et al., 2019)\n parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,\n help='LayerDrop probability for encoder')\n parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,\n help='LayerDrop probability for decoder')\n parser.add_argument('--encoder-layers-to-keep', default=None,\n help='which layers to *keep* when pruning as a comma-separated list')\n parser.add_argument('--decoder-layers-to-keep', default=None,\n help='which layers to *keep* when pruning as a comma-separated list')\n # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)\n parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,\n help='iterative PQ quantization noise at training time')\n parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,\n help='block size of quantization noise at training time')\n parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,\n help='scalar quantization noise and scalar quantization at training time')\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if args.encoder_layers_to_keep:\n args.encoder_layers = 
len(args.encoder_layers_to_keep.split(\",\"))\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, \"max_source_positions\", None) is None:\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\"--share-all-embeddings requires a joined dictionary\")\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n \"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim\"\n )\n if args.decoder_embed_path and (\n args.decoder_embed_path != args.encoder_embed_path\n ):\n raise ValueError(\n \"--share-all-embeddings not compatible with --decoder-embed-path\"\n )\n encoder_embed_tokens = cls.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = cls.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = cls.build_embedding(\n args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\n return cls(args, encoder, decoder)\n\n @classmethod\n def build_embedding(cls, args, dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n # if provided, load from preloaded dictionaries\n if path:\n embed_dict = utils.parse_embedding(path)\n utils.load_embedding(embed_dict, dictionary, emb)\n return emb\n\n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens):\n return TransformerEncoder(args, src_dict, embed_tokens)\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens):\n return TransformerDecoder(\n args,\n tgt_dict,\n embed_tokens,\n no_encoder_attn=getattr(args, \"no_cross_attention\", False),\n )\n\n # TorchScript doesn't support optional arguments with variable length (**kwargs).\n # Current workaround is to add union of all arguments in child classes.\n def forward(\n self,\n src_tokens,\n src_lengths,\n prev_output_tokens,\n return_all_hiddens: bool = True,\n features_only: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n ):\n \"\"\"\n Run the forward pass for an encoder-decoder model.\n\n Copied from the base class, but without ``**kwargs``,\n which are not supported by TorchScript.\n \"\"\"\n encoder_out = self.encoder(\n src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens\n )\n decoder_out = self.decoder(\n prev_output_tokens,\n encoder_out=encoder_out,\n features_only=features_only,\n alignment_layer=alignment_layer,\n alignment_heads=alignment_heads,\n src_lengths=src_lengths,\n return_all_hiddens=return_all_hiddens,\n )\n return decoder_out\n\n # Since get_normalized_probs is in the Fairseq Model which is not scriptable,\n # I rewrite the get_normalized_probs from Base Class to call the\n # helper function in the Base Class.\n @torch.jit.export\n def get_normalized_probs(\n self,\n net_output: Tuple[Tensor, Optional[Dict[str, 
List[Optional[Tensor]]]]],\n log_probs: bool,\n sample: Optional[Dict[str, Tensor]] = None,\n ):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n return self.get_normalized_probs_scriptable(net_output, log_probs, sample)\n\n\nclass TransformerEncoder(FairseqEncoder):\n \"\"\"\n Transformer encoder consisting of *args.encoder_layers* layers. Each layer\n is a :class:`TransformerEncoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): encoding dictionary\n embed_tokens (torch.nn.Embedding): input embedding\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens):\n super().__init__(dictionary)\n self.register_buffer(\"version\", torch.Tensor([3]))\n\n self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)\n self.encoder_layerdrop = args.encoder_layerdrop\n\n embed_dim = embed_tokens.embedding_dim\n self.padding_idx = embed_tokens.padding_idx\n self.max_source_positions = args.max_source_positions\n\n self.embed_tokens = embed_tokens\n\n self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)\n\n self.embed_positions = (\n PositionalEmbedding(\n args.max_source_positions,\n embed_dim,\n self.padding_idx,\n learned=args.encoder_learned_pos,\n )\n if not args.no_token_positional_embeddings\n else None\n )\n\n if getattr(args, \"layernorm_embedding\", False):\n self.layernorm_embedding = LayerNorm(embed_dim)\n else:\n self.layernorm_embedding = None\n\n if not args.adaptive_input and args.quant_noise_pq > 0:\n self.quant_noise = apply_quant_noise_(\n nn.Linear(embed_dim, embed_dim, bias=False),\n args.quant_noise_pq,\n args.quant_noise_pq_block_size,\n )\n else:\n self.quant_noise = None\n\n if self.encoder_layerdrop > 0.0:\n self.layers = LayerDropModuleList(p=self.encoder_layerdrop)\n else:\n self.layers = nn.ModuleList([])\n self.layers.extend(\n [self.build_encoder_layer(args) for i in range(args.encoder_layers)]\n )\n self.num_layers = len(self.layers)\n\n if args.encoder_normalize_before:\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n\n def build_encoder_layer(self, args):\n return TransformerEncoderLayer(args)\n\n def forward_embedding(self, src_tokens):\n # embed tokens and positions\n x = embed = self.embed_scale * self.embed_tokens(src_tokens)\n if self.embed_positions is not None:\n x = embed + self.embed_positions(src_tokens)\n if self.layernorm_embedding is not None:\n x = self.layernorm_embedding(x)\n x = self.dropout_module(x)\n if self.quant_noise is not None:\n x = self.quant_noise(x)\n return x, embed\n\n def forward(self, src_tokens, src_lengths, return_all_hiddens: bool = False):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (torch.LongTensor): lengths of each source sentence of\n shape `(batch)`\n return_all_hiddens (bool, optional): also return all of the\n intermediate hidden states (default: False).\n\n Returns:\n namedtuple:\n - **encoder_out** (Tensor): the last encoder layer's output of\n shape `(src_len, batch, embed_dim)`\n - **encoder_padding_mask** (ByteTensor): the positions of\n padding elements of shape `(batch, src_len)`\n - **encoder_embedding** (Tensor): the (scaled) embedding lookup\n of shape `(batch, src_len, embed_dim)`\n - **encoder_states** (List[Tensor]): all intermediate\n hidden states of shape `(src_len, batch, embed_dim)`.\n Only populated if *return_all_hiddens* is True.\n \"\"\"\n x, 
encoder_embedding = self.forward_embedding(src_tokens)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # compute padding mask\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\n\n encoder_states = [] if return_all_hiddens else None\n\n # encoder layers\n for layer in self.layers:\n x = layer(x, encoder_padding_mask)\n if return_all_hiddens:\n assert encoder_states is not None\n encoder_states.append(x)\n\n if self.layer_norm is not None:\n x = self.layer_norm(x)\n\n return EncoderOut(\n encoder_out=x, # T x B x C\n encoder_padding_mask=encoder_padding_mask, # B x T\n encoder_embedding=encoder_embedding, # B x T x C\n encoder_states=encoder_states, # List[T x B x C]\n src_tokens=None,\n src_lengths=None,\n )\n\n @torch.jit.export\n def reorder_encoder_out(self, encoder_out: EncoderOut, new_order):\n \"\"\"\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n \"\"\"\n \"\"\"\n Since encoder_padding_mask and encoder_embedding are both of type\n Optional[Tensor] in EncoderOut, they need to be copied as local\n variables for Torchscript Optional refinement\n \"\"\"\n encoder_padding_mask: Optional[Tensor] = encoder_out.encoder_padding_mask\n encoder_embedding: Optional[Tensor] = encoder_out.encoder_embedding\n\n new_encoder_out = (\n encoder_out.encoder_out\n if encoder_out.encoder_out is None\n else encoder_out.encoder_out.index_select(1, new_order)\n )\n new_encoder_padding_mask = (\n encoder_padding_mask\n if encoder_padding_mask is None\n else encoder_padding_mask.index_select(0, new_order)\n )\n new_encoder_embedding = (\n encoder_embedding\n if encoder_embedding is None\n else encoder_embedding.index_select(0, new_order)\n )\n src_tokens = encoder_out.src_tokens\n if src_tokens is not None:\n src_tokens = src_tokens.index_select(0, new_order)\n\n src_lengths = encoder_out.src_lengths\n if src_lengths is not None:\n src_lengths = src_lengths.index_select(0, new_order)\n\n encoder_states = encoder_out.encoder_states\n if encoder_states is not None:\n for idx, state in enumerate(encoder_states):\n encoder_states[idx] = state.index_select(1, new_order)\n\n return EncoderOut(\n encoder_out=new_encoder_out, # T x B x C\n encoder_padding_mask=new_encoder_padding_mask, # B x T\n encoder_embedding=new_encoder_embedding, # B x T x C\n encoder_states=encoder_states, # List[T x B x C]\n src_tokens=src_tokens, # B x T\n src_lengths=src_lengths, # B x 1\n )\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n if self.embed_positions is None:\n return self.max_source_positions\n return min(self.max_source_positions, self.embed_positions.max_positions)\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n print(\"deleting {0}\".format(weights_key))\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n for i in range(self.num_layers):\n # update layer norms\n self.layers[i].upgrade_state_dict_named(\n state_dict, \"{}.layers.{}\".format(name, i)\n )\n\n version_key = \"{}.version\".format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\n # 
earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n return state_dict\n\n\nclass TransformerDecoder(FairseqIncrementalDecoder):\n \"\"\"\n Transformer decoder consisting of *args.decoder_layers* layers. Each layer\n is a :class:`TransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):\n self.args = args\n super().__init__(dictionary)\n self.register_buffer(\"version\", torch.Tensor([3]))\n self._future_mask = torch.empty(0)\n\n self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)\n self.decoder_layerdrop = args.decoder_layerdrop\n self.share_input_output_embed = args.share_decoder_input_output_embed\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = args.decoder_embed_dim\n self.embed_dim = embed_dim\n self.output_embed_dim = args.decoder_output_dim\n\n self.padding_idx = embed_tokens.padding_idx\n self.max_target_positions = args.max_target_positions\n\n self.embed_tokens = embed_tokens\n\n self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)\n\n if not args.adaptive_input and args.quant_noise_pq > 0:\n self.quant_noise = apply_quant_noise_(\n nn.Linear(embed_dim, embed_dim, bias=False),\n args.quant_noise_pq,\n args.quant_noise_pq_block_size,\n )\n else:\n self.quant_noise = None\n\n self.project_in_dim = (\n Linear(input_embed_dim, embed_dim, bias=False)\n if embed_dim != input_embed_dim\n else None\n )\n\n self.embed_positions = (\n PositionalEmbedding(\n args.max_target_positions,\n embed_dim,\n self.padding_idx,\n learned=args.decoder_learned_pos,\n )\n if not args.no_token_positional_embeddings\n else None\n )\n\n if getattr(args, \"layernorm_embedding\", False):\n self.layernorm_embedding = LayerNorm(embed_dim)\n else:\n self.layernorm_embedding = None\n\n self.cross_self_attention = getattr(args, \"cross_self_attention\", False)\n\n if self.decoder_layerdrop > 0.0:\n self.layers = LayerDropModuleList(p=self.decoder_layerdrop)\n else:\n self.layers = nn.ModuleList([])\n self.layers.extend(\n [\n self.build_decoder_layer(args, no_encoder_attn)\n for _ in range(args.decoder_layers)\n ]\n )\n self.num_layers = len(self.layers)\n\n if args.decoder_normalize_before and not getattr(\n args, \"no_decoder_final_norm\", False\n ):\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n\n self.project_out_dim = (\n Linear(embed_dim, self.output_embed_dim, bias=False)\n if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights\n else None\n )\n\n self.adaptive_softmax = None\n self.output_projection = None\n if args.adaptive_softmax_cutoff is not None:\n self.adaptive_softmax = AdaptiveSoftmax(\n len(dictionary),\n self.output_embed_dim,\n utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),\n dropout=args.adaptive_softmax_dropout,\n adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,\n factor=args.adaptive_softmax_factor,\n tie_proj=args.tie_adaptive_proj,\n )\n elif self.share_input_output_embed:\n self.output_projection = nn.Linear(\n self.embed_tokens.weight.shape[1],\n self.embed_tokens.weight.shape[0],\n bias=False,\n )\n 
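# Weight tying: reuse the input embedding matrix as the decoder output\n            # projection (the --share-decoder-input-output-embed branch above).\n            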
self.output_projection.weight = self.embed_tokens.weight\n else:\n self.output_projection = nn.Linear(\n self.output_embed_dim, len(dictionary), bias=False\n )\n nn.init.normal_(\n self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5\n )\n\n def build_decoder_layer(self, args, no_encoder_attn=False):\n return TransformerDecoderLayer(args, no_encoder_attn)\n\n def forward(\n self,\n prev_output_tokens,\n encoder_out: Optional[EncoderOut] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n features_only: bool = False,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n src_lengths: Optional[Any] = None,\n return_all_hiddens: bool = False,\n ):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (optional): output from the encoder, used for\n encoder-side attention\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n features_only (bool, optional): only return features without\n applying output layer (default: False).\n full_context_alignment (bool, optional): don't apply\n auto-regressive mask to self-attention (default: False).\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n x, extra = self.extract_features(\n prev_output_tokens,\n encoder_out=encoder_out,\n incremental_state=incremental_state,\n full_context_alignment=full_context_alignment,\n alignment_layer=alignment_layer,\n alignment_heads=alignment_heads,\n )\n if not features_only:\n x = self.output_layer(x)\n return x, extra\n\n def extract_features(\n self,\n prev_output_tokens,\n encoder_out: Optional[EncoderOut] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n ):\n return self.extract_features_scriptable(\n prev_output_tokens,\n encoder_out,\n incremental_state,\n full_context_alignment,\n alignment_layer,\n alignment_heads,\n )\n\n \"\"\"\n A scriptable subclass of this class has an extract_features method and calls\n super().extract_features, but super() is not supported in torchscript. 
A copy of\n    this function is made to be used in the subclass instead.\n    \"\"\"\n\n    def extract_features_scriptable(\n        self,\n        prev_output_tokens,\n        encoder_out: Optional[EncoderOut] = None,\n        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n        full_context_alignment: bool = False,\n        alignment_layer: Optional[int] = None,\n        alignment_heads: Optional[int] = None,\n    ):\n        \"\"\"\n        Similar to *forward* but only returns features.\n\n        Includes several features from \"Jointly Learning to Align and\n        Translate with Transformer Models\" (Garg et al., EMNLP 2019).\n\n        Args:\n            full_context_alignment (bool, optional): don't apply\n                auto-regressive mask to self-attention (default: False).\n            alignment_layer (int, optional): return mean alignment over\n                heads at this layer (default: last layer).\n            alignment_heads (int, optional): only average alignment over\n                this many heads (default: all heads).\n\n        Returns:\n            tuple:\n                - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n                - a dictionary with any model-specific outputs\n        \"\"\"\n        if alignment_layer is None:\n            alignment_layer = self.num_layers - 1\n\n        # embed positions\n        positions = (\n            self.embed_positions(\n                prev_output_tokens, incremental_state=incremental_state\n            )\n            if self.embed_positions is not None\n            else None\n        )\n\n        if incremental_state is not None:\n            prev_output_tokens = prev_output_tokens[:, -1:]\n            if positions is not None:\n                positions = positions[:, -1:]\n\n        # embed tokens and positions\n        x = self.embed_scale * self.embed_tokens(prev_output_tokens)\n\n        if self.quant_noise is not None:\n            x = self.quant_noise(x)\n\n        if self.project_in_dim is not None:\n            x = self.project_in_dim(x)\n\n        if positions is not None:\n            x += positions\n\n        if self.layernorm_embedding is not None:\n            x = self.layernorm_embedding(x)\n\n        x = self.dropout_module(x)\n\n        # B x T x C -> T x B x C\n        x = x.transpose(0, 1)\n\n        self_attn_padding_mask: Optional[Tensor] = None\n        if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():\n            self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)\n\n        # decoder layers\n        attn: Optional[Tensor] = None\n        inner_states: List[Optional[Tensor]] = [x]\n        for idx, layer in enumerate(self.layers):\n            if incremental_state is None and not full_context_alignment:\n                self_attn_mask = self.buffered_future_mask(x)\n            else:\n                self_attn_mask = None\n\n            x, layer_attn, _ = layer(\n                x,\n                encoder_out.encoder_out if encoder_out is not None else None,\n                encoder_out.encoder_padding_mask if encoder_out is not None else None,\n                incremental_state,\n                self_attn_mask=self_attn_mask,\n                self_attn_padding_mask=self_attn_padding_mask,\n                need_attn=bool((idx == alignment_layer)),\n                need_head_weights=bool((idx == alignment_layer)),\n            )\n            inner_states.append(x)\n            if layer_attn is not None and idx == alignment_layer:\n                attn = layer_attn.float().to(x)\n\n        if attn is not None:\n            if alignment_heads is not None:\n                attn = attn[:alignment_heads]\n\n            # average probabilities over heads\n            attn = attn.mean(dim=0)\n\n        if self.layer_norm is not None:\n            x = self.layer_norm(x)\n\n        # T x B x C -> B x T x C\n        x = x.transpose(0, 1)\n\n        if self.project_out_dim is not None:\n            x = self.project_out_dim(x)\n\n        return x, {\"attn\": [attn], \"inner_states\": inner_states}\n\n    def output_layer(self, features):\n        \"\"\"Project features to the vocabulary size.\"\"\"\n        if self.adaptive_softmax is None:\n            # project back to size of vocabulary\n            return self.output_projection(features)\n        else:\n            return features\n\n    def max_positions(self):\n        
\"\"\"Maximum output length supported by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.\n if (\n self._future_mask.size(0) == 0\n or (not self._future_mask.device == tensor.device)\n or self._future_mask.size(0) < dim\n ):\n self._future_mask = torch.triu(\n utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1\n )\n self._future_mask = self._future_mask.to(tensor)\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n\n if f\"{name}.output_projection.weight\" not in state_dict:\n if self.share_input_output_embed:\n embed_out_key = f\"{name}.embed_tokens.weight\"\n else:\n embed_out_key = f\"{name}.embed_out\"\n if embed_out_key in state_dict:\n state_dict[f\"{name}.output_projection.weight\"] = state_dict[\n embed_out_key\n ]\n if not self.share_input_output_embed:\n del state_dict[embed_out_key]\n\n for i in range(self.num_layers):\n # update layer norms\n layer_norm_map = {\n \"0\": \"self_attn_layer_norm\",\n \"1\": \"encoder_attn_layer_norm\",\n \"2\": \"final_layer_norm\",\n }\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layers.{}.layer_norms.{}.{}\".format(name, i, old, m)\n if k in state_dict:\n state_dict[\n \"{}.layers.{}.{}.{}\".format(name, i, new, m)\n ] = state_dict[k]\n del state_dict[k]\n\n version_key = \"{}.version\".format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n return m\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.0)\n return m\n\n\n@register_model_architecture(\"transformer\", \"transformer\")\ndef base_architecture(args):\n args.encoder_embed_path = getattr(args, \"encoder_embed_path\", None)\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 2048)\n args.encoder_layers = getattr(args, \"encoder_layers\", 6)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 8)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", False)\n args.encoder_learned_pos = getattr(args, \"encoder_learned_pos\", False)\n args.decoder_embed_path = getattr(args, \"decoder_embed_path\", None)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(\n args, 
\"decoder_ffn_embed_dim\", args.encoder_ffn_embed_dim\n )\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 8)\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", False)\n args.decoder_learned_pos = getattr(args, \"decoder_learned_pos\", False)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.0)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.0)\n args.activation_fn = getattr(args, \"activation_fn\", \"relu\")\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.adaptive_softmax_cutoff = getattr(args, \"adaptive_softmax_cutoff\", None)\n args.adaptive_softmax_dropout = getattr(args, \"adaptive_softmax_dropout\", 0)\n args.share_decoder_input_output_embed = getattr(\n args, \"share_decoder_input_output_embed\", False\n )\n args.share_all_embeddings = getattr(args, \"share_all_embeddings\", False)\n args.no_token_positional_embeddings = getattr(\n args, \"no_token_positional_embeddings\", False\n )\n args.adaptive_input = getattr(args, \"adaptive_input\", False)\n args.no_cross_attention = getattr(args, \"no_cross_attention\", False)\n args.cross_self_attention = getattr(args, \"cross_self_attention\", False)\n\n args.decoder_output_dim = getattr(\n args, \"decoder_output_dim\", args.decoder_embed_dim\n )\n args.decoder_input_dim = getattr(args, \"decoder_input_dim\", args.decoder_embed_dim)\n\n args.no_scale_embedding = getattr(args, \"no_scale_embedding\", False)\n args.layernorm_embedding = getattr(args, \"layernorm_embedding\", False)\n args.tie_adaptive_weights = getattr(args, \"tie_adaptive_weights\", False)\n\n\n@register_model_architecture(\"transformer\", \"transformer_iwslt_de_en\")\ndef transformer_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 1024)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 4)\n args.encoder_layers = getattr(args, \"encoder_layers\", 6)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", 512)\n args.decoder_ffn_embed_dim = getattr(args, \"decoder_ffn_embed_dim\", 1024)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 4)\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n base_architecture(args)\n\n\n@register_model_architecture(\"transformer\", \"transformer_wmt_en_de\")\ndef transformer_wmt_en_de(args):\n base_architecture(args)\n\n\n# parameters used in the \"Attention Is All You Need\" paper (Vaswani et al., 2017)\n@register_model_architecture(\"transformer\", \"transformer_vaswani_wmt_en_de_big\")\ndef transformer_vaswani_wmt_en_de_big(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 1024)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 4096)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 16)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", False)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", 1024)\n args.decoder_ffn_embed_dim = getattr(args, \"decoder_ffn_embed_dim\", 4096)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 16)\n args.dropout = getattr(args, \"dropout\", 0.3)\n base_architecture(args)\n\n\n@register_model_architecture(\"transformer\", \"transformer_vaswani_wmt_en_fr_big\")\ndef transformer_vaswani_wmt_en_fr_big(args):\n args.dropout = getattr(args, 
\"dropout\", 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n@register_model_architecture(\"transformer\", \"transformer_wmt_en_de_big\")\ndef transformer_wmt_en_de_big(args):\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n# default parameters used in tensor2tensor implementation\n@register_model_architecture(\"transformer\", \"transformer_wmt_en_de_big_t2t\")\ndef transformer_wmt_en_de_big_t2t(args):\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", True)\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", True)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.1)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n", "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom fairseq_mod import utils\nfrom fairseq_mod.criterions import FairseqCriterion, register_criterion\nfrom examples.speech_recognition.data.replabels import pack_replabels\n\n\n@register_criterion(\"asg_loss\")\nclass ASGCriterion(FairseqCriterion):\n @staticmethod\n def add_args(parser):\n group = parser.add_argument_group(\"ASG Loss\")\n group.add_argument(\n \"--asg-transitions-init\",\n help=\"initial diagonal value of transition matrix\",\n type=float,\n default=0.0,\n )\n group.add_argument(\n \"--max-replabel\", help=\"maximum # of replabels\", type=int, default=2\n )\n group.add_argument(\n \"--linseg-updates\",\n help=\"# of training updates to use LinSeg initialization\",\n type=int,\n default=0,\n )\n group.add_argument(\n \"--hide-linseg-messages\",\n help=\"hide messages about LinSeg initialization\",\n action=\"store_true\",\n )\n\n def __init__(\n self,\n task,\n silence_token,\n asg_transitions_init,\n max_replabel,\n linseg_updates,\n hide_linseg_messages,\n ):\n from wav2letter.criterion import ASGLoss, CriterionScaleMode\n\n super().__init__(task)\n self.tgt_dict = task.target_dictionary\n self.eos = self.tgt_dict.eos()\n self.silence = (\n self.tgt_dict.index(silence_token)\n if silence_token in self.tgt_dict\n else None\n )\n self.max_replabel = max_replabel\n\n num_labels = len(self.tgt_dict)\n self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)\n self.asg.trans = torch.nn.Parameter(\n asg_transitions_init * torch.eye(num_labels), requires_grad=True\n )\n\n self.linseg_progress = torch.nn.Parameter(\n torch.tensor([0], dtype=torch.int), requires_grad=False\n )\n self.linseg_maximum = linseg_updates\n self.linseg_message_state = \"none\" if hide_linseg_messages else \"start\"\n\n @classmethod\n def build_criterion(cls, args, task):\n return cls(\n task,\n args.silence_token,\n args.asg_transitions_init,\n args.max_replabel,\n args.linseg_updates,\n args.hide_linseg_messages,\n )\n\n def linseg_step(self):\n if not self.training:\n return False\n if self.linseg_progress.item() < self.linseg_maximum:\n if self.linseg_message_state == \"start\":\n print(\"| using LinSeg to initialize ASG\")\n self.linseg_message_state = \"finish\"\n self.linseg_progress.add_(1)\n return True\n elif self.linseg_message_state == \"finish\":\n print(\"| finished LinSeg initialization\")\n self.linseg_message_state = \"none\"\n return False\n\n def replace_eos_with_silence(self, tgt):\n if tgt[-1] != self.eos:\n return tgt\n elif 
self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):\n return tgt[:-1]\n else:\n return tgt[:-1] + [self.silence]\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n\n net_output = model(**sample[\"net_input\"])\n emissions = net_output[\"encoder_out\"].transpose(0, 1).contiguous()\n B = emissions.size(0)\n T = emissions.size(1)\n device = emissions.device\n\n target = torch.IntTensor(B, T)\n target_size = torch.IntTensor(B)\n using_linseg = self.linseg_step()\n\n for b in range(B):\n initial_target_size = sample[\"target_lengths\"][b].item()\n if initial_target_size == 0:\n raise ValueError(\"target size cannot be zero\")\n\n tgt = sample[\"target\"][b, :initial_target_size].tolist()\n tgt = self.replace_eos_with_silence(tgt)\n tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)\n tgt = tgt[:T]\n\n if using_linseg:\n tgt = [tgt[t * len(tgt) // T] for t in range(T)]\n\n target[b][: len(tgt)] = torch.IntTensor(tgt)\n target_size[b] = len(tgt)\n\n loss = self.asg.forward(emissions, target.to(device), target_size.to(device))\n\n if reduce:\n loss = torch.sum(loss)\n\n sample_size = (\n sample[\"target\"].size(0) if self.args.sentence_avg else sample[\"ntokens\"]\n )\n logging_output = {\n \"loss\": utils.item(loss.data) if reduce else loss.data,\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"target\"].size(0),\n \"sample_size\": sample_size,\n }\n return loss, sample_size, logging_output\n\n @staticmethod\n def aggregate_logging_outputs(logging_outputs):\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n loss_sum = sum(log.get(\"loss\", 0) for log in logging_outputs)\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n nsentences = sum(log.get(\"nsentences\", 0) for log in logging_outputs)\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n agg_output = {\n \"loss\": loss_sum / nsentences,\n \"ntokens\": ntokens,\n \"nsentences\": nsentences,\n \"sample_size\": sample_size,\n }\n return agg_output\n" ]
[ [ "torch.nn.functional.nll_loss" ], [ "torch.device", "torch.nn.CrossEntropyLoss", "torch.nn.Conv2d", "torch.utils.data.DataLoader" ], [ "torch.LongTensor" ], [ "torch.nn.Dropout", "torch.nn.GELU", "numpy.random.random", "torch.nn.GLU", "torch.randint", "torch.load", "torch.cat", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.utils.weight_norm", "torch.nn.functional.dropout", "torch.nn.Linear", "torch.nn.init.normal_", "torch.no_grad", "torch.FloatTensor", "torch.nn.Conv1d", "torch.nn.init.kaiming_normal_" ], [ "torch.nn.GRU" ], [ "torch.empty", "torch.Tensor", "torch.zeros", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.init.normal_", "torch.FloatTensor", "torch.nn.init.xavier_uniform_" ], [ "torch.tensor", "torch.eye", "torch.sum", "torch.IntTensor" ] ]
henryzxu/pytorch-hsml-rl
[ "3b36f29cf91f3ca68820ea124a2ee7a75327b94f", "3b36f29cf91f3ca68820ea124a2ee7a75327b94f" ]
[ "maml_rl/sampler.py", "maml_rl/envs/mdp.py" ]
[ "import gym\nimport torch\nimport multiprocessing as mp\nimport numpy as np\n\nfrom maml_rl.envs.subproc_vec_env import SubprocVecEnv\nfrom maml_rl.episode import BatchEpisodes\n\ndef make_env(env_name):\n def _make_env():\n return gym.make(env_name)\n return _make_env\n\nclass BatchSampler(object):\n def __init__(self, env_name, batch_size, num_workers=mp.cpu_count() - 1):\n self.env_name = env_name\n self.batch_size = batch_size\n self.num_workers = num_workers\n \n self.queue = mp.Queue()\n self.envs = SubprocVecEnv([make_env(env_name) for _ in range(num_workers)],\n queue=self.queue)\n self._env = gym.make(env_name)\n\n def sample(self, policy, task, tree=None, params=None, gamma=0.95, device='cpu'):\n episodes = BatchEpisodes(batch_size=self.batch_size, gamma=gamma, device=device)\n for i in range(self.batch_size):\n self.queue.put(i)\n for _ in range(self.num_workers):\n self.queue.put(None)\n observations, batch_ids = self.envs.reset()\n dones = [False]\n while (not all(dones)) or (not self.queue.empty()):\n with torch.no_grad():\n input = torch.from_numpy(observations).float().to(device=device)\n\n if self.env_name == 'AntPos-v0':\n _, embedding = tree.forward(torch.from_numpy(task[\"position\"]).float().to(device=device))\n if self.env_name == 'AntVel-v1':\n _, embedding = tree.forward(torch.from_numpy(np.array([task[\"velocity\"]])).float().to(device=device))\n\n # print(input.shape)\n # print(embedding.shape)\n observations_tensor = torch.t(\n torch.stack([torch.cat([torch.from_numpy(np.array(teo)).to(device=device), embedding[0]], 0) for teo in input], 1))\n\n actions_tensor = policy(observations_tensor, task=task, params=params, enhanced=False).sample()\n actions = actions_tensor.cpu().numpy()\n new_observations, rewards, dones, new_batch_ids, _ = self.envs.step(actions)\n episodes.append(observations_tensor.cpu().numpy(), actions, rewards, batch_ids)\n observations, batch_ids = new_observations, new_batch_ids\n return episodes\n\n def reset_task(self, task):\n tasks = [task for _ in range(self.num_workers)]\n reset = self.envs.reset_task(tasks)\n return all(reset)\n\n def sample_tasks(self, num_tasks):\n tasks = self._env.unwrapped.sample_tasks(num_tasks)\n return tasks\n", "import numpy as np\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nclass TabularMDPEnv(gym.Env):\n \"\"\"Tabular MDP problems, as described in [1].\n\n At each time step, the agent chooses one of `num_actions` actions, say `i`, \n receives a reward sampled from a Normal distribution with mean `m_i` and \n variance 1 (fixed across all tasks), and reaches a new state following the \n dynamics of the Markov Decision Process (MDP). The tabular MDP tasks are \n generated by sampling the mean rewards from a Normal distribution with mean \n 1 and variance 1, and sampling the transition probabilities from a uniform \n Dirichlet distribution (ie. with parameter 1).\n\n [1] Yan Duan, John Schulman, Xi Chen, Peter L. 
Bartlett, Ilya Sutskever,\n Pieter Abbeel, \"RL2: Fast Reinforcement Learning via Slow Reinforcement\n Learning\", 2016 (https://arxiv.org/abs/1611.02779)\n \"\"\"\n def __init__(self, num_states, num_actions, task={}):\n super(TabularMDPEnv, self).__init__()\n self.num_states = num_states\n self.num_actions = num_actions\n \n self.action_space = spaces.Discrete(num_actions)\n self.observation_space = spaces.Box(low=0.0,\n high=1.0, shape=(num_states,), dtype=np.float32)\n\n self._task = task\n self._transitions = task.get('transitions', np.full((num_states,\n num_actions, num_states), 1.0 / num_states, dtype=np.float32))\n self._rewards_mean = task.get('rewards_mean', np.zeros((num_states,\n num_actions), dtype=np.float32))\n self._state = 0\n self.seed()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def sample_tasks(self, num_tasks):\n transitions = self.np_random.dirichlet(np.ones(self.num_states),\n size=(num_tasks, self.num_states, self.num_actions))\n rewards_mean = self.np_random.normal(1.0, 1.0,\n size=(num_tasks, self.num_states, self.num_actions))\n tasks = [{'transitions': transition, 'rewards_mean': reward_mean}\n for (transition, reward_mean) in zip(transitions, rewards_mean)]\n return tasks\n\n def reset_task(self, task):\n self._task = task\n self._transitions = task['transitions']\n self._rewards_mean = task['rewards_mean']\n\n def reset(self):\n # From [1]: \"an episode always starts on the first state\"\n self._state = 0\n observation = np.zeros(self.num_states, dtype=np.float32)\n observation[self._state] = 1.0\n\n return observation\n\n def step(self, action):\n assert self.action_space.contains(action)\n mean = self._rewards_mean[self._state, action]\n reward = self.np_random.normal(mean, 1.0)\n\n self._state = self.np_random.choice(self.num_states,\n p=self._transitions[self._state, action])\n observation = np.zeros(self.num_states, dtype=np.float32)\n observation[self._state] = 1.0\n\n return observation, reward, False, self._task\n" ]
[ [ "numpy.array", "torch.no_grad", "torch.from_numpy" ], [ "numpy.ones", "numpy.zeros", "numpy.full" ] ]
moslemk/Theano
[ "8d3a67b73fda49350d9944c9a24fc9660131861c", "8d3a67b73fda49350d9944c9a24fc9660131861c", "8d3a67b73fda49350d9944c9a24fc9660131861c", "8d3a67b73fda49350d9944c9a24fc9660131861c" ]
[ "theano/sandbox/gpuarray/type.py", "theano/compile/function_module.py", "theano/tensor/signal/tests/test_downsample.py", "theano/sandbox/gpuarray/basic_ops.py" ]
[ "import numpy\n\nimport theano\nfrom theano.tensor.var import _tensor_py_operators\nfrom theano import Type, Variable, Constant, tensor, config, scalar\nfrom theano.compile import SharedVariable\n\n# Make sure this is importable even if pygpu is absent\n# (it will not work though)\ntry:\n import pygpu\n from pygpu import gpuarray\n from pygpu.elemwise import compare, elemwise2\nexcept ImportError:\n pass\n\n_context_reg = {}\n\n\ndef reg_context(name, ctx):\n \"\"\"\n Register a context by mapping it to a name.\n\n The context must be of type `GpuContext` and the name can be\n anything hashable (but is usually a string). Only one context can\n be registered per name and the second registration for a given\n name will raise an error.\n\n Parameters\n ----------\n name : hashable object\n Name to associate the context with (usually a string)\n ctx : GpuContext\n Context instance\n\n \"\"\"\n if name in _context_reg:\n raise ValueError(\"context name %s is already defined\" % (name,))\n if not isinstance(ctx, gpuarray.GpuContext):\n raise TypeError(\"context is not GpuContext\")\n _context_reg[name] = ctx\n\n\ndef get_context(name):\n \"\"\"\n Retrive the context associated with a name.\n\n Return the context object mapped to `ref` that was previously\n register through :func:`reg_context`. Trying to get the context\n for an unregistered `ref` will raise a exception.\n\n Parameters\n ----------\n name : hashable object\n Name associated with the context we want (usually a string)\n\n \"\"\"\n if name not in _context_reg:\n raise ValueError(\"context name %s not defined\" % (name,))\n return _context_reg[name]\n\n\ndef list_contexts():\n \"\"\"\n Return an iterable of all the registered context names.\n \"\"\"\n return _context_reg.keys()\n\n\n# Private method\ndef _name_for_ctx(ctx):\n for k, v in _context_reg:\n if v == ctx:\n return k\n raise ValueError('context is not registered')\n\n\n# This is a private method for use by the tests only\ndef _unreg_context(name):\n del _context_reg[name]\n\n\nclass GpuArrayType(Type):\n def __init__(self, dtype, broadcastable, context_name=None, name=None):\n # In case this was not provided and no global value is available\n self.dtype = str(dtype)\n self.broadcastable = tuple(bool(b) for b in broadcastable)\n self.ndim = len(self.broadcastable)\n self.name = name\n self.context_name = context_name\n try:\n self.typecode = gpuarray.dtype_to_typecode(self.dtype)\n except gpuarray.GpuArrayException:\n raise TypeError(\"Unsupported dtype for %s: %s\" %\n (self.__class__.__name__, self.dtype))\n\n def clone(self, dtype=None, broadcastable=None):\n if dtype is None:\n dtype = self.dtype\n if broadcastable is None:\n broadcastable = self.broadcastable\n return self.__class__(dtype=dtype, broadcastable=broadcastable,\n context_name=self.context_name, name=self.name)\n\n # This is a property to keep the type pickleable\n @property\n def context(self):\n return get_context(self.context_name)\n\n def __repr__(self):\n return \"GpuArrayType<%s>(%s, %s)\" % (self.context_name, self.dtype,\n self.broadcastable)\n\n def filter(self, data, strict=False, allow_downcast=None):\n if (isinstance(data, gpuarray.GpuArray) and\n data.typecode == self.typecode):\n # This is just to make this condition not enter the\n # following branches\n pass\n elif strict:\n if not isinstance(data, gpuarray.GpuArray):\n raise TypeError(\"%s expected a GpuArray object.\" % self,\n data, type(data))\n if self.typecode != data.typecode:\n raise TypeError(\"%s expected typecode %d (dtype %s), 
\"\n \"got %d (dtype %s).\" %\n (self, self.typecode, self.dtype,\n data.typecode, str(data.dtype)))\n if self.context != data.context:\n raise TypeError(\"data context does not match type context\")\n # fallthrough to ndim check\n elif (allow_downcast or\n (allow_downcast is None and\n type(data) == float and\n self.dtype == config.floatX)):\n data = gpuarray.array(data, dtype=self.typecode, copy=False,\n ndmin=len(self.broadcastable),\n context=self.context)\n else:\n if not hasattr(data, 'dtype'):\n # This is to convert objects that don't have a dtype\n # (like lists). We anticipate that the type below\n # will match and we pass copy=False so it won't make a\n # second object on the GPU.\n data = gpuarray.array(data, copy=False, context=self.context)\n\n up_dtype = scalar.upcast(self.dtype, data.dtype)\n if up_dtype == self.dtype:\n data = gpuarray.array(data, dtype=self.dtype, copy=False,\n context=self.context)\n else:\n raise TypeError(\"%s cannot store a value of dtype %s \"\n \"without risking loss of precision.\" %\n (self, data.dtype))\n\n if self.ndim != data.ndim:\n raise TypeError(\"Wrong number of dimensions: expected %s, \"\n \"got %s with shape %s.\" % (self.ndim, data.ndim,\n data.shape), data)\n shp = data.shape\n for i, b in enumerate(self.broadcastable):\n if b and shp[i] != 1:\n raise TypeError(\"Non-unit value on shape on a broadcastable\"\n \" dimension.\", shp, self.broadcastable)\n return data\n\n def filter_variable(self, other, allow_convert=True):\n from theano.sandbox.gpuarray import GpuFromHost\n\n if hasattr(other, '_as_GpuArrayVariable'):\n other = other._as_GpuArrayVariable(self.context_name)\n\n if not isinstance(other, Variable):\n other = self.Constant(type=self, data=other)\n\n if other.type == self:\n return other\n\n if not isinstance(other.type, tensor.TensorType):\n raise TypeError('Incompatible type', (self, other.type))\n if (other.type.dtype != self.dtype):\n raise TypeError('Incompatible dtype', (self.dtype,\n other.type.dtype))\n if other.type.ndim != self.ndim:\n raise TypeError('Incompatible number of dimensions.'\n ' Expected %d, got %d.' % (self.ndim, other.ndim))\n if other.type.broadcastable != self.broadcastable:\n if allow_convert:\n type2 = other.type.clone(broadcastable=self.broadcastable)\n other2 = type2.convert_variable(other)\n else:\n other2 = None\n if other2 is None:\n raise TypeError('Incompatible broadcastable dimensions.'\n ' Expected %s, got %s.' 
%\n                                (str(other.type.broadcastable),\n                                 str(self.broadcastable)))\n                other = other2\n\n        return GpuFromHost(self.context_name)(other)\n\n    @staticmethod\n    def values_eq(a, b):\n        if a.shape != b.shape:\n            return False\n        if a.typecode != b.typecode:\n            return False\n        a_eq_b = numpy.asarray(compare(a, '==', b))\n        if a_eq_b.all():\n            return True\n\n        # maybe the trouble is that there are NaNs\n        a = numpy.asarray(a)\n        b = numpy.asarray(b)\n\n        a_missing = numpy.isnan(a)\n        if a_missing.any():\n            b_missing = numpy.isnan(b)\n            return numpy.all(a_eq_b + (a_missing == b_missing))\n        else:\n            return False\n\n    @staticmethod\n    def values_eq_approx(a, b,\n                         allow_remove_inf=False, allow_remove_nan=False,\n                         rtol=None, atol=None):\n        if a.shape != b.shape or a.dtype != b.dtype:\n            return False\n        if 'int' in str(a.dtype):\n            return GpuArrayType.values_eq(a, b)\n        else:\n            if allow_remove_inf or allow_remove_nan:\n                raise NotImplementedError(\n                    \"GpuArrayType.values_eq_approx() does not implement the\"\n                    \" allow_remove_inf and allow_remove_nan parameters\")\n            if a.dtype == 'float16' or b.dtype == 'float16':\n                an = numpy.asarray(a)\n                bn = numpy.asarray(b)\n                return tensor.TensorType.values_eq_approx(\n                    an, bn, allow_remove_inf=allow_remove_inf,\n                    allow_remove_nan=allow_remove_nan, rtol=rtol, atol=atol)\n            atol_, rtol_ = theano.tensor.basic._get_atol_rtol(a, b)\n            if rtol is not None:\n                rtol_ = rtol\n            if atol is not None:\n                atol_ = atol\n            res = elemwise2(a, '', b, a, odtype=numpy.dtype('bool'),\n                            op_tmpl=\"res[i] = (fabs(%%(a)s - %%(b)s) <\"\n                                    \"(%(atol_)s + %(rtol_)s * fabs(%%(b)s)))\" %\n                            locals())\n            ret = numpy.asarray(res).all()\n            if ret:\n                return True\n            # maybe the trouble is that there are NaNs\n            an = numpy.asarray(a)\n            bn = numpy.asarray(b)\n            return tensor.TensorType.values_eq_approx(\n                an, bn, allow_remove_inf=allow_remove_inf,\n                allow_remove_nan=allow_remove_nan, rtol=rtol, atol=atol)\n\n    @staticmethod\n    def may_share_memory(a, b):\n        if (not isinstance(a, gpuarray.GpuArray) or\n                not isinstance(b, gpuarray.GpuArray)):\n            return False\n        return pygpu.gpuarray.may_share_memory(a, b)\n\n    def value_zeros(self, shape):\n        return pygpu.gpuarray.zeros(shape, dtype=self.typecode,\n                                    context=self.context)\n\n    def make_variable(self, name=None):\n        return self.Variable(self, name=name)\n\n    def __eq__(self, other):\n        return (type(self) == type(other) and\n                self.typecode == other.typecode and\n                self.broadcastable == other.broadcastable and\n                self.context_name == other.context_name)\n\n    def convert_variable(self, var):\n        vt = var.type\n        if (type(self) == type(vt) and\n                self.typecode == vt.typecode and\n                self.ndim == vt.ndim and\n                self.context_name == vt.context_name and\n                all(sb == ob or ob for sb, ob in zip(self.broadcastable,\n                                                     vt.broadcastable))):\n            return theano.tensor.patternbroadcast(var, self.broadcastable)\n\n    def __hash__(self):\n        return hash((type(self), self.typecode, self.broadcastable,\n                     self.context_name))\n\n    def dtype_specs(self):\n        \"\"\"\n        Return a tuple (python type, c type, numpy typenum) that corresponds\n        to self.dtype.\n\n        This function is used internally as part of C code generation.\n\n        \"\"\"\n        # TODO: add more type correspondences for e.g. 
int32, int64, float32,\n # complex64, etc.\n try:\n return {\n 'float16': (float, 'npy_float16', 'NPY_FLOAT16'),\n 'float32': (float, 'npy_float32', 'NPY_FLOAT32'),\n 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),\n 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),\n 'int8': (int, 'npy_int8', 'NPY_INT8'),\n 'uint16': (int, 'npy_uint16', 'NPY_UINT16'),\n 'int16': (int, 'npy_int16', 'NPY_INT16'),\n 'uint32': (int, 'npy_uint32', 'NPY_UINT32'),\n 'int32': (int, 'npy_int32', 'NPY_INT32'),\n 'uint64': (int, 'npy_uint64', 'NPY_UINT64'),\n 'int64': (int, 'npy_int64', 'NPY_INT64'),\n 'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),\n 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')\n }[self.dtype]\n except KeyError:\n raise TypeError(\"Unsupported dtype for %s: %s\" %\n (self.__class__.__name__, self.dtype))\n\n def get_shape_info(self, obj):\n return obj.shape\n\n def get_size(self, shape_info):\n if shape_info:\n return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize\n else:\n return numpy.dtype(self.dtype).itemsize\n\n def c_declare(self, name, sub, check_input=True):\n return \"\"\"\n PyGpuArrayObject *%(name)s;\n \"\"\" % locals()\n\n def c_init(self, name, sub):\n return \"%s = NULL;\" % (name,)\n\n def c_extract(self, name, sub, check_input=True):\n # TODO I don't check broadcast stuff for now.\n return \"\"\"\n %(name)s = NULL;\n if (py_%(name)s == Py_None) {\n PyErr_SetString(PyExc_ValueError, \"expected a GpuArray, not None\");\n %(fail)s\n }\n /* First check if we are the base type exactly (the most common case),\n then do the full subclass check if needed. */\n if (py_%(name)s->ob_type != &PyGpuArrayType &&\n !PyObject_TypeCheck(py_%(name)s, &PyGpuArrayType)) {\n PyErr_SetString(PyExc_ValueError, \"expected a GpuArray\");\n %(fail)s\n }\n %(name)s = (PyGpuArrayObject *)py_%(name)s;\n Py_INCREF(%(name)s);\n \"\"\" % {'name': name, 'fail': sub['fail']}\n\n def c_cleanup(self, name, sub):\n return \"Py_XDECREF(%(name)s); %(name)s = NULL;\" % {'name': name}\n\n def c_sync(self, name, sub):\n return \"\"\"\n if (!%(name)s) {\n Py_XDECREF(py_%(name)s);\n Py_INCREF(Py_None);\n py_%(name)s = Py_None;\n } else if ((void *)py_%(name)s != (void *)%(name)s) {\n Py_XDECREF(py_%(name)s);\n py_%(name)s = (PyObject *)%(name)s;\n Py_INCREF(py_%(name)s);\n }\n \"\"\" % {'name': name}\n\n def c_init_code(self):\n # We don't actually need the numpy API except in\n # HostFromGpu and GpuFromHost and those case will be covered\n # by the TensorType parameter\n return ['import_pygpu__gpuarray();']\n\n def c_headers(self):\n # We need arrayobject for the PyArrayDescr struct def\n # (even if we just use a pointer to it in a function def)\n return ['<gpuarray/array.h>', '<gpuarray/kernel.h>',\n '<gpuarray/error.h>', '<gpuarray/buffer_blas.h>',\n '<numpy/arrayobject.h>', '<gpuarray_api.h>']\n\n def c_header_dirs(self):\n return [pygpu.get_include(), numpy.get_include()]\n\n def c_libraries(self):\n return ['gpuarray']\n\n def c_code_cache_version(self):\n ver = pygpu.gpuarray.api_version()\n # we only use the major version since the minor revision are\n # API-compatible.\n return (1, ver[0])\n\n\nclass _operators(_tensor_py_operators):\n def _as_TensorVariable(self):\n from .basic_ops import host_from_gpu\n return host_from_gpu(self)\n\n def _as_GpuArrayVariable(self, context_name):\n if self.type.context_name == context_name:\n return self\n else:\n from .basic_ops import GpuToGpu\n return GpuToGpu(context_name)(self)\n\n\nclass GpuArrayVariable(_operators, Variable):\n 
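# Concrete GPU variable: all tensor-style behavior (arithmetic, indexing,\n    # host/device transfer helpers) comes from the _operators mixin.\n    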
pass\n\n\nGpuArrayType.Variable = GpuArrayVariable\n\n\nclass GpuArraySignature(tensor.TensorConstantSignature):\n # might do something better if we can run the sum on the GPU, but\n # for now this will suffice.\n pass\n\n\nclass GpuArrayConstant(_operators, Constant):\n def signature(self):\n return GpuArraySignature((self.type, numpy.asarray(self.data)))\n\n def __str__(self):\n if self.name is not None:\n return self.name\n try:\n np_data = numpy.asarray(self.data)\n except gpuarray.GpuArrayException:\n np_data = self.data\n return \"GpuArrayConstant{%s}\" % np_data\n\n\nGpuArrayType.Constant = GpuArrayConstant\n\n\nclass GpuArraySharedVariable(_operators, SharedVariable):\n def get_value(self, borrow=False, return_internal_type=False):\n if return_internal_type:\n if borrow:\n return self.container.value\n else:\n return self.container.value.copy()\n else:\n return numpy.asarray(self.container.value)\n\n def set_value(self, value, borrow=False):\n if isinstance(value, pygpu.gpuarray.GpuArray):\n value = pygpu.gpuarray.array(value, copy=(not borrow),\n context=self.type.context)\n self.container.value = value\n\n def __getitem__(self, *args):\n return _operators.__getitem__(self, *args)\n\n\nGpuArrayType.SharedVariable = GpuArraySharedVariable\n\n\ndef gpuarray_shared_constructor(value, name=None, strict=False,\n allow_downcast=None, borrow=False,\n broadcastable=None,\n context_name=None):\n \"\"\"\n SharedVariable constructor for GpuArrayType.\n\n \"\"\"\n if not isinstance(value, (numpy.ndarray, pygpu.gpuarray.GpuArray)):\n raise TypeError('ndarray or GpuArray required')\n\n try:\n get_context(context_name)\n except ValueError:\n # Don't make this a hard error if we attempt to make a shared\n # variable while there is no default context.\n if context_name is None:\n raise TypeError('No default context and no context specified')\n raise\n\n if broadcastable is None:\n broadcastable = (False,) * value.ndim\n type = GpuArrayType(value.dtype, broadcastable, context_name=context_name)\n deviceval = pygpu.gpuarray.array(value, copy=(not borrow),\n context=type.context)\n return GpuArraySharedVariable(type=type, value=deviceval, name=name,\n strict=strict)\n\ntheano.compile.register_view_op_c_code(GpuArrayType, \"\"\"\n Py_XDECREF(%(oname)s);\n %(oname)s = %(iname)s;\n Py_XINCREF(%(oname)s);\n\"\"\", version=(0,))\n\n# Register GpuArrayType C code for Shape Op.\ntheano.compile.register_shape_c_code(\n GpuArrayType,\n \"\"\"\n npy_intp shape[] = {%(iname)s->ga.nd};\n if(%(oname)s == NULL || (PyArray_DIMS(%(oname)s)[0] != shape[0]))\n {\n Py_XDECREF(%(oname)s);\n %(oname)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, NPY_INT64);\n }\n for(int i=0;i<shape[0];i++)\n {\n ((npy_int64*)PyArray_GETPTR1(%(oname)s, i))[0] = %(iname)s->ga.dimensions[i];\n }\n \"\"\",\n version=1)\n\ntheano.compile.register_shape_i_c_code(\n GpuArrayType,\n \"\"\"\n if(!%(oname)s)\n %(oname)s=(PyArrayObject*)PyArray_ZEROS(0, NULL, NPY_INT64, 0);\n ((npy_int64*)PyArray_DATA(%(oname)s))[0] =\n %(iname)s->ga.dimensions[%(i)s];\n \"\"\",\n \"\"\"\n if (%(i)s>=%(iname)s->ga.nd){\n PyErr_SetString(PyExc_TypeError,\n \"Number of dimensions lower than expected\");\n %(fail)s\n }\n \"\"\",\n version=(1,))\n\ntheano.compile.register_deep_copy_op_c_code(GpuArrayType, \"\"\"\n Py_XDECREF(%(oname)s);\n %(oname)s = pygpu_copy(%(iname)s, GA_ANY_ORDER);\n if (!%(oname)s) { %(fail)s }\n\"\"\", version=(5,))\n\ntheano.compile.register_rebroadcast_c_code(\n GpuArrayType,\n \"\"\"\n if(%(iname)s->ga.dimensions[%(axis)s] != 1){\n 
PyErr_Format(PyExc_ValueError,\n                     \"Dimension %(axis)s in Rebroadcast's input was\"\n                     \" supposed to be 1 (got %%d instead)\",\n                     %(iname)s->ga.dimensions[%(axis)s]);\n        %(fail)s\n    }\n    \"\"\",\n    version=1)\n\ntheano.compile.register_specify_shape_c_code(\n    GpuArrayType,\n    \"\"\"\n        if (PyGpuArray_NDIM(%(iname)s) != PyArray_DIMS(%(shape)s)[0]) {\n            PyErr_Format(PyExc_AssertionError,\n                         \"SpecifyShape: vector of shape has %%d elements,\"\n                         \" but the input has %%d dimensions.\",\n                         PyGpuArray_NDIM(%(iname)s),\n                         PyArray_DIMS(%(shape)s)[0]);\n            %(fail)s;\n        }\n        for(int i = 0; i < PyGpuArray_NDIM(%(iname)s); i++){\n            dtype_%(shape)s shp = ((dtype_%(shape)s*)PyArray_GETPTR1(%(shape)s,\n                                                                     i))[0];\n            if (PyGpuArray_DIMS(%(iname)s)[i] != shp) {\n                PyErr_Format(PyExc_AssertionError,\n                             \"SpecifyShape: dim %%d of input has shape %%d,\"\n                             \" expected %%d.\",\n                             i, PyGpuArray_DIMS(%(iname)s)[i],\n                             shp);\n                %(fail)s;\n            }\n        }\n        Py_XDECREF(%(oname)s);\n        %(oname)s = %(iname)s;\n        Py_XINCREF(%(oname)s);\n    \"\"\",\n    version=1,\n    c_support_code_apply='#include <numpy_compat.h>')\n\n\nclass GpuContextType(Type):\n    def filter(self, data, strict=False, allow_downcast=None):\n        if not isinstance(data, gpuarray.GpuContext):\n            raise TypeError('context is not a GpuContext')\n        return data\n\n    def __eq__(self, other):\n        return type(self) == type(other)\n\n    def __hash__(self):\n        return hash(type(self))\n\n    @staticmethod\n    def values_eq(a, b):\n        return a == b\n\n    def c_declare(self, name, sub, check_input=True):\n        return \"PyGpuContextObject *%s;\" % (name,)\n\n    def c_init(self, name, sub):\n        return \"%s = NULL;\" % (name,)\n\n    def c_extract(self, name, sub, check_input=True):\n        if check_input:\n            res = \"\"\"\nif (!PyObject_TypeCheck(py_%(name)s, &PyGpuContextType)) {\n  PyErr_SetString(PyExc_TypeError, \"expected a GpuContext\");\n  %(fail)s\n}\n\"\"\" % dict(name=name, fail=sub['fail'])\n        else:\n            res = \"\"\n        return res + \"\"\"\n%(name)s = (PyGpuContextObject *)py_%(name)s;\nPy_INCREF(%(name)s);\n\"\"\" % dict(name=name)\n\n    def c_cleanup(self, name, sub):\n        return \"Py_XDECREF(%(name)s); %(name)s = NULL;\" % dict(name=name)\n\n    # c_sync is intentionally not declared to prevent normal usage\n\n    def c_init_code(self):\n        return ['import_pygpu__gpuarray();']\n\n    def c_headers(self):\n        return ['<gpuarray_api.h>']\n\n    def c_header_dirs(self):\n        return [pygpu.get_include()]\n\n    def c_code_cache_version(self):\n        ver = pygpu.gpuarray.api_version()\n        return (0, ver[0])\n\n    # Variable, Constant, ... 
not declared\n\ngpu_context_type = GpuContextType()\n", "\"\"\"\nDriver of graph construction, optimization, and linking.\n\n\"\"\"\nfrom __future__ import print_function\n\nimport copy\nfrom six import string_types, iteritems, iterkeys\nfrom six.moves import xrange\nimport six.moves.copyreg as copyreg\nimport six.moves.cPickle as pickle\nfrom itertools import chain\nimport time\nimport warnings\nimport numpy\n\nimport theano\nfrom theano import config, gof\nfrom functools import partial\nfrom theano.compat import izip\nfrom theano.gof import graph\nimport theano.compile.mode\nfrom theano.compile.io import (\n    In, SymbolicInput, SymbolicInputKit, SymbolicOutput)\nfrom theano.compile.ops import deep_copy_op, view_op\nfrom theano.gof.graph import is_same_graph\nfrom theano.gof.op import ops_with_inner_function\n\nimport logging\n_logger = logging.getLogger('theano.compile.function_module')\n\n__docformat__ = \"restructuredtext en\"\n\n\nclass UnusedInputError(Exception):\n    \"\"\"\n    A symbolic input passed to function is not needed.\n\n    \"\"\"\n\n    pass\n\n\ndef alias_root(v):\n    \"\"\"\n    Return the variable to which v is aliased by view_maps and destroy_maps.\n\n    \"\"\"\n    if v.owner is None:\n        return v\n    vmap = getattr(v.owner.op, 'view_map', {})\n    dmap = getattr(v.owner.op, 'destroy_map', {})\n    outpos = v.owner.outputs.index(v)\n    v_views = vmap.get(outpos, []) + dmap.get(outpos, [])\n    if len(v_views) > 1:\n        raise NotImplementedError(\n            str(v) + \" is a view/destroyed version of more than one input. \"\n            \"Currently, we only support the case where an output is a view or \"\n            \"a destroyed version of one input.\")\n    elif v_views:\n        return alias_root(v.owner.inputs[v_views[0]])\n    else:\n        return v\n\n\ndef view_tree_set(v, treeset):\n    \"\"\"\n    Add to `treeset` all variables that are views of v, given that v is\n    not a view.\n\n    \"\"\"\n    treeset.add(v)\n    for cl, v_input_pos_to_cl in v.clients:\n        if cl == 'output':\n            continue\n        vmap = getattr(cl.op, 'view_map', {})\n        dmap = getattr(cl.op, 'destroy_map', {})\n        for opos, iposlist in chain(iteritems(vmap), iteritems(dmap)):\n            if v_input_pos_to_cl in iposlist:\n                if cl.outputs[opos] not in treeset:\n                    view_tree_set(cl.outputs[opos], treeset)\n\n\ndef infer_reuse_pattern(fgraph, outputs_to_disown):\n    \"\"\"\n    Given an fgraph and a list of variables, returns the list or set\n    of all variables which may share the same underlying data storage\n    as any of the specified variables. 
Used internally by function,\n    FunctionMaker.\n\n    This list (or set) is also referred to as no_recycling sometimes,\n    especially by linker code.\n\n    \"\"\"\n    rval = set()\n    for o in outputs_to_disown:\n        view_tree_set(alias_root(o), rval)\n    # remove from rval all of the inputs, constants, values.\n    rval = set(r for r in rval if r.owner is not None)\n\n    return rval\n\n\ndef fgraph_updated_vars(fgraph, expanded_inputs):\n    \"\"\"\n    Reconstruct the full \"updates\" dictionary, mapping from FunctionGraph input\n    variables to the fgraph outputs that will replace their values.\n\n    Returns\n    -------\n    dict variable -> variable\n\n    \"\"\"\n    updated_vars = {}\n    potential_values = list(fgraph.outputs)  # copy the list\n    if len(expanded_inputs) != len(fgraph.inputs):\n        raise ValueError('expanded_inputs must match len(fgraph.inputs)')\n    for e_input, ivar in reversed(list(zip(expanded_inputs, fgraph.inputs))):\n        if e_input.update is not None:\n            updated_vars[ivar] = potential_values.pop()\n    return updated_vars\n\n\nclass Supervisor:\n    \"\"\"\n    Listener for FunctionGraph events which makes sure that no\n    operation overwrites the contents of protected Variables. The\n    outputs of the FunctionGraph are protected by default.\n\n    \"\"\"\n\n    def __init__(self, protected):\n        self.protected = list(protected)\n\n    def validate(self, fgraph):\n        if not hasattr(fgraph, 'destroyers'):\n            return True\n        for r in self.protected + list(fgraph.outputs):\n            if fgraph.destroyers(r):\n                raise gof.InconsistencyError(\"Trying to destroy a protected \"\n                                             \"Variable.\", r)\n\n\ndef std_fgraph(input_specs, output_specs, accept_inplace=False):\n    \"\"\"\n    Makes a FunctionGraph corresponding to the input specs and the output\n    specs. Any SymbolicInput in the input_specs, if its update field\n    is not None, will add an output to the FunctionGraph corresponding to that\n    update. The return value is the FunctionGraph as well as a list of\n    SymbolicOutput instances corresponding to the updates.\n\n    If accept_inplace is False, the graph will be checked for inplace\n    operations and an exception will be raised if it has any. 
If\n    accept_inplace is True, a DestroyHandler will be added to the FunctionGraph\n    if there are any inplace operations.\n\n    The returned FunctionGraph is a clone of the graph between the provided\n    inputs and outputs.\n\n    \"\"\"\n    orig_inputs = [spec.variable for spec in input_specs]\n\n    # Extract the updates and the mapping between update outputs and\n    # the updated inputs.\n    updates = []\n    update_mapping = {}\n    out_idx = len(output_specs)\n    for inp_idx in range(len(input_specs)):\n        if input_specs[inp_idx].update:\n            updates.append(input_specs[inp_idx].update)\n            update_mapping[out_idx] = inp_idx\n            out_idx += 1\n\n    orig_outputs = [spec.variable for spec in output_specs] + updates\n\n    fgraph = gof.fg.FunctionGraph(orig_inputs, orig_outputs,\n                                  update_mapping=update_mapping)\n\n    for node in fgraph.apply_nodes:\n        if getattr(node.op, 'destroy_map', None):\n            if not accept_inplace:\n                raise TypeError(\"Graph must not contain inplace operations\",\n                                node, node.op)\n            else:\n                fgraph.attach_feature(gof.DestroyHandler())\n                break\n\n    # We need to protect all immutable inputs from inplace operations.\n    fgraph.attach_feature(\n        Supervisor(input\n                   for spec, input in zip(input_specs, fgraph.inputs)\n                   if not (spec.mutable or\n                           (hasattr(fgraph, 'destroyers') and\n                            fgraph.destroyers(input)))))\n\n    # If named nodes are replaced, keep the name\n    for feature in std_fgraph.features:\n        fgraph.attach_feature(feature())\n    return fgraph, list(map(SymbolicOutput, updates))\n\n\nstd_fgraph.features = [gof.toolbox.PreserveNames]\n\n\nclass AliasedMemoryError(Exception):\n    \"\"\"\n    Memory is aliased that should not be.\n\n    \"\"\"\n    pass\n\n\n###\n# Function\n###\n\n# unique id object used as a placeholder for duplicate entries\nDUPLICATE = ['DUPLICATE']\n\n\nclass Function(object):\n    \"\"\"\n    Type of the functions returned by theano.function or\n    theano.FunctionMaker.create.\n\n    `Function` is the callable object that does computation. It has the storage\n    of inputs and outputs, performs the packing and unpacking of inputs and\n    return values. It implements the square-bracket indexing so that you can\n    look up the value of a symbolic node.\n\n    Functions are copyable via {{{fn.copy()}}} and {{{copy.copy(fn)}}}.\n    When a function is copied, this instance is duplicated. Contrast with\n    self.maker (instance of `FunctionMaker`) that is shared between copies.\n    The meaning of copying a function is that the containers and their current\n    values will all be duplicated. This requires that mutable inputs be\n    copied, whereas immutable inputs may be shared between copies.\n\n    A Function instance is hashable, on the basis of its memory\n    address (its id).\n\n    A Function instance is only equal to itself.\n\n    A Function instance may be serialized using the `pickle` or\n    `cPickle` modules. This will save all default inputs, the graph,\n    and *** to the pickle file (WRITEME).\n\n    A Function instance has a ``trust_input`` field that defaults to\n    False. When True, we don't do extra checks on the input to give\n    better error messages. In some cases, python code will still return\n    correct results if you pass a python or numpy scalar instead of a\n    numpy tensor. 
C code should raise an error if you pass an object\n of the wrong type.\n\n Attributes\n ----------\n finder\n inv_finder\n\n \"\"\"\n\n pickle_aliased_memory_strategy = 'warn'\n \"\"\"\n How to deal with pickling finding aliased storage.\n\n Meaningful settings are: 'ignore', 'warn', 'raise'.\n\n If the value is 'warn', then a message will be printed to stderr\n if aliased storage is dectected during pickle.dump.\n\n If the value is 'raise', then an AliasedMemoryError will be raised\n if aliased storage is detected during pickle.dump.\n\n \"\"\"\n\n input_storage = None\n \"\"\"\n List of Container instances.\n\n \"\"\"\n\n output_storage = None\n \"\"\"\n List of Container instances.\n\n \"\"\"\n\n indices = None\n \"\"\"\n List of (SymbolicInput|SymbolicInputKit, indices, [SymbolicInput,...]),\n one tuple for each input.\n\n The first tuple element is the SymbolicInput object for the corresponding\n function input.\n\n The second and third tuple elements are used only by Kits, which\n are deprecated.\n\n \"\"\"\n\n defaults = None\n \"\"\"\n List of 3-tuples, one 3-tuple for each input.\n\n Tuple element 0: Bool: Is this input required at each function call?\n Tuple element 1: Bool: Should this inputs value be reverted after\n each call?\n Tuple element 2: Any: The value associated with this input.\n\n \"\"\"\n\n unpack_single = None\n \"\"\"\n Bool: for outputs lists of length 1, should the 0'th element be\n returned directly?\n\n \"\"\"\n\n return_none = None\n \"\"\"\n Bool: whether the function should return None or not.\n\n \"\"\"\n\n maker = None\n \"\"\"\n FunctionMaker instance.\n\n \"\"\"\n\n fn = None\n \"\"\"\n A function that evaluates the graph. Typically a linker's make_thunk method\n created this function.\n\n \"\"\"\n\n finder = None\n \"\"\"\n Dictionary mapping several kinds of things to containers.\n\n We set an entry in finder for:\n\n - the index of the input\n\n - the variable instance the input is based on\n\n - the name of the input\n\n All entries map to the container or to DUPLICATE if an ambiguity\n is detected.\n\n \"\"\"\n\n inv_finder = None\n \"\"\"\n Dict. Reverse lookup of `finder`.\n\n It maps container -> SymbolicInput\n\n \"\"\"\n\n def __init__(self, fn, input_storage, output_storage, indices, outputs,\n defaults, unpack_single, return_none, output_keys, maker):\n self.fn = fn\n self.input_storage = input_storage\n self.output_storage = output_storage\n self.indices = indices\n self.outputs = outputs\n self.defaults = defaults\n self.unpack_single = unpack_single\n self.return_none = return_none\n self.maker = maker\n self.profile = None # reassigned in FunctionMaker.create\n self.trust_input = False # If True, we don't check the input parameter\n self.name = None\n self.nodes_with_inner_function = []\n self.output_keys = output_keys\n\n # We will be popping stuff off this `containers` object. 
It is a copy.\n containers = list(self.input_storage)\n finder = {}\n inv_finder = {}\n\n def distribute(indices, cs, value):\n input.distribute(value, indices, cs)\n for c in cs:\n c.provided += 1\n\n # Store the list of names of named inputs.\n named_inputs = []\n # Count the number of un-named inputs.\n n_unnamed_inputs = 0\n\n # Initialize the storage\n # this loop works by modifying the elements (as variable c) of\n # self.input_storage inplace.\n for i, ((input, indices, sinputs), (required, refeed, value)) in \\\n enumerate(zip(self.indices, defaults)):\n # this is true iff input is not a SymbolicInputKit\n if indices is None:\n # containers is being used as a stack. Here we pop off\n # the next one.\n c = containers[0]\n c.strict = getattr(input, 'strict', False)\n c.allow_downcast = getattr(input, 'allow_downcast', None)\n\n if value is not None:\n # Always initialize the storage.\n if isinstance(value, gof.Container):\n # There is no point in obtaining the current value\n # stored in the container, since the container is\n # shared.\n # For safety, we make sure 'refeed' is False, since\n # there is no need to refeed the defaullt value.\n assert not refeed\n else:\n c.value = value\n c.required = required\n c.implicit = input.implicit\n # this is a count of how many times the input has been\n # provided (reinitialized to 0 on __call__)\n c.provided = 0\n finder[i] = c\n finder[input.variable] = c\n if input.name not in finder:\n finder[input.name] = c\n else:\n finder[input.name] = DUPLICATE\n if input.name is None:\n n_unnamed_inputs += 1\n else:\n named_inputs.append(input.name)\n inv_finder[c] = input\n containers[:1] = []\n else:\n # TODO The following code may need to do something to handle\n # implicit inputs.\n\n # The input is a SymbolicInputKit, so we take as many\n # containers as the Kit provides inputs\n cs = containers[:len(indices)]\n # distribute does the initialization of the containers\n input.distribute(value, indices, cs)\n f = partial(distribute, indices, cs)\n # Like before, we set a finder entry for the kit. Note that\n # we are not mapping to a container but to a function which\n # can reinitialize all the containers\n finder[i] = f\n finder[input] = f\n if input.name not in finder:\n finder[input.name] = f\n else:\n finder[input.name] = DUPLICATE\n # For each input in the kit and its corresponding\n # container, we put an entry in finder. This allows\n # the user to micro-manage elements of the kit if need\n # be. 
All containers inherit the required field and\n # have their own \"provided\" counter\n for c, sin in zip(cs, sinputs):\n finder[sin.variable] = c\n finder[sin.name] = c\n if sin.name not in finder:\n finder[sin.name] = c\n else:\n finder[sin.name] = DUPLICATE\n inv_finder[c] = input\n c.required = required\n c.provided = 0\n containers[:len(indices)] = []\n\n self.finder = finder\n self.inv_finder = inv_finder\n\n # this class is important in overriding the square-bracket notation:\n # fn.value[x]\n # self reference is available via the closure on the class\n class ValueAttribute(object):\n def __getitem__(self, item):\n try:\n s = finder[item]\n except KeyError:\n raise TypeError(\"Unknown input or state: %s\" % str(item))\n if s is DUPLICATE:\n raise TypeError(\"Ambiguous name: %s - please check the \"\n \"names of the inputs of your function \"\n \"for duplicates.\" % str(item))\n if isinstance(s, gof.Container):\n return s.value\n else:\n raise NotImplementedError\n\n def __setitem__(self, item, value):\n try:\n s = finder[item]\n except KeyError:\n # Print informative error message.\n msg = get_info_on_inputs(named_inputs, n_unnamed_inputs)\n raise TypeError(\"Unknown input or state: %s. %s\" %\n (str(item), msg))\n if s is DUPLICATE:\n raise TypeError(\"Ambiguous name: %s - please check the \"\n \"names of the inputs of your function \"\n \"for duplicates.\" % str(item))\n if isinstance(s, gof.Container):\n s.value = value\n s.provided += 1\n else:\n s(value)\n\n def __contains__(self, item):\n return finder.__contains__(item)\n\n # this class is important in overriding the square-bracket notation:\n # fn.container[x]\n # self reference is available via the closure on the class\n class ContainerAttribute(object):\n def __getitem__(self, item):\n return finder[item]\n\n def __contains__(self, item):\n return finder.__contains__(item)\n # You cannot set the container\n\n self._value = ValueAttribute()\n self._container = ContainerAttribute()\n\n # Compute self.n_returned_outputs.\n # This is used only when fn.need_update_inputs is False\n # because we're using one of the VM objects and it is\n # putting updates back into the input containers all by itself.\n assert len(self.maker.expanded_inputs) == len(self.input_storage)\n self.n_returned_outputs = len(self.output_storage)\n for input in self.maker.expanded_inputs:\n if input.update is not None:\n self.n_returned_outputs -= 1\n\n for node in self.maker.fgraph.apply_nodes:\n if node.op in ops_with_inner_function:\n self.nodes_with_inner_function.append(node.op)\n\n def __contains__(self, item):\n return self.value.__contains__(item)\n\n def __getitem__(self, item):\n return self.value[item]\n\n def __setitem__(self, item, value):\n self.value[item] = value\n\n def __copy__(self):\n \"\"\"\n Copy a function. Copied function have separate intermediate\n storages and output storages with original function\n \"\"\"\n return self.copy()\n\n def copy(self, share_memory=False, swap=None, delete_updates=False,\n name=None, profile=None):\n \"\"\"\n Copy this function. Copied function will have separated maker and\n fgraph with original function. User can choose whether to separate\n storage by changing the share_memory arguments.\n\n Parameters\n ----------\n share_memory : boolean\n When True, two function share intermediate storages(storages except input and\n output storages). Otherwise two functions will only share partial\n storages and same maker. 
If two functions share memory and\n allow_gc=False, this will increase executing speed and save memory.\n\n swap : dict\n Dictionary that map old SharedVariables to new\n SharedVariables. Default is None.\n NOTE: The shared variable swap in only done in the new returned\n function, not in the user graph.\n\n delete_updates : boolean\n If True, Copied function will not have updates.\n name : string\n If provided, will be the name of the new\n Function. Otherwise, it will be old + \" copy\"\n\n profile :\n as theano.function profile parameter\n\n Returns\n -------\n Copied theano.Function\n \"\"\"\n # helper function\n def checkSV(sv_ori, sv_rpl):\n \"\"\"\n Assert two SharedVariable follow some restirctions:\n 1. same type\n 2. same shape or dim?\n \"\"\"\n SharedVariable = theano.tensor.sharedvar.SharedVariable\n assert isinstance(sv_ori, SharedVariable), (\n \"Key of swap should be SharedVariable, given:\", sv_ori,\n \" type\", type(sv_ori))\n assert isinstance(sv_rpl, SharedVariable), (\n \"Value of swap should be SharedVariable, given:\", sv_rpl,\n \"type\", type(sv_ori))\n assert sv_ori.type == sv_rpl.type, (\n \"Type of given SharedVariable conflicts with original one\",\n \"Type of given SharedVariable:\", sv_rpl.type,\n \"Type of original SharedVariable:\", sv_ori.type)\n\n maker = self.maker\n\n # Copy Ins and their storage.\n # so that they have different storage as their value\n ins = [copy.copy(input) for input in maker.inputs]\n\n # Delete update output in fgraph and updates In instances if needed\n if delete_updates:\n # The first len(maker.outputs) variables are original variables.\n # The rest are the updates.\n out_vars = maker.fgraph.outputs[:len(maker.outputs)]\n else:\n out_vars = maker.fgraph.outputs\n\n # Init new fgraph using copied variables and get memo\n # memo: a dict that map old variables to new variables\n memo = graph.clone_get_equiv(maker.fgraph.inputs, out_vars)\n fg_cpy = gof.fg.FunctionGraph([memo[i] for i in maker.fgraph.inputs],\n [memo[o] for o in out_vars],\n clone=False)\n\n # Re initialize Outs and swap update and variable in Ins\n # By doing this, we can pass FunctionMaker._check_unused_inputs()\n outs = list(map(SymbolicOutput, fg_cpy.outputs[:len(maker.outputs)]))\n for out_ori, out_cpy in zip(maker.outputs, outs):\n out_cpy.borrow = out_ori.borrow\n\n # swap SharedVariable\n if swap is not None:\n exist_svs = [i.variable for i in maker.inputs]\n\n # Check if given ShareVariables exist\n for sv in iterkeys(swap):\n if sv not in exist_svs:\n raise ValueError(\"SharedVariable: %s not found\" %\n (sv.name))\n\n # Swap SharedVariable in fgraph and In instances\n for index, (i, in_v) in enumerate(zip(ins, fg_cpy.inputs)):\n # Variables in maker.inputs are defined by user, therefore we\n # use them to make comparision and do the mapping.\n # Otherwise we don't touch them.\n var = maker.inputs[index].variable\n\n if var in swap:\n swap_sv = swap[var]\n checkSV(i.variable, swap_sv)\n\n # swap variable and value of In instances\n i.variable = swap_sv\n i.value = swap_sv.container\n\n # In the fgraph we use the cloned SharedVariable\n swap_sv = swap_sv.clone()\n\n # Swap SharedVariable in fgraph\n # if inputs was replaced, change self.inputs\n fg_cpy.inputs[index] = swap_sv\n fg_cpy.replace(in_v, swap_sv, reason=\"Swap SV\")\n\n # Delete update if needed\n update_i = len(outs)\n for i, in_var in zip(ins, fg_cpy.inputs):\n i.variable = in_var\n if not delete_updates and i.update is not None:\n i.update = fg_cpy.outputs[update_i]\n update_i += 1\n else:\n 
i.update = None\n\n        # Construct new storage_map that maps new variables to old storage,\n        # so that the ensuing function shares storage with the original one\n        storage_map = self.fn.storage_map\n        new_storage_map = {}\n        # TODO: We could share the output storage, but we must make sure\n        # 2 different function calls won't override each other's values. This\n        # is already done elsewhere, so to reuse it the user would need to\n        # use Out(var, borrow=True) and maybe the mutable=True flag too.\n        # But to be safe for now as it isn't documented and we aren't sure\n        # it is well tested, we don't share the part of the storage_map.\n        if share_memory:\n            i_o_vars = maker.fgraph.inputs + maker.fgraph.outputs\n            for key in storage_map.keys():\n                if key not in i_o_vars:\n                    new_storage_map[memo[key]] = storage_map[key]\n\n        if not name and self.name:\n            name = self.name + \" copy\"\n\n        input_storage = [i.value for i in ins]\n        # reinitialize new maker and create new function\n        if profile is None:\n            profile = config.profile\n            # profile -> True or False\n        if profile is True:\n            if name:\n                message = name\n            else:\n                message = str(maker.profile.message) + \" copy\"\n            profile = theano.compile.profiling.ProfileStats(message=message)\n            # profile -> object\n        elif type(profile) == str:\n            profile = theano.compile.profiling.ProfileStats(message=profile)\n\n        f_cpy = maker.__class__(inputs=ins, outputs=outs, fgraph=fg_cpy,\n                                mode=maker.mode, profile=profile,\n                                on_unused_input=maker.on_unused_input,\n                                function_builder=maker.function_builder,\n                                # As this is an optimized graph, it\n                                # can contain inplace. DebugMode check\n                                # that.\n                                accept_inplace=True,\n                                ).create(input_storage,\n                                         storage_map=new_storage_map)\n\n        for in_ori, in_cpy, ori, cpy in zip(maker.inputs, f_cpy.maker.inputs,\n                                            self.input_storage,\n                                            f_cpy.input_storage):\n\n            # Share immutable SharedVariable and constant input's storage\n            swapped = swap is not None and in_ori.variable in swap\n\n            # Using the original storage if SharedVariable will not be updated\n            # and is not swapped\n            if not in_ori.mutable and not swapped:\n                cpy.data = ori.data\n                in_cpy.value = in_ori.value\n\n            # Reconstruct Function.finder which maps Variables defined by the\n            # user to containers, to make Function.value and Function.data work well.\n            # Replace variable in new maker.inputs by the original ones.\n            # So that user can swap SharedVariable in a swapped function\n            container = f_cpy.finder.pop(in_cpy.variable)\n            if not swapped:\n                f_cpy.finder[in_ori.variable] = container\n                in_cpy.variable = in_ori.variable\n            else:\n                f_cpy.finder[swap[in_ori.variable]] = container\n                in_cpy.variable = swap[in_ori.variable]\n\n        f_cpy.name = name\n        f_cpy.maker.fgraph.name = name\n        return f_cpy\n\n    def __call__(self, *args, **kwargs):\n        profile = self.profile\n        t0 = time.time()\n\n        # Reinitialize each container's 'provided' counter\n        if self.trust_input:\n            i = 0\n            for arg in args:\n                s = self.input_storage[i]\n                s.storage[0] = arg\n                i += 1\n        else:\n            for c in self.input_storage:\n                c.provided = 0\n\n        if len(args) + len(kwargs) > len(self.input_storage):\n            raise TypeError(\"Too many parameters passed to theano function\")\n\n        # Set positional arguments\n        i = 0\n        for arg in args:\n            # TODO: provide a Param option for skipping the filter if we\n            # really want speed.\n            s = self.input_storage[i]\n            # see this email thread for a discussion about None as input\n            # https://groups.google.com/group/theano-dev/browse_thread/thread/920a5e904e8a8525/4f1b311a28fc27e5\n            if arg is None:\n                s.storage[0] = arg\n            else:\n                try:\n                    s.storage[0] = s.type.filter(\n                        arg, 
strict=s.strict,\n allow_downcast=s.allow_downcast)\n\n except Exception as e:\n function_name = \"theano function\"\n if self.name:\n function_name += ' with name \"' + self.name + '\" '\n e.args = (\"Bad input argument to \" + function_name +\n \" at index %d(0-based)\" % i,) + e.args\n raise\n s.provided += 1\n i += 1\n\n # Set keyword arguments\n if kwargs: # for speed, skip the iteritems for empty kwargs\n for k, arg in iteritems(kwargs):\n self[k] = arg\n\n if (not self.trust_input and\n getattr(self, '_check_for_aliased_inputs', True)):\n # Collect aliased inputs among the storage space\n args_share_memory = []\n for i in xrange(len(self.input_storage)):\n i_var = self.maker.inputs[i].variable\n i_val = self.input_storage[i].storage[0]\n if hasattr(i_var.type, 'may_share_memory'):\n is_aliased = False\n for j in xrange(len(args_share_memory)):\n\n group_j = izip(\n [self.maker.inputs[k].variable for k\n in args_share_memory[j]],\n [self.input_storage[k].storage[0] for k\n in args_share_memory[j]])\n if numpy.any([(var.type is i_var.type and\n var.type.may_share_memory(val, i_val))\n for (var, val) in group_j]):\n\n is_aliased = True\n args_share_memory[j].append(i)\n break\n\n if not is_aliased:\n args_share_memory.append([i])\n\n # Check for groups of more than one argument that share memory\n for group in args_share_memory:\n if len(group) > 1:\n # copy all but the first\n for idx in group[1:]:\n self.input_storage[i].storage[0] = copy.copy(\n self.input_storage[i].storage[0])\n\n # Check if inputs are missing, or if inputs were set more than once, or\n # if we tried to provide inputs that are supposed to be implicit.\n if not self.trust_input:\n for c in self.input_storage:\n if c.required and not c.provided:\n raise TypeError(\"Missing required input: %s\" %\n getattr(self.inv_finder[c], 'variable',\n self.inv_finder[c]))\n if c.provided > 1:\n raise TypeError(\"Multiple values for input: %s\" %\n getattr(self.inv_finder[c], 'variable',\n self.inv_finder[c]))\n if c.implicit and c.provided > 0:\n raise TypeError(\n 'Tried to provide value for implicit input: %s'\n % getattr(self.inv_finder[c], 'variable',\n self.inv_finder[c]))\n\n # Do the actual work\n t0_fn = time.time()\n try:\n outputs = self.fn()\n except Exception:\n if hasattr(self.fn, 'position_of_error'):\n # this is a new vm-provided function or c linker\n # they need this because the exception manipulation\n # done by raise_with_op is not implemented in C.\n thunk = None\n if hasattr(self.fn, 'thunks'):\n thunk = self.fn.thunks[self.fn.position_of_error]\n gof.link.raise_with_op(\n node=self.fn.nodes[self.fn.position_of_error],\n thunk=thunk,\n storage_map=getattr(self.fn, 'storage_map', None))\n else:\n # old-style linkers raise their own exceptions\n raise\n\n dt_fn = time.time() - t0_fn\n self.maker.mode.fn_time += dt_fn\n if profile:\n profile.vm_call_time += dt_fn\n\n # Retrieve the values that were computed\n if outputs is None:\n outputs = [x.data for x in self.output_storage]\n assert len(outputs) == len(self.output_storage)\n\n # Remove internal references to required inputs.\n # These cannot be re-used anyway.\n for c in self.input_storage:\n if c.required:\n c.storage[0] = None\n\n # if we are allowing garbage collection, remove the\n # output reference from the internal storage cells\n if getattr(self.fn, 'allow_gc', False):\n assert len(self.output_storage) == len(self.maker.fgraph.outputs)\n for o_container, o_variable in zip(self.output_storage,\n self.maker.fgraph.outputs):\n if o_variable.owner is 
not None:\n # this node is the variable of computation\n # WARNING: This circumvents the 'readonly' attribute in x\n o_container.storage[0] = None\n\n if getattr(self.fn, 'need_update_inputs', True):\n # Update the inputs that have an update function\n for input, storage in reversed(list(zip(self.maker.expanded_inputs,\n self.input_storage))):\n if input.update is not None:\n storage.data = outputs.pop()\n else:\n outputs = outputs[:self.n_returned_outputs]\n\n # Put default values back in the storage\n for i, (required, refeed, value) in enumerate(self.defaults):\n if refeed:\n if isinstance(value, gof.Container):\n value = value.storage[0]\n self[i] = value\n #\n # NOTE: This logic needs to be replicated in\n # scan.\n # grep for 'PROFILE_CODE'\n #\n\n dt_call = time.time() - t0\n self.maker.mode.call_time += dt_call\n if profile:\n profile.fct_callcount += 1\n profile.fct_call_time += dt_call\n if hasattr(self.fn, 'update_profile'):\n self.fn.update_profile(profile)\n\n if self.return_none:\n return None\n elif self.unpack_single and len(outputs) == 1:\n return outputs[0]\n else:\n\n if self.output_keys is not None:\n\n assert len(self.output_keys) == len(outputs)\n\n return dict(izip(self.output_keys, outputs))\n\n return outputs\n\n value = property(\n lambda self: self._value,\n None, # this property itself is not settable\n doc=\"dictionary-like access to the values associated with Variables\")\n container = property(\n lambda self: self._container,\n None, # this property itself is not settable\n doc=(\"dictionary-like access to the containers associated with \"\n \"Variables\"))\n\n def free(self):\n \"\"\"\n When allow_gc = False, clear the Variables in storage_map\n \"\"\"\n # 1.no allow_gc return False\n # 2.has allow_gc, if allow_gc is False, return True\n if not getattr(self.fn, 'allow_gc', True):\n for key in self.fn.storage_map:\n if not isinstance(key, theano.gof.Constant):\n self.fn.storage_map[key][0] = None\n\n for node in self.nodes_with_inner_function:\n ops_with_inner_function[node.op].free()\n\n\n# pickling/deepcopy support for Function\n\ndef _pickle_Function(f):\n # copy of the input storage list\n ins = list(f.input_storage)\n input_storage = []\n\n for (input, indices, inputs), (required, refeed, default) in \\\n zip(f.indices, f.defaults):\n if isinstance(input, SymbolicInputKit):\n li = len(indices)\n if not default:\n input_storage.append(ins[:li])\n else:\n input_storage.append(default)\n ins[:li] = []\n else:\n input_storage.append(ins[0])\n del ins[0]\n\n inputs_data = [x.data for x in f.input_storage]\n\n # HACK to detect aliased storage.\n # This is here because aliased relationships are not [currently]\n # preserved across the pickle operation\n if not (f.pickle_aliased_memory_strategy == 'ignore'):\n all_data = input_storage + inputs_data\n for i, d_i in enumerate(all_data):\n for j, d_j in enumerate(all_data):\n if ((i < j) and isinstance(d_i, numpy.ndarray) and\n isinstance(d_j, numpy.ndarray)):\n if numpy.may_share_memory(d_i, d_j):\n if f.pickle_aliased_memory_strategy == 'warn':\n _logger.warning('aliased relationship between '\n 'Function arguments %s, %s '\n 'will not be preserved by '\n 'un-pickling operation' %\n (str(d_i), str(d_j)))\n else:\n raise AliasedMemoryError(d_i, d_j)\n rval = (_constructor_Function, (f.maker, input_storage, inputs_data))\n return rval\n\n\ndef _constructor_Function(maker, input_storage, inputs_data):\n if not theano.config.unpickle_function:\n return None\n f = maker.create(input_storage, trustme=True)\n assert 
len(f.input_storage) == len(inputs_data)\n for container, x in zip(f.input_storage, inputs_data):\n assert (container.data is x) or \\\n (isinstance(x, numpy.ndarray) and (container.data == x).all()) or \\\n (container.data == x)\n return f\n\ncopyreg.pickle(Function, _pickle_Function)\n\n\n###\n# FunctionMaker\n###\n\ndef insert_deepcopy(fgraph, wrapped_inputs, wrapped_outputs):\n \"\"\"\n Insert deepcopy in the fgraph to break aliasing of outputs\n \"\"\"\n # This loop was inserted to remove aliasing between outputs when\n # they all evaluate to the same value. Originally it was OK for\n # outputs to be aliased, but some of the outputs can be shared\n # variables, and is not good for shared variables to be\n # aliased. It might be possible to optimize this by making sure\n # there is no aliasing only between shared variables.\n\n # If some outputs are constant, we add deep copy to respect the\n # memory contract\n\n # We don't insert deep copy when the output.borrow is True for all\n # conserned outputs.\n\n assert len(wrapped_inputs) == len(fgraph.inputs)\n assert len(wrapped_outputs) == len(fgraph.outputs)\n reason = \"insert_deepcopy\"\n updated_fgraph_inputs = [fgraph_i for i, fgraph_i in\n zip(wrapped_inputs, fgraph.inputs)\n if getattr(i, 'update', False)]\n\n # We can't use fgraph.inputs as this don't include Constant Value.\n all_graph_inputs = gof.graph.inputs(fgraph.outputs)\n\n for i in xrange(len(fgraph.outputs)):\n views_of_output_i = set()\n view_tree_set(alias_root(fgraph.outputs[i]), views_of_output_i)\n copied = False\n # do not allow outputs to be aliased\n for j in xrange(i + 1, len(fgraph.outputs)):\n # We could don't put deep copy if both outputs have borrow==True\n # and not(wrapped_outputs[i].borrow and wrapped_outputs[j].borrow):\n if fgraph.outputs[j] in views_of_output_i:\n if wrapped_outputs[i].borrow and wrapped_outputs[j].borrow:\n fgraph.change_input('output', i,\n view_op(fgraph.outputs[i]),\n reason=reason)\n else:\n fgraph.change_input('output', i,\n deep_copy_op(fgraph.outputs[i]),\n reason=reason)\n copied = True\n break\n\n if not copied:\n for input_j in all_graph_inputs:\n # do not allow outputs to be aliased to an inputs (j), unless\n # a) that j'th input has been 'destroyed' by\n # e.g. in-place computations\n # b) that j'th input is a shared variable that is also\n # being updated\n if (hasattr(fgraph, 'get_destroyers_of') and\n fgraph.get_destroyers_of(input_j)):\n continue\n if input_j in updated_fgraph_inputs:\n continue\n if input_j in views_of_output_i:\n # We don't put deep_copy_op if the input and the\n # output have borrow==True\n if input_j in fgraph.inputs:\n j = fgraph.inputs.index(input_j)\n if (wrapped_outputs[i].borrow and\n wrapped_inputs[j].borrow):\n fgraph.change_input('output', i,\n view_op(fgraph.outputs[i]),\n reason=\"insert_deepcopy\")\n break\n else:\n fgraph.change_input(\n 'output', i,\n deep_copy_op(fgraph.outputs[i]),\n reason=\"insert_deepcopy\")\n break\n elif wrapped_outputs[i].borrow:\n fgraph.change_input('output', i,\n view_op(fgraph.outputs[i]),\n reason=\"insert_deepcopy\")\n break\n else:\n fgraph.change_input('output', i,\n deep_copy_op(fgraph.outputs[i]),\n reason=\"insert_deepcopy\")\n break\n\nNODEFAULT = ['NODEFAULT']\n\n\nclass FunctionMaker(object):\n \"\"\"\n `FunctionMaker` is the class to `create` `Function` instances.\n\n This class has the fgraph, the optimizer, and the linker. When\n copying a `Function`, there is no need to duplicate the\n `FunctionMaker` instance. 
Deepcopy still copies both, which can\n variable in re-compilation.\n\n Parameters\n ----------\n inputs : list of SymbolicInput instances\n outputs : list of SymbolicOutput instances\n Outputs may also be a single Variable (not a list), in which case the\n functions produced by FunctionMaker will return their output value\n directly.\n mode : Mode instance\n Telling FunctionMaker how to optimize and link. None means to use the\n `config.mode`.\n accept_inplace : bool\n True iff it is acceptable to have inplace operations in the graph from\n the inputs to the outputs.\n on_unused_input : {'raise', 'warn', 'ignore', None}\n What to do if a variable in the 'inputs' list is not used in the graph.\n Possible values are:\n - 'raise': raise an error\n - 'warn': log a warning\n - 'ignore': do not do anything\n - None: Use the value in the Theano flags on_unused_input.\n\n \"\"\"\n\n @staticmethod\n def wrap_in(input):\n if isinstance(input, (SymbolicInput, SymbolicInputKit)):\n return input\n elif isinstance(input, gof.Variable):\n # r -> SymbolicInput(variable=r)\n return SymbolicInput(input)\n elif isinstance(input, (list, tuple)):\n # (r, u) -> SymbolicInput(variable=r, update=u)\n if len(input) == 2:\n return SymbolicInput(input[0], update=input[1])\n else:\n raise TypeError(\"Expected two elements in the list or tuple.\",\n input)\n else:\n raise TypeError(\"Unknown input type: %s (%s), expected Variable \"\n \"instance\", type(input), input)\n\n @staticmethod\n def expand_in(sinput, rinputs):\n # For SymbolicInputKits, this extracts a list of SymbolicInput\n # instances and corresponding indices such that these\n # SymbolicInputs are representative of some of the Variable\n # instances in inputs. For SymbolicInput, this returns None\n # as the list of indices and a list with just the\n # SymbolicInput.\n if isinstance(sinput, SymbolicInputKit):\n return sinput.complete(rinputs)\n elif isinstance(sinput, SymbolicInput):\n return [None, [sinput]]\n\n @staticmethod\n def wrap_out(output):\n if isinstance(output, SymbolicOutput):\n return output\n elif isinstance(output, gof.Variable):\n return SymbolicOutput(output)\n else:\n raise TypeError(\"Unknown output type: %s (%s)\", type(output),\n output)\n\n def optimize_graph_with_cache(self, optimizer, inputs, outputs):\n # This function is not finished\n from theano.gof.compilelock import get_lock, release_lock\n import os.path\n\n graph_db_file = os.path.join(theano.config.compiledir,\n 'optimized_graphs.pkl')\n\n # the inputs, outputs, and size of the graph to be optimized\n inputs_new = [inp.variable for inp in inputs]\n outputs_new = [out.variable for out in outputs]\n size_new = len(self.fgraph.apply_nodes)\n get_lock()\n # Beginning of cache optimizations.\n # Could be refactored in different functions.\n\n def load_graph_db():\n if os.path.isfile(graph_db_file):\n print('graph_db already exists')\n else:\n # create graph_db\n f = open(graph_db_file, 'wb')\n print('create new graph_db in %s' % graph_db_file)\n # file needs to be open and closed for every pickle\n f.close()\n # load the graph_db dictionary\n try:\n f = open(graph_db_file, 'rb')\n # Temporary hack to allow\n # theano.scan_module.tests.test_scan.T_Scan to\n # finish. 
Should be changed in definitive version.\n tmp = theano.config.unpickle_function\n theano.config.unpickle_function = False\n graph_db = pickle.load(f)\n\n # hack end\n f.close()\n print('graph_db loaded and it is not empty')\n except EOFError as e:\n # the file has nothing in it\n print(e)\n print('graph_db loaded and it is empty')\n graph_db = {}\n finally:\n theano.config.unpickle_function = tmp\n\n return graph_db\n\n def find_same_graph_in_db(graph_db):\n # If found_graph_in_db is None, then need to optimize.\n # Otherwise, return the graph found.\n found_graph_in_db = None\n # The sole purpose of this loop is to set 'need_optimize' by\n # going through graph_db, looking for graph that has the same\n # computation performed.\n for graph_old, graph_optimized in iteritems(graph_db):\n inputs_old = graph_old.inputs\n outputs_old = graph_old.outputs\n size_old = len(graph_old.apply_nodes)\n # Some heuristics to check is the same graphs have\n # already been optimized before.\n if len(inputs_new) != len(inputs_old):\n # If the inputs are of different size,\n # two graphs are for sure different\n print('need to optimize, because input size is different')\n continue\n elif len(outputs_new) != len(outputs_old):\n # If the inputs are of different size,\n # two graphs are for sure different\n print('need to optimize, because output size is different')\n continue\n elif not all(input_new.type == input_old.type\n for input_new, input_old in\n zip(inputs_new, inputs_old)):\n print('need to optimize, because inputs are of different '\n 'types')\n continue\n elif not all(output_new.type == output_old.type\n for output_new, output_old in\n zip(outputs_new, outputs_old)):\n print('need to optimize, because outputs are of different '\n 'types')\n continue\n elif not size_old == size_new:\n print('need to optimize, because numbers of nodes in graph'\n ' are different')\n continue\n else:\n flags = []\n for i, (output_new, output_old) in enumerate(\n zip(outputs_new, outputs_old)):\n print('loop through outputs node for both graphs')\n graph_old.variables = set(gof.graph.variables(\n graph_old.inputs, graph_old.outputs))\n\n # using clone allowed to avoid a lot of errors\n # deep copy seemed to had.\n f2 = graph_old.clone(check_integrity=False)\n t1 = output_new\n t2 = f2.outputs[i]\n\n # Used to remove \"already used by another graph error\n def removeAllFgraph(remove):\n if hasattr(remove, 'fgraph'):\n del remove.fgraph\n if hasattr(remove, 'owner'):\n if remove.owner is None:\n pass\n else:\n if hasattr(remove.owner, 'fgraph'):\n del remove.owner.fgraph\n if hasattr(remove.owner, 'inputs'):\n remove.owner.inputs = [removeAllFgraph(\n i) for i in remove.owner.inputs]\n for o in remove.owner.outputs:\n if hasattr(o, 'fgraph'):\n del o.fgraph\n return remove\n\n t2 = removeAllFgraph(t2)\n\n givens = dict(izip(gof.graph.inputs([t1]),\n gof.graph.inputs([t2])))\n\n temp = dict(izip(gof.graph.inputs([t1]),\n gof.graph.inputs([t2])))\n\n # hack to remove inconstent entry in givens\n # seems to work that but source of inconsistency\n # could be worth investigating.\n for key, value in iteritems(temp):\n if key.type != value.type:\n del givens[key]\n\n flag = is_same_graph(t1, t2, givens=givens)\n\n flags.append(flag)\n\n is_same = all(flags)\n if is_same:\n # found the match\n print('found a match, no need to optimize')\n found_graph_in_db = graph_optimized\n break\n return found_graph_in_db\n\n graph_db = load_graph_db()\n print('loaded graph_db from %s, size=%d' % (graph_db_file,\n len(graph_db)))\n 
found_graph = find_same_graph_in_db(graph_db)\n if found_graph:\n self.fgraph = found_graph\n optimizer_profile = None\n else:\n # this is a brand new graph, optimize it, save it to graph_db\n print('graph not found in graph_db, optimizing the graph')\n self.fgraph.variables = set(gof.graph.variables(\n self.fgraph.inputs, self.fgraph.outputs))\n # check_integrity parameters was added to ignore\n # \"excess cached variables\" errors. Works that way\n # but once again the error couldbe worth\n # investigating.\n before_opt = self.fgraph.clone(check_integrity=False)\n optimizer_profile = optimizer(self.fgraph)\n graph_db.update({before_opt: self.fgraph})\n f = open(graph_db_file, 'wb')\n pickle.dump(graph_db, f, -1)\n f.close()\n print('new graph saved into graph_db')\n release_lock()\n return optimizer_profile\n\n def __init__(self, inputs, outputs,\n mode=None, accept_inplace=False, function_builder=Function,\n profile=None, on_unused_input=None, fgraph=None,\n output_keys=None):\n mode = theano.compile.mode.get_mode(mode)\n\n # figure out which profile object to use (if any)\n # to help with forward-porting ProfileMode,\n # we allow ProfileMode to provide a ProfileStats object\n # using this somewhat awkward mechanism.\n mode_profile = getattr(mode, 'profile', None)\n if (profile is not None and\n profile is not False and\n mode_profile is not None):\n raise TypeError(\n 'profile passed via both \"mode\" and \"profile\" arguments')\n self.profile = profile = profile or mode_profile\n if profile:\n # This is very important:\n # 1) We preload the cache here to don't have its timming\n # included in optimization that compile function.\n # 2) Do not refresh the cache here by default. It cause\n # too much execution time during testing as we compile\n # much more functions then the number of compile c\n # module.\n theano.gof.cc.get_module_cache().refresh()\n # Handle the case where inputs and/or outputs is a single\n # Variable (not in a list)\n self.orig_outputs = outputs\n unpack_single = False\n return_none = False\n if outputs is None:\n return_none = True\n outputs = []\n if not isinstance(outputs, (list, tuple)):\n unpack_single = True\n outputs = [outputs]\n if not isinstance(inputs, (list, tuple)):\n inputs = [inputs]\n\n # Wrap them in In or Out instances if needed.\n inputs = [self.wrap_in(i) for i in inputs]\n outputs = [self.wrap_out(o) for o in outputs]\n _inputs = gof.graph.inputs([o.variable for o in outputs] +\n [i.update for i in inputs\n if getattr(i, 'update', False)])\n\n # Check if some input variables are unused\n self._check_unused_inputs(inputs, outputs, on_unused_input)\n\n # Make a list of (SymbolicInput|SymblicInputKits, indices,\n # [SymbolicInput,...]), one tuple for each input. 
(See\n # Function.indices for more details)\n indices = [[input] + self.expand_in(input, _inputs)\n for input in inputs]\n\n if fgraph is None:\n need_opt = True\n # make the fgraph (copies the graph, creates NEW INPUT AND\n # OUTPUT VARIABLES)\n fgraph, additional_outputs = std_fgraph(inputs, outputs,\n accept_inplace)\n fgraph.profile = profile\n else:\n # fgraph is already an optimized one\n need_opt = False\n updates = [spec.update for spec in inputs if spec.update]\n additional_outputs = list(map(SymbolicOutput, updates))\n\n self.fgraph = fgraph\n\n # Fetch the optimizer and linker\n optimizer, linker = mode.optimizer, copy.copy(mode.linker)\n if need_opt:\n compute_test_value_orig = theano.config.compute_test_value\n limit_orig = theano.config.traceback.limit\n # Why we add stack on node when it get done in output var?\n try:\n # optimize the fgraph\n theano.config.compute_test_value = \\\n theano.config.compute_test_value_opt\n theano.config.traceback.limit = 0\n start_optimizer = time.time()\n\n # now optimize the graph\n if theano.config.cache_optimizations:\n optimizer_profile = self.optimize_graph_with_cache(\n optimizer, inputs, outputs)\n else:\n optimizer_profile = optimizer(fgraph)\n\n end_optimizer = time.time()\n opt_time = end_optimizer - start_optimizer\n if profile:\n profile.optimizer_time += opt_time\n if theano.config.profile_optimizer:\n profile.optimizer_profile = (optimizer,\n optimizer_profile)\n elif theano.config.profile_optimizer:\n warnings.warn((\n \"config.profile_optimizer requires config.profile to \"\n \" be set to True as well\"), stacklevel=3)\n _logger.debug('Optimizing took %f seconds', opt_time)\n\n # Add deep copy to respect the memory interface\n insert_deepcopy(fgraph, inputs, outputs + additional_outputs)\n finally:\n theano.config.compute_test_value = compute_test_value_orig\n theano.config.traceback.limit = limit_orig\n\n # initialize the linker\n if not hasattr(linker, 'accept'):\n raise ValueError(\"'linker' parameter of FunctionMaker should be \"\n \"a Linker with an accept method or one of %s\" %\n list(theano.compile.mode\n .predefined_linkers.keys()))\n\n # the 'no_borrow' outputs are the ones for which that we can't\n # return the internal storage pointer.\n assert len(fgraph.outputs) == len(outputs + additional_outputs)\n no_borrow = [output for output, spec in\n zip(fgraph.outputs, outputs + additional_outputs)\n if not spec.borrow]\n if no_borrow:\n self.linker = linker.accept(\n fgraph, no_recycling=infer_reuse_pattern(fgraph, no_borrow))\n else:\n self.linker = linker.accept(fgraph)\n\n if hasattr(linker, 'accept_var_updates'):\n # hacky thing so VMLinker knows about updates\n self.linker.accept_var_updates(\n fgraph_updated_vars(fgraph, inputs))\n\n self.indices = indices\n self.inputs = inputs\n self.expanded_inputs = inputs\n self.outputs = outputs\n self.unpack_single = unpack_single\n self.return_none = return_none\n self.mode = mode\n self.accept_inplace = accept_inplace\n self.function_builder = function_builder\n self.on_unused_input = on_unused_input # Used for the pickling/copy\n self.output_keys = output_keys\n\n self.required = [(i.value is None) for i in self.inputs]\n self.refeed = [\n (i.value is not None and\n not isinstance(i.value, gof.Container) and\n i.update is None)\n for i in self.inputs]\n\n def _check_unused_inputs(self, inputs, outputs, on_unused_input):\n if on_unused_input is None:\n on_unused_input = theano.config.on_unused_input\n\n if on_unused_input == 'ignore':\n return\n\n # There should be two 
categories of variables in inputs:\n # - variables that have to be provided (used_inputs)\n # - shared variables that will be updated\n used_inputs = gof.graph.ancestors(\n ([o.variable for o in outputs] +\n [i.update for i in inputs if getattr(i, 'update', False)]),\n blockers=[i.variable for i in inputs])\n\n msg = (\"theano.function was asked to create a function computing \"\n \"outputs given certain inputs, but the provided input \"\n \"variable at index %i is not part of the computational graph \"\n \"needed to compute the outputs: %s.\\n%s\")\n warn_msg = (\"To make this warning into an error, you can pass the \"\n \"parameter on_unused_input='raise' to theano.function. \"\n \"To disable it completely, use on_unused_input='ignore'.\")\n err_msg = (\"To make this error into a warning, you can pass the \"\n \"parameter on_unused_input='warn' to theano.function. \"\n \"To disable it completely, use on_unused_input='ignore'.\")\n\n for i in inputs:\n if ((i.variable not in used_inputs) and (i.update is None)):\n if on_unused_input == 'warn':\n warnings.warn(msg % (inputs.index(i), i.variable,\n warn_msg), stacklevel=6)\n elif on_unused_input == 'raise':\n raise UnusedInputError(msg % (inputs.index(i),\n i.variable, err_msg))\n else:\n raise ValueError(\"Invalid value for keyword \"\n \"on_unused_input of theano.function: \"\n \"'%s'.\\nValid values are 'raise', \"\n \"'warn', and 'ignore'.\" % on_unused_input)\n\n def create(self, input_storage=None, trustme=False, storage_map=None):\n \"\"\"\n Create a function.\n\n Parameters\n ----------\n input_storage\n A list matching the inputs list and providing default values if the\n default for an input is None, then that input is a required input.\n For an input with an update, the default acts as initialization.\n trustme\n Disables some exceptions, used internally.\n\n \"\"\"\n\n if input_storage is None:\n input_storage = [None] * len(self.inputs)\n # list of independent one-element lists, will be passed to the linker\n input_storage_lists = []\n defaults = []\n\n # The following loop is to fill in the input_storage_lists and\n # defaults lists.\n assert len(self.indices) == len(input_storage)\n for i, ((input, indices, subinputs), input_storage_i) in \\\n enumerate(zip(self.indices, input_storage)):\n\n # Replace any default value given as a variable by its\n # container. Note that this makes sense only in the\n # context of shared variables, but for now we avoid\n # dealing directly with them to avoid dependency on the\n # shared variables work-in-progress repository.\n if isinstance(input_storage_i, gof.Variable):\n input_storage_i = input_storage_i.container\n\n if isinstance(input_storage_i, gof.Container):\n # If the default is a gof.Container, this means we want to\n # share the same storage. 
This is done by appending\n # input_storage_i.storage to input_storage_lists.\n if indices is not None:\n raise TypeError(\"Cannot take a Container instance as \"\n \"default for a SymbolicInputKit.\")\n input_storage_lists.append(input_storage_i.storage)\n\n storage = input_storage[i].storage[0]\n\n else:\n # Normal case: one new, independent storage unit\n input_storage_lists.append([input_storage_i])\n\n storage = input_storage_i\n\n required = self.required[i]\n refeed = self.refeed[i]\n # sanity check-- if an input is required it should not\n # need to be refed\n assert not (required and refeed)\n\n # shared variables need neither be input by the user nor refed\n if input.shared:\n assert not required\n assert not refeed\n storage = None\n\n # if an input is required, it never need be refed\n if required:\n storage = None\n\n # make sure that we only store a value if we actually need it\n if storage is not None:\n assert refeed or not required\n\n defaults.append((required, refeed, storage))\n\n # Get a function instance\n start_linker = time.time()\n start_import_time = theano.gof.cmodule.import_time\n limit_orig = theano.config.traceback.limit\n try:\n theano.config.traceback.limit = 0\n _fn, _i, _o = self.linker.make_thunk(\n input_storage=input_storage_lists, storage_map=storage_map)\n finally:\n theano.config.traceback.limit = limit_orig\n\n end_linker = time.time()\n\n linker_time = end_linker - start_linker\n _logger.debug('Linker took %f seconds', linker_time)\n if self.profile:\n self.profile.linker_time += linker_time\n _fn.time_thunks = self.profile.flag_time_thunks\n import_time = theano.gof.cmodule.import_time - start_import_time\n self.profile.import_time += import_time\n\n fn = self.function_builder(_fn, _i, _o, self.indices, self.outputs,\n defaults, self.unpack_single,\n self.return_none, self.output_keys, self)\n fn.profile = self.profile\n return fn\n\n\ndef _pickle_FunctionMaker(self):\n kwargs = dict(\n inputs=self.inputs,\n outputs=self.orig_outputs,\n fgraph=self.fgraph,\n mode=self.mode,\n accept_inplace=self.accept_inplace,\n function_builder=self.function_builder,\n profile=self.profile,\n on_unused_input=self.on_unused_input)\n return (_constructor_FunctionMaker, (kwargs,))\n\n\ndef _constructor_FunctionMaker(kwargs):\n if theano.config.unpickle_function:\n if theano.config.reoptimize_unpickled_function:\n del kwargs['fgraph']\n return FunctionMaker(**kwargs)\n else:\n return None\n\ncopyreg.pickle(FunctionMaker, _pickle_FunctionMaker)\n\n__checkers = []\n\n\ndef check_equal(x, y):\n for checker in __checkers:\n try:\n return checker(x, y)\n except Exception:\n continue\n return x == y\n\n\ndef register_checker(checker):\n __checkers.insert(0, checker)\n\n\ndef orig_function(inputs, outputs, mode=None, accept_inplace=False,\n name=None, profile=None, on_unused_input=None,\n output_keys=None):\n \"\"\"\n Return a Function that will calculate the outputs from the inputs.\n\n Parameters\n ----------\n inputs : list of `SymbolicInput` or `In` instances\n outputs : a SymbolicOutput or a list of `SymbolicOutput` or `Out` instances\n The return value of the returned function will match the format of this\n argument (either the value itself or a list of one or more return\n values).\n mode : descriptive string or Mode instance\n Default of None means to use `config.mode` (see below for descriptive\n string list).\n name : str\n An optional name for this fct. 
If used, the profile mode will print the\n time spent in this fct.\n accept_inplace : bool\n True iff the graph can contain inplace operations prior to the\n optimization phase (default is False).\n profile : None or ProfileStats instance\n on_unused_input : {'raise', 'warn', 'ignore', None}\n What to do if a variable in the 'inputs' list is not used in the graph.\n output_keys :\n If the outputs were provided to theano.function as a list, then\n output_keys is None. Otherwise, if outputs were provided as a dict,\n output_keys is the sorted list of keys from the outputs.\n\n Notes\n -----\n Currently, the library provides the following mode strings:\n\n - FAST_RUN (default) (optimize without too much time)\n\n - FAST_COMPILE (minimal optimization)\n\n - ProfileMode(deprecated): allow to print a profile mode with\n mode.print_summary\n\n - DebugMode: verify many internal conditions that are normally assumed\n (slow)\n\n \"\"\"\n\n # Every element of the input list will be upgraded to an `In` instance if\n # necessary, using the rules implemented by the `convert_function_input`\n # function.\n\n # Similarly, every element of the output list will be upgraded to an `Out`\n # instance if necessary:\n\n t1 = time.time()\n mode = theano.compile.mode.get_mode(mode)\n\n inputs = list(map(convert_function_input, inputs))\n if outputs is not None:\n if isinstance(outputs, (list, tuple)):\n outputs = list(map(FunctionMaker.wrap_out, outputs))\n else:\n outputs = FunctionMaker.wrap_out(outputs)\n\n defaults = [getattr(input, 'value', None) for input in inputs]\n\n if isinstance(mode, (list, tuple)): # \"mode comparison\" semantics\n raise Exception(\"We do not support the passing of multiple modes\")\n else:\n Maker = getattr(mode, 'function_maker', FunctionMaker)\n fn = Maker(inputs,\n outputs,\n mode,\n accept_inplace=accept_inplace,\n profile=profile,\n on_unused_input=on_unused_input,\n output_keys=output_keys).create(\n defaults)\n\n t2 = time.time()\n if profile:\n profile.compile_time += t2 - t1\n profile.nb_nodes = len(fn.maker.fgraph.apply_nodes)\n\n fn.name = name\n fn.maker.fgraph.name = name\n return fn\n\n\ndef convert_function_input(input):\n \"\"\"\n Upgrade a input shortcut to an In instance.\n\n The rules for upgrading are as follows:\n\n - a `Variable` instance r will be upgraded like `In`(r)\n\n - a tuple (name, r) will be `In`(r, name=name)\n\n - a tuple (r, val) will be `In`(r, value=value, autoname=True)\n\n - a tuple ((r,up), val) will be\n `In`(r, value=value, update=up, autoname=True)\n\n - a tuple (name, r, val) will be `In`(r, name=name, value=value)\n\n - a tuple (name, (r,up), val) will be\n `In`(r, name=name, value=val, update=up, autoname=True)\n\n \"\"\"\n if isinstance(input, (SymbolicInput, SymbolicInputKit)):\n return input\n elif isinstance(input, gof.Constant):\n raise TypeError('A Constant instance is not a legal function input',\n input)\n elif isinstance(input, gof.Variable):\n return In(input)\n elif isinstance(input, (list, tuple)):\n orig = input\n if not input:\n raise TypeError(\"Nonsensical input specification: %s\" % input)\n if isinstance(input[0], string_types):\n name = input[0]\n input = input[1:]\n else:\n name = None\n if isinstance(input[0], (list, tuple)):\n if len(input[0]) != 2 or len(input) != 2:\n raise TypeError(\"Invalid input syntax: %s (check \"\n \"documentation or use an In instance)\" % orig)\n (variable, update), value = input\n elif isinstance(input[0], gof.Variable):\n if len(input) == 1:\n variable, update, value = input[0], None, 
None\n elif len(input) == 2:\n (variable, value), update = input, None\n else:\n raise TypeError(\"Invalid input syntax: %s (check \"\n \"documentation or use an In instance)\" % orig)\n elif isinstance(input[0], (SymbolicInput, SymbolicInputKit)):\n if len(input) == 1:\n return input[0]\n elif len(input) == 2:\n input, value = input\n if name is not None:\n input.name = name\n input.value = value\n return input\n else:\n raise TypeError(\"The input specification is not valid: %s\" % input)\n\n if not isinstance(variable, gof.Variable):\n raise TypeError(\"Unknown input type: %s, expected Variable \"\n \"instance\" % type(variable), variable)\n if update is not None and not isinstance(update, gof.Variable):\n raise TypeError(\"Unknown update type: %s, expected Variable \"\n \"instance\" % type(update), update)\n if (value is not None and\n isinstance(value, (gof.Variable, SymbolicInput))):\n raise TypeError(\"The value for input %s should not be a Variable \"\n \"or SymbolicInput instance (got: %s)\" %\n (variable, value))\n\n return In(variable, name=name, value=value, update=update)\n else:\n raise TypeError(\"Unknown input type: %s, expected Variable instance\" %\n type(input), input)\n\n\ndef get_info_on_inputs(named_inputs, n_unnamed_inputs):\n \"\"\"\n Return a human-readable description of named and un-named inputs.\n\n \"\"\"\n n_named_inputs = len(named_inputs)\n\n def get_plural(n):\n if n > 1:\n return 's'\n else:\n return ''\n\n if n_named_inputs == 0:\n if n_unnamed_inputs == 0:\n msg = 'The function is supposed to have no input.'\n else:\n if n_unnamed_inputs == 1:\n msg = (\"The function has a single input variable which has no \"\n \"name, and thus cannot be assigned through a keyword\"\n \" argument (use 'name=...' in a Variable's \"\n \"constructor to give it a name).\")\n else:\n # Use plural.\n msg = (\"The function has %s inputs, but none of them is named,\"\n \" and thus they cannot be assigned through keyword \"\n \"arguments (use 'name=...' in a Variable's \"\n \"constructor to give it a name).\" % n_unnamed_inputs)\n else:\n if n_unnamed_inputs == 0:\n msg = (\"The function has %s named input%s (%s).\" %\n (n_named_inputs, get_plural(n_named_inputs),\n ', '.join(named_inputs)))\n else:\n msg = (\"The function has %s named input%s (%s), and %s unnamed \"\n \"input%s which thus cannot be accessed through keyword \"\n \"argument%s (use 'name=...' 
in a variable's constructor \"\n \"to give it a name).\" %\n (n_named_inputs, get_plural(n_named_inputs),\n ', '.join(named_inputs), n_unnamed_inputs,\n get_plural(n_unnamed_inputs),\n get_plural(n_unnamed_inputs)))\n return msg\n", "from itertools import product\nimport unittest\nimport six.moves.builtins as builtins\n\nimport numpy\n\nimport theano\nimport theano.tensor as tensor\nfrom theano.tests import unittest_tools as utt\nfrom theano.tensor.signal.downsample import (DownsampleFactorMax, max_pool_2d,\n MaxPoolGrad, AveragePoolGrad,\n DownsampleFactorMaxGrad,\n DownsampleFactorMaxGradGrad,\n max_pool_2d_same_size)\nfrom theano import function\n\n\nclass TestDownsampleFactorMax(utt.InferShapeTester):\n\n @staticmethod\n def numpy_max_pool_2d(input, ds, ignore_border=False, mode='max'):\n '''Helper function, implementing max_pool_2d in pure numpy'''\n if len(input.shape) < 2:\n raise NotImplementedError('input should have at least 2 dim,'\n ' shape is %s'\n % str(input.shape))\n xi = 0\n yi = 0\n if not ignore_border:\n if input.shape[-2] % ds[0]:\n xi += 1\n if input.shape[-1] % ds[1]:\n yi += 1\n out_shp = list(input.shape[:-2])\n out_shp.append(input.shape[-2] / ds[0] + xi)\n out_shp.append(input.shape[-1] / ds[1] + yi)\n output_val = numpy.zeros(out_shp)\n func = numpy.max\n if mode == 'sum':\n func = numpy.sum\n elif mode != 'max':\n func = numpy.average\n\n for k in numpy.ndindex(*input.shape[:-2]):\n for i in range(output_val.shape[-2]):\n ii = i * ds[0]\n for j in range(output_val.shape[-1]):\n jj = j * ds[1]\n patch = input[k][ii:ii + ds[0], jj:jj + ds[1]]\n output_val[k][i, j] = func(patch)\n return output_val\n\n @staticmethod\n def numpy_max_pool_2d_stride_padding(\n x, ds, ignore_border=True, st=None, padding=(0, 0), mode='max'):\n pad_h = padding[0]\n pad_w = padding[1]\n h = x.shape[-2]\n w = x.shape[-1]\n assert ds[0] > pad_h\n assert ds[1] > pad_w\n\n def pad_img(x):\n y = numpy.zeros(\n (x.shape[0], x.shape[1],\n x.shape[2]+pad_h*2, x.shape[3]+pad_w*2),\n dtype=x.dtype)\n y[:, :, pad_h:(x.shape[2]+pad_h), pad_w:(x.shape[3]+pad_w)] = x\n\n return y\n img_rows = h + 2 * pad_h\n img_cols = w + 2 * pad_w\n out_r = (img_rows - ds[0]) // st[0] + 1\n out_c = (img_cols - ds[1]) // st[1] + 1\n out_shp = list(x.shape[:-2])\n out_shp.append(out_r)\n out_shp.append(out_c)\n ds0, ds1 = ds\n st0, st1 = st\n output_val = numpy.zeros(out_shp)\n tt = []\n y = pad_img(x)\n func = numpy.max\n if mode == 'sum':\n func = numpy.sum\n elif mode != 'max':\n func = numpy.average\n inc_pad = mode == 'average_inc_pad'\n\n for k in numpy.ndindex(*x.shape[:-2]):\n for i in range(output_val.shape[-2]):\n ii_st = i * st[0]\n ii_end = builtins.min(ii_st + ds[0], img_rows)\n if not inc_pad:\n ii_st = builtins.max(ii_st, pad_h)\n ii_end = builtins.min(ii_end, h + pad_h)\n for j in range(output_val.shape[-1]):\n jj_st = j * st[1]\n jj_end = builtins.min(jj_st + ds[1], img_cols)\n if not inc_pad:\n jj_st = builtins.max(jj_st, pad_w)\n jj_end = builtins.min(jj_end, w + pad_w)\n patch = y[k][ii_st:ii_end, jj_st:jj_end]\n output_val[k][i, j] = func(patch)\n return output_val\n\n @staticmethod\n def numpy_max_pool_2d_stride(input, ds, ignore_border=False, st=None,\n mode='max'):\n '''Helper function, implementing max_pool_2d in pure numpy\n this function provides st input to indicate the stide size\n for the pooling regions. 
if not indicated, st == sd.'''\n if len(input.shape) < 2:\n raise NotImplementedError('input should have at least 2 dim,'\n ' shape is %s'\n % str(input.shape))\n\n if st is None:\n st = ds\n xi = 0\n yi = 0\n img_rows = input.shape[-2]\n img_cols = input.shape[-1]\n\n out_r = 0\n out_c = 0\n if img_rows - ds[0] >= 0:\n out_r = (img_rows - ds[0]) // st[0] + 1\n if img_cols - ds[1] >= 0:\n out_c = (img_cols - ds[1]) // st[1] + 1\n\n if not ignore_border:\n if out_r > 0:\n if img_rows - ((out_r - 1) * st[0] + ds[0]) > 0:\n rr = img_rows - out_r * st[0]\n if rr > 0:\n out_r += 1\n else:\n if img_rows > 0:\n out_r += 1\n if out_c > 0:\n if img_cols - ((out_c - 1) * st[1] + ds[1]) > 0:\n cr = img_cols - out_c * st[1]\n if cr > 0:\n out_c += 1\n else:\n if img_cols > 0:\n out_c += 1\n\n out_shp = list(input.shape[:-2])\n out_shp.append(out_r)\n out_shp.append(out_c)\n\n func = numpy.max\n if mode == 'sum':\n func = numpy.sum\n elif mode != 'max':\n func = numpy.average\n\n output_val = numpy.zeros(out_shp)\n for k in numpy.ndindex(*input.shape[:-2]):\n for i in range(output_val.shape[-2]):\n ii_st = i * st[0]\n ii_end = builtins.min(ii_st + ds[0], img_rows)\n for j in range(output_val.shape[-1]):\n jj_st = j * st[1]\n jj_end = builtins.min(jj_st + ds[1], img_cols)\n patch = input[k][ii_st:ii_end, jj_st:jj_end]\n output_val[k][i, j] = func(patch)\n return output_val\n\n def test_DownsampleFactorMax(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n # generate random images\n maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3))\n imval = rng.rand(4, 2, 16, 16)\n images = tensor.dtensor4()\n for maxpoolshp, ignore_border, mode in product(maxpoolshps,\n [True, False],\n ['max',\n 'sum',\n 'average_inc_pad',\n 'average_exc_pad']):\n # print 'maxpoolshp =', maxpoolshp\n # print 'ignore_border =', ignore_border\n\n # Pure Numpy computation\n numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,\n ignore_border,\n mode=mode)\n output = max_pool_2d(images, maxpoolshp, ignore_border,\n mode=mode)\n f = function([images, ], [output, ])\n output_val = f(imval)\n utt.assert_allclose(output_val, numpy_output_val)\n\n # DownsampleFactorMax op\n maxpool_op = DownsampleFactorMax(maxpoolshp,\n ignore_border=ignore_border,\n mode=mode)(images)\n f = function([images], maxpool_op)\n output_val = f(imval)\n utt.assert_allclose(output_val, numpy_output_val)\n\n def test_DownsampleFactorMaxStride(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((1, 1), (3, 3), (5, 3))\n stridesizes = ((1, 1), (3, 3), (5, 7))\n # generate random images\n imval = rng.rand(4, 10, 16, 16)\n # The same for each mode\n outputshps = ((4, 10, 16, 16), (4, 10, 6, 6), (4, 10, 4, 3),\n (4, 10, 16, 16), (4, 10, 6, 6), (4, 10, 4, 3),\n (4, 10, 14, 14), (4, 10, 5, 5), (4, 10, 3, 2),\n (4, 10, 14, 14), (4, 10, 6, 6), (4, 10, 4, 3),\n (4, 10, 12, 14), (4, 10, 4, 5), (4, 10, 3, 2),\n (4, 10, 12, 14), (4, 10, 5, 6), (4, 10, 4, 3))\n images = tensor.dtensor4()\n indx = 0\n for mode, maxpoolshp, ignore_border in product(['max',\n 'sum',\n 'average_inc_pad',\n 'average_exc_pad'],\n maxpoolshps,\n [True, False]):\n for stride in stridesizes:\n outputshp = outputshps[indx % len(outputshps)]\n indx += 1\n # DownsampleFactorMax op\n numpy_output_val = \\\n self.numpy_max_pool_2d_stride(imval, maxpoolshp,\n ignore_border, stride,\n mode)\n assert numpy_output_val.shape == outputshp, (\n \"outshape is %s, calculated shape is %s\"\n % (outputshp, numpy_output_val.shape))\n maxpool_op = \\\n DownsampleFactorMax(maxpoolshp,\n 
ignore_border=ignore_border,\n st=stride, mode=mode)(images)\n f = function([images], maxpool_op)\n output_val = f(imval)\n utt.assert_allclose(output_val, numpy_output_val)\n\n def test_DownsampleFactorMaxStrideExtra(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((5, 3), (5, 3), (5, 3), (5, 5), (3, 2), (7, 7), (9, 9))\n stridesizes = ((3, 2), (7, 5), (10, 6), (1, 1),\n (2, 3), (10, 10), (1, 1))\n imvsizs = ((16, 16), (16, 16), (16, 16), (8, 5),\n (8, 5), (8, 5), (8, 5))\n outputshps = ((4, 10, 4, 7), (4, 10, 5, 8), (4, 10, 2, 3),\n (4, 10, 3, 4), (4, 10, 2, 3), (4, 10, 2, 3),\n (4, 10, 4, 1), (4, 10, 4, 1), (4, 10, 3, 2),\n (4, 10, 4, 2), (4, 10, 1, 0), (4, 10, 1, 1),\n (4, 10, 0, 0), (4, 10, 1, 1))\n images = tensor.dtensor4()\n for indx in numpy.arange(len(maxpoolshps)):\n imvsize = imvsizs[indx]\n imval = rng.rand(4, 10, imvsize[0], imvsize[1])\n stride = stridesizes[indx]\n maxpoolshp = maxpoolshps[indx]\n for ignore_border, mode in product([True, False],\n ['max', 'sum',\n 'average_inc_pad',\n 'average_exc_pad']):\n indx_out = indx * 2\n if not ignore_border:\n indx_out += 1\n outputshp = outputshps[indx_out]\n # DownsampleFactorMax op\n numpy_output_val = \\\n self.numpy_max_pool_2d_stride(imval, maxpoolshp,\n ignore_border, stride, mode)\n assert numpy_output_val.shape == outputshp, (\n \"outshape is %s, calculated shape is %s\"\n % (outputshp, numpy_output_val.shape))\n maxpool_op = \\\n DownsampleFactorMax(maxpoolshp,\n ignore_border=ignore_border,\n st=stride, mode=mode)(images)\n f = function([images], maxpool_op)\n output_val = f(imval)\n utt.assert_allclose(output_val, numpy_output_val)\n\n def test_DownsampleFactorMaxPaddingStride(self):\n ignore_border = True # padding does not support ignore_border=False\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolsizes = [(3, 3), (4, 4), (3, 4), (4, 3), (2, 2)]\n stridesizes = [(2, 2), (2, 2), (1, 1), (1, 2), (2, 2)]\n paddingsizes = [(2, 2), (1, 2), (2, 1), (0, 0), (1, 1)]\n imgsizes = [(5, 5), (5, 5), (5, 6), (6, 5), (5, 5)]\n m = 4 # minibatch\n c = 2 # channel size\n images = tensor.dtensor4()\n for indx, mode in product(numpy.arange(len(maxpoolsizes)),\n ['max', 'sum', 'average_inc_pad',\n 'average_exc_pad']):\n imgsize = imgsizes[indx]\n imval = rng.rand(m, c, imgsize[0], imgsize[1]) - 0.5\n\n stridesize = stridesizes[indx]\n maxpoolsize = maxpoolsizes[indx]\n paddingsize = paddingsizes[indx]\n numpy_output_val = self.numpy_max_pool_2d_stride_padding(\n imval, maxpoolsize, ignore_border,\n stridesize, paddingsize, mode)\n maxpool_op = DownsampleFactorMax(\n maxpoolsize,\n ignore_border=ignore_border,\n st=stridesize, padding=paddingsize, mode=mode)(images)\n f = function([images], maxpool_op)\n output_val = f(imval)\n utt.assert_allclose(output_val, numpy_output_val)\n\n def test_DownsampleFactorMaxPaddingStride_grad(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n imgsizes = ((10, 10), (10, 5), (5, 5))\n maxpoolsizes = ((5, 3), (3, 5), (3, 3))\n stridesizes = ((3, 2), (2, 3), (3, 3))\n paddingsizes = ((2, 2), (2, 1), (2, 2))\n # average_inc_pad and average_exc_pad do not\n # support grad with padding\n for mode in ['max', 'sum']:\n for i in range(len(imgsizes)):\n imgsize = imgsizes[i]\n imval = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0\n maxpoolsize = maxpoolsizes[i]\n stridesize = stridesizes[i]\n paddingsize = paddingsizes[i]\n\n def mp(input):\n return DownsampleFactorMax(\n maxpoolsize, ignore_border=True,\n st=stridesize,\n padding=paddingsize,\n mode=mode,\n 
)(input)\n utt.verify_grad(mp, [imval], rng=rng)\n\n def test_DownsampleFactorMax_grad(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((1, 1), (3, 2), (2, 3))\n imval = rng.rand(2, 3, 3, 4) * 10.0\n # more variance means numeric gradient will be more accurate\n\n for maxpoolshp, ignore_border, mode in product(maxpoolshps,\n [True, False],\n ['max',\n 'sum',\n 'average_inc_pad',\n 'average_exc_pad']):\n def mp(input):\n return DownsampleFactorMax(maxpoolshp,\n ignore_border=ignore_border,\n mode=mode)(input)\n utt.verify_grad(mp, [imval], rng=rng)\n\n def test_DownsampleFactorMax_grad_st(self):\n \"\"\"checks the gradient for the case that stride is used\"\"\"\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((1, 1), (3, 3), (5, 3))\n stridesizes = ((1, 1), (3, 3), (5, 7))\n imval = rng.rand(1, 2, 16, 16)\n\n for maxpoolshp, ignore_border, mode, stride in product(maxpoolshps,\n [True, False],\n ['max',\n 'sum',\n 'average_inc_pad',\n 'average_exc_pad'],\n stridesizes):\n def mp(input):\n return DownsampleFactorMax(maxpoolshp,\n ignore_border=ignore_border,\n st=stride, mode=mode)(input)\n utt.verify_grad(mp, [imval], rng=rng)\n\n def test_DownsampleFactorMax_grad_st_extra(self):\n \"\"\"checks the gradient for the case\n that stride is used for extra examples\"\"\"\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((5, 3), (5, 3), (5, 3), (5, 5), (3, 2), (7, 7), (9, 9))\n stridesizes = ((3, 2), (7, 5), (10, 6), (1, 1),\n (2, 3), (10, 10), (1, 1))\n imvsizs = ((16, 16), (16, 16), (16, 16), (8, 5),\n (8, 5), (8, 5), (8, 5))\n\n for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:\n for indx in numpy.arange(len(maxpoolshps)):\n imvsize = imvsizs[indx]\n imval = rng.rand(1, 2, imvsize[0], imvsize[1])\n stride = stridesizes[indx]\n maxpoolshp = maxpoolshps[indx]\n for ignore_border in [True, False]:\n def mp(input):\n return DownsampleFactorMax(maxpoolshp,\n ignore_border=ignore_border,\n st=stride,\n mode=mode)(input)\n utt.verify_grad(mp, [imval], rng=rng)\n\n def test_DownsampleFactorMaxGrad_grad(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((1, 1), (3, 2), (2, 3))\n imval = rng.rand(2, 3, 3, 4) * 10.0\n # more variance means numeric gradient will be more accurate\n\n for maxpoolshp in maxpoolshps:\n for ignore_border in [True, False]:\n # print 'maxpoolshp =', maxpoolshp\n # print 'ignore_border =', ignore_border\n # The shape of the gradient will be the shape of the output\n grad_shape = DownsampleFactorMax.out_shape(\n imval.shape, maxpoolshp, ignore_border=ignore_border)\n grad_val = rng.rand(*grad_shape) * 10.0\n\n def mp(input, grad):\n out = DownsampleFactorMax(\n maxpoolshp, ignore_border=ignore_border)(input)\n grad_op = MaxPoolGrad(\n maxpoolshp, ignore_border=ignore_border)\n return grad_op(input, out, grad)\n\n utt.verify_grad(mp, [imval, grad_val], rng=rng)\n\n def test_AveragePoolGrad_grad(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n avgpoolshps = ((1, 1), (3, 2), (2, 3))\n imval = rng.rand(2, 3, 3, 4) * 10.0\n # more variance means numeric gradient will be more accurate\n\n for avgpoolshp in avgpoolshps:\n for ignore_border in [True, False]:\n for mode in ['sum', 'average_inc_pad', 'average_exc_pad']:\n # print 'maxpoolshp =', maxpoolshp\n # print 'ignore_border =', ignore_border\n # The shape of the gradient will be the shape of the output\n grad_shape = DownsampleFactorMax.out_shape(\n imval.shape, avgpoolshp, ignore_border=ignore_border)\n grad_val = 
rng.rand(*grad_shape) * 10.0\n\n def mp(input, grad):\n grad_op = AveragePoolGrad(\n avgpoolshp, ignore_border=ignore_border, mode=mode)\n return grad_op(input, grad)\n\n utt.verify_grad(mp, [imval, grad_val], rng=rng)\n\n def test_DownsampleFactorMaxGrad_grad_st(self):\n \"\"\"checks the gradient of the gradient for\n the case that stride is used\"\"\"\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((1, 1), (3, 3), (5, 3))\n stridesizes = ((1, 1), (3, 3), (5, 7))\n imval = rng.rand(1, 2, 16, 16)\n\n for maxpoolshp in maxpoolshps:\n for ignore_border in [True, False]:\n for stride in stridesizes:\n grad_shape = DownsampleFactorMax.out_shape(\n imval.shape, maxpoolshp,\n ignore_border=ignore_border, st=stride)\n grad_val = rng.rand(*grad_shape)\n\n def mp(input, grad):\n out = DownsampleFactorMax(\n maxpoolshp, ignore_border=ignore_border,\n st=stride)(input)\n grad_op = MaxPoolGrad(\n maxpoolshp, ignore_border=ignore_border,\n st=stride)\n return grad_op(input, out, grad)\n\n utt.verify_grad(mp, [imval, grad_val], rng=rng)\n\n def test_AveragePoolGrad_grad_st(self):\n \"\"\"checks the gradient of the gradient for\n the case that stride is used\"\"\"\n rng = numpy.random.RandomState(utt.fetch_seed())\n avgpoolshps = ((1, 1), (3, 3), (5, 3))\n stridesizes = ((1, 1), (3, 3), (5, 7))\n imval = rng.rand(1, 2, 16, 16)\n\n for avgpoolshp in avgpoolshps:\n for ignore_border in [True, False]:\n for mode in ['sum', 'average_inc_pad', 'average_exc_pad']:\n for stride in stridesizes:\n grad_shape = DownsampleFactorMax.out_shape(\n imval.shape, avgpoolshp,\n ignore_border=ignore_border, st=stride)\n grad_val = rng.rand(*grad_shape)\n\n def mp(input, grad):\n grad_op = AveragePoolGrad(\n avgpoolshp, ignore_border=ignore_border,\n st=stride, mode=mode)\n return grad_op(input, grad)\n\n utt.verify_grad(mp, [imval, grad_val], rng=rng)\n\n def test_DownsampleFactorMaxGrad_grad_st_extra(self):\n \"\"\"checks the gradient of the gradient for the case that\n stride is used for extra examples\"\"\"\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((5, 3), (5, 3), (5, 3), (5, 5), (3, 2), (7, 7), (9, 9))\n stridesizes = ((3, 2), (7, 5), (10, 6), (1, 1),\n (2, 3), (10, 10), (1, 1))\n imvsizs = ((16, 16), (16, 16), (16, 16), (8, 5),\n (8, 5), (8, 5), (8, 5))\n\n for indx in numpy.arange(len(maxpoolshps)):\n imvsize = imvsizs[indx]\n imval = rng.rand(1, 2, imvsize[0], imvsize[1])\n stride = stridesizes[indx]\n maxpoolshp = maxpoolshps[indx]\n for ignore_border in [True, False]:\n grad_shape = DownsampleFactorMax.out_shape(\n imval.shape, maxpoolshp,\n ignore_border=ignore_border, st=stride)\n grad_val = rng.rand(*grad_shape)\n\n def mp(input, grad):\n out = DownsampleFactorMax(\n maxpoolshp, ignore_border=ignore_border,\n st=stride)(input)\n grad_op = MaxPoolGrad(\n maxpoolshp, ignore_border=ignore_border,\n st=stride)\n return grad_op(input, out, grad)\n\n # skip the grad verification when the output is empty\n if numpy.prod(grad_shape) == 0:\n continue\n utt.verify_grad(mp, [imval, grad_val], rng=rng)\n\n def test_AveragePoolGrad_grad_st_extra(self):\n \"\"\"checks the gradient of the gradient for the case that\n stride is used for extra examples\"\"\"\n rng = numpy.random.RandomState(utt.fetch_seed())\n avgpoolshps = ((5, 3), (5, 3), (5, 3), (5, 5), (3, 2), (7, 7), (9, 9))\n stridesizes = ((3, 2), (7, 5), (10, 6), (1, 1),\n (2, 3), (10, 10), (1, 1))\n imvsizs = ((16, 16), (16, 16), (16, 16), (8, 5),\n (8, 5), (8, 5), (8, 5))\n\n for indx in numpy.arange(len(avgpoolshps)):\n 
imvsize = imvsizs[indx]\n imval = rng.rand(1, 2, imvsize[0], imvsize[1])\n stride = stridesizes[indx]\n avgpoolshp = avgpoolshps[indx]\n for ignore_border in [True, False]:\n for mode in ['sum', 'average_inc_pad', 'average_exc_pad']:\n grad_shape = DownsampleFactorMax.out_shape(\n imval.shape, avgpoolshp,\n ignore_border=ignore_border, st=stride)\n grad_val = rng.rand(*grad_shape)\n\n def mp(input, grad):\n grad_op = AveragePoolGrad(\n avgpoolshp, ignore_border=ignore_border,\n st=stride, mode=mode)\n return grad_op(input, grad)\n\n # skip the grad verification when the output is empty\n if numpy.prod(grad_shape) == 0:\n continue\n utt.verify_grad(mp, [imval, grad_val], rng=rng)\n\n def test_DownsampleFactorMaxPaddingStride_grad_grad(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n imgsizes = ((10, 10), (10, 5), (5, 5))\n maxpoolsizes = ((5, 3), (3, 5), (3, 3))\n stridesizes = ((3, 2), (2, 3), (3, 3))\n paddingsizes = ((2, 2), (2, 1), (2, 2))\n\n for i in range(len(imgsizes)):\n imgsize = imgsizes[i]\n imval = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0\n maxpoolsize = maxpoolsizes[i]\n stridesize = stridesizes[i]\n paddingsize = paddingsizes[i]\n\n grad_shape = DownsampleFactorMaxGradGrad.out_shape(\n imval.shape, maxpoolsize, st=stridesize,\n ignore_border=True, padding=paddingsize)\n grad_val = rng.rand(*grad_shape) * 10.0\n def mp(input, grad):\n out = DownsampleFactorMax(\n maxpoolsize, ignore_border=True,\n st=stridesize,\n padding=paddingsize,\n )(input)\n grad_op = MaxPoolGrad(maxpoolsize, ignore_border=True,\n st=stridesize, padding=paddingsize)\n return grad_op(input, out, grad)\n utt.verify_grad(mp, [imval, grad_val], rng=rng)\n\n def test_AveragePoolPaddingStride_grad_grad(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n imgsizes = ((10, 10), (10, 5), (5, 5))\n avgpoolsizes = ((5, 3), (3, 5), (3, 3))\n stridesizes = ((3, 2), (2, 3), (3, 3))\n paddingsizes = ((2, 2), (2, 1), (2, 2))\n\n for i in range(len(imgsizes)):\n imgsize = imgsizes[i]\n imval = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0\n avgpoolsize = avgpoolsizes[i]\n stridesize = stridesizes[i]\n paddingsize = paddingsizes[i]\n\n #'average_exc_pad' with non-zero padding is not implemented\n for mode in ['sum', 'average_inc_pad']:\n grad_shape = DownsampleFactorMax.out_shape(\n imval.shape, avgpoolsize, st=stridesize,\n ignore_border=True, padding=paddingsize)\n grad_val = rng.rand(*grad_shape) * 10.0\n def mp(input, grad):\n grad_op = AveragePoolGrad(avgpoolsize, ignore_border=True,\n st=stridesize, padding=paddingsize,\n mode=mode)\n return grad_op(input, grad)\n utt.verify_grad(mp, [imval, grad_val], rng=rng)\n\n def test_DownsampleFactorMax_hessian(self):\n # Example provided by Frans Cronje, see\n # https://groups.google.com/d/msg/theano-users/qpqUy_3glhw/JMwIvlN5wX4J\n x_vec = tensor.vector('x')\n z = tensor.dot(x_vec.dimshuffle(0, 'x'),\n x_vec.dimshuffle('x', 0))\n y = max_pool_2d(input=z, ds=(2, 2), ignore_border=True)\n C = tensor.exp(tensor.sum(y))\n\n grad_hess = tensor.hessian(cost=C, wrt=x_vec)\n fn_hess = function(inputs=[x_vec], outputs=grad_hess)\n\n # The value has been manually computed from the theoretical gradient,\n # and confirmed by the implementation.\n assert numpy.allclose(fn_hess([1, 2]), [[0., 0.], [0., 982.7667]])\n\n def test_max_pool_2d_2D(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((1, 1), (3, 2))\n imval = rng.rand(4, 5)\n images = tensor.dmatrix()\n\n for maxpoolshp, ignore_border, mode in product(maxpoolshps,\n [True, False],\n 
['max', 'sum',\n 'average_inc_pad',\n 'average_exc_pad']):\n # print 'maxpoolshp =', maxpoolshp\n # print 'ignore_border =', ignore_border\n numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,\n ignore_border,\n mode=mode)\n output = max_pool_2d(images, maxpoolshp, ignore_border,\n mode=mode)\n output_val = function([images], output)(imval)\n assert numpy.all(output_val == numpy_output_val), (\n \"output_val is %s, numpy_output_val is %s\"\n % (output_val, numpy_output_val))\n\n def mp(input):\n return max_pool_2d(input, maxpoolshp, ignore_border,\n mode=mode)\n utt.verify_grad(mp, [imval], rng=rng)\n\n def test_max_pool_2d_2D_same_size(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n test_input_array = numpy.array([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.]\n ]]]).astype(theano.config.floatX)\n test_answer_array = numpy.array([[[\n [0., 0., 0., 0.],\n [0., 6., 0., 8.]\n ]]]).astype(theano.config.floatX)\n input = tensor.tensor4(name='input')\n patch_size = (2, 2)\n op = max_pool_2d_same_size(input, patch_size)\n op_output = function([input], op)(test_input_array)\n assert numpy.all(op_output == test_answer_array), (\n \"op_output is %s, test_answer_array is %s\" % (\n op_output, numpy_output_val\n )\n )\n def mp(input):\n return max_pool_2d_same_size(input, patch_size)\n utt.verify_grad(mp, [test_input_array], rng=rng)\n\n def test_max_pool_2d_3D(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = [(1, 2)]\n imval = rng.rand(2, 3, 4)\n images = tensor.dtensor3()\n\n for maxpoolshp, ignore_border, mode in product(maxpoolshps,\n [True, False],\n ['max', 'sum',\n 'average_inc_pad',\n 'average_exc_pad']):\n # print 'maxpoolshp =', maxpoolshp\n # print 'ignore_border =', ignore_border\n numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,\n ignore_border,\n mode)\n output = max_pool_2d(images, maxpoolshp, ignore_border,\n mode=mode)\n output_val = function([images], output)(imval)\n assert numpy.all(output_val == numpy_output_val), (\n \"output_val is %s, numpy_output_val is %s\"\n % (output_val, numpy_output_val))\n c = tensor.sum(output)\n c_val = function([images], c)(imval)\n g = tensor.grad(c, images)\n g_val = function([images],\n [g.shape,\n tensor.min(g, axis=(0, 1, 2)),\n tensor.max(g, axis=(0, 1, 2))]\n )(imval)\n\n# removed as already tested in test_max_pool_2d_2D\n# This make test in debug mode too slow.\n# def mp(input):\n# return max_pool_2d(input, maxpoolshp, ignore_border)\n# utt.verify_grad(mp, [imval], rng=rng)\n\n def test_max_pool_2d_6D(self):\n rng = numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = [(3, 2)]\n imval = rng.rand(2, 1, 1, 1, 3, 4)\n images = tensor.TensorType('float64', [False] * 6)()\n\n for maxpoolshp, ignore_border, mode in product(maxpoolshps,\n [True, False],\n ['max', 'sum',\n 'average_inc_pad',\n 'average_exc_pad']):\n # print 'maxpoolshp =', maxpoolshp\n # print 'ignore_border =', ignore_border\n numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,\n ignore_border,\n mode=mode)\n output = max_pool_2d(images, maxpoolshp, ignore_border,\n mode=mode)\n output_val = function([images], output)(imval)\n assert numpy.all(output_val == numpy_output_val)\n\n# removed as already tested in test_max_pool_2d_2D\n# This make test in debug mode too slow.\n# def mp(input):\n# return max_pool_2d(input, maxpoolshp, ignore_border)\n# utt.verify_grad(mp, [imval], rng=rng)\n\n def test_infer_shape(self):\n image = tensor.dtensor4()\n maxout = tensor.dtensor4()\n gz = tensor.dtensor4()\n rng = 
numpy.random.RandomState(utt.fetch_seed())\n maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3), (3, 2))\n\n image_val = rng.rand(4, 6, 7, 9)\n out_shapes = [[[[4, 6, 7, 9], [4, 6, 7, 9]],\n [[4, 6, 3, 4], [4, 6, 4, 5]],\n [[4, 6, 2, 3], [4, 6, 3, 3]],\n [[4, 6, 3, 3], [4, 6, 4, 3]],\n [[4, 6, 2, 4], [4, 6, 3, 5]]],\n [[None, None],\n [[4, 6, 4, 5], None],\n [[4, 6, 3, 3], None],\n [[4, 6, 4, 3], None],\n [[4, 6, 3, 5], None]],\n [[None, None],\n [None, None],\n [[4, 6, 3, 4], None],\n [[4, 6, 4, 4], None],\n [None, None]]]\n\n for i, maxpoolshp in enumerate(maxpoolshps):\n for j, ignore_border in enumerate([True, False]):\n for k, padding in enumerate([(0,0), (1,1), (1,2)]):\n if out_shapes[k][i][j] == None:\n continue\n # checking shapes generated by DownsampleFactorMax\n self._compile_and_check([image],\n [DownsampleFactorMax(maxpoolshp,\n ignore_border=ignore_border,\n padding=padding)(image)],\n [image_val], DownsampleFactorMax)\n\n # checking shapes generated by MaxPoolGrad\n maxout_val = rng.rand(*out_shapes[k][i][j])\n gz_val = rng.rand(*out_shapes[k][i][j])\n self._compile_and_check([image, maxout, gz],\n [MaxPoolGrad(maxpoolshp,\n ignore_border=ignore_border,\n padding=padding)\n (image, maxout, gz)],\n [image_val, maxout_val, gz_val],\n MaxPoolGrad,\n warn=False)\n # checking with broadcastable input\n image = tensor.tensor(dtype='float64',\n broadcastable=(False, False, True, True))\n image_val = rng.rand(4, 6, 1, 1)\n self._compile_and_check(\n [image],\n [DownsampleFactorMax((2, 2),\n ignore_border=True,\n padding=(0, 0))(image)],\n [image_val], DownsampleFactorMax)\n\n def test_opt_max_to_average(self):\n im = theano.tensor.tensor4()\n maxout = theano.tensor.tensor4()\n grad = theano.tensor.tensor4()\n\n compilation_mode=theano.compile.get_default_mode().including(\n 'local_average_pool_grad')\n\n for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:\n f = theano.function([im, maxout, grad],\n DownsampleFactorMaxGrad(ds=(3,3),\n ignore_border=False,\n mode=mode)(im, maxout, grad),\n mode=compilation_mode)\n\n if mode == 'max':\n assert any(isinstance(n.op, MaxPoolGrad)\n for n in f.maker.fgraph.toposort())\n assert not any(isinstance(n.op, AveragePoolGrad)\n for n in f.maker.fgraph.toposort())\n else:\n assert not any(isinstance(n.op, MaxPoolGrad)\n for n in f.maker.fgraph.toposort())\n assert any(isinstance(n.op, AveragePoolGrad)\n for n in f.maker.fgraph.toposort())\n\nif __name__ == '__main__':\n unittest.main()\n", "import os\n\nimport numpy\n\nfrom theano import Op, Apply, Type, Variable\nfrom theano import tensor, config\nfrom theano.gradient import grad_undefined\nfrom theano.tensor.basic import Alloc, Join, Split\n\nfrom theano.gof import HideC\nfrom theano.gof.utils import MethodNotDefined\n\nfrom collections import deque\n\nfrom six import string_types\nfrom six.moves import xrange\n\ntry:\n import pygpu\n from pygpu import gpuarray\nexcept ImportError:\n pass\n\nfrom .type import GpuArrayType, GpuArrayConstant, gpu_context_type, get_context\nfrom .fp16_help import write_w\n\n\ndef as_gpuarray_variable(x, context_name):\n # If this is already some form of variable, try to avoid an extra transfer\n if isinstance(x, Variable):\n while True:\n # If we are already a GpuArrayVariable in the right context\n # then there is nothing to do.\n if (isinstance(x.type, GpuArrayType) and\n x.type.context_name == context_name):\n return x\n\n # If x is the result of a transfer, try to dig through.\n if getattr(x, 'owner', None):\n if isinstance(x.owner.op, HostFromGpu):\n 
x = x.owner.inputs[0]\n continue\n if isinstance(x.owner.op, GpuFromHost):\n x = x.owner.inputs[0]\n continue\n if isinstance(x.owner.op, GpuToGpu):\n x = x.owner.inputs[0]\n continue\n\n # If none of the conditions where met, then continue with\n # the rest of the body\n break\n\n # If we couldn't deal with transfers, then maybe it's a tensor\n if isinstance(x.type, tensor.TensorType):\n return GpuFromHost(context_name)(x)\n\n # Try _as_GpuArrayVariable if possible\n if hasattr(x, '_as_GpuArrayVariable'):\n return x._as_GpuArrayVariable(context_name)\n\n # If it didn't work try for a constant\n ctx = get_context(context_name)\n\n if isinstance(x, gpuarray.GpuArray):\n if x.context.ptr != ctx.ptr:\n x = x.transfer(ctx)\n\n x = gpuarray.asarray(x, context=ctx)\n\n bcast = [(s == 1) for s in x.shape]\n return GpuArrayConstant(GpuArrayType(dtype=x.dtype,\n broadcastable=bcast,\n context_name=context_name),\n x)\n\n\ndef infer_context_name(*vars):\n \"\"\"\n Infer the context name to use from the inputs given\n\n \"\"\"\n # We try to infer the closest context first\n # TODO: What to do in case of context conflicts?\n # We currently use a first found wins approach.\n todo = deque()\n todo.extendleft(vars)\n while todo:\n v = todo.pop()\n if isinstance(v.type, GpuArrayType):\n return v.type.context_name\n if hasattr(v.tag, 'context_name'):\n return v.tag.context_name\n if v.owner:\n if isinstance(v.owner.op, HostFromGpu):\n return v.owner.inputs[0].type.context_name\n if len(v.owner.inputs) == 1:\n todo.extendleft(v.owner.inputs)\n # If we can't find a context we infer None, which is the default\n return None\n\n\nclass Kernel(object):\n \"\"\"\n This class groups together all the attributes of a gpu kernel.\n\n \"\"\"\n\n def __init__(self, code, params, name, flags,\n codevar=None, binvar=None, objvar=None):\n self.code = code\n self.params = params\n self.name = name\n self.flags = flags\n if codevar is None:\n codevar = 'kcode_' + name\n self.codevar = codevar\n if binvar is None:\n binvar = 'kbin_' + name\n self.binvar = binvar\n if objvar is None:\n objvar = 'k_' + name\n self.objvar = objvar\n\n @staticmethod\n def get_flags(*types):\n def get_dtype(t):\n if isinstance(t, string_types):\n return numpy.dtype(t)\n elif isinstance(t, Type):\n return t.dtype\n elif isinstance(t, Variable):\n return t.type.dtype\n else:\n raise TypeError(\"can't get a dtype from %s\" % (type(t),))\n dtypes = [get_dtype(t) for t in types]\n flags = dict(cluda=True)\n if any(d == numpy.float64 for d in dtypes):\n flags['have_double'] = True\n if any(d.itemsize < 4 for d in dtypes):\n flags['have_small'] = True\n if any(d.kind == 'c' for d in dtypes):\n flags['have_complex'] = True\n if any(d == numpy.float16 for d in dtypes):\n flags['have_half'] = True\n return flags\n\n def _get_c_flags(self):\n res = []\n if self.flags.get('cluda', False):\n res.append('GA_USE_CLUDA')\n if self.flags.get('have_double', False):\n res.append('GA_USE_DOUBLE')\n if self.flags.get('have_small', False):\n res.append('GA_USE_SMALL')\n if self.flags.get('have_complex', False):\n res.append('GA_USE_COMPLEX')\n if self.flags.get('have_half', False):\n res.append('GA_USE_SMALL')\n return '|'.join(res)\n\n def _get_c_types(self):\n def m(t):\n if t == gpuarray.GpuArray:\n return \"GA_BUFFER\"\n else:\n return str(gpuarray.dtype_to_typecode(t))\n return ', '.join(m(t) for t in self.params)\n\n\nclass GpuKernelBase(object):\n context_type = gpu_context_type\n\n def gpu_kernels(self, node, name):\n \"\"\"\n This is the method to override. 
This should return an iterable\n of Kernel objects that describe the kernels this op will need.\n\n \"\"\"\n raise MethodNotDefined('gpu_kernels')\n\n def c_headers(self):\n try:\n o = super(GpuKernelBase, self).c_headers()\n except MethodNotDefined:\n o = []\n return o + ['gpuarray/types.h']\n\n def _generate_kernel_bin(self, k, ctx):\n gk = gpuarray.GpuKernel(k.code, k.name, k.params, context=ctx,\n **k.flags)\n bin = gk._binary\n bcode = ','.join(hex(ord(c)) for c in bin)\n return (\"\"\"static const char %(bname)s[] = { %(bcode)s };\"\"\" %\n dict(bname=k.binvar, bcode=bcode))\n\n def _generate_kernel_code(self, k):\n code = '\\\\n'.join(l for l in k.code.split('\\n'))\n code = code.replace('\"', '\\\\\"')\n return (\"\"\"static const char *%(cname)s = \"%(code)s\";\"\"\" %\n dict(cname=k.codevar, code=code))\n\n def _generate_kernel_vars(self, k):\n return \"\"\"GpuKernel %(kname)s;\"\"\" % dict(kname=k.objvar)\n\n def c_support_code(self):\n return \"\"\"\n template <typename T>\n static T ceil_intdiv(T a, T b)\n {\n return (a/b) + ((a % b) ? 1: 0);\n }\n \"\"\"\n\n def c_support_code_apply(self, node, name):\n kernels = self.gpu_kernels(node, name)\n ctx = self.get_context(node)\n bins = '\\n'.join(self._generate_kernel_bin(k, ctx) for k in kernels)\n codes = '\\n'.join(self._generate_kernel_code(k) for k in kernels)\n return '\\n'.join([bins, codes])\n\n def c_support_code_struct(self, node, name):\n kernels = self.gpu_kernels(node, name)\n return '\\n'.join(self._generate_kernel_vars(k) for k in kernels)\n\n def _generate_zeros(self, k):\n return \"\"\"memset(&%(v)s, 0, sizeof(%(v)s));\"\"\" % dict(v=k.objvar)\n\n def _generate_kernel_init(self, k, fail, ctx):\n return \"\"\"{\n int err;\n int types[%(numargs)u] = {%(types)s};\n const char *bcode = %(bvar)s;\n size_t sz = sizeof(%(bvar)s);\n if (GpuKernel_init(&%(ovar)s, %(ctx)s->ops, %(ctx)s->ctx, 1, &bcode, &sz,\n \"%(kname)s\", %(numargs)u, types, GA_USE_BINARY, NULL)\n != GA_NO_ERROR) {\n if ((err = GpuKernel_init(&%(ovar)s, %(ctx)s->ops, %(ctx)s->ctx, 1,\n &%(cname)s, NULL, \"%(kname)s\", %(numargs)u,\n types, %(flags)s, NULL)) != GA_NO_ERROR) {\n PyErr_Format(PyExc_RuntimeError, \"GpuKernel_init error %%d: %%s\",\n err, Gpu_error(%(ctx)s->ops, %(ctx)s->ctx, err));\n %(fail)s\n }\n }\n}\"\"\" % dict(numargs=len(k.params), types=k._get_c_types(), bvar=k.binvar,\n ovar=k.objvar, kname=k.name, cname=k.codevar,\n flags=k._get_c_flags(), fail=fail, ctx=ctx)\n\n def c_init_code_struct(self, node, name, sub):\n ctx = sub['context']\n kernels = self.gpu_kernels(node, name)\n inits_0 = '\\n'.join(self._generate_zeros(k) for k in kernels)\n inits = '\\n'.join(self._generate_kernel_init(k, sub['fail'], ctx)\n for k in kernels)\n return '\\n'.join([inits_0, inits])\n\n def _generate_kernel_cleanup(self, k):\n return \"GpuKernel_clear(&%(ovar)s);\" % dict(ovar=k.objvar)\n\n def c_cleanup_code_struct(self, node, name):\n kernels = self.gpu_kernels(node, name)\n cleanups = '\\n'.join(self._generate_kernel_cleanup(k) for k in kernels)\n return cleanups\n\n # This is a shorthand for if your op only has a fixed version\n # You can reimplement it, but make sure to call kernel_version()\n def c_code_cache_version_apply(self, node):\n return (self.c_code_cache_version(), self.kernel_version(node))\n\n def kernel_version(self, node):\n return (3, node.get_context().bin_id)\n\n\nclass HostFromGpu(Op):\n __props__ = ()\n _f16_ok = True\n\n def __str__(self):\n return 'HostFromGpu(gpuarray)'\n\n def make_node(self, x):\n if not isinstance(x.type, 
GpuArrayType):\n raise TypeError(x)\n return Apply(self, [x],\n [tensor.TensorType(dtype=x.dtype,\n broadcastable=x.broadcastable)()])\n\n def perform(self, node, inp, out):\n x, = inp\n z, = out\n z[0] = numpy.asarray(x)\n\n def c_code(self, node, name, inputs, outputs, sub):\n return \"\"\"\n GpuArray %(name)s_ga_s;\n GpuArray *%(name)s_ga = NULL;\n int %(name)serr;\n PyArray_Descr *%(name)s_dtype;\n if (!GpuArray_ISONESEGMENT(&%(inp)s->ga)) {\n if (GpuArray_copy(&%(name)s_ga_s, &%(inp)s->ga, GA_C_ORDER) != GA_NO_ERROR) {\n PyErr_SetString(PyExc_RuntimeError, \"Can't make contiguous copy\");\n %(fail)s;\n }\n %(name)s_ga = &%(name)s_ga_s;\n } else {\n %(name)s_ga = &%(inp)s->ga;\n }\n %(name)s_dtype = typecode_to_dtype(%(name)s_ga->typecode);\n Py_XDECREF(%(out)s);\n // PyArray_Empty below steals a reference to the dtype we pass it\n // so we need an extra one to spare.\n Py_INCREF(%(name)s_dtype);\n %(out)s = (PyArrayObject *)PyArray_Empty(%(inp)s->ga.nd,\n (npy_intp *)%(inp)s->ga.dimensions,\n %(name)s_dtype,\n (%(inp)s->ga.flags & GA_F_CONTIGUOUS) &&\n !(%(inp)s->ga.flags & GA_C_CONTIGUOUS));\n if (%(out)s == NULL) {\n if (%(name)s_ga == &%(name)s_ga_s) GpuArray_clear(%(name)s_ga);\n %(fail)s\n }\n %(name)serr = GpuArray_read(PyArray_DATA(%(out)s),\n PyArray_NBYTES(%(out)s),\n %(name)s_ga);\n if (%(name)s_ga == &%(name)s_ga_s) GpuArray_clear(%(name)s_ga);\n if (%(name)serr != GA_NO_ERROR) {\n PyErr_SetString(PyExc_RuntimeError, \"Could not read device data.\");\n %(fail)s\n }\n \"\"\" % {'name': name, 'fail': sub['fail'], 'inp': inputs[0],\n 'out': outputs[0]}\n\n def c_code_cache_version(self):\n return (1,)\n\n def grad(self, inputs, grads):\n gz, = grads\n return [GpuFromHost(inputs[0].type.context_name)(gz)]\n\n def R_op(self, inputs, eval_points):\n ev, = eval_points\n return self(ev)\n\n def infer_shape(self, node, xshp):\n return xshp\n\nhost_from_gpu = HostFromGpu()\n\n\nclass GpuFromHost(Op):\n __props__ = ('context_name',)\n _f16_ok = True\n context_type = gpu_context_type\n\n def __init__(self, context_name):\n self.context_name = context_name\n\n def __str__(self):\n return 'GpuFromHost<%s>' % (self.context_name,)\n\n def make_node(self, x):\n if not isinstance(x.type, tensor.TensorType):\n raise TypeError(x)\n return Apply(self, [x], [GpuArrayType(broadcastable=x.broadcastable,\n context_name=self.context_name,\n dtype=x.dtype)()])\n\n def get_context(self, node):\n return get_context(self.context_name)\n\n def perform(self, node, inp, out, ctx):\n x, = inp\n z, = out\n z[0] = gpuarray.array(x, context=ctx)\n\n def grad(self, inputs, grads):\n gz, = grads\n return [host_from_gpu(as_gpuarray_variable(\n gz, context_name=self.context_name))]\n\n def R_op(self, inputs, eval_points):\n ev, = eval_points\n return self(ev)\n\n def infer_shape(self, node, xshp):\n return xshp\n\n def c_code(self, node, name, inputs, outputs, sub):\n return \"\"\"\n PyArrayObject *%(name)s_tmp;\n %(name)s_tmp = PyArray_GETCONTIGUOUS(%(inp)s);\n if (%(name)s_tmp == NULL)\n %(fail)s\n Py_XDECREF(%(out)s);\n %(out)s = pygpu_fromhostdata(PyArray_DATA(%(name)s_tmp),\n get_typecode((PyObject *)PyArray_DESCR(%(name)s_tmp)),\n PyArray_NDIM(%(name)s_tmp),\n (size_t *)PyArray_DIMS(%(name)s_tmp),\n (ssize_t *)PyArray_STRIDES(%(name)s_tmp),\n %(ctx)s,\n Py_None);\n Py_DECREF(%(name)s_tmp);\n if (%(out)s == NULL) {\n %(fail)s\n }\n \"\"\" % {'name': name, 'inp': inputs[0], 'ctx': sub['context'],\n 'out': outputs[0], 'fail': sub['fail']}\n\n def c_code_cache_version(self):\n return (7,)\n\n\nclass 
GpuToGpu(Op):\n __props__ = ('context_name',)\n _f16_ok = True\n context_type = gpu_context_type\n\n def __init__(self, context_name):\n self.context_name = context_name\n\n def __str__(self):\n return 'GpuToGpu<%s>' % (self.context_name,)\n\n def make_node(self, x):\n if not isinstance(x.type, GpuArrayType):\n raise TypeError(x)\n return Apply(self, [x], [GpuArrayType(broadcastable=x.broadcastable,\n context_name=self.context_name,\n dtype=x.dtype)()])\n\n def get_context(self, node):\n return get_context(self.context_name)\n\n def perform(self, node, inp, out, ctx):\n x, = inp\n z, = out\n z[0] = x.transfer(ctx)\n\n def grad(self, inputs, grads):\n gz, = grads\n return [GpuToGpu(inputs[0].type.context_name)(gz)]\n\n def R_op(self, inputs, eval_points):\n return self(eval_points[0])\n\n def infer_shape(self, node, xshp):\n return xshp\n\n def c_code(self, node, name, inputs, outputs, sub):\n return \"\"\"\n Py_XDECREF(%(out)s);\n %(out)s = pygpu_transfer(%(inp)s, %(ctx)s, 0);\n if (%(out)s == NULL) {\n %(fail)s\n }\n \"\"\" % {'inp': inputs[0], 'ctx': sub['context'],\n 'out': outputs[0], 'fail': sub['fail']}\n\n def c_code_cache_version(self):\n return (0,)\n\n\nclass GpuAlloc(HideC, Alloc):\n \"\"\"\n\n Parameters\n ----------\n context_name : str\n The name of the context in which to allocate memory\n memset_0 : bool\n It's only an optimized version. True, it means the\n value is always 0, so the c code call memset as it is faster.\n\n \"\"\"\n\n __props__ = ('memset_0', 'context_name')\n _f16_ok = True\n context_type = gpu_context_type\n\n def __init__(self, context_name, memset_0=False):\n self.context_name = context_name\n self.memset_0 = memset_0\n\n def get_context(self, node):\n return get_context(self.context_name)\n\n def __str__(self):\n # Hide the memset parameter when not used to prevent confusion.\n if self.memset_0:\n m = \"{memset_0=True}\"\n else:\n m = \"\"\n return \"%s<%s>%s\" % (self.__class__.__name__, self.context_name, m)\n\n def make_node(self, value, *shape):\n value = as_gpuarray_variable(value, context_name=self.context_name)\n sh, bcast = self.validate_shape(shape)\n if value.ndim > len(sh):\n TypeError(\"The GpuAlloc value to use has more dimensions \"\n \"than the specified shape\", value.ndim, len(sh))\n otype = value.type.clone(broadcastable=bcast)\n return Apply(self, [value] + sh, [otype()])\n\n def c_headers(self):\n return ['<numpy_compat.h>']\n\n def perform(self, node, inputs, outs, ctx):\n out, = outs\n v = inputs[0]\n sh = tuple(map(int, inputs[1:]))\n if out[0] is None or out[0].shape != sh:\n if self.memset_0:\n out[0] = gpuarray.zeros(sh, dtype=v.dtype, context=ctx)\n else:\n out[0] = gpuarray.empty(sh, dtype=v.dtype, context=ctx)\n out[0][...] = v\n else:\n out[0][...] 
= v\n if config.gpuarray.sync:\n out[0].sync()\n\n def c_code(self, node, name, inp, out, sub):\n vv = inp[0]\n ndim = len(inp[1:])\n zz, = out\n\n memset_0 = int(self.memset_0)\n code = \"\"\"\n int i;\n size_t %(name)s_shape[%(ndim)s];\n \"\"\" % dict(name=name, ndim=ndim)\n\n for i, shp_i in enumerate(inp[1:]):\n code += \"\"\"\n %(name)s_shape[%(i)s] = ((dtype_%(shp_i)s *)PyArray_DATA(%(shp_i)s))[0];\n \"\"\" % dict(name=name, i=i, shp_i=shp_i)\n\n code += \"\"\"\n int need_new_out = (NULL == %(zz)s || %(zz)s->ga.nd != %(ndim)s);\n\n if (!need_new_out)\n for (i = 0; i < %(ndim)s; i++)\n need_new_out |= %(zz)s->ga.dimensions[i] != %(name)s_shape[i];\n\n if (need_new_out && (%(memset_0)s)) {\n //pygpu_zeros can be faster then empty followed by memset.\n Py_XDECREF(%(zz)s);\n %(zz)s = pygpu_zeros(%(ndim)s, %(name)s_shape,\n %(vv)s->ga.typecode, GA_C_ORDER,\n %(ctx)s, Py_None);\n if (!%(zz)s) {\n %(fail)s\n }\n } else {\n if (need_new_out) {\n Py_XDECREF(%(zz)s);\n %(zz)s = pygpu_empty(%(ndim)s, %(name)s_shape,\n %(vv)s->ga.typecode, GA_C_ORDER,\n %(ctx)s, Py_None);\n if (!%(zz)s) {\n %(fail)s\n }\n }\n if (%(memset_0)s && GpuArray_ISONESEGMENT(&%(zz)s->ga))\n {\n int err = GpuArray_memset(&%(zz)s->ga, 0);\n if (err != GA_NO_ERROR)\n {\n PyErr_Format(PyExc_MemoryError,\n \"GpuAlloc: Error memsetting %%llu\"\n \" element of device memory to 0.\",\n (unsigned long long)PyGpuArray_SIZE(%(zz)s));\n %(fail)s;\n }\n }\n else if (GpuArray_setarray(&%(zz)s->ga, &%(vv)s->ga) !=\n GA_NO_ERROR) {\n PyErr_SetString(PyExc_ValueError, \"setarray failed\");\n %(fail)s\n }\n }\n \"\"\" % dict(name=name, ndim=ndim, zz=zz, vv=vv, ctx=sub['context'],\n fail=sub['fail'], memset_0=memset_0)\n\n if config.gpuarray.sync:\n code += \"GpuArray_sync(&%(zz)s->ga);\" % dict(zz=zz)\n\n return code\n\n def c_code_cache_version(self):\n return (3,)\n\n def do_constant_folding(self, node):\n from . import subtensor, blas\n for client in node.outputs[0].clients:\n if client[0] == 'output':\n # If the output is a constant, it will have to be deepcopied\n # each time the function is called. So we do not fold.\n return False\n # The following ops work inplace of their input id 0.\n elif (client[1] == 0 and\n # Ops that will work inplace on the Alloc. So if they\n # get constant_folded, they would copy the\n # constant and this is less efficients.\n\n # Not doing the constant folding could also lower\n # the peak memory usage, as we the \"constant\" won't\n # always exists.\n isinstance(client[0].op,\n (subtensor.GpuIncSubtensor,\n subtensor.GpuAdvancedIncSubtensor1,\n subtensor.GpuAdvancedIncSubtensor1_dev20,\n blas.GpuGemm, blas.GpuGemv,\n blas.GpuGer)\n )):\n return False\n # If the clients is a transfer, we don't want to fold. 
We\n # let the moving opt finish before deciding what to do.\n elif isinstance(client[0].op, HostFromGpu):\n return False\n return True\n\n\nclass GpuAllocEmpty(HideC, Alloc):\n __props__ = ('dtype', 'context_name')\n _f16_ok = True\n context_type = gpu_context_type\n\n def __init__(self, dtype, context_name):\n self.dtype = dtype\n self.context_name = context_name\n\n def get_context(self, node):\n return get_context(self.context_name)\n\n def make_node(self, *shape):\n sh, bcast = self.validate_shape(shape)\n output = GpuArrayType(dtype=self.dtype, broadcastable=bcast,\n context_name=self.context_name)()\n output.tag.values_eq_approx = tensor.type.values_eq_approx_always_true\n # The outut can contain nan/inf.\n output.type.filter_checks_isfinite = False\n return Apply(self, sh, [output])\n\n def perform(self, node, inputs, out_, ctx):\n out = out_[0]\n sh = [int(i) for i in inputs]\n if out[0] is None or out[0].shape != sh:\n out[0] = pygpu.empty(sh, dtype=self.dtype, context=ctx)\n # if out[0] is the right shape, we just return it\n\n def c_headers(self):\n return ['<gpuarray_helper.h>']\n\n def c_header_dirs(self):\n return [os.path.dirname(__file__)]\n\n def c_code(self, node, name, inp, out, sub):\n ndim = len(inp)\n zz = out[0]\n fail = sub['fail']\n\n code = [\"\"\"\nint i;\nsize_t shape[%(ndim)s];\n\"\"\" % dict(ndim=ndim)]\n\n for i, shp_i in enumerate(inp):\n code.append(\"\"\"\nshape[%(i)s] = ((dtype_%(shp_i)s *)PyArray_DATA(%(shp_i)s))[0];\n\"\"\" % dict(i=i, shp_i=shp_i))\n\n code.append(\"\"\"\nif (theano_prep_output(&%(zz)s, %(ndim)s, shape, %(type)s, GA_C_ORDER,\n %(ctx)s)) {\n %(fail)s\n}\n\"\"\" % dict(zz=zz, ndim=ndim, type=gpuarray.dtype_to_typecode(self.dtype),\n fail=fail, ctx=sub['context']))\n\n return ''.join(code)\n\n def c_code_cache_version(self):\n return (1,)\n\n def do_constant_folding(self, node):\n return False\n\n def infer_shape(self, node, input_shapes):\n return [node.inputs]\n\n def grad(self, *args):\n # Don't reuse the grad implementation from Alloc\n raise NotImplementedError(\"grad disabled\")\n\n\ndef empty_like(var):\n return GpuAllocEmpty(var.type.dtype, var.type.context_name)(*var.shape)\n\n\nclass GpuContiguous(Op):\n \"\"\"\n Always return a c contiguous output. 
Copy the input only if it is\n not already c contiguous.\n\n \"\"\"\n __props__ = ()\n view_map = {0: [0]}\n _f16_ok = True\n\n def grad(self, inputs, dout):\n x, = inputs\n dout, = dout\n dout = as_gpuarray_variable(dout, context_name=infer_context_name(x))\n\n return [dout]\n\n def make_node(self, input):\n input = as_gpuarray_variable(input,\n context_name=infer_context_name(input))\n return Apply(self, [input], [input.type()])\n\n def c_headers(self):\n return ['<numpy_compat.h>']\n\n def c_code_cache_version(self):\n return (3,)\n\n def c_code(self, node, name, inp, out, sub):\n input, = inp\n z, = out\n fail = sub['fail']\n str = \"\"\"\n {\n if (GpuArray_IS_C_CONTIGUOUS(&(%(input)s->ga))){\n Py_XDECREF(%(z)s);\n %(z)s = %(input)s;\n Py_INCREF(%(z)s);\n\n } else if ((NULL == %(z)s)\"\"\" % locals()\n for i in xrange(len(node.inputs[0].type.broadcastable)):\n str += \"\\n|| (PyGpuArray_DIMS(%(input)s)[%(i)s] != PyGpuArray_DIMS(%(z)s)[%(i)s])\" % locals()\n str += \"\"\"\n || !GpuArray_IS_C_CONTIGUOUS(&(%(z)s->ga)))\n {\n Py_XDECREF(%(z)s);\n %(z)s = pygpu_copy(%(input)s, GA_C_ORDER);\n if (!%(z)s)\n {\n %(fail)s;\n }\n }else if(pygpu_move(%(z)s, %(input)s) == -1) {\n %(fail)s;\n }\n }\n \"\"\" % locals()\n return str\n\ngpu_contiguous = GpuContiguous()\n\n\nclass GpuReshape(HideC, tensor.Reshape):\n \"\"\"\n Implement Reshape on the gpu.\n\n \"\"\"\n\n _f16_ok = True\n\n # __hash__, __eq__, __str__ come from tensor.Reshape\n def make_node(self, x, shp):\n ctx_name = infer_context_name(x)\n x = as_gpuarray_variable(x, context_name=ctx_name)\n res = host_from_gpu(x).reshape(shp, ndim=self.ndim)\n otype = GpuArrayType(dtype=res.dtype,\n broadcastable=res.broadcastable,\n context_name=ctx_name)\n return Apply(self, [x, shp], [otype()])\n\n def perform(self, node, inp, out_):\n x, shp = inp\n out, = out_\n if (len(shp) != self.ndim):\n raise ValueError('shape argument to GpuReshape.perform'\n ' has incorrect length %i'\n ', should be %i' % (len(shp), self.ndim), shp)\n\n if shp.prod() != x.size:\n # We need to do check here to raise the same error as NumPy.\n # We should make pygpu do the same.\n ss = 1\n nb_m1 = 0\n for i in shp:\n if i == -1:\n nb_m1 += 1\n else:\n ss *= i\n if nb_m1 > 1:\n raise ValueError(\"Only one -1 is accepted in the new shape\")\n elif nb_m1 == 1:\n if (x.size % ss) != 0:\n raise ValueError(\"When using -1 in new shape, the computed new shape must be an multiple of the original shape.\")\n else:\n raise ValueError(\"total size of new array must be unchanged\")\n out[0] = x.reshape(tuple(shp))\n\n def c_code_cache_version(self):\n return (1,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n x, shape = inputs\n output, = outputs\n new_ndim = self.ndim\n sdtype = node.inputs[1].type.dtype_specs()[1]\n fail = sub['fail']\n return \"\"\"\n size_t old_size = 1, new_size = 1;\n size_t new_dims[%(new_ndim)s];\n int compute_axis = -1;\n\n assert (PyArray_NDIM(%(shape)s) == 1);\n if (PyArray_DIM(%(shape)s, 0) != %(new_ndim)s)\n {\n PyErr_Format(PyExc_ValueError,\n \"GpuReshape: given shape is of incorrect \"\n \"length (%%d should be %%d).\",\n PyArray_DIM(%(shape)s, 0), %(new_ndim)s);\n %(fail)s;\n }\n\n for (size_t i = 0; i < %(x)s->ga.nd; ++i)\n old_size *= %(x)s->ga.dimensions[i];\n\n for (size_t i = 0; i < %(new_ndim)s; ++i)\n {\n new_dims[i] = ((%(sdtype)s*)(\n PyArray_BYTES(%(shape)s) +\n i * PyArray_STRIDES(%(shape)s)[0]))[0];\n if (new_dims[i] == -1)\n {\n if (compute_axis != -1)\n {\n PyErr_Format(PyExc_ValueError,\n \"GpuReshape: only one -1 is accepted 
\"\n \"in the new shape, but got two at \"\n \"indices %%d and %%zu.\",\n compute_axis, i);\n %(fail)s;\n }\n compute_axis = i;\n }\n else\n new_size *= new_dims[i];\n }\n\n if (compute_axis == -1 && new_size != old_size)\n {\n PyErr_Format(PyExc_ValueError,\n \"GpuReshape: trying to reshape an array of \"\n \"total size %%zu into an array of total size \"\n \"%%zu.\", old_size, new_size);\n %(fail)s;\n }\n else if (compute_axis != -1 && old_size %% new_size != 0)\n {\n PyErr_Format(PyExc_ValueError,\n \"GpuReshape: -1 axis found at index %%d in \"\n \"new shape but the total size of the array \"\n \"(%%zu) is not divisible by the given shapes \"\n \"(%%zu).\", compute_axis, old_size, new_size);\n %(fail)s;\n }\n\n Py_XDECREF(%(output)s);\n %(output)s = pygpu_reshape(%(x)s, %(new_ndim)s, new_dims,\n GA_C_ORDER, 0, compute_axis);\n if (%(output)s == NULL)\n {\n %(fail)s;\n }\n \"\"\" % locals()\n\n\nclass GpuJoin(HideC, Join):\n _f16_ok = True\n context_type = gpu_context_type\n\n def make_node(self, axis, *tensors):\n node = Join.make_node(self, axis, *tensors)\n\n ctx_name = infer_context_name(*tensors)\n\n def agv(v):\n return as_gpuarray_variable(v, context_name=ctx_name)\n\n return Apply(self, [node.inputs[0]] + list(map(agv, tensors)),\n [GpuArrayType(broadcastable=node.outputs[0].broadcastable,\n dtype=node.outputs[0].dtype,\n context_name=ctx_name)()])\n\n def get_context(self, node):\n return node.outputs[0].type.context\n\n def perform(self, node, axis_and_tensors, out_, ctx):\n out, = out_\n axis = int(axis_and_tensors[0])\n tensors = axis_and_tensors[1:]\n out[0] = pygpu.concatenate(tensors, axis=axis, context=ctx).astype(\n node.outputs[0].dtype)\n\n def c_code_cache_version(self):\n return (2,)\n\n def c_code(self, node, name, inputs, out_, sub):\n copy_to_list = []\n restype = pygpu.gpuarray.dtype_to_typecode(node.outputs[0].dtype)\n for i, inp in enumerate(inputs[1:]):\n copy_to_list.append(\"als[%s] = &%s->ga;\" % (i, inp))\n return \"\"\"\nconst GpuArray **als = (const GpuArray **)PyMem_Malloc(sizeof(GpuArray *) *\n %(n)s);\nif (als == NULL) {\n PyErr_NoMemory();\n %(fail)s\n}\n%(copy_inputs_to_list)s\nPy_XDECREF(%(out)s);\n{\nint axis = PyInt_AsLong((PyObject *)%(axis)s);\nif (axis < 0) {\n if (axis == -1 && PyErr_Occurred()) {\n %(fail)s\n }\n axis += als[0]->nd;\n if (axis < 0) {\n PyErr_SetString(PyExc_IndexError, \"invalid axis\");\n %(fail)s\n }\n}\n%(out)s = pygpu_concatenate(als, %(n)s, axis,\n %(restype)s, (PyObject *)&PyGpuArrayType,\n %(ctx)s);\n}\nPyMem_Free(als);\nif (%(out)s == NULL)\n %(fail)s\n \"\"\" % dict(n=len(inputs[1:]), fail=sub['fail'], out=out_[0],\n axis=inputs[0], copy_inputs_to_list='\\n'.join(copy_to_list),\n restype=restype, ctx=sub['context'])\n\ngpu_join = GpuJoin()\n\n\nclass GpuSplit(HideC, Split):\n def make_node(self, x, axis, splits):\n node = Split.make_node(self, x, axis, splits)\n x = as_gpuarray_variable(x, infer_context_name(x))\n outs = [GpuArrayType(dtype=o.dtype, broadcastable=o.broadcastable,\n context_name=x.type.context_name)()\n for o in node.outputs]\n return Apply(self, [x] + node.inputs[1:], outs)\n # we reuse the perform of the CPU op, which is suitable\n\n\nclass GpuEye(GpuKernelBase, Op):\n __props__ = ('dtype', 'context_name')\n _f16_ok = True\n\n def __init__(self, dtype=None, context_name=None):\n if dtype is None:\n dtype = config.floatX\n self.dtype = dtype\n self.context_name = context_name\n\n def get_context(self, node):\n return get_context(self.context_name)\n\n def make_node(self, n, m, k):\n n = 
tensor.as_tensor_variable(n)\n m = tensor.as_tensor_variable(m)\n k = tensor.as_tensor_variable(k)\n assert n.ndim == 0\n assert m.ndim == 0\n assert k.ndim == 0\n otype = GpuArrayType(dtype=self.dtype,\n broadcastable=(False, False),\n context_name=self.context_name)\n\n # k != 0 isn't implemented on the GPU yet.\n assert tensor.get_scalar_constant_value(k) == 0\n return Apply(self, [n, m], [otype()])\n\n def infer_shape(self, node, in_shapes):\n out_shape = [node.inputs[0], node.inputs[1]]\n return [out_shape]\n\n def grad(self, inp, grads):\n return [grad_undefined(self, i, inp[i])\n for i in xrange(3)]\n\n def gpu_kernels(self, node, name):\n code = \"\"\"\nKERNEL void k(GLOBAL_MEM %(ctype)s *a, ga_size n, ga_size m) {\n ga_size nb = n < m ? n : m;\n for (ga_size i = LID_0; i < nb; i += LDIM_0) {\n a[i*m + i] = %(write_a)s(1);\n }\n}\"\"\" % dict(ctype=pygpu.gpuarray.dtype_to_ctype(self.dtype),\n name=name, write_a=write_w(self.dtype))\n return [Kernel(\n code=code, name=\"k\",\n params=[gpuarray.GpuArray, gpuarray.SIZE, gpuarray.SIZE],\n flags=Kernel.get_flags(self.dtype),\n objvar='k_eye_' + name)]\n\n def c_code(self, node, name, inp, out, sub):\n n, m = inp\n z, = out\n fail = sub['fail']\n ctx = sub['context']\n typecode = pygpu.gpuarray.dtype_to_typecode(self.dtype)\n sync = bool(config.gpuarray.sync)\n kname = self.gpu_kernels(node, name)[0].objvar\n s = \"\"\"\n size_t dims[2] = {0, 0};\n size_t ls, gs;\n void *args[3];\n int err;\n\n dims[0] = ((dtype_%(n)s*)PyArray_DATA(%(n)s))[0];\n dims[1] = ((dtype_%(m)s*)PyArray_DATA(%(m)s))[0];\n Py_CLEAR(%(z)s);\n\n %(z)s = pygpu_zeros(2, dims,\n %(typecode)s,\n GA_C_ORDER,\n %(ctx)s, Py_None);\n if (%(z)s == NULL) {\n %(fail)s\n }\n\n args[0] = %(z)s->ga.data;\n args[1] = &dims[0];\n args[2] = &dims[1];\n ls = 1;\n gs = 256;\n err = GpuKernel_call(&%(kname)s, 1, &ls, &gs, 0, args);\n if (err != GA_NO_ERROR) {\n PyErr_Format(PyExc_RuntimeError,\n \"gpuarray error: kEye: %%s. n%%lu, m=%%lu.\",\n GpuKernel_error(&%(kname)s, err),\n (unsigned long)dims[0], (unsigned long)dims[1]);\n %(fail)s;\n }\n\n if(%(sync)d)\n GpuArray_sync(&%(z)s->ga);\n \"\"\" % locals()\n\n return s\n\n def c_code_cache_version(self):\n return (5,)\n" ]
[ [ "numpy.asarray", "numpy.isnan", "numpy.dtype", "numpy.all", "numpy.prod", "numpy.get_include" ], [ "numpy.may_share_memory" ], [ "numpy.all", "numpy.prod", "numpy.ndindex", "numpy.array", "numpy.zeros" ], [ "numpy.asarray", "numpy.dtype" ] ]
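The GpuReshape c_code above infers at most one unknown (-1) output dimension from the total element count. A minimal Python sketch of that same bookkeeping, assuming plain shape tuples; `infer_reshape` is a hypothetical helper for illustration, not Theano API:

def infer_reshape(old_shape, new_dims):
    # Mirror the old_size / new_size accounting in GpuReshape's C code.
    old_size = 1
    for d in old_shape:
        old_size *= d
    new_size = 1
    compute_axis = None  # index of the single allowed -1 axis
    for i, d in enumerate(new_dims):
        if d == -1:
            if compute_axis is not None:
                raise ValueError("expected at most one -1 in the new shape, "
                                 "got two at indices %d and %d" % (compute_axis, i))
            compute_axis = i
        else:
            new_size *= d
    if compute_axis is None:
        if new_size != old_size:
            raise ValueError("trying to reshape total size %d into total size %d"
                             % (old_size, new_size))
    elif old_size % new_size != 0:
        raise ValueError("-1 axis at index %d, but total size %d is not divisible by %d"
                         % (compute_axis, old_size, new_size))
    out = list(new_dims)
    if compute_axis is not None:
        out[compute_axis] = old_size // new_size
    return tuple(out)

assert infer_reshape((4, 6), (2, -1)) == (2, 12)  # the -1 becomes 24 // 2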
futurewarning/pyro
[ "005032f10099188fea86f63b6baa46a27867983f", "005032f10099188fea86f63b6baa46a27867983f", "005032f10099188fea86f63b6baa46a27867983f", "005032f10099188fea86f63b6baa46a27867983f", "005032f10099188fea86f63b6baa46a27867983f", "11a96cde05756def826c232d76f9cff66f6e6d4f" ]
[ "pyro/distributions/transforms/affine_coupling.py", "pyro/infer/tracetmc_elbo.py", "pyro/infer/reparam/projected_normal.py", "pyro/contrib/bnn/utils.py", "pyro/contrib/examples/bart.py", "tests/infer/reparam/test_split.py" ]
[ "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport operator\nfrom functools import partial, reduce\n\nimport torch\nfrom torch.distributions.utils import _sum_rightmost\n\nfrom pyro.nn import ConditionalDenseNN, DenseNN\n\nfrom .. import constraints\nfrom ..conditional import ConditionalTransformModule\nfrom ..torch_transform import TransformModule\nfrom ..transforms.utils import clamp_preserve_gradients\nfrom ..util import copy_docs_from\n\n\n@copy_docs_from(TransformModule)\nclass AffineCoupling(TransformModule):\n r\"\"\"\n An implementation of the affine coupling layer of RealNVP (Dinh et al., 2017)\n that uses the bijective transform,\n\n :math:`\\mathbf{y}_{1:d} = \\mathbf{x}_{1:d}`\n :math:`\\mathbf{y}_{(d+1):D} = \\mu + \\sigma\\odot\\mathbf{x}_{(d+1):D}`\n\n where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs,\n e.g. :math:`\\mathbf{x}_{1:d}` represents the first :math:`d` elements of the\n inputs, and :math:`\\mu,\\sigma` are shift and scale parameters calculated\n as the output of a function inputting only :math:`\\mathbf{x}_{1:d}`.\n\n That is, the first :math:`d` components remain unchanged, and the subsequent\n :math:`D-d` are scaled and shifted by a function of the previous components.\n\n Together with :class:`~pyro.distributions.TransformedDistribution` this provides\n a way to create richer variational approximations.\n\n Example usage:\n\n >>> from pyro.nn import DenseNN\n >>> input_dim = 10\n >>> split_dim = 6\n >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))\n >>> param_dims = [input_dim-split_dim, input_dim-split_dim]\n >>> hypernet = DenseNN(split_dim, [10*input_dim], param_dims)\n >>> transform = AffineCoupling(split_dim, hypernet)\n >>> pyro.module(\"my_transform\", transform) # doctest: +SKIP\n >>> flow_dist = dist.TransformedDistribution(base_dist, [transform])\n >>> flow_dist.sample() # doctest: +SKIP\n\n The inverse of the Bijector is required when, e.g., scoring the log density of a\n sample with :class:`~pyro.distributions.TransformedDistribution`. This\n implementation caches the inverse of the Bijector when its forward operation is\n called, e.g., when sampling from\n :class:`~pyro.distributions.TransformedDistribution`. However, if the cached\n value isn't available, either because it was overwritten during sampling a new\n value or an arbitrary value is being scored, it will calculate it manually.\n\n This is an operation that scales as O(1), i.e. constant in the input dimension.\n So in general, it is cheap to sample *and* score (an arbitrary value) from\n :class:`~pyro.distributions.transforms.AffineCoupling`.\n\n :param split_dim: Zero-indexed dimension :math:`d` upon which to perform input/\n output split for transformation.\n :type split_dim: int\n :param hypernet: a neural network whose forward call returns a real-valued mean\n and log-scale as a tuple. The input should have final dimension split_dim\n and the output final dimension input_dim-split_dim for each member of the\n tuple.\n :type hypernet: callable\n :param dim: the tensor dimension on which to split. 
This value must be negative\n and defines the event dim as `abs(dim)`.\n :type dim: int\n :param log_scale_min_clip: The minimum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_min_clip: float\n :param log_scale_max_clip: The maximum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_max_clip: float\n\n References:\n\n [1] Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation\n using Real NVP. ICLR 2017.\n\n \"\"\"\n\n bijective = True\n\n def __init__(self, split_dim, hypernet, *, dim=-1, log_scale_min_clip=-5., log_scale_max_clip=3.):\n super().__init__(cache_size=1)\n if dim >= 0:\n raise ValueError(\"'dim' keyword argument must be negative\")\n\n self.split_dim = split_dim\n self.nn = hypernet\n self.dim = dim\n self._cached_log_scale = None\n self.log_scale_min_clip = log_scale_min_clip\n self.log_scale_max_clip = log_scale_max_clip\n\n @constraints.dependent_property(is_discrete=False)\n def domain(self):\n return constraints.independent(constraints.real, -self.dim)\n\n @constraints.dependent_property(is_discrete=False)\n def codomain(self):\n return constraints.independent(constraints.real, -self.dim)\n\n def _call(self, x):\n \"\"\"\n :param x: the input into the bijection\n :type x: torch.Tensor\n\n Invokes the bijection x=>y; in the prototypical context of a\n :class:`~pyro.distributions.TransformedDistribution` `x` is a sample from\n the base distribution (or the output of a previous transform)\n \"\"\"\n x1, x2 = x.split([self.split_dim, x.size(self.dim) - self.split_dim], dim=self.dim)\n\n # Now that we can split on an arbitrary dimension, we have to do a bit of reshaping...\n mean, log_scale = self.nn(x1.reshape(x1.shape[:self.dim] + (-1,)))\n mean = mean.reshape(mean.shape[:-1] + x2.shape[self.dim:])\n log_scale = log_scale.reshape(log_scale.shape[:-1] + x2.shape[self.dim:])\n\n log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)\n self._cached_log_scale = log_scale\n\n y1 = x1\n y2 = torch.exp(log_scale) * x2 + mean\n return torch.cat([y1, y2], dim=self.dim)\n\n def _inverse(self, y):\n \"\"\"\n :param y: the output of the bijection\n :type y: torch.Tensor\n\n Inverts y => x. 
Uses a previously cached inverse if available, otherwise\n performs the inversion afresh.\n \"\"\"\n y1, y2 = y.split([self.split_dim, y.size(self.dim) - self.split_dim], dim=self.dim)\n x1 = y1\n\n # Now that we can split on an arbitrary dimension, we have to do a bit of reshaping...\n mean, log_scale = self.nn(x1.reshape(x1.shape[:self.dim] + (-1,)))\n mean = mean.reshape(mean.shape[:-1] + y2.shape[self.dim:])\n log_scale = log_scale.reshape(log_scale.shape[:-1] + y2.shape[self.dim:])\n\n log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)\n self._cached_log_scale = log_scale\n\n x2 = (y2 - mean) * torch.exp(-log_scale)\n return torch.cat([x1, x2], dim=self.dim)\n\n def log_abs_det_jacobian(self, x, y):\n \"\"\"\n Calculates the elementwise log absolute determinant of the Jacobian\n \"\"\"\n x_old, y_old = self._cached_x_y\n if self._cached_log_scale is not None and x is x_old and y is y_old:\n log_scale = self._cached_log_scale\n else:\n x1, x2 = x.split([self.split_dim, x.size(self.dim) - self.split_dim], dim=self.dim)\n _, log_scale = self.nn(x1.reshape(x1.shape[:self.dim] + (-1,)))\n log_scale = log_scale.reshape(log_scale.shape[:-1] + x2.shape[self.dim:])\n log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)\n return _sum_rightmost(log_scale, self.event_dim)\n\n\n@copy_docs_from(ConditionalTransformModule)\nclass ConditionalAffineCoupling(ConditionalTransformModule):\n r\"\"\"\n An implementation of the affine coupling layer of RealNVP (Dinh et al., 2017)\n that conditions on an additional context variable and uses the bijective\n transform,\n\n :math:`\\mathbf{y}_{1:d} = \\mathbf{x}_{1:d}`\n :math:`\\mathbf{y}_{(d+1):D} = \\mu + \\sigma\\odot\\mathbf{x}_{(d+1):D}`\n\n where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs,\n e.g. :math:`\\mathbf{x}_{1:d}` represents the first :math:`d` elements of the\n inputs, and :math:`\\mu,\\sigma` are shift and scale parameters calculated\n as the output of a function inputting :math:`\\mathbf{x}_{1:d}` and a context\n variable :math:`\\mathbf{z}\\in\\mathbb{R}^M`.\n\n That is, the first :math:`d` components remain unchanged, and the subsequent\n :math:`D-d` are scaled and shifted by a function of the previous components.\n\n Together with :class:`~pyro.distributions.ConditionalTransformedDistribution`\n this provides a way to create richer variational approximations.\n\n Example usage:\n\n >>> from pyro.nn import ConditionalDenseNN\n >>> input_dim = 10\n >>> split_dim = 6\n >>> context_dim = 4\n >>> batch_size = 3\n >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))\n >>> param_dims = [input_dim-split_dim, input_dim-split_dim]\n >>> hypernet = ConditionalDenseNN(split_dim, context_dim, [10*input_dim],\n ... param_dims)\n >>> transform = ConditionalAffineCoupling(split_dim, hypernet)\n >>> pyro.module(\"my_transform\", transform) # doctest: +SKIP\n >>> z = torch.rand(batch_size, context_dim)\n >>> flow_dist = dist.ConditionalTransformedDistribution(base_dist,\n ... 
[transform]).condition(z)\n >>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP\n\n The inverse of the Bijector is required when, e.g., scoring the log density of a\n sample with :class:`~pyro.distributions.ConditionalTransformedDistribution`.\n This implementation caches the inverse of the Bijector when its forward\n operation is called, e.g., when sampling from\n :class:`~pyro.distributions.ConditionalTransformedDistribution`. However, if the\n cached value isn't available, either because it was overwritten during sampling\n a new value or an arbitrary value is being scored, it will calculate it manually.\n\n This is an operation that scales as O(1), i.e. constant in the input dimension.\n So in general, it is cheap to sample *and* score (an arbitrary value) from\n :class:`~pyro.distributions.transforms.ConditionalAffineCoupling`.\n\n :param split_dim: Zero-indexed dimension :math:`d` upon which to perform input/\n output split for transformation.\n :type split_dim: int\n :param hypernet: A neural network whose forward call returns a real-valued mean\n and log-scale as a tuple. The input should have final dimension split_dim\n and the output final dimension input_dim-split_dim for each member of the\n tuple. The network also inputs a context variable as a keyword argument in\n order to condition the output upon it.\n :type hypernet: callable\n :param log_scale_min_clip: The minimum value for clipping the log(scale) from\n the NN\n :type log_scale_min_clip: float\n :param log_scale_max_clip: The maximum value for clipping the log(scale) from\n the NN\n :type log_scale_max_clip: float\n\n References:\n\n Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation using\n Real NVP. ICLR 2017.\n\n \"\"\"\n\n domain = constraints.real_vector\n codomain = constraints.real_vector\n bijective = True\n\n def __init__(self, split_dim, hypernet, **kwargs):\n super().__init__()\n self.split_dim = split_dim\n self.nn = hypernet\n self.kwargs = kwargs\n\n def condition(self, context):\n cond_nn = partial(self.nn, context=context)\n return AffineCoupling(self.split_dim, cond_nn, **self.kwargs)\n\n\ndef affine_coupling(input_dim, hidden_dims=None, split_dim=None, dim=-1, **kwargs):\n \"\"\"\n A helper function to create an\n :class:`~pyro.distributions.transforms.AffineCoupling` object that takes care of\n constructing a dense network with the correct input/output dimensions.\n\n :param input_dim: Dimension(s) of the input variable. Note that when\n `dim < -1` this must be a tuple corresponding to the event shape.\n :type input_dim: int\n :param hidden_dims: The desired hidden dimensions of the dense network. Defaults\n to using [10*input_dim]\n :type hidden_dims: list[int]\n :param split_dim: The dimension to split the input on for the coupling\n transform. Defaults to using input_dim // 2\n :type split_dim: int\n :param dim: the tensor dimension on which to split. 
This value must be negative\n and defines the event dim as `abs(dim)`.\n :type dim: int\n :param log_scale_min_clip: The minimum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_min_clip: float\n :param log_scale_max_clip: The maximum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_max_clip: float\n\n \"\"\"\n if not isinstance(input_dim, int):\n if len(input_dim) != -dim:\n raise ValueError('event shape {} must have same length as event_dim {}'.format(input_dim, -dim))\n event_shape = input_dim\n extra_dims = reduce(operator.mul, event_shape[(dim + 1):], 1)\n else:\n event_shape = [input_dim]\n extra_dims = 1\n event_shape = list(event_shape)\n\n if split_dim is None:\n split_dim = event_shape[dim] // 2\n if hidden_dims is None:\n hidden_dims = [10 * event_shape[dim] * extra_dims]\n\n hypernet = DenseNN(split_dim * extra_dims,\n hidden_dims,\n [(event_shape[dim] - split_dim) * extra_dims,\n (event_shape[dim] - split_dim) * extra_dims])\n return AffineCoupling(split_dim, hypernet, dim=dim, **kwargs)\n\n\ndef conditional_affine_coupling(input_dim, context_dim, hidden_dims=None, split_dim=None, dim=-1, **kwargs):\n \"\"\"\n A helper function to create an\n :class:`~pyro.distributions.transforms.ConditionalAffineCoupling` object that\n takes care of constructing a dense network with the correct input/output\n dimensions.\n\n :param input_dim: Dimension of input variable\n :type input_dim: int\n :param context_dim: Dimension of context variable\n :type context_dim: int\n :param hidden_dims: The desired hidden dimensions of the dense network. Defaults\n to using [10*input_dim]\n :type hidden_dims: list[int]\n :param split_dim: The dimension to split the input on for the coupling\n transform. Defaults to using input_dim // 2\n :type split_dim: int\n :param dim: the tensor dimension on which to split. 
This value must be negative\n and defines the event dim as `abs(dim)`.\n :type dim: int\n :param log_scale_min_clip: The minimum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_min_clip: float\n :param log_scale_max_clip: The maximum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_max_clip: float\n\n \"\"\"\n if not isinstance(input_dim, int):\n if len(input_dim) != -dim:\n raise ValueError('event shape {} must have same length as event_dim {}'.format(input_dim, -dim))\n event_shape = input_dim\n extra_dims = reduce(operator.mul, event_shape[(dim + 1):], 1)\n else:\n event_shape = [input_dim]\n extra_dims = 1\n event_shape = list(event_shape)\n\n if split_dim is None:\n split_dim = event_shape[dim] // 2\n if hidden_dims is None:\n hidden_dims = [10 * event_shape[dim] * extra_dims]\n\n nn = ConditionalDenseNN(split_dim * extra_dims, context_dim, hidden_dims,\n [(event_shape[dim] - split_dim) * extra_dims, (event_shape[dim] - split_dim) * extra_dims])\n return ConditionalAffineCoupling(split_dim, nn, dim=dim, **kwargs)\n", "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport queue\nimport warnings\n\nimport torch\n\nimport pyro.poutine as poutine\nfrom pyro.distributions.util import is_identically_zero\nfrom pyro.infer.elbo import ELBO\nfrom pyro.infer.enum import (\n get_importance_trace,\n iter_discrete_escape,\n iter_discrete_extend,\n)\nfrom pyro.infer.util import compute_site_dice_factor, is_validation_enabled, torch_item\nfrom pyro.ops import packed\nfrom pyro.ops.contract import einsum\nfrom pyro.poutine.enum_messenger import EnumMessenger\nfrom pyro.util import check_traceenum_requirements, warn_if_nan\n\n\ndef _compute_dice_factors(model_trace, guide_trace):\n \"\"\"\n compute per-site DiCE log-factors for non-reparameterized proposal sites\n this logic is adapted from pyro.infer.util.Dice.__init__\n \"\"\"\n log_probs = []\n for role, trace in zip((\"model\", \"guide\"), (model_trace, guide_trace)):\n for name, site in trace.nodes.items():\n if site[\"type\"] != \"sample\" or site[\"is_observed\"]:\n continue\n if role == \"model\" and name in guide_trace:\n continue\n\n log_prob, log_denom = compute_site_dice_factor(site)\n if not is_identically_zero(log_denom):\n dims = log_prob._pyro_dims\n log_prob = log_prob - log_denom\n log_prob._pyro_dims = dims\n if not is_identically_zero(log_prob):\n log_probs.append(log_prob)\n\n return log_probs\n\n\ndef _compute_tmc_factors(model_trace, guide_trace):\n \"\"\"\n compute per-site log-factors for all observed and unobserved variables\n log-factors are log(p / q) for unobserved sites and log(p) for observed sites\n \"\"\"\n log_factors = []\n for name, site in guide_trace.nodes.items():\n if site[\"type\"] != \"sample\" or site[\"is_observed\"]:\n continue\n log_proposal = site[\"packed\"][\"log_prob\"]\n log_factors.append(packed.neg(log_proposal))\n for name, site in model_trace.nodes.items():\n if site[\"type\"] != \"sample\":\n continue\n if site[\"name\"] not in guide_trace and \\\n not site[\"is_observed\"] and \\\n site[\"infer\"].get(\"enumerate\", None) == \"parallel\" and \\\n site[\"infer\"].get(\"num_samples\", -1) > 0:\n # site was sampled from the prior\n log_proposal = packed.neg(site[\"packed\"][\"log_prob\"])\n log_factors.append(log_proposal)\n log_factors.append(site[\"packed\"][\"log_prob\"])\n return log_factors\n\n\ndef _compute_tmc_estimate(model_trace, guide_trace):\n \"\"\"\n Use 
:func:`~pyro.ops.contract.einsum` to compute the Tensor Monte Carlo\n estimate of the marginal likelihood given parallel-sampled traces.\n \"\"\"\n # factors\n log_factors = _compute_tmc_factors(model_trace, guide_trace)\n log_factors += _compute_dice_factors(model_trace, guide_trace)\n\n if not log_factors:\n return 0.\n\n # loss\n eqn = \",\".join([f._pyro_dims for f in log_factors]) + \"->\"\n plates = \"\".join(frozenset().union(list(model_trace.plate_to_symbol.values()),\n list(guide_trace.plate_to_symbol.values())))\n tmc, = einsum(eqn, *log_factors, plates=plates,\n backend=\"pyro.ops.einsum.torch_log\",\n modulo_total=False)\n return tmc\n\n\nclass TraceTMC_ELBO(ELBO):\n \"\"\"\n A trace-based implementation of Tensor Monte Carlo [1]\n by way of Tensor Variable Elimination [2] that supports:\n - local parallel sampling over any sample site in the model or guide\n - exhaustive enumeration over any sample site in the model or guide\n\n To take multiple samples, mark the site with\n ``infer={'enumerate': 'parallel', 'num_samples': N}``.\n To configure all sites in a model or guide at once,\n use :func:`~pyro.infer.enum.config_enumerate` .\n To enumerate or sample a sample site in the ``model``,\n mark the site and ensure the site does not appear in the ``guide``.\n\n This assumes restricted dependency structure on the model and guide:\n variables outside of an :class:`~pyro.plate` can never depend on\n variables inside that :class:`~pyro.plate` .\n\n References\n\n [1] `Tensor Monte Carlo: Particle Methods for the GPU Era`,\n Laurence Aitchison (2018)\n\n [2] `Tensor Variable Elimination for Plated Factor Graphs`,\n Fritz Obermeyer, Eli Bingham, Martin Jankowiak, Justin Chiu, Neeraj Pradhan,\n Alexander Rush, Noah Goodman (2019)\n \"\"\"\n\n def _get_trace(self, model, guide, args, kwargs):\n \"\"\"\n Returns a single trace from the guide, and the model that is run\n against it.\n \"\"\"\n model_trace, guide_trace = get_importance_trace(\n \"flat\", self.max_plate_nesting, model, guide, args, kwargs)\n\n if is_validation_enabled():\n check_traceenum_requirements(model_trace, guide_trace)\n\n has_enumerated_sites = any(site[\"infer\"].get(\"enumerate\")\n for trace in (guide_trace, model_trace)\n for name, site in trace.nodes.items()\n if site[\"type\"] == \"sample\")\n\n if self.strict_enumeration_warning and not has_enumerated_sites:\n warnings.warn('Found no sample sites configured for enumeration. '\n 'If you want to enumerate sites, you need to @config_enumerate or set '\n 'infer={\"enumerate\": \"sequential\"} or infer={\"enumerate\": \"parallel\"}? 
'\n 'If you do not want to enumerate, consider using Trace_ELBO instead.')\n\n model_trace.compute_score_parts()\n guide_trace.pack_tensors()\n model_trace.pack_tensors(guide_trace.plate_to_symbol)\n return model_trace, guide_trace\n\n def _get_traces(self, model, guide, args, kwargs):\n \"\"\"\n Runs the guide and runs the model against the guide with\n the result packaged as a trace generator.\n \"\"\"\n if self.max_plate_nesting == float('inf'):\n self._guess_max_plate_nesting(model, guide, args, kwargs)\n if self.vectorize_particles:\n guide = self._vectorized_num_particles(guide)\n model = self._vectorized_num_particles(model)\n\n # Enable parallel enumeration over the vectorized guide and model.\n # The model allocates enumeration dimensions after (to the left of) the guide,\n # accomplished by preserving the _ENUM_ALLOCATOR state after the guide call.\n guide_enum = EnumMessenger(first_available_dim=-1 - self.max_plate_nesting)\n model_enum = EnumMessenger() # preserve _ENUM_ALLOCATOR state\n guide = guide_enum(guide)\n model = model_enum(model)\n\n q = queue.LifoQueue()\n guide = poutine.queue(guide, q,\n escape_fn=iter_discrete_escape,\n extend_fn=iter_discrete_extend)\n for i in range(1 if self.vectorize_particles else self.num_particles):\n q.put(poutine.Trace())\n while not q.empty():\n yield self._get_trace(model, guide, args, kwargs)\n\n def differentiable_loss(self, model, guide, *args, **kwargs):\n \"\"\"\n :returns: a differentiable estimate of the marginal log-likelihood\n :rtype: torch.Tensor\n :raises ValueError: if the ELBO is not differentiable (e.g. is\n identically zero)\n\n Computes a differentiable TMC estimate using ``num_particles`` many samples\n (particles). The result should be infinitely differentiable (as long\n as underlying derivatives have been implemented).\n \"\"\"\n elbo = 0.0\n for model_trace, guide_trace in self._get_traces(model, guide, args, kwargs):\n elbo_particle = _compute_tmc_estimate(model_trace, guide_trace)\n if is_identically_zero(elbo_particle):\n continue\n\n elbo = elbo + elbo_particle\n elbo = elbo / self.num_particles\n\n loss = -elbo\n warn_if_nan(loss, \"loss\")\n return loss\n\n def loss(self, model, guide, *args, **kwargs):\n with torch.no_grad():\n loss = self.differentiable_loss(model, guide, *args, **kwargs)\n if is_identically_zero(loss) or not loss.requires_grad:\n return torch_item(loss)\n return loss.item()\n\n def loss_and_grads(self, model, guide, *args, **kwargs):\n loss = self.differentiable_loss(model, guide, *args, **kwargs)\n if is_identically_zero(loss) or not loss.requires_grad:\n return torch_item(loss)\n loss.backward()\n return loss.item()\n", "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.ops.tensor_utils import safe_normalize\n\nfrom .reparam import Reparam\n\n\nclass ProjectedNormalReparam(Reparam):\n \"\"\"\n Reparametrizer for :class:`~pyro.distributions.ProjectedNormal` latent\n variables.\n\n This reparameterization works only for latent variables, not likelihoods.\n \"\"\"\n def __call__(self, name, fn, obs):\n fn, event_dim = self._unwrap(fn)\n assert isinstance(fn, dist.ProjectedNormal)\n assert obs is None, \"ProjectedNormalReparam does not support observe statements\"\n\n # Draw parameter-free noise.\n new_fn = dist.Normal(torch.zeros_like(fn.concentration), 1).to_event(1)\n x = pyro.sample(\"{}_normal\".format(name), self._wrap(new_fn, event_dim))\n\n # Differentiably transform.\n 
value = safe_normalize(x + fn.concentration)\n\n # Simulate a pyro.deterministic() site.\n new_fn = dist.Delta(value, event_dim=event_dim).mask(False)\n return new_fn, value\n", "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\n\nimport torch\n\n\ndef xavier_uniform(D_in, D_out):\n scale = math.sqrt(6.0 / float(D_in + D_out))\n noise = torch.rand(D_in, D_out)\n return 2.0 * scale * noise - scale\n\n\ndef adjoin_ones_vector(x):\n return torch.cat([x, torch.ones(x.shape[:-1] + (1,)).type_as(x)], dim=-1)\n\n\ndef adjoin_zeros_vector(x):\n return torch.cat([x, torch.zeros(x.shape[:-1] + (1,)).type_as(x)], dim=-1)\n", "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport bz2\nimport csv\nimport datetime\nimport logging\nimport multiprocessing\nimport os\nimport subprocess\nimport sys\nimport urllib\n\nimport torch\n\nfrom pyro.contrib.examples.util import _mkdir_p, get_data_directory\n\nDATA = get_data_directory(__file__)\n\n# https://www.bart.gov/about/reports/ridership\nSOURCE_DIR = \"http://64.111.127.166/origin-destination/\"\nSOURCE_FILES = [\n \"date-hour-soo-dest-2011.csv.gz\",\n \"date-hour-soo-dest-2012.csv.gz\",\n \"date-hour-soo-dest-2013.csv.gz\",\n \"date-hour-soo-dest-2014.csv.gz\",\n \"date-hour-soo-dest-2015.csv.gz\",\n \"date-hour-soo-dest-2016.csv.gz\",\n \"date-hour-soo-dest-2017.csv.gz\",\n \"date-hour-soo-dest-2018.csv.gz\",\n \"date-hour-soo-dest-2019.csv.gz\",\n]\nCACHE_URL = \"https://d2hg8soec8ck9v.cloudfront.net/datasets/bart_full.pkl.bz2\"\n\n\ndef _load_hourly_od(basename):\n filename = os.path.join(DATA, basename.replace(\".csv.gz\", \".pkl\"))\n if os.path.exists(filename):\n return filename\n\n # Download source files.\n gz_filename = os.path.join(DATA, basename)\n if not os.path.exists(gz_filename):\n url = SOURCE_DIR + basename\n logging.debug(\"downloading {}\".format(url))\n urllib.request.urlretrieve(url, gz_filename)\n csv_filename = gz_filename[:-3]\n assert csv_filename.endswith(\".csv\")\n if not os.path.exists(csv_filename):\n logging.debug(\"unzipping {}\".format(gz_filename))\n subprocess.check_call([\"gunzip\", \"-k\", gz_filename])\n assert os.path.exists(csv_filename)\n\n # Convert to PyTorch.\n logging.debug(\"converting {}\".format(csv_filename))\n start_date = datetime.datetime.strptime(\"2000-01-01\", \"%Y-%m-%d\")\n stations = {}\n num_rows = sum(1 for _ in open(csv_filename))\n logging.info(\"Formatting {} rows\".format(num_rows))\n rows = torch.empty((num_rows, 4), dtype=torch.long)\n with open(csv_filename) as f:\n for i, (date, hour, origin, destin, trip_count) in enumerate(csv.reader(f)):\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n date += datetime.timedelta(hours=int(hour))\n rows[i, 0] = int((date - start_date).total_seconds() / 3600)\n rows[i, 1] = stations.setdefault(origin, len(stations))\n rows[i, 2] = stations.setdefault(destin, len(stations))\n rows[i, 3] = int(trip_count)\n if i % 10000 == 0:\n sys.stderr.write(\".\")\n sys.stderr.flush()\n\n # Save data with metadata.\n dataset = {\n \"basename\": basename,\n \"start_date\": start_date,\n \"stations\": stations,\n \"rows\": rows,\n \"schema\": [\"time_hours\", \"origin\", \"destin\", \"trip_count\"],\n }\n dataset[\"rows\"]\n logging.debug(\"saving {}\".format(filename))\n torch.save(dataset, filename)\n return filename\n\n\ndef load_bart_od():\n \"\"\"\n Load a dataset of hourly origin-destination ridership counts for every pair\n of BART stations 
during the years 2011-2019.\n\n **Source** https://www.bart.gov/about/reports/ridership\n\n This downloads the dataset the first time it is called. On subsequent calls\n this reads from a local cached file ``.pkl.bz2``. This attempts to\n download a preprocessed compressed cached file maintained by the Pyro team.\n On cache hit this should be very fast. On cache miss this falls back to\n downloading the original data source and preprocessing the dataset,\n requiring about 350MB of file transfer, storing a few GB of temp files, and\n taking upwards of 30 minutes.\n\n :returns: a dataset, a dictionary with fields:\n\n - \"stations\": a list of strings of station names\n - \"start_date\": a :py:class:`datetime.datetime` for the first observation\n - \"counts\": a ``torch.FloatTensor`` of ridership counts, with shape\n ``(num_hours, len(stations), len(stations))``.\n \"\"\"\n _mkdir_p(DATA)\n filename = os.path.join(DATA, \"bart_full.pkl.bz2\")\n # Work around apparent bug in torch.load(), torch.save().\n pkl_file = filename.rsplit(\".\", 1)[0]\n if not os.path.exists(pkl_file):\n try:\n urllib.request.urlretrieve(CACHE_URL, filename)\n logging.debug(\"cache hit, uncompressing\")\n with bz2.BZ2File(filename) as src, open(filename[:-4], \"wb\") as dst:\n dst.write(src.read())\n except urllib.error.HTTPError:\n logging.debug(\"cache miss, preprocessing from scratch\")\n if os.path.exists(pkl_file):\n return torch.load(pkl_file)\n\n filenames = multiprocessing.Pool(len(SOURCE_FILES)).map(_load_hourly_od, SOURCE_FILES)\n datasets = list(map(torch.load, filenames))\n\n stations = sorted(set().union(*(d[\"stations\"].keys() for d in datasets)))\n min_time = min(int(d[\"rows\"][:, 0].min()) for d in datasets)\n max_time = max(int(d[\"rows\"][:, 0].max()) for d in datasets)\n num_rows = max_time - min_time + 1\n start_date = datasets[0][\"start_date\"] + datetime.timedelta(hours=min_time)\n logging.info(\"Loaded data from {} stations, {} hours\"\n .format(len(stations), num_rows))\n\n result = torch.zeros(num_rows, len(stations), len(stations))\n for dataset in datasets:\n part_stations = sorted(dataset[\"stations\"], key=dataset[\"stations\"].__getitem__)\n part_to_whole = torch.tensor(list(map(stations.index, part_stations)))\n time = dataset[\"rows\"][:, 0] - min_time\n origin = part_to_whole[dataset[\"rows\"][:, 1]]\n destin = part_to_whole[dataset[\"rows\"][:, 2]]\n count = dataset[\"rows\"][:, 3].float()\n result[time, origin, destin] = count\n dataset.clear()\n logging.info(\"Loaded {} shaped data of mean {:0.3g}\"\n .format(result.shape, result.mean()))\n\n dataset = {\n \"stations\": stations,\n \"start_date\": start_date,\n \"counts\": result,\n }\n torch.save(dataset, pkl_file)\n subprocess.check_call([\"bzip2\", \"-k\", pkl_file])\n assert os.path.exists(filename)\n return dataset\n\n\ndef load_fake_od():\n \"\"\"\n Create a tiny synthetic dataset for smoke testing.\n \"\"\"\n dataset = {\n \"stations\": [\"12TH\", \"EMBR\", \"SFIA\"],\n \"start_date\": datetime.datetime.strptime(\"2000-01-01\", \"%Y-%m-%d\"),\n \"counts\": torch.distributions.Poisson(100).sample([24 * 7 * 8, 3, 3]),\n }\n return dataset\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"BART data preprocessor\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n args = parser.parse_args()\n\n logging.basicConfig(format='%(relativeCreated) 9d %(message)s',\n level=logging.DEBUG if args.verbose else logging.INFO)\n load_bart_od()\n", "# Copyright Contributors to the Pyro 
project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport pytest\nimport torch\nfrom torch.autograd import grad\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro import poutine\nfrom pyro.infer.reparam import SplitReparam\nfrom tests.common import assert_close\n\n\[email protected](\"event_shape,splits,dim\", [\n ((6,), [2, 1, 3], -1),\n ((2, 5,), [2, 3], -1),\n ((4, 2), [1, 3], -2),\n ((2, 3, 1), [1, 2], -2),\n], ids=str)\[email protected](\"batch_shape\", [(), (4,), (3, 2)], ids=str)\ndef test_normal(batch_shape, event_shape, splits, dim):\n shape = batch_shape + event_shape\n loc = torch.empty(shape).uniform_(-1., 1.).requires_grad_()\n scale = torch.empty(shape).uniform_(0.5, 1.5).requires_grad_()\n\n def model():\n with pyro.plate_stack(\"plates\", batch_shape):\n pyro.sample(\"x\",\n dist.Normal(loc, scale)\n .to_event(len(event_shape)))\n\n # Run without reparam.\n trace = poutine.trace(model).get_trace()\n expected_value = trace.nodes[\"x\"][\"value\"]\n expected_log_prob = trace.log_prob_sum()\n expected_grads = grad(expected_log_prob, [loc, scale], create_graph=True)\n\n # Run with reparam.\n split_values = {\n \"x_split_{}\".format(i): xi\n for i, xi in enumerate(expected_value.split(splits, dim))}\n rep = SplitReparam(splits, dim)\n reparam_model = poutine.reparam(model, {\"x\": rep})\n reparam_model = poutine.condition(reparam_model, split_values)\n trace = poutine.trace(reparam_model).get_trace()\n assert all(name in trace.nodes for name in split_values)\n assert isinstance(trace.nodes[\"x\"][\"fn\"], dist.Delta)\n assert trace.nodes[\"x\"][\"fn\"].batch_shape == batch_shape\n assert trace.nodes[\"x\"][\"fn\"].event_shape == event_shape\n\n # Check values.\n actual_value = trace.nodes[\"x\"][\"value\"]\n assert_close(actual_value, expected_value, atol=0.1)\n\n # Check log prob.\n actual_log_prob = trace.log_prob_sum()\n assert_close(actual_log_prob, expected_log_prob)\n actual_grads = grad(actual_log_prob, [loc, scale], create_graph=True)\n assert_close(actual_grads, expected_grads)\n" ]
[ [ "torch.exp", "torch.distributions.utils._sum_rightmost", "torch.cat" ], [ "torch.no_grad" ], [ "torch.zeros_like" ], [ "torch.ones", "torch.rand", "torch.zeros" ], [ "torch.distributions.Poisson", "torch.load", "torch.empty", "torch.save" ], [ "torch.autograd.grad", "torch.empty" ] ]
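The affine_coupling.py docstrings above describe the RealNVP coupling transform (y1 = x1; y2 = mean + exp(log_scale) * x2, with mean and log_scale computed from x1). A self-contained PyTorch sketch of that forward/inverse pair on the last dimension; `TinyCoupling` is a hypothetical toy module for illustration, not Pyro API:

import torch
import torch.nn as nn

class TinyCoupling(nn.Module):
    def __init__(self, split_dim, total_dim, hidden=64):
        super().__init__()
        self.split_dim = split_dim
        out = total_dim - split_dim
        # One net emits both mean and log_scale, like the DenseNN hypernet above.
        self.net = nn.Sequential(nn.Linear(split_dim, hidden), nn.ReLU(),
                                 nn.Linear(hidden, 2 * out))

    def forward(self, x):
        x1, x2 = x[..., :self.split_dim], x[..., self.split_dim:]
        mean, log_scale = self.net(x1).chunk(2, dim=-1)
        y2 = torch.exp(log_scale) * x2 + mean
        # The Jacobian is triangular, so log|det J| = sum(log_scale).
        return torch.cat([x1, y2], dim=-1), log_scale.sum(-1)

    def inverse(self, y):
        y1, y2 = y[..., :self.split_dim], y[..., self.split_dim:]
        mean, log_scale = self.net(y1).chunk(2, dim=-1)  # y1 == x1, so params match
        x2 = (y2 - mean) * torch.exp(-log_scale)
        return torch.cat([y1, x2], dim=-1)

x = torch.randn(3, 10)
f = TinyCoupling(6, 10)
y, _ = f(x)
assert torch.allclose(f.inverse(y), x, atol=1e-5)  # round-trips exactly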
BXuan694/SOLO-pytorch
[ "aef0ac47ce6989f6633fe4f71070bd6944c39abb" ]
[ "train.py" ]
[ "from data.config import cfg, process_funcs_dict\nfrom data.coco import CocoDataset\nfrom data.loader import build_dataloader\n#from modules.solov1 import SOLOV1 as solo\n# from modules.solov2 import SOLOV2 as solo\nfrom modules.solov1d import SOLOV1 as solo\nimport time\nimport torch\nimport numpy as np\n\n# balance gradients via global-norm clipping\ndef clip_grads(params_):\n params_ = list(filter(lambda p: p.requires_grad and p.grad is not None, params_))\n if len(params_) > 0:\n return torch.nn.utils.clip_grad.clip_grad_norm_(params_, max_norm=35, norm_type=2)\n\n# set a new learning rate\ndef set_lr(optimizer_, newLr_):\n for paramGroup_ in optimizer_.param_groups:\n paramGroup_['lr'] = newLr_\n\n# set requires_grad to False\ndef gradinator(x_):\n x_.requires_grad = False\n return x_\n\n# build the data-processing pipeline\ndef build_process_pipeline(pipelineConfgs_):\n assert isinstance(pipelineConfgs_, list)\n process_pipelines = []\n for pConfig_ in pipelineConfgs_:\n assert isinstance(pConfig_, dict) and 'type' in pConfig_\n args = pConfig_.copy()\n obj_type = args.pop('type')\n if isinstance(obj_type, str):\n process_pipelines.append(process_funcs_dict[obj_type](**args))\n return process_pipelines\n\n# compute the warmup learning rate\ndef get_warmup_lr(curIter_, totalIters_, baseLr_, warmupRatio_, warmUpOption='linear'):\n if warmUpOption == 'constant':\n warmupLr = baseLr_ * warmupRatio_ \n elif warmUpOption == 'linear':\n k = (1 - curIter_ / totalIters_) * (1 - warmupRatio_)\n warmupLr = baseLr_ * (1 - k)\n elif warmUpOption == 'exp':\n k = warmupRatio_**(1 - curIter_ / totalIters_)\n warmupLr = baseLr_ * k\n return warmupLr\n\n\ndef train(globalStartEpoch, totalEpoches):\n\n # train process pipelines func\n trainTransformsPiplines = build_process_pipeline(cfg.train_pipeline)\n print(trainTransformsPiplines)\n # build dataset\n casiadata = CocoDataset(ann_file=cfg.dataset.train_info,\n pipeline = trainTransformsPiplines,\n img_prefix = cfg.dataset.trainimg_prefix,\n data_root=cfg.dataset.train_prefix)\n torchdataLoader = build_dataloader(casiadata, cfg.imgs_per_gpu, cfg.workers_per_gpu, num_gpus=cfg.num_gpus, shuffle=True)\n\n if cfg.resume_from is None:\n model = solo(cfg, pretrained=None, mode='train')\n print('cfg.resume_from is None')\n else:\n model = solo(cfg, pretrained=cfg.resume_from, mode='train')\n model = model.cuda()\n model = model.train()\n\n lrOri = cfg.optimizer['lr']\n lrStages = cfg.lr_config[\"step\"]\n lrList = np.full(totalEpoches, lrOri)\n for ii in range(len(lrStages)):\n lrList[lrStages[ii]:] *= 0.1\n print(\"starting epoch: \", globalStartEpoch)\n print(\"lr adapting stages: \", end=' ')\n for ii in range(len(lrStages)):\n print(cfg.lr_config[\"step\"][ii], end=\" \")\n print(\"\\ntotal training epochs: \", totalEpoches)\n\n optimizer_config = cfg.optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=optimizer_config['lr'], momentum=optimizer_config['momentum'], weight_decay=optimizer_config['weight_decay'])\n\n batchSize = cfg.imgs_per_gpu * cfg.num_gpus\n epochSize = len(casiadata) // batchSize \n # num of trained epochs, idx of epoch to start\n pastEpoches = globalStartEpoch\n # num of trained iters, idx of iter to start\n pastIters = (globalStartEpoch-1) * epochSize\n # num of remaining epochs\n leftEpoches = totalEpoches - pastEpoches + 1\n # num of remaining iters\n leftIters = leftEpoches * epochSize\n\n print('##### begin train ######')\n currentIter = 0 \n \n for epoch in range(leftEpoches):\n\n currentEpoch = epoch + pastEpoches\n # stop training\n if currentEpoch >= totalEpoches:\n print(\"Current epoch is larger than the configured total epochs, 
training stops.\")\n return\n\n # for logging only\n loss_sum = 0.0 \n loss_ins = 0.0 \n loss_cate = 0.0\n \n for j, data in enumerate(torchdataLoader):\n iterStartTime = time.time()\n\n if cfg.lr_config['warmup'] is not None and pastIters < cfg.lr_config['warmup_iters']:\n cur_lr = get_warmup_lr(pastIters, cfg.lr_config['warmup_iters'],\n optimizer_config['lr'], cfg.lr_config['warmup_ratio'],\n cfg.lr_config['warmup'])\n else:\n cur_lr = lrList[currentEpoch]\n set_lr(optimizer, cur_lr)\n\n imgs = gradinator(data['img'].data[0].cuda())\n img_meta = data['img_metas'].data[0] # original image meta info\n gt_bboxes = []\n for bbox in data['gt_bboxes'].data[0]:\n bbox = gradinator(bbox.cuda())\n gt_bboxes.append(bbox)\n \n gt_masks = data['gt_masks'].data[0] #cpu numpy data\n \n gt_labels = []\n for label in data['gt_labels'].data[0]:\n label = gradinator(label.cuda())\n gt_labels.append(label)\n\n\n loss = model.forward(img=imgs,\n img_meta=img_meta,\n gt_bboxes=gt_bboxes,\n gt_labels=gt_labels,\n gt_masks=gt_masks)\n\n\n losses = loss['loss_ins'] + loss['loss_cate']\n loss_sum += losses.cpu().item()\n loss_ins += loss['loss_ins'].cpu().item()\n loss_cate += loss['loss_cate'].cpu().item()\n\n optimizer.zero_grad()\n losses.backward()\n\n if torch.isfinite(losses).item():\n grad_norm = clip_grads(model.parameters()) # balance gradients\n optimizer.step()\n else:\n raise NotImplementedError(\"loss is not finite, can't backward!\")\n\n leftIters -= 1\n pastIters += 1\n currentIter += 1\n\n showIters = 10\n if j % showIters == 0 and j != 0:\n iterLastTime = time.time() - iterStartTime\n left_seconds = iterLastTime * leftIters\n left_minutes = left_seconds / 60.0\n left_hours = left_minutes / 60.0\n left_days = left_hours // 24\n left_hours = left_hours % 24\n\n out_str = 'epoch:['+str(currentEpoch)+']/['+str(totalEpoches)+'],' # end of epoch of idx currentEpoch\n out_str = out_str + '['+str(j)+']/'+str(epochSize)+'], left_time: ' + str(left_days)+'days '+format(left_hours,'.2f')+'hours,'\n print(out_str, \"loss:\", format(loss_sum/showIters,'.4f'), 'loss_ins:', format(loss_ins/showIters,'.4f'), \"loss_cate:\", format(loss_cate/showIters,'.4f'), \"lr:\", format(cur_lr,'.8f'))\n loss_sum = 0.0 \n loss_ins = 0.0 \n loss_cate = 0.0\n\n leftEpoches -= 1\n\n save_name = \"./weights/solo1/\" + cfg.name + \"_epoch_\" + str(currentEpoch) + \".pth\"\n model.save_weights(save_name) \n\nif __name__ == '__main__':\n train(globalStartEpoch=cfg.epoch_iters_start, totalEpoches=cfg.total_epoch) # set the starting epoch for this run\n" ]
[ [ "torch.isfinite", "torch.nn.utils.clip_grad.clip_grad_norm_", "numpy.full" ] ]
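get_warmup_lr in train.py above ramps the learning rate toward its base value with constant, linear, or exponential schedules. A standalone sketch of those same formulas; the numeric values below are illustrative, not taken from the SOLO config:

def warmup_lr(cur_iter, total_iters, base_lr, warmup_ratio, mode='linear'):
    if mode == 'constant':
        return base_lr * warmup_ratio
    if mode == 'linear':
        # Interpolates from base_lr * warmup_ratio (at iter 0) up to base_lr.
        k = (1 - cur_iter / total_iters) * (1 - warmup_ratio)
        return base_lr * (1 - k)
    if mode == 'exp':
        return base_lr * warmup_ratio ** (1 - cur_iter / total_iters)
    raise ValueError('unknown warmup mode: %s' % mode)

# The linear ramp starts at base_lr * warmup_ratio and reaches base_lr at total_iters:
assert abs(warmup_lr(0, 500, 0.01, 1.0 / 3) - 0.01 / 3) < 1e-12
assert abs(warmup_lr(500, 500, 0.01, 1.0 / 3) - 0.01) < 1e-12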
Halo9Pan/dive-keras
[ "f1e9c76675981ee6683f54a3ce569212d551d12d", "f1e9c76675981ee6683f54a3ce569212d551d12d", "7d4c5572fa3a9fc2542a1314d06c555f67575cb0", "f1e9c76675981ee6683f54a3ce569212d551d12d", "7d4c5572fa3a9fc2542a1314d06c555f67575cb0", "7d4c5572fa3a9fc2542a1314d06c555f67575cb0", "7d4c5572fa3a9fc2542a1314d06c555f67575cb0", "7d4c5572fa3a9fc2542a1314d06c555f67575cb0", "7d4c5572fa3a9fc2542a1314d06c555f67575cb0", "7d4c5572fa3a9fc2542a1314d06c555f67575cb0", "f1e9c76675981ee6683f54a3ce569212d551d12d" ]
[ "keras/optimizer_v2/adamax.py", "keras/distribute/custom_training_loop_models_test.py", "keras/engine/base_preprocessing_layer_test.py", "keras/mixed_precision/device_compatibility_check_test.py", "keras/layers/preprocessing/benchmarks/category_vocab_list_indicator_varlen_benchmark.py", "keras/layers/preprocessing/benchmarks/image_preproc_benchmark.py", "keras/layers/preprocessing/category_encoding_test.py", "keras/initializers/initializers_test.py", "keras/layers/preprocessing/benchmarks/category_vocab_list_varlen_benchmark.py", "keras/integration_test/preprocessing_test_utils.py", "keras/distribute/keras_image_model_correctness_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Adamax optimizer implementation.\"\"\"\n\nimport tensorflow.compat.v2 as tf\nfrom keras import backend_config\nfrom keras.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.optimizers.Adamax')\nclass Adamax(optimizer_v2.OptimizerV2):\n \"\"\"Optimizer that implements the Adamax algorithm.\n\n It is a variant of Adam based on the infinity norm.\n Default parameters follow those provided in the paper.\n Adamax is sometimes superior to Adam, especially in models with embeddings.\n\n Initialization:\n\n ```python\n m = 0 # Initialize the 1st moment vector\n v = 0 # Initialize the exponentially weighted infinity norm\n t = 0 # Initialize timestep\n ```\n\n The update rule for parameter `w` with gradient `g` is\n described at the end of section 7.1 of the paper:\n\n ```python\n t += 1\n m = beta1 * m + (1 - beta1) * g\n v = max(beta2 * v, abs(g))\n current_lr = learning_rate / (1 - beta1 ** t)\n w = w - current_lr * m / (v + epsilon)\n ```\n\n Similarly to `Adam`, the epsilon is added for numerical stability\n (especially to get rid of division by zero when `v_t == 0`).\n\n In contrast to `Adam`, the sparse implementation of this algorithm\n (used when the gradient is an IndexedSlices object, typically because of\n `tf.gather` or an embedding lookup in the forward pass) only updates\n variable slices and corresponding `m_t`, `v_t` terms when that part of\n the variable was used in the forward pass. This means that the sparse\n behavior is in contrast to the dense behavior (similar to some momentum\n implementations which ignore momentum unless a variable slice was actually\n used).\n\n Args:\n learning_rate: A `Tensor`, floating point value, or a schedule that is a\n `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate.\n beta_1: A float value or a constant float tensor. The exponential decay\n rate for the 1st moment estimates.\n beta_2: A float value or a constant float tensor. The exponential decay\n rate for the exponentially weighted infinity norm.\n epsilon: A small constant for numerical stability.\n name: Optional name for the operations created when applying gradients.\n Defaults to `\"Adamax\"`.\n **kwargs: Keyword arguments. 
Allowed to be one of\n `\"clipnorm\"` or `\"clipvalue\"`.\n `\"clipnorm\"` (float) clips gradients by norm; `\"clipvalue\"` (float) clips\n gradients by value.\n\n Reference:\n - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)\n \"\"\"\n\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(self,\n learning_rate=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-7,\n name='Adamax',\n **kwargs):\n super(Adamax, self).__init__(name, **kwargs)\n self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))\n self._set_hyper('decay', self._initial_decay)\n self._set_hyper('beta_1', beta_1)\n self._set_hyper('beta_2', beta_2)\n self.epsilon = epsilon or backend_config.epsilon()\n\n def _create_slots(self, var_list):\n # Separate for-loops to respect the ordering of slot variables from v1.\n for var in var_list:\n self.add_slot(var, 'm') # Create slots for the first moments.\n for var in var_list:\n self.add_slot(var, 'v') # Create slots for the second moments.\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(Adamax, self)._prepare_local(var_device, var_dtype, apply_state)\n\n local_step = tf.cast(self.iterations + 1, var_dtype)\n beta_1_t = tf.identity(self._get_hyper('beta_1', var_dtype))\n beta_2_t = tf.identity(self._get_hyper('beta_2', var_dtype))\n beta_1_power = tf.pow(beta_1_t, local_step)\n lr_t = apply_state[(var_device, var_dtype)]['lr_t']\n\n apply_state[(var_device, var_dtype)].update(\n dict(\n neg_scaled_lr=-lr_t / (1 - beta_1_power),\n epsilon=tf.convert_to_tensor(\n self.epsilon, var_dtype),\n beta_1_t=beta_1_t,\n beta_1_power=beta_1_power,\n one_minus_beta_1_t=1 - beta_1_t,\n beta_2_t=beta_2_t,\n zero=tf.zeros((), dtype=tf.int64)))\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n m = self.get_slot(var, 'm')\n v = self.get_slot(var, 'v')\n return tf.raw_ops.ResourceApplyAdaMax(\n var=var.handle,\n m=m.handle,\n v=v.handle,\n beta1_power=coefficients['beta_1_power'],\n lr=coefficients['lr_t'],\n beta1=coefficients['beta_1_t'],\n beta2=coefficients['beta_2_t'],\n epsilon=coefficients['epsilon'],\n grad=grad,\n use_locking=self._use_locking)\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n # m_t = beta1 * m + (1 - beta1) * g_t\n m = self.get_slot(var, 'm')\n m_slice = tf.gather(m, indices, axis=coefficients['zero'])\n m_t_slice = (m_slice * coefficients['beta_1_t'] +\n grad * coefficients['one_minus_beta_1_t'])\n with tf.control_dependencies([m_t_slice]):\n m_t = self._resource_scatter_update(m, indices, m_t_slice)\n\n # u_t = max(beta2 * u, abs(g_t))\n v = self.get_slot(var, 'v')\n v_slice = tf.gather(v, indices, axis=coefficients['zero'])\n v_t_slice = tf.maximum(v_slice * coefficients['beta_2_t'],\n tf.abs(grad))\n with tf.control_dependencies([v_t_slice]):\n v_t = self._resource_scatter_update(v, indices, v_t_slice)\n # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t\n var_slice = coefficients['neg_scaled_lr'] * (\n m_t_slice / (v_t_slice + coefficients['epsilon']))\n with tf.control_dependencies([var_slice]):\n var_update = self._resource_scatter_add(var, indices, var_slice)\n return tf.group(*[var_update, m_t, v_t])\n\n def 
get_config(self):\n config = super(Adamax, self).get_config()\n config.update({\n 'learning_rate': self._serialize_hyperparameter('learning_rate'),\n 'decay': self._initial_decay,\n 'beta_1': self._serialize_hyperparameter('beta_1'),\n 'beta_2': self._serialize_hyperparameter('beta_2'),\n 'epsilon': self.epsilon,\n })\n return config\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for custom training loops.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport keras\nfrom keras.distribute import strategy_combinations\nfrom keras.layers import core\nfrom keras.optimizer_v2 import gradient_descent\n\n\nclass CustomModel(tf.Module):\n\n def __init__(self, name=None):\n super(CustomModel, self).__init__(name=name)\n with self.name_scope:\n self._layers = [\n keras.layers.Dense(4, name=\"dense\"),\n ]\n\n @tf.Module.with_name_scope\n def __call__(self, x):\n for layer in self._layers:\n x = layer(x)\n return x\n\n\n@tf.__internal__.distribute.combinations.generate(\n tf.__internal__.test.combinations.combine(\n distribution=(strategy_combinations.all_strategies +\n strategy_combinations.multiworker_strategies),\n mode=[\"eager\"]\n )\n )\nclass KerasModelsTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_single_keras_layer_run(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = keras.layers.Dense(4, name=\"dense\")\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n return grads\n\n outputs = distribution.run(\n step_fn, args=(next(iterator),))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n def test_keras_model_optimizer_run(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = _get_model()\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @tf.function\n def train_step(replicated_inputs):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n outputs = distribution.run(step_fn, args=(replicated_inputs,))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n for x in input_iterator:\n train_step(x)\n\n def test_keras_subclass_model_optimizer_run(self, distribution):\n def 
get_subclass_model():\n\n class KerasSubclassModel(keras.Model):\n\n def __init__(self):\n super(KerasSubclassModel, self).__init__()\n self.l = keras.layers.Dense(4, name=\"dense\")\n\n def call(self, x):\n return self.l(x)\n\n return KerasSubclassModel()\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = get_subclass_model()\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n outputs = distribution.run(step_fn, args=(next(iterator),))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n def test_keras_model_optimizer_run_loop(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = _get_model()\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n for _ in tf.range(4):\n distribution.run(step_fn, args=(next(iterator),))\n\n train_step(input_iterator)\n\n def test_batch_norm_with_dynamic_batch(self, distribution):\n inputs = np.zeros((10, 3, 3, 3), dtype=np.float32)\n targets = np.zeros((10, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat()\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n x = keras.layers.Input(shape=(3, 3, 3), name=\"input\")\n y = keras.layers.BatchNormalization(fused=True, name=\"bn\")(x)\n y = keras.layers.Flatten()(y)\n y = keras.layers.Dense(4, name=\"dense\")(y)\n model = keras.Model(x, y)\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images, training=True)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n distribution.run(step_fn, args=(next(iterator),))\n\n train_step(input_iterator)\n\n def test_lstm(self, distribution):\n\n batch_size = 32\n\n def create_lstm_model():\n model = keras.models.Sequential()\n # We only have LSTM variables so we can detect no gradient issues more\n # easily.\n model.add(\n keras.layers.LSTM(1, return_sequences=False, input_shape=(10, 1)))\n return model\n\n def create_lstm_data():\n seq_length = 10\n\n x_train = np.random.rand(batch_size, seq_length, 1).astype(\"float32\")\n y_train = np.random.rand(batch_size, 1).astype(\"float32\")\n return x_train, y_train\n\n x, y = create_lstm_data()\n dataset = tf.data.Dataset.from_tensor_slices((x, y))\n dataset = dataset.batch(batch_size)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with 
distribution.scope():\n model = create_lstm_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD()\n\n @tf.function\n def train_step(input_iterator):\n\n def step_fn(inputs):\n inps, targ = inputs\n with tf.GradientTape() as tape:\n output = model(inps)\n loss = tf.reduce_mean(\n keras.losses.binary_crossentropy(\n y_true=targ, y_pred=output, from_logits=False))\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n outputs = distribution.run(\n step_fn, args=(next(input_iterator),))\n return distribution.experimental_local_results(outputs)\n\n train_step(input_iterator)\n\n def test_nested_tf_functions(self, distribution):\n # The test builds two computations with keras layers, one with nested\n # tf.function, and the other without nested tf.function. We run these\n # computations independently on the model with same weights, and make sure\n # the variables are still the same after one training step.\n\n inputs = np.random.random((10, 3)).astype(np.float32)\n targets = np.ones((10, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).repeat()\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n def get_model():\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n with distribution.scope():\n model = get_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, momentum=0.01)\n weights_file = os.path.join(self.get_temp_dir(), \".h5\")\n model.save_weights(weights_file)\n model2 = get_model()\n model2.load_weights(weights_file)\n\n # Make sure model and model2 variables are in sync when initialized.\n for model_v, model2_v in zip(model.variables, model2.variables):\n self.assertAllClose(model_v.numpy(), model2_v.numpy())\n\n def compute_loss(images, targets):\n outputs = model(images)\n return keras.losses.mean_squared_error(targets, outputs)\n\n @tf.function\n def train_step_without_nested_tf_function(inputs):\n\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n loss = compute_loss(images, targets)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n\n distribution.run(step_fn, args=(inputs,))\n\n @tf.function\n def compute_loss2(images, targets):\n outputs = model2(images)\n return keras.losses.mean_squared_error(targets, outputs)\n\n @tf.function\n def train_step_with_nested_tf_function(inputs):\n\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n loss = compute_loss2(images, targets)\n grads = tape.gradient(loss, model2.variables)\n optimizer.apply_gradients(zip(grads, model2.variables))\n\n distribution.run(step_fn, args=(inputs,))\n\n inputs = next(input_iterator)\n\n train_step_without_nested_tf_function(inputs)\n train_step_with_nested_tf_function(inputs)\n\n # Make sure model and model2 variables are still in sync.\n for model_v, model2_v in zip(model.variables, model2.variables):\n self.assertAllClose(model_v.numpy(), model2_v.numpy())\n\n def test_nested_tf_functions_with_control_flow(self, distribution):\n inputs = np.random.random((10, 3)).astype(np.float32)\n targets = np.ones((10, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).repeat()\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n 
def get_model():\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n with distribution.scope():\n model = get_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, momentum=0.01)\n\n @tf.function\n def train_step(iterator):\n\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n\n distribution.run(step_fn, args=(next(iterator),))\n\n @tf.function\n def train_steps(iterator):\n for _ in tf.range(10):\n train_step(iterator)\n\n train_steps(input_iterator)\n\n def test_nested_tf_functions_with_tf_function_passing_to_strategy_run(\n self, distribution):\n self.skipTest(\"b/190608193\")\n\n inputs = np.random.random((10, 3)).astype(np.float32)\n targets = np.ones((10, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).repeat()\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n def get_model():\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n with distribution.scope():\n model = get_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, momentum=0.01)\n\n @tf.function\n def compute_loss(images, targets):\n outputs = model(images)\n return keras.losses.mean_squared_error(targets, outputs)\n\n @tf.function\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n loss = compute_loss(images, targets)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n\n inputs = next(input_iterator)\n distribution.run(step_fn, args=(inputs,))\n\n def test_customized_tf_module_run(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = CustomModel()\n\n @tf.function\n def train_step(iterator):\n\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n return grads\n\n outputs = distribution.run(\n step_fn, args=(next(iterator),))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n def test_reduce_loss(self, distribution):\n inputs = np.zeros((10, 4), dtype=np.float32)\n targets = np.zeros((10, 1), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n x = keras.layers.Input(shape=(4), name=\"input\")\n y = keras.layers.Dense(3, name=\"dense\")(x)\n model = keras.Model(x, y)\n\n @tf.function\n def train_step(iterator):\n\n def step_fn(inputs):\n images, targets = inputs\n outputs = model(images)\n loss = keras.losses.sparse_categorical_crossentropy(targets, outputs)\n return loss\n\n return distribution.run(step_fn, args=(next(iterator),))\n\n loss = train_step(input_iterator)\n loss = distribution.reduce(tf.distribute.ReduceOp.MEAN, loss, axis=0)\n\n def test_variable_run_argument(self, distribution):\n # 
Test that variables passed to run() remain variables. Previous behavior\n # in TPUStrategy was to cast to Tensor.\n\n with distribution.scope():\n optimizer = gradient_descent.SGD(0.1)\n net = core.Dense(1, trainable=True)\n dataset = tf.data.Dataset.from_tensors([[1.]])\n dataset = dataset.repeat()\n dataset = dataset.batch(2, drop_remainder=True)\n\n def replica_step(trainable_variables, features):\n\n with tf.GradientTape() as tape:\n net_out = net(features[0], training=True)\n loss = (net_out - 1.0) * (net_out - 1.0)\n gradients = tape.gradient(loss, trainable_variables)\n optimizer.apply_gradients(zip(gradients, trainable_variables))\n return loss\n\n @tf.function\n def step(features):\n per_replica_losses = distribution.run(\n replica_step,\n (net.trainable_variables, features),\n )\n loss = distribution.reduce(\n tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)\n return loss\n\n step(next(iter(dataset)))\n\n\nclass KerasModelsXLATest(tf.test.TestCase, parameterized.TestCase):\n\n @tf.__internal__.distribute.combinations.generate(\n tf.__internal__.test.combinations.combine(\n distribution=strategy_combinations.tpu_strategies, mode=[\"eager\"]))\n def test_tf_function_jit_compile(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n class CustomDense(keras.layers.Layer):\n\n def __init__(self, num_outputs):\n super(CustomDense, self).__init__()\n self.num_outputs = num_outputs\n\n def build(self, input_shape):\n self.kernel = self.add_variable(\n \"kernel\", shape=[int(input_shape[-1]), self.num_outputs])\n\n @tf.function(jit_compile=True)\n def call(self, inputs):\n return tf.matmul(inputs, self.kernel)\n\n with distribution.scope():\n x = keras.layers.Input(shape=(3,))\n y = CustomDense(4)(x)\n model = keras.Model(x, y)\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n return grads\n\n outputs = distribution.run(\n step_fn, args=(next(iterator),))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n\ndef _get_dataset():\n inputs = np.zeros((31, 3), dtype=np.float32)\n targets = np.zeros((31, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.batch(10)\n return dataset\n\n\ndef _get_model():\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n\nif __name__ == \"__main__\":\n tf.__internal__.distribute.multi_process_runner.test_main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras' base preprocessing layer.\"\"\"\n\nimport os\n\nimport keras\nfrom keras import keras_parameterized\nfrom keras import testing_utils\nfrom keras.engine import base_preprocessing_layer\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\n# Define a test-only implementation of BasePreprocessingLayer to validate\n# its correctness directly.\nclass AddingPreprocessingLayer(base_preprocessing_layer.PreprocessingLayer):\n\n def build(self, input_shape):\n super(AddingPreprocessingLayer, self).build(input_shape)\n self.sum = tf.Variable(0., dtype=tf.float32)\n\n def update_state(self, data):\n self.sum.assign_add(tf.reduce_sum(tf.cast(data, tf.float32)))\n\n def reset_state(self): # pylint: disable=method-hidden\n self.sum.assign(0.)\n\n def set_total(self, sum_value):\n \"\"\"This is an example of how a subclass would implement a direct setter.\n\n Args:\n sum_value: The total to set.\n \"\"\"\n self.sum.assign(sum_value)\n\n def call(self, inputs):\n return inputs + self.sum\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_v1=True)\nclass PreprocessingLayerTest(keras_parameterized.TestCase):\n\n def test_adapt_bad_input_fails(self):\n \"\"\"Test that non-Dataset/Numpy inputs cause a reasonable error.\"\"\"\n input_dataset = {\"foo\": 0}\n\n layer = AddingPreprocessingLayer()\n if tf.executing_eagerly():\n with self.assertRaisesRegex(ValueError, \"Failed to find data adapter\"):\n layer.adapt(input_dataset)\n else:\n with self.assertRaisesRegex(ValueError, \"requires a\"):\n layer.adapt(input_dataset)\n\n def test_adapt_infinite_dataset_fails(self):\n \"\"\"Test that preproc layers fail if an infinite dataset is passed.\"\"\"\n input_dataset = tf.data.Dataset.from_tensor_slices(\n np.array([[1], [2], [3], [4], [5], [0]])).repeat()\n\n layer = AddingPreprocessingLayer()\n if tf.executing_eagerly():\n with self.assertRaisesRegex(ValueError, \"infinite dataset\"):\n layer.adapt(input_dataset)\n else:\n with self.assertRaisesRegex(ValueError,\n \".*infinite number of elements.*\"):\n layer.adapt(input_dataset)\n\n def test_setter_update(self):\n \"\"\"Test the prototyped setter method.\"\"\"\n input_data = keras.Input(shape=(1,))\n layer = AddingPreprocessingLayer()\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n layer.set_total(15)\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n def test_pre_build_adapt_update_numpy(self):\n \"\"\"Test that preproc layers can adapt() before build() is called.\"\"\"\n input_dataset = np.array([1, 2, 3, 4, 5])\n\n layer = AddingPreprocessingLayer()\n layer.adapt(input_dataset)\n\n input_data = keras.Input(shape=(1,))\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n 
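# adapt() summed the dataset values (1+2+3+4+5 = 15), so the layer now\n    # adds 15 to every input.\n    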
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n  def test_post_build_adapt_update_numpy(self):\n    \"\"\"Test that preproc layers can adapt() after build() is called.\"\"\"\n    input_dataset = np.array([1, 2, 3, 4, 5])\n\n    input_data = keras.Input(shape=(1,))\n    layer = AddingPreprocessingLayer()\n    output = layer(input_data)\n    model = keras.Model(input_data, output)\n    model._run_eagerly = testing_utils.should_run_eagerly()\n\n    layer.adapt(input_dataset)\n\n    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n  def test_pre_build_adapt_update_dataset(self):\n    \"\"\"Test that preproc layers can adapt() before build() is called.\"\"\"\n    input_dataset = tf.data.Dataset.from_tensor_slices(\n        np.array([[1], [2], [3], [4], [5], [0]]))\n\n    layer = AddingPreprocessingLayer()\n    layer.adapt(input_dataset)\n\n    input_data = keras.Input(shape=(1,))\n    output = layer(input_data)\n    model = keras.Model(input_data, output)\n    model._run_eagerly = testing_utils.should_run_eagerly()\n\n    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n  def test_post_build_adapt_update_dataset(self):\n    \"\"\"Test that preproc layers can adapt() after build() is called.\"\"\"\n    input_dataset = tf.data.Dataset.from_tensor_slices(\n        np.array([[1], [2], [3], [4], [5], [0]]))\n\n    input_data = keras.Input(shape=(1,))\n    layer = AddingPreprocessingLayer()\n    output = layer(input_data)\n    model = keras.Model(input_data, output)\n    model._run_eagerly = testing_utils.should_run_eagerly()\n\n    layer.adapt(input_dataset)\n\n    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n  def test_weight_based_state_transfer(self):\n    \"\"\"Test that preproc layers can transfer state via get/set weights.\"\"\"\n\n    def get_model():\n      input_data = keras.Input(shape=(1,))\n      layer = AddingPreprocessingLayer()\n      output = layer(input_data)\n      model = keras.Model(input_data, output)\n      model._run_eagerly = testing_utils.should_run_eagerly()\n      return (model, layer)\n\n    input_dataset = np.array([1, 2, 3, 4, 5])\n    model, layer = get_model()\n    layer.adapt(input_dataset)\n    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))\n\n    # Create a new model and verify it has no state carryover.\n    weights = model.get_weights()\n    model_2, _ = get_model()\n    self.assertAllEqual([[1], [2], [3]], model_2.predict([1., 2., 3.]))\n\n    # Transfer state from model to model_2 via get/set weights.\n    model_2.set_weights(weights)\n    self.assertAllEqual([[16], [17], [18]], model_2.predict([1., 2., 3.]))\n\n  def test_loading_without_providing_class_fails(self):\n    input_data = keras.Input(shape=(1,))\n    layer = AddingPreprocessingLayer()\n    output = layer(input_data)\n    model = keras.Model(input_data, output)\n\n    if not tf.executing_eagerly():\n      self.evaluate(tf.compat.v1.variables_initializer(model.variables))\n\n    output_path = os.path.join(self.get_temp_dir(), \"tf_keras_saved_model\")\n    model.save(output_path, save_format=\"tf\")\n\n    with self.assertRaisesRegex(RuntimeError, \"Unable to restore a layer of\"):\n      _ = keras.models.load_model(output_path)\n\n  def test_adapt_sets_input_shape_rank(self):\n    \"\"\"Check that `.adapt()` sets the `input_shape`'s rank.\"\"\"\n    # Shape: (3, 1, 2)\n    adapt_dataset = np.array([[[1., 2.]], [[3., 4.]], [[5., 6.]]],\n                             dtype=np.float32)\n\n    layer = AddingPreprocessingLayer()\n    layer.adapt(adapt_dataset)\n\n    input_dataset = np.array([[[1., 2.], [3., 4.]], [[3., 4.], [5., 6.]]],\n                             dtype=np.float32)\n    layer(input_dataset)\n\n    model = keras.Sequential([layer])\n    
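# adapt() ran on rank-3 data, so the built input shape should be rank 3\n    # with all dimensions unknown.\n    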
self.assertTrue(model.built)\n self.assertEqual(model.input_shape, (None, None, None))\n\n def test_adapt_doesnt_overwrite_input_shape(self):\n \"\"\"Check that `.adapt()` doesn't change the `input_shape`.\"\"\"\n # Shape: (3, 1, 2)\n adapt_dataset = np.array([[[1., 2.]], [[3., 4.]], [[5., 6.]]],\n dtype=np.float32)\n\n layer = AddingPreprocessingLayer(input_shape=[1, 2])\n layer.adapt(adapt_dataset)\n\n model = keras.Sequential([layer])\n self.assertTrue(model.built)\n self.assertEqual(model.input_shape, (None, 1, 2))\n\n\nclass PreprocessingLayerV1Test(keras_parameterized.TestCase):\n\n def test_adapt_fails(self):\n \"\"\"Test that calling adapt leads to a runtime error.\"\"\"\n input_dataset = {\"foo\": 0}\n\n with tf.Graph().as_default():\n layer = AddingPreprocessingLayer()\n with self.assertRaisesRegex(RuntimeError,\n \"`adapt` is only supported in tensorflow v2\"):\n layer.adapt(input_dataset)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests the device compatibility check.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport re\n\nfrom keras import combinations\nfrom keras.mixed_precision import device_compatibility_check\nfrom tensorflow.python.platform import tf_logging\n\n\ndef device_details(device_name, compute_capability=None):\n details = {}\n if device_name:\n details['device_name'] = device_name\n if compute_capability:\n details['compute_capability'] = compute_capability\n return details\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass DeviceCompatibilityCheckTest(tf.test.TestCase):\n\n def _test_compat_check(self, device_attr_list, should_warn, expected_regex,\n policy_name='mixed_float16'):\n with tf.compat.v1.test.mock.patch.object(tf_logging, 'warning') as mock_warn, \\\n tf.compat.v1.test.mock.patch.object(tf_logging, 'info') as mock_info:\n device_compatibility_check._log_device_compatibility_check(\n policy_name, device_attr_list)\n if should_warn:\n self.assertRegex(mock_warn.call_args[0][0], expected_regex)\n mock_info.assert_not_called()\n else:\n self.assertRegex(mock_info.call_args[0][0], expected_regex)\n mock_warn.assert_not_called()\n\n def test_supported(self):\n details_list = [device_details('GPU 1', (7, 1))]\n regex = re.compile(\n r'.*compatibility check \\(mixed_float16\\): OK\\n'\n r'Your GPU will likely run quickly with dtype policy mixed_float16 as '\n r'it has compute capability of at least 7.0. 
Your GPU: GPU 1, compute '\n r'capability 7.1', flags=re.MULTILINE)\n self._test_compat_check(details_list, False, regex)\n\n details_list = [\n device_details('GPU 1', (7, 0)),\n device_details('GPU 2', (7, 1)),\n device_details('GPU 3', (8, 0)),\n ]\n regex = re.compile(\n r'.*compatibility check \\(mixed_float16\\): OK\\n'\n r'Your GPUs will likely run quickly with dtype policy mixed_float16 as '\n r'they all have compute capability of at least 7.0', flags=re.MULTILINE)\n self._test_compat_check(details_list, False, regex)\n\n def test_unsupported(self):\n details_list = [\n device_details('GPU 1', (6, 0))\n ]\n regex = re.compile(\n r'.*compatibility check \\(mixed_float16\\): WARNING\\n'\n r'Your GPU may run slowly with dtype policy mixed_float16.*\\n'\n r' GPU 1, compute capability 6.0\\n'\n r'See.*', flags=re.MULTILINE)\n self._test_compat_check(details_list, True, regex)\n\n details_list = [\n device_details(None)\n ]\n regex = re.compile(\n r'.*compatibility check \\(mixed_float16\\): WARNING\\n'\n r'Your GPU may run slowly with dtype policy mixed_float16.*\\n'\n r' Unknown GPU, no compute capability \\(probably not an Nvidia GPU\\)\\n'\n r'See.*', flags=re.MULTILINE)\n self._test_compat_check(details_list, True, regex)\n\n details_list = [\n device_details('GPU 1', (6, 0)),\n device_details('GPU 2', (3, 10)),\n ]\n regex = re.compile(\n r'.*compatibility check \\(mixed_float16\\): WARNING\\n'\n r'Your GPUs may run slowly with dtype policy mixed_float16.*\\n'\n r' GPU 1, compute capability 6.0\\n'\n r' GPU 2, compute capability 3.10\\n'\n r'See.*', flags=re.MULTILINE)\n self._test_compat_check(details_list, True, regex)\n\n details_list = [\n device_details('GPU 1', (6, 0)),\n device_details('GPU 1', (6, 0)),\n device_details('GPU 1', (6, 0)),\n device_details('GPU 2', (3, 10)),\n ]\n regex = re.compile(\n r'.*compatibility check \\(mixed_float16\\): WARNING\\n'\n r'Your GPUs may run slowly with dtype policy mixed_float16.*\\n'\n r' GPU 1, compute capability 6.0 \\(x3\\)\\n'\n r' GPU 2, compute capability 3.10\\n'\n r'See.*', flags=re.MULTILINE)\n self._test_compat_check(details_list, True, regex)\n\n details_list = []\n regex = re.compile(\n r'.*compatibility check \\(mixed_float16\\): WARNING\\n'\n r'The dtype policy mixed_float16 may run slowly because this machine '\n r'does not have a GPU', flags=re.MULTILINE)\n self._test_compat_check(details_list, True, regex)\n\n def test_mix_of_supported_and_unsupported(self):\n details_list = [\n device_details('GPU 1', (7, 0)),\n device_details('GPU 1', (7, 0)),\n device_details('GPU 2', (6, 0))\n ]\n regex = re.compile(\n r'.*compatibility check \\(mixed_float16\\): WARNING\\n'\n r'Some of your GPUs may run slowly with dtype policy mixed_float16.*\\n'\n r' GPU 1, compute capability 7.0 \\(x2\\)\\n'\n r' GPU 2, compute capability 6.0\\n'\n r'See.*', flags=re.MULTILINE)\n self._test_compat_check(details_list, True, regex)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmark for KPL implementation of vocabulary columns + indicator from lists with varying-length inputs.\"\"\"\n\nimport tensorflow as tf\n\nimport keras\nfrom tensorflow.python.eager.def_function import function as tf_function\nfrom keras.layers.preprocessing import category_encoding\nfrom keras.layers.preprocessing import string_lookup\nfrom keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm\n\n# This is required as of 3/2021 because otherwise we drop into graph mode.\ntf.compat.v1.enable_v2_behavior()\n\nNUM_REPEATS = 10\nBATCH_SIZES = [32, 256]\n\n\ndef embedding_varlen(batch_size, max_length):\n \"\"\"Benchmark a variable-length embedding.\"\"\"\n # Data and constants.\n vocab_size = 32768\n vocab = fc_bm.create_vocabulary(vocab_size)\n data = fc_bm.create_string_data(\n max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)\n\n # Keras implementation\n model = keras.Sequential()\n model.add(\n keras.Input(\n shape=(max_length,), name=\"data\", ragged=True, dtype=tf.string))\n model.add(string_lookup.StringLookup(vocabulary=vocab, mask_token=None))\n model.add(\n category_encoding.CategoryEncoding(\n num_tokens=vocab_size + 1, output_mode=\"count\"))\n\n # FC implementation\n fc = tf.feature_column.indicator_column(\n tf.feature_column.sequence_categorical_column_with_vocabulary_list(\n key=\"data\", vocabulary_list=vocab, num_oov_buckets=1))\n\n # Wrap the FC implementation in a tf.function for a fair comparison\n @tf_function()\n def fc_fn(tensors):\n fc.transform_feature(tf.__internal__.feature_column.FeatureTransformationCache(tensors), None)\n\n # Benchmark runs\n keras_data = {\"data\": data}\n k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)\n\n fc_data = {\"data\": data.to_sparse()}\n fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)\n\n return k_avg_time, fc_avg_time\n\n\nclass BenchmarkLayer(fc_bm.LayerBenchmark):\n \"\"\"Benchmark the layer forward pass.\"\"\"\n\n def benchmark_layer(self):\n for batch in BATCH_SIZES:\n name = \"vocab_list_indicator|varlen|batch_%s\" % batch\n k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)\n self.report(name, k_time, f_time, NUM_REPEATS)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmark for Keras image preprocessing layer.\"\"\"\n\nimport tensorflow as tf\n\nimport functools\nimport time\n\nimport numpy as np\n\nimport keras\nfrom keras.layers.preprocessing import image_preprocessing\n\ntf.compat.v1.enable_v2_behavior()\n\nLOWER = .2\nUPPER = .4\nBATCH_SIZE = 32\n\n\ndef rotate(inputs):\n \"\"\"rotate image.\"\"\"\n inputs_shape = tf.shape(inputs)\n batch_size = inputs_shape[0]\n img_hd = tf.cast(inputs_shape[1], tf.float32)\n img_wd = tf.cast(inputs_shape[2], tf.float32)\n min_angle = LOWER * 2. * np.pi\n max_angle = UPPER * 2. * np.pi\n angles = tf.random.uniform(\n shape=[batch_size], minval=min_angle, maxval=max_angle)\n return image_preprocessing.transform(\n inputs, image_preprocessing.get_rotation_matrix(angles, img_hd, img_wd))\n\n\ndef zoom(inputs):\n \"\"\"zoom image.\"\"\"\n inputs_shape = tf.shape(inputs)\n batch_size = inputs_shape[0]\n img_hd = tf.cast(inputs_shape[1], tf.float32)\n img_wd = tf.cast(inputs_shape[2], tf.float32)\n height_zoom = tf.random.uniform(\n shape=[batch_size, 1], minval=1. + LOWER, maxval=1. + UPPER)\n width_zoom = tf.random.uniform(\n shape=[batch_size, 1], minval=1. + LOWER, maxval=1. 
+ UPPER)\n zooms = tf.cast(\n tf.concat([width_zoom, height_zoom], axis=1), dtype=tf.float32)\n return image_preprocessing.transform(\n inputs, image_preprocessing.get_zoom_matrix(zooms, img_hd, img_wd))\n\n\ndef image_augmentation(inputs, batch_size):\n \"\"\"image augmentation.\"\"\"\n img = inputs\n img = tf.image.resize(img, size=[224, 224])\n img = tf.image.random_crop(img, size=[batch_size, 224, 224, 3])\n img = rotate(img)\n img = zoom(img)\n return img\n\n\nclass BenchmarkLayer(tf.test.Benchmark):\n \"\"\"Benchmark the layer forward pass.\"\"\"\n\n def run_dataset_implementation(self, batch_size):\n num_repeats = 5\n starts = []\n ends = []\n for _ in range(num_repeats):\n ds = tf.data.Dataset.from_tensor_slices(\n np.random.random((batch_size, 256, 256, 3)))\n ds = ds.shuffle(batch_size * 100)\n ds = ds.batch(batch_size)\n ds = ds.prefetch(batch_size)\n img_augmentation = functools.partial(\n image_augmentation, batch_size=batch_size)\n ds = ds.map(img_augmentation)\n starts.append(time.time())\n count = 0\n # Benchmarked code begins here.\n for i in ds:\n _ = i\n count += 1\n # Benchmarked code ends here.\n ends.append(time.time())\n\n avg_time = np.mean(np.array(ends) - np.array(starts)) / count\n return avg_time\n\n def bm_layer_implementation(self, batch_size):\n with tf.device(\"/gpu:0\"):\n img = keras.Input(shape=(256, 256, 3), dtype=tf.float32)\n preprocessor = keras.Sequential([\n image_preprocessing.Resizing(224, 224),\n image_preprocessing.RandomCrop(height=224, width=224),\n image_preprocessing.RandomRotation(factor=(.2, .4)),\n image_preprocessing.RandomFlip(mode=\"horizontal\"),\n image_preprocessing.RandomZoom(.2, .2)\n ])\n _ = preprocessor(img)\n\n num_repeats = 5\n starts = []\n ends = []\n for _ in range(num_repeats):\n ds = tf.data.Dataset.from_tensor_slices(\n np.random.random((batch_size, 256, 256, 3)))\n ds = ds.shuffle(batch_size * 100)\n ds = ds.batch(batch_size)\n ds = ds.prefetch(batch_size)\n starts.append(time.time())\n count = 0\n # Benchmarked code begins here.\n for i in ds:\n _ = preprocessor(i)\n count += 1\n # Benchmarked code ends here.\n ends.append(time.time())\n\n avg_time = np.mean(np.array(ends) - np.array(starts)) / count\n name = \"image_preprocessing|batch_%s\" % batch_size\n baseline = self.run_dataset_implementation(batch_size)\n extras = {\n \"dataset implementation baseline\": baseline,\n \"delta seconds\": (baseline - avg_time),\n \"delta percent\": ((baseline - avg_time) / baseline) * 100\n }\n self.report_benchmark(\n iters=num_repeats, wall_time=avg_time, extras=extras, name=name)\n\n def benchmark_vocab_size_by_batch(self):\n for batch in [32, 64, 256]:\n self.bm_layer_implementation(batch_size=batch)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras text category_encoding preprocessing layer.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport keras\nfrom keras import backend\nfrom keras import keras_parameterized\nfrom keras.layers import core\nfrom keras.layers.preprocessing import category_encoding\nfrom keras.layers.preprocessing import preprocessing_test_utils\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_v1=True)\nclass CategoryEncodingInputTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def test_dense_input_sparse_output(self):\n input_array = tf.constant([[1, 2, 3], [3, 3, 0]])\n\n # The expected output should be (X for missing value):\n # [[X, 1, 1, 1, X, X]\n # [1, X, X, 2, X, X]]\n expected_indices = [[0, 1], [0, 2], [0, 3], [1, 0], [1, 3]]\n expected_values = [1, 1, 1, 1, 2]\n num_tokens = 6\n\n input_data = keras.Input(shape=(None,), dtype=tf.int32)\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)\n int_data = layer(input_data)\n\n model = keras.Model(inputs=input_data, outputs=int_data)\n sp_output_dataset = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_values, sp_output_dataset.values)\n self.assertAllEqual(expected_indices, sp_output_dataset.indices)\n\n # Assert sparse output is same as dense output.\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens,\n output_mode=category_encoding.COUNT,\n sparse=False)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array, steps=1)\n self.assertAllEqual(\n tf.sparse.to_dense(sp_output_dataset, default_value=0),\n output_dataset)\n\n def test_sparse_input(self):\n input_array = np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64)\n sparse_tensor_data = tf.sparse.from_dense(input_array)\n\n # pyformat: disable\n expected_output = [[0, 1, 1, 1, 0, 0],\n [0, 1, 0, 1, 0, 0]]\n # pyformat: enable\n num_tokens = 6\n expected_output_shape = [None, num_tokens]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)\n int_data = layer(input_data)\n self.assertAllEqual(expected_output_shape, int_data.shape.as_list())\n\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(sparse_tensor_data, steps=1)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_sparse_input_with_weights(self):\n input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 4]], dtype=np.int64)\n weights_array = np.array([[.1, .2, .3, .4], [.2, .1, .4, .3]])\n sparse_tensor_data = tf.sparse.from_dense(input_array)\n sparse_weight_data = 
tf.sparse.from_dense(weights_array)\n\n # pyformat: disable\n expected_output = [[0, .1, .2, .3, .4, 0],\n [0, .4, 0, .1, .5, 0]]\n # pyformat: enable\n num_tokens = 6\n expected_output_shape = [None, num_tokens]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n weight_data = keras.Input(shape=(None,), dtype=tf.float32, sparse=True)\n\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.COUNT)\n int_data = layer(input_data, count_weights=weight_data)\n self.assertAllEqual(expected_output_shape, int_data.shape.as_list())\n\n model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)\n output_dataset = model.predict([sparse_tensor_data, sparse_weight_data],\n steps=1)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_sparse_input_sparse_output(self):\n sp_inp = tf.SparseTensor(\n indices=[[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]],\n values=[0, 2, 1, 1, 0],\n dense_shape=[4, 2])\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n\n # The expected output should be (X for missing value):\n # [[1, X, X, X]\n # [X, X, 1, X]\n # [X, 2, X, X]\n # [1, X, X, X]]\n expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]\n expected_values = [1, 1, 2, 1]\n num_tokens = 6\n\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)\n int_data = layer(input_data)\n\n model = keras.Model(inputs=input_data, outputs=int_data)\n sp_output_dataset = model.predict(sp_inp, steps=1)\n self.assertAllEqual(expected_values, sp_output_dataset.values)\n self.assertAllEqual(expected_indices, sp_output_dataset.indices)\n\n # Assert sparse output is same as dense output.\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens,\n output_mode=category_encoding.COUNT,\n sparse=False)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(sp_inp, steps=1)\n self.assertAllEqual(\n tf.sparse.to_dense(sp_output_dataset, default_value=0),\n output_dataset)\n\n def test_sparse_input_sparse_output_with_weights(self):\n indices = [[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]]\n sp_inp = tf.SparseTensor(\n indices=indices, values=[0, 2, 1, 1, 0], dense_shape=[4, 2])\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n sp_weight = tf.SparseTensor(\n indices=indices, values=[.1, .2, .4, .3, .2], dense_shape=[4, 2])\n weight_data = keras.Input(shape=(None,), dtype=tf.float32, sparse=True)\n\n # The expected output should be (X for missing value):\n # [[1, X, X, X]\n # [X, X, 1, X]\n # [X, 2, X, X]\n # [1, X, X, X]]\n expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]\n expected_values = [.1, .2, .7, .2]\n num_tokens = 6\n\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)\n int_data = layer(input_data, count_weights=weight_data)\n\n model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)\n sp_output_dataset = model.predict([sp_inp, sp_weight], steps=1)\n self.assertAllClose(expected_values, sp_output_dataset.values)\n self.assertAllEqual(expected_indices, sp_output_dataset.indices)\n\n def test_ragged_input(self):\n input_array = tf.ragged.constant([[1, 2, 3], [3, 1]])\n\n # pyformat: disable\n expected_output = [[0, 1, 1, 1, 0, 0],\n [0, 1, 0, 1, 0, 0]]\n # pyformat: enable\n num_tokens = 6\n expected_output_shape = [None, num_tokens]\n\n input_data = keras.Input(shape=(None,), 
dtype=tf.int32, ragged=True)\n\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)\n int_data = layer(input_data)\n\n self.assertAllEqual(expected_output_shape, int_data.shape.as_list())\n\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_input_sparse_output(self):\n input_array = tf.ragged.constant([[1, 2, 3], [3, 3]])\n\n # The expected output should be (X for missing value):\n # [[X, 1, 1, 1]\n # [X, X, X, 2]]\n expected_indices = [[0, 1], [0, 2], [0, 3], [1, 3]]\n expected_values = [1, 1, 1, 2]\n num_tokens = 6\n\n input_data = keras.Input(shape=(None,), dtype=tf.int32, ragged=True)\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)\n int_data = layer(input_data)\n\n model = keras.Model(inputs=input_data, outputs=int_data)\n sp_output_dataset = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_values, sp_output_dataset.values)\n self.assertAllEqual(expected_indices, sp_output_dataset.indices)\n\n # Assert sparse output is same as dense output.\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens,\n output_mode=category_encoding.COUNT,\n sparse=False)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array, steps=1)\n self.assertAllEqual(\n tf.sparse.to_dense(sp_output_dataset, default_value=0),\n output_dataset)\n\n def test_sparse_output_and_dense_layer(self):\n input_array = tf.constant([[1, 2, 3], [3, 3, 0]])\n\n num_tokens = 4\n\n input_data = keras.Input(shape=(None,), dtype=tf.int32)\n encoding_layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)\n int_data = encoding_layer(input_data)\n dense_layer = keras.layers.Dense(units=1)\n output_data = dense_layer(int_data)\n\n model = keras.Model(inputs=input_data, outputs=output_data)\n _ = model.predict(input_array, steps=1)\n\n def test_dense_oov_input(self):\n valid_array = tf.constant([[0, 1, 2], [0, 1, 2]])\n invalid_array = tf.constant([[0, 1, 2], [2, 3, 1]])\n num_tokens = 3\n expected_output_shape = [None, num_tokens]\n encoder_layer = category_encoding.CategoryEncoding(num_tokens)\n input_data = keras.Input(shape=(3,), dtype=tf.int32)\n int_data = encoder_layer(input_data)\n self.assertAllEqual(expected_output_shape, int_data.shape.as_list())\n model = keras.Model(inputs=input_data, outputs=int_data)\n # Call predict once on valid input to compile a graph and test control flow.\n _ = model.predict(valid_array, steps=1)\n with self.assertRaisesRegex(\n tf.errors.InvalidArgumentError,\n \".*must be in the range 0 <= values < num_tokens.*\"):\n _ = model.predict(invalid_array, steps=1)\n\n def test_dense_negative(self):\n valid_array = tf.constant([[0, 1, 2], [0, 1, 2]])\n invalid_array = tf.constant([[1, 2, 0], [2, 2, -1]])\n num_tokens = 3\n expected_output_shape = [None, num_tokens]\n encoder_layer = category_encoding.CategoryEncoding(num_tokens)\n input_data = keras.Input(shape=(3,), dtype=tf.int32)\n int_data = encoder_layer(input_data)\n self.assertAllEqual(expected_output_shape, int_data.shape.as_list())\n model = keras.Model(inputs=input_data, outputs=int_data)\n # Call predict once on valid input to compile a graph and test control flow.\n _ = model.predict(valid_array, steps=1)\n 
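# A negative index such as -1 is out of range in the same way as an\n    # index >= num_tokens, so the same range check should fire.\n    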
with self.assertRaisesRegex(\n tf.errors.InvalidArgumentError,\n \".*must be in the range 0 <= values < num_tokens.*\"):\n _ = model.predict(invalid_array, steps=1)\n\n def test_legacy_max_tokens_arg(self):\n input_array = np.array([[1, 2, 3, 1]])\n expected_output = [[0, 1, 1, 1, 0, 0]]\n num_tokens = 6\n expected_output_shape = [None, num_tokens]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int32)\n layer = category_encoding.CategoryEncoding(\n max_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)\n int_data = layer(input_data)\n self.assertAllEqual(expected_output_shape, int_data.shape.as_list())\n\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoryEncodingOutputTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def test_one_hot_output(self):\n input_data = np.array([[3], [2], [0], [1]])\n expected_output = [\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n ]\n num_tokens = 4\n expected_output_shape = [None, num_tokens]\n\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)\n inputs = keras.Input(shape=(1,), dtype=tf.int32)\n outputs = layer(inputs)\n model = keras.Model(inputs=inputs, outputs=outputs)\n output_dataset = model(input_data)\n self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_one_hot_output_rank_one_input(self):\n input_data = np.array([3, 2, 0, 1])\n expected_output = [\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n ]\n num_tokens = 4\n expected_output_shape = [None, num_tokens]\n\n # Test call on layer directly.\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)\n output_data = layer(input_data)\n self.assertAllEqual(expected_output, output_data)\n\n # Test call on model.\n inputs = keras.Input(shape=(1,), dtype=tf.int32)\n outputs = layer(inputs)\n model = keras.Model(inputs=inputs, outputs=outputs)\n output_data = model(input_data)\n self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n self.assertAllEqual(expected_output, output_data)\n\n def test_one_hot_output_rank_zero_input(self):\n input_data = np.array(3)\n expected_output = [0, 0, 0, 1]\n num_tokens = 4\n expected_output_shape = [None, num_tokens]\n\n # Test call on layer directly.\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)\n output_data = layer(input_data)\n self.assertAllEqual(expected_output, output_data)\n\n # Test call on model.\n inputs = keras.Input(shape=(1,), dtype=tf.int32)\n outputs = layer(inputs)\n model = keras.Model(inputs=inputs, outputs=outputs)\n output_data = model(input_data)\n\n self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n self.assertAllEqual(expected_output, output_data)\n\n def test_one_hot_rank_3_output_fails(self):\n layer = category_encoding.CategoryEncoding(\n num_tokens=4, output_mode=category_encoding.ONE_HOT)\n with self.assertRaisesRegex(ValueError, \"only outputs up to rank 2\"):\n _ = layer(keras.Input(shape=(4,), dtype=tf.int32))\n with self.assertRaisesRegex(ValueError, \"only outputs up to rank 2\"):\n _ = layer(np.array([[3, 2, 0, 1], [3, 2, 0, 1]]))\n\n def test_multi_hot_output(self):\n input_data = 
np.array([[1, 2, 3, 1], [0, 3, 1, 0]])\n    expected_output = [\n        [0, 1, 1, 1, 0, 0],\n        [1, 1, 0, 1, 0, 0],\n    ]\n    num_tokens = 6\n    expected_output_shape = [None, num_tokens]\n\n    layer = category_encoding.CategoryEncoding(\n        num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)\n    inputs = keras.Input(shape=(None,), dtype=tf.int32)\n    outputs = layer(inputs)\n    model = keras.Model(inputs=inputs, outputs=outputs)\n    output_data = model.predict(input_data)\n    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n    self.assertAllEqual(expected_output, output_data)\n\n  def test_multi_hot_output_rank_one_input(self):\n    input_data = np.array([3, 2, 0, 1])\n    expected_output = [1, 1, 1, 1, 0, 0]\n    num_tokens = 6\n    expected_output_shape = [None, num_tokens]\n\n    # Test call on layer directly.\n    layer = category_encoding.CategoryEncoding(\n        num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)\n    output_data = layer(input_data)\n    self.assertAllEqual(expected_output, output_data)\n\n    # Test call on model.\n    inputs = keras.Input(shape=(4,), dtype=tf.int32)\n    outputs = layer(inputs)\n    model = keras.Model(inputs=inputs, outputs=outputs)\n    output_data = model(input_data)\n    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n    self.assertAllEqual(expected_output, output_data)\n\n  def test_multi_hot_output_rank_zero_input(self):\n    input_data = np.array(3)\n    expected_output = [0, 0, 0, 1, 0, 0]\n    num_tokens = 6\n    expected_output_shape = [None, num_tokens]\n\n    # Test call on layer directly.\n    layer = category_encoding.CategoryEncoding(\n        num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)\n    output_data = layer(input_data)\n    self.assertAllEqual(expected_output, output_data)\n\n    # Test call on model.\n    inputs = keras.Input(shape=(4,), dtype=tf.int32)\n    outputs = layer(inputs)\n    model = keras.Model(inputs=inputs, outputs=outputs)\n    output_data = model(input_data)\n    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n    self.assertAllEqual(expected_output, output_data)\n\n  def test_multi_hot_rank_3_output_fails(self):\n    layer = category_encoding.CategoryEncoding(\n        num_tokens=4, output_mode=category_encoding.MULTI_HOT)\n    with self.assertRaisesRegex(ValueError, \"only outputs up to rank 2\"):\n      _ = layer(keras.Input(shape=(3, 4,), dtype=tf.int32))\n    with self.assertRaisesRegex(ValueError, \"only outputs up to rank 2\"):\n      _ = layer(np.array([[[3, 2, 0, 1], [3, 2, 0, 1]]]))\n\n  def test_count_output(self):\n    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])\n\n    # pyformat: disable\n    expected_output = [[0, 2, 1, 1, 0, 0],\n                       [2, 1, 0, 1, 0, 0]]\n    # pyformat: enable\n    num_tokens = 6\n    expected_output_shape = [None, num_tokens]\n\n    input_data = keras.Input(shape=(None,), dtype=tf.int32)\n    layer = category_encoding.CategoryEncoding(\n        num_tokens=6, output_mode=category_encoding.COUNT)\n    int_data = layer(input_data)\n    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())\n\n    model = keras.Model(inputs=input_data, outputs=int_data)\n    output_dataset = model.predict(input_array)\n    self.assertAllEqual(expected_output, output_dataset)\n\n\nclass CategoryEncodingModelBuildingTest(\n    keras_parameterized.TestCase,\n    preprocessing_test_utils.PreprocessingLayerTest):\n\n  @parameterized.named_parameters(\n      {\n          \"testcase_name\": \"count_output\",\n          \"num_tokens\": 5,\n          \"output_mode\": category_encoding.COUNT\n      }, {\n          \"testcase_name\": \"multi_hot_output\",\n          \"num_tokens\": 5,\n          \"output_mode\": category_encoding.MULTI_HOT\n      })\n  def 
test_end_to_end_bagged_modeling(self, output_mode, num_tokens):\n input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])\n\n input_data = keras.Input(shape=(None,), dtype=tf.int32)\n layer = category_encoding.CategoryEncoding(\n num_tokens=num_tokens, output_mode=output_mode)\n\n weights = []\n if num_tokens is None:\n layer.set_num_elements(5)\n layer.set_weights(weights)\n\n int_data = layer(input_data)\n float_data = backend.cast(int_data, dtype=\"float32\")\n output_data = core.Dense(64)(float_data)\n model = keras.Model(inputs=input_data, outputs=output_data)\n _ = model.predict(input_array)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras initializers.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport numpy as np\n\nfrom keras import backend\nfrom keras import combinations\nfrom keras import initializers\nfrom keras import models\nfrom keras import testing_utils\nfrom keras.engine import input_layer\nfrom keras.layers import core\n\n\ndef _compute_fans(shape):\n \"\"\"Computes the number of input and output units for a weight shape.\n\n Args:\n shape: Integer shape tuple or TF tensor shape.\n\n Returns:\n A tuple of integer scalars (fan_in, fan_out).\n \"\"\"\n if len(shape) < 1: # Just to avoid errors for constants.\n fan_in = fan_out = 1\n elif len(shape) == 1:\n fan_in = fan_out = shape[0]\n elif len(shape) == 2:\n fan_in = shape[0]\n fan_out = shape[1]\n else:\n # Assuming convolution kernels (2D, 3D, or more).\n # kernel shape: (..., input_depth, depth)\n receptive_field_size = 1\n for dim in shape[:-2]:\n receptive_field_size *= dim\n fan_in = shape[-2] * receptive_field_size\n fan_out = shape[-1] * receptive_field_size\n return int(fan_in), int(fan_out)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass KerasInitializersTest(tf.test.TestCase):\n\n def _runner(self, init, shape, target_mean=None, target_std=None,\n target_max=None, target_min=None):\n variable = backend.variable(init(shape))\n output = backend.get_value(variable)\n # Test serialization (assumes deterministic behavior).\n config = init.get_config()\n reconstructed_init = init.__class__.from_config(config)\n variable = backend.variable(reconstructed_init(shape))\n output_2 = backend.get_value(variable)\n self.assertAllClose(output, output_2, atol=1e-4)\n\n def test_uniform(self):\n tensor_shape = (9, 6, 7)\n with self.cached_session():\n self._runner(\n initializers.RandomUniformV2(minval=-1, maxval=1, seed=124),\n tensor_shape,\n target_mean=0.,\n target_max=1,\n target_min=-1)\n\n def test_normal(self):\n tensor_shape = (8, 12, 99)\n with self.cached_session():\n self._runner(\n initializers.RandomNormalV2(mean=0, stddev=1, seed=153),\n tensor_shape,\n target_mean=0.,\n target_std=1)\n\n def test_truncated_normal(self):\n tensor_shape = (12, 99, 7)\n with 
self.cached_session():\n self._runner(\n initializers.TruncatedNormalV2(mean=0, stddev=1, seed=126),\n tensor_shape,\n target_mean=0.,\n target_max=2,\n target_min=-2)\n\n def test_constant(self):\n tensor_shape = (5, 6, 4)\n with self.cached_session():\n self._runner(\n initializers.ConstantV2(2.),\n tensor_shape,\n target_mean=2,\n target_max=2,\n target_min=2)\n\n def test_lecun_uniform(self):\n tensor_shape = (5, 6, 4, 2)\n with self.cached_session():\n fan_in, _ = _compute_fans(tensor_shape)\n std = np.sqrt(1. / fan_in)\n self._runner(\n initializers.LecunUniformV2(seed=123),\n tensor_shape,\n target_mean=0.,\n target_std=std)\n\n def test_glorot_uniform(self):\n tensor_shape = (5, 6, 4, 2)\n with self.cached_session():\n fan_in, fan_out = _compute_fans(tensor_shape)\n std = np.sqrt(2. / (fan_in + fan_out))\n self._runner(\n initializers.GlorotUniformV2(seed=123),\n tensor_shape,\n target_mean=0.,\n target_std=std)\n\n def test_he_uniform(self):\n tensor_shape = (5, 6, 4, 2)\n with self.cached_session():\n fan_in, _ = _compute_fans(tensor_shape)\n std = np.sqrt(2. / fan_in)\n self._runner(\n initializers.HeUniformV2(seed=123),\n tensor_shape,\n target_mean=0.,\n target_std=std)\n\n def test_lecun_normal(self):\n tensor_shape = (5, 6, 4, 2)\n with self.cached_session():\n fan_in, _ = _compute_fans(tensor_shape)\n std = np.sqrt(1. / fan_in)\n self._runner(\n initializers.LecunNormalV2(seed=123),\n tensor_shape,\n target_mean=0.,\n target_std=std)\n\n def test_glorot_normal(self):\n tensor_shape = (5, 6, 4, 2)\n with self.cached_session():\n fan_in, fan_out = _compute_fans(tensor_shape)\n std = np.sqrt(2. / (fan_in + fan_out))\n self._runner(\n initializers.GlorotNormalV2(seed=123),\n tensor_shape,\n target_mean=0.,\n target_std=std)\n\n def test_he_normal(self):\n tensor_shape = (5, 6, 4, 2)\n with self.cached_session():\n fan_in, _ = _compute_fans(tensor_shape)\n std = np.sqrt(2. / fan_in)\n self._runner(\n initializers.HeNormalV2(seed=123),\n tensor_shape,\n target_mean=0.,\n target_std=std)\n\n def test_orthogonal(self):\n tensor_shape = (20, 20)\n with self.cached_session():\n self._runner(\n initializers.OrthogonalV2(seed=123), tensor_shape, target_mean=0.)\n\n def test_identity(self):\n with self.cached_session():\n tensor_shape = (3, 4, 5)\n with self.assertRaises(ValueError):\n self._runner(\n initializers.IdentityV2(),\n tensor_shape,\n target_mean=1. / tensor_shape[0],\n target_max=1.)\n\n tensor_shape = (3, 3)\n self._runner(\n initializers.IdentityV2(),\n tensor_shape,\n target_mean=1. 
/ tensor_shape[0],\n target_max=1.)\n\n def test_zero(self):\n tensor_shape = (4, 5)\n with self.cached_session():\n self._runner(\n initializers.ZerosV2(), tensor_shape, target_mean=0., target_max=0.)\n\n def test_one(self):\n tensor_shape = (4, 5)\n with self.cached_session():\n self._runner(\n initializers.OnesV2(), tensor_shape, target_mean=1., target_max=1.)\n\n def test_default_random_uniform(self):\n ru = initializers.get('uniform')\n self.assertEqual(ru.minval, -0.05)\n self.assertEqual(ru.maxval, 0.05)\n\n def test_default_random_normal(self):\n rn = initializers.get('normal')\n self.assertEqual(rn.mean, 0.0)\n self.assertEqual(rn.stddev, 0.05)\n\n def test_default_truncated_normal(self):\n tn = initializers.get('truncated_normal')\n self.assertEqual(tn.mean, 0.0)\n self.assertEqual(tn.stddev, 0.05)\n\n def test_custom_initializer_saving(self):\n\n def my_initializer(shape, dtype=None):\n return tf.ones(shape, dtype=dtype)\n\n inputs = input_layer.Input((10,))\n outputs = core.Dense(1, kernel_initializer=my_initializer)(inputs)\n model = models.Model(inputs, outputs)\n model2 = model.from_config(\n model.get_config(), custom_objects={'my_initializer': my_initializer})\n self.assertEqual(model2.layers[1].kernel_initializer, my_initializer)\n\n @testing_utils.run_v2_only\n def test_load_external_variance_scaling_v2(self):\n external_serialized_json = {\n 'class_name': 'VarianceScaling',\n 'config': {\n 'distribution': 'normal',\n 'mode': 'fan_avg',\n 'scale': 1.0,\n 'seed': None\n }\n }\n initializer = initializers.deserialize(external_serialized_json)\n self.assertEqual(initializer.distribution, 'truncated_normal')\n\n def test_partition(self):\n with self.cached_session():\n partition_enabled_initializers = [\n initializers.ZerosV2(),\n initializers.OnesV2(),\n initializers.RandomUniformV2(),\n initializers.RandomNormalV2(),\n initializers.TruncatedNormalV2(),\n initializers.LecunUniformV2(),\n initializers.GlorotUniformV2(),\n initializers.HeUniformV2()\n ]\n for initializer in partition_enabled_initializers:\n got = initializer(\n shape=(4, 2), partition_shape=(2, 2), partition_offset=(0, 0))\n self.assertEqual(got.shape, (2, 2))\n\n partition_forbidden_initializers = [\n initializers.OrthogonalV2(),\n initializers.IdentityV2()\n ]\n for initializer in partition_forbidden_initializers:\n with self.assertRaisesRegex(\n ValueError,\n \"initializer doesn't support partition-related arguments\"):\n initializer(\n shape=(4, 2), partition_shape=(2, 2), partition_offset=(0, 0))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmark for KPL implementation of vocabulary columns from lists with varying-length inputs.\"\"\"\n\nimport tensorflow as tf\n\nimport keras\nfrom tensorflow.python.eager.def_function import function as tf_function\nfrom keras.layers.preprocessing import string_lookup\nfrom keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm\n\n# This is required as of 3/2021 because otherwise we drop into graph mode.\ntf.compat.v1.enable_v2_behavior()\n\nNUM_REPEATS = 10\nBATCH_SIZES = [32, 256]\n\n\ndef embedding_varlen(batch_size, max_length):\n \"\"\"Benchmark a variable-length embedding.\"\"\"\n # Data and constants.\n vocab = fc_bm.create_vocabulary(32768)\n data = fc_bm.create_string_data(\n max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)\n\n # Keras implementation\n model = keras.Sequential()\n model.add(\n keras.Input(\n shape=(max_length,), name=\"data\", ragged=True, dtype=tf.string))\n model.add(string_lookup.StringLookup(vocabulary=vocab, mask_token=None))\n\n # FC implementation\n fc = tf.feature_column.sequence_categorical_column_with_vocabulary_list(\n key=\"data\", vocabulary_list=vocab, num_oov_buckets=1)\n\n # Wrap the FC implementation in a tf.function for a fair comparison\n @tf_function()\n def fc_fn(tensors):\n fc.transform_feature(tf.__internal__.feature_column.FeatureTransformationCache(tensors), None)\n\n # Benchmark runs\n keras_data = {\"data\": data}\n k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)\n\n fc_data = {\"data\": data.to_sparse()}\n fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)\n\n return k_avg_time, fc_avg_time\n\n\nclass BenchmarkLayer(fc_bm.LayerBenchmark):\n \"\"\"Benchmark the layer forward pass.\"\"\"\n\n def benchmark_layer(self):\n for batch in BATCH_SIZES:\n name = \"vocab_list|varlen|batch_%s\" % batch\n k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)\n self.report(name, k_time, f_time, NUM_REPEATS)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common utilities for our Keras preprocessing integration tests.\"\"\"\n\nimport os\n\nimport tensorflow as tf\npreprocessing = tf.keras.layers\n\nBATCH_SIZE = 64\nDS_SIZE = BATCH_SIZE * 16\nSTEPS = DS_SIZE / BATCH_SIZE\nVOCAB_SIZE = 100\n\n\ndef make_dataset():\n  \"\"\"Make a simple structured dataset.\n\n  The dataset contains three feature columns.\n  - float_col: an unnormalized numeric column.\n  - int_col: a column of integer IDs.\n  - string_col: a column of fixed vocabulary terms.\n\n  Returns:\n    The dataset.\n  \"\"\"\n  tf.random.set_seed(197011)\n  floats = tf.random.uniform((DS_SIZE, 1), maxval=10, dtype=\"float64\")\n  # Generate 100 unique integer values, but over a wide range to showcase a\n  # common use case for IntegerLookup.\n  ints = tf.random.uniform((DS_SIZE, 1), maxval=VOCAB_SIZE, dtype=\"int64\")\n  ints = ints * 1000\n  # Use a fixed vocabulary of strings from 0 to 99, to showcase loading a\n  # vocabulary from a file.\n  strings = tf.random.uniform((DS_SIZE, 1), maxval=VOCAB_SIZE, dtype=\"int64\")\n  strings = tf.strings.as_string(strings)\n  features = {\"float_col\": floats, \"int_col\": ints, \"string_col\": strings}\n  # Random binary label.\n  labels = tf.random.uniform((DS_SIZE, 1), maxval=2, dtype=\"int64\")\n  ds = tf.data.Dataset.from_tensor_slices((features, labels))\n  return ds\n\n\ndef make_preprocessing_model(file_dir):\n  \"\"\"Make a standalone preprocessing model.\"\"\"\n  # The name of our keras.Input should match the column name in the dataset.\n  float_in = tf.keras.Input(shape=(1,), dtype=\"float64\", name=\"float_col\")\n  int_in = tf.keras.Input(shape=(1,), dtype=\"int64\", name=\"int_col\")\n  string_in = tf.keras.Input(shape=(1,), dtype=\"string\", name=\"string_col\")\n\n  # We need to batch a dataset before adapting.\n  ds = make_dataset().batch(BATCH_SIZE)\n  # Normalize floats by adapting the mean and variance of the input.\n  normalization = preprocessing.Normalization()\n  normalization.adapt(ds.map(lambda features, labels: features[\"float_col\"]))\n  float_out = normalization(float_in)\n  # Look up ints by adapting a vocab of integer IDs.\n  int_lookup = preprocessing.IntegerLookup()\n  int_lookup.adapt(ds.map(lambda features, labels: features[\"int_col\"]))\n  int_out = int_lookup(int_in)\n  # Look up strings from a fixed file-based vocabulary.\n  string_vocab = list(str(i) for i in range(VOCAB_SIZE))\n  vocab_file = os.path.join(file_dir, \"vocab_file.txt\")\n  with open(vocab_file, \"w\") as f:\n    f.write(\"\\n\".join(string_vocab))\n  string_lookup = preprocessing.StringLookup(vocabulary=vocab_file)\n  string_out = string_lookup(string_in)\n\n  return tf.keras.Model(\n      inputs=(float_in, int_in, string_in),\n      outputs=(float_out, int_out, string_out))\n\n\ndef make_training_model():\n  \"\"\"Make a trainable model for the preprocessed inputs.\"\"\"\n  float_in = tf.keras.Input(shape=(1,), dtype=\"float64\", 
name=\"float_col\")\n # After preprocessing, both the string and int column are integer ready for\n # embedding.\n int_in = tf.keras.Input(shape=(1,), dtype=\"int64\", name=\"int_col\")\n string_in = tf.keras.Input(shape=(1,), dtype=\"int64\", name=\"string_col\")\n\n # Feed the lookup layers into an embedding.\n int_embedding = tf.keras.layers.Embedding(VOCAB_SIZE + 1, 8, input_length=1)\n int_out = int_embedding(int_in)\n int_out = tf.keras.layers.Flatten()(int_out)\n string_embedding = tf.keras.layers.Embedding(\n VOCAB_SIZE + 1, 8, input_length=1)\n string_out = string_embedding(string_in)\n string_out = tf.keras.layers.Flatten()(string_out)\n\n # Concatenate outputs.\n concatate = tf.keras.layers.Concatenate()\n # Feed our preprocessed inputs into a simple MLP.\n x = concatate((float_in, int_out, string_out))\n x = tf.keras.layers.Dense(32, activation=\"relu\")(x)\n x = tf.keras.layers.Dense(32, activation=\"relu\")(x)\n outputs = tf.keras.layers.Dense(1, activation=\"softmax\")(x)\n return tf.keras.Model(inputs=(float_in, int_in, string_in), outputs=outputs)\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Correctness tests for tf.keras CNN models using DistributionStrategy.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport numpy as np\nimport keras\nfrom keras import testing_utils\nfrom keras.distribute import keras_correctness_test_base\nfrom keras.optimizer_v2 import gradient_descent\n\n\n@testing_utils.run_all_without_tensor_float_32(\n 'Uses Dense layers, which call matmul. 
Even if Dense layers run in '\n 'float64, the test sometimes fails with TensorFloat-32 enabled for unknown '\n 'reasons')\nclass DistributionStrategyCnnCorrectnessTest(\n keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):\n\n def get_model(self,\n initial_weights=None,\n distribution=None,\n input_shapes=None):\n del input_shapes\n with keras_correctness_test_base.MaybeDistributionScope(distribution):\n image = keras.layers.Input(shape=(28, 28, 3), name='image')\n c1 = keras.layers.Conv2D(\n name='conv1',\n filters=16,\n kernel_size=(3, 3),\n strides=(4, 4),\n kernel_regularizer=keras.regularizers.l2(1e-4))(\n image)\n if self.with_batch_norm == 'regular':\n c1 = keras.layers.BatchNormalization(name='bn1')(c1)\n elif self.with_batch_norm == 'sync':\n # Test with parallel batch norms to verify all-reduce works OK.\n bn1 = keras.layers.SyncBatchNormalization(name='bn1')(c1)\n bn2 = keras.layers.SyncBatchNormalization(name='bn2')(c1)\n c1 = keras.layers.Add()([bn1, bn2])\n c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1)\n logits = keras.layers.Dense(\n 10, activation='softmax', name='pred')(\n keras.layers.Flatten()(c1))\n model = keras.Model(inputs=[image], outputs=[logits])\n\n if initial_weights:\n model.set_weights(initial_weights)\n\n model.compile(\n optimizer=gradient_descent.SGD(learning_rate=0.1),\n loss='sparse_categorical_crossentropy',\n metrics=['sparse_categorical_accuracy'])\n\n return model\n\n def _get_data(self, count, shape=(28, 28, 3), num_classes=10):\n centers = np.random.randn(num_classes, *shape)\n\n features = []\n labels = []\n for _ in range(count):\n label = np.random.randint(0, num_classes, size=1)[0]\n offset = np.random.normal(loc=0, scale=0.1, size=np.prod(shape))\n offset = offset.reshape(shape)\n labels.append(label)\n features.append(centers[label] + offset)\n\n x = np.asarray(features, dtype=np.float32)\n y = np.asarray(labels, dtype=np.float32).reshape((count, 1))\n return x, y\n\n def get_data(self):\n x_train, y_train = self._get_data(\n count=keras_correctness_test_base._GLOBAL_BATCH_SIZE *\n keras_correctness_test_base._EVAL_STEPS)\n x_predict = x_train\n return x_train, y_train, x_predict\n\n def get_data_with_partial_last_batch_eval(self):\n x_train, y_train = self._get_data(count=1280)\n x_eval, y_eval = self._get_data(count=1000)\n return x_train, y_train, x_eval, y_eval, x_eval\n\n @tf.__internal__.distribute.combinations.generate(\n keras_correctness_test_base.all_strategy_and_input_config_combinations() +\n keras_correctness_test_base.multi_worker_mirrored_eager())\n def test_cnn_correctness(self, distribution, use_numpy, use_validation_data):\n if (distribution ==\n tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu):\n self.skipTest('b/183958183')\n self.run_correctness_test(distribution, use_numpy, use_validation_data)\n\n @tf.__internal__.distribute.combinations.generate(\n keras_correctness_test_base.all_strategy_and_input_config_combinations() +\n keras_correctness_test_base.multi_worker_mirrored_eager())\n def test_cnn_with_batch_norm_correctness(self, distribution, use_numpy,\n use_validation_data):\n self.run_correctness_test(\n distribution,\n use_numpy,\n use_validation_data,\n with_batch_norm='regular')\n\n @tf.__internal__.distribute.combinations.generate(\n keras_correctness_test_base.all_strategy_and_input_config_combinations() +\n keras_correctness_test_base.multi_worker_mirrored_eager())\n def test_cnn_with_sync_batch_norm_correctness(self, distribution, use_numpy,\n 
use_validation_data):\n if not tf.executing_eagerly():\n self.skipTest('SyncBatchNorm is not enabled in graph mode.')\n\n self.run_correctness_test(\n distribution,\n use_numpy,\n use_validation_data,\n with_batch_norm='sync')\n\n @tf.__internal__.distribute.combinations.generate(\n keras_correctness_test_base\n .all_strategy_and_input_config_combinations_eager() +\n keras_correctness_test_base.multi_worker_mirrored_eager() +\n keras_correctness_test_base.test_combinations_with_tpu_strategies_graph())\n def test_cnn_correctness_with_partial_last_batch_eval(self, distribution,\n use_numpy,\n use_validation_data):\n self.run_correctness_test(\n distribution,\n use_numpy,\n use_validation_data,\n partial_last_batch=True,\n training_epochs=1)\n\n @tf.__internal__.distribute.combinations.generate(\n keras_correctness_test_base.\n all_strategy_and_input_config_combinations_eager() +\n keras_correctness_test_base.multi_worker_mirrored_eager() +\n keras_correctness_test_base.test_combinations_with_tpu_strategies_graph())\n def test_cnn_with_batch_norm_correctness_and_partial_last_batch_eval(\n self, distribution, use_numpy, use_validation_data):\n self.run_correctness_test(\n distribution,\n use_numpy,\n use_validation_data,\n with_batch_norm='regular',\n partial_last_batch=True)\n\n\nif __name__ == '__main__':\n tf.__internal__.distribute.multi_process_runner.test_main()\n" ]
[ [ "tensorflow.compat.v2.control_dependencies", "tensorflow.compat.v2.abs", "tensorflow.python.util.tf_export.keras_export", "tensorflow.compat.v2.pow", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.gather", "tensorflow.compat.v2.raw_ops.ResourceApplyAdaMax", "tensorflow.compat.v2.group" ], [ "tensorflow.compat.v2.nest.map_structure", "tensorflow.compat.v2.data.Dataset.from_tensor_slices", "tensorflow.compat.v2.function", "numpy.random.random", "tensorflow.compat.v2.GradientTape", "tensorflow.compat.v2.__internal__.distribute.multi_process_runner.test_main", "tensorflow.compat.v2.__internal__.test.combinations.combine", "numpy.ones", "tensorflow.compat.v2.range", "numpy.random.rand", "tensorflow.compat.v2.matmul", "numpy.zeros", "tensorflow.compat.v2.data.Dataset.from_tensors" ], [ "tensorflow.compat.v2.Variable", "tensorflow.compat.v2.compat.v1.variables_initializer", "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.Graph", "numpy.array" ], [ "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.compat.v1.test.mock.patch.object" ], [ "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.python.eager.def_function.function", "tensorflow.test.main", "tensorflow.__internal__.feature_column.FeatureTransformationCache", "tensorflow.feature_column.sequence_categorical_column_with_vocabulary_list" ], [ "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.device", "tensorflow.concat", "numpy.random.random", "tensorflow.shape", "tensorflow.cast", "tensorflow.random.uniform", "tensorflow.test.main", "tensorflow.image.random_crop", "tensorflow.image.resize", "numpy.array" ], [ "tensorflow.compat.v2.sparse.to_dense", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.sparse.from_dense", "tensorflow.compat.v2.ragged.constant", "tensorflow.compat.v2.SparseTensor", "numpy.array" ], [ "numpy.sqrt", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.test.main" ], [ "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.python.eager.def_function.function", "tensorflow.test.main", "tensorflow.__internal__.feature_column.FeatureTransformationCache", "tensorflow.feature_column.sequence_categorical_column_with_vocabulary_list" ], [ "tensorflow.keras.layers.Concatenate", "tensorflow.strings.as_string", "tensorflow.keras.Input", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.Dense", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.random.uniform", "tensorflow.keras.Model", "tensorflow.keras.layers.Flatten", "tensorflow.random.set_seed" ], [ "tensorflow.compat.v2.executing_eagerly", "numpy.asarray", "tensorflow.compat.v2.__internal__.distribute.multi_process_runner.test_main", "numpy.random.randn", "numpy.prod", "numpy.random.randint" ] ]
kartik4949/keras-cv
[ "4c300f564d8ec99cd1351c445e1803ee6664915a", "4c300f564d8ec99cd1351c445e1803ee6664915a", "4c300f564d8ec99cd1351c445e1803ee6664915a" ]
[ "keras_cv/layers/preprocessing/random_sharpness_test.py", "keras_cv/layers/preprocessing/random_sharpness.py", "examples/layers/preprocessing/random_hue_demo.py" ]
[ "# Copyright 2022 The KerasCV Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport tensorflow as tf\n\nfrom keras_cv.layers import preprocessing\n\n\nclass RandomSharpnessTest(tf.test.TestCase):\n def test_random_sharpness_preserves_output_shape(self):\n img_shape = (50, 50, 3)\n xs = tf.stack(\n [2 * tf.ones(img_shape), tf.ones(img_shape)],\n axis=0,\n )\n\n layer = preprocessing.RandomSharpness(0.0, value_range=(0, 255))\n ys = layer(xs)\n\n self.assertEqual(xs.shape, ys.shape)\n self.assertAllClose(xs, ys)\n\n def test_random_sharpness_blur_effect_single_channel(self):\n xs = tf.expand_dims(\n tf.constant(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n ]\n ),\n axis=-1,\n )\n xs = tf.expand_dims(xs, axis=0)\n\n layer = preprocessing.RandomSharpness((1.0, 1.0), value_range=(0, 255))\n ys = layer(xs)\n\n self.assertEqual(xs.shape, ys.shape)\n\n result = tf.expand_dims(\n tf.constant(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1 / 13, 1 / 13, 1 / 13, 0, 0],\n [0, 0, 1 / 13, 5 / 13, 1 / 13, 0, 0],\n [0, 0, 1 / 13, 1 / 13, 1 / 13, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n ]\n ),\n axis=-1,\n )\n result = tf.expand_dims(result, axis=0)\n\n self.assertAllClose(ys, result)\n", "# Copyright 2022 The KerasCV Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport tensorflow as tf\n\nfrom keras_cv.utils import preprocessing\n\n\[email protected]_keras_serializable(package=\"keras_cv\")\nclass RandomSharpness(tf.keras.__internal__.layers.BaseImageAugmentationLayer):\n \"\"\"Randomly performs the sharpness operation on given images.\n\n The sharpness operation first performs a blur operation, then blends between the\n original image and the blurred image. This operation makes the edges of an image\n less sharp than they were in the original image.\n\n References:\n - [PIL](https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html)\n\n Args:\n factor: A tuple of two floats, a single float or `keras_cv.FactorSampler`.\n `factor` controls the extent to which the image sharpness is impacted.\n `factor=0.0` makes this layer perform a no-op operation, while a value of\n 1.0 uses the sharpened result entirely. Values between 0 and 1 result in\n linear interpolation between the original image and the sharpened image.\n Values should be between `0.0` and `1.0`. 
If a tuple is used, a `factor` is\n sampled between the two values for every image augmented. If a single float\n is used, a value between `0.0` and the passed float is sampled. In order to\n ensure the value is always the same, please pass a tuple with two identical\n floats: `(0.5, 0.5)`.\n value_range: the range of values the incoming images will have.\n Represented as a two number tuple written [low, high].\n This is typically either `[0, 1]` or `[0, 255]` depending\n on how your preprocessing pipeline is setup.\n \"\"\"\n\n def __init__(\n self,\n factor,\n value_range,\n seed=None,\n **kwargs,\n ):\n super().__init__(seed=seed, **kwargs)\n self.value_range = value_range\n self.factor = preprocessing.parse_factor(factor)\n self.seed = seed\n\n def get_random_transformation(self, image=None, label=None, bounding_box=None):\n return self.factor()\n\n def augment_image(self, image, transformation=None):\n image = preprocessing.transform_value_range(\n image, original_range=self.value_range, target_range=(0, 255)\n )\n original_image = image\n\n # Make image 4D for conv operation.\n image = tf.expand_dims(image, axis=0)\n\n # [1 1 1]\n # [1 5 1]\n # [1 1 1]\n # all divided by 13 is the default 3x3 gaussian smoothing kernel.\n # Correlating or Convolving with this filter is equivalent to performing a\n # gaussian blur.\n kernel = (\n tf.constant(\n [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]\n )\n / 13.0\n )\n\n # Tile across channel dimension.\n channels = tf.shape(image)[-1]\n kernel = tf.tile(kernel, [1, 1, channels, 1])\n strides = [1, 1, 1, 1]\n\n smoothed_image = tf.nn.depthwise_conv2d(\n image, kernel, strides, padding=\"VALID\", dilations=[1, 1]\n )\n smoothed_image = tf.clip_by_value(smoothed_image, 0.0, 255.0)\n smoothed_image = tf.squeeze(smoothed_image, axis=0)\n\n # For the borders of the resulting image, fill in the values of the\n # original image.\n mask = tf.ones_like(smoothed_image)\n padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])\n padded_smoothed_image = tf.pad(smoothed_image, [[1, 1], [1, 1], [0, 0]])\n\n result = tf.where(\n tf.equal(padded_mask, 1), padded_smoothed_image, original_image\n )\n # Blend the final result.\n result = preprocessing.blend(original_image, result, transformation)\n result = preprocessing.transform_value_range(\n result, original_range=(0, 255), target_range=self.value_range\n )\n return result\n\n def augment_label(self, label, transformation=None):\n return label\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\"factor\": self.factor, \"value_range\": self.value_range, \"seed\": self.seed}\n )\n return config\n", "# Copyright 2022 The KerasCV Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"random_hue_demo.py shows how to use the RandomHue preprocessing layer.\nOperates on the oxford_flowers102 dataset. 
In this script the flowers\nare loaded, then are passed through the preprocessing layers.\nFinally, they are shown using matplotlib.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\nfrom keras_cv.layers import preprocessing\n\nIMG_SIZE = (224, 224)\nBATCH_SIZE = 64\n\n\ndef resize(image, label):\n image = tf.image.resize(image, IMG_SIZE)\n return image, label\n\n\ndef main():\n data, ds_info = tfds.load(\"oxford_flowers102\", with_info=True, as_supervised=True)\n train_ds = data[\"train\"]\n\n train_ds = train_ds.map(lambda x, y: resize(x, y)).batch(BATCH_SIZE)\n random_hue = preprocessing.RandomHue(factor=(0.0, 1.0), value_range=(0, 255))\n train_ds = train_ds.map(\n lambda x, y: (random_hue(x), y), num_parallel_calls=tf.data.AUTOTUNE\n )\n\n for images, labels in train_ds.take(1):\n plt.figure(figsize=(8, 8))\n for i in range(9):\n plt.subplot(3, 3, i + 1)\n plt.imshow(images[i].numpy().astype(\"uint8\"))\n plt.axis(\"off\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.constant", "tensorflow.expand_dims", "tensorflow.ones" ], [ "tensorflow.clip_by_value", "tensorflow.constant", "tensorflow.shape", "tensorflow.equal", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.squeeze", "tensorflow.pad", "tensorflow.tile", "tensorflow.nn.depthwise_conv2d" ], [ "tensorflow.image.resize", "matplotlib.pyplot.subplot", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
ezvk7740/robotics-rl-srl
[ "aad209d6edd1bf28d886132fecd0e503d2a7af93" ]
[ "replay/compare_plots.py" ]
[ "import argparse\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib.ticker import FuncFormatter\n\nfrom replay.aggregate_plots import lightcolors, darkcolors, Y_LIM_SHAPED_REWARD, Y_LIM_SPARSE_REWARD, millions\nfrom srl_zoo.utils import printGreen, printRed\n\n# Init seaborn\nsns.set()\n# Style for the title\nfontstyle = {'fontname': 'DejaVu Sans', 'fontsize': 16}\n\n\ndef comparePlots(path, plots, y_limits, title=\"Learning Curve\",\n timesteps=False, truncate_x=-1, no_display=False):\n \"\"\"\n :param path: (str) path to the folder where the plots are stored\n :param plots: ([str]) List of saved plots as npz file\n :param y_limits: ([float]) y-limits for the plot\n :param title: (str) plot title\n :param timesteps: (bool) Plot timesteps instead of episodes\n :param truncate_x: (int) Truncate the experiments after n ticks on the x-axis\n :param no_display: (bool) Set to true, the plot won't be displayed (useful when only saving plot)\n \"\"\"\n y_list = []\n x_list = []\n for plot in plots:\n saved_plot = np.load('{}/{}'.format(path, plot))\n x_list.append(saved_plot['x'])\n y_list.append(saved_plot['y'])\n\n lengths = list(map(len, x_list))\n min_x, max_x = np.min(lengths), np.max(lengths)\n\n print(\"Min x: {}\".format(min_x))\n print(\"Max x: {}\".format(max_x))\n\n if truncate_x > 0:\n min_x = min(truncate_x, min_x)\n print(\"Truncating the x-axis at {}\".format(min_x))\n\n x = np.array(x_list[0][:min_x])\n\n printGreen(\"{} Experiments\".format(len(y_list)))\n # print(\"Min, Max rewards:\", np.min(y), np.max(y))\n\n fig = plt.figure(title)\n for i in range(len(y_list)):\n label = plots[i].split('.npz')[0]\n y = y_list[i][:, :min_x]\n print('{}: {} experiments'.format(label, len(y)))\n # Compute mean for different seeds\n m = np.mean(y, axis=0)\n # Compute standard error\n s = np.squeeze(np.asarray(np.std(y, axis=0)))\n n = y.shape[0]\n plt.fill_between(x, m - s / np.sqrt(n), m + s / np.sqrt(n), color=lightcolors[i % len(lightcolors)], alpha=0.5)\n plt.plot(x, m, color=darkcolors[i % len(darkcolors)], label=label, linewidth=2)\n\n if timesteps:\n formatter = FuncFormatter(millions)\n plt.xlabel('Number of Timesteps')\n fig.axes[0].xaxis.set_major_formatter(formatter)\n else:\n plt.xlabel('Number of Episodes')\n plt.ylabel('Rewards')\n\n plt.title(title, **fontstyle)\n plt.ylim(y_limits)\n\n plt.legend(framealpha=0.8, frameon=True, labelspacing=0.01, loc='lower right', fontsize=16)\n\n if not no_display:\n plt.show()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Plot trained agent\")\n parser.add_argument('-i', '--input-dir', help='folder with the plots as npz files', type=str, required=True)\n parser.add_argument('-t', '--title', help='Plot title', type=str, default='Learning Curve')\n parser.add_argument('--episode_window', type=int, default=40,\n help='Episode window for moving average plot (default: 40)')\n parser.add_argument('--shape-reward', action='store_true', default=False,\n help='Change the y_limit to correspond shaped reward bounds')\n parser.add_argument('--y-lim', nargs=2, type=float, default=[-1, -1], help=\"limits for the y axis\")\n parser.add_argument('--truncate-x', type=int, default=-1,\n help=\"Truncate the experiments after n ticks on the x-axis (default: -1, no truncation)\")\n parser.add_argument('--timesteps', action='store_true', default=False,\n help='Plot timesteps instead of episodes')\n parser.add_argument('--no-display', action='store_true', default=False, 
help='Do not display plot')\n args = parser.parse_args()\n\n y_limits = args.y_lim\n if y_limits[0] == y_limits[1]:\n if args.shape_reward:\n y_limits = Y_LIM_SHAPED_REWARD\n else:\n y_limits = Y_LIM_SPARSE_REWARD\n print(\"Using default limits:\", y_limits)\n\n plots = [f for f in os.listdir(args.input_dir) if f.endswith('.npz')]\n plots.sort()\n\n if len(plots) == 0:\n printRed(\"No npz files found in {}\".format(args.input_dir))\n exit(-1)\n\n comparePlots(args.input_dir, plots, title=args.title, y_limits=y_limits, no_display=args.no_display,\n timesteps=args.timesteps, truncate_x=args.truncate_x)\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.min", "matplotlib.pyplot.ylim", "matplotlib.pyplot.figure", "numpy.max", "numpy.std", "numpy.mean", "matplotlib.pyplot.xlabel", "matplotlib.ticker.FuncFormatter", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
maobubu/stock-prediction
[ "b2442ccb027c25809a33a610f010cdec077bf61a", "b2442ccb027c25809a33a610f010cdec077bf61a" ]
[ "stuff/preprocess_bilstm.py", "scripts/ESIM/main.py" ]
[ "import json\nimport pandas as pd\nimport re, os, glob\nimport numpy as np\nfrom collections import defaultdict\nimport nltk\nimport string\nfrom gensim.models import Phrases\nfrom gensim.utils import SaveLoad\nfrom gensim.models.phrases import Phraser\nfrom nltk.corpus import stopwords # Import the stop word list\n#from sklearn.model_selection import train_test_split\n#from sklearn.feature_extraction.text import CountVectorizer\nfrom datetime import datetime\nfrom datetime import timedelta\nimport timeit\nimport sys\nfrom tqdm import tqdm\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import wordnet as wn\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer\n#from nltk import pos_tag, word_tokenize\nfrom nltk.tag import PerceptronTagger\n# Pywsd's Lemmatizer.\nporter = PorterStemmer()\nwnl = WordNetLemmatizer()\ntagger = PerceptronTagger()\npos_tag = tagger.tag\ntokenizer = RegexpTokenizer(r'\\w+')\n\n\ndef lemmatize(ambiguous_word, pos=None, neverstem=True, \n lemmatizer=wnl, stemmer=porter):\n \"\"\"\n Tries to convert a surface word into lemma, and if lemmatize word is not in\n wordnet then try and convert surface word into its stem.\n This is to handle the case where users input a surface word as an ambiguous \n word and the surface word is a not a lemma.\n \"\"\"\n if pos:\n lemma = lemmatizer.lemmatize(ambiguous_word, pos=pos)\n else:\n lemma = lemmatizer.lemmatize(ambiguous_word)\n stem = stemmer.stem(ambiguous_word)\n # Ensure that ambiguous word is a lemma.\n if not wn.synsets(lemma):\n if neverstem:\n return ambiguous_word\n if not wn.synsets(stem):\n return ambiguous_word\n else:\n return stem\n else:\n return lemma\n\ndef penn2morphy(penntag, returnNone=False):\n morphy_tag = {'NN':wn.NOUN, 'JJ':wn.ADJ,\n 'VB':wn.VERB, 'RB':wn.ADV}\n try:\n return morphy_tag[penntag[:2]]\n except:\n return None if returnNone else ''\n\ndef word_tokenize(text,tokenize=tokenizer):\n return tokenize.tokenize(text.lower())#doesn't remove stopwords\n #return [w for w in tokenize.tokenize(text.lower()) if not w in stopwords.words(\"english\")]\n\ndef lemmatize_sentence(sentence, neverstem=False, keepWordPOS=False, \n tokenizer=word_tokenize, postagger=pos_tag, \n lemmatizer=wnl, stemmer=porter):\n words, lemmas, poss = [], [], []\n for word, pos in postagger(sentence):#change tokenizer(sentence) to sentence\n pos = penn2morphy(pos)\n lemmas.append(lemmatize(word.lower(), pos, neverstem,\n lemmatizer, stemmer))\n poss.append(pos)\n words.append(word)\n if keepWordPOS:\n return words, lemmas, [None if i == '' else i for i in poss]\n return lemmas\n\ndef Ding_abstract(label,bigram,trigram,types='title'):\n start = timeit.default_timer()\n print('start processing Ding_abstract data')\n article = defaultdict(list)\n with open('/home/jialong/Documents/phrase_embedding/j_news.json', \"r\") as data: # title+ abstract + article\n title = pd.DataFrame(json.loads(line) for line in data).set_index('date')\n title=title.replace(['UPDATE\\s\\d-', \"'s\"], '', regex=True)\n title=title.replace(['\\d+\\S\\d+','\\d+'], 'xxx', regex=True)\n title[types] = title[types].str.replace('[{}]'.format(string.punctuation), ' ')\n title = title.drop_duplicates(subset=[types], keep='first')\n for j in label.index:\n try:\n day = (datetime.strptime(j, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')\n article[j].extend(set(title.loc[day, types].values))\n except (AttributeError, KeyError, TypeError) as e:\n continue\n train_ding = split(article, label, bigram, trigram)\n length = train_ding.shape[0]\n 
train = train_ding.iloc[0:int(length * 0.8), :]\n validate = train_ding.iloc[int(length * 0.8):int(length * 0.9), :]\n test = train_ding.iloc[int(length * 0.9):-1, :]\n stop = timeit.default_timer()\n print(\"run time for ding:\", stop - start)\n #os.chdir('/Users/maobu/Dropbox/stock/data/ding/')\n os.chdir('/home/jialong/Documents/phrase_embedding/data/yunke_'+types+'/')\n train.to_csv(\"one_train.csv\", index=False, encoding='utf-8')\n df_train = np.split(train, [1], axis=1)\n df_train[1].to_csv('one_train_text.txt', header=None, index=None, encoding='utf-8')\n df_train[0].to_csv('one_train_label.txt', header=None, index=None, encoding='utf-8')\n validate.to_csv(\"one_validate.csv\", index=False, encoding='utf-8')\n df_validate = np.split(validate, [1], axis=1)\n df_validate[1].to_csv('one_validate_text.txt', header=None, index=None, encoding='utf-8')\n df_validate[0].to_csv('one_validate_label.txt', header=None, index=None, encoding='utf-8')\n test.to_csv(\"one_test.csv\", index=False, encoding='utf-8')\n df_test = np.split(test, [1], axis=1)\n df_test[1].to_csv('one_test_text.txt', header=None, index=None, encoding='utf-8')\n df_test[0].to_csv('one_test_label.txt', header=None, index=None, encoding='utf-8')\n\ndef Ding(label, bigram, trigram,types='title'):\n start = timeit.default_timer()\n print('start processing Ding data')\n article = defaultdict(list)\n title = pd.read_table('reuters_news_title.txt', names=[\"Date\", 'title']).set_index('Date')\n #tt=pd.read_table('bloomberg_news_title.txt',names = [\"Date\", 'title']).set_index('Date')\n #title=title.append(tt)\n title=title.replace(['UPDATE\\s\\d-', \"'s\"], '', regex=True)\n title=title.replace(['\\d+\\S\\d+','\\d+'], 'xxx', regex=True)\n #title['title'] = title['title'].str.replace('[{}]'.format(string.digits), '_NUM_ ')\n title[types] = title[types].str.replace('[{}]'.format(string.punctuation), ' ')\n title = title.drop_duplicates(subset=[types], keep='first')\n for j in label.index:\n try:\n day = (datetime.strptime(j, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')\n article[j].extend(set(title.loc[day, types].values))\n except (AttributeError, KeyError, TypeError) as e:\n continue\n train_ding = split(article, label, bigram, trigram)\n length = train_ding.shape[0]\n train = train_ding.iloc[0:int(length * 0.8), :]\n validate = train_ding.iloc[int(length * 0.8):int(length * 0.9), :]\n test = train_ding.iloc[int(length * 0.9):-1, :]\n stop = timeit.default_timer()\n print(\"run time for ding:\", stop - start)\n os.chdir('/Users/maobu/Dropbox/stock/data/ding/')\n train.to_csv(\"one_train.csv\", index=False, encoding='utf-8')\n df_train = np.split(train, [1], axis=1)\n df_train[1].to_csv('one_train_text.txt', header=None, index=None, encoding='utf-8')\n df_train[0].to_csv('one_train_label.txt', header=None, index=None, encoding='utf-8')\n validate.to_csv(\"one_validate.csv\", index=False, encoding='utf-8')\n df_validate = np.split(validate, [1], axis=1)\n df_validate[1].to_csv('one_validate_text.txt', header=None, index=None, encoding='utf-8')\n df_validate[0].to_csv('one_validate_label.txt', header=None, index=None, encoding='utf-8')\n test.to_csv(\"one_test.csv\", index=False, encoding='utf-8')\n df_test = np.split(test, [1], axis=1)\n df_test[1].to_csv('one_test_text.txt', header=None, index=None, encoding='utf-8')\n df_test[0].to_csv('one_test_label.txt', header=None, index=None, encoding='utf-8')\n\n\ndef convert(reviews, bigram, trigram, remove_stopwords=True):\n #letters_only = re.sub(\"[^a-zA-Z0-9]\", \" \", 
str(reviews))\n #words = list(filter(None, letters_only.lower().split()))\n words= word_tokenize(reviews)#tokenize and remove punctuation\n if remove_stopwords:#remove stopwords\n words = [w for w in words if not w in stopwords.words(\"english\")]\n words = trigram[bigram[words]]#to phrase\n words= lemmatize_sentence(words)#lemma\n return \" \".join(words) + ' .'\n\n\ndef split(data, label, bigram, trigram):\n data_clean = []\n lab = []\n date = []\n for key, value in data.items():\n for j in set(value):\n try:\n lab.append(label[key])\n data_clean.append(convert(j, bigram, trigram, True))\n except (KeyError, TypeError) as e:\n continue\n print(len(data_clean))\n ll = pd.DataFrame({'label': lab}, dtype='int32')\n d = pd.DataFrame({'title': data_clean}) # put the convert words into a new Dataframe\n final = pd.merge(ll, d, left_index=True, right_index=True) # merge two list\n return final\n\n\ndef maobu(label, d, article, abstract, days, add=False):\n for j in label.index:\n # for i in range(1, days + 1):\n try:\n day = (datetime.strptime(j, '%Y-%m-%d') - timedelta(days=days)).strftime('%Y-%m-%d')\n article[j].extend(set(d.loc[day, \"title\"].values))\n if add:\n abstract[j].extend(set(d.loc[day, \"abstract\"].values))\n except (AttributeError, KeyError, TypeError) as e:\n continue\n\n\ndef main():\n arg1 = sys.argv[1]\n one_train, abstract_train, seven_train, month_train = defaultdict(list), defaultdict(list), defaultdict(\n list), defaultdict(list)\n one_test, seven_test, month_test = defaultdict(list), defaultdict(list), defaultdict(list)\n # nltk.download('stopwords')\n print(\"start pre-processing the data\")\n bigram = SaveLoad.load(\"data/phrase_xxx/big_phrase.pickle\")\n trigram = SaveLoad.load(\"data/phrase_xxx/trig_phrase.pickle\")\n label_one = pd.read_pickle(\"data/label_one_new.pickle\")\n label_seven = pd.read_pickle(\"data/label_seven.pickle\")\n label_month = pd.read_pickle(\"data/label_month.pickle\")\n print(\"starting the training selecting phase\")\n Ding(label_one, bigram, trigram,types=arg1)\n #Ding_abstract(label_one, bigram, trigram,types=str(arg1))\n '''os.chdir('/home/huicheng/PycharmProjects/stock/pickle')\n subfolder_list = glob.glob('*.pickle')\n pbar = tqdm(total=len(subfolder_list))\n for i, file in enumerate(glob.glob(\"*.pickle\")):\n D = pd.read_pickle(file)\n pbar.set_description('processing number:{} name:{}'.format(i, file))\n pbar.update(1)\n maobu(label_one, D, one_train, abstract_train, 1, add=False) # add abstract or not\n # maobu(label_seven, D, seven_train, 7)\n # maobu(label_month, D, month_train, 30)\n pbar.close()\n start = timeit.default_timer()\n train_one = split(one_train, label_one, bigram, trigram)\n length = train_one.shape[0]\n train = train_one.iloc[0:int(length * 0.8), :]\n validate = train_one.iloc[int(length * 0.8):int(length * 0.9), :]\n test = train_one.iloc[int(length * 0.9):-1, :]\n # train_seven = split(seven_train, label_seven)\n # train_month = split(month_train, label_month)\n stop = timeit.default_timer()\n print(\"run time for training:\", stop - start)\n os.chdir('/home/huicheng/PycharmProjects/stock/data/our')\n train.to_csv(\"one_train.csv\", index=False, encoding='utf-8')\n df_train = np.split(train, [1], axis=1)\n df_train[1].to_csv('one_train_text.txt', header=None, index=None, encoding='utf-8')\n df_train[0].to_csv('one_train_label.txt', header=None, index=None, encoding='utf-8')\n validate.to_csv(\"one_validate.csv\", index=False, encoding='utf-8')\n df_validate = np.split(validate, [1], axis=1)\n 
df_validate[1].to_csv('one_validate_text.txt', header=None, index=None, encoding='utf-8')\n df_validate[0].to_csv('one_validate_label.txt', header=None, index=None, encoding='utf-8')\n test.to_csv(\"one_test.csv\", index=False, encoding='utf-8')\n df_test = np.split(test, [1], axis=1)\n df_test[1].to_csv('one_test_text.txt', header=None, index=None, encoding='utf-8')\n df_test[0].to_csv('one_test_label.txt', header=None, index=None, encoding='utf-8')\n '''\n\nif __name__ == '__main__':\n main()\n", "import os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\ngpus = '1'\nimport numpy\nimport tensorflow as tf\nimport logging\nfrom tensorflow import logging as log\nfrom collections import OrderedDict\nfrom data_iterator import TextIterator\nfrom tensorflow.contrib import rnn\nimport warnings\nimport pickle as pkl\nimport sys\nimport pprint\nimport pdb\nimport os\nimport copy\nimport time\n\nlogger = logging.getLogger(__name__)\n\n\ndef _s(pp, name): # add prefix\n return '{}_{}'.format(pp, name)\n\n\ndef load_params(path, params):\n pp = numpy.load(path)\n for kk, vv in params.items():\n if kk not in pp:\n warnings.warn('{} is not in the archive'.format(kk))\n continue\n params[kk] = pp[kk]\n\n return params\n\n\ndef ortho_weight(ndim): # used by norm_weight below\n \"\"\"\n Random orthogonal weights\n\n Used by norm_weights(below), in which case, we\n are ensuring that the rows are orthogonal\n (i.e. W = U \Sigma V, U has the same\n # of rows, V has the same # of cols)\n \"\"\"\n W = numpy.random.randn(ndim, ndim)\n u, s, v = numpy.linalg.svd(W)\n return u.astype('float32')\n\n\ndef norm_weight(nin, nout=None, scale=0.01, ortho=True):\n \"\"\"\n Random weights drawn from a Gaussian\n \"\"\"\n if nout is None:\n nout = nin\n if nout == nin and ortho:\n W = ortho_weight(nin)\n else:\n # W = numpy.random.uniform(-0.5,0.5,size=(nin,nout))\n W = scale * numpy.random.randn(nin, nout)\n return W.astype('float32')\n\n\ndef prepare_data(sequence, sequence_d1, sequence_d2, labels, options, maxlen=None, max_word=100):\n # length = [len(s) for s in 
sequence]\n length, length_d1, length_d2 = [], [], []\n for i, d1, d2 in zip(sequence, sequence_d1, sequence_d2):\n dd1, dd2 = list(), list()\n length.append(len(i))\n for day in d1:\n dd1.append(len(day))\n length_d1.append(dd1)\n for day in d2:\n dd2.append(len(day))\n length_d2.append(dd2)\n if maxlen is not None: # max length is the sentence level\n new_sequence = []\n new_lengths = []\n new_sequence_d1 = []\n new_lengths_d1 = []\n new_sequence_d2 = []\n new_lengths_d2 = []\n for l, s, ld1, sd1, ld2, sd2 in zip(length, sequence, length_d1, sequence_d1, length_d2, sequence_d2):\n dd1, lld1, dd2, lld2 = list(), list(), list(), list()\n if l < maxlen:\n new_sequence.append(s)\n new_lengths.append(l)\n for i, j in zip(ld1, sd1):\n if i < maxlen:\n dd1.append(j)\n lld1.append(i)\n new_sequence_d1.append(dd1)\n new_lengths_d1.append(lld1)\n for i, j in zip(ld2, sd2):\n if i < maxlen:\n dd2.append(j)\n lld2.append(i)\n new_sequence_d2.append(dd2)\n new_lengths_d2.append(lld2)\n\n length = new_lengths # This step is to filter the sentence which length is bigger\n sequence = new_sequence # than the max length. length means number of news. sequence means \n # length of each sentence\n length_d1 = new_lengths_d1\n sequence_d1 = new_sequence_d1\n length_d2 = new_lengths_d2\n sequence_d2 = new_sequence_d2\n day1 = len(sequence_d1[0])\n day2 = len(sequence_d2[0])\n ##TODO need to be careful, set the max length bigger to avoid bug\n if len(length) < 1:\n return None, None, None, None, None, None, None\n\n maxlen_x = numpy.max(length) # max time step\n maxlen_xd1 = numpy.max([numpy.max(i) for i in length_d1])\n maxlen_xd2 = numpy.max([numpy.max(i) for i in length_d2])\n n_samples = len(sequence) # number of samples== batch\n max_sequence = max(len(j) for i in sequence for j in i) # find the sequence max length\n max_sequence_d1 = max(len(j) for i in sequence_d1 for z in i for j in z)\n max_sequence_d2 = max(len(j) for i in sequence_d2 for z in i for j in z)\n max_sequence = max_word if max_sequence > max_word else max_sequence # shrink the data size\n max_sequence_d1 = max_word if max_sequence_d1 > max_word else max_sequence_d1 # shrink the data size\n max_sequence_d2 = max_word if max_sequence_d2 > max_word else max_sequence_d2 # shrink the data size\n ##TODO for x\n x = numpy.zeros((maxlen_x, n_samples, max_sequence)).astype('int64')\n x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')\n ##TODO for x_d1\n x_d1 = numpy.zeros((day1, maxlen_xd1, n_samples, max_sequence_d1)).astype('int64')\n x_d1_mask = numpy.zeros((day1,maxlen_xd1, n_samples)).astype('float32')\n ##TODO for x_d2\n x_d2 = numpy.zeros((day2, maxlen_xd2, n_samples, max_sequence_d2)).astype('int64')\n x_d2_mask = numpy.zeros((day2,maxlen_xd2, n_samples)).astype('float32')\n # l = numpy.array(labels).astype('int64')\n ##TODO for label\n l = numpy.zeros((n_samples,)).astype('int64')\n for index, (i, j, k, ll) in enumerate(zip(sequence, sequence_d1, sequence_d2, labels)): # batch size\n l[index] = ll\n for idx, ss in enumerate(i): # time step\n # x[idx, index, :sequence_length[idx]] = ss\n if len(ss) < max_sequence:\n x[idx, index, :len(ss)] = ss\n else:\n x[idx, index, :max_sequence] = ss[:max_sequence]\n x_mask[idx, index] = 1.\n for jj, day in enumerate(j):\n for idx, ss in enumerate(day):\n if len(ss) < max_sequence_d1:\n x_d1[jj, idx, index, :len(ss)] = ss\n else:\n x_d1[jj, idx, index, :max_sequence_d1] = ss[:max_sequence_d1]\n x_d1_mask[jj, idx, index] = 1.\n for jj, day in enumerate(k):\n for idx, ss in enumerate(day):\n 
if len(ss) < max_sequence_d2:\n x_d2[jj, idx, index, :len(ss)] = ss\n else:\n x_d2[jj, idx, index, :max_sequence_d2] = ss[:max_sequence_d2]\n x_d2_mask[jj, idx, index] = 1.\n\n return x, x_mask, x_d1, x_d1_mask, x_d2, x_d2_mask, l\n\n\ndef old_sequence_lstm(input, sequence_mask, keep_prob, is_training, options):\n # input time_step,batch,sequence_step,embedding, 40*32*13*100\n # sequence_mask shape is time_step,batch,sequence_step, 40*32*13\n def fn(inp):\n out = bilstm_filter(tf.transpose(inp[0], [1, 0, 2]), tf.transpose(inp[1], [1, 0]), keep_prob,\n prefix='sequence_encode', dim=options['dim'],\n is_training=is_training) # output shape: sequence_step,batch,2*lstm_unit(concate) 13*32*600\n return tf.transpose(tf.concat(out, axis=2), perm=[1, 0, 2])\n\n outputs = tf.map_fn(fn, (input, sequence_mask), dtype=tf.float32)\n print(tf.shape(outputs)) # outputs shape 40*32*13*600\n outputs = outputs * tf.expand_dims(sequence_mask, -1) # mask the output\n with tf.variable_scope('words_attention'):\n hidden = tf.layers.dense(outputs, units=300, activation=tf.nn.tanh, use_bias=False,\n kernel_initializer=tf.random_normal_initializer(stddev=0.02), reuse=tf.AUTO_REUSE)\n hidden = tf.nn.dropout(hidden, keep_prob)\n # hidden 40*32*13*1200 #attention 40*32*13*1\n attention = tf.layers.dense(hidden, units=1, use_bias=False,\n kernel_initializer=tf.random_normal_initializer(stddev=0.02), activation=None)\n padding = tf.fill(tf.shape(attention), float(-1e8)) # float('-inf')\n attention = tf.where(tf.equal(tf.expand_dims(sequence_mask, -1), 0.), padding,\n attention) # fill 0 with -1e8 for softmax\n attention = tf.transpose(tf.nn.softmax(tf.transpose(attention, perm=[0, 1, 3, 2])),\n perm=[0, 1, 3, 2]) # attention 40*32*13*r\n attention = attention * tf.expand_dims(sequence_mask, -1) # mask the attention\n outputs = tf.reduce_sum(outputs * attention, axis=2)\n print(tf.shape(outputs))\n return outputs\n\n\ndef sequence_lstm(input, sequence_mask, keep_prob, is_training, options):\n # input time_step,batch,sequence_step,embedding, 40*32*13*100\n time_step = tf.shape(input)[0]\n # time_step = input.get_shape().as_list()[0]\n output_list = tf.TensorArray(dtype=tf.float32, size=time_step)\n # sequence_mask shape is time_step,batch,sequence_step, 40*32*13\n t = tf.constant(0, dtype=tf.int32)\n\n def cond(i, *args):\n return i < time_step\n\n def body(i, x, mask, out_):\n out = bilstm_filter(tf.transpose(x[i], [1, 0, 2]), tf.transpose(mask[i], [1, 0]), keep_prob,\n prefix='sequence_encode', dim=options['dim'],\n is_training=is_training) # output shape: sequence_step,batch,2*lstm_unit(concate) 13*32*600\n '''out = bilstm_filter(tf.concat(out, 2) * tf.expand_dims(tf.transpose(mask[i], [1, 0]), 2), tf.transpose(mask[i], [1, 0]), keep_prob,\n prefix='sequence_encode', dim=options['dim'],\n is_training=is_training)\n '''\n out = tf.concat(out, 2) * tf.expand_dims(tf.transpose(mask[i], [1, 0]), -1) # mask the output 13*32*600\n att = attention_v1(tf.transpose(out, [1, 0, 2]), mask[i],\n name='attention_1', keep=keep_prob) # attention shape 32*600\n\n out_ = out_.write(i, att)\n\n return i + 1, x, mask, out_\n\n _, _, _, result = tf.while_loop(cond, body, [t, input, sequence_mask, output_list])\n result = result.stack() # result shape is time_step,batch,hidden units 40*32*600\n\n return result\n\n\ndef attention_v1(input, masks, name='attention', nin=600, keep=1.0):\n # input is batch,time_step,hidden_state 32*40*600 mask 32*40\n # hidden layer is:batch,hidden_shape,attention_hidden_size 32*40*1200 or 32*40*600\n # 
attention shape after squeeze is 32*40, # batch,time_step,attention_size 32*40*1\n hidden = tf.layers.dense(input, nin / 2, activation=tf.nn.tanh, use_bias=False,\n kernel_initializer=tf.random_normal_initializer(stddev=0.02),\n name=_s(name, 'hidden'), reuse=tf.AUTO_REUSE)\n hidden = tf.nn.dropout(hidden, keep)\n attention = tf.layers.dense(hidden, 1 , activation=None, use_bias=False,\n kernel_initializer=tf.random_normal_initializer(stddev=0.02), name=_s(name, 'out'),\n reuse=tf.AUTO_REUSE)\n padding = tf.fill(tf.shape(attention), float('-1e8')) # float('-inf')\n attention = tf.where(tf.equal(tf.expand_dims(masks,-1), 0.), padding, attention) # fill 0 with a small number for softmax\n attention = tf.nn.softmax(attention, 1) *tf.expand_dims(masks,-1) # 32*40*r #mask the attention here is not really neccesary,\n outputs = tf.reduce_sum(input * attention, axis=1)#32*600\n #outputs = tf.squeeze(tf.matmul(tf.transpose(attention, [0, 2, 1]), input)) # transpose to batch,hidden,time_step\n return outputs\n\ndef attention_v2(input, masks, name='attention', nin=600, keep=1.0, r=4, beta=1.):\n # input is batch,time_step,hidden_state 32*40*600 mask 32*40\n # hidden layer is:batch,hidden_shape,attention_hidden_size 32*40*1200 or 32*40*600\n # attention shape after squeeze is 32*40, # batch,time_step,attention_size 32*40*1\n masks = tf.stack([masks] * r, -1) # copy r time for filling 32*40*r\n iden = tf.eye(tf.shape(input)[1], batch_shape=[tf.shape(input)[0]]) # an identity matrix 32*40*40\n hidden = tf.layers.dense(input, nin / 2, activation=tf.nn.tanh, use_bias=False,\n kernel_initializer=tf.random_normal_initializer(stddev=0.02),\n name=_s(name, 'hidden'), reuse=tf.AUTO_REUSE)\n hidden = tf.nn.dropout(hidden, keep)\n attention = tf.layers.dense(hidden, r, activation=None, use_bias=False,\n kernel_initializer=tf.random_normal_initializer(stddev=0.02), name=_s(name, 'out'),\n reuse=tf.AUTO_REUSE)\n padding = tf.fill(tf.shape(attention), float('-1e8')) # float('-inf')\n attention = tf.where(tf.equal(masks, 0.), padding, attention) # fill 0 with a small number for softmax\n attention = tf.nn.softmax(attention, 1) * masks # 32*40*r #mask the attention here is not really neccesary,\n penalty = tf.norm((tf.matmul(attention, tf.transpose(attention, [0, 2, 1])) - iden), ord='fro',\n axis=(-2, -1)) # the Frobenius norm penalty 32 dimension\n attention = attention + beta * tf.expand_dims(tf.expand_dims(penalty, -1), -1) # expand twice\n # outputs = tf.reduce_sum(input * attention, axis=1)#32*600\n outputs = tf.matmul(tf.transpose(attention, [0, 2, 1]), input) # transpose to batch,hidden,time_step\n outputs = tf.reshape(outputs, [tf.shape(outputs)[0], -1])\n if name == 'attention_2':\n outputs.set_shape([None, nin * (r ** 2)])\n else:\n outputs.set_shape([None, nin * r])\n\n return outputs # result shape is batch, hidden_unit 32*600\n\n\ndef fflayer_2D(options, input, name='feed_forward', activation_function=None, nin=None, nout=None):\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n W = tf.get_variable(\n _s(name, 'W'),\n shape=[nin, nout],\n # initializer=tf.random_uniform_initializer(-0.1, 0.1),\n initializer=tf.random_normal_initializer(stddev=0.02),\n dtype=tf.float32\n )\n bias = tf.get_variable(\n _s(name, 'bias'),\n shape=[nout],\n initializer=tf.constant_initializer(0.),\n dtype=tf.float32\n )\n\n # result = tf.nn.bias_add(tf.matmul(input, W), bias)\n result = tf.nn.bias_add(tf.tensordot(input, W, [[-1], [0]]), bias)\n if activation_function is None:\n outputs = result\n else:\n outputs = 
activation_function(result)\n return outputs\n\n\ndef bilstm_filter(input, mask, keep_prob, prefix='lstm', dim=300, is_training=True):\n with tf.variable_scope(name_or_scope=prefix, reuse=tf.AUTO_REUSE):\n sequence = tf.cast(tf.reduce_sum(mask, 0), tf.int32)\n lstm_fw_cell = rnn.LSTMCell(dim, forget_bias=0.0, initializer=tf.orthogonal_initializer(), state_is_tuple=True)\n # back directions\n lstm_bw_cell = rnn.LSTMCell(dim, forget_bias=0.0, initializer=tf.orthogonal_initializer(), state_is_tuple=True)\n keep_rate = tf.cond(is_training is not False and keep_prob < 1, lambda: 0.8, lambda: 1.0)\n cell_dp_fw = rnn.DropoutWrapper(cell=lstm_fw_cell, output_keep_prob=keep_rate)\n cell_dp_bw = rnn.DropoutWrapper(cell=lstm_bw_cell, output_keep_prob=keep_rate)\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_dp_fw, cell_dp_bw, input, sequence_length=sequence,\n dtype=tf.float32,\n time_major=True)\n return outputs\n\n\ndef init_params(options, worddicts):\n params = OrderedDict()\n # embedding\n params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])\n # read embedding from GloVe\n if options['embedding']:\n with open(options['embedding'], 'r') as f:\n for line in f:\n tmp = line.split()\n word = tmp[0]\n vector = tmp[1:]\n if word in worddicts and worddicts[word] < options['n_words']:\n try:\n params['Wemb'][worddicts[word], :] = vector\n # encoder: bidirectional RNN\n except ValueError as e:\n print(str(e))\n return params\n\n\ndef word_embedding(options, params):\n embeddings = tf.get_variable(\"embeddings\", shape=[options['n_words'], options['dim_word']],\n initializer=tf.constant_initializer(numpy.array(params['Wemb'])))\n return embeddings\n\n\ndef build_model(embedding, options):\n \"\"\" Builds the entire computational graph used for training\n \"\"\"\n # description string: #words x #samples\n with tf.device('/gpu:1'):\n with tf.variable_scope('input'):\n x = tf.placeholder(tf.int64, shape=[None, None, None],\n name='x') # 3D vector timestep, batch and sequence(before embedding)40*32*13\n x_mask = tf.placeholder(tf.float32, shape=[None, None], name='x_mask') # mask time step, batch\n y = tf.placeholder(tf.int64, shape=[None], name='y')\n ##TODO important\n keep_prob = tf.placeholder(tf.float32, [], name='keep_prob')\n is_training = tf.placeholder(tf.bool, name='is_training')\n ##TODO important\n # n_timesteps = x.get_shape().as_list()[0] # time steps\n # n_samples = x.get_shape().as_list()[1] # n samples\n sequence_mask = tf.cast(tf.abs(tf.sign(x)), tf.float32) # 3D\n n_timesteps = tf.shape(x)[0] # time steps\n n_samples = tf.shape(x)[1] # n samples\n # # word embedding\n ##TODO word embedding\n emb = tf.nn.embedding_lookup(embedding, x)\n with tf.device('/gpu:1'):\n # emb = tf.reduce_mean(emb, -2) # average embedding\n # fed into the input of BILSTM from the official document\n '''if options['use_dropout']:\n emb = tf.nn.dropout(emb, keep_prob)'''\n emb = sequence_lstm(emb, sequence_mask, keep_prob, is_training, options)\n emb = emb * tf.expand_dims(x_mask, -1) # mask before attention\n # TODO bilstm layers\n # Change the time step and batch\n\n att = attention_v1(tf.transpose(emb, [1, 0, 2]), tf.transpose(x_mask, [1, 0]),\n name='attention_2', keep=keep_prob) # already masked after attention\n # maxpolling and sum pooling from batch\n if options['use_dropout']:\n att = tf.nn.dropout(att, keep_prob)\n\n '''conv1 = tf.layers.conv2d(inputs=tf.expand_dims(tf.transpose(emb,[1,0,2])),filters=32,kernel_size=[3, 2400],padding=\"same\",activation=tf.nn.relu)\n pool1 = 
tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)'''\n logit = fflayer_2D(options, att, name='ff', activation_function=tf.nn.tanh, nin=2 * options['dim'],\n nout=300) # 2 * options['dim']'''\n if options['use_dropout']:\n logit = tf.nn.dropout(logit, keep_prob)\n pred = fflayer_2D(options, logit, name='fout', activation_function=None, nin=300, nout=2)\n # with tf.device('/cpu:0'):\n logger.info('Building f_cost...')\n # todo not same\n labels = tf.one_hot(y, depth=2, axis=1)\n # labels = y\n preds = tf.nn.softmax(pred, 1)\n # preds = tf.nn.sigmoid(pred)\n # pred=tf.reshape(pred,[-1])\n cost = tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=labels)\n # cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,logits=pred),1)\n # cost = -tf.reduce_sum((tf.cast(labels, tf.float32) * tf.log(preds + 1e-8)),axis=1)\n # cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=y)\n logger.info('Done')\n '''\n logit1 = tf.reduce_sum(ctx1 * tf.expand_dims(x_mask, 2), 0) / tf.expand_dims(tf.reduce_sum(x_mask, 0), 1)\n logit2 = tf.reduce_max(ctx1 * tf.expand_dims(x_mask, 2), 0)\n logit = tf.concat([logit1, logit2], 1)\n '''\n\n with tf.variable_scope('logging'):\n tf.summary.scalar('current_cost', tf.reduce_mean(cost))\n tf.summary.histogram('predicted_value', preds)\n summary = tf.summary.merge_all()\n\n return is_training, cost, x, x_mask, y, n_timesteps, preds, summary\n\n\ndef predict_pro_acc(sess, cost, prepare_data, model_options, iterator, maxlen, correct_pred, pred, summary, eidx,\n is_training, writer=None):\n # fo = open(_s(prefix,'pre.txt'), \"w\")\n num = 0\n valid_acc = 0\n total_cost = 0\n loss = 0\n result = 0\n for x_sent, x_d1_sent, x_d2_sent, y_sent in iterator:\n num += len(x_sent)\n data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y = prepare_data(x_sent, x_d1_sent, x_d2_sent, y_sent, model_options, maxlen=maxlen)\n\n loss, result, preds = sess.run([cost, correct_pred, pred],\n feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,\n 'input/y:0': data_y, 'input/keep_prob:0': 1.,\n 'input/is_training:0': is_training})\n valid_acc += result.sum()\n total_cost += loss.sum()\n final_acc = 1.0 * valid_acc / num\n final_loss = 1.0 * total_cost / num\n # if writer is not None:\n # writer.add_summary(test_summary, eidx)\n\n # print result,preds,loss,result_\n print(preds, result, num)\n\n return final_acc, final_loss\n\n\ndef train(\n dim_word=100, # word vector dimensionality\n dim=100, # the number of GRU units\n encoder='lstm', # encoder model\n decoder='lstm', # decoder model\n patience=10, # early stopping patience\n max_epochs=5000,\n finish_after=10000000, # finish after this many updates\n decay_c=0., # L2 regularization penalty\n clip_c=-1., # gradient clipping threshold\n lrate=0.0004, # learning rate\n n_words=100000, # vocabulary size\n n_words_lemma=100000,\n maxlen=100, # maximum length of the description\n optimizer='adam',\n batch_size=32,\n valid_batch_size=32,\n save_model='../../models/',\n saveto='model.npz',\n dispFreq=100,\n validFreq=1000,\n saveFreq=1000, # save the parameters after every saveFreq updates\n use_dropout=False,\n reload_=False,\n verbose=False, # print verbose information for debug but slow speed\n delay1=3,\n delay2=7,\n types='title',\n cut_word=False,\n cut_sentence=False,\n datasets=[],\n valid_datasets=[],\n test_datasets=[],\n dictionary=[],\n kb_dicts=[],\n embedding='', # pretrain embedding file, such as word2vec, GLOVE\n dim_kb=5,\n 
RUN_NAME=\"histogram_visualization\",\n wait_N=10\n):\n logging.basicConfig(level=logging.DEBUG, format=\"%(asctime)s: %(name)s: %(levelname)s: %(message)s\",\n filename='./log_result.txt')\n # Model options\n model_options = locals().copy()\n # tf.set_random_seed(2345)\n with open(dictionary, 'rb') as f:\n worddicts = pkl.load(f)\n\n logger.info(\"Loading knowledge base ...\")\n\n # reload options\n if reload_ and os.path.exists(saveto):\n logger.info(\"Reload options\")\n with open('%s.pkl' % saveto, 'rb') as f:\n model_options = pkl.load(f)\n\n logger.debug(pprint.pformat(model_options))\n\n logger.info(\"Loading data\")\n train = TextIterator(datasets[0], datasets[1],\n dict=dictionary,\n delay1=delay1,\n delay2=delay2,\n types=types,\n n_words=n_words,\n batch_size=batch_size,\n cut_word=cut_word,\n cut_sentence=cut_sentence,\n shuffle=True)\n train_valid = TextIterator(datasets[0], datasets[1],\n dict=dictionary,\n delay1=delay1,\n delay2=delay2,\n types=types,\n n_words=n_words,\n batch_size=valid_batch_size,\n cut_word=cut_word,\n cut_sentence=cut_sentence,\n shuffle=False)\n valid = TextIterator(valid_datasets[0], valid_datasets[1],\n dict=dictionary,\n delay1=delay1,\n delay2=delay2,\n types=types,\n n_words=n_words,\n batch_size=valid_batch_size,\n cut_word=cut_word,\n cut_sentence=cut_sentence,\n shuffle=False)\n test = TextIterator(test_datasets[0], test_datasets[1],\n dict=dictionary,\n delay1=delay1,\n delay2=delay2,\n types=types,\n n_words=n_words,\n batch_size=valid_batch_size,\n cut_word=cut_word,\n cut_sentence=cut_sentence,\n shuffle=False)\n\n # Initialize (or reload) the parameters using 'model_options'\n # then build the tensorflow graph\n logger.info(\"init_word_embedding\")\n params = init_params(model_options, worddicts)\n embedding = word_embedding(model_options, params)\n is_training, cost, x, x_mask, y, n_timesteps, pred, summary = build_model(embedding, model_options)\n lr = tf.Variable(0.0, trainable=False)\n\n def assign_lr(session, lr_value):\n session.run(tf.assign(lr, lr_value))\n\n logger.info('Building optimizers...')\n optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n logger.info('Done')\n # print all variables\n tvars = tf.trainable_variables()\n for var in tvars:\n print(var.name, var.shape)\n lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in tvars if ('embeddings' or 'bias') not in v.name]) * 0.0001 #\n cost = cost + lossL2\n # regularization_cost = 0.0003 * tf.reduce_sum([tf.nn.l2_loss(v) for v in tvars])\n # cost = cost + regularization_cost\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), model_options['clip_c'])\n train_op = optimizer.apply_gradients(zip(grads, tvars))\n # train_op = optimizer.minimize(cost)\n op_loss = tf.reduce_mean(cost)\n logger.info(\"correct_pred\")\n correct_pred = tf.equal(tf.argmax(input=pred, axis=1), y) # make prediction\n logger.info(\"Done\")\n\n temp_accuracy = tf.cast(correct_pred, tf.float32) # change to float32\n\n logger.info(\"init variables\")\n init = tf.global_variables_initializer()\n logger.info(\"Done\")\n # saver\n saver = tf.train.Saver(max_to_keep=15)\n\n config = tf.ConfigProto()\n # config.gpu_options.per_process_gpu_memory_fraction = 0.4\n config.gpu_options.allow_growth = True\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:\n training_writer = tf.summary.FileWriter(\"./logs/{}/training\".format(RUN_NAME), sess.graph)\n validate_writer = tf.summary.FileWriter(\"./logs/{}/validate\".format(RUN_NAME), sess.graph)\n testing_writer 
= tf.summary.FileWriter(\"./logs/{}/testing\".format(RUN_NAME), sess.graph)\n sess.run(init)\n history_errs = []\n # reload history\n if reload_ and os.path.exists(saveto):\n logger.info(\"Reload history error\")\n history_errs = list(numpy.load(saveto)['history_errs'])\n\n bad_counter = 0\n\n if validFreq == -1:\n validFreq = len(train[0]) / batch_size\n if saveFreq == -1:\n saveFreq = len(train[0]) / batch_size\n\n uidx = 0\n estop = False\n valid_acc_record = []\n test_acc_record = []\n best_num = -1\n best_epoch_num = 0\n lr_change_list = []\n wait_counter = 0\n wait_N = model_options['wait_N']\n learning_rate = model_options['lrate']\n assign_lr(sess, learning_rate)\n for eidx in range(max_epochs):\n n_samples = 0\n for x, x_d1, x_d2, y in train:\n n_samples += len(x)\n uidx += 1\n keep_prob = 0.5\n is_training = True\n data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y = prepare_data(x,\n x_d1,\n x_d2,\n y,\n model_options,\n maxlen=maxlen)\n print(data_x.shape, data_x_mask.shape, data_y.shape)\n assert data_y.shape[0] == data_x.shape[1], 'Size does not match'\n if x is None:\n logger.debug('Minibatch with zero sample under length {0}'.format(maxlen))\n uidx -= 1\n continue\n ud_start = time.time()\n _, loss = sess.run([train_op, op_loss],\n feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask, 'input/y:0': data_y,\n 'input/keep_prob:0': keep_prob, 'input/is_training:0': is_training})\n ud = time.time() - ud_start\n '''train_summary = sess.run(summary, feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,\n 'input/y:0': data_y,'input/keep_prob:0':keep_prob,'input/is_training:0':is_training})\n training_writer.add_summary(train_summary, eidx)'''\n if numpy.mod(uidx, dispFreq) == 0:\n logger.debug('Epoch {0} Update {1} Cost {2} TIME {3}'.format(eidx, uidx, loss, ud))\n\n # validate model on validation set and early stop if necessary\n if numpy.mod(uidx, validFreq) == 0:\n keep_prob = 1\n is_training = False\n\n valid_acc, valid_loss = predict_pro_acc(sess, cost, prepare_data, model_options, valid, maxlen,\n correct_pred, pred, summary, eidx, is_training,\n validate_writer)\n test_acc, test_loss = predict_pro_acc(sess, cost, prepare_data, model_options, test, maxlen,\n correct_pred, pred, summary, eidx, is_training,\n testing_writer)\n valid_err = 1.0 - valid_acc\n # valid_err = valid_loss\n history_errs.append(valid_err)\n\n logger.debug('Epoch {0}'.format(eidx))\n logger.debug('Valid cost {0}'.format(valid_loss))\n logger.debug('Valid accuracy {0}'.format(valid_acc))\n logger.debug('Test cost {0}'.format(test_loss))\n logger.debug('Test accuracy {0}'.format(test_acc))\n logger.debug('learning_rate: {0}'.format(learning_rate))\n\n valid_acc_record.append(valid_acc)\n test_acc_record.append(test_acc)\n if uidx == 0 and valid_err <= numpy.array(history_errs).min():\n best_num = best_num + 1\n best_epoch_num = eidx\n wait_counter = 0\n logger.info(\"Saving...\")\n saver.save(sess, _s(_s(_s(save_model, \"epoch\"), str(best_num)), \"model.ckpt\"))\n logger.info(_s(_s(_s(save_model, \"epoch\"), str(best_num)), \"model.ckpt\"))\n numpy.savez(saveto, history_errs=history_errs, **params)\n pkl.dump(model_options, open('{}.pkl'.format(saveto), 'wb'))\n logger.info(\"Done\")\n\n if valid_err > numpy.array(history_errs).min():\n wait_counter += 1\n # wait_counter +=1 if valid_err>numpy.array(history_errs).min() else 0\n if wait_counter >= wait_N:\n logger.info(\"wait_counter max, need to half the lr\")\n # print 'wait_counter max, need to half the 
lr'\n bad_counter += 1\n wait_counter = 0\n logger.debug('bad_counter: {0}'.format(bad_counter))\n # TODO change the learining rate\n learning_rate = learning_rate * 0.5\n # learning_rate = learning_rate\n assign_lr(sess, learning_rate)\n lr_change_list.append(eidx)\n logger.debug('lrate change to: {0}'.format(learning_rate))\n # print 'lrate change to: ' + str(lrate)\n\n if bad_counter > patience:\n logger.info(\"Early Stop!\")\n estop = True\n break\n\n if numpy.isnan(valid_err):\n pdb.set_trace()\n\n # finish after this many updates\n if uidx >= finish_after:\n logger.debug('Finishing after iterations! {0}'.format(uidx))\n # print 'Finishing after %d iterations!' % uidx\n estop = True\n break\n logger.debug('Seen samples: {0}'.format(n_samples))\n # print 'Seen %d samples' % n_samples\n\n if estop:\n break\n\n with tf.Session() as sess:\n # Restore variables from disk.\n saver.restore(sess, _s(_s(_s(save_model, \"epoch\"), str(best_num)), \"model.ckpt\"))\n keep_prob = 1\n is_training = False\n logger.info('=' * 80)\n logger.info('Final Result')\n logger.info('=' * 80)\n logger.debug('best epoch {0}'.format(best_epoch_num))\n\n valid_acc, valid_cost = predict_pro_acc(sess, cost, prepare_data, model_options, valid,\n maxlen, correct_pred, pred, summary, eidx, is_training, None)\n logger.debug('Valid cost {0}'.format(valid_cost))\n logger.debug('Valid accuracy {0}'.format(valid_acc))\n # print 'Valid cost', valid_cost\n # print 'Valid accuracy', valid_acc\n\n test_acc, test_cost = predict_pro_acc(sess, cost, prepare_data, model_options, test,\n maxlen, correct_pred, pred, summary, eidx, is_training, None)\n logger.debug('Test cost {0}'.format(test_cost))\n logger.debug('Test accuracy {0}'.format(test_acc))\n\n # print 'best epoch ', best_epoch_num\n train_acc, train_cost = predict_pro_acc(sess, cost, prepare_data, model_options, train_valid,\n maxlen, correct_pred, pred, summary, eidx, is_training, None)\n logger.debug('Train cost {0}'.format(train_cost))\n logger.debug('Train accuracy {0}'.format(train_acc))\n # print 'Train cost', train_cost\n # print 'Train accuracy', train_acc\n\n # print 'Test cost ', test_cost\n # print 'Test accuracy ', test_acc\n\n return None\n\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "numpy.split", "pandas.merge", "pandas.DataFrame", "pandas.read_table", "pandas.read_pickle" ], [ "tensorflow.cond", "tensorflow.device", "tensorflow.sign", "tensorflow.concat", "numpy.savez", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.cast", "tensorflow.equal", "tensorflow.nn.bidirectional_dynamic_rnn", "tensorflow.orthogonal_initializer", "numpy.max", "tensorflow.nn.l2_loss", "tensorflow.map_fn", "numpy.random.randn", "tensorflow.train.AdamOptimizer", "numpy.linalg.svd", "tensorflow.while_loop", "tensorflow.Variable", "tensorflow.gradients", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.trainable_variables", "numpy.load", "tensorflow.train.Saver", "tensorflow.argmax", "numpy.zeros", "tensorflow.random_normal_initializer", "tensorflow.tensordot", "tensorflow.nn.dropout", "tensorflow.shape", "tensorflow.TensorArray", "numpy.mod", "numpy.isnan", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.one_hot", "numpy.array", "tensorflow.nn.embedding_lookup", "tensorflow.summary.histogram", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.assign", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "tensorflow.variable_scope" ] ]
zye1996/3DSSD
[ "036983e282cd13e6a5bf0b51ff6ad31639a75b07", "036983e282cd13e6a5bf0b51ff6ad31639a75b07" ]
[ "lib/builder/postprocessor.py", "lib/utils/tf_ops/evaluation/tf_evaluate_op_test.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\nfrom lib.core.config import cfg\nfrom lib.utils.anchors_util import project_to_bev\nfrom lib.utils.box_3d_utils import box_3d_to_anchor\n\nimport lib.dataset.maps_dict as maps_dict\n\nclass PostProcessor:\n def __init__(self, stage, cls_num):\n if stage == 0:\n self.postprocessor_cfg = cfg.MODEL.FIRST_STAGE\n elif stage == 1:\n self.postprocessor_cfg = cfg.MODEL.SECOND_STAGE\n else: raise Exception('Not Implementation Error')\n\n self.max_output_size = self.postprocessor_cfg.MAX_OUTPUT_NUM\n self.nms_threshold = self.postprocessor_cfg.NMS_THRESH\n\n self.cls_num = cls_num\n \n \n def class_unaware_format(self, pred_anchors_3d, pred_score):\n \"\"\" (for rpn propose)\n Change prediction format from class-aware-format to class-ignorance-format\n pred_anchors_3d: [bs, points_num, 1/cls_num, 7]\n pred_score: [bs, points_num, cls_num]\n\n return: pred_anchors_3d: [bs, points_num, 1, 7]\n pred_score: [bs, points_num, 1]\n \"\"\" \n unaware_pred_score = tf.reduce_max(pred_score, axis=-1, keepdims=True)\n cls_num = pred_anchors_3d.get_shape().as_list()[2]\n if cls_num == 1:\n return pred_anchors_3d, unaware_pred_score\n\n # class-aware in boundingbox prediction\n pred_cls = tf.argmax(pred_score, axis=-1)\n pred_cls_onehot = tf.cast(tf.one_hot(pred_cls, depth=cls_num, on_value=1, off_value=0, axis=-1), tf.float32)\n # bs, pts_num, cls_num, 7\n unaware_pred_anchors_3d = pred_anchors_3d * tf.expand_dims(pred_cls_onehot, axis=-1)\n unaware_pred_anchors_3d = tf.reduce_sum(unaware_pred_anchors_3d, axis=2, keepdims=True)\n return unaware_pred_anchors_3d, unaware_pred_score\n\n \n\n\n def forward(self, pred_anchors_3d, pred_score, output_dict, pred_attribute=None, pred_velocity=None):\n \"\"\"\n pred_anchors_3d: [bs, points_num, 1/cls_num, 7]\n pred_score: [bs, points_num, cls_num]\n pred_attribute: [bs, points_num, 1/cls_num, 8]\n pred_velocity: [bs, points_num, 1/cls_num, 2]\n \"\"\"\n cls_num = pred_score.get_shape().as_list()[-1] \n if cls_num != self.cls_num: # format predictions to class-unaware predictions\n assert pred_attribute == None and pred_velocity == None, 'Not support the predictions of attribute and velocity in RPN phase'\n pred_anchors_3d, pred_score = self.class_unaware_format(pred_anchors_3d, pred_score)\n\n pred_anchors_3d_list = tf.unstack(pred_anchors_3d, axis=0)\n pred_scores_list = tf.unstack(pred_score, axis=0)\n\n pred_3d_bbox_list = []\n pred_3d_cls_score_list = []\n pred_3d_cls_cat_list = []\n pred_attribute_list = []\n pred_velocity_list = []\n for batch_idx, pred_anchors_3d, pred_scores in zip(range(len(pred_anchors_3d_list)), pred_anchors_3d_list, pred_scores_list):\n cur_pred_3d_bbox_list = []\n cur_pred_3d_cls_score_list = []\n cur_pred_3d_cls_cat_list = []\n cur_pred_attribute_list = []\n cur_pred_velocity_list = []\n\n for i in range(self.cls_num):\n reg_i = min(i, pred_anchors_3d.get_shape().as_list()[1] - 1)\n cur_pred_anchors_3d = pred_anchors_3d[:, reg_i, :] \n\n cur_pred_anchors = box_3d_to_anchor(cur_pred_anchors_3d) \n cur_pred_anchors_bev = project_to_bev(cur_pred_anchors) # [-1, 4]\n\n cur_cls_score = pred_scores[:, i]\n nms_index = tf.image.non_max_suppression(cur_pred_anchors_bev, cur_cls_score, max_output_size=self.max_output_size, iou_threshold=self.nms_threshold)\n \n cur_pred_3d_bbox_list.append(tf.gather(cur_pred_anchors_3d, nms_index)) \n cur_pred_3d_cls_score_list.append(tf.gather(cur_cls_score, nms_index))\n cur_pred_3d_cls_cat_list.append(tf.cast(tf.ones_like(nms_index), tf.int32) * i)\n\n if 
pred_attribute is not None:\n cur_pred_attribute_list.append(tf.gather(pred_attribute[batch_idx, :, reg_i, :], nms_index))\n if pred_velocity is not None:\n cur_pred_velocity_list.append(tf.gather(pred_velocity[batch_idx, :, reg_i, :], nms_index))\n\n cur_pred_3d_bbox_list = tf.concat(cur_pred_3d_bbox_list, axis=0)\n cur_pred_3d_cls_score_list = tf.concat(cur_pred_3d_cls_score_list, axis=0)\n cur_pred_3d_cls_cat_list = tf.concat(cur_pred_3d_cls_cat_list, axis=0)\n\n pred_3d_bbox_list.append(cur_pred_3d_bbox_list)\n pred_3d_cls_score_list.append(cur_pred_3d_cls_score_list)\n pred_3d_cls_cat_list.append(cur_pred_3d_cls_cat_list)\n\n if pred_attribute is not None:\n cur_pred_attribute_list = tf.concat(cur_pred_attribute_list, axis=0)\n pred_attribute_list.append(cur_pred_attribute_list)\n\n if pred_velocity is not None:\n cur_pred_velocity_list = tf.concat(cur_pred_velocity_list, axis=0)\n pred_velocity_list.append(cur_pred_velocity_list)\n\n pred_3d_bbox_list = tf.stack(pred_3d_bbox_list, axis=0)\n pred_3d_cls_score_list = tf.stack(pred_3d_cls_score_list, axis=0)\n pred_3d_cls_cat_list = tf.stack(pred_3d_cls_cat_list, axis=0)\n \n output_dict[maps_dict.PRED_3D_BBOX].append(pred_3d_bbox_list)\n output_dict[maps_dict.PRED_3D_SCORE].append(pred_3d_cls_score_list)\n output_dict[maps_dict.PRED_3D_CLS_CATEGORY].append(pred_3d_cls_cat_list)\n if pred_attribute is not None:\n output_dict[maps_dict.PRED_3D_ATTRIBUTE].append(tf.stack(pred_attribute_list, axis=0))\n if pred_velocity is not None:\n output_dict[maps_dict.PRED_3D_VELOCITY].append(tf.stack(pred_velocity_list, axis=0))\n\n return output_dict\n", "import tensorflow as tf\nimport numpy as np\nfrom tf_evaluate import evaluate\n\nclass GroupPointTest(tf.test.TestCase):\n def test(self):\n with self.test_session() as sess:\n dets = tf.constant([\n [2, 645.60, 167.95, 680.98, 198.93, -1.65, 4.59, 1.32, 45.84, 1.86, 0.60, 2.02, -1.55, 0.80],\n [0, 387.63, 181.54, 423.81, 203.12, 1.85, -16.53, 2.39, 58.49, 1.67, 1.87, 3.69, 1.57, 0.99], \n [1, 712.40, 143.00, 810.73, 307.92, -0.20, 1.84, 1.47, 8.41, 1.89, 0.48, 1.20, 0.01, 0.70], \n [0, 614.24, 181.78, 727.31, 284.77, 1.55, 1.00, 1.75, 13.22, 1.57, 1.73, 4.15, 1.62, 0.99],\n ])\n names = tf.constant(['/root/kitti_native_evaluation/gtfiles/000001.txt', '/root/kitti_native_evaluation/gtfiles/000000.txt', '/root/kitti_native_evaluation/gtfiles/000003.txt'])\n numlist = tf.constant([2, 1, 1])\n outs = evaluate(dets, names, numlist) \n print(outs)\n pi, ai, pg, ag, p3, a3 = sess.run(outs)\n print(pi)\n print(pi.shape)\n print(pg)\n print(pg.shape)\n print(p3)\n print(p3.shape)\n\n def test_grad(self):\n pass\n\nif __name__=='__main__':\n tf.test.main() \n" ]
[ [ "tensorflow.reduce_max", "tensorflow.concat", "tensorflow.unstack", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.image.non_max_suppression", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.gather", "tensorflow.one_hot", "tensorflow.argmax" ], [ "tensorflow.constant", "tensorflow.test.main" ] ]
wix-playground/incubator-tvm
[ "99734d29b77ef88fe81b0fd0cb2b71db8dc2608e", "99734d29b77ef88fe81b0fd0cb2b71db8dc2608e" ]
[ "tests/python/relay/test_op_level5.py", "python/tvm/relay/testing/tf.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\" Support level5 operator test cases.\n\"\"\"\nimport math\nimport numpy as np\nimport tvm\nfrom tvm import relay\nfrom tvm.relay import transform\nfrom tvm.relay.testing import ctx_list\nimport topi.testing\n\ndef run_infer_type(expr):\n mod = relay.Module.from_expr(expr)\n mod = transform.InferType()(mod)\n entry = mod[\"main\"]\n return entry if isinstance(expr, relay.Function) else entry.body\n\ndef test_resize_infer_type():\n n, c, h, w = tvm.var(\"n\"), tvm.var(\"c\"), tvm.var(\"h\"), tvm.var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"int8\"))\n th, tw = tvm.var(\"th\"), tvm.var(\"tw\")\n z = relay.image.resize(x, (th, tw))\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, c, th, tw), \"int8\")\n\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"int8\"))\n z= relay.image.resize(x, (100, 200), \"NCHW\", \"bilinear\", True)\n assert \"size=\" in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, c, 100, 200), \"int8\")\n\ndef test_resize():\n def verify_resize(dshape, scale, method, layout):\n if layout == \"NHWC\":\n size = (dshape[1] * scale, dshape[2] * scale)\n else:\n size = (dshape[2] * scale, dshape[3] * scale)\n\n x_data = np.random.uniform(size=dshape).astype(\"float32\")\n if method == \"bilinear\":\n ref_res = topi.testing.bilinear_resize_python(x_data, size, layout)\n else:\n ref_res = topi.testing.upsampling_python(x_data, (scale, scale), layout)\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n z = relay.image.resize(x, size, layout, method, True)\n assert \"size=\" in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType(ref_res.shape, \"float32\")\n func = relay.Function([x], z)\n\n for target, ctx in ctx_list():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4)\n for method in [\"bilinear\", \"nearest_neighbor\"]:\n for layout in [\"NHWC\", \"NCHW\"]:\n verify_resize((1, 4, 4, 4), 2, method, layout)\n\n\ndef test_multibox_prior():\n def get_ref_result(dshape, sizes=(1.0,),\n ratios=(1.0,), steps=(-1.0, -1.0),\n offsets=(0.5, 0.5), clip=True):\n in_height = dshape[2]\n in_width = dshape[3]\n num_sizes = len(sizes)\n num_ratios = len(ratios)\n size_ratio_concat = sizes + ratios\n steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height\n steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width\n offset_h = offsets[0]\n offset_w = offsets[1]\n\n oshape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)\n dtype = \"float32\"\n np_out = np.zeros(oshape).astype(dtype)\n\n for i in 
range(in_height):\n center_h = (i + offset_h) * steps_h\n for j in range(in_width):\n center_w = (j + offset_w) * steps_w\n for k in range(num_sizes + num_ratios - 1):\n w = size_ratio_concat[k] * in_height / in_width / 2.0 if k < num_sizes else \\\n size_ratio_concat[0] * in_height / in_width * math.sqrt(size_ratio_concat[k + 1]) / 2.0\n h = size_ratio_concat[k] / 2.0 if k < num_sizes else \\\n size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0\n count = i * in_width * (num_sizes + num_ratios - 1) + j * (num_sizes + num_ratios - 1) + k\n np_out[0][count][0] = center_w - w\n np_out[0][count][1] = center_h - h\n np_out[0][count][2] = center_w + w\n np_out[0][count][3] = center_h + h\n if clip:\n np_out = np.clip(np_out, 0, 1)\n\n return np_out\n\n def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,),\n ratios=(1.0,), steps=(-1.0, -1.0),\n offsets=(0.5, 0.5), clip=True, check_size=False,\n check_type_only=False):\n\n z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip)\n zz = run_infer_type(z)\n if check_size:\n assert \"sizes=\" in z.astext()\n assert zz.checked_type == relay.TensorType(\n (1, dshape[2] * dshape[3] * (len(sizes) + len(ratios) - 1), 4),\n \"float32\")\n\n if check_type_only:\n return\n\n data = np.random.uniform(low=-1, high=1, size=dshape).astype(\"float32\")\n func = relay.Function([x], z)\n func = run_infer_type(func)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\n sizes = (0.3, 1.5, 0.7)\n ratios = (1.3, 2.4)\n steps = (2.0, 1.5)\n offsets = (0.2, 0.3)\n dshape = (1, 3, 56, 56)\n ref_res = get_ref_result(dshape, sizes, ratios, steps, offsets)\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets,\n check_size=True)\n y = relay.var(\"y\", relay.TensorType((tvm.var(\"n\"), 3, 56, 56), \"float32\"))\n verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets,\n check_size=True, check_type_only=True)\n\n dshape = (1, 24, 32, 32)\n ref_res = get_ref_result(dshape, clip=False)\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n verify_multibox_prior(x, dshape, ref_res, clip=False)\n y = relay.var(\"y\", relay.TensorType((tvm.var(\"n\"), 24, 32, 32), \"float32\"))\n verify_multibox_prior(x, dshape, ref_res, clip=False, check_type_only=True)\n\n\ndef test_get_valid_counts():\n def verify_get_valid_counts(dshape, score_threshold, id_index, score_index):\n dtype = \"float32\"\n batch_size, num_anchor, elem_length = dshape\n np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)\n np_out1 = np.zeros(shape=(batch_size,))\n np_out2 = np.zeros(shape=dshape).astype(dtype)\n for i in range(batch_size):\n np_out1[i] = 0\n inter_idx = 0\n for j in range(num_anchor):\n score = np_data[i, j, score_index]\n if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):\n for k in range(elem_length):\n np_out2[i, inter_idx, k] = np_data[i, j, k]\n np_out1[i] += 1\n inter_idx += 1\n if j >= np_out1[i]:\n for k in range(elem_length):\n np_out2[i, j, k] = -1.0\n\n x = relay.var(\"x\", relay.ty.TensorType(dshape, dtype))\n z = relay.vision.get_valid_counts(x, score_threshold, 
id_index, score_index)\n assert \"score_threshold\" in z.astext()\n func = relay.Function([x], z.astuple())\n func = run_infer_type(func)\n for target, ctx in ctx_list():\n if target == 'cuda':\n return\n intrp = relay.create_executor(\"debug\", ctx=ctx, target=target)\n out = intrp.evaluate(func)(np_data)\n tvm.testing.assert_allclose(out[0].asnumpy(), np_out1, rtol=1e-3, atol=1e-04)\n tvm.testing.assert_allclose(out[1].asnumpy(), np_out2, rtol=1e-3, atol=1e-04)\n\n verify_get_valid_counts((1, 2500, 6), 0, 0, 1)\n verify_get_valid_counts((1, 2500, 5), -1, -1, 0)\n verify_get_valid_counts((3, 1000, 6), 0.55, 1, 0)\n verify_get_valid_counts((16, 500, 5), 0.95, -1, 0)\n\n\ndef test_non_max_suppression():\n def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res,\n iou_threshold=0.5, force_suppress=False, top_k=-1,\n check_type_only=False):\n x0 = relay.var(\"x0\", relay.ty.TensorType(dshape, \"float32\"))\n x1 = relay.var(\"x1\", relay.ty.TensorType((dshape[0],), \"int32\"))\n z = relay.vision.non_max_suppression(x0, x1, max_output_size = -1, \\\n iou_threshold = iou_threshold, force_suppress = force_suppress, \\\n top_k = top_k, return_indices=False)\n z_indices = relay.vision.non_max_suppression(x0, x1, max_output_size = -1, \\\n iou_threshold = iou_threshold, force_suppress = force_suppress, \\\n top_k = top_k)\n assert \"iou_threshold\" in z.astext()\n assert \"iou_threshold\" in z_indices.astext()\n zz = run_infer_type(z)\n zz_indices = run_infer_type(z_indices)\n assert zz.checked_type == relay.ty.TensorType(dshape, \"float32\")\n assert zz_indices.checked_type == relay.ty.TensorType((dshape[0], dshape[1]), \"int32\")\n\n if check_type_only:\n return\n\n func = relay.Function([x0, x1], z)\n func = run_infer_type(func)\n func_indices = relay.Function([x0, x1], z_indices)\n func_indices = run_infer_type(func_indices)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x0_data, x1_data)\n op_indices_res1 = intrp1.evaluate(func_indices)(x0_data, x1_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n tvm.testing.assert_allclose(op_indices_res1.asnumpy(), ref_indices_res, rtol=1e-5)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(x0_data, x1_data)\n op_indices_res2 = intrp2.evaluate(func_indices)(x0_data, x1_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n tvm.testing.assert_allclose(op_indices_res2.asnumpy(), ref_indices_res, rtol=1e-5)\n\n np_data = np.array([[[0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80],\n [0, 0.4, 4, 21, 19, 40], [2, 0.9, 35, 61, 52, 79],\n [1, 0.5, 100, 60, 70, 110]]]).astype(\"float32\")\n np_valid_count = np.array([4]).astype(\"int32\")\n np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],\n [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1]]])\n np_indices_result = np.array([[3, 0, -1, -1, -1]])\n num_anchors = 5\n\n dshape = (tvm.var(\"n\"), num_anchors, 6)\n verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result,\n force_suppress=True, top_k=2, check_type_only=True)\n dshape = (1, num_anchors, 6)\n verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result,\n force_suppress=True, top_k=2, check_type_only=False)\n\n np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],\n [1, 0.7, 30, 60, 50, 80], [-1, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1]]])\n 
np_indices_result = np.array([[3, 0, 1, -1, -1]])\n dshape = (tvm.var(\"n\"), num_anchors, 6)\n verify_nms(np_data, np_valid_count, dshape, np_result,\n np_indices_result, check_type_only=True)\n dshape = (1, num_anchors, 6)\n verify_nms(np_data, np_valid_count, dshape, np_result,\n np_indices_result, top_k=3)\n\n\ndef test_multibox_transform_loc():\n def test_default_value():\n num_anchors = 3\n num_classes = 3\n\n np_cls_prob = np.array(\n [[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45],\n [0.7, 0.1, 0.2]]]).astype(\"float32\")\n np_loc_preds = np.array(\n [[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4,\n -0.8]]).astype(\"float32\")\n np_anchors = np.array(\n [[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2],\n [1.2, 1.2, 1.5, 1.5]]]).astype(\"float32\")\n\n expected_np_out = np.array([[[1, 0.69999999, 0, 0, 0.10818365, 0.10008108],\n [0, 0.44999999, 1, 1, 1, 1],\n [0, 0.30000001, 0, 0, 0.22903419, 0.20435292]]])\n\n\n cls_prob = relay.var(\n \"cls_prob\",\n relay.ty.TensorType((1, num_anchors, num_classes), \"float32\"))\n loc_pred = relay.var(\n \"loc_pred\", relay.ty.TensorType((1, num_anchors * 4), \"float32\"))\n anchors = relay.var(\n \"anchors\", relay.ty.TensorType((1, num_anchors, 4), \"float32\"))\n\n mtl = relay.vision.multibox_transform_loc(\n cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors)\n ret = run_infer_type(mtl.astuple())\n ref_type = relay.ty.TupleType(\n tvm.convert([\n relay.ty.TensorType((1, num_anchors, 6), \"float32\"),\n relay.ty.TensorType((1, ), \"int\")\n ]))\n\n assert ret.checked_type == ref_type\n\n nms = relay.vision.non_max_suppression(mtl[0], mtl[1], return_indices=False)\n func = relay.Function([cls_prob, loc_pred, anchors], nms)\n func = run_infer_type(func)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(np_cls_prob, np_loc_preds,\n np_anchors)\n tvm.testing.assert_allclose(op_res1.asnumpy(), expected_np_out, rtol=1e-5)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(np_cls_prob, np_loc_preds,\n np_anchors)\n tvm.testing.assert_allclose(op_res2.asnumpy(), expected_np_out, rtol=1e-5)\n\n def test_threshold():\n num_anchors = 5\n num_classes = 5\n n = tvm.var(\"n\")\n cls_prob = relay.var(\n \"cls_prob\",\n relay.ty.TensorType((n, num_anchors, num_classes), \"float32\"))\n loc_pred = relay.var(\n \"loc_pred\", relay.ty.TensorType((n, num_anchors * 4), \"float32\"))\n anchors = relay.var(\n \"anchors\", relay.ty.TensorType((1, num_anchors, 4), \"float32\"))\n threshold = 0.02\n variances = (0.2, 0.2, 0.3, 0.3)\n\n ret = relay.vision.multibox_transform_loc(\n cls_prob=cls_prob,\n loc_pred=loc_pred,\n anchor=anchors,\n threshold=threshold,\n variances=variances)\n ret = run_infer_type(ret.astuple())\n ref_type = relay.ty.TupleType(\n tvm.convert([\n relay.ty.TensorType((n, num_anchors, 6), \"float32\"),\n relay.ty.TensorType((n, ), \"int\")\n ]))\n assert ret.checked_type == ref_type\n\n test_default_value()\n test_threshold()\n\n\ndef test_roi_align():\n def verify_roi_align(data_shape, rois_shape, pooled_size, spatial_scale, sample_ratio):\n data = relay.var(\"data\", relay.ty.TensorType(data_shape, \"float32\"))\n rois = relay.var(\"rois\", relay.ty.TensorType(rois_shape, \"float32\"))\n z = relay.vision.roi_align(data, rois, pooled_size=(pooled_size, pooled_size),\n spatial_scale=spatial_scale, sample_ratio=sample_ratio,\n layout=\"NCHW\")\n zz = run_infer_type(z)\n batch, channel, in_size, _ = data_shape\n 
num_roi = rois_shape[0]\n assert zz.checked_type == relay.ty.TensorType(\n (num_roi, channel, pooled_size, pooled_size), \"float32\")\n\n func = relay.Function([data, rois], z)\n func = run_infer_type(func)\n np_data = np.random.uniform(size=data_shape).astype(\"float32\")\n np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size\n np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi)\n ref_res = topi.testing.roi_align_nchw_python(np_data, np_rois, pooled_size=pooled_size,\n spatial_scale=spatial_scale,\n sample_ratio=sample_ratio)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(np_data, np_rois)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(np_data, np_rois)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4)\n\n verify_roi_align((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1)\n verify_roi_align((4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2)\n\n\ndef test_roi_pool():\n def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale):\n data = relay.var(\"data\", relay.ty.TensorType(data_shape, \"float32\"))\n rois = relay.var(\"rois\", relay.ty.TensorType(rois_shape, \"float32\"))\n z = relay.vision.roi_pool(data, rois, pooled_size=(pooled_size, pooled_size),\n spatial_scale=spatial_scale, layout=\"NCHW\")\n zz = run_infer_type(z)\n batch, channel, in_size, _ = data_shape\n num_roi = rois_shape[0]\n assert zz.checked_type == relay.ty.TensorType(\n (num_roi, channel, pooled_size, pooled_size), \"float32\")\n\n func = relay.Function([data, rois], z)\n func = run_infer_type(func)\n np_data = np.random.uniform(size=data_shape).astype(\"float32\")\n np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size\n np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32')\n ref_res = topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size,\n spatial_scale=spatial_scale)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(np_data, np_rois)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(np_data, np_rois)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4)\n\n verify_roi_pool((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0)\n verify_roi_pool((4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5)\n\n\ndef test_proposal():\n def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs):\n cls_prob = relay.var(\"cls_prob\", relay.ty.TensorType(np_cls_prob.shape, \"float32\"))\n bbox_pred = relay.var(\"bbox_pred\", relay.ty.TensorType(np_bbox_pred.shape, \"float32\"))\n im_info = relay.var(\"im_info\", relay.ty.TensorType(np_im_info.shape, \"float32\"))\n z = relay.vision.proposal(cls_prob, bbox_pred, im_info, **attrs)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.ty.TensorType(np_out.shape, \"float32\")\n\n func = relay.Function([cls_prob, bbox_pred, im_info], z)\n func = run_infer_type(func)\n for target in ['llvm', 'cuda']:\n if not tvm.module.enabled(target):\n print(\"Skip test because %s is not enabled.\" % target)\n continue\n ctx = tvm.context(target, 
0)\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(np_cls_prob, np_bbox_pred, np_im_info)\n tvm.testing.assert_allclose(op_res1.asnumpy(), np_out, rtol=1e-4)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(np_cls_prob, np_bbox_pred, np_im_info)\n tvm.testing.assert_allclose(op_res2.asnumpy(), np_out, rtol=1e-4)\n\n attrs = {\n 'scales': (0.5,),\n 'ratios': (0.5,),\n 'feature_stride': 16,\n 'iou_loss': False,\n 'rpn_min_size': 16,\n 'threshold': 0.7,\n 'rpn_pre_nms_top_n': 200,\n 'rpn_post_nms_top_n': 4,\n }\n\n np_cls_prob = np.array([[\n [[0.3, 0.6, 0.2], [0.4, 0.7, 0.5], [0.1, 0.4, 0.3]],\n [[0.7, 0.5, 0.3], [0.6, 0.4, 0.8], [0.9, 0.2, 0.5]]\n ]], dtype='float32')\n np_bbox_pred = np.array([[\n [[0.5, 1.0, 0.6], [0.8, 1.2, 2.0], [0.9, 1.0, 0.8]],\n [[0.5, 1.0, 0.7], [0.8, 1.2, 1.6], [2.1, 1.5, 0.7]],\n [[1.0, 0.5, 0.7], [1.5, 0.9, 1.6], [1.4, 1.5, 0.8]],\n [[1.0, 0.5, 0.6], [1.5, 0.9, 2.0], [1.8, 1.0, 0.9]],\n ]], dtype='float32')\n np_im_info = np.array([[48., 48., 1.]], dtype='float32')\n np_out = np.array([\n [0., 0., 2.8451548,28.38012, 18.154846],\n [0., 0., 15.354933, 41.96971, 41.245064],\n [0., 18.019852, 1.0538368, 51.98015, 25.946163],\n [0., 27.320923, -1.266357, 55., 24.666357]\n ], dtype='float32')\n\n\n verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)\n\n np_out = np.array([\n [ 0., -5.25, -2.5, 21.75, 19.],\n [ 0., 11.25, -2., 37.25, 18.5],\n [ 0., 26.849998, -2.3000002, 53.45, 18.6],\n [ 0., -4.95, 13.799999, 22.25, 35.5]\n ], dtype='float32')\n attrs['iou_loss'] = True\n verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)\n\n\ndef test_yolo_reorg_infer_shape():\n def verify_yolo_reorg(shape, stride, out_shape):\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n z = relay.vision.yolo_reorg(x, stride=stride)\n zz = run_infer_type(z)\n assert \"stride=\" in z.astext()\n assert zz.checked_type == relay.ty.TensorType(out_shape, \"float32\")\n\n n, c, h, w = tvm.var(\"n\"), tvm.var(\"c\"), tvm.var(\"h\"), tvm.var(\"w\")\n idxd = tvm.indexdiv\n verify_yolo_reorg((n, c, 20, 20), 10, (n, c*10*10, 2, 2))\n verify_yolo_reorg((n, c, h, w), 2, (n, c*2*2, idxd(h, 2), idxd(w, 2)))\n\ndef test_yolo_reorg():\n def verify_yolo_reorg(shape, stride):\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n ref_res = topi.testing.reorg_python(x_data, stride)\n\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n z = relay.vision.yolo_reorg(x, stride=stride)\n zz = run_infer_type(z)\n assert \"stride=\" in z.astext()\n assert zz.checked_type == relay.ty.TensorType(ref_res.shape, \"float32\")\n\n func = relay.Function([x], z)\n\n for target, ctx in ctx_list():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n verify_yolo_reorg((1, 100, 20, 20), 10)\n verify_yolo_reorg((1, 4, 6, 6), 2)\n\n\ndef test_deformable_conv2d():\n def test_infer_type(batch, in_channel, size, out_channel, deformable_groups, groups):\n data_shape = (batch, in_channel, size, size)\n data = relay.var(\"data\", shape=data_shape)\n offset = relay.var(\"offset\")\n kernel = relay.var(\"kernel\")\n kernel_size = (3, 3)\n y = relay.nn.deformable_conv2d(data, offset, kernel,\n strides=(1, 1),\n padding=(1, 1),\n dilation=(1, 1),\n kernel_size=kernel_size,\n 
deformable_groups=deformable_groups,\n groups=groups,\n channels=out_channel)\n weight_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1])\n out_shape = (batch, out_channel, size, size)\n offset_shape = (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, out_shape[2], out_shape[3])\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(out_shape)\n assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type\n assert yy.args[2].checked_type == relay.TensorType(weight_shape)\n\n test_infer_type(1, 4, 16, 4, 4, 1)\n test_infer_type(2, 4, 16, 4, 1, 2)\n\n\n def test_run(batch, in_channel, size, out_channel, deformable_groups, groups):\n kernel_size = (3, 3)\n data_shape = (batch, in_channel, size, size)\n offset_shape = (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, size, size)\n kernel_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1])\n dtype = 'float32'\n data = relay.var(\"data\", shape=data_shape, dtype=dtype)\n offset = relay.var(\"offset\")\n kernel = relay.var(\"kernel\")\n y = relay.nn.deformable_conv2d(data, offset, kernel,\n strides=(1, 1),\n padding=(1, 1),\n dilation=(1, 1),\n kernel_size=kernel_size,\n deformable_groups=deformable_groups,\n groups=groups,\n channels=out_channel)\n func = relay.Function([data, offset, kernel], y)\n data = np.random.uniform(size=data_shape).astype(dtype)\n offset = np.random.uniform(size=offset_shape).astype(dtype)\n kernel = np.random.uniform(size=kernel_shape).astype(dtype)\n ref_res = topi.testing.deformable_conv2d_nchw_python(data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1), deformable_groups=deformable_groups, groups=groups)\n\n for target, ctx in ctx_list():\n for kind in [\"graph\", \"debug\"]:\n intrp1 = relay.create_executor(kind, ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data, offset, kernel)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)\n test_run(1, 4, 16, 4, 1, 1)\n test_run(2, 4, 16, 4, 4, 1)\n\n\nif __name__ == \"__main__\":\n test_resize_infer_type()\n test_resize()\n test_multibox_prior()\n test_multibox_transform_loc()\n test_get_valid_counts()\n test_roi_align()\n test_roi_pool()\n test_proposal()\n test_yolo_reorg_infer_shape()\n test_yolo_reorg()\n test_non_max_suppression()\n test_deformable_conv2d()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-variable, unused-argument, no-init\n\"\"\"\nTensorflow Model Helpers\n========================\nSome helper definitions for tensorflow models.\n\"\"\"\nimport re\nimport os.path\nimport collections\nimport numpy as np\n\n# Tensorflow imports\nimport tensorflow as tf\nfrom tensorflow.core.framework import graph_pb2\n\nfrom tvm.contrib.download import download_testdata\n\n######################################################################\n# Some helper functions\n# ---------------------\n\ndef ProcessGraphDefParam(graph_def):\n \"\"\"Type-checks and possibly canonicalizes `graph_def`.\n\n Parameters\n ----------\n graph_def : Obj\n tensorflow graph definition.\n\n Returns\n -------\n graph_def : Obj\n tensorflow graph devinition\n\n \"\"\"\n\n if not isinstance(graph_def, graph_pb2.GraphDef):\n # `graph_def` could be a dynamically-created message, so try a duck-typed\n # approach\n try:\n old_graph_def = graph_def\n graph_def = graph_pb2.GraphDef()\n graph_def.MergeFrom(old_graph_def)\n except TypeError:\n raise TypeError('graph_def must be a GraphDef proto.')\n return graph_def\n\n\ndef AddShapesToGraphDef(session, out_node):\n \"\"\" Add shapes attribute to nodes of the graph.\n Input graph here is the default graph in context.\n\n Parameters\n ----------\n session : tf.Session\n Tensorflow session\n out_node : String\n Final output node of the graph.\n\n Returns\n -------\n graph_def : Obj\n tensorflow graph definition with shapes attribute added to nodes.\n\n \"\"\"\n\n graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(\n session,\n session.graph.as_graph_def(add_shapes=True),\n [out_node],\n )\n return graph_def\n\nclass NodeLookup(object):\n \"\"\"Converts integer node ID's to human readable labels.\"\"\"\n\n def __init__(self,\n label_lookup_path=None,\n uid_lookup_path=None):\n self.node_lookup = self.load(label_lookup_path, uid_lookup_path)\n\n def load(self, label_lookup_path, uid_lookup_path):\n \"\"\"Loads a human readable English name for each softmax node.\n\n Parameters\n ----------\n label_lookup_path: String\n File containing String UID to integer node ID mapping .\n\n uid_lookup_path: String\n File containing String UID to human-readable string mapping.\n\n Returns\n -------\n node_id_to_name: dict\n dict from integer node ID to human-readable string.\n\n \"\"\"\n if not tf.compat.v1.io.gfile.exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.compat.v1.io.gfile.exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.compat.v1.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.compat.v1.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to 
human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name\n\n def id_to_string(self, node_id):\n if node_id not in self.node_lookup:\n return ''\n return self.node_lookup[node_id]\n\ndef get_workload_official(model_url, model_sub_path):\n \"\"\" Import workload from tensorflow official\n\n Parameters\n ----------\n model_url: str\n URL from where it will be downloaded.\n\n model_sub_path:\n Sub path in extracted tar for the ftozen protobuf file.\n\n Returns\n -------\n model_path: str\n Full path to saved model file\n\n \"\"\"\n\n model_tar_name = os.path.basename(model_url)\n model_path = download_testdata(model_url, model_tar_name, module=['tf', 'official'])\n dir_path = os.path.dirname(model_path)\n\n import tarfile\n if model_path.endswith(\"tgz\") or model_path.endswith(\"gz\"):\n tar = tarfile.open(model_path)\n tar.extractall(path=dir_path)\n tar.close()\n else:\n raise RuntimeError('Could not decompress the file: ' + model_path)\n return os.path.join(dir_path, model_sub_path)\n\ndef get_workload(model_path, model_sub_path=None):\n \"\"\" Import workload from frozen protobuf\n\n Parameters\n ----------\n model_path: str\n model_path on remote repository to download from.\n\n model_sub_path: str\n Model path in the compressed archive.\n\n Returns\n -------\n graph_def: graphdef\n graph_def is the tensorflow workload.\n\n \"\"\"\n\n if model_sub_path:\n path_model = get_workload_official(model_path, model_sub_path)\n else:\n repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/'\n model_url = os.path.join(repo_base, model_path)\n path_model = download_testdata(model_url, model_path, module='tf')\n\n # Creates graph from saved graph_def.pb.\n with tf.compat.v1.gfile.FastGFile(path_model, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n graph = tf.import_graph_def(graph_def, name='')\n return graph_def\n\n#######################################################################\n# PTB LSTMBlockCell Model\n# -----------------------\n\nclass PTBSmallConfig(object):\n \"\"\"Small config.\n This configurations are used when training the model\n \"\"\"\n num_layers = 2\n num_steps = 1\n hidden_size = 200\n batch_size = 1\n vocab_size = 10000\n init_scale = 0.1\n\ndef get_config():\n \"\"\"Configuration used for training the model\"\"\"\n return PTBSmallConfig()\n\ndef pick_from_weight(weight, pows=1.0):\n \"\"\"Identify token from Softmax output.\n This token will be mapped to word in the vocabulary.\n \"\"\"\n weight = weight**pows\n t = np.cumsum(weight)\n s = np.sum(weight)\n return int(np.searchsorted(t, 0.5 * s))\n\ndef do_tf_sample(session, data, in_states, num_samples):\n \"\"\"Sampled from the model\"\"\"\n samples = []\n sample = None\n #Cell inputs c and h should be passed for each layer explicitly.\n state_input_name = ['Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros:0',\n 'Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1:0',\n 'Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros:0',\n 'Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1:0']\n state = session.run(state_input_name)\n\n #Graph nodes to be fetched as run output. 
Tensorflow LSTMBlockCell create internal\n #nodes for intermediate operations (gates) in the cell during run.\n #Cell state (c) is ':1'and cell output (h) is ':6' for each layer.\n fetches = [['Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1',\n 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6',\n 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1',\n 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6'],\n 'Model/Softmax:0']\n\n def _get_feed_dict(input_name, input_data):\n \"\"\"Create feed dict\"\"\"\n feed_dict = {}\n if isinstance(input_data, list):\n for i, e in enumerate(input_name):\n feed_dict[e] = input_data[i]\n else:\n feed_dict[input_name] = input_data\n return feed_dict\n\n for x in data:\n feed_dict = _get_feed_dict(state_input_name, state)\n feed_dict['Model/Placeholder:0'] = [[x]]\n state, probs = session.run(fetches, feed_dict)\n sample = pick_from_weight(probs[0])\n if sample is not None:\n samples.append(sample)\n else:\n samples.append(0)\n\n k = 1\n while k < num_samples:\n feed_dict = _get_feed_dict(state_input_name, state)\n feed_dict['Model/Placeholder:0'] = [[samples[-1]]]\n state, probs = session.run(fetches, feed_dict)\n sample = pick_from_weight(probs[0])\n samples.append(sample)\n k += 1\n return samples, state\n\ndef _create_ptb_vocabulary(data_dir):\n \"\"\"Read the PTB sample data input to create vocabulary\"\"\"\n data_path = os.path.join(data_dir, 'simple-examples/data/')\n file_name = 'ptb.train.txt'\n def _read_words(filename):\n \"\"\"Read the data for creating vocabulary\"\"\"\n with tf.compat.v1.gfile.GFile(filename, \"r\") as f:\n return f.read().encode(\"utf-8\").decode(\"utf-8\").replace(\"\\n\", \"<eos>\").split()\n\n def _build_vocab(filename):\n \"\"\"Create vocabulary\"\"\"\n data = _read_words(filename)\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n #for python 3.x\n id_to_word = dict((v, k) for k, v in word_to_id.items())\n return word_to_id, id_to_word\n\n def ptb_raw_data(data_path, file_name):\n \"\"\"Read the sample data and create vocabulary\"\"\"\n train_path = os.path.join(data_path, file_name)\n word_to_id, id_2_word = _build_vocab(train_path)\n return word_to_id, id_2_word\n return ptb_raw_data(data_path, file_name)\n\ndef get_workload_ptb():\n \"\"\" Import ptb workload from frozen protobuf\n\n Parameters\n ----------\n Nothing.\n\n Returns\n -------\n graph_def: graphdef\n graph_def is the tensorflow workload for ptb.\n\n word_to_id : dict\n English word to integer id mapping\n\n id_to_word : dict\n Integer id to English word mapping\n \"\"\"\n sample_repo = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'\n sample_data_file = 'simple-examples.tgz'\n sample_url = sample_repo+sample_data_file\n ptb_model_file = 'RNN/ptb/ptb_model_with_lstmblockcell.pb'\n\n import tarfile\n file_path = download_testdata(sample_url, sample_data_file, module=['data', 'ptb_data'])\n dir_path = os.path.dirname(file_path)\n t = tarfile.open(file_path, 'r')\n t.extractall(dir_path)\n\n word_to_id, id_to_word = _create_ptb_vocabulary(dir_path)\n return word_to_id, id_to_word, get_workload(ptb_model_file)\n" ]
[ [ "numpy.clip", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.random.randint" ], [ "tensorflow.core.framework.graph_pb2.GraphDef", "tensorflow.compat.v1.io.gfile.exists", "tensorflow.import_graph_def", "tensorflow.logging.fatal", "tensorflow.compat.v1.gfile.FastGFile", "numpy.cumsum", "numpy.searchsorted", "tensorflow.compat.v1.gfile.GFile", "tensorflow.GraphDef", "numpy.sum" ] ]
gauthamkrishna-g/Gest-Face
[ "f20def897d8ce2b10c6312b02cb57cb7241a9d93" ]
[ "other/9_morphological.py" ]
[ "import cv2\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n ret, frame = cap.read()\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n \r\n lower_red = np.array([30, 150, 50])\r\n upper_red = np.array([255, 255, 180])\r\n \r\n mask = cv2.inRange(hsv, lower_red, upper_red)\r\n res = cv2.bitwise_and(frame, frame, mask= mask)\r\n\r\n kernel = np.ones((5, 5), np.uint8)\r\n erosion = cv2.erode(mask, kernel, iterations = 1)\r\n dilation = cv2.dilate(mask, kernel, iterations = 1)\r\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\r\n closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\r\n\r\n\r\n cv2.imshow('frame', frame)\r\n cv2.imshow('mask', mask)\r\n #cv2.imshow('erosion',erosion)\r\n #cv2.imshow('dilation',dilation)\r\n cv2.imshow('opening', opening)\r\n cv2.imshow('closing', closing)\r\n \r\n if cv2.waitKey(5) & 0xFF == 27:\r\n break\r\n \r\ncv2.destroyAllWindows()\r\ncap.release() \r\n" ]
[ [ "numpy.array", "numpy.ones" ] ]
Nobuo-Namura/EPBII
[ "ad50b7c4e291ea53a9b3924f24cb84aed4d347b2" ]
[ "indicator.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nindicator.py\nCopyright (c) 2020 Nobuo Namura\nThis code is released under the MIT License.\n\"\"\"\n\nimport numpy as np\nfrom scipy.spatial import distance\n\n#======================================================================\ndef rmse_history(x_rmse, problem, func, nfg=0):\n rmse = 0.0\n for x in x_rmse:\n rmse += (problem(x)[nfg] - func(x))**2.0\n rmse = np.sqrt(rmse/float(len(x_rmse[:,0])))\n return rmse\n\n#======================================================================\ndef igd_history(f, igd_ref):\n dist = distance.cdist(f, igd_ref)\n igd = np.mean(np.min(dist,axis=0))\n\n return igd" ]
[ [ "scipy.spatial.distance.cdist", "numpy.min" ] ]
wchen459/hgan_jmd_2019
[ "ca8e58b4525eb59f51ec699f1e874eca455a6bac" ]
[ "SC/feasibility.py" ]
[ "import numpy as np\nfrom SC.build_data import check_feasibility\n\n\nif __name__ == '__main__':\n \n X = np.load('../results/SC/SC.npy')\n X0 = X[:,:64]\n X1 = X[:,64:]\n for i in range(X.shape[0]):\n is_feasibe = check_feasibility(X0[i], X1[i])\n print('{}: {}'.format(i, is_feasibe))\n if not is_feasibe:\n break" ]
[ [ "numpy.load" ] ]
lengyuner/hyperpose4fly
[ "c9866bce1a0109e1b9c727ca550b5a380eb3ee17" ]
[ "hyperpose/Model/openpose/model/mbv2_th_openpose.py" ]
[ "import tensorflow as tf\nimport tensorlayer as tl\nfrom tensorlayer import layers\nfrom tensorlayer.models import Model\nfrom tensorlayer.layers import BatchNorm2d, Conv2d, DepthwiseConv2d, LayerList, MaxPool2d\nfrom ..utils import tf_repeat\nfrom ..define import CocoPart,CocoLimb\n\ninitial_w=tl.initializers.random_normal(stddev=0.01)\ninitial_b=tl.initializers.constant(value=0.0)\n\nclass MobilenetThinOpenpose(Model):\n def __init__(self,parts=CocoPart,limbs=CocoLimb,colors=None,n_pos=19,n_limbs=19,num_channels=128,\\\n hin=368,win=368,hout=46,wout=46,backbone=None,pretraining=False,data_format=\"channels_first\"):\n super().__init__()\n self.num_channels=num_channels\n self.parts=parts\n self.limbs=limbs\n self.n_pos=n_pos\n self.colors=colors\n self.n_limbs=n_limbs\n self.n_confmaps=n_pos\n self.n_pafmaps=2*n_limbs\n self.hin=hin\n self.win=win\n self.hout=hout\n self.wout=wout\n self.data_format=data_format\n if(self.data_format==\"channels_first\"):\n self.concat_dim=1\n else:\n self.concat_dim=-1\n if(backbone==None):\n self.backbone=self.Mobilenetv2_variant(data_format=self.data_format)\n else:\n self.backbone=backbone(scale_size=8,pretraining=pretraining,data_format=self.data_format)\n self.init_stage=self.Init_stage(n_confmaps=self.n_confmaps,in_channels=self.backbone.out_channels,data_format=self.data_format)\n self.refinement_stage_1=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n self.refinement_stage_2=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n self.refinement_stage_3=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n self.refinement_stage_4=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n self.refinement_stage_5=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n \n @tf.function\n def forward(self,x,is_train=False,stage_num=5,domainadapt=False):\n conf_list=[]\n paf_list=[] \n backbone_features=self.backbone.forward(x)\n conf_map,paf_map=self.init_stage.forward(backbone_features)\n conf_list.append(conf_map)\n paf_list.append(paf_map)\n for refinement_stage_idx in range(1,stage_num+1):\n x=tf.concat([backbone_features,conf_list[-1],paf_list[-1]],self.concat_dim)\n conf_map,paf_map=eval(f\"self.refinement_stage_{refinement_stage_idx}.forward(x)\")\n conf_list.append(conf_map)\n paf_list.append(paf_map)\n if(domainadapt):\n return conf_list[-1],paf_list[-1],conf_list,paf_list,backbone_features\n if(is_train):\n return conf_list[-1],paf_list[-1],conf_list,paf_list\n else:\n return conf_list[-1],paf_list[-1]\n \n @tf.function(experimental_relax_shapes=True)\n def infer(self,x):\n conf_map,paf_map=self.forward(x,is_train=False)\n return conf_map,paf_map\n \n def cal_loss(self,gt_conf,gt_paf,mask,stage_confs,stage_pafs):\n stage_losses=[]\n batch_size=gt_conf.shape[0]\n mask_conf=tf_repeat(mask, [1,self.n_confmaps ,1,1])\n mask_paf=tf_repeat(mask,[1,self.n_pafmaps ,1,1])\n loss_confs,loss_pafs=[],[]\n for stage_conf,stage_paf in 
zip(stage_confs,stage_pafs):\n loss_conf=tf.nn.l2_loss((gt_conf-stage_conf)*mask_conf)\n loss_paf=tf.nn.l2_loss((gt_paf-stage_paf)*mask_paf)\n stage_losses.append(loss_conf)\n stage_losses.append(loss_paf)\n loss_confs.append(loss_conf)\n loss_pafs.append(loss_paf)\n pd_loss=tf.reduce_mean(stage_losses)/batch_size\n return pd_loss,loss_confs,loss_pafs\n\n class Mobilenetv2_variant(Model):\n def __init__(self,data_format=\"channels_first\"):\n super().__init__()\n self.data_format=data_format\n if(self.data_format==\"channels_first\"):\n self.concat_dim=1\n else:\n self.concat_dim=-1\n self.out_channels=1152\n self.scale_size=8\n self.convblock_0=conv_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_1=separable_block(n_filter=64,in_channels=32,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_2=separable_block(n_filter=128,in_channels=64,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_3=separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_4=separable_block(n_filter=256,in_channels=128,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_5=separable_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_6=separable_block(n_filter=512,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_7=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_8=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_9=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_10=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_11=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.maxpool=MaxPool2d(filter_size=(2,2),strides=(2,2),padding=\"SAME\",data_format=self.data_format)\n \n \n def forward(self,x):\n concat_list=[]\n x=self.convblock_0.forward(x)\n x=self.convblock_1.forward(x)\n x=self.convblock_2.forward(x)\n x=self.convblock_3.forward(x)\n concat_list.append(self.maxpool.forward(x))\n x=self.convblock_4.forward(x)\n x=self.convblock_5.forward(x)\n x=self.convblock_6.forward(x)\n x=self.convblock_7.forward(x)\n concat_list.append(x)\n x=self.convblock_8.forward(x)\n x=self.convblock_9.forward(x)\n x=self.convblock_10.forward(x)\n x=self.convblock_11.forward(x)\n concat_list.append(x)\n x=tf.concat(concat_list,self.concat_dim)\n return x\n \n class Init_stage(Model):\n def __init__(self,n_confmaps=19,n_pafmaps=38,in_channels=1152,data_format=\"channels_first\"):\n super().__init__()\n self.n_confmaps=n_confmaps\n self.n_pafmaps=n_pafmaps\n self.in_channels=in_channels\n self.data_format=data_format\n #conf block\n self.conf_block=LayerList([\n separable_block(n_filter=128,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n 
separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=512,in_channels=128,filter_size=(1,1),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=self.n_confmaps,in_channels=512,filter_size=(1,1),strides=(1,1),act=None,data_format=self.data_format)\n ])\n #paf block\n self.paf_block=LayerList([\n separable_block(n_filter=128,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=512,in_channels=128,filter_size=(1,1),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=self.n_pafmaps,in_channels=512,filter_size=(1,1),strides=(1,1),act=None,data_format=self.data_format)\n ])\n \n def forward(self,x):\n conf_map=self.conf_block.forward(x)\n paf_map=self.paf_block.forward(x)\n return conf_map,paf_map\n \n class Refinement_stage(Model):\n def __init__(self,n_confmaps=19,n_pafmaps=38,in_channels=19+38+1152,data_format=\"channels_first\"):\n super().__init__()\n self.n_confmaps=n_confmaps\n self.n_pafmaps=n_pafmaps\n self.in_channels=in_channels\n self.data_format=data_format\n #conf_block\n self.conf_block=LayerList([\n separable_block(n_filter=128,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(1,1),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=self.n_confmaps,in_channels=128,filter_size=(1,1),strides=(1,1),act=None,data_format=self.data_format),\n ])\n #paf_block\n self.paf_block=LayerList([\n separable_block(n_filter=128,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(1,1),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=self.n_pafmaps,in_channels=128,filter_size=(1,1),strides=(1,1),act=None,data_format=self.data_format),\n ])\n \n def forward(self,x):\n conf_map=self.conf_block.forward(x)\n paf_map=self.paf_block.forward(x)\n return conf_map,paf_map\n\ndef conv_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,padding=\"SAME\",data_format=\"channels_first\"):\n layer_list=[]\n layer_list.append(Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=filter_size,strides=strides,act=act,\\\n W_init=initial_w,b_init=initial_b,data_format=data_format,padding=padding))\n 
layer_list.append(BatchNorm2d(num_features=n_filter,decay=0.999,is_train=True,act=act,data_format=data_format))\n return LayerList(layer_list)\n\ndef separable_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),dilation_rate=(1,1),act=tf.nn.relu,data_format=\"channels_first\"):\n layer_list=[]\n layer_list.append(DepthwiseConv2d(filter_size=filter_size,strides=strides,in_channels=in_channels,\n dilation_rate=dilation_rate,W_init=initial_w,b_init=None,data_format=data_format))\n layer_list.append(BatchNorm2d(decay=0.99,act=act,num_features=in_channels,data_format=data_format,is_train=True))\n layer_list.append(Conv2d(n_filter=n_filter,filter_size=(1,1),strides=(1,1),in_channels=in_channels,W_init=initial_w,b_init=None,data_format=data_format))\n layer_list.append(BatchNorm2d(decay=0.99,act=act,num_features=n_filter,data_format=data_format,is_train=True))\n return layers.LayerList(layer_list)" ]
[ [ "tensorflow.nn.l2_loss", "tensorflow.function", "tensorflow.concat", "tensorflow.reduce_mean" ] ]
Basketkase/openpilot
[ "769e1cf7a8322ca83d1a86a2f547acf5e3a5a52e", "769e1cf7a8322ca83d1a86a2f547acf5e3a5a52e" ]
[ "selfdrive/car/volkswagen/carstate.py", "selfdrive/debug/cpu_usage_stat.py" ]
[ "import numpy as np\nfrom cereal import car\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.car.interfaces import CarStateBase\nfrom opendbc.can.parser import CANParser\nfrom opendbc.can.can_define import CANDefine\nfrom selfdrive.car.volkswagen.values import DBC_FILES, CANBUS, NetworkLocation, TransmissionType, GearShifter, BUTTON_STATES, CarControllerParams\n\nclass CarState(CarStateBase):\n def __init__(self, CP):\n super().__init__(CP)\n can_define = CANDefine(DBC_FILES.mqb)\n if CP.transmissionType == TransmissionType.automatic:\n self.shifter_values = can_define.dv[\"Getriebe_11\"][\"GE_Fahrstufe\"]\n elif CP.transmissionType == TransmissionType.direct:\n self.shifter_values = can_define.dv[\"EV_Gearshift\"][\"GearPosition\"]\n self.hca_status_values = can_define.dv[\"LH_EPS_03\"][\"EPS_HCA_Status\"]\n self.buttonStates = BUTTON_STATES.copy()\n\n def update(self, pt_cp, cam_cp, ext_cp, trans_type):\n ret = car.CarState.new_message()\n # Update vehicle speed and acceleration from ABS wheel speeds.\n ret.wheelSpeeds.fl = pt_cp.vl[\"ESP_19\"][\"ESP_VL_Radgeschw_02\"] * CV.KPH_TO_MS\n ret.wheelSpeeds.fr = pt_cp.vl[\"ESP_19\"][\"ESP_VR_Radgeschw_02\"] * CV.KPH_TO_MS\n ret.wheelSpeeds.rl = pt_cp.vl[\"ESP_19\"][\"ESP_HL_Radgeschw_02\"] * CV.KPH_TO_MS\n ret.wheelSpeeds.rr = pt_cp.vl[\"ESP_19\"][\"ESP_HR_Radgeschw_02\"] * CV.KPH_TO_MS\n\n ret.vEgoRaw = float(np.mean([ret.wheelSpeeds.fl, ret.wheelSpeeds.fr, ret.wheelSpeeds.rl, ret.wheelSpeeds.rr]))\n ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)\n\n ret.standstill = ret.vEgoRaw < 0.1\n\n # Update steering angle, rate, yaw rate, and driver input torque. VW send\n # the sign/direction in a separate signal so they must be recombined.\n ret.steeringAngleDeg = pt_cp.vl[\"LH_EPS_03\"][\"EPS_Berechneter_LW\"] * (1, -1)[int(pt_cp.vl[\"LH_EPS_03\"][\"EPS_VZ_BLW\"])]\n ret.steeringRateDeg = pt_cp.vl[\"LWI_01\"][\"LWI_Lenkradw_Geschw\"] * (1, -1)[int(pt_cp.vl[\"LWI_01\"][\"LWI_VZ_Lenkradw_Geschw\"])]\n ret.steeringTorque = pt_cp.vl[\"LH_EPS_03\"][\"EPS_Lenkmoment\"] * (1, -1)[int(pt_cp.vl[\"LH_EPS_03\"][\"EPS_VZ_Lenkmoment\"])]\n ret.steeringPressed = abs(ret.steeringTorque) > CarControllerParams.STEER_DRIVER_ALLOWANCE\n ret.yawRate = pt_cp.vl[\"ESP_02\"][\"ESP_Gierrate\"] * (1, -1)[int(pt_cp.vl[\"ESP_02\"][\"ESP_VZ_Gierrate\"])] * CV.DEG_TO_RAD\n\n # Verify EPS readiness to accept steering commands\n hca_status = self.hca_status_values.get(pt_cp.vl[\"LH_EPS_03\"][\"EPS_HCA_Status\"])\n ret.steerError = hca_status in [\"DISABLED\", \"FAULT\"]\n ret.steerWarning = hca_status in [\"INITIALIZING\", \"REJECTED\"]\n\n # Update gas, brakes, and gearshift.\n ret.gas = pt_cp.vl[\"Motor_20\"][\"MO_Fahrpedalrohwert_01\"] / 100.0\n ret.gasPressed = ret.gas > 0\n ret.brake = pt_cp.vl[\"ESP_05\"][\"ESP_Bremsdruck\"] / 250.0 # FIXME: this is pressure in Bar, not sure what OP expects\n ret.brakePressed = bool(pt_cp.vl[\"ESP_05\"][\"ESP_Fahrer_bremst\"])\n\n # Update gear and/or clutch position data.\n if trans_type == TransmissionType.automatic:\n ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl[\"Getriebe_11\"][\"GE_Fahrstufe\"], None))\n elif trans_type == TransmissionType.direct:\n ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl[\"EV_Gearshift\"][\"GearPosition\"], None))\n elif trans_type == TransmissionType.manual:\n ret.clutchPressed = not pt_cp.vl[\"Motor_14\"][\"MO_Kuppl_schalter\"]\n if bool(pt_cp.vl[\"Gateway_72\"][\"BCM1_Rueckfahrlicht_Schalter\"]):\n ret.gearShifter = 
GearShifter.reverse\n else:\n ret.gearShifter = GearShifter.drive\n\n # Update door and trunk/hatch lid open status.\n ret.doorOpen = any([pt_cp.vl[\"Gateway_72\"][\"ZV_FT_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_BT_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_HFS_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_HBFS_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_HD_offen\"]])\n\n # Update seatbelt fastened status.\n ret.seatbeltUnlatched = pt_cp.vl[\"Airbag_02\"][\"AB_Gurtschloss_FA\"] != 3\n\n # Update driver preference for metric. VW stores many different unit\n # preferences, including separate units for for distance vs. speed.\n # We use the speed preference for OP.\n self.displayMetricUnits = not pt_cp.vl[\"Einheiten_01\"][\"KBI_MFA_v_Einheit_02\"]\n\n # Consume blind-spot monitoring info/warning LED states, if available.\n # Infostufe: BSM LED on, Warnung: BSM LED flashing\n if self.CP.enableBsm:\n ret.leftBlindspot = bool(ext_cp.vl[\"SWA_01\"][\"SWA_Infostufe_SWA_li\"]) or bool(ext_cp.vl[\"SWA_01\"][\"SWA_Warnung_SWA_li\"])\n ret.rightBlindspot = bool(ext_cp.vl[\"SWA_01\"][\"SWA_Infostufe_SWA_re\"]) or bool(ext_cp.vl[\"SWA_01\"][\"SWA_Warnung_SWA_re\"])\n\n # Consume factory LDW data relevant for factory SWA (Lane Change Assist)\n # and capture it for forwarding to the blind spot radar controller\n self.ldw_lane_warning_left = bool(cam_cp.vl[\"LDW_02\"][\"LDW_SW_Warnung_links\"])\n self.ldw_lane_warning_right = bool(cam_cp.vl[\"LDW_02\"][\"LDW_SW_Warnung_rechts\"])\n self.ldw_side_dlc_tlc = bool(cam_cp.vl[\"LDW_02\"][\"LDW_Seite_DLCTLC\"])\n self.ldw_dlc = cam_cp.vl[\"LDW_02\"][\"LDW_DLC\"]\n self.ldw_tlc = cam_cp.vl[\"LDW_02\"][\"LDW_TLC\"]\n\n # Stock FCW is considered active if the release bit for brake-jerk warning\n # is set. Stock AEB considered active if the partial braking or target\n # braking release bits are set.\n # Refer to VW Self Study Program 890253: Volkswagen Driver Assistance\n # Systems, chapter on Front Assist with Braking: Golf Family for all MQB\n ret.stockFcw = bool(ext_cp.vl[\"ACC_10\"][\"AWV2_Freigabe\"])\n ret.stockAeb = bool(ext_cp.vl[\"ACC_10\"][\"ANB_Teilbremsung_Freigabe\"]) or bool(ext_cp.vl[\"ACC_10\"][\"ANB_Zielbremsung_Freigabe\"])\n\n # Update ACC radar status.\n accStatus = pt_cp.vl[\"TSK_06\"][\"TSK_Status\"]\n if accStatus == 2:\n # ACC okay and enabled, but not currently engaged\n ret.cruiseState.available = True\n ret.cruiseState.enabled = False\n elif accStatus in [3, 4, 5]:\n # ACC okay and enabled, currently engaged and regulating speed (3) or engaged with driver accelerating (4) or overrun (5)\n ret.cruiseState.available = True\n ret.cruiseState.enabled = True\n else:\n # ACC okay but disabled (1), or a radar visibility or other fault/disruption (6 or 7)\n ret.cruiseState.available = False\n ret.cruiseState.enabled = False\n\n # Update ACC setpoint. 
When the setpoint is zero or there's an error, the\n # radar sends a set-speed of ~90.69 m/s / 203mph.\n ret.cruiseState.speed = ext_cp.vl[\"ACC_02\"][\"ACC_Wunschgeschw\"] * CV.KPH_TO_MS\n if ret.cruiseState.speed > 90:\n ret.cruiseState.speed = 0\n\n # Update control button states for turn signals and ACC controls.\n self.buttonStates[\"accelCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Hoch\"])\n self.buttonStates[\"decelCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Runter\"])\n self.buttonStates[\"cancel\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Abbrechen\"])\n self.buttonStates[\"setCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Setzen\"])\n self.buttonStates[\"resumeCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Wiederaufnahme\"])\n self.buttonStates[\"gapAdjustCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Verstellung_Zeitluecke\"])\n ret.leftBlinker = bool(pt_cp.vl[\"Blinkmodi_02\"][\"Comfort_Signal_Left\"])\n ret.rightBlinker = bool(pt_cp.vl[\"Blinkmodi_02\"][\"Comfort_Signal_Right\"])\n\n # Read ACC hardware button type configuration info that has to pass thru\n # to the radar. Ends up being different for steering wheel buttons vs\n # third stalk type controls.\n self.graHauptschalter = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Hauptschalter\"]\n self.graTypHauptschalter = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Typ_Hauptschalter\"]\n self.graButtonTypeInfo = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_ButtonTypeInfo\"]\n self.graTipStufe2 = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Stufe_2\"]\n # Pick up the GRA_ACC_01 CAN message counter so we can sync to it for\n # later cruise-control button spamming.\n self.graMsgBusCounter = pt_cp.vl[\"GRA_ACC_01\"][\"COUNTER\"]\n\n # Additional safety checks performed in CarInterface.\n self.parkingBrakeSet = bool(pt_cp.vl[\"Kombi_01\"][\"KBI_Handbremse\"]) # FIXME: need to include an EPB check as well\n ret.espDisabled = pt_cp.vl[\"ESP_21\"][\"ESP_Tastung_passiv\"] != 0\n\n return ret\n\n @staticmethod\n def get_can_parser(CP):\n # this function generates lists for signal, messages and initial values\n signals = [\n # sig_name, sig_address, default\n (\"EPS_Berechneter_LW\", \"LH_EPS_03\", 0), # Absolute steering angle\n (\"EPS_VZ_BLW\", \"LH_EPS_03\", 0), # Steering angle sign\n (\"LWI_Lenkradw_Geschw\", \"LWI_01\", 0), # Absolute steering rate\n (\"LWI_VZ_Lenkradw_Geschw\", \"LWI_01\", 0), # Steering rate sign\n (\"ESP_VL_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, front left\n (\"ESP_VR_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, front right\n (\"ESP_HL_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, rear left\n (\"ESP_HR_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, rear right\n (\"ESP_Gierrate\", \"ESP_02\", 0), # Absolute yaw rate\n (\"ESP_VZ_Gierrate\", \"ESP_02\", 0), # Yaw rate sign\n (\"ZV_FT_offen\", \"Gateway_72\", 0), # Door open, driver\n (\"ZV_BT_offen\", \"Gateway_72\", 0), # Door open, passenger\n (\"ZV_HFS_offen\", \"Gateway_72\", 0), # Door open, rear left\n (\"ZV_HBFS_offen\", \"Gateway_72\", 0), # Door open, rear right\n (\"ZV_HD_offen\", \"Gateway_72\", 0), # Trunk or hatch open\n (\"Comfort_Signal_Left\", \"Blinkmodi_02\", 0), # Left turn signal including comfort blink interval\n (\"Comfort_Signal_Right\", \"Blinkmodi_02\", 0), # Right turn signal including comfort blink interval\n (\"AB_Gurtschloss_FA\", \"Airbag_02\", 0), # Seatbelt status, driver\n (\"AB_Gurtschloss_BF\", \"Airbag_02\", 0), # Seatbelt status, passenger\n (\"ESP_Fahrer_bremst\", \"ESP_05\", 0), # Brake pedal pressed\n (\"ESP_Bremsdruck\", 
\"ESP_05\", 0), # Brake pressure applied\n (\"MO_Fahrpedalrohwert_01\", \"Motor_20\", 0), # Accelerator pedal value\n (\"EPS_Lenkmoment\", \"LH_EPS_03\", 0), # Absolute driver torque input\n (\"EPS_VZ_Lenkmoment\", \"LH_EPS_03\", 0), # Driver torque input sign\n (\"EPS_HCA_Status\", \"LH_EPS_03\", 3), # EPS HCA control status\n (\"ESP_Tastung_passiv\", \"ESP_21\", 0), # Stability control disabled\n (\"KBI_MFA_v_Einheit_02\", \"Einheiten_01\", 0), # MPH vs KMH speed display\n (\"KBI_Handbremse\", \"Kombi_01\", 0), # Manual handbrake applied\n (\"TSK_Status\", \"TSK_06\", 0), # ACC engagement status from drivetrain coordinator\n (\"GRA_Hauptschalter\", \"GRA_ACC_01\", 0), # ACC button, on/off\n (\"GRA_Abbrechen\", \"GRA_ACC_01\", 0), # ACC button, cancel\n (\"GRA_Tip_Setzen\", \"GRA_ACC_01\", 0), # ACC button, set\n (\"GRA_Tip_Hoch\", \"GRA_ACC_01\", 0), # ACC button, increase or accel\n (\"GRA_Tip_Runter\", \"GRA_ACC_01\", 0), # ACC button, decrease or decel\n (\"GRA_Tip_Wiederaufnahme\", \"GRA_ACC_01\", 0), # ACC button, resume\n (\"GRA_Verstellung_Zeitluecke\", \"GRA_ACC_01\", 0), # ACC button, time gap adj\n (\"GRA_Typ_Hauptschalter\", \"GRA_ACC_01\", 0), # ACC main button type\n (\"GRA_Tip_Stufe_2\", \"GRA_ACC_01\", 0), # unknown related to stalk type\n (\"GRA_ButtonTypeInfo\", \"GRA_ACC_01\", 0), # unknown related to stalk type\n (\"COUNTER\", \"GRA_ACC_01\", 0), # GRA_ACC_01 CAN message counter\n ]\n\n checks = [\n # sig_address, frequency\n (\"LWI_01\", 100), # From J500 Steering Assist with integrated sensors\n (\"LH_EPS_03\", 100), # From J500 Steering Assist with integrated sensors\n (\"ESP_19\", 100), # From J104 ABS/ESP controller\n (\"ESP_05\", 50), # From J104 ABS/ESP controller\n (\"ESP_21\", 50), # From J104 ABS/ESP controller\n (\"Motor_20\", 50), # From J623 Engine control module\n (\"TSK_06\", 50), # From J623 Engine control module\n (\"ESP_02\", 50), # From J104 ABS/ESP controller\n (\"GRA_ACC_01\", 33), # From J533 CAN gateway (via LIN from steering wheel controls)\n (\"Gateway_72\", 10), # From J533 CAN gateway (aggregated data)\n (\"Airbag_02\", 5), # From J234 Airbag control module\n (\"Kombi_01\", 2), # From J285 Instrument cluster\n (\"Blinkmodi_02\", 1), # From J519 BCM (sent at 1Hz when no lights active, 50Hz when active)\n (\"Einheiten_01\", 1), # From J??? not known if gateway, cluster, or BCM\n ]\n\n if CP.transmissionType == TransmissionType.automatic:\n signals += [(\"GE_Fahrstufe\", \"Getriebe_11\", 0)] # Auto trans gear selector position\n checks += [(\"Getriebe_11\", 20)] # From J743 Auto transmission control module\n elif CP.transmissionType == TransmissionType.direct:\n signals += [(\"GearPosition\", \"EV_Gearshift\", 0)] # EV gear selector position\n checks += [(\"EV_Gearshift\", 10)] # From J??? 
unknown EV control module\n elif CP.transmissionType == TransmissionType.manual:\n signals += [(\"MO_Kuppl_schalter\", \"Motor_14\", 0), # Clutch switch\n (\"BCM1_Rueckfahrlicht_Schalter\", \"Gateway_72\", 0)] # Reverse light from BCM\n checks += [(\"Motor_14\", 10)] # From J623 Engine control module\n\n if CP.networkLocation == NetworkLocation.fwdCamera:\n # Radars are here on CANBUS.pt\n signals += MqbExtraSignals.fwd_radar_signals\n checks += MqbExtraSignals.fwd_radar_checks\n if CP.enableBsm:\n signals += MqbExtraSignals.bsm_radar_signals\n checks += MqbExtraSignals.bsm_radar_checks\n\n return CANParser(DBC_FILES.mqb, signals, checks, CANBUS.pt)\n\n @staticmethod\n def get_cam_can_parser(CP):\n\n signals = [\n # sig_name, sig_address, default\n (\"LDW_SW_Warnung_links\", \"LDW_02\", 0), # Blind spot in warning mode on left side due to lane departure\n (\"LDW_SW_Warnung_rechts\", \"LDW_02\", 0), # Blind spot in warning mode on right side due to lane departure\n (\"LDW_Seite_DLCTLC\", \"LDW_02\", 0), # Direction of most likely lane departure (left or right)\n (\"LDW_DLC\", \"LDW_02\", 0), # Lane departure, distance to line crossing\n (\"LDW_TLC\", \"LDW_02\", 0), # Lane departure, time to line crossing\n ]\n\n checks = [\n # sig_address, frequency\n (\"LDW_02\", 10) # From R242 Driver assistance camera\n ]\n\n if CP.networkLocation == NetworkLocation.gateway:\n # Radars are here on CANBUS.cam\n signals += MqbExtraSignals.fwd_radar_signals\n checks += MqbExtraSignals.fwd_radar_checks\n if CP.enableBsm:\n signals += MqbExtraSignals.bsm_radar_signals\n checks += MqbExtraSignals.bsm_radar_checks\n\n return CANParser(DBC_FILES.mqb, signals, checks, CANBUS.cam)\n\nclass MqbExtraSignals:\n # Additional signal and message lists for optional or bus-portable controllers\n fwd_radar_signals = [\n (\"ACC_Wunschgeschw\", \"ACC_02\", 0), # ACC set speed\n (\"AWV2_Freigabe\", \"ACC_10\", 0), # FCW brake jerk release\n (\"ANB_Teilbremsung_Freigabe\", \"ACC_10\", 0), # AEB partial braking release\n (\"ANB_Zielbremsung_Freigabe\", \"ACC_10\", 0), # AEB target braking release\n ]\n fwd_radar_checks = [\n (\"ACC_10\", 50), # From J428 ACC radar control module\n (\"ACC_02\", 17), # From J428 ACC radar control module\n ]\n bsm_radar_signals = [\n (\"SWA_Infostufe_SWA_li\", \"SWA_01\", 0), # Blind spot object info, left\n (\"SWA_Warnung_SWA_li\", \"SWA_01\", 0), # Blind spot object warning, left\n (\"SWA_Infostufe_SWA_re\", \"SWA_01\", 0), # Blind spot object info, right\n (\"SWA_Warnung_SWA_re\", \"SWA_01\", 0), # Blind spot object warning, right\n ]\n bsm_radar_checks = [\n (\"SWA_01\", 20), # From J1086 Lane Change Assist\n ]\n", "#!/usr/bin/env python3\n# type: ignore\n'''\nSystem tools like top/htop can only show current cpu usage values, so I write this script to do statistics jobs.\n Features:\n Use psutil library to sample cpu usage(avergage for all cores) of openpilot processes, at a rate of 5 samples/sec.\n Do cpu usage statistics periodically, 5 seconds as a cycle.\n Caculate the average cpu usage within this cycle.\n Caculate minumium/maximium/accumulated_average cpu usage as long term inspections.\n Monitor multiple processes simuteneously.\n Sample usage:\n root@localhost:/data/openpilot$ python selfdrive/debug/cpu_usage_stat.py boardd,ubloxd\n ('Add monitored proc:', './boardd')\n ('Add monitored proc:', 'python locationd/ubloxd.py')\n boardd: 1.96%, min: 1.96%, max: 1.96%, acc: 1.96%\n ubloxd.py: 0.39%, min: 0.39%, max: 0.39%, acc: 0.39%\n'''\nimport psutil\nimport time\nimport os\nimport 
sys\nimport numpy as np\nimport argparse\nimport re\nfrom collections import defaultdict\n\nfrom selfdrive.manager.process_config import managed_processes\n\n# Do statistics every 5 seconds\nPRINT_INTERVAL = 5\nSLEEP_INTERVAL = 0.2\n\nmonitored_proc_names = [\n # android procs\n 'SurfaceFlinger', 'sensors.qcom'\n] + list(managed_processes.keys())\n\ncpu_time_names = ['user', 'system', 'children_user', 'children_system']\n\ntimer = getattr(time, 'monotonic', time.time)\n\n\ndef get_arg_parser():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\"proc_names\", nargs=\"?\", default='',\n help=\"Process names to be monitored, comma seperated\")\n parser.add_argument(\"--list_all\", action='store_true',\n help=\"Show all running processes' cmdline\")\n parser.add_argument(\"--detailed_times\", action='store_true',\n help=\"show cpu time details (split by user, system, child user, child system)\")\n return parser\n\n\nif __name__ == \"__main__\":\n args = get_arg_parser().parse_args(sys.argv[1:])\n if args.list_all:\n for p in psutil.process_iter():\n print('cmdline', p.cmdline(), 'name', p.name())\n sys.exit(0)\n\n if len(args.proc_names) > 0:\n monitored_proc_names = args.proc_names.split(',')\n monitored_procs = []\n stats = {}\n for p in psutil.process_iter():\n if p == psutil.Process():\n continue\n matched = any([l for l in p.cmdline() if any([pn for pn in monitored_proc_names if re.match(r'.*{}.*'.format(pn), l, re.M | re.I)])])\n if matched:\n k = ' '.join(p.cmdline())\n print('Add monitored proc:', k)\n stats[k] = {'cpu_samples': defaultdict(list), 'min': defaultdict(lambda: None), 'max': defaultdict(lambda: None),\n 'avg': defaultdict(lambda: 0.0), 'last_cpu_times': None, 'last_sys_time': None}\n stats[k]['last_sys_time'] = timer()\n stats[k]['last_cpu_times'] = p.cpu_times()\n monitored_procs.append(p)\n i = 0\n interval_int = int(PRINT_INTERVAL / SLEEP_INTERVAL)\n while True:\n for p in monitored_procs:\n k = ' '.join(p.cmdline())\n cur_sys_time = timer()\n cur_cpu_times = p.cpu_times()\n cpu_times = np.subtract(cur_cpu_times, stats[k]['last_cpu_times']) / (cur_sys_time - stats[k]['last_sys_time'])\n stats[k]['last_sys_time'] = cur_sys_time\n stats[k]['last_cpu_times'] = cur_cpu_times\n cpu_percent = 0\n for num, name in enumerate(cpu_time_names):\n stats[k]['cpu_samples'][name].append(cpu_times[num])\n cpu_percent += cpu_times[num]\n stats[k]['cpu_samples']['total'].append(cpu_percent)\n time.sleep(SLEEP_INTERVAL)\n i += 1\n if i % interval_int == 0:\n l = []\n for k, stat in stats.items():\n if len(stat['cpu_samples']) <= 0:\n continue\n for name, samples in stat['cpu_samples'].items():\n samples = np.array(samples)\n avg = samples.mean()\n c = samples.size\n min_cpu = np.amin(samples)\n max_cpu = np.amax(samples)\n if stat['min'][name] is None or min_cpu < stat['min'][name]:\n stat['min'][name] = min_cpu\n if stat['max'][name] is None or max_cpu > stat['max'][name]:\n stat['max'][name] = max_cpu\n stat['avg'][name] = (stat['avg'][name] * (i - c) + avg * c) / (i)\n stat['cpu_samples'][name] = []\n\n msg = 'avg: {1:.2%}, min: {2:.2%}, max: {3:.2%} {0}'.format(os.path.basename(k), stat['avg']['total'], stat['min']['total'], stat['max']['total'])\n if args.detailed_times:\n for stat_type in ['avg', 'min', 'max']:\n msg += '\\n {}: {}'.format(stat_type, [name + ':' + str(round(stat[stat_type][name]*100, 2)) for name in cpu_time_names])\n l.append((os.path.basename(k), stat['avg']['total'], msg))\n l.sort(key=lambda x: 
-x[1])\n for x in l:\n print(x[2])\n print('avg sum: {0:.2%} over {1} samples {2} seconds\\n'.format(\n sum([stat['avg']['total'] for k, stat in stats.items()]), i, i * SLEEP_INTERVAL\n ))\n" ]
[ [ "numpy.mean" ], [ "numpy.amin", "numpy.amax", "numpy.array", "numpy.subtract" ] ]
christophershultz/spatial_ag
[ "1c56e2e5fbc15a4f56d6d7bb94fab6a796d07dbf" ]
[ "usda_data/joinUSDA.py" ]
[ "import pandas as pd\nimport numpy as np\nimport os, pdb, sys\n\ndef netIncome(): \n df = pd.read_csv('usda_data/net_income.csv')\n df = df[df['Year'] == 2017].reset_index().drop(['index'], axis = 1)\n df = df[['Year', 'State', 'State ANSI', 'County', 'County ANSI', 'Zip Code', 'Value']]\n df.columns = ['yr', 'st', 'st_ansi', 'cty', 'cty_ansi', 'zip', 'netinc']\n df['st_cty_yr'] = [df['st'][i] + '_' + df['cty'][i] + '_' + str(df['yr'][i]) for i in range(len(df))]\n print(str(len(df)))\n return df\n\ndef joinData(df, col):\n new = pd.read_csv('usda_data/' + col + '.csv')\n if col == 'labor': new = new[new['Domain'] == 'TOTAL'].reset_index().drop(['index'], axis = 1)\n new = new[['Year', 'State', 'State ANSI', 'County', 'County ANSI', 'Zip Code', 'Value']]\n new.columns = ['yr', 'st', 'st_ansi', 'cty', 'cty_ansi', 'zip', col]\n new['st_cty_yr'] = [new['st'][i] + '_' + new['cty'][i] + '_' + str(new['yr'][i]) for i in range(len(new))]\n new = new[['st_cty_yr', col]]\n df = pd.merge(df, new, how = 'left', on = 'st_cty_yr')\n print(str(len(df)))\n return df\n\ndef updateFips(df): \n df['st_ansi'] = [str(i) for i in df['st_ansi']]\n df['st_ansi'] = ['0' + i if len(i) == 1 else i for i in df['st_ansi']]\n df['cty_ansi'] = [int(i) if str(i).lower() != 'nan' else 0 for i in df['cty_ansi']]\n df['cty_ansi'] = [str(i) for i in df['cty_ansi']]\n df['cty_ansi'] = ['0'*(3-len(i)) + i if len(i) != 3 else i for i in df['cty_ansi']]\n df['fips'] = [st + '-' + cty for st, cty in zip(df['st_ansi'], df['cty_ansi'])]\n return df\n\ndef main(): \n df = netIncome()\n for column in ['fertilizer', 'fuel', 'labor', 'land', 'machinery', 'tractors', 'trucks']: \n print(\"Joining \" + column)\n df = joinData(df, column)\n df = updateFips(df)\n df.to_csv('usda_data/joined_usda_df.csv', index = None)\n\nmain()" ]
[ [ "pandas.merge", "pandas.read_csv" ] ]
kmedian/potpourri
[ "54f7c517b6de5be82577e35849f67a0ead4410ae", "54f7c517b6de5be82577e35849f67a0ead4410ae" ]
[ "potpourri/simi3.py", "verto/rnd1.py" ]
[ "\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import SGDRegressor\nimport scipy.stats as ss\n\nmodel = Pipeline(steps=[\n ('scl', StandardScaler()),\n ('lin', SGDRegressor(\n # Logistic Regression\n loss = 'squared_loss',\n penalty = 'l1',\n l1_ratio = 1,\n fit_intercept = True,\n # solver settings\n max_iter = 1000,\n tol = 1e-3,\n shuffle = True,\n random_state = 42,\n # adaptive learning\n learning_rate = 'adaptive',\n eta0 = 0.5,\n # early stopping\n early_stopping = True,\n validation_fraction = 0.15,\n n_iter_no_change = 10,\n # other\n warm_start = True,\n average = False, # disable for Lasso!\n ))\n])\n\nhyper = {\n 'lin__alpha': ss.gamma(a=1.2, loc=1e-6, scale=.08), # alpha ~ [1e-6, 1]\n}\n\nmeta = {\n 'id': \"simi3\",\n 'name': 'LinReg Lasso',\n 'descriptions': (\n \"Lasso Regression (L1 penalty), SGD solver, squared loss function.\"),\n 'solver': 'Stochastic Gradient Descent',\n 'active': True,\n 'keywords': [\n 'linear regression', 'univariate regression', 'multiple regression'],\n 'output_num': 'single',\n 'output_scale': 'interval',\n 'output_dtype': 'float',\n 'input_num': 'multi',\n 'input_scale': 'interval',\n 'input_dtype': 'float'\n}\n", "from sklearn.base import BaseEstimator, TransformerMixin\nimport numpy as np\n\n\nclass RandomFeature(BaseEstimator, TransformerMixin):\n def __init__(self, n_cols=1):\n self.n_cols = n_cols\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n n_rows = len(X)\n return np.random.standard_normal((n_rows, self.n_cols))\n\n\ntrans = RandomFeature()\n\nmeta = {\n 'id': 'rnd1',\n 'name': 'Random Feature',\n 'description': (\n \"Generate 1 or more random standard normal variable(s). \"\n \"Use it in pre-analysing feature importances.\"),\n 'keywords': ['standard_normal'],\n 'feature_names_prefix': 'rnd_std'\n}\n" ]
[ [ "sklearn.linear_model.SGDRegressor", "sklearn.preprocessing.StandardScaler", "scipy.stats.gamma" ], [ "numpy.random.standard_normal" ] ]
christianb93/MachineLearning
[ "30d3b182d33f19b210aa393208236e626eaf5f6a" ]
[ "RBM/Base.py" ]
[ "#####################################################\n#\n# Base class for restricted Boltzmann machines\n#\n#\n# Copyright (c) 2018 christianb93\n# Permission is hereby granted, free of charge, to \n# any person obtaining a copy of this software and \n# associated documentation files (the \"Software\"), \n# to deal in the Software without restriction, \n# including without limitation the rights to use, \n# copy, modify, merge, publish, distribute, \n# sublicense, and/or sell copies of the Software, \n# and to permit persons to whom the Software is \n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice \n# shall be included in all copies or substantial \n# portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY \n# OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT \n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. \n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS \n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, \n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#####################################################\n\nimport numpy as np\nfrom scipy.special import expit\n\n\nclass BaseRBM:\n \n \n #\n # Call this after the training has completed\n #\n def postTraining(self):\n pass\n \n #\n # Run one step in a Gibbs sampling Markov chain. \n # We sample the hidden units from the visible\n # units V and the visible units V' from the\n # hidden units. V' is returned\n # \n def runGibbsStep(self, V, size = 1):\n #\n # Sample hidden units from visible units\n # \n E = expit(self.beta*(np.matmul(V.astype(int), self.W) + self.c), dtype=self.np_type)\n U = np.random.random_sample(size=(size, self.hidden)).astype(self.np_type)\n H = (U <= E).astype(int)\n #\n # and now sample visible units from hidden units\n #\n P = expit(self.beta*(np.matmul(H, np.transpose(self.W)) + self.b), dtype=self.np_type)\n U = np.random.random_sample(size=(size, self.visible)).astype(self.np_type)\n return (U <= P).astype(int), E\n\n #\n # Sample from the learned distribution, starting with a \n # random value\n #\n def sample(self, iterations = 100, size = 1):\n return self.sampleFrom(np.random.randint(low=0, high=2, size=(size,self.visible)), iterations = iterations, size = size)\n\n #\n # Sample from the learned distribution, starting at some\n # initial value\n #\n def sampleFrom(self, initial, iterations = 100, size = 1):\n V = initial.astype(int)\n for i in range(iterations):\n V, _ = self.runGibbsStep(V, size = size)\n if (iterations > 1000):\n if 0 == i % 1000:\n print(\"Sampling iteration \", i)\n return V\n \n \n #\n # Visualize the weights\n #\n def showWeights(self, fig, cols, rows, x_pix, y_pix):\n for r in range(rows):\n for c in range(cols):\n j = r*cols + c\n #\n # We display the weigths connected to hidden unit j\n #\n w = self.W[:,j]\n #\n # Normalize\n #\n min = np.min(w)\n w = w + min\n max = np.max(w)\n w = w / max\n ax = fig.add_subplot(rows, cols, j+1)\n ax.imshow(w.reshape(x_pix, y_pix), \"Greys\")\n ax.set_yticks([],[])\n ax.set_xticks([],[])\n \n #\n # Retrieve the weights and parameters\n #\n def getParameters(self):\n params = {}\n params['W'] = self.W\n params['b'] = self.b\n params['c'] = self.c\n return params\n \n #\n # Set parameter\n #\n def setParameters(self, params):\n self.W = 
params['W'].astype(self.np_type)\n self.b = params['b'].astype(self.np_type)\n self.c = params['c'].astype(self.np_type)\n\n" ]
[ [ "numpy.min", "numpy.random.random_sample", "numpy.max", "numpy.transpose", "numpy.random.randint" ] ]
arpanmangal/Regression
[ "06969286d7db65a537e89ac37905310592542ca9" ]
[ "Q4/read.py" ]
[ "\"\"\"\nModule for reading data from 'q4x.csv' and 'q4y.csv'\n\"\"\"\n\nimport numpy as np\n\ndef loadData (x_file=\"../ass1_data/q4x.dat\", y_file=\"../ass1_data/q4y.dat\"):\n \"\"\"\n Loads the X, Y matrices.\n \"\"\"\n\n X = np.genfromtxt(x_file, delimiter=' ', dtype=int)\n labels = np.genfromtxt(y_file, dtype=str)\n Y = []\n for label in labels:\n if (label == \"Alaska\"):\n Y.append(0)\n else:\n Y.append(1)\n\n return (X, Y)\n" ]
[ [ "numpy.genfromtxt" ] ]
mfkoerner/icarus
[ "eb480596be127f760d10531d27569290df3e8ff9" ]
[ "photons.py" ]
[ "########################################\n# written for Python 3 #\n# by Doug Fabini ([email protected]) #\n########################################\n\n'''\n\n This script requires the following files to be located in 'baseDir':\n - IBZKPT (to extract number of k points) POSSIBLY NO LONGER NEEDED\n - DOSCAR (to extract bandgap)\n - OUTCAR (to extract dielectric properties and energy resolution)\n\nCurrently only handles an isotropic equivalent for the dielectric / absorption tensors.\n\n'''\n\n\n\n# import packages, apply stylesheet\nimport config\nimport os\nfrom electrons import np, plt, getTotalDOS, bandgap\n\n\n\n# ****************** #\n# DATA I/O FUNCTIONS #\n# ****************** #\n\ndef getNkPts(bd):\n\t''' Parse OUTCAR for number of k-points '''\n\tfname = os.path.join(bd, 'OUTCAR')\n\t# print(fname) #debug line\n\twith open(fname, 'r') as f:\n\t\tfor line in f:\n\t\t\tif 'irreducible k-points' in line:\n\t\t\t\t# print(line) #debug line\n\t\t\t\treturn int(line.split()[1])\n\t\t\t\tbreak\n\ndef getDielectric(bd, anisotropic=False):\n\t''' Parse OUTCAR for dielectric properties, convert to appropriate form '''\n\tfname = os.path.join(bd, 'OUTCAR')\n\twith open(fname, 'r') as f:\n\t\traw = []\n\t\tlnImag, lnReal = 0, 0\n\t\tfor i, line in enumerate(f):\n\t\t\traw.append(line)\n\t\t\tif 'NEDOS' in line: \t\t\t\t\t#This an below find number points per section and start of lines for sections\n\t\t\t\tNEDOS = int(line.split()[5])\n\t\t\tif 'IMAGINARY DIELECTRIC' in line and lnImag is 0:\t#Selecting the first set of Dielectric numbers from VASP\n\t\t\t\tlnImag = i\n\t\t\tif 'REAL DIELECTRIC' in line and lnReal is 0:\n\t\t\t\tlnReal = i\n\tEepsRe, EepsIm = [], []\n\tfor i in range(lnImag+3,lnImag+NEDOS+3):\t\t#All of the imaginary dielectric components (NEDOS components and start point of lnImag+3)\n\t\tif len(raw[i]) < 5:\t\t\t\t\t\t\t#Checking for early termination of DIELECTRIC DATA (printing to output)\n\t\t\tprint('DIELECTRIC DATA TERMINATED AT ONLY {} POINTS'.format(i-lnImag-3))\n\t\t\tbreak\n\t\tEepsIm.append([float(ri) for ri in raw[i].strip('\\n').split()])\t#Energy (frequency) then X,Y,Z,XY,YZ,ZX for imaginary component\n\tE = np.array(EepsIm)[:,0]\t\t\t\t\t\t#Energies pulled from first part of EepsIm\n\tfor i in range(lnReal+3,lnReal+NEDOS+3):\n\t\tif len(raw[i]) < 5:\n\t\t\t# print('DIELECTRIC DATA TERMINATED AT ONLY {} POINTS'.format(i-lnReal-3))\n\t\t\tbreak\n\t\tEepsRe.append([float(ri) for ri in raw[i].strip('\\n').split()])\t#Real part from above\n\tif anisotropic:\n\t\tepsIm = np.array([row[1:] for row in EepsIm])\n\t\tepsRe = np.array([row[1:] for row in EepsRe])\n\telse:\n\t\tepsIm = np.array([isotropic(row[1:]) for row in EepsIm])\t#epsIm is the isotropic equivilent values for each energy\n\t\tepsRe = np.array([isotropic(row[1:]) for row in EepsRe])\t#Real part for epsIm, this time is epsRe\n\treturn E, epsRe + 1j*epsIm \t\t\t\t\t#Returns list of isotropic equivalent values\n\ndef saveResults(bd, E, alpha, eps):\n\t''' Store absorption coefficient and dielectric function '''\n\tout = np.hstack((E, alpha, eps.real, eps.imag))\n\tout = np.reshape(out, (-1, 4), order='F')\n\tnp.savetxt(os.path.join(bd, 'optical.csv'), out, header='h*nu (eV), alpha_iso (cm^-1), Re[eps_iso] (eps_0), Im[eps_iso] (eps_0)')\n\ndef getSolarSpectrum():\n\t''' Get direct+diffuse solar irradiance at global tilt, ASTM G173-03 '''\n\td = np.loadtxt('data/ASTMG173.dat')\n\treturn d[:,0], d[:,2]\n\n\n# ****************** #\n# ANALYSIS FUNCTIONS #\n# ****************** #\n\ndef nm2eV(lam):\n\t''' 
Convert wavelength in nm to energy in eV '''\n\th = 4.136e-15 # Planck constant, eV / s\n\tc = 2.998e8 # speed of light, m / s\n\treturn h*c/(lam*1e-9)\n\ndef eV2nm(hnu):\n\t''' Convert energy in eV to wavelength in nm '''\n\th = 4.136e-15 # Planck constant, eV / s\n\tc = 2.998e8 # speed of light, m / s\n\treturn h*c/hnu*1e9\n\ndef isotropic(sixElements):\n\t''' Returns an isotropic equivalent value for a symmetric 3x3 matrix '''\n\txx, yy, zz, xy, yz, zx = sixElements\n\tA = np.array([[xx, xy, zx], [xy, yy, yz], [zx, yz, zz]])\n\teigval, _ = np.linalg.eigh(A)\n\treturn np.mean(eigval)\n\ndef dielec2optical(hnu, eps):\n\t''' Calculate complex refractive index and absorption coefficient from dielectric function '''\n\th = 4.136e-15 # Planck constant, eV / s\n\tc = 2.998e8 # speed of light, m / s\n\tN = np.sqrt(eps)\n\talpha = 4*np.pi/(h*c)*hnu*N.imag/100 # divisor of 100 takes from m-1 to cm-1\n\treturn N, alpha\n\ndef FOM(hnu, alpha, Eg):\n\n\txx = np.linspace(100, eV2nm(Eg), int(1e4)) \t\t\t\t\t\t\t#proper range of light to think about (100 nm [13eV] to band gap wavelength)\n\txSun, ySun = getSolarSpectrum() \t\t\t\t\t\t\t\t\t\t#xSun -> wavelength of sun, ySun -> intensity of sun\n\tyySun = np.interp(xx, xSun, ySun) \t\t\t\t\t\t\t\t\t\t#ySun calculated at the points for xx (so that we have the right resolution)\n\tyyMat = np.interp(xx, np.flipud(eV2nm(hnu[1:])), np.flipud(alpha[1:])) \t#absorption as a function of wavelength\n\tfrom scipy.integrate import cumtrapz \t\t\t\t\t\t\t\t\t#Trapezoidal numeric integration\n\treturn xx, yySun, yyMat, cumtrapz(yySun*yyMat, xx) \t\t\t\t\t\t#FOM is the last value, which is integral of sum intensity time absorption along wavel\n\n\n\n# ****************** #\n# PLOTTING FUNCTIONS #\n# ****************** #\n\ndef plotDielectric(ax, E, eps, N, El=(0, 10)):\n\t''' Plot complex dielectric function and complex refractive index '''\n\tax.plot(E, eps.real, label='$\\\\epsilon_r\\\\prime$')\n\tax.plot(E, eps.imag, label='$\\\\epsilon_r\\\\prime\\\\prime$')\n\tax.plot(E, N.real, label='$n$')\n\tax.plot(E, N.imag, label='$k$')\n\tax.set_xlim(El)\n\tax.set_xlabel('$h\\\\nu$ (eV)')\n\tax.legend()\n\ndef plotAbsorption(ax, hnu, alpha, xl=(0, 4), yl=(1e2, 1e7), rel2eg=None, lbl=None, wavelength=False):\n\t''' Plot absorption coefficient '''\n\tif wavelength:\n\t\tif rel2eg is not None:\n\t\t\traise Exception('Relative to gap option not available when plotting by wavelength')\n\t\tlh, = ax.semilogy(eV2nm(hnu), alpha, '.-', label=lbl)\n\t\tax.set_xlabel('$\\\\lambda$ (nm)')\n\telif not wavelength and rel2eg is None:\n\t\tlh, = ax.semilogy(hnu, alpha, '.-', label=lbl)\n\t\tax.set_xlabel('$h\\\\nu$ (eV)')\n\telse:\n\t\tlh, = ax.semilogy(hnu-rel2eg, alpha, '.-', label=lbl)\n\t\tax.set_xlabel('$h\\\\nu-E_g$ (eV)')\n\tax.set_xlim(xl)\n\tax.set_ylim(yl)\n\tax.set_ylabel('$\\\\alpha$ (cm$^{-1}$)')\n\treturn lh\n\n\n\n# ********** #\n# HIGH LEVEL #\n# ********** #\n\ndef optical(bd, save=False):\n\t''' DESCRIPTION GOES HERE '''\n\tNk = getNkPts(bd) \t\t\t\t\t\t\t#Gets number of irreducible kpoints but never uses it :O\n\tE, eps = getDielectric(bd) \t\t\t\t#Gets lists of E and equivilent eigenvalues (real + i*imag) for dialectric function\n\tN, alpha = dielec2optical(E, eps)\t\t\t#N (dielectric constant) and alpha (absorption coefficient) from dielectric equivilent eigenvalues\n\n\tEdos, tdos = getTotalDOS(bd)\t\t\t\t#arrays of len NEDOS with energy and DOS at that energy\n\tEg = bandgap(Edos, tdos)\t\t\t\t\t#Calculates bandgap from DOS data\n\n\tif save:\n\t\tsaveResults(bd, 
E, alpha, eps)\t\t\t\t#Saves Energy, absorption, eigenvalue to basedir/optical.csv\n\treturn E, alpha, eps, N, Eg \t\t\t\t#Returns Energy, absorption, eigenvalue, refractive index, bandgap\n" ]
[ [ "scipy.integrate.cumtrapz" ] ]
nirandaperera/pipedream
[ "bc05a4e8ce150f681ba6066805604873a3a7cf97", "bc05a4e8ce150f681ba6066805604873a3a7cf97" ]
[ "runtime/runtime.py", "runtime/image_classification/models/8/resnet101/gpus=4_straight/stage3.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport collections\nimport itertools\nimport time\nimport torch\nimport torch.distributed as dist\n\nimport communication\nimport runtime_utilities\n\nIMAGE_CLASSIFICATION = \"image_classification\"\nTRANSLATION = \"translation\"\nSPEECH_TO_TEXT = \"speech_to_text\"\n\n\nclass ModulesWithDependencies:\n def __init__(self, modules_with_dependencies):\n self._modules = []\n self._all_input_names = []\n self._all_output_names = []\n for (module, input_names, output_names) in modules_with_dependencies:\n self._modules.append(module)\n self._all_input_names.append(input_names)\n self._all_output_names.append(output_names)\n\n def modules(self):\n return self._modules\n\n def all_input_names(self):\n return self._all_input_names\n\n def all_output_names(self):\n return self._all_output_names\n\n def is_input_tensor(self, tensor_name):\n for module_input_names in self._all_input_names:\n if tensor_name in module_input_names:\n return True\n return False\n\n\nclass StageRuntime:\n def __init__(self, model, distributed_backend, fp16, loss_scale,\n training_tensor_shapes, eval_tensor_shapes,\n training_tensor_dtypes, inputs_module_destinations,\n target_tensor_names, configuration_maps, master_addr,\n rank, local_rank, num_ranks_in_server, verbose_freq,\n model_type, enable_recompute=False):\n # Metadata needed for forward and backward pass within this stage.\n self.tensors = []\n self.gradients = {}\n self.distributed_backend = distributed_backend\n self.fp16 = fp16\n self.loss_scale = loss_scale\n self.training_tensor_shapes = training_tensor_shapes\n self.eval_tensor_shapes = eval_tensor_shapes\n self.training_tensor_dtypes = training_tensor_dtypes\n self.model_type = model_type\n self.target_tensor_names = target_tensor_names\n\n self.initialize(model, inputs_module_destinations, configuration_maps,\n master_addr, rank, local_rank, num_ranks_in_server)\n\n self.verbose_freq = verbose_freq\n self.forward_only = False\n\n self.forward_stats = runtime_utilities.RuntimeStats(forward=True)\n self.backward_stats = runtime_utilities.RuntimeStats(forward=False)\n\n # Enable recomputation to prevent the need to save activations\n # computed from the forward pass for the backward pass.\n self.enable_recompute = enable_recompute\n\n # Disable recomputation for the last stage.\n if rank == num_ranks_in_server - 1:\n self.enable_recompute = False\n\n def initialize(self, model, inputs_module_destinations,\n configuration_maps, master_addr, rank,\n local_rank, num_ranks_in_server):\n self.send_ranks = {}\n self.receive_ranks = {}\n self.rank = rank\n self.local_rank = local_rank\n self.stage = None\n self.tensor_tags = {}\n self.forward_minibatch_id = 0\n self.backward_minibatch_id = 0\n self.criterion_input_name = str(model[-1][1][0])\n\n tensor_tag = 1\n for (_, input_tensors, output_tensors) in model:\n for input_tensor in input_tensors:\n if input_tensor not in self.tensor_tags:\n self.tensor_tags[input_tensor] = tensor_tag\n tensor_tag += 1\n for output_tensor in output_tensors:\n if output_tensor not in self.tensor_tags:\n self.tensor_tags[output_tensor] = tensor_tag\n tensor_tag += 1\n for target_tensor_name in sorted(self.target_tensor_names):\n self.tensor_tags[target_tensor_name] = tensor_tag\n tensor_tag += 1\n self.tensor_tags[\"ack\"] = tensor_tag\n tensor_tag += 1\n\n module_to_stage_map = configuration_maps['module_to_stage_map']\n stage_to_rank_map = configuration_maps['stage_to_rank_map']\n stage_to_depth_map = 
configuration_maps['stage_to_depth_map']\n\n if module_to_stage_map is None:\n # If IP addresses not specified, resort to all layers on\n # single machine.\n assert self.rank is None\n self.modules_with_dependencies = ModulesWithDependencies(model)\n self.is_criterion = True\n self.rank_in_stage = 0\n self.num_ranks = 1\n self.num_ranks_in_first_stage = 1\n self.num_ranks_in_previous_stage = 0\n self.num_ranks_in_next_stage = 0\n self.num_stages = 1\n self.num_ranks_in_stage = 1\n self.num_warmup_minibatches = 0\n self.comm_handler = None\n else:\n assert len(module_to_stage_map) == len(model)\n assert self.rank is not None\n\n stage_to_module_map = collections.defaultdict(list)\n for module in range(len(module_to_stage_map)):\n stage_to_module_map[module_to_stage_map[module]].append(module)\n\n rank_to_stage_map = {}\n for stage in stage_to_rank_map:\n for rank in stage_to_rank_map[stage]:\n rank_to_stage_map[rank] = stage\n\n # Now, use this mapping to determine the modules contained in\n # each stage.\n assert 0 <= self.rank < len(rank_to_stage_map)\n self.num_ranks = len(rank_to_stage_map)\n self.num_stages = len(stage_to_module_map)\n self.stage = rank_to_stage_map[self.rank]\n self.rank_in_stage = stage_to_rank_map[self.stage].index(self.rank)\n self.num_ranks_in_stage = len(stage_to_rank_map[self.stage])\n self.num_ranks_in_first_stage = len(stage_to_rank_map[0])\n self.num_ranks_in_previous_stage = 0\n self.ranks_in_previous_stage = []\n if self.stage > 0:\n self.num_ranks_in_previous_stage = len(\n stage_to_rank_map[self.stage - 1])\n self.ranks_in_previous_stage = stage_to_rank_map[self.stage - 1]\n self.num_ranks_in_next_stage = 0\n self.ranks_in_next_stage = []\n if self.stage < self.num_stages - 1:\n self.num_ranks_in_next_stage = len(\n stage_to_rank_map[self.stage + 1])\n self.ranks_in_next_stage = stage_to_rank_map[self.stage + 1]\n modules = stage_to_module_map[self.stage]\n self.modules_with_dependencies = ModulesWithDependencies(\n [model[module] for module in modules])\n self.is_criterion = self.stage == (self.num_stages - 1)\n if stage_to_depth_map is not None:\n self.num_warmup_minibatches = stage_to_depth_map[\n str(self.stage)]\n else:\n self.num_warmup_minibatches = self.num_ranks - 1\n for i in range(self.stage):\n self.num_warmup_minibatches -= len(\n stage_to_rank_map[i])\n self.num_warmup_minibatches = self.num_warmup_minibatches // \\\n self.num_ranks_in_stage\n\n # To determine where tensors should be sent and received, first\n # determine the \"producing\" and \"consuming\" module IDs of each\n # tensor. 
We then use the corresponding machine ranks to send\n # and receive tensors.\n master_port = 12345\n self.comm_handler = communication.CommunicationHandler(\n master_addr=master_addr,\n master_port=master_port,\n rank=self.rank,\n local_rank=self.local_rank,\n num_ranks_in_server=num_ranks_in_server,\n world_size=self.num_ranks,\n fp16=self.fp16,\n backend=self.distributed_backend)\n\n for i in range(len(model)):\n for j in range(i + 1, len(model)):\n for tensor_name in model[i][2]:\n if tensor_name in model[j][1]:\n if module_to_stage_map[i] == \\\n module_to_stage_map[j]:\n continue\n # For now, assume that each stage is served by only\n # a single machine.\n if module_to_stage_map[j] == self.stage:\n self.receive_ranks[tensor_name] = \\\n stage_to_rank_map[module_to_stage_map[i]]\n if module_to_stage_map[i] == self.stage:\n self.send_ranks[tensor_name] = \\\n stage_to_rank_map[module_to_stage_map[j]]\n\n for model_inputs in inputs_module_destinations.keys():\n destination_stage = module_to_stage_map[\n inputs_module_destinations[model_inputs]]\n if destination_stage > self.stage:\n self.send_ranks[model_inputs] = \\\n self.ranks_in_next_stage\n\n if 0 < self.stage <= destination_stage:\n self.receive_ranks[model_inputs] = \\\n self.ranks_in_previous_stage\n\n if destination_stage > 0:\n if model_inputs not in self.tensor_tags:\n self.tensor_tags[model_inputs] = tensor_tag\n tensor_tag += 1\n\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i] = modules[i].cuda()\n if self.fp16:\n import apex.fp16_utils as fp16_utils\n modules[i] = fp16_utils.BN_convert_float(modules[i].half())\n\n # Initialize all groups in the same order on every worker.\n if stage_to_rank_map is not None:\n groups = []\n for stage in range(self.num_stages):\n ranks = stage_to_rank_map[stage]\n if len(ranks) > 1:\n groups.append(dist.new_group(ranks=ranks))\n else:\n groups.append(None)\n group = groups[self.stage]\n else:\n group = None\n\n # self.modules_with_dependencies contains a list of PyTorch\n # modules, along with a list of user-defined input and output\n # tensor names. We use our module_executor.ModuleExecutor\n # class to wrap these dependencies, and use run_forward and\n # run_backward methods downstream.\n num_parameters = 0\n for i in range(len(modules)):\n if group is not None:\n if ((i < (len(modules) - 1) and self.is_criterion)\n or not self.is_criterion):\n num_parameters += \\\n sum(x.size()[0] * x.size()[1]\n if len(x.size()) > 1 else x.size()[0]\n for x in modules[i].parameters() if x.size())\n modules[i] = torch.nn.parallel.DistributedDataParallel(\n modules[i],\n process_group=group,\n device_ids=[local_rank],\n output_device=local_rank)\n if self.num_ranks_in_stage > 1:\n module_size = 4. 
* num_parameters\n print(\"Replicating stage: ranks=%d, module_size=%.3f\" % (\n self.num_ranks_in_stage, module_size))\n\n if self.fp16:\n self.master_parameters = []\n self.model_parameters = []\n for i in range(len(modules)):\n import apex.fp16_utils as fp16_utils\n module_parameters, module_master_parameters = \\\n fp16_utils.prep_param_lists(modules[i])\n self.master_parameters.extend(module_master_parameters)\n self.model_parameters.extend(module_parameters)\n else:\n self.master_parameters = list(self.parameters())\n self.model_parameters = None\n\n if self.comm_handler is not None:\n self.comm_handler.initialize(\n self.receive_ranks,\n self.send_ranks,\n self.tensor_tags,\n self.target_tensor_names,\n self.training_tensor_dtypes,\n self.rank_in_stage,\n self.num_ranks_in_stage,\n self.ranks_in_previous_stage,\n self.ranks_in_next_stage)\n\n @property\n def target(self):\n return self.tensors[-1][\"target\"]\n\n def modules(self):\n return self.modules_with_dependencies.modules()\n\n def parameters(self):\n parameter_iterators = []\n for module in self.modules_with_dependencies.modules():\n parameter_iterators.append(module.parameters())\n return itertools.chain(*parameter_iterators)\n\n def state_dict(self):\n state_dict = collections.OrderedDict()\n for i, module in enumerate(self.modules_with_dependencies.modules()):\n state_dict[\"module%d\" % i] = module.state_dict()\n if self.fp16:\n state_dict[\"master_parameters\"] = self.master_parameters\n return state_dict\n\n def load_state_dict(self, state_dict):\n for i, module in enumerate(self.modules_with_dependencies.modules()):\n module.load_state_dict(state_dict[\"module%d\" % i])\n if self.fp16:\n saved_master_parameters = state_dict[\"master_parameters\"]\n for master_parameter, saved_master_parameter in zip(\n self.master_parameters, saved_master_parameters):\n master_parameter.data.copy_(saved_master_parameter.data)\n\n def cuda(self):\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i] = modules[i].cuda()\n\n def zero_grad(self):\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i].zero_grad()\n\n def train(self, num_iterations):\n self.tensors = []\n self.gradients = {}\n self.tensor_shapes = self.training_tensor_shapes\n self.forward_only = False\n\n self.forward_minibatch_id = 0\n self.backward_minibatch_id = 0\n\n if self.comm_handler is not None:\n self.comm_handler.set_tensor_shapes(self.tensor_shapes)\n self.comm_handler.start_helper_threads(\n num_iterations, forward_only=False)\n\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i].train()\n\n def eval(self, num_iterations):\n self.tensors = []\n self.gradients = {}\n self.tensor_shapes = self.eval_tensor_shapes\n self.tensor_shapes[\"ack\"] = (1,)\n self.forward_only = True\n\n self.forward_minibatch_id = 0\n self.backward_minibatch_id = 0\n\n if self.comm_handler is not None:\n self.comm_handler.set_tensor_shapes(self.tensor_shapes)\n self.comm_handler.start_helper_threads(\n num_iterations, forward_only=True)\n\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i].eval()\n\n def set_loader(self, loader):\n if loader is not None:\n self.loader_iter = iter(loader)\n else:\n self.loader_iter = None\n\n def receive_tensors_forward(self):\n if self.forward_only and len(self.tensors) > 0:\n self.tensors.pop(0)\n self.tensors.append({})\n if self.loader_iter is not None:\n # print(f\"### 
rcvt0 {time.time()}\")\n input = next(self.loader_iter)\n # print(f\"### rcvt1 {time.time()}\")\n if self.model_type == TRANSLATION:\n (input, target) = input\n src, src_length = input\n tgt, tgt_length = target\n\n self.tensors[-1][\"input0\"] = src.cuda(non_blocking=True)\n self.tensors[-1][\"input1\"] = torch.LongTensor(src_length).cuda(\n non_blocking=True)\n self.tensors[-1][\"input2\"] = tgt[:-1].cuda(non_blocking=True)\n self.tensors[-1][\"target\"] = tgt[1:].cuda().contiguous().view(-1)\n self.tensors[-1][\"target_length\"] = \\\n torch.tensor([int(sum(torch.LongTensor(tgt_length) - 1))],\n dtype=torch.int).cuda()\n elif self.model_type == IMAGE_CLASSIFICATION:\n (input, target) = input\n if self.fp16:\n input = input.half()\n self.tensors[-1][\"input0\"] = input.cuda(non_blocking=True)\n self.tensors[-1][\"target\"] = target.cuda(non_blocking=True)\n elif self.model_type == SPEECH_TO_TEXT:\n input, target, input_percentages, target_sizes = input\n input_sizes = input_percentages.mul_(int(input.size(3))).int()\n self.tensors[-1][\"input0\"] = input.cuda(non_blocking=True)\n self.tensors[-1][\"input1\"] = input_sizes.cuda(non_blocking=True)\n self.tensors[-1][\"target\"] = target.cuda(non_blocking=True)\n self.tensors[-1][\"target_length\"] = target_sizes.cuda(\n non_blocking=True)\n # print(f\"### rcv2 {time.time()}\")\n else:\n # Receive all required tensors from upstream machines.\n for input_name in self.receive_ranks:\n if input_name == \"ack\":\n continue\n\n self.tensors[-1][input_name] = \\\n self.comm_handler.recv(\n input_name,\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=False)\n\n self.forward_stats.stats['receive_tensors_size'] += \\\n (self.tensors[-1][input_name].element_size() *\n self.tensors[-1][input_name].nelement())\n\n # Used to track where to receive forward from.\n self.comm_handler.increment_messaging_index(\n sending=False)\n\n def send_tensors_forward(self):\n # Send all required tensors downstream.\n for output_name in self.send_ranks:\n if output_name == \"ack\":\n continue\n\n self.comm_handler.send(\n output_name,\n self.tensors[-1][output_name],\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=False)\n\n self.forward_stats.stats['send_tensors_size'] += \\\n (self.tensors[-1][output_name].element_size() *\n self.tensors[-1][output_name].nelement())\n\n def receive_tensors_backward(self):\n # Receive all required gradients from downstream\n # machines.\n for output_name in self.send_ranks:\n if output_name in self.target_tensor_names:\n continue\n\n self.gradients[output_name] = \\\n self.comm_handler.recv(\n output_name,\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=True)\n\n self.backward_stats.stats['receive_tensors_size'] += \\\n (self.gradients[output_name].element_size() *\n self.gradients[output_name].nelement())\n\n def send_tensors_backward(self):\n # Send all required gradients upstream.\n for input_name in self.receive_ranks:\n if input_name in self.target_tensor_names:\n continue\n\n self.comm_handler.send(\n input_name,\n self.gradients[input_name],\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=True)\n\n self.backward_stats.stats['send_tensors_size'] += \\\n (self.gradients[input_name].element_size() *\n self.gradients[input_name].nelement())\n\n if 
self.num_ranks_in_previous_stage > 0:\n # Used to track where to send tensors in the\n # backward pass.\n self.comm_handler.increment_messaging_index(\n sending=True)\n\n def run_forward(self, recompute_step=False):\n \"\"\"Run forward pass.\n \"\"\"\n # Receive tensors from previous worker.\n ts1 = time.time()\n self.receive_tensors_forward()\n tensors = self.tensors[-1]\n ts2 = time.time()\n\n # Run forward pass.\n self._run_forward(tensors)\n ts3 = time.time()\n\n # Send tensors forward.\n self.send_tensors_forward()\n ts4 = time.time()\n\n if self.verbose_freq > 0 and self.forward_minibatch_id % self.verbose_freq == 0:\n self.forward_stats.print_stats()\n # print(f\"### fwd_rcvd {self.rank} {epoch} {self.forward_minibatch_id} {ts1:.3f} {ts2:.3f} {ts3:.3f} {ts4:.3f}\")\n # print(f\"### fwd_comp r:{self.rank} e:{epoch} b:{self.forward_minibatch_id}/{num_batches} ts: {ts2 - ts1:.3f}\")\n # print(f\"### fwd_snd_q r:{self.rank} e:{epoch} b:{self.forward_minibatch_id}/{num_batches} ts: {ts2:.3f}\")\n\n self.forward_stats.reset_stats()\n self.forward_minibatch_id += 1\n\n return ts1, ts2, ts3, ts4\n\n def _run_forward(self, tensors):\n # Perform forward pass through model (self.modules_with_dependencies already\n # has modules in topological order).\n modules = self.modules_with_dependencies.modules()\n all_input_names = self.modules_with_dependencies.all_input_names()\n all_output_names = self.modules_with_dependencies.all_output_names()\n for i, (module, input_names, output_names) in \\\n enumerate(zip(modules, all_input_names, all_output_names)):\n if i == (len(modules) - 1) and self.is_criterion:\n # If layer is criterion (loss).\n if self.model_type == SPEECH_TO_TEXT:\n output = tensors[\"output\"].transpose(0, 1).float()\n output_sizes = tensors[\"output_sizes\"].cpu()\n target = tensors[\"target\"].cpu()\n target_sizes = tensors[\"target_length\"].cpu()\n input0_size = tensors[\"input0_size\"].cpu()\n module_outputs = [module(output, target, output_sizes, target_sizes) / input0_size[0]]\n else:\n module_outputs = [module(tensors[input_name],\n tensors[\"target\"])\n for input_name in input_names]\n module_outputs = [sum(module_outputs)]\n else:\n # If layer is non-criterion.\n module_outputs = module(*[tensors[input_name]\n for input_name in input_names])\n if not isinstance(module_outputs, tuple):\n module_outputs = (module_outputs,)\n module_outputs = list(module_outputs)\n\n for (output_name, module_output) in zip(output_names, module_outputs):\n tensors[output_name] = module_output\n\n self.output = tensors[input_names[0]]\n if self.is_criterion and self.model_type == TRANSLATION:\n loss_per_batch = tensors[output_names[0]] * tensors[self.criterion_input_name].size(1)\n loss_per_token = loss_per_batch / tensors[\"target_length\"][0].item()\n self.loss = loss_per_token\n elif self.is_criterion:\n self.loss = tensors[output_names[0]]\n else:\n self.loss = 1\n\n def run_backward(self):\n # Receive input gradients needed for backward pass.\n ts1 = time.time()\n self.receive_tensors_backward()\n ts2 = time.time()\n # Backward pass through modules in reverse order.\n inputs = {}\n outputs = {}\n input_gradients = {}\n output_gradients = {}\n\n # Get input and output names spanning all modules in this stage.\n all_input_names_set = set()\n all_output_names_set = set()\n\n modules = self.modules_with_dependencies.modules()\n all_input_names = self.modules_with_dependencies.all_input_names()\n all_output_names = self.modules_with_dependencies.all_output_names()\n\n for (input_names, 
output_names) in zip(all_input_names, all_output_names):\n for input_name in input_names:\n all_input_names_set.add(input_name)\n for output_name in output_names:\n all_output_names_set.add(output_name)\n\n tensors = self.tensors.pop(0)\n # Set inputs, outputs, and output_gradients.\n # Only set outputs/output_gradients for tensors that are not inputs of\n # other modules in this stage.\n # Similarly, only set inputs for tensors that are not outputs of other\n # modules in this stage.\n for (module, input_names, output_names) in \\\n zip(reversed(modules), reversed(all_input_names), reversed(all_output_names)):\n for output_name in output_names:\n if output_name not in all_input_names_set:\n if output_name not in self.gradients:\n output_gradients[output_name] = None\n else:\n output_gradients[output_name] = self.gradients[output_name]\n if tensors[output_name].requires_grad:\n outputs[output_name] = tensors[output_name]\n for input_name in input_names:\n if input_name not in all_output_names_set:\n inputs[input_name] = tensors[input_name]\n\n # Hook to record input gradients.\n def hook_wrapper(input_name):\n def hook(input_gradient):\n input_gradients[input_name] = input_gradient\n\n return hook\n\n for input_name in inputs:\n if input_name != \"input0\" and input_name != \"input1\" and input_name != \"input2\" \\\n and inputs[input_name].requires_grad:\n inputs[input_name].register_hook(hook_wrapper(input_name))\n\n if \"loss\" in outputs:\n outputs[\"loss\"] *= self.loss_scale\n\n # Perform backward pass.\n torch.autograd.backward(tuple([outputs[output_name] for output_name in outputs]),\n grad_tensors=tuple([output_gradients[output_name]\n for output_name in outputs]))\n\n # Input tensors don't need gradients.\n for input_name in inputs:\n if not inputs[input_name].requires_grad:\n self.gradients[input_name] = inputs[input_name]\n continue\n\n if input_name != \"input0\" and input_name != \"input1\" and input_name != \"input2\" and input_name != \"input\":\n self.gradients[input_name] = input_gradients[input_name]\n\n ts3 = time.time()\n\n # Send output gradients.\n self.send_tensors_backward()\n ts4 = time.time()\n\n if self.verbose_freq > 0 and self.backward_minibatch_id % self.verbose_freq == 0:\n self.backward_stats.print_stats()\n # print(\n # f\"### bwd_rcvd {self.rank} {epoch} {self.backward_minibatch_id} {ts1:.3f} {ts2:.3f} {ts3:.3f} {ts4:.3f}\")\n # print(f\"### bwd_comp r:{self.rank} e:{epoch} b:{self.backward_minibatch_id}/{num_batches} ts: {ts2 - ts1:.3f}\")\n # print(f\"### bwd_snd_q r:{self.rank} e:{epoch} b:{self.backward_minibatch_id}/{num_batches} ts: {ts2:.3f}\")\n\n self.backward_stats.reset_stats()\n self.backward_minibatch_id += 1\n\n return ts1, ts2, ts3, ts4\n\n def num_tokens(self):\n return self.tensors[-1][\"target_length\"][0].item()\n\n def run_ack(self):\n # No need for ack if running on a single worker.\n if self.rank is None:\n return\n\n # Receive ack from next stage. 
Send ack to previous stage.\n if self.stage < (self.num_stages - 1):\n self.comm_handler.recv(\n \"ack\",\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=True)\n if self.stage > 0:\n self.comm_handler.send(\n \"ack\",\n torch.zeros(self.tensor_shapes[\"ack\"],\n dtype=torch.int64).cuda(),\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=True)\n\n # Used to track where to receive forward from.\n self.comm_handler.increment_messaging_index(sending=True)\n\n self.backward_minibatch_id += 1\n\n def wait(self):\n if self.comm_handler is not None:\n self.comm_handler.wait()\n\n def num_iterations(self, loader_size):\n \"\"\" Determines number of iterations for this stage\n\n TODO: don't currently support uneven configurations.\n \"\"\"\n if self.stage == 0 or self.stage is None:\n return loader_size\n\n num_iterations = loader_size * self.num_ranks_in_first_stage\n assert num_iterations % self.num_ranks_in_stage == 0\n num_iterations = num_iterations // self.num_ranks_in_stage\n\n return num_iterations\n\n def get_adjusted_learning_rate(self, base_lr):\n if self.stage == 0:\n return base_lr\n\n adjusted_lr = float(base_lr) * float(self.num_ranks_in_stage) \\\n / float(self.num_ranks_in_first_stage)\n\n return adjusted_lr\n", "import torch\n\n\nclass Stage3(torch.nn.Module):\n def __init__(self):\n super(Stage3, self).__init__()\n self.layer2 = torch.nn.ReLU(inplace=True)\n self.layer3 = torch.nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n self.layer4 = torch.nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer5 = torch.nn.ReLU(inplace=True)\n self.layer6 = torch.nn.Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer7 = torch.nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer9 = torch.nn.ReLU(inplace=True)\n self.layer10 = torch.nn.Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer11 = torch.nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer12 = torch.nn.ReLU(inplace=True)\n self.layer13 = torch.nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n self.layer14 = torch.nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer15 = torch.nn.ReLU(inplace=True)\n self.layer16 = torch.nn.Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer17 = torch.nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer19 = torch.nn.ReLU(inplace=True)\n self.layer20 = torch.nn.Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer21 = torch.nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer22 = torch.nn.ReLU(inplace=True)\n self.layer23 = torch.nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n self.layer24 = torch.nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer25 = torch.nn.ReLU(inplace=True)\n self.layer26 = torch.nn.Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer27 = torch.nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer29 = torch.nn.ReLU(inplace=True)\n self.layer30 = torch.nn.Conv2d(1024, 512, 
kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer31 = torch.nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer32 = torch.nn.ReLU(inplace=True)\n self.layer33 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n self.layer34 = torch.nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer35 = torch.nn.ReLU(inplace=True)\n self.layer36 = torch.nn.Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer37 = torch.nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer38 = torch.nn.Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)\n self.layer39 = torch.nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer41 = torch.nn.ReLU(inplace=True)\n self.layer42 = torch.nn.Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer43 = torch.nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer44 = torch.nn.ReLU(inplace=True)\n self.layer45 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n self.layer46 = torch.nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer47 = torch.nn.ReLU(inplace=True)\n self.layer48 = torch.nn.Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer49 = torch.nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer51 = torch.nn.ReLU(inplace=True)\n self.layer52 = torch.nn.Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer53 = torch.nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer54 = torch.nn.ReLU(inplace=True)\n self.layer55 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n self.layer56 = torch.nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer57 = torch.nn.ReLU(inplace=True)\n self.layer58 = torch.nn.Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n self.layer59 = torch.nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.layer61 = torch.nn.ReLU(inplace=True)\n self.layer62 = torch.nn.AvgPool2d(kernel_size=7, stride=1, padding=0)\n self.layer65 = torch.nn.Linear(in_features=2048, out_features=1000, bias=True)\n\n \n\n def forward(self, input1, input0):\n out0 = input0.clone()\n out1 = input1.clone()\n out2 = self.layer2(out0)\n out3 = self.layer3(out2)\n out4 = self.layer4(out3)\n out5 = self.layer5(out4)\n out6 = self.layer6(out5)\n out7 = self.layer7(out6)\n out7 = out7 + out1\n out9 = self.layer9(out7)\n out10 = self.layer10(out9)\n out11 = self.layer11(out10)\n out12 = self.layer12(out11)\n out13 = self.layer13(out12)\n out14 = self.layer14(out13)\n out15 = self.layer15(out14)\n out16 = self.layer16(out15)\n out17 = self.layer17(out16)\n out17 = out17 + out9\n out19 = self.layer19(out17)\n out20 = self.layer20(out19)\n out21 = self.layer21(out20)\n out22 = self.layer22(out21)\n out23 = self.layer23(out22)\n out24 = self.layer24(out23)\n out25 = self.layer25(out24)\n out26 = self.layer26(out25)\n out27 = self.layer27(out26)\n out27 = out27 + out19\n out29 = self.layer29(out27)\n out30 = self.layer30(out29)\n out31 = self.layer31(out30)\n out32 = self.layer32(out31)\n out33 = self.layer33(out32)\n 
out34 = self.layer34(out33)\n out35 = self.layer35(out34)\n out36 = self.layer36(out35)\n out37 = self.layer37(out36)\n out38 = self.layer38(out29)\n out39 = self.layer39(out38)\n out37 = out37 + out39\n out41 = self.layer41(out37)\n out42 = self.layer42(out41)\n out43 = self.layer43(out42)\n out44 = self.layer44(out43)\n out45 = self.layer45(out44)\n out46 = self.layer46(out45)\n out47 = self.layer47(out46)\n out48 = self.layer48(out47)\n out49 = self.layer49(out48)\n out49 = out49 + out41\n out51 = self.layer51(out49)\n out52 = self.layer52(out51)\n out53 = self.layer53(out52)\n out54 = self.layer54(out53)\n out55 = self.layer55(out54)\n out56 = self.layer56(out55)\n out57 = self.layer57(out56)\n out58 = self.layer58(out57)\n out59 = self.layer59(out58)\n out59 = out59 + out51\n out61 = self.layer61(out59)\n out62 = self.layer62(out61)\n out63 = out62.size(0)\n out64 = out62.view(out63, -1)\n out65 = self.layer65(out64)\n return out65\n" ]
[ [ "torch.LongTensor", "torch.distributed.new_group", "torch.nn.parallel.DistributedDataParallel", "torch.zeros" ], [ "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
NajusAnaxi/UNet-based-for-Brain-Tumor-Segmentation
[ "24ca4432873f145ad33810f40c851ac10bf030fa" ]
[ "setup_scripts/extract_images.py" ]
[ "import h5py\r\nimport numpy as np\r\nimport matplotlib.image as mpimg\r\nfrom tqdm import tqdm\r\nimport os\r\n\r\n\r\ndef clear_screen():\r\n \"\"\"Clears the console screen irrespective of os used\"\"\"\r\n import platform\r\n if platform.system() == 'Windows':\r\n os.system('cls')\r\n return\r\n os.system('clear')\r\n\r\n\r\ndef make_folder(target_folder):\r\n \"\"\"Creates folder if there is no folder in the specified path.\r\n Parameters: \r\n target_folder(str): path of the folder which needs to be created.\r\n\r\n Returns: None\r\n \"\"\"\r\n if not (os.path.isdir(target_folder)):\r\n print(f'Creating {target_folder} folder')\r\n os.mkdir(target_folder)\r\n\r\n\r\ndef get_image_data(filename, path):\r\n \"\"\" Reads the mat image file and returns the image & mask array.\r\n Parameters:\r\n filename(str): Name of the file without the extension.\r\n path(str): Path where the filename is located.\r\n\r\n Returns:\r\n data(dict): A dictionary with the image & mask numpy array.\r\n 'image': The numpy array for image.\r\n 'mask' : The numpy array for the above image mask.\r\n \"\"\"\r\n path = os.path.join(path, filename+'.mat')\r\n file = h5py.File(path, 'r')\r\n data = dict()\r\n data['image'] = np.array(file.get('cjdata/image'))\r\n data['mask'] = np.array(file.get('cjdata/tumorMask'))\r\n return data\r\n\r\n\r\ndef save_image_data(filename, path, data):\r\n \"\"\" Saves the image & mask array in png format.\r\n Parameters:\r\n filename(str): Name of the file without the extension.\r\n path(str): Path where the filename is to be saved.\r\n data(dict): A dictionary with the image & mask numpy array.\r\n 'image': The numpy array for image.\r\n 'mask' : The numpy array for the above image mask.\r\n\r\n Returns: None\r\n \"\"\"\r\n path_image = os.path.join(path, filename+'.png')\r\n path_mask = os.path.join(path, filename+'_mask.png')\r\n mpimg.imsave(path_image, data['image'], cmap='gray', format='png')\r\n mpimg.imsave(path_mask, data['mask'], cmap='gray', format='png')\r\n\r\n\r\ndef main():\r\n # Total number of images\r\n total_images = 3064\r\n\r\n # Dataset paths\r\n data_read_path = os.path.join('dataset', 'mat_dataset')\r\n data_save_path = os.path.join('dataset', 'png_dataset')\r\n\r\n clear_screen()\r\n\r\n # Make if folder is missing.\r\n make_folder(data_save_path)\r\n\r\n print(f'Starting to save images in {data_save_path}')\r\n\r\n for filename in tqdm(range(1, total_images+1)):\r\n filename = str(filename)\r\n data = get_image_data(filename, data_read_path)\r\n save_image_data(str(int(filename)-1), data_save_path, data)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n" ]
[ [ "matplotlib.image.imsave" ] ]
marcelovca90-inatel/EC017
[ "61bbf3c93c13a6743b829c0098d5e33340703f1f", "61bbf3c93c13a6743b829c0098d5e33340703f1f" ]
[ "NeuralNetworks-python/Perceptron.py", "NeuralNetworks-python/_data/DataSets.py" ]
[ "import numpy as np\r\nfrom _data import DataSets\r\nfrom _math import ActivationFunctions\r\nfrom _plot import PlotUtils\r\n\r\nclass Perceptron:\r\n\r\n def __init__(self, n, g):\r\n self.n = n # learning rate\r\n self.g = g # activation function\r\n self.plot_data_x = [] # epochs for plotting\r\n self.plot_data_y = [] # error for plotting\r\n\r\n def train(self, x, d):\r\n k = len(x)\r\n w = np.random.rand(len(x[0]))\r\n epoch = 0\r\n error = True\r\n while error and epoch < 10000:\r\n error = False\r\n for i in range(0, k):\r\n v = np.dot(np.transpose(w), x[i])\r\n y = self.g(v)\r\n if y != d[i]:\r\n w = np.add(w, np.multiply(self.n * (d[i] - y), x[i]))\r\n error = True\r\n epoch = epoch + 1\r\n print(f\"Epoch: {epoch}\\tWeights: {w}\")\r\n self.plot_data_x.append(epoch)\r\n self.plot_data_y.append(1 if error else 0)\r\n return w\r\n\r\n def test(self, w, x):\r\n v = np.dot(np.transpose(w), x)\r\n y = self.g(v)\r\n return y\r\n \r\n def evaluate(self, w, x, d):\r\n correct = 0\r\n total = len(x)\r\n for i in range(0, len(x)):\r\n y = self.test(w, x[i])\r\n if (y == d[i]):\r\n correct = correct + 1\r\n accuracy = 100.0 * (float(correct) / float(total))\r\n print(f\"Accuracy: {accuracy:.2f}% ({correct}/{total})\")\r\n return accuracy\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # set random number generator seed\r\n np.random.seed(NUMERO_DE_MATRICULA)\r\n\r\n # set floating point formatting when printing\r\n np.set_printoptions(formatter={\"float\": \"{: 0.6f}\".format})\r\n\r\n # load data\r\n x = DataSets.NOME_DO_DATASET.input\r\n d = DataSets.NOME_DO_DATASET.output\r\n\r\n # define the network parameters\r\n n = TAXA_DE_APRENDIZADO\r\n g = ActivationFunctions.FUNCAO_DE_ATIVACAO\r\n\r\n # create the neural network\r\n nn = Perceptron(n, g)\r\n\r\n # train the neural network\r\n w = nn.train(x, d)\r\n\r\n # evaluate the neural network\r\n acc = nn.evaluate(w, x, d)\r\n \r\n # plot epoch versus error data\r\n PlotUtils.plot(nn.plot_data_x, \"epoch\", nn.plot_data_y, \"error\")\r\n", "import numpy as np\r\nimport os, sys\r\nfrom numpy.random import sample\r\nfrom numpy import append, genfromtxt\r\n\r\nclass DataSets:\r\n \r\n @staticmethod\r\n def read(folder, filename, flatten):\r\n filename_abs = os.path.join(os.path.dirname(__file__), folder, filename)\r\n return genfromtxt(filename_abs, delimiter=',', dtype=float)\r\n\r\n @staticmethod\r\n def add_bias(arr, bias = -1):\r\n biased_arr = np.ndarray(shape=(arr.shape[0], arr.shape[1]+1), dtype=float)\r\n for i in range(0, len(arr)):\r\n biased_arr[i] = np.append(bias, arr[i])\r\n return biased_arr\r\n\r\n# https://en.wikipedia.org/wiki/AND_gate\r\nclass LOGIC_GATE_AND:\r\n input = DataSets.add_bias(DataSets.read('logic-gate-and', 'input.txt', False))\r\n output = DataSets.read('logic-gate-and', 'output.txt', True)\r\n\r\n# https://en.wikipedia.org/wiki/OR_gate\r\nclass LOGIC_GATE_OR:\r\n input = DataSets.add_bias(DataSets.read('logic-gate-or', 'input.txt', False))\r\n output = DataSets.read('logic-gate-or', 'output.txt', True)\r\n\r\n# https://en.wikipedia.org/wiki/XOR_gate\r\nclass LOGIC_GATE_XOR:\r\n input = DataSets.add_bias(DataSets.read('logic-gate-xor', 'input.txt', False))\r\n output = DataSets.read('logic-gate-xor', 'output.txt', True)\r\n\r\n# http://archive.ics.uci.edu/ml/datasets/Blood+Transfusion+Service+Center\r\nclass BLOOD_TRANSFUSION:\r\n input = DataSets.add_bias(DataSets.read('blood-transfusion', 'input.txt', False))\r\n output = DataSets.read('blood-transfusion', 'output.txt', True)\r\n\r\n# 
http://archive.ics.uci.edu/ml/datasets/Cryotherapy+Dataset+\r\nclass CRYOTHERAPY:\r\n input = DataSets.add_bias(DataSets.read('cryotherapy', 'input.txt', False))\r\n output = DataSets.read('cryotherapy', 'output.txt', True)\r\n\r\n# https://www.kaggle.com/kumargh/pimaindiansdiabetescsv\r\nclass DIABETES:\r\n input = DataSets.add_bias(DataSets.read('diabetes', 'input.txt', False))\r\n output = DataSets.read('diabetes', 'output.txt', True)\r\n\r\n# https://archive.ics.uci.edu/ml/datasets/Tic-Tac-Toe+Endgame\r\nclass TIC_TAC_TOE_ENDGAME:\r\n input = DataSets.add_bias(DataSets.read('tic-tac-toe-endgame', 'input.txt', False))\r\n output = DataSets.read('tic-tac-toe-endgame', 'output.txt', True)\r\n" ]
[ [ "numpy.multiply", "numpy.set_printoptions", "numpy.random.seed", "numpy.transpose" ], [ "numpy.append", "numpy.ndarray", "numpy.genfromtxt" ] ]
waitong94/nn_physical_concepts
[ "f8cc03d46431641e7ef2ecbaeb1a1494a95f2550" ]
[ "scinet/data_loader.py" ]
[ "# Copyright 2018 SciNet (https://github.com/eth-nn-physics/nn_physical_concepts)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cPickle\nimport gzip\nimport io\nimport numpy as np\n\n\ndef load(validation_size_p, file_name):\n \"\"\"\n Params:\n validation_size_p: percentage of data to be used for validation\n file_name (str): File containing the data\n \"\"\"\n f = gzip.open(io.data_path + file_name + \".plk.gz\", 'rb')\n data, states, params = cPickle.load(f)\n states = np.array(states)\n train_val_separation = int(len(data[0]) * (1 - validation_size_p / 100.))\n training_data = [data[i][:train_val_separation] for i in [0, 1, 2]]\n training_states = states[:train_val_separation]\n validation_data = [data[i][train_val_separation:] for i in [0, 1, 2]]\n validation_states = states[train_val_separation:]\n f.close()\n return (training_data, validation_data, training_states, validation_states, params)\n" ]
[ [ "numpy.array" ] ]
ravikumarvc/incubator-tvm
[ "9826947ffce0ed40e9d47a0db2abb033e394279e" ]
[ "apps/howto_deploy/python_deploy.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# brief Example code on load and run TVM module.s\n# file python_deploy.py\n\nimport tvm\nimport numpy as np\n\ndef verify(mod, fname):\n # Get the function from the module\n f = mod.get_function(fname)\n # Use tvm.nd.array to convert numpy ndarray to tvm\n # NDArray type, so that function can be invoked normally\n N = 10 \n x = tvm.nd.array(np.arange(N, dtype=np.float32))\n y = tvm.nd.array(np.zeros(N, dtype=np.float32))\n # Invoke the function\n f(x, y)\n np_x = x.asnumpy() \n np_y = y.asnumpy() \n # Verify correctness of function\n assert(np.all([xi+1 == yi for xi, yi in zip(np_x, np_y)]))\n print(\"Finish verification...\")\n \n\nif __name__ == \"__main__\":\n # The normal dynamic loading method for deployment\n mod_dylib = tvm.module.load(\"lib/test_addone_dll.so\")\n print(\"Verify dynamic loading from test_addone_dll.so\")\n verify(mod_dylib, \"addone\")\n # There might be methods to use the system lib way in\n # python, but dynamic loading is good enough for now.\n" ]
[ [ "numpy.arange", "numpy.zeros" ] ]
manhcuogntin4/handwritting-ocr
[ "aa55c2d46156a10663ad55e2fa4590c3e1333130" ]
[ "ocr/charSeg.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\nfrom .helpers import *\nfrom .tfhelpers import Graph\nimport cv2\nimport math\n\n# Preloading trained model with activation function\n# Loading is slow -> prevent multiple loads\nprint(\"Loading Segmantation model:\")\nsegCNNGraph = Graph('models/gap-clas/CNN-CG')\nsegLargeCNNGraph = Graph('models/gap-clas/large/CNN-CG')\nsegRNNGraph = Graph('models/gap-clas/RNN/Bi-RNN', 'prediction')\nsegRNNDenseGraph = Graph('models/gap-clas/RNN/Bi-RNN-dense', 'prediction')\n\ndef classify(img, step=2, RNN=False, large=False):\n if large and RNN:\n slider = (60, 60)\n elif large:\n slider = (60, 120)\n else:\n slider = (60, 30)\n \n length = (img.shape[1] - slider[1]) // 2 + 1\n if RNN:\n input_seq = np.zeros((1, length, slider[0]*slider[1]), dtype=np.float32)\n input_seq[0][:] = [img[:, loc * step: loc * step + slider[1]].flatten()\n for loc in range(length)]\n if large:\n pred = segRNNDenseGraph.eval_feed({'inputs:0': input_seq,\n 'length:0': [length],\n 'keep_prob:0': 1})[0]\n else:\n pred = segRNNGraph.eval_feed({'inputs:0': input_seq,\n 'length:0': [length],\n 'keep_prob:0': 1})[0]\n else:\n input_seq = np.zeros((length, slider[0]*slider[1]), dtype=np.float32)\n input_seq[:] = [img[:, loc * step: loc * step + slider[1]].flatten()\n for loc in range(length)]\n if large:\n pred = segLargeCNNGraph.run(input_seq)\n else:\n pred = segCNNGraph.run(input_seq)\n \n return pred\n \n\ndef segmentation(img, step=2, RNN=False, large=False, debug=False):\n \"\"\"\n Take preprocessed image of word\n and return array of positions separating chars - gaps\n \"\"\" \n if large:\n slider = (60, 120)\n else:\n slider = (60, 30)\n length = (img.shape[1] - slider[1]) // 2 + 1\n \n pred = classify(img, step, RNN, large)\n\n gaps = []\n\n lastGap = 0\n gapCount = 1\n gapPositionSum = slider[1] / 2\n firstGap = True\n gapBlockFirst = 0\n gapBlockLast = slider[1]/2\n\n for i, p in enumerate(pred):\n if p == 1:\n gapPositionSum += i * step + slider[1] / 2\n gapBlockLast = i * step + slider[1] / 2\n gapCount += 1\n lastGap = 0\n if gapBlockFirst == 0:\n gapBlockFirst = i * step + slider[1] / 2\n else:\n if gapCount != 0 and lastGap >= 1:\n if firstGap:\n gaps.append(int(gapBlockLast))\n firstGap = False\n else:\n gaps.append(int(gapPositionSum // gapCount))\n gapPositionSum = 0\n gapCount = 0\n gapBlockFirst = 0\n lastGap += 1\n\n # Adding final gap position\n if gapBlockFirst != 0:\n gaps.append(int(gapBlockFirst))\n else:\n gapPositionSum += (length - 1) * 2 + slider[1]/2\n gaps.append(int(gapPositionSum / (gapCount + 1)))\n \n if debug:\n # Drawing lines\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n for gap in gaps:\n cv2.line(img,\n ((int)(gap), 0),\n ((int)(gap), slider[1]),\n (0, 255, 0), 1)\n implt(img, t=\"Separated characters\")\n \n return gaps" ]
[ [ "numpy.zeros" ] ]
cmutel/SALib
[ "32e33c423bcc981d0cfd4339a3e2435d6b945de1" ]
[ "src/SALib/test_functions/Sobol_G.py" ]
[ "from __future__ import division\r\n\r\nimport numpy as np\r\n\r\n\r\n# Non-monotonic Sobol G Function (8 parameters)\r\n# First-order indices:\r\n# x1: 0.7165\r\n# x2: 0.1791\r\n# x3: 0.0237\r\n# x4: 0.0072\r\n# x5-x8: 0.0001\r\ndef evaluate(values, a=None):\r\n if type(values) != np.ndarray:\r\n raise TypeError(\"The argument `values` must be a numpy ndarray\")\r\n if a is None:\r\n a = [0, 1, 4.5, 9, 99, 99, 99, 99]\r\n\r\n ltz = values < 0\r\n gto = values > 1\r\n\r\n if ltz.any() == True:\r\n raise ValueError(\"Sobol G function called with values less than zero\")\r\n elif gto.any() == True:\r\n raise ValueError(\"Sobol G function called with values greater than one\")\r\n\r\n Y = np.ones([values.shape[0]])\r\n\r\n len_a = len(a)\r\n for i, row in enumerate(values):\r\n for j in range(len_a):\r\n x = row[j]\r\n a_j = a[j]\r\n Y[i] *= (np.abs(4 * x - 2) + a_j) / (1 + a_j)\r\n\r\n return Y\r\n\r\n\r\ndef partial_first_order_variance(a=None):\r\n if a is None:\r\n a = [0, 1, 4.5, 9, 99, 99, 99, 99]\r\n a = np.array(a)\r\n return np.divide(1, np.multiply(3, np.square(1 + a)))\r\n\r\n\r\ndef total_variance(a=None):\r\n if a is None:\r\n a = [0, 1, 4.5, 9, 99, 99, 99, 99]\r\n a = np.array(a)\r\n return np.add(-1, np.product(1 + partial_first_order_variance(a), axis=0))\r\n\r\n\r\ndef sensitivity_index(a):\r\n a = np.array(a)\r\n return np.divide(partial_first_order_variance(a), total_variance(a))\r\n\r\n\r\ndef total_sensitivity_index(a):\r\n a = np.array(a)\r\n \r\n pv = partial_first_order_variance(a)\r\n tv = total_variance(a)\r\n \r\n sum_pv = pv.sum(axis=0)\r\n \r\n return np.subtract(1, np.divide(np.subtract(sum_pv, pv.T), tv))\r\n" ]
[ [ "numpy.square", "numpy.abs", "numpy.subtract", "numpy.ones", "numpy.array" ] ]
fmobrj/doctr
[ "b149266ea57fd59047193a01c328c2b8ecb9330a" ]
[ "doctr/models/recognition/crnn/tensorflow.py" ]
[ "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom copy import deepcopy\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential, Model\nfrom typing import Tuple, Dict, Any, Optional, List\n\nfrom ...backbones import vgg16_bn, resnet31, mobilenet_v3_small_r, mobilenet_v3_large_r\nfrom ...utils import load_pretrained_params\nfrom ..core import RecognitionModel, RecognitionPostProcessor\nfrom ....datasets import VOCABS\n\n__all__ = ['CRNN', 'crnn_vgg16_bn', 'CTCPostProcessor', 'crnn_mobilenet_v3_small',\n 'crnn_mobilenet_v3_large']\n\ndefault_cfgs: Dict[str, Dict[str, Any]] = {\n 'crnn_vgg16_bn': {\n 'mean': (0.694, 0.695, 0.693),\n 'std': (0.299, 0.296, 0.301),\n 'backbone': vgg16_bn, 'rnn_units': 128,\n 'input_shape': (32, 128, 3),\n 'vocab': VOCABS['legacy_french'],\n 'url': 'https://github.com/mindee/doctr/releases/download/v0.3.0/crnn_vgg16_bn-76b7f2c6.zip',\n },\n 'crnn_mobilenet_v3_small': {\n 'mean': (0.694, 0.695, 0.693),\n 'std': (0.299, 0.296, 0.301),\n 'backbone': mobilenet_v3_small_r, 'rnn_units': 128,\n 'input_shape': (32, 128, 3),\n 'vocab': VOCABS['french'],\n 'url': 'https://github.com/mindee/doctr/releases/download/v0.3.1/crnn_mobilenet_v3_small-7f36edec.zip',\n },\n 'crnn_mobilenet_v3_large': {\n 'mean': (0.694, 0.695, 0.693),\n 'std': (0.299, 0.296, 0.301),\n 'backbone': mobilenet_v3_large_r, 'rnn_units': 128,\n 'input_shape': (32, 128, 3),\n 'vocab': VOCABS['french'],\n 'url': None,\n },\n}\n\n\nclass CTCPostProcessor(RecognitionPostProcessor):\n \"\"\"\n Postprocess raw prediction of the model (logits) to a list of words using CTC decoding\n\n Args:\n vocab: string containing the ordered sequence of supported characters\n ignore_case: if True, ignore case of letters\n ignore_accents: if True, ignore accents of letters\n \"\"\"\n\n def __call__(\n self,\n logits: tf.Tensor\n ) -> List[Tuple[str, float]]:\n \"\"\"\n Performs decoding of raw output with CTC and decoding of CTC predictions\n with label_to_idx mapping dictionnary\n\n Args:\n logits: raw output of the model, shape BATCH_SIZE X SEQ_LEN X NUM_CLASSES + 1\n\n Returns:\n A list of decoded words of length BATCH_SIZE\n\n \"\"\"\n # Decode CTC\n _decoded, _log_prob = tf.nn.ctc_beam_search_decoder(\n tf.transpose(logits, perm=[1, 0, 2]),\n tf.fill(logits.shape[0], logits.shape[1]),\n beam_width=1, top_paths=1,\n )\n out_idxs = tf.sparse.to_dense(_decoded[0], default_value=len(self.vocab))\n probs = tf.math.exp(tf.squeeze(_log_prob, axis=1))\n\n # Map it to characters\n _decoded_strings_pred = tf.strings.reduce_join(\n inputs=tf.nn.embedding_lookup(tf.constant(self._embedding, dtype=tf.string), out_idxs),\n axis=-1\n )\n _decoded_strings_pred = tf.strings.split(_decoded_strings_pred, \"<eos>\")\n decoded_strings_pred = tf.sparse.to_dense(_decoded_strings_pred.to_sparse(), default_value='not valid')[:, 0]\n word_values = [word.decode() for word in decoded_strings_pred.numpy().tolist()]\n\n return list(zip(word_values, probs.numpy().tolist()))\n\n\nclass CRNN(RecognitionModel, Model):\n \"\"\"Implements a CRNN architecture as described in `\"An End-to-End Trainable Neural Network for Image-based\n Sequence Recognition and Its Application to Scene Text Recognition\" <https://arxiv.org/pdf/1507.05717.pdf>`_.\n\n Args:\n feature_extractor: the backbone serving as feature extractor\n vocab: vocabulary used for 
encoding\n rnn_units: number of units in the LSTM layers\n cfg: configuration dictionary\n \"\"\"\n\n _children_names: List[str] = ['feat_extractor', 'decoder', 'postprocessor']\n\n def __init__(\n self,\n feature_extractor: tf.keras.Model,\n vocab: str,\n rnn_units: int = 128,\n cfg: Optional[Dict[str, Any]] = None,\n ) -> None:\n # Initialize kernels\n h, w, c = feature_extractor.output_shape[1:]\n\n super().__init__()\n self.vocab = vocab\n self.max_length = w\n self.cfg = cfg\n self.feat_extractor = feature_extractor\n\n self.decoder = Sequential(\n [\n layers.Bidirectional(layers.LSTM(units=rnn_units, return_sequences=True)),\n layers.Bidirectional(layers.LSTM(units=rnn_units, return_sequences=True)),\n layers.Dense(units=len(vocab) + 1)\n ]\n )\n self.decoder.build(input_shape=(None, w, h * c))\n\n self.postprocessor = CTCPostProcessor(vocab=vocab)\n\n def compute_loss(\n self,\n model_output: tf.Tensor,\n target: List[str],\n ) -> tf.Tensor:\n \"\"\"Compute CTC loss for the model.\n\n Args:\n gt: the encoded tensor with gt labels\n model_output: predicted logits of the model\n seq_len: lengths of each gt word inside the batch\n\n Returns:\n The loss of the model on the batch\n \"\"\"\n gt, seq_len = self.compute_target(target)\n batch_len = model_output.shape[0]\n input_length = tf.fill((batch_len,), model_output.shape[1])\n ctc_loss = tf.nn.ctc_loss(\n gt, model_output, seq_len, input_length, logits_time_major=False, blank_index=len(self.vocab)\n )\n return ctc_loss\n\n def call(\n self,\n x: tf.Tensor,\n target: Optional[List[str]] = None,\n return_model_output: bool = False,\n return_preds: bool = False,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n\n features = self.feat_extractor(x, **kwargs)\n # B x H x W x C --> B x W x H x C\n transposed_feat = tf.transpose(features, perm=[0, 2, 1, 3])\n w, h, c = transposed_feat.get_shape().as_list()[1:]\n # B x W x H x C --> B x W x H * C\n features_seq = tf.reshape(transposed_feat, shape=(-1, w, h * c))\n logits = self.decoder(features_seq, **kwargs)\n\n out: Dict[str, tf.Tensor] = {}\n if return_model_output:\n out[\"out_map\"] = logits\n\n if target is None or return_preds:\n # Post-process boxes\n out[\"preds\"] = self.postprocessor(logits)\n\n if target is not None:\n out['loss'] = self.compute_loss(logits, target)\n\n return out\n\n\ndef _crnn(\n arch: str,\n pretrained: bool,\n pretrained_backbone: bool = True,\n input_shape: Optional[Tuple[int, int, int]] = None,\n **kwargs: Any\n) -> CRNN:\n\n pretrained_backbone = pretrained_backbone and not pretrained\n\n # Patch the config\n _cfg = deepcopy(default_cfgs[arch])\n _cfg['input_shape'] = input_shape or _cfg['input_shape']\n _cfg['vocab'] = kwargs.get('vocab', _cfg['vocab'])\n _cfg['rnn_units'] = kwargs.get('rnn_units', _cfg['rnn_units'])\n\n # Feature extractor\n feat_extractor = _cfg['backbone'](\n input_shape=_cfg['input_shape'],\n include_top=False,\n pretrained=pretrained_backbone,\n )\n\n kwargs['vocab'] = _cfg['vocab']\n kwargs['rnn_units'] = _cfg['rnn_units']\n\n # Build the model\n model = CRNN(feat_extractor, cfg=_cfg, **kwargs)\n # Load pretrained parameters\n if pretrained:\n load_pretrained_params(model, _cfg['url'])\n\n return model\n\n\ndef crnn_vgg16_bn(pretrained: bool = False, **kwargs: Any) -> CRNN:\n \"\"\"CRNN with a VGG-16 backbone as described in `\"An End-to-End Trainable Neural Network for Image-based\n Sequence Recognition and Its Application to Scene Text Recognition\" <https://arxiv.org/pdf/1507.05717.pdf>`_.\n\n Example::\n >>> import tensorflow as tf\n >>> 
from doctr.models import crnn_vgg16_bn\n >>> model = crnn_vgg16_bn(pretrained=True)\n >>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on our text recognition dataset\n\n Returns:\n text recognition architecture\n \"\"\"\n\n return _crnn('crnn_vgg16_bn', pretrained, **kwargs)\n\n\ndef crnn_mobilenet_v3_small(pretrained: bool = False, **kwargs: Any) -> CRNN:\n \"\"\"CRNN with a MobileNet V3 Small backbone as described in `\"An End-to-End Trainable Neural Network for Image-based\n Sequence Recognition and Its Application to Scene Text Recognition\" <https://arxiv.org/pdf/1507.05717.pdf>`_.\n\n Example::\n >>> import tensorflow as tf\n >>> from doctr.models import crnn_mobilenet_v3_small\n >>> model = crnn_mobilenet_v3_small(pretrained=True)\n >>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on our text recognition dataset\n\n Returns:\n text recognition architecture\n \"\"\"\n\n return _crnn('crnn_mobilenet_v3_small', pretrained, **kwargs)\n\n\ndef crnn_mobilenet_v3_large(pretrained: bool = False, **kwargs: Any) -> CRNN:\n \"\"\"CRNN with a MobileNet V3 Large backbone as described in `\"An End-to-End Trainable Neural Network for Image-based\n Sequence Recognition and Its Application to Scene Text Recognition\" <https://arxiv.org/pdf/1507.05717.pdf>`_.\n\n Example::\n >>> import tensorflow as tf\n >>> from doctr.models import crnn_mobilenet_v3_large\n >>> model = crnn_mobilenet_v3_large(pretrained=True)\n >>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on our text recognition dataset\n\n Returns:\n text recognition architecture\n \"\"\"\n\n return _crnn('crnn_mobilenet_v3_large', pretrained, **kwargs)\n" ]
[ [ "tensorflow.fill", "tensorflow.transpose", "tensorflow.constant", "tensorflow.reshape", "tensorflow.strings.split", "tensorflow.squeeze", "tensorflow.keras.layers.LSTM" ] ]
emmahodcroft/treetime
[ "8926e49e17538c19ad0950e365f15035a76c8fc5" ]
[ "treetime/treeanc.py" ]
[ "from __future__ import print_function, division\nimport time\nimport config as ttconf\nfrom Bio import Phylo\nfrom Bio import AlignIO\nimport numpy as np\nfrom gtr import GTR\nimport seq_utils\nfrom version import tt_version as __version__\ntry:\n from itertools import izip\nexcept ImportError: #python3.x\n izip = zip\n\n\nclass TreeAnc(object):\n \"\"\"\n Class defines simple tree object with basic interface methods: reading and\n saving from/to files, initializing leaves with sequences from the\n alignment, making ancestral state inferrence\n \"\"\"\n\n def __init__(self, tree=None, aln=None, gtr=None, fill_overhangs=True,\n ref=None, verbose = ttconf.VERBOSE, ignore_gaps=True,\n convert_upper=True, seq_multiplicity=None, log=None, **kwargs):\n \"\"\"\n TreeAnc constructor. It prepares tree, attach sequences to the leaf nodes,\n and sets some configuration parameters.\n\n Parameters\n ----------\n\n tree : str, Bio.Phylo.Tree\n Phylogenetic tree. String passed is interpreted as a filename to\n construct the Biopython tree.\n\n aln : str, Bio.Align.MultipleSequenceAlignment\n Sequence alignment. If a string passed, it is interpreted as the\n filename to read Biopython alignment from.\n\n gtr : str, GTR\n gtr model object. If string passed, it is interpreted as the type of\n the GTR model. A new GTR instance will be created for this type.\n **Note** some GTR types require additional configuration parameters.\n If the new GTR is being instantiated, these parameters are expected\n to be passed as kwargs. If nothing is passed, the default values are\n used, which might cause unexpected results.\n\n fill_overhangs : bool\n In some cases, the missing data on both ends of the alignment is\n filled with the gap sign('-'). As we suppose that the most\n appropriate way to deal with the missing data is to assign it to the\n \"unknown\" character ('N' for nucleotides, 'X' for aminoacids). If the\n parameter is set to True, the end-gaps are converted to unknown\n symbols. Otherwise, the alignment is treated as-is\n\n ignore_gaps: bool\n ignore gaps in branch length calculations\n\n verbose : int\n verbosity level as number from 0 (lowest) to 10 (highest).\n\n seq_multiplicity: dict\n if individual nodes in the tree correspond to multiple sampled sequences\n (i.e. read count in a deep sequencing experiment), these can be\n specified as a dictionary\n\n Keyword Args\n ------------\n\n Keyword arguments to construct GTR model\n\n \"\"\"\n if tree is None:\n raise TypeError(\"TreeAnc requires a tree!\")\n self.__version__ = __version__\n self.t_start = time.time()\n self.verbose = verbose\n self.log=log\n self.logger(\"TreeAnc: set-up\",1)\n self._internal_node_count = 0\n self.use_mutation_length=False\n self.one_mutation = None\n self.fill_overhangs = fill_overhangs\n self.is_vcf = False #this is set true when aln is set, if aln is dict\n\n self.var_positions = None #set during seq compression, if aln is dict\n self.inferred_const_sites = [] #keeps track of pos where ambig sites replaced with base\n #This preserves original compressed sequence so ambiguous positions can be recovered later\n self.ambigPos = {}\n\n self.seq_multiplicity = {} if seq_multiplicity is None else seq_multiplicity\n\n self.ignore_gaps = ignore_gaps\n self.set_gtr(gtr if gtr is not None else 'JC69', **kwargs)\n\n self.tree = tree\n if tree is None:\n self.logger(\"TreeAnc: tree loading failed! 
exiting\",0)\n return\n\n # will be None if not set\n self.ref = ref\n\n # force all sequences to be upper case letters\n # (desired for nuc or aa, not for other discrete states)\n self.convert_upper = convert_upper\n\n # set alignment and attach sequences to tree on success.\n # otherwise self.aln will be None\n self.aln = aln\n\n\n def logger(self, msg, level, warn=False):\n \"\"\"\n Print log message *msg* to stdout.\n\n Parameters\n -----------\n\n msg : str\n string to print on the screen\n\n level : int\n log-level. Only the messages with the level higher than the\n current verbose level will be shown.\n\n warn : bool\n warning flag. If True, the message will be displayed\n regardless of its log-level.\n\n \"\"\"\n if level<self.verbose or (warn and level<=self.verbose):\n dt = time.time() - self.t_start\n outstr = '\\n' if level<2 else ''\n outstr+=format(dt, '4.2f')+'\\t'\n outstr+= level*'-'\n outstr+=msg\n try:\n log.write(outstr+'\\n')\n log.flush()\n except:\n print(outstr)\n\n\n####################################################################\n## SET-UP\n####################################################################\n @property\n def leaves_lookup(self):\n \"\"\"\n Leaves lookup is the {leaf-name:leaf-node} dictionary. It enables fast\n search of a tree leaf object by its name.\n \"\"\"\n return self._leaves_lookup\n\n @property\n def gtr(self):\n \"\"\"\n Get GTR object currently used.\n \"\"\"\n return self._gtr\n\n @gtr.setter\n def gtr(self, value):\n \"\"\"\n Set a new GTR object\n\n Parameters\n -----------\n\n value :GTR\n the new GTR object\n \"\"\"\n if not isinstance(value, GTR):\n raise TypeError(\" GTR instance expected\")\n self._gtr = value\n\n\n def set_gtr(self, in_gtr, **kwargs):\n \"\"\"\n Create new GTR model if needed, and set the model as an attribute of the\n TreeAnc class\n\n Parameters\n -----------\n\n in_gtr : str, GTR\n The gtr model to be assigned. If string is passed,\n it is understood as the name of the standard GTR model, and is\n attempted to be created through GTR.standard() interface. In case\n GTR instance is passed, it is directly set as the class attribute\n\n Keyword Args\n ------------\n\n All parameters needed for the gtr creation. If none passed, defaults are assumed.\n Refer to the particular GTR models for the exact parameter values\n\n \"\"\"\n if type(in_gtr)==str:\n self._gtr = GTR.standard(model=in_gtr, **kwargs)\n self._gtr.logger = self.logger\n\n elif isinstance(in_gtr, GTR):\n self._gtr = in_gtr\n self._gtr.logger=self.logger\n else:\n self.logger(\"TreeAnc.gtr_setter: can't interpret GTR model\", 1, warn=True)\n raise TypeError(\"Cannot set GTR model in TreeAnc class: GTR or \"\n \"string expected\")\n\n if self._gtr.ambiguous is None:\n self.fill_overhangs=False\n\n\n @property\n def tree(self):\n \"\"\"\n Get reference to the phylogenetic tree currently used by the TreeAnc.\n \"\"\"\n return self._tree\n\n @tree.setter\n def tree(self, in_tree):\n '''\n assigns a tree to the internal self._tree variable. The tree is either\n loaded from file (if in_tree is str) or assigned (if in_tree is a Phylo.tree)\n '''\n from os.path import isfile\n if isinstance(in_tree, Phylo.BaseTree.Tree):\n self._tree = in_tree\n elif type(in_tree) in [str, unicode] and isfile(in_tree):\n try:\n self._tree=Phylo.read(in_tree, 'newick')\n except:\n fmt = in_tree.split('.')[-1]\n if fmt in ['nexus', 'nex']:\n self._tree=Phylo.read(in_tree, 'nexus')\n else:\n self.logger('TreeAnc: could not load tree, format needs to be nexus or newick! 
input was '+str(in_tree),1)\n self._tree = None\n return\n else:\n self.logger('TreeAnc: could not load tree! input was '+str(in_tree),1)\n self._tree = None\n return\n\n # remove all existing sequence attributes\n for node in self._tree.find_clades():\n if hasattr(node, \"sequence\"):\n node.__delattr__(\"sequence\")\n node.original_length = node.branch_length\n node.mutation_length = node.branch_length\n self.prepare_tree()\n\n\n @property\n def aln(self):\n \"\"\"\n Get the multiple sequence alignment currently used by the TreeAnc\n \"\"\"\n return self._aln\n\n @aln.setter\n def aln(self,in_aln):\n # load alignment from file if necessary\n from os.path import isfile\n from Bio.Align import MultipleSeqAlignment\n self._aln = None\n if isinstance(in_aln, MultipleSeqAlignment):\n self._aln = in_aln\n elif type(in_aln) in [str, unicode] and isfile(in_aln):\n for fmt in ['fasta', 'phylip-relaxed', 'nexus']:\n try:\n self._aln=AlignIO.read(in_aln, 'fasta')\n break\n except:\n continue\n elif type(in_aln) is dict: #if is read in from VCF file\n self._aln = in_aln\n self.is_vcf = True\n\n if self._aln is None:\n self.logger(\"TreeAnc: loading alignment failed... \",1, warn=True)\n return\n\n #Convert to uppercase here, rather than in _attach_sequences_to_nodes\n #(which used to do it through seq2array in seq_utils.py)\n #so that it is controlled by param convert_upper. This way for\n #mugration (ancestral reconstruction of non-sequences), you can\n #use upper- and lower case characters for discrete states!\n if (not self.is_vcf) and self.convert_upper:\n self._aln = MultipleSeqAlignment([seq.upper() for seq in self._aln])\n\n if hasattr(self, '_tree'):\n self._attach_sequences_to_nodes()\n else:\n self.logger(\"TreeAnc.aln: sequences not yet attached to tree\", 3, warn=True)\n\n @property\n def ref(self):\n \"\"\"\n Get the str reference nucleotide sequence currently used by TreeAnc\n When having read in from a VCF, this is what variants map to\n \"\"\"\n return self._ref\n\n\n @ref.setter\n def ref(self, in_ref):\n self._ref = in_ref\n\n\n def _attach_sequences_to_nodes(self):\n #print (\"inside attach seq to nodes\")\n '''\n For each node of the tree, check whether there is a sequence available\n in the alignment and assign this sequence as a character array\n '''\n if type(self.aln) is dict:\n self.seq_len = len(self.ref)\n else:\n self.seq_len = self.aln.get_alignment_length()\n self.one_mutation = 1.0/self.seq_len\n\n failed_leaves= 0\n if type(self.aln) is dict:\n # if alignment is specified as difference from ref\n dic_aln = self.aln\n self.seq_len = len(self.ref)\n else:\n # if full alignment is specified\n dic_aln = {k.name: seq_utils.seq2array(k.seq, fill_overhangs=self.fill_overhangs,\n ambiguous_character=self.gtr.ambiguous)\n for k in self.aln} #\n self.seq_len = self.aln.get_alignment_length()\n\n self.one_mutation = 1.0/self.seq_len\n\n\n # loop over tree,\n for l in self.tree.find_clades():\n if l.name in dic_aln:\n l.sequence= dic_aln[l.name]\n if l.name in self.seq_multiplicity:\n l.count = self.seq_multiplicity[l.name]\n else:\n l.count = 1.0\n elif l.is_terminal():\n self.logger(\"***WARNING: TreeAnc._attach_sequences_to_nodes: NO SEQUENCE FOR LEAF: %s\" % l.name, 0, warn=True)\n failed_leaves += 1\n l.sequence = seq_utils.seq2array(self.gtr.ambiguous*self.seq_len, fill_overhangs=self.fill_overhangs,\n ambiguous_character=self.gtr.ambiguous)\n if failed_leaves > self.tree.count_terminals() / 3:\n self.logger(\"ERROR: At least 30\\\\% terminal nodes cannot be assigned with a 
sequence!\\n\", 0, warn=True)\n self.logger(\"Are you sure the alignment belongs to the tree?\", 2, warn=True)\n break\n else: # could not assign sequence for internal node - is OK\n pass\n\n if failed_leaves:\n self.logger(\"***WARNING: TreeAnc: %d nodes don't have a matching sequence in the alignment. POSSIBLE ERROR.\"%failed_leaves, 0, warn=True)\n\n self.make_reduced_alignment()\n\n\n def make_reduced_alignment(self):\n \"\"\"\n Create the reduced alignment from the full sequences attached to (some)\n tree nodes. The methods collects all sequences from the tree nodes, creates\n the alignment, counts the multiplicity for each column of the alignment\n ('alignment pattern'), and creates the reduced alignment, where only the\n unique patterns are present. The reduced alignment and the pattern multiplicity\n are sufficient for the GTR calculations and allow to save memory on profile\n instantiation.\n The maps from full sequence to reduced sequence and back are also stored to allow\n compressing and expanding the sequences.\n\n The following attributes are assigned by the method:\n\n - full_to_reduced_sequence_map: map to reduce a sequence\n - reduced_to_full_sequence_map: map to restore sequence from reduced alignment\n - multiplicity: numpy array, which stores the pattern multiplicity for\n each position of the reduced alignment.\n - reduced_alignment: 2D numpy array, representing the alignment. Shape is\n (N x L'), where N is number of sequences, L' - number of unique alignment patterns\n\n\n In addition, each node gets\n\n - cseq: compressed sequence (corresponding row of the reduced alignment)\n\n \"\"\"\n\n self.logger(\"TreeAnc: making reduced alignment...\", 1)\n\n from collections import defaultdict\n\n # bind positions in real sequence to that of the reduced (compressed) sequence\n self.full_to_reduced_sequence_map = np.zeros(self.seq_len, dtype=int)\n\n # bind position in reduced sequence to the array of positions in real (expanded) sequence\n self.reduced_to_full_sequence_map = {}\n\n #if is a dict, want to be efficient and not iterate over a bunch of const_sites\n #so pre-load alignment_patterns with the location of const sites!\n #and get the sites that we want to iterate over only!\n if type(self.aln) is dict:\n tmp_reduced_aln, alignment_patterns, positions = self.process_alignment_dict()\n seqNames = self.aln.keys() #store seqName order to put back on tree\n else:\n # transpose real alignment, for ease of iteration\n alignment_patterns = {}\n tmp_reduced_aln = []\n # NOTE the order of tree traversal must be the same as below\n # for assigning the cseq attributes to the nodes.\n seqs = [n.sequence for n in self.tree.find_clades() if hasattr(n, 'sequence')]\n if len(np.unique([len(x) for x in seqs]))>1:\n self.logger(\"TreeAnc: Sequences differ in in length! 
ABORTING\",0, warn=True)\n aln_transpose = None\n return\n else:\n aln_transpose = np.array(seqs).T\n positions = range(self.seq_len)\n\n for pi in positions:\n if type(self.aln) is dict:\n pattern = [ self.aln[k][pi] if pi in self.aln[k].keys()\n else self.ref[pi] for k,v in self.aln.iteritems() ]\n else:\n pattern = aln_transpose[pi]\n\n str_pat = \"\".join(pattern)\n # if the column contains only one state and ambiguous nucleotides, replace\n # those with the state in other strains right away\n unique_letters = list(np.unique(pattern))\n if hasattr(self.gtr, \"ambiguous\"):\n if len(unique_letters)==2 and self.gtr.ambiguous in unique_letters:\n self.inferred_const_sites.append(pi) #keep track\n other = [c for c in unique_letters if c!=self.gtr.ambiguous][0]\n str_pat = str_pat.replace(self.gtr.ambiguous, other)\n unique_letters = [other]\n # if there is a mutation in this column, give it its private pattern\n # this is required when sampling mutations from reconstructed profiles.\n # otherwise, all mutations corresponding to the same pattern will be coupled.\n if len(unique_letters)>1:\n str_pat += '_%d'%pi\n\n # if the pattern is not yet seen,\n if str_pat not in alignment_patterns:\n # bind the index in the reduced aln, index in sequence to the pattern string\n alignment_patterns[str_pat] = (len(tmp_reduced_aln), [pi])\n # append this pattern to the reduced alignment\n tmp_reduced_aln.append(pattern)\n else:\n # if the pattern is already seen, append the position in the real\n # sequence to the reduced aln<->sequence_pos_indexes map\n alignment_patterns[str_pat][1].append(pi)\n\n # count how many times each column is repeated in the real alignment\n self.multiplicity = np.zeros(len(alignment_patterns))\n for p, pos in alignment_patterns.values():\n self.multiplicity[p]=len(pos)\n\n # create the reduced alignment as np array\n self.reduced_alignment = np.array(tmp_reduced_aln).T\n\n # create map to compress a sequence\n for p, pos in alignment_patterns.values():\n self.full_to_reduced_sequence_map[np.array(pos)]=p\n\n # create a map to reconstruct full sequence from the reduced (compressed) sequence\n for p, val in alignment_patterns.iteritems():\n self.reduced_to_full_sequence_map[val[0]]=np.array(val[1], dtype=int)\n\n # assign compressed sequences to all nodes of the tree, which have sequence assigned\n # for dict we cannot assume this is in the same order, as it does below!\n # so do it explicitly\n if type(self.aln) is dict:\n seq_reduce_align = {n:self.reduced_alignment[i]\n for i, n in enumerate(seqNames)}\n #This copy of the compressed sequences can be used to recover Ambiguous variable positions later\n #after all other processing has been done (see \"recover_var_ambigs\")\n self.ambigPos = {n:self.reduced_alignment[i]\n for i, n in enumerate(seqNames)}\n\n for n in self.tree.find_clades():\n if hasattr(n, 'sequence'):\n n.cseq = seq_reduce_align[n.name]\n else:\n # NOTE the order of tree traversal must be the same as above to catch the\n # index in the reduced alignment correctly\n seq_count = 0\n for n in self.tree.find_clades():\n if hasattr(n, 'sequence'):\n n.cseq = self.reduced_alignment[seq_count]\n seq_count+=1\n\n self.logger(\"TreeAnc: finished reduced alignment...\", 1)\n\n\n def process_alignment_dict(self):\n \"\"\"\n prepare the dictionary specifying differences from a reference sequence\n to construct the reduced alignment with variable sites only. 
NOTE:\n - sites can be constant but different from the reference\n - sites can be constant plus a ambiguous sites\n\n assigns:\n - self.nonref_positions: at least one sequence is different from ref\n returns:\n - reduced_alignment_const: reduced alignment accounting for\n non-variable postitions\n - alignment_patterns_const:\n dict pattern -> (pos in reduced alignment, list of pos in full alignment)\n - variable_positions: list of variable positions needed to construct remaining\n \"\"\"\n\n # number of sequences in alignment\n nseq = len(self.aln)\n\n from collections import defaultdict\n inv_map = defaultdict(list)\n for k,v in self.aln.iteritems():\n for pos, bs in v.iteritems():\n inv_map[pos].append(bs)\n\n self.nonref_positions = np.sort(inv_map.keys())\n\n ambiguous_char = self.gtr.ambiguous\n nonref_const = []\n nonref_alleles = []\n ambiguous_const = []\n variable_pos = []\n for pos, bs in inv_map.iteritems(): #loop over positions and patterns\n bases = \"\".join(np.unique(bs))\n if len(bs) == nseq:\n if (len(bases)<=2 and ambiguous_char in bases) or len(bases)==1:\n # all sequences different from reference, but only one state\n # (other than ambiguous_char) in column\n nonref_const.append(pos)\n nonref_alleles.append(bases.replace(ambiguous_char, ''))\n if ambiguous_char in bases: #keep track of sites 'made constant'\n self.inferred_const_sites.append(pos)\n else:\n # at least two non-reference alleles\n variable_pos.append(pos)\n else:\n # not every sequence different from reference\n if bases==ambiguous_char:\n ambiguous_const.append(pos)\n self.inferred_const_sites.append(pos) #keep track of sites 'made constant'\n else:\n # at least one non ambiguous non-reference allele not in\n # every sequence\n variable_pos.append(pos)\n\n refMod = np.fromstring(self.ref, 'S1')\n # place constant non reference positions by their respective allele\n refMod[nonref_const] = nonref_alleles\n # mask variable positions\n states = self.gtr.alphabet\n # maybe states = np.unique(refMod)\n refMod[variable_pos] = '.'\n\n # for each base in the gtr, make constant alignment pattern and\n # assign it to all const positions in the modified reference sequence\n reduced_alignment_const = []\n alignment_patterns_const = {}\n for base in states:\n p = base*nseq\n #if the alignment doesn't have a const site of this base, don't add! 
(ex: no '----' site!)\n if len(np.where(refMod==base)[0]):\n alignment_patterns_const[p] = [len(reduced_alignment_const),\n list(np.where(refMod==base)[0])]\n reduced_alignment_const.append(list(p))\n\n return reduced_alignment_const, alignment_patterns_const, variable_pos\n\n\n def prepare_tree(self):\n \"\"\"\n Set link to parent and net distance to root for all tree nodes.\n Should be run once the tree is read and after every tree topology or branch\n length optimization.\n \"\"\"\n if self.one_mutation is None:\n self.tree.root.branch_length = 0.001\n else:\n self.tree.root.branch_length = self.one_mutation\n self.tree.root.mutation_length = self.tree.root.branch_length\n self.tree.root.mutations = []\n self.tree.ladderize()\n self._prepare_nodes()\n self._leaves_lookup = {node.name:node for node in self.tree.get_terminals()}\n\n\n def _prepare_nodes(self):\n \"\"\"\n Set auxiliary parameters to every node of the tree.\n \"\"\"\n self.tree.root.up = None\n self.tree.root.bad_branch=self.tree.root.bad_branch if hasattr(self.tree.root, 'bad_branch') else False\n internal_node_count = 0\n for clade in self.tree.get_nonterminals(order='preorder'): # parents first\n internal_node_count+=1\n if clade.name is None:\n clade.name = \"NODE_\" + format(self._internal_node_count, '07d')\n self._internal_node_count += 1\n for c in clade.clades:\n c.bad_branch=c.bad_branch if hasattr(c, 'bad_branch') else False\n c.up = clade\n self._calc_dist2root()\n self._internal_node_count = max(internal_node_count, self._internal_node_count)\n\n def _calc_dist2root(self):\n \"\"\"\n For each node in the tree, set its root-to-node distance as dist2root\n attribute\n \"\"\"\n self.tree.root.dist2root = 0.0\n for clade in self.tree.get_nonterminals(order='preorder'): # parents first\n for c in clade.clades:\n if not hasattr(c, 'mutation_length'):\n c.mutation_length=c.branch_length\n c.dist2root = c.up.dist2root + c.mutation_length\n\n\n####################################################################\n## END SET-UP\n####################################################################\n\n def infer_gtr(self, print_raw=False, marginal=False, normalized_rate=True,\n fixed_pi=None, pc=5.0, **kwargs):\n \"\"\"\n Calculates the GTR model given the multiple sequence alignment and the tree.\n It performs ancestral sequence inference (joint or marginal) followed by\n branch length optimization. 
Then, the numbers of mutations are counted\n in the optimal tree and related to the time within the mutation happened.\n From this statistics, the relative state transition probabilities are inferred,\n and the transition matrix is computed.\n The result is used to construct the new GTR model of type 'custom'.\n The model is assigned to the TreeAnc and is used in the following analysis.\n\n Parameters\n -----------\n\n print_raw : bool\n Should print the inferred GTR model?\n\n marginal : bool\n Should use marginal sequence reconstruction?\n\n normalized_rate : bool\n If True, will set the mutation rate prefactor to 1.0.\n\n fixed_pi : np.array, None\n Provide the equilibrium character concentrations.\n If None is passed, the concentrations will be inferred from scratch.\n\n pc: float, 5.0\n Number of pseudo counts to use in gtr inference\n\n Returns\n -------\n\n gtr : GTR\n The inferred GTR model.\n \"\"\"\n\n # decide which type of the Maximum-likelihood reconstruction use\n # (marginal) or (joint)\n if marginal:\n _ml_anc = self._ml_anc_marginal\n else:\n _ml_anc = self._ml_anc_joint\n\n self.logger(\"TreeAnc inferring the GTR model from the tree...\", 1)\n _ml_anc(final=True, **kwargs) # call one of the reconstruction types\n alpha = list(self.gtr.alphabet)\n n=len(alpha)\n nij = np.zeros((n,n))\n Ti = np.zeros(n)\n\n self.logger(\"TreeAnc.infer_gtr: counting mutations...\", 2)\n for node in self.tree.find_clades():\n if hasattr(node,'mutations'):\n for a,pos, d in node.mutations:\n i,j = alpha.index(a), alpha.index(d)\n nij[i,j]+=1\n Ti[i] += 0.5*self._branch_length_to_gtr(node)\n Ti[j] -= 0.5*self._branch_length_to_gtr(node)\n for ni,nuc in enumerate(node.cseq):\n i = alpha.index(nuc)\n Ti[i] += self._branch_length_to_gtr(node)*self.multiplicity[ni]\n self.logger(\"TreeAnc.infer_gtr: counting mutations...done\", 3)\n if print_raw:\n print('alphabet:',alpha)\n print('n_ij:', nij)\n print('T_i:', Ti)\n root_state = np.array([np.sum((self.tree.root.cseq==nuc)*self.multiplicity) for nuc in alpha])\n\n self._gtr = GTR.infer(nij, Ti, root_state, fixed_pi=fixed_pi, pc=pc,\n alphabet=self.gtr.alphabet, logger=self.logger,\n prof_map = self.gtr.profile_map)\n if normalized_rate:\n self.logger(\"TreeAnc.infer_gtr: setting overall rate to 1.0...\", 2)\n self._gtr.mu=1.0\n return self._gtr\n\n\n###################################################################\n### ancestral reconstruction\n###################################################################\n def infer_ancestral_sequences(self,*args, **kwargs):\n \"\"\"Shortcut for :meth:`reconstruct_anc`\n\n Reconstruct ancestral states\n\n Parameters\n -----------\n\n method : str\n Method to use. Supported values are \"fitch\" and \"ml\"\n\n Returns\n -------\n\n N_diff : int\n Number of nucleotides different from the previous\n reconstruction. If there were no pre-set sequences, returns N*L\n\n \"\"\"\n self.reconstruct_anc(*args,**kwargs)\n\n\n def reconstruct_anc(self, method='ml', infer_gtr=False, marginal=False, **kwargs):\n \"\"\"\n\n Reconstruct ancestral states\n\n Parameters\n -----------\n\n method : str\n Method to use. Supported values are \"fitch\" and \"ml\"\n\n Returns\n -------\n\n N_diff : int\n Number of nucleotides different from the previous\n reconstruction. 
If there were no pre-set sequences, returns N*L\n \"\"\"\n self.logger(\"TreeAnc.infer_ancestral_sequences: method: \" + method, 1)\n\n if method == 'ml':\n if marginal:\n _ml_anc = self._ml_anc_marginal\n else:\n _ml_anc = self._ml_anc_joint\n else:\n _ml_anc = self._fitch_anc\n\n if infer_gtr:\n self.infer_gtr(marginal=marginal, **kwargs)\n N_diff = _ml_anc(**kwargs)\n else:\n N_diff = _ml_anc(**kwargs)\n\n return N_diff\n\n\n def recover_var_ambigs(self):\n \"\"\"\n Recalculates mutations using the original compressed sequence for terminal nodes\n which will recover ambiguous bases at variable sites. (See 'get_mutations')\n\n Once this has been run, infer_gtr and other functions which depend on self.gtr.alphabet\n will not work, as ambiguous bases are not part of that alphabet (only A, C, G, T, -).\n This is why it's left for the user to choose when to run\n \"\"\"\n for node in self.tree.find_clades(order='preorder'):\n if node.is_terminal():\n node.mutations = self.get_mutations(node, keep_var_ambigs=True)\n\n\n\n def get_mutations(self, node, keep_var_ambigs=False):\n \"\"\"\n Get the mutations on a tree branch. Take compressed sequences from both sides\n of the branch (attached to the node), compute mutations between them, and\n expand these mutations to the positions in the real sequences.\n\n Parameters\n ----------\n\n node : PhyloTree.Clade\n Tree node, which is the child node attached to the branch.\n\n keep_var_ambigs : boolean\n If true, generates mutations based on the *original* _compressed_ sequence, which\n may include ambiguities. Note sites that only have 1 unambiguous base and ambiguous\n bases (\"AAAAANN\") are stripped of ambiguous bases *before* compression, so ambiguous\n bases will *not* be preserved.\n\n Returns\n -------\n\n muts : list\n List of mutations. Each mutation is represented as tuple of\n (parent_state, position, child_state).\n\n \"\"\"\n nodeseq = node.cseq\n if keep_var_ambigs and self.ambigPos and node.is_terminal():\n #use the original compressed sequence with ambiguous positions\n nodeseq = self.ambigPos[node.name]\n muts = []\n for p, (anc, der) in enumerate(izip(node.up.cseq, nodeseq)):\n # only if the states in compressed sequences differ:\n if anc!=der:\n # expand to the positions in real sequence\n muts.extend([(anc, pos, der) for pos in self.reduced_to_full_sequence_map[p]])\n\n #sort by position\n return sorted(muts, key=lambda x:x[1])\n\n\n def expanded_sequence(self, node):\n \"\"\"\n Get node's compressed sequence and expand it to the real sequence\n\n Parameters\n ----------\n\n node : PhyloTree.Clade\n Tree node\n\n Returns\n -------\n\n seq : np.array\n Sequence as np.array of chars\n \"\"\"\n seq = np.zeros_like(self.full_to_reduced_sequence_map, dtype='S1')\n for pos, state in enumerate(node.cseq):\n seq[self.reduced_to_full_sequence_map[pos]] = state\n\n return seq\n\n\n def dict_sequence(self, node, keep_var_ambigs=False):\n \"\"\"\n For VCF-based TreeAnc objects, we do not want to store the entire\n sequence on every node - not space efficient! Instead, return the dict\n of mutation locations for this sequence. This is used in place of\n 'expanded_sequence' for VCF-based obj throughout TreeAnc. However, users\n can still call 'expanded_sequence' if they do actually want the whole thing!\n\n Parameters\n ----------\n node : PhyloTree.Clade\n Tree node\n\n keep_var_ambigs : boolean\n If true, generates dict sequence based on the *original* _compressed_ sequence, which\n may include ambiguities. 
Note sites that only have 1 unambiguous base and ambiguous\n bases (\"AAAAANN\") are stripped of ambiguous bases *before* compression, so ambiguous\n bases will *not* be preserved.\n\n Returns\n -------\n seq : dict\n dict where keys are position and value is the mutation\n\n EBH 6 Dec 2017\n \"\"\"\n seq = {}\n\n nodeseq = node.cseq\n if keep_var_ambigs and self.ambigPos and node.is_terminal():\n #use the original compressed sequence with ambiguous positions\n nodeseq = self.ambigPos[node.name]\n\n for pos in self.nonref_positions:\n cseqLoc = self.full_to_reduced_sequence_map[pos]\n base = nodeseq[cseqLoc]\n if self.ref[pos] != base:\n seq[pos] = base\n\n return seq\n\n###################################################################\n### FITCH\n###################################################################\n def _fitch_anc(self, **kwargs):\n \"\"\"\n Reconstruct ancestral states using Fitch's algorithm. The method requires\n sequences to be assigned to leaves. It implements the iteration from\n leaves to the root constructing the Fitch profiles for each character of\n the sequence, and then by propagating from the root to the leaves,\n reconstructs the sequences of the internal nodes.\n\n Keyword Args\n ------------\n\n\n Returns\n -------\n\n Ndiff : int\n Number of the characters that changed since the previous\n reconstruction. These changes are determined from the pre-set\n sequence attributes of the nodes. If there are no sequences available\n (i.e., no reconstruction has been made before), returns the total\n number of characters in the tree.\n\n \"\"\"\n # set fitch profiiles to each terminal node\n\n for l in self.tree.get_terminals():\n l.state = [[k] for k in l.cseq]\n\n L = len(self.tree.get_terminals()[0].cseq)\n\n self.logger(\"TreeAnc._fitch_anc: Walking up the tree, creating the Fitch profiles\",2)\n for node in self.tree.get_nonterminals(order='postorder'):\n node.state = [self._fitch_state(node, k) for k in range(L)]\n\n ambs = [i for i in range(L) if len(self.tree.root.state[i])>1]\n if len(ambs) > 0:\n for amb in ambs:\n self.logger(\"Ambiguous state of the root sequence \"\n \"in the position %d: %s, \"\n \"choosing %s\" % (amb, str(self.tree.root.state[amb]),\n self.tree.root.state[amb][0]), 4)\n self.tree.root.cseq = np.array([k[np.random.randint(len(k)) if len(k)>1 else 0]\n for k in self.tree.root.state])\n\n if self.is_vcf:\n self.tree.root.sequence = self.dict_sequence(self.tree.root)\n else:\n self.tree.root.sequence = self.expanded_sequence(self.tree.root)\n\n\n self.logger(\"TreeAnc._fitch_anc: Walking down the self.tree, generating sequences from the \"\n \"Fitch profiles.\", 2)\n N_diff = 0\n for node in self.tree.get_nonterminals(order='preorder'):\n if node.up != None: # not root\n sequence = np.array([node.up.cseq[i]\n if node.up.cseq[i] in node.state[i]\n else node.state[i][0] for i in range(L)])\n if hasattr(node, 'sequence'):\n N_diff += (sequence!=node.cseq).sum()\n else:\n N_diff += L\n node.cseq = sequence\n if self.is_vcf:\n node.sequence = self.dict_sequence(node)\n else:\n node.sequence = self.expanded_sequence(node)\n node.mutations = self.get_mutations(node)\n\n node.profile = seq_utils.seq2prof(node.cseq, self.gtr.profile_map)\n del node.state # no need to store Fitch states\n self.logger(\"Done ancestral state reconstruction\",3)\n for node in self.tree.get_terminals():\n node.profile = seq_utils.seq2prof(node.cseq, self.gtr.profile_map)\n return N_diff\n\n def _fitch_state(self, node, pos):\n \"\"\"\n Determine the Fitch profile for 
a single character of the node's sequence.\n The profile is essentially the intersection between the children's\n profiles or, if the former is empty, the union of the profiles.\n\n Parameters\n ----------\n\n node : PhyloTree.Clade:\n Internal node for which the profile is to be determined\n\n pos : int\n Position in the node's sequence for which the profile should\n be determined.\n\n Returns\n -------\n state : numpy.array\n Fitch profile for the character at position pos of the given node.\n \"\"\"\n state = self._fitch_intersect([k.state[pos] for k in node.clades])\n if len(state) == 0:\n state = np.concatenate([k.state[pos] for k in node.clades])\n return state\n\n def _fitch_intersect(self, arrays):\n \"\"\"\n Find the intersection of any number of 1D arrays.\n Return the sorted, unique values that are in all of the input arrays.\n Adapted from numpy.lib.arraysetops.intersect1d\n \"\"\"\n def pairwise_intersect(arr1, arr2):\n s2 = set(arr2)\n b3 = [val for val in arr1 if val in s2]\n return b3\n\n arrays = list(arrays) # allow assignment\n N = len(arrays)\n while N > 1:\n arr1 = arrays.pop()\n arr2 = arrays.pop()\n arr = pairwise_intersect(arr1, arr2)\n arrays.append(arr)\n N = len(arrays)\n\n return arrays[0]\n\n\n\n###################################################################\n### Maximum Likelihood\n###################################################################\n\n def ancestral_likelihood(self):\n \"\"\"\n Calculate the likelihood of the given realization of the sequences in\n the tree\n\n Returns\n -------\n\n log_lh : float\n The tree likelihood given the sequences\n \"\"\"\n log_lh = np.zeros(self.tree.root.cseq.shape[0])\n for node in self.tree.find_clades(order='postorder'):\n\n if node.up is None: # root node\n # 0-1 profile\n profile = seq_utils.seq2prof(node.cseq, self.gtr.profile_map)\n # get the probabilities to observe each nucleotide\n profile *= self.gtr.Pi\n profile = profile.sum(axis=1)\n log_lh += np.log(profile) # product over all characters\n continue\n\n t = node.branch_length\n\n indices = np.array([(np.argmax(self.gtr.alphabet==a),\n np.argmax(self.gtr.alphabet==b)) for a, b in izip(node.up.cseq, node.cseq)])\n\n logQt = np.log(self.gtr.expQt(t))\n lh = logQt[indices[:, 1], indices[:, 0]]\n log_lh += lh\n\n return log_lh\n\n def _branch_length_to_gtr(self, node):\n \"\"\"\n Set branch lengths to either mutation lengths or given branch lengths.\n The assigned values are to be used in the following ML analysis.\n \"\"\"\n if self.use_mutation_length:\n return max(ttconf.MIN_BRANCH_LENGTH*self.one_mutation, node.mutation_length)\n else:\n return max(ttconf.MIN_BRANCH_LENGTH*self.one_mutation, node.branch_length)\n\n\n def _ml_anc_marginal(self, verbose=0, store_compressed=True, final=True,\n sample_from_profile=False,\n debug=False, **kwargs):\n \"\"\"\n Perform marginal ML reconstruction of the ancestral states. In contrast to\n joint reconstructions, this needs to access the probabilities rather than only\n log probabilities and is hence handled by a separate function.\n\n Keyword Args\n ------------\n\n store_lh : bool\n If True, all likelihoods will be stored for all nodes. 
Useful for\n testing, diagnostics and if special post-processing is required.\n\n verbose :int\n How verbose the output should be\n \"\"\"\n\n tree = self.tree\n # number of nucleotides changed from prev reconstruction\n N_diff = 0\n\n L = self.tree.get_terminals()[0].cseq.shape[0]\n n_states = self.gtr.alphabet.shape[0]\n self.logger(\"TreeAnc._ml_anc_marginal: type of reconstruction: Marginal\", 2)\n\n self.logger(\"Walking up the tree, computing likelihoods... \", 3)\n # set the leaves profiles\n for leaf in tree.get_terminals():\n # in any case, set the profile\n leaf.marginal_subtree_LH = seq_utils.seq2prof(leaf.cseq, self.gtr.profile_map)\n leaf.marginal_subtree_LH_prefactor = np.zeros(L)\n\n # propagate leaves -->> root, set the marginal-likelihood messages\n for node in tree.get_nonterminals(order='postorder'): #leaves -> root\n # regardless of what was before, set the profile to ones\n node.marginal_subtree_LH_prefactor = np.zeros(L)\n node.marginal_subtree_LH = np.ones((L, n_states)) # we will multiply it\n for ch in node.clades:\n ch.marginal_Lx = self.gtr.propagate_profile(ch.marginal_subtree_LH,\n self._branch_length_to_gtr(ch), return_log=False) # raw prob to transfer prob up\n node.marginal_subtree_LH *= ch.marginal_Lx\n node.marginal_subtree_LH_prefactor += ch.marginal_subtree_LH_prefactor\n\n pre = node.marginal_subtree_LH.sum(axis=1) #sum over nucleotide states\n node.marginal_subtree_LH = (node.marginal_subtree_LH.T/pre).T # normalize so that the sum is 1\n node.marginal_subtree_LH_prefactor += np.log(pre) # and store log-prefactor\n\n self.logger(\"Computing root node sequence and total tree likelihood...\",3)\n # reconstruct the root node sequence\n tree.root.marginal_subtree_LH *= self.gtr.Pi # Msg to the root from the distant part (equ frequencies)\n pre=tree.root.marginal_subtree_LH.sum(axis=1)\n tree.root.marginal_profile = (tree.root.marginal_subtree_LH.T/pre).T\n tree.root.marginal_subtree_LH_prefactor += np.log(pre)\n\n # choose sequence characters from this profile.\n # treat root node differently to avoid piling up mutations on the longer branch\n if sample_from_profile=='root':\n root_sample_from_profile = True\n other_sample_from_profile = False\n elif isinstance(sample_from_profile, bool):\n root_sample_from_profile = sample_from_profile\n other_sample_from_profile = sample_from_profile\n\n seq, prof_vals, idxs = seq_utils.prof2seq(tree.root.marginal_profile,\n self.gtr, sample_from_prof=root_sample_from_profile)\n\n self.tree.sequence_LH = np.log(prof_vals) + tree.root.marginal_subtree_LH_prefactor\n self.tree.sequence_marginal_LH = (self.tree.sequence_LH*self.multiplicity).sum()\n self.tree.root.cseq = seq\n if final:\n if self.is_vcf:\n self.tree.root.sequence = self.dict_sequence(self.tree.root)\n else:\n self.tree.root.sequence = self.expanded_sequence(self.tree.root)\n\n # need this fake msg to account for the complementary subtree when traversing tree back\n tree.root.seq_msg_from_parent = np.repeat([self.gtr.Pi], len(tree.root.cseq), axis=0)\n\n self.logger(\"Walking down the tree, computing maximum likelihood sequences...\",3)\n # propagate root -->> leaves, reconstruct the internal node sequences\n # provided the upstream message + the message from the complementary subtree\n for node in tree.find_clades(order='preorder'):\n if node.up is None: # skip if node is root\n continue\n\n # integrate the information coming from parents with the information\n # of all children my multiplying it to the prev computed profile\n tmp_msg = 
np.copy(node.up.seq_msg_from_parent)\n for c in node.up.clades:\n if c != node:\n tmp_msg*=c.marginal_Lx\n norm_vector = tmp_msg.sum(axis=1)\n tmp_msg=(tmp_msg.T/norm_vector).T\n node.seq_msg_from_parent = self.gtr.propagate_profile(tmp_msg,\n self._branch_length_to_gtr(node), return_log=False)\n node.marginal_profile = node.marginal_subtree_LH * node.seq_msg_from_parent\n\n norm_vector = node.marginal_profile.sum(axis=1)\n node.marginal_profile=(node.marginal_profile.T/norm_vector).T\n # choose sequence based maximal marginal LH.\n seq, prof_vals, idxs = seq_utils.prof2seq(node.marginal_profile, self.gtr,\n sample_from_prof=other_sample_from_profile)\n\n if hasattr(node, 'cseq') and node.cseq is not None:\n N_diff += (seq!=node.cseq).sum()\n else:\n N_diff += L\n\n #assign new sequence\n node.cseq = seq\n if final:\n if self.is_vcf:\n node.sequence = self.dict_sequence(node)\n else:\n node.sequence = self.expanded_sequence(node)\n node.mutations = self.get_mutations(node)\n\n\n # note that the root doesn't contribute to N_diff (intended, since root sequence is often ambiguous)\n self.logger(\"TreeAnc._ml_anc_marginal: ...done\", 3)\n if store_compressed:\n self._store_compressed_sequence_pairs()\n\n # do clean-up:\n if not debug:\n for node in self.tree.find_clades():\n del node.marginal_subtree_LH\n del node.marginal_subtree_LH_prefactor\n del node.seq_msg_from_parent\n\n return N_diff\n\n\n def _ml_anc_joint(self, verbose=0, store_compressed=True, final=True,\n sample_from_profile=False,\n debug=False, **kwargs):\n\n \"\"\"\n Perform joint ML reconstruction of the ancestral states. In contrast to\n marginal reconstructions, this only needs to compare and multiply LH and\n can hence operate in log space.\n\n Keyword Args\n ------------\n\n store_lh : bool\n If True, all likelihoods will be stored for all nodes. Useful for\n testing, diagnostics and if special post-processing is required.\n\n verbose : int\n How verbose the output should be\n\n \"\"\"\n N_diff = 0 # number of sites differ from perv reconstruction\n L = self.tree.get_terminals()[0].cseq.shape[0]\n n_states = self.gtr.alphabet.shape[0]\n\n self.logger(\"TreeAnc._ml_anc_joint: type of reconstruction: Joint\", 2)\n\n self.logger(\"TreeAnc._ml_anc_joint: Walking up the tree, computing likelihoods... 
\", 3)\n # for the internal nodes, scan over all states j of this node, maximize the likelihood\n for node in self.tree.find_clades(order='postorder'):\n if node.up is None:\n node.joint_Cx=None # not needed for root\n continue\n\n # preallocate storage\n node.joint_Lx = np.zeros((L, n_states)) # likelihood array\n node.joint_Cx = np.zeros((L, n_states), dtype=int) # max LH indices\n branch_len = self._branch_length_to_gtr(node)\n # transition matrix from parent states to the current node states.\n # denoted as Pij(i), where j - parent state, i - node state\n log_transitions = np.log(self.gtr.expQt(branch_len))\n\n if node.is_terminal():\n msg_from_children = np.log(np.maximum(seq_utils.seq2prof(node.cseq, self.gtr.profile_map), ttconf.TINY_NUMBER))\n msg_from_children[np.isnan(msg_from_children) | np.isinf(msg_from_children)] = -ttconf.BIG_NUMBER\n else:\n # Product (sum-Log) over all child subtree likelihoods.\n # this is prod_ch L_x(i)\n msg_from_children = np.sum(np.stack([c.joint_Lx for c in node.clades], axis=0), axis=0)\n\n # for every possible state of the parent node,\n # get the best state of the current node\n # and compute the likelihood of this state\n for char_i, char in enumerate(self.gtr.alphabet):\n # Pij(i) * L_ch(i) for given parent state j\n msg_to_parent = (log_transitions.T[char_i, :] + msg_from_children)\n # For this parent state, choose the best state of the current node:\n node.joint_Cx[:, char_i] = msg_to_parent.argmax(axis=1)\n # compute the likelihood of the best state of the current node\n # given the state of the parent (char_i)\n node.joint_Lx[:, char_i] = msg_to_parent.max(axis=1)\n\n # root node profile = likelihood of the total tree\n msg_from_children = np.sum(np.stack([c.joint_Lx for c in self.tree.root.clades], axis = 0), axis=0)\n # Pi(i) * Prod_ch Lch(i)\n self.tree.root.joint_Lx = msg_from_children + np.log(self.gtr.Pi)\n normalized_profile = (self.tree.root.joint_Lx.T - self.tree.root.joint_Lx.max(axis=1)).T\n\n # choose sequence characters from this profile.\n # treat root node differently to avoid piling up mutations on the longer branch\n if sample_from_profile=='root':\n root_sample_from_profile = True\n elif isinstance(sample_from_profile, bool):\n root_sample_from_profile = sample_from_profile\n\n seq, anc_lh_vals, idxs = seq_utils.prof2seq(np.exp(normalized_profile),\n self.gtr, sample_from_prof = root_sample_from_profile)\n\n # compute the likelihood of the most probable root sequence\n self.tree.sequence_LH = np.choose(idxs, self.tree.root.joint_Lx.T)\n self.tree.sequence_joint_LH = (self.tree.sequence_LH*self.multiplicity).sum()\n self.tree.root.cseq = seq\n self.tree.root.seq_idx = idxs\n if final:\n if self.is_vcf:\n self.tree.root.sequence = self.dict_sequence(self.tree.root)\n else:\n self.tree.root.sequence = self.expanded_sequence(self.tree.root)\n\n self.logger(\"TreeAnc._ml_anc_joint: Walking down the tree, computing maximum likelihood sequences...\",3)\n # for each node, resolve the conditioning on the parent node\n for node in self.tree.find_clades(order='preorder'):\n\n # root node has no mutations, everything else has been alread y set\n if node.up is None:\n node.mutations = []\n continue\n\n # choose the value of the Cx(i), corresponding to the state of the\n # parent node i. 
This is the state of the current node\n node.seq_idx = np.choose(node.up.seq_idx, node.joint_Cx.T)\n # reconstruct seq, etc\n tmp_sequence = np.choose(node.seq_idx, self.gtr.alphabet)\n if hasattr(node, 'sequence') and node.cseq is not None:\n N_diff += (tmp_sequence!=node.cseq).sum()\n else:\n N_diff += L\n\n node.cseq = tmp_sequence\n\n if final:\n node.mutations = self.get_mutations(node)\n if self.is_vcf:\n node.sequence = self.dict_sequence(node)\n else:\n node.sequence = self.expanded_sequence(node)\n\n\n self.logger(\"TreeAnc._ml_anc_joint: ...done\", 3)\n if store_compressed:\n self._store_compressed_sequence_pairs()\n\n # do clean-up\n if not debug:\n for node in self.tree.find_clades(order='preorder'):\n del node.joint_Lx\n del node.joint_Cx\n del node.seq_idx\n\n return N_diff\n\n\n def _store_compressed_sequence_to_node(self, node):\n \"\"\"\n make a compressed representation of a pair of sequences only counting\n the number of times a particular pair of states (e.g. (A,T)) is observed\n the the aligned sequences of parent and child.\n\n Parameters\n -----------\n\n node : PhyloTree.Clade\n Tree node. **Note** because the method operates\n on the sequences on both sides of a branch, sequence reconstruction\n must be performed prior to calling this method.\n\n \"\"\"\n seq_pairs, multiplicity = self.gtr.compress_sequence_pair(node.up.cseq,\n node.cseq,\n pattern_multiplicity = self.multiplicity,\n ignore_gaps = self.ignore_gaps)\n node.compressed_sequence = {'pair':seq_pairs, 'multiplicity':multiplicity}\n\n\n def _store_compressed_sequence_pairs(self):\n \"\"\"\n Traverse the tree, and for each node store the compressed sequence pair.\n **Note** sequence reconstruction should be performed prior to calling\n this method.\n \"\"\"\n self.logger(\"TreeAnc._store_compressed_sequence_pairs...\",2)\n for node in self.tree.find_clades():\n if node.up is None:\n continue\n self._store_compressed_sequence_to_node(node)\n self.logger(\"TreeAnc._store_compressed_sequence_pairs...done\",3)\n\n\n###################################################################\n### Branch length\n###################################################################\n def optimize_branch_len(self, **kwargs):\n self.optimize_branch_length(**kwargs)\n\n def optimize_branch_length(self, **kwargs):\n \"\"\"\n Perform optimization for the branch lengths of the whole tree or any\n subtree. 
**Note** this method assumes that each node stores information\n about its sequence as a numpy.array object (node.sequence attribute).\n Therefore, before calling this method, sequence reconstruction with\n either of the available models must be performed.\n\n Keyword Args\n ------------\n\n verbose : int\n Verbosity of the output\n\n store_old : bool\n If True, the old lengths will be saved in node._old_dist attribute.\n Useful for testing, and special post-processing.\n\n Returns\n -------\n None, the phylogenetic tree is modified in-place.\n\n \"\"\"\n\n self.logger(\"TreeAnc.optimize_branch_length: running branch length optimization...\",1)\n\n verbose = 0\n store_old_dist = False\n\n if 'verbose' in kwargs:\n verbose = int(kwargs['verbose'])\n if 'store_old' in kwargs:\n store_old_dist = kwargs['store_old'] == True\n\n for node in self.tree.find_clades(order='postorder'):\n if node.up is None: continue # this is the root\n if store_old_dist:\n node._old_length = node.branch_length\n\n new_len = self.optimal_branch_length(node)\n\n if new_len < 0:\n continue\n\n self.logger(\"Optimization results: old_len=%.4f, new_len=%.4f \"\n \" Updating branch length...\"%(node.branch_length, new_len), 5)\n\n node.branch_length = new_len\n node.mutation_length=new_len\n\n # as branch lengths changed, the params must be fixed\n self.tree.root.up = None\n self.tree.root.dist2root = 0.0\n self._prepare_nodes()\n\n\n def optimal_branch_length(self, node):\n '''\n Calculate optimal branch length given the sequences of node and parent\n\n Parameters\n -----------\n\n node : PhyloTree.Clade\n TreeNode, attached to the branch.\n\n Returns\n -------\n\n new_len : float\n Optimal length of the given branch\n\n '''\n if node.up is None:\n return self.one_mutation\n\n parent = node.up\n if hasattr(node, 'compressed_sequence'):\n new_len = self.gtr.optimal_t_compressed(node.compressed_sequence['pair'],\n node.compressed_sequence['multiplicity'])\n else:\n new_len = self.gtr.optimal_t(parent.cseq, node.cseq,\n pattern_multiplicity=self.multiplicity,\n ignore_gaps=self.ignore_gaps)\n return new_len\n\n\n def prune_short_branches(self):\n \"\"\"\n If the branch length is less than the minimal value, remove the branch\n from the tree. **Requires** the ancestral sequence reconstruction\n \"\"\"\n self.logger(\"TreeAnc.prune_short_branches: pruning short branches (max prob at zero)...\", 1)\n for node in self.tree.find_clades():\n if node.up is None or node.is_terminal():\n continue\n\n # probability of the two seqs separated by zero time is not zero\n if self.gtr.prob_t(node.up.cseq, node.cseq, 0.0,\n pattern_multiplicity=self.multiplicity) > 0.1:\n # re-assign the node children directly to its parent\n node.up.clades = [k for k in node.up.clades if k != node] + node.clades\n for clade in node.clades:\n clade.up = node.up\n\n def optimize_sequences_and_branch_length(self,*args, **kwargs):\n \"\"\"This method is a shortcut for :py:meth:`optimize_seq_and_branch_len`\n\n Iteratively set branch lengths and reconstruct ancestral sequences until\n the values of either former or latter do not change. The algorithm assumes\n knowing only the topology of the tree, and requires that sequences are assigned\n to all leaves of the tree. The first step is to pre-reconstruct ancestral\n states using Fitch reconstruction algorithm or ML using existing branch length\n estimates. 
Then, optimize branch lengths and re-do reconstruction until\n convergence using ML method.\n \"\"\"\n self.optimize_seq_and_branch_len(*args,**kwargs)\n\n def optimize_seq_and_branch_len(self,reuse_branch_len=True,prune_short=True,\n max_iter=5, infer_gtr=False, **kwargs):\n \"\"\"\n Iteratively set branch lengths and reconstruct ancestral sequences until\n the values of either former or latter do not change. The algorithm assumes\n knowing only the topology of the tree, and requires that sequences are assigned\n to all leaves of the tree. The first step is to pre-reconstruct ancestral\n states using Fitch reconstruction algorithm or ML using existing branch length\n estimates. Then, optimize branch lengths and re-do reconstruction until\n convergence using ML method.\n\n Parameters\n -----------\n\n reuse_branch_len : bool, default True\n If True, rely on the initial branch lengths, and start with the\n Maximum-likelihood ancestral sequence inference using existing branch\n lengths. Otherwise, initial reconstruction of ancestral states with\n Fitch algorithm, which uses only the tree topology.\n\n prune_short : bool, default True\n If True, the branches with zero optimal length will be pruned from\n the tree hence creating polytomies. The polytomies could be further\n processed using resolve_polytomies from the TreeTime class.\n\n \"\"\"\n self.logger(\"TreeAnc.optimize_sequences_and_branch_length: sequences...\", 1)\n if reuse_branch_len:\n N_diff = self.reconstruct_anc(method='ml', infer_gtr=infer_gtr, **kwargs)\n else:\n N_diff = self.reconstruct_anc(method='fitch', infer_gtr=infer_gtr, **kwargs)\n\n self.optimize_branch_len(verbose=0, store_old=False)\n\n n = 0\n while n<max_iter:\n n += 1\n if prune_short:\n self.prune_short_branches()\n N_diff = self.reconstruct_anc(method='ml', infer_gtr=False,**kwargs)\n\n self.logger(\"TreeAnc.optimize_sequences_and_branch_length: Iteration %d.\"\n \" #Nuc changed since prev reconstructions: %d\" %(n, N_diff), 2)\n\n if N_diff < 1:\n break\n self.optimize_branch_len(verbose=0, store_old=False)\n\n self.tree.unconstrained_sequence_LH = (self.tree.sequence_LH*self.multiplicity).sum()\n self._prepare_nodes() # fix dist2root and up-links after reconstruction\n self.logger(\"TreeAnc.optimize_sequences_and_branch_length: Unconstrained sequence LH:%f\" % self.tree.unconstrained_sequence_LH , 2)\n return\n\n###############################################################################\n### Utility functions\n###############################################################################\n def get_reconstructed_alignment(self):\n \"\"\"\n Get the multiple sequence alignment including reconstructed sequences for\n the internal nodes.\n \"\"\"\n from Bio.Align import MultipleSeqAlignment\n from Bio.Seq import Seq\n from Bio.SeqRecord import SeqRecord\n self.logger(\"TreeAnc.get_reconstructed_alignment ...\",2)\n if not hasattr(self.tree.root, 'sequence'):\n self.logger(\"TreeAnc.reconstructed_alignment... 
reconstruction not yet done\",3)\n self.reconstruct_anc('ml')\n\n new_aln = MultipleSeqAlignment([SeqRecord(id=n.name, seq=Seq(\"\".join(n.sequence)), description=\"\")\n for n in self.tree.find_clades()])\n\n return new_aln\n\n def get_tree_dict(self, keep_var_ambigs=False):\n \"\"\"\n For VCF-based objects, returns a nested dict with all information required to\n reconstruct sequences for all nodes (terminal and internal) in the format:\n {'reference':'AGCTCGA..A',\n 'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },\n 'positions': [1,4,7,10,100...],\n 'inferred_const_sites': [7,100....] <this is optional>\n }\n self.inferred_const_sites\n\n Reference being the reference sequence to which the variable sites are mapped;\n sequence containing a dict for each sequence with the position and base of\n mutations; and positions containing a list of all the variable positions.\n If included, inferred_const_sites is positions that were constant except\n ambiguous bases, which were converted into constant sites (ex: 'AAAN' -> 'AAAA')\n\n keep_var_ambigs : boolean\n If true, generates dict sequence based on the *original* _compressed_ sequence, which\n may include ambiguities. Note sites that only have 1 unambiguous base and ambiguous\n bases (\"AAAAANN\") are stripped of ambiguous bases *before* compression, so ambiguous\n bases will *not* be preserved.\n\n EBH 7 Dec 2017\n \"\"\"\n if self.is_vcf:\n tree_dict = {}\n tree_dict['reference'] = self.ref\n tree_dict['positions'] = self.nonref_positions\n\n tree_aln = {}\n for n in self.tree.find_clades():\n if hasattr(n, 'sequence'):\n if keep_var_ambigs: #regenerate dict to include ambig bases\n tree_aln[n.name] = self.dict_sequence(n, keep_var_ambigs)\n else:\n tree_aln[n.name] = n.sequence\n\n tree_dict['sequences'] = tree_aln\n\n if len(self.inferred_const_sites) != 0:\n tree_dict['inferred_const_sites'] = self.inferred_const_sites\n\n return tree_dict\n else:\n raise(\"A dict can only be returned for trees created with VCF-input!\")\n\n\nif __name__==\"__main__\":\n\n from Bio import Phylo\n from StringIO import StringIO\n from Bio import Phylo,AlignIO\n\n tiny_tree = Phylo.read(StringIO(\"((A:.0060,B:.30)C:.030,D:.020)E:.004;\"), 'newick')\n tiny_aln = AlignIO.read(StringIO(\">A\\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\n\"\n 
\">B\\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\n\"\n \">C\\nAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTT\\n\"\n \">D\\nAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTT\\n\"\n 
\">E\\nACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT\\n\"), 'fasta')\n\n mygtr = GTR.custom(alphabet = np.array(['A', 'C', 'G', 'T']),\n pi = np.array([0.25, 0.95, 0.005, 0.05]), W=np.ones((4,4)))\n\n myTree = TreeAnc(gtr=mygtr, tree = tiny_tree,\n aln =tiny_aln, verbose = 4)\n\n logLH = myTree.ancestral_likelihood()\n LH = np.exp(logLH)\n print (\"Net probability (for all possible realizations): \" + str(np.exp(logLH).sum()))\n print (np.exp(logLH))\n" ]
[ [ "numpy.log", "numpy.isinf", "numpy.unique", "numpy.isnan", "numpy.stack", "numpy.ones", "numpy.concatenate", "numpy.copy", "numpy.fromstring", "numpy.zeros_like", "numpy.choose", "numpy.where", "numpy.argmax", "numpy.array", "numpy.exp", "numpy.zeros", "numpy.sum" ] ]
SimoneGasperini/rboost
[ "5e0108d821077da76964e1e797f0d775b3999f56" ]
[ "rboost/gui/listlabels.py" ]
[ "import pandas as pd\nfrom PySide2.QtWidgets import (\n QWidget,\n QHBoxLayout,\n QVBoxLayout,\n QFormLayout,\n QTableView,\n QPushButton,\n QComboBox,\n QHeaderView\n)\n\nfrom rboost.gui.utils.pandasmodel import PandasModel\n\n\nclass ListLabelsWindow(QWidget):\n\n def __init__(self, rboost):\n super().__init__()\n self.rboost = rboost\n\n self.layout = QVBoxLayout()\n self._add_form_layout()\n self._add_buttons_layout()\n self.table_view = None\n self._add_table_view_layout()\n self.setLayout(self.layout)\n\n def _add_form_layout(self):\n labtype_form = self._create_labtype_form()\n self.layout.addLayout(labtype_form)\n\n def _add_buttons_layout(self):\n self.buttons_layout = QHBoxLayout()\n show_button = QPushButton('Show list')\n show_button.clicked.connect(self.show_table)\n clear_button = QPushButton('Clear list')\n clear_button.clicked.connect(self.clear_table)\n self.buttons_layout.addWidget(show_button)\n self.buttons_layout.addWidget(clear_button)\n self.layout.addLayout(self.buttons_layout)\n\n def _add_table_view_layout(self, df=None):\n self.table_view_layout = QHBoxLayout()\n if self.table_view is not None:\n self.table_view_layout.removeWidget(self.table_view)\n self.table_view.deleteLater()\n self.table_view = self._create_table_view(df=df)\n self.table_view_layout.addWidget(self.table_view)\n self.layout.addLayout(self.table_view_layout)\n\n def _create_labtype_form(self):\n labtype_form = QFormLayout()\n items = [None] + sorted(list(self.rboost.labtypes))\n self.labtype_combobox = QComboBox()\n self.labtype_combobox.addItems(items)\n labtype_form.addRow('Label type', self.labtype_combobox)\n return labtype_form\n\n def _create_table_view(self, df):\n if df is None:\n df = pd.DataFrame()\n model = PandasModel(df)\n table_view = QTableView()\n table_view.setModel(model)\n table_view.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n return table_view\n\n def _get_labels(self):\n labels = [self.rboost.network.graph.nodes[n]['label']\n for n in self.rboost.network.graph.nodes]\n labtype = str(self.labtype_combobox.currentText())\n if labtype:\n labels = [label for label in labels if labtype in label.types]\n labels.sort(reverse=True)\n return labels\n\n def _get_dataframe(self, labels):\n columns = [self.rboost.labels_df_cols[k]\n for k in ['label', 'types', 'queries', 'uploads']]\n data = [[lab.name, lab.types, lab.queries_count, lab.uploads_count]\n for lab in labels]\n df = pd.DataFrame(data=data, columns=columns)\n return df\n\n def show_table(self):\n labels = self._get_labels()\n df = self._get_dataframe(labels=labels)\n self._add_table_view_layout(df=df)\n self.setLayout(self.layout)\n\n def clear_table(self):\n empty_df = pd.DataFrame()\n self._add_table_view_layout(df=empty_df)\n self.setLayout(self.layout)\n" ]
[ [ "pandas.DataFrame" ] ]
eladapplbaum/IML.HUJI
[ "6a08721e143b0d766f7085c70882f32f60088550" ]
[ "IMLearn/learners/gaussian_estimators.py" ]
[ "from __future__ import annotations\nimport numpy as np\nfrom numpy.linalg import inv, det, slogdet\n\n\nclass UnivariateGaussian:\n \"\"\"\n Class for univariate Gaussian Distribution Estimator\n \"\"\"\n\n def __init__(self, biased_var: bool = False) -> UnivariateGaussian:\n \"\"\"\n Estimator for univariate Gaussian mean and variance parameters\n\n Parameters\n ----------\n biased_var : bool, default=False\n Should fitted estimator of variance be a biased or unbiased estimator\n\n Attributes\n ----------\n fitted_ : bool\n Initialized as false indicating current estimator instance has not been fitted.\n To be set as True in `UnivariateGaussian.fit` function.\n\n mu_: float\n Estimated expectation initialized as None. To be set in `UnivariateGaussian.fit`\n function.\n\n var_: float\n Estimated variance initialized as None. To be set in `UnivariateGaussian.fit`\n function.\n \"\"\"\n self.biased_ = biased_var\n self.fitted_, self.mu_, self.var_ = False, None, None\n\n def fit(self, X: np.ndarray) -> UnivariateGaussian:\n \"\"\"\n Estimate Gaussian expectation and variance from given samples\n\n Parameters\n ----------\n X: ndarray of shape (n_samples, )\n Training data\n\n Returns\n -------\n self : returns an instance of self.\n\n Notes\n -----\n Sets `self.mu_`, `self.var_` attributes according to calculated estimation (where\n estimator is either biased or unbiased). Then sets `self.fitted_` attribute to `True`\n \"\"\"\n\n self.mu_ = sum(X) / X.size\n var_sum = 0\n for s in X:\n var_sum += (s - self.mu_) ** 2\n\n self.var_ = (var_sum / (X.size if self.biased_ else X.size - 1)) ** 0.5\n\n self.fitted_ = True\n return self\n\n def pdf(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculate PDF of observations under Gaussian model with fitted estimators\n\n Parameters\n ----------\n X: ndarray of shape (n_samples, )\n Samples to calculate PDF for\n\n Returns\n -------\n pdfs: ndarray of shape (n_samples, )\n Calculated values of given samples for PDF function of N(mu_, var_)\n\n Raises\n ------\n ValueError: In case function was called prior fitting the model\n \"\"\"\n if not self.fitted_:\n raise ValueError(\n \"Estimator must first be fitted before calling `pdf` function\")\n pdfs = np.ndarray(X.size)\n for i in range(X.size):\n pdfs[i] = np.exp(\n -((X[i] - self.mu_) ** 2) / 2 * self.var_) / np.sqrt(\n 2 * np.pi * self.var_)\n return pdfs\n\n @staticmethod\n def log_likelihood(mu: float, sigma: float, X: np.ndarray) -> float:\n \"\"\"\n Calculate the log-likelihood of the data under a specified Gaussian model\n\n Parameters\n ----------\n mu : float\n Expectation of Gaussian\n sigma : float\n Variance of Gaussian\n X : ndarray of shape (n_samples, )\n Samples to calculate log-likelihood with\n\n Returns\n -------\n log_likelihood: float\n log-likelihood calculated\n \"\"\"\n temp_sum = 0\n for i in range(X.size):\n temp_sum += (X[i] - mu) ** 2\n return -(X.size / 2) * (\n np.log(2 * np.pi) + np.log(sigma)) - temp_sum / (\n 2 * sigma)\n\n\nclass MultivariateGaussian:\n \"\"\"\n Class for multivariate Gaussian Distribution Estimator\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize an instance of multivariate Gaussian estimator\n\n Attributes\n ----------\n fitted_ : bool\n Initialized as false indicating current estimator instance has not been fitted.\n To be set as True in `MultivariateGaussian.fit` function.\n\n mu_: ndarray of shape (n_features,)\n Estimated expectation initialized as None. 
To be set in `MultivariateGaussian.fit`\n function.\n\n cov_: ndarray of shape (n_features, n_features)\n Estimated covariance initialized as None. To be set in `MultivariateGaussian.fit`\n function.\n \"\"\"\n self.mu_, self.cov_ = None, None\n self.fitted_ = False\n\n def fit(self, X: np.ndarray) -> MultivariateGaussian:\n \"\"\"\n Estimate Gaussian expectation and covariance from given samples\n\n Parameters\n ----------\n X: ndarray of shape (n_samples, n_features)\n Training data\n\n Returns\n -------\n self : returns an instance of self\n\n Notes\n -----\n Sets `self.mu_`, `self.cov_` attributes according to calculated estimation.\n Then sets `self.fitted_` attribute to `True`\n \"\"\"\n rows, cols = X.shape\n self.mu_ = np.sum(X, axis=0) / rows\n X_gal = np.array([X[i] - self.mu_ for i in range(rows)])\n self.cov_ = np.dot(X_gal.transpose(), X_gal) / (rows - 1)\n self.fitted_ = True\n return self\n\n def pdf(self, X: np.ndarray):\n \"\"\"\n Calculate PDF of observations under Gaussian model with fitted estimators\n\n Parameters\n ----------\n X: ndarray of shape (n_samples, n_features)\n Samples to calculate PDF for\n\n Returns\n -------\n pdfs: ndarray of shape (n_samples, )\n Calculated values of given samples for PDF function of N(mu_, cov_)\n\n Raises\n ------\n ValueError: In case function was called prior fitting the model\n \"\"\"\n if not self.fitted_:\n raise ValueError(\n \"Estimator must first be fitted before calling `pdf` function\")\n mahalanobis = np.einsum(\"bi,ij,bj->b\", X-self.mu_, inv(self.cov_), X-self.mu_)\n\n # normalizer uses the dimension (n_features), not the sample count\n return np.exp(-.5 * mahalanobis) / \\\n np.sqrt((2*np.pi) ** X.shape[1] * det(self.cov_))\n\n\n @staticmethod\n def log_likelihood(mu: np.ndarray, cov: np.ndarray,\n X: np.ndarray) -> float:\n \"\"\"\n Calculate the log-likelihood of the data under a specified Gaussian model\n\n Parameters\n ----------\n mu : ndarray of shape (n_features,)\n Expectation of Gaussian\n cov : ndarray of shape (n_features, n_features)\n covariance matrix of Gaussian\n X : ndarray of shape (n_samples, n_features)\n Samples to calculate log-likelihood with\n\n Returns\n -------\n log_likelihood: float\n log-likelihood calculated over all input data and under given parameters of Gaussian\n \"\"\"\n rows, cols = X.shape\n X_gal = np.array([X[i] - mu for i in range(rows)])\n\n temp_sum = 0\n for i in range(rows):\n temp_sum += np.linalg.multi_dot([X_gal[i].transpose(),\n np.linalg.inv(cov),\n X_gal[i]])\n return -(rows / 2) * (cols * np.log(2 * np.pi) + np.log(\n np.linalg.det(cov))) - 0.5 * temp_sum\n\n\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.linalg.inv", "numpy.ndarray", "numpy.linalg.det", "numpy.exp", "numpy.sum" ] ]
Julian-Theis/stat-kiste
[ "b436e881c5ad79781a60dc767c08aa1165e4fb8b" ]
[ "backend/stat/normality_tests.py" ]
[ "\"\"\"\nCode originates from: https://machinelearningmastery.com/a-gentle-introduction-to-normality-tests-in-python/\n\n\"\"\"\n\nfrom scipy.stats import shapiro, normaltest, anderson\n\n\"\"\"\nShapiro-Wilk Test of Normality\nThe Shapiro-Wilk Test is more appropriate for small sample sizes (< 50 samples), but can also handle sample sizes as large as 2000.\nThe Shapiro-Wilk test is used as a numerical means of assessing normality.\n\"\"\"\ndef run_shapiro_wilk_normality_test(data, alpha=0.05, print_results=True):\n stat, p = shapiro(data)\n if print_results:\n print('Statistics=%.3f, p=%.3f' % (stat, p))\n if p > alpha:\n print('Sample looks Gaussian (fail to reject H0) at significance level ', alpha)\n else:\n print('Sample does not look Gaussian (reject H0) at significance level ', alpha)\n return stat, p\n\ndef run_dagostino_pearson_test(data, alpha, print_results=True):\n stat, p = normaltest(data)\n if print_results:\n print('Statistics=%.3f, p=%.3f' % (stat, p))\n if p > alpha:\n print('Sample looks Gaussian (fail to reject H0) at significance level ', alpha)\n else:\n print('Sample does not look Gaussian (reject H0) at significance level ', alpha)\n return stat, p\n\ndef run_anderson_darling(data, print_results=True):\n result = anderson(data)\n print('Statistic: %.3f' % result.statistic)\n if print_results:\n for i in range(len(result.critical_values)):\n sl, cv = result.significance_level[i], result.critical_values[i]\n if result.statistic < result.critical_values[i]:\n print('%.3f: %.3f, data looks normal (fail to reject H0)' % (sl, cv))\n else:\n print('%.3f: %.3f, data does not look normal (reject H0)' % (sl, cv))\n return result\n\n\n" ]
[ [ "scipy.stats.anderson", "scipy.stats.shapiro", "scipy.stats.normaltest" ] ]
JamalRahman/hybridtfidf
[ "0409aae0083b1eae32c1a049f87f484740289be1" ]
[ "hybridtfidf/utils.py" ]
[ "from numpy.linalg import norm\r\nfrom numpy import dot\r\n\r\n\r\ndef cosine_sim(vec1, vec2):\r\n \"\"\"Calculates the cosine similarity between two vectors\r\n\r\n Args:\r\n vec1 (list of float): A vector\r\n vec2 (list of float): A vector\r\n\r\n Returns:\r\n The cosine similarity between the two input vectors\r\n \"\"\"\r\n return dot(vec1, vec2) / (norm(vec1) * norm(vec2))\r\n\r\n\r\ndef select_salient_posts(post_vectors, post_weights, k=10, similarity_threshold=0.4):\r\n \"\"\"\r\n Selects the top k most salient posts in a collection of posts.\r\n To avoid redundancy, any post too similar to other-posts are disregarded. Each selected post will\r\n therefore be both highly salient and representative of unique semantics.\r\n\r\n Note:\r\n post_vectors and post_weights must be in the same order. The ith element of post_weights must reflect\r\n the ith element of post_vectors\r\n\r\n Args:\r\n post_vectors (list of (list of float)): Hybrid tfidf representation of the documents\r\n as a document-term matrix\r\n\r\n post_weights (list of float): Hybrid Tfidf weight for each document\r\n\r\n k (int): The number of posts to select as output\r\n\r\n similarity_threshold (float): The maximum cosine similiarity for a post to be selected\r\n\r\n \"\"\"\r\n\r\n sorted_keyed_vectors = [z for _, z in sorted(zip(post_weights, enumerate(post_vectors)), key=lambda i: i[0],\r\n reverse=True)] # z is (i,vi) sorted by weight\r\n\r\n i = 1\r\n\r\n veclength = len(post_vectors)\r\n loop_condition = True\r\n\r\n significant_indices = [0]\r\n unsorted_indices = [sorted_keyed_vectors[0][0]]\r\n\r\n while loop_condition:\r\n is_similar = False\r\n\r\n for j in significant_indices:\r\n sim = cosine_sim(sorted_keyed_vectors[j][1], sorted_keyed_vectors[i][1])\r\n if sim >= similarity_threshold:\r\n is_similar = True\r\n\r\n if not is_similar:\r\n significant_indices.append(i)\r\n unsorted_indices.append(sorted_keyed_vectors[i][0])\r\n\r\n if (len(significant_indices) >= k) or (i >= veclength - 1):\r\n loop_condition = False\r\n i += 1\r\n\r\n return unsorted_indices\r\n" ]
[ [ "numpy.dot", "numpy.linalg.norm" ] ]
jb2020-super/nunif
[ "eab6952d93e85951ed4e4cff30cd26c09e1dbb63" ]
[ "nunif/cli/waifu2x.py" ]
[ "# waifu2x\nimport os\nfrom os import path\nimport torch\nimport argparse\nimport csv\nfrom tqdm import tqdm\nfrom concurrent.futures import ThreadPoolExecutor as PoolExecutor\nfrom .. logger import logger\nfrom .. utils import load_image, save_image, ImageLoader\nfrom .. tasks.waifu2x import Waifu2x\n\nif os.getenv(\"NUNIF_MODEL_DIR\") is not None:\n MODEL_DIR = os.getenv(\"NUNIF_MODEL_DIR\")\nelse:\n MODEL_DIR = path.abspath(path.join(path.dirname(path.abspath(__file__)),\n \"..\", \"..\", \"pretrained_models\"))\nDEFAULT_MODEL_DIR = path.join(MODEL_DIR, \"waifu2x\", \"cunet\", \"art\")\n\n\ndef convert_files(ctx, files, args):\n loader = ImageLoader(files=files, max_queue_size=128)\n os.makedirs(args.output, exist_ok=True)\n with torch.no_grad(), PoolExecutor() as pool:\n for im, meta in tqdm(loader, ncols=60):\n z = ctx.convert(im, meta, args.method, args.noise_level, args.tile_size, args.batch_size, args.tta)\n output_filename = path.splitext(path.basename(meta[\"filename\"]))[0] + \".png\"\n pool.submit(save_image, z, meta, path.join(args.output, output_filename))\n\n\ndef convert_file(ctx, args):\n with torch.no_grad():\n im, meta = load_image(args.input)\n z = ctx.convert(im, meta, args.method, args.noise_level, args.tile_size, args.batch_size, args.tta)\n save_image(z, meta, args.output)\n\n\ndef load_files(txt):\n files = []\n with open(txt, \"r\") as f:\n reader = csv.reader(f)\n for row in reader:\n files.append(row[0])\n return files\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model-dir\", type=str, default=DEFAULT_MODEL_DIR, help=\"model dir\")\n parser.add_argument(\"--noise-level\", \"-n\", type=int, default=0, choices=[0, 1, 2, 3], help=\"noise level\")\n parser.add_argument(\"--method\", \"-m\", type=str, choices=[\"scale\", \"noise\", \"noise_scale\"], default=\"noise_scale\", help=\"method\")\n parser.add_argument(\"--gpu\", \"-g\", type=int, nargs=\"+\", default=[0], help=\"GPU device ids. -1 for CPU\")\n parser.add_argument(\"--batch-size\", type=int, default=4, help=\"minibatch_size\")\n parser.add_argument(\"--tile-size\", type=int, default=256, help=\"tile size for tiled render\")\n parser.add_argument(\"--output\", \"-o\", type=str, required=True, help=\"output file or directory\")\n parser.add_argument(\"--input\", \"-i\", type=str, required=True, help=\"input file or directory. (*.txt, *.csv) for image list\")\n parser.add_argument(\"--tta\", action=\"store_true\", help=\"TTA mode\")\n args = parser.parse_args()\n logger.debug(str(args))\n\n ctx = Waifu2x(model_dir=args.model_dir, gpus=args.gpu)\n ctx.load_model(args.method, args.noise_level)\n\n if path.isdir(args.input):\n convert_files(ctx, ImageLoader.listdir(args.input), args)\n else:\n if path.splitext(args.input)[-1] in (\".txt\", \".csv\"):\n convert_files(ctx, load_files(args.input), args)\n else:\n convert_file(ctx, args)\n\n return 0\n" ]
[ [ "torch.no_grad" ] ]
fordanic/cmiv-ai-course
[ "c51e51485d18c38bece67d6bcb3bd7422b56da97" ]
[ "notebooks/figures/plot_interactive_tree.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_blobs\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.externals.six import StringIO # doctest: +SKIP\nfrom sklearn.tree import export_graphviz\nfrom scipy.misc import imread\nfrom scipy import ndimage\n\nimport re\n\nX, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)\n\n\ndef tree_image(tree, fout=None):\n try:\n import pydot\n except ImportError:\n # make a hacky white plot\n x = np.ones((10, 10))\n x[0, 0] = 0\n return x\n dot_data = StringIO()\n export_graphviz(tree, out_file=dot_data)\n data = re.sub(r\"gini = 0\\.[0-9]+\\\\n\", \"\", dot_data.getvalue())\n data = re.sub(r\"samples = [0-9]+\\\\n\", \"\", data)\n data = re.sub(r\"\\\\nsamples = [0-9]+\", \"\", data)\n\n graph = pydot.graph_from_dot_data(data)[0]\n if fout is None:\n fout = \"tmp.png\"\n graph.write_png(fout)\n return imread(fout)\n\n\ndef plot_tree(max_depth=1):\n fig, ax = plt.subplots(1, 2, figsize=(15, 7))\n h = 0.02\n\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n if max_depth != 0:\n tree = DecisionTreeClassifier(max_depth=max_depth, random_state=1).fit(X, y)\n Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n Z = Z.reshape(xx.shape)\n faces = tree.tree_.apply(np.c_[xx.ravel(), yy.ravel()].astype(np.float32))\n faces = faces.reshape(xx.shape)\n border = ndimage.laplace(faces) != 0\n ax[0].contourf(xx, yy, Z, alpha=.4)\n ax[0].scatter(xx[border], yy[border], marker='.', s=1)\n ax[0].set_title(\"max_depth = %d\" % max_depth)\n ax[1].imshow(tree_image(tree))\n ax[1].axis(\"off\")\n else:\n ax[0].set_title(\"data set\")\n ax[1].set_visible(False)\n ax[0].scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)\n ax[0].set_xlim(x_min, x_max)\n ax[0].set_ylim(y_min, y_max)\n ax[0].set_xticks(())\n ax[0].set_yticks(())\n\n\ndef plot_tree_interactive():\n from ipywidgets import interactive, IntSlider\n slider = IntSlider(min=0, max=8, step=1, value=0)\n return interactive(plot_tree, max_depth=slider)" ]
[ [ "sklearn.tree.export_graphviz", "scipy.ndimage.laplace", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.ones", "sklearn.externals.six.StringIO", "sklearn.tree.DecisionTreeClassifier", "scipy.misc.imread", "numpy.array", "sklearn.datasets.make_blobs" ] ]
erelsgl/fair-diminishing-differences
[ "ae64ff4a4c6cfde5a1261e67484c905414607d36" ]
[ "simulations.py" ]
[ "#!python3\n\n\"\"\"\nUtilities for conducting simulations on random utility profiles.\n\nAuthor: Erel Segai-Halevi\nDate: 2019-07\n\"\"\"\n\nimport pandas, numpy as np\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt\nfrom partitions import equalPartitions\nimport operator\nfrom timeit import default_timer as timer\n\nfrom PrefProfile import PrefProfile\nfrom mean_and_stderr import mean_and_stderr\n\ntrace = lambda *x: None # To enable tracing, set trace=print\n\n\ndef avergeOverRandomProfiles(checkSingleProfile,\n agents:list, items:list, lowMarketValue:float, highMarketValue:float, maxNoiseSize:float, iterations:int) -> (list,list):\n \"\"\"\n Create many random utility profiles, and calculate various stats on them.\n\n :param checkSingleProfile: a function that takes a single PrefProfile object, and returns a vector of numbers describing it.\n :param agents: a list of agent-names.\n :param items: a list of item-names.\n :param lowMarketValue, highMarketValue, maxNoiseSize: used for creating the random valuations.\n :param iterations: number of times to randomize.\n\n :return (means, stderrs):\n means is a vector of floats, representing the average of the numbers returned for all random PrefProfiles.\n stderrs is a corresponding vector of the standard-errors.\n\n\n >>> dummyCheckSingleProfile = lambda profile: [True,False,5]\n >>> list(avergeOverRandomProfiles(dummyCheckSingleProfile, [\"A\",\"B\"], [\"x\",\"y\",\"z\"], 1, 2, 0.5, 10))\n [array([ 1., 0., 5.]), array([ 0., 0., 0.])]\n \"\"\"\n generator = lambda: np.array(checkSingleProfile(PrefProfile.randomCardinal(agents, items, lowMarketValue, highMarketValue, maxNoiseSize)))\n return mean_and_stderr(iterations, generator)\n\n\ndef simulate(checkSingleProfile, columnNames:list,\n agents:list, itemCounts:list, noiseSizes:list,\n lowMarketValue:float, highMarketValue:float, iterations:int, filename:str)->DataFrame:\n \"\"\"\n Runs an experiment with random cardinal utility profiles.\n\n :param checkSingleProfile: a function that takes a single PrefProfile object, and returns a vector of numbers describing it.\n :param columnNames: a list of column-names. Should be of the same size as the vector returned by checkSingleProfile.\n\n :param agents: a list of agent-names.\n :param itemCounts: a list of different item-counts to try.\n :param noiseSizes: a list of different noise-amplitudes to try.\n :param lowMarketValue, highMarketValue: range for randomly selecting the market-value of each item.\n :param iterations: number of iterations to run randomly.\n :param filename: name of file for saving the results. 
Will be created in subfolder \"results/\" with extension \"csv\".\n\n :return: a DataFrame with the experiment results.\n\n >>> pandas.set_option('display.max_columns', 500)\n >>> pandas.set_option('display.width', 500)\n >>> dummyCheckSingleProfile = lambda profile: [True,5]\n >>> dummyColumns = [\"col1\",\"col2\"]\n >>> simulate(dummyCheckSingleProfile, dummyColumns, [\"A\",\"B\"], [2,3,4], [0.3,0.7], 1, 2, 10, \"doctest-simulation\")\n Agents Iterations Noise size Items per agent col1 col2 col1 err col2 err\n 0 2.0 10.0 0.3 2.0 1.0 5.0 0.0 0.0\n 1 2.0 10.0 0.3 3.0 1.0 5.0 0.0 0.0\n 2 2.0 10.0 0.3 4.0 1.0 5.0 0.0 0.0\n 3 2.0 10.0 0.7 2.0 1.0 5.0 0.0 0.0\n 4 2.0 10.0 0.7 3.0 1.0 5.0 0.0 0.0\n 5 2.0 10.0 0.7 4.0 1.0 5.0 0.0 0.0\n \"\"\"\n meanColumnNames = list(columnNames)\n stderrColumnNames = [c+\" err\" for c in columnNames]\n results = DataFrame(columns=['Agents', 'Iterations', 'Noise size', 'Items per agent'] + meanColumnNames + stderrColumnNames)\n agentCount = len(agents)\n for maxNoiseSize in noiseSizes:\n for itemCount in itemCounts:\n start = timer()\n trace(\"noise=\"+str(maxNoiseSize)+\" items=\"+str(itemCount)+\" file=\"+filename)\n (means,stderrs) = avergeOverRandomProfiles(checkSingleProfile,\n agents, range(itemCount * len(agents)),\n lowMarketValue, highMarketValue, maxNoiseSize, iterations)\n if len(means)!=len(columnNames):\n raise ValueError(\"checkSingleProfile returned {} values, but columnNames has {} values\".format(len(means),len(columnNames)))\n results.loc[len(results)] = [agentCount, iterations, maxNoiseSize, itemCount] + list(means) + list(stderrs)\n results.to_csv(\"results/\"+filename+\".csv\")\n trace(\" \" + str(timer() - start)+\" seconds\")\n return results\n\n\n\ndef simulateTwice(checkSingleProfile, columnNames:list,\n agents:list, iterations:int, filename:str)->(DataFrame,DataFrame):\n \"\"\"\n Run two simulation experiments: one with variable noise and one with variable item-count.\n\n :param agents: a list of agent names.\n :param iterations: number of iterations to randomize.\n :param filename: base filename for saving the results.\n :return: Two pandas.DataFrame objects, representing the results of two experiments:\n 1. Fixed item-count and variable noise (written to file \"<filename>-noise.csv\"),\n 2. 
Fixed noise and variable item-count (written to file \"<filename>-items.csv\").\n\n >>> pandas.set_option('display.max_columns', 500)\n >>> pandas.set_option('display.width', 500)\n >>> dummyCheckSingleProfile = lambda profile: [True,False,5]\n >>> dummyColumns = [\"col1\",\"col2\",\"col3\"]\n >>> (results1,results2) = simulateTwice(dummyCheckSingleProfile, dummyColumns, [\"A\",\"B\"], 10, \"doctest-simulation\")\n >>> results1\n Agents Iterations Noise size Items per agent col1 col2 col3 col1 err col2 err col3 err\n 0 2.0 10.0 0.1 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 1 2.0 10.0 0.2 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 2 2.0 10.0 0.3 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 3 2.0 10.0 0.4 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 4 2.0 10.0 0.5 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 5 2.0 10.0 0.6 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 6 2.0 10.0 0.7 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 7 2.0 10.0 0.8 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 8 2.0 10.0 0.9 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 9 2.0 10.0 1.0 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n >>> results2\n Agents Iterations Noise size Items per agent col1 col2 col3 col1 err col2 err col3 err\n 0 2.0 10.0 0.5 2.0 1.0 0.0 5.0 0.0 0.0 0.0\n 1 2.0 10.0 0.5 3.0 1.0 0.0 5.0 0.0 0.0 0.0\n 2 2.0 10.0 0.5 4.0 1.0 0.0 5.0 0.0 0.0 0.0\n 3 2.0 10.0 0.5 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 4 2.0 10.0 0.5 6.0 1.0 0.0 5.0 0.0 0.0 0.0\n 5 2.0 10.0 0.5 7.0 1.0 0.0 5.0 0.0 0.0 0.0\n 6 2.0 10.0 0.5 8.0 1.0 0.0 5.0 0.0 0.0 0.0\n \"\"\"\n agentCount = len(agents)\n\n fixedItemCount = 5 if agentCount==2 else 4\n results1 = simulate(checkSingleProfile, columnNames,\n agents,\n itemCounts = [fixedItemCount],\n noiseSizes=[.1,.2,.3,.4,.5,.6,.7,.8,.9,1],\n\n lowMarketValue=1,\n highMarketValue=2,\n iterations = iterations,\n filename = filename+\"-noise\"\n )\n trace(results1)\n\n itemCounts = [2,3,4,5,6,7,8] if agentCount==2 else [2,3,4,5]\n results2 = simulate(checkSingleProfile, columnNames,\n agents,\n itemCounts = itemCounts,\n noiseSizes=[.5],\n\n lowMarketValue=1,\n highMarketValue=2,\n iterations = iterations,\n filename = filename+\"-items\"\n )\n trace(results2)\n\n return (results1, results2)\n\n\n\ntitleFontSize = 14\nlegendFontSize = 12\naxesFontSize = 13\nmarkerSize=12\n\n\ndef plotResults(results1:DataFrame, results2:DataFrame, columnsAndStyles:list, title:str=\"probability\", errorbars:bool=False, bbox_to_anchor=None):\n\n ### Subplot 1: by noise size\n\n ax = plt.subplot(1, 2, 1)\n agentCount = int(results1['Agents'][0])\n # iterations = int(results1['Iterations'][0])\n itemCounts1 = int(results1['Items per agent'][0])\n\n ax.set_title(title+\" vs. noise, \" + str(agentCount) + ' agents, ' + str(itemCounts1) + ' items per agent',\n fontsize=titleFontSize, weight='bold')\n ax.set_xlabel('Noise size', fontsize=axesFontSize)\n\n x_values = results1['Noise size']\n for columnName,style in columnsAndStyles:\n y_values = results1[columnName]\n if errorbars:\n yerr_values = results1[columnName + \" err\"]\n ax.errorbar(x_values, y_values, yerr=yerr_values, fmt=style, markersize=markerSize)\n else:\n ax.plot(x_values, y_values, fmt=style, markersize=markerSize)\n plt.xticks(x_values.tolist(), fontsize=axesFontSize)\n plt.yticks([0,0.2,0.4,0.6,0.8,1], fontsize=axesFontSize)\n\n\n ### Subplot 2: by number of items\n\n ax = plt.subplot(1, 2, 2, sharey=ax)\n agentCount = int(results2['Agents'][0])\n iterations = int(results2['Iterations'][0])\n maxNoise = results2['Noise size'][0]\n\n ax.set_title(title+\" vs. 
items, \" + str(agentCount) + ' agents, |noise|<=' + str(maxNoise),\n fontsize=titleFontSize, weight='bold')\n ax.set_xlabel('Items per agent', fontsize=axesFontSize)\n x_values = results2['Items per agent']\n for columnName,style in columnsAndStyles:\n y_values = results2[columnName]\n if errorbars:\n yerr_values = results2[columnName + \" err\"]\n ax.errorbar(x_values, y_values, yerr=yerr_values, fmt=style, markersize=markerSize)\n else:\n ax.plot(x_values, y_values, fmt=style, markersize=markerSize)\n plt.xticks(x_values.tolist(), fontsize=axesFontSize)\n plt.yticks([0,0.2,0.4,0.6,0.8,1], fontsize=axesFontSize)\n\n ax.legend(prop={'size': legendFontSize}, loc='center left')\n the_legend = ax.legend()\n the_legend.set_bbox_to_anchor([1.3,0.7])\n for t in the_legend.get_texts():\n t.set_text(t.get_text().replace(title,\"\"))\n\n\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n import doctest\n (failures, tests) = doctest.testmod(report=True)\n print(\"{} failures, {} tests\".format(failures, tests))\n" ]
[ [ "matplotlib.pyplot.yticks", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "pandas.DataFrame" ] ]
remifan/commplax
[ "e8ee5bc86ab0dfd90773202579237ecf42488cd0" ]
[ "tests/xop_test.py" ]
[ "from commplax import xop\nimport numpy as np\nfrom jax import random, numpy as jnp\n\n\ndef conv_input_complex(n, m):\n key1 = random.PRNGKey(0)\n key2 = random.PRNGKey(1)\n k1, k2 = random.split(key1)\n k3, k4 = random.split(key2)\n x = random.normal(k1, (n,)) + 1j * random.normal(k2, (n,))\n h = random.normal(k3, (m,)) + 1j * random.normal(k4, (m,))\n return x, h\n\n\ndef conv_input_float(n, m):\n key1 = random.PRNGKey(0)\n k1, k2 = random.split(key1)\n x = random.normal(k1, (n,))\n h = random.normal(k2, (m,))\n return x, h\n\n\ndef test_convolve():\n for n, m in zip([1, 5, 5, 5, 5, 6, 6, 6, 1000, 1000, 1001, 1001],\n [1, 1, 2, 3, 4, 2, 3, 4, 7, 8, 7, 8]):\n\n for mode in ['same', 'valid', 'full']:\n x, h = conv_input_complex(n, m)\n a = np.convolve(x, h, mode=mode)\n b = xop.convolve(x, h, mode=mode)\n assert np.allclose(a, b, rtol=2e-05), \"\\nn={}, m={}, mode={}\".format(n, m, mode)\n\n for mode in ['same', 'valid', 'full']:\n x, h = conv_input_float(n, m)\n a = np.convolve(x, h, mode=mode)\n b = xop.convolve(x, h, mode=mode)\n assert np.allclose(a, b, rtol=1e-05, atol=5e-06), \"\\nn={}, m={}, mode={}\".format(n, m, mode)\n\n\n" ]
[ [ "numpy.convolve", "numpy.allclose" ] ]
max-stack/MWP-SS-Metrics
[ "01268f2d6da716596216b04de4197e345b96c219" ]
[ "mwp_solver/module/Graph/gcn.py" ]
[ "# Code Taken from https://github.com/LYH-YF/MWPToolkit\n# -*- encoding: utf-8 -*-\n# @Author: Yihuai Lan\n# @Time: 2021/08/29 21:49:49\n# @File: gcn.py\n\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom module.Layer.graph_layers import GraphConvolution\n\n\nclass GCN(nn.Module):\n def __init__(self, in_feat_dim, nhid, out_feat_dim, dropout):\n super(GCN, self).__init__()\n self.gc1 = GraphConvolution(in_feat_dim, nhid)\n self.gc2 = GraphConvolution(nhid, out_feat_dim)\n self.dropout = dropout\n\n def forward(self, x, adj):\n \"\"\"\n Args:\n x (torch.Tensor): input features, shape [batch_size, node_num, in_feat_dim]\n adj (torch.Tensor): adjacency matrix, shape [batch_size, node_num, node_num]\n \n Returns:\n torch.Tensor: gcn_enhance_feature, shape [batch_size, node_num, out_feat_dim]\n \"\"\"\n x = F.relu(self.gc1(x, adj))\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.gc2(x, adj)\n return x" ]
[ [ "torch.nn.functional.dropout" ] ]
SubstraFoundation/distributed-learning-contributivity
[ "170ed8a660f7d7b4972c140f27782e085c4d63db" ]
[ "mplc/multi_partner_learning/basic_mpl.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nFunctions for model training and evaluation (single-partner and multi-partner cases)\n\"\"\"\n\nimport operator\nimport os\nfrom abc import ABC, abstractmethod\nfrom copy import deepcopy\nfrom timeit import default_timer as timer\n\nimport numpy as np\nimport random\nimport tensorflow as tf\nfrom loguru import logger\nfrom sklearn.metrics import confusion_matrix\nfrom tensorflow.keras import Input, Model\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras.callbacks import EarlyStopping\n\nfrom .utils import History\nfrom ..utils import project_onto_the_simplex\nfrom .. import constants\nfrom ..models import NoiseAdaptationChannel, EnsemblePredictionsModel\nfrom ..partner import Partner, PartnerMpl\n\nALLOWED_PARAMETERS = ('partners_list',\n 'epoch_count',\n 'minibatch_count',\n 'dataset',\n 'aggregation',\n 'is_early_stopping',\n 'is_save_data',\n 'save_folder',\n 'init_model_from',\n 'use_saved_weights')\n\n\nclass MultiPartnerLearning(ABC):\n name = 'abstract'\n\n def __init__(self, scenario, **kwargs):\n \"\"\"\n\n :type scenario: Scenario\n \"\"\"\n # Attributes related to the data and the model\n self.dataset = scenario.dataset\n self.partners_list = scenario.partners_list\n self.init_model_from = scenario.init_model_from\n self.use_saved_weights = scenario.use_saved_weights\n self.amounts_per_partner = scenario.amounts_per_partner\n self.val_set = scenario.val_set\n self.test_set = scenario.test_set\n\n # Attributes related to iterating at different levels\n self.epoch_count = scenario.epoch_count\n self.minibatch_count = scenario.minibatch_count\n self.is_early_stopping = scenario.is_early_stopping\n\n # Attributes to store results\n self.save_folder = scenario.save_folder\n\n # Erase the default parameters (which mostly come from the scenario) if some parameters have been specified\n self.__dict__.update((k, v) for k, v in kwargs.items() if k in ALLOWED_PARAMETERS)\n\n # Unpack dataset-related parameters\n self.val_data = (self.dataset.x_val, self.dataset.y_val)\n self.test_data = (self.dataset.x_test, self.dataset.y_test)\n self.dataset_name = self.dataset.name\n self.generate_new_model = self.dataset.generate_new_model\n\n # Initialize the model\n model = self.init_model()\n self.model_weights = model.get_weights()\n self.metrics_names = self.dataset.model_metrics_names\n\n # Initialize iterators\n self.epoch_index = 0\n self.minibatch_index = 0\n self.learning_computation_time = 0\n\n # Convert partners to Mpl partners\n for partner in self.partners_list:\n assert isinstance(partner, Partner)\n partners_list = sorted(self.partners_list, key=operator.attrgetter(\"id\"))\n logger.info(\n f\"## Preparation of model's training on partners with ids: {['#' + str(p.id) for p in partners_list]}\")\n self.partners_list = [PartnerMpl(partner, self) for partner in self.partners_list]\n\n # Attributes related to the aggregation approach\n self.aggregator = self.init_aggregation_function(scenario.aggregation)\n\n # Initialize History\n self.history = History(self)\n\n # Initialize result folder\n if self.save_folder is not None:\n if 'custom_name' in kwargs:\n self.save_folder = self.save_folder / kwargs[\"custom_name\"]\n else:\n self.save_folder = self.save_folder / 'multi_partner_learning'\n self.save_folder.mkdir(parents=True, exist_ok=False)\n\n logger.debug(\"MultiPartnerLearning object instantiated.\")\n\n def __str__(self):\n return f'{self.name}'\n\n @property\n def partners_count(self):\n return len(self.partners_list)\n\n 
def init_aggregation_function(self, aggregator):\n return aggregator(self)\n\n def build_model(self):\n return self.build_model_from_weights(self.model_weights)\n\n def build_model_from_weights(self, new_weights):\n \"\"\"Generate a new model initialized with weights passed as arguments\"\"\"\n new_model = self.generate_new_model()\n new_model.set_weights(new_weights)\n return new_model\n\n def init_model(self):\n new_model = self.generate_new_model()\n\n if self.use_saved_weights:\n logger.info(\"Init model with previous coalition model\")\n new_model.load_weights(self.init_model_from)\n else:\n logger.info(\"Init new model\")\n\n return new_model\n\n def save_final_model(self):\n \"\"\"Save final model weights\"\"\"\n\n model_folder = os.path.join(self.save_folder, 'model')\n\n if not os.path.isdir(model_folder):\n os.makedirs(model_folder)\n\n np.save(os.path.join(model_folder, self.dataset_name + '_final_weights.npy'), self.model_weights)\n\n model_to_save = self.build_model()\n model_to_save.save_weights(os.path.join(model_folder, self.dataset_name + '_final_weights.h5'))\n\n def save_data(self):\n if self.save_folder is None:\n raise ValueError(\"The path to the save folder is None, history data cannot be saved, nor model weights\")\n\n self.save_final_model()\n self.history.save_data()\n\n def log_partner_perf(self, partner_id, partner_index, history):\n for key_history in self.history.metrics:\n self.history.history[partner_id][key_history][self.epoch_index,\n self.minibatch_index] = history[key_history][-1]\n\n epoch_nb_str = f\"Epoch {str(self.epoch_index).zfill(2)}/{str(self.epoch_count - 1).zfill(2)}\"\n mb_nb_str = f\"Minibatch {str(self.minibatch_index).zfill(2)}/{str(self.minibatch_count - 1).zfill(2)}\"\n partner_id_str = f\"Partner partner_id #{partner_id} ({partner_index}/{self.partners_count - 1})\"\n val_acc_str = f\"{round(history['val_accuracy'][-1], 2)}\"\n\n logger.debug(f\"{epoch_nb_str} > {mb_nb_str} > {partner_id_str} > val_acc: {val_acc_str}\")\n\n def eval_and_log_model_val_perf(self):\n\n model = self.build_model()\n\n if self.val_set == 'global':\n hist = model.evaluate(self.val_data[0],\n self.val_data[1],\n batch_size=constants.DEFAULT_BATCH_SIZE,\n verbose=0,\n )\n elif self.val_set == 'local':\n hist = [0.0, 0.0]\n for p in self.partners_list:\n hist_partner = model.evaluate(p.x_val,\n p.y_val,\n batch_size=constants.DEFAULT_BATCH_SIZE,\n verbose=0,\n )\n hist[0] += hist_partner[0] / self.partners_count\n hist[1] += hist_partner[1] / self.partners_count\n else:\n raise ValueError(\"validation set should be 'local' or 'global', not {self.val_set}\")\n\n self.history.history['mpl_model']['val_loss'][self.epoch_index, self.minibatch_index] = hist[0]\n self.history.history['mpl_model']['val_accuracy'][self.epoch_index, self.minibatch_index] = hist[1]\n\n if self.minibatch_index >= self.minibatch_count - 1:\n epoch_nb_str = f\"{str(self.epoch_index).zfill(2)}/{str(self.epoch_count - 1).zfill(2)}\"\n logger.info(f\" Model evaluation at the end of the epoch \"\n f\"{epoch_nb_str}: \"\n f\"{['%.3f' % elem for elem in hist]}\")\n\n def eval_and_log_final_model_test_perf(self):\n logger.info(\"### Evaluating model on test data:\")\n model = self.build_model()\n if self.test_set == 'global':\n hist = model.evaluate(self.test_data[0],\n self.test_data[1],\n batch_size=constants.DEFAULT_BATCH_SIZE,\n verbose=0,\n )\n elif self.test_set == 'local':\n hist = [0.0, 0.0]\n for p in self.partners_list:\n hist_partner = model.evaluate(p.x_test,\n p.y_test,\n 
batch_size=constants.DEFAULT_BATCH_SIZE,\n verbose=0,\n )\n hist[0] += hist_partner[0] / self.partners_count\n hist[1] += hist_partner[1] / self.partners_count\n else:\n raise ValueError(\"test set should be 'local' or 'global', not {self.val_set}\")\n\n self.history.score = hist[1]\n self.history.nb_epochs_done = self.epoch_index + 1\n logger.info(f\" Model metrics names: {self.metrics_names}\")\n logger.info(f\" Model metrics values: {['%.3f' % elem for elem in hist]}\")\n\n def split_in_minibatches(self):\n \"\"\"Split the dataset passed as argument in mini-batches\"\"\"\n\n for partner in self.partners_list:\n partner.split_minibatches()\n\n def early_stop(self):\n logger.debug(\" Checking if early stopping criteria are met:\")\n if self.is_early_stopping:\n # Early stopping parameters\n if (\n self.epoch_index >= constants.PATIENCE\n and self.history.history['mpl_model']['val_loss'][self.epoch_index,\n self.minibatch_index] >\n self.history.history['mpl_model']['val_loss'][self.epoch_index - constants.PATIENCE,\n self.minibatch_index]\n ):\n logger.debug(\" -> Early stopping criteria are met, stopping here.\")\n return True\n else:\n logger.debug(\" -> Early stopping criteria are not met, continuing with training.\")\n else:\n return False\n\n def fit(self):\n \"\"\"Return the score on test data of a final aggregated model trained in a federated way on each partner\"\"\"\n\n start = timer()\n # Train model (iterate for each epoch and mini-batch)\n while self.epoch_index < self.epoch_count:\n\n self.fit_epoch() # perform an epoch on the self.model\n\n if self.early_stop():\n break\n self.epoch_index += 1\n\n # After last epoch or if early stopping was triggered, evaluate model on the global testset\n self.eval_and_log_final_model_test_perf()\n\n end = timer()\n self.learning_computation_time = end - start\n logger.info(f\"Training and evaluation on multiple partners: \"\n f\"done. 
({np.round(self.learning_computation_time, 3)} seconds)\")\n if self.save_folder is not None:\n self.save_data() # Save the model weights and the history data\n\n @abstractmethod\n def fit_epoch(self):\n while self.minibatch_index < self.minibatch_count:\n self.fit_minibatch()\n self.minibatch_index += 1\n self.eval_and_log_model_val_perf()\n\n @abstractmethod\n def fit_minibatch(self):\n pass\n\n\nclass SinglePartnerLearning(MultiPartnerLearning):\n name = 'Single Partner learning'\n\n def __init__(self, scenario, **kwargs):\n super(SinglePartnerLearning, self).__init__(scenario, **kwargs)\n if self.partners_count != 1:\n raise ValueError('More than one partner is provided')\n self.partner = self.partners_list[0]\n\n def fit(self):\n \"\"\"Return the score on test data of a model trained on a single partner\"\"\"\n\n start = timer()\n logger.info(f\"## Training and evaluating model on partner with partner_id #{self.partner.id}\")\n\n # Set if early stopping if needed\n cb = []\n es = None\n if self.is_early_stopping:\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=constants.PATIENCE)\n cb.append(es)\n\n # Train model\n logger.info(\" Training model...\")\n model = self.build_model()\n if self.val_set == 'global':\n history = model.fit(self.partner.x_train,\n self.partner.y_train,\n batch_size=self.partner.batch_size,\n epochs=self.epoch_count,\n verbose=0,\n validation_data=self.val_data,\n callbacks=cb)\n elif self.val_set == 'local':\n history = model.fit(self.partner.x_train,\n self.partner.y_train,\n batch_size=self.partner.batch_size,\n epochs=self.epoch_count,\n verbose=0,\n validation_data=(self.partner.x_val, self.partner.y_val),\n callbacks=cb)\n else:\n raise ValueError(\"validation set should be 'local' or 'global', not {self.val_set}\")\n\n self.model_weights = model.get_weights()\n self.log_partner_perf(self.partner.id, 0, history.history)\n del self.history.history['mpl_model']\n # Evaluate trained model on test data\n self.eval_and_log_final_model_test_perf()\n self.history.nb_epochs_done = (es.stopped_epoch + 1) if es.stopped_epoch != 0 else self.epoch_count\n\n end = timer()\n self.learning_computation_time = end - start\n\n def fit_epoch(self):\n pass\n\n def fit_minibatch(self):\n pass\n\n\nclass FederatedAverageLearning(MultiPartnerLearning):\n name = 'Federated averaging'\n\n def __init__(self, scenario, **kwargs):\n # First, if only one partner, fall back to dedicated single partner function\n super(FederatedAverageLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n self.minibatch_index = i\n self.fit_minibatch()\n\n # At the end of each minibatch,aggregate the models\n self.model_weights = self.aggregator.aggregate_model_weights()\n self.minibatch_index = 0\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a federated averaging approach\"\"\"\n\n logger.debug(\"Start new fedavg collaborative round ...\")\n\n # Starting model for each partner is the aggregated model from the previous mini-batch iteration\n logger.info(f\"(fedavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init each partner's models with a copy of the global model\")\n\n for partner in self.partners_list:\n partner.model_weights = self.model_weights\n\n # Evaluate and store accuracy of mini-batch start model\n self.eval_and_log_model_val_perf()\n\n # Iterate over partners for training each individual model\n for partner_index, partner in enumerate(self.partners_list):\n # Reference the partner's model\n partner_model = partner.build_model()\n\n # Train on partner local data set\n if self.val_set == 'global':\n history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index],\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=self.val_data)\n elif self.val_set == 'local':\n history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index],\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=(partner.x_val, partner.y_val))\n else:\n raise ValueError(\"validation set should be 'local' or 'global', not {self.val_set}\")\n\n # Log results of the round\n self.log_partner_perf(partner.id, partner_index, history.history)\n\n # Update the partner's model in the models' list\n partner.model_weights = partner_model.get_weights()\n\n logger.debug(\"End of fedavg collaborative round.\")\n\n\nclass DistributionallyRobustFederatedAveragingLearning(MultiPartnerLearning):\n \"\"\"\n - This class implements the Distributionally Robust Federated Averaging (DRFA) Algorithm,\n only a subset of partners are chosen to participate in a given collaborative\n learning round. based on a global mixing parameter called lambda\n - Lambda is updated at the end of each collaborative learning round using its own update rule\n - DRFA is considered a framework under which we can implement other FL algorithms such as FedAvg\n - Link to the paper : https://arxiv.org/abs/2102.12660\n \"\"\"\n name = \"Distributionally Robust Federated Averaging\"\n\n def __init__(self, scenario, **kwargs):\n super(DistributionallyRobustFederatedAveragingLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n self.active_partners_count = scenario.active_partners_count\n\n self.lambda_vector = self.init_lambda()\n self.active_partners_list = list()\n self.update_active_partners_list()\n\n self.local_steps = scenario.gradient_updates_per_pass_count\n self.partners_training_data = {}\n self.partners_participation = self.initialize_participation_dict()\n self.lambda_learning_rate = 8e-3\n\n self.local_steps_index = 0\n self.local_steps_index_t = 0\n self.global_model_at_index_t = None\n self.model_weights_at_index_t = list()\n self.loss_for_model_at_index_t = np.zeros(self.partners_count)\n\n self.subset_u_partners = list()\n self.loss_vector_v = list()\n\n def fit_epoch(self):\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # convert partners training data into tf Dataset, reference: fast_mpl\n for partner_id, partner in enumerate(self.partners_list):\n self.partners_training_data[partner.id] = list()\n for minibatch_index in range(self.minibatch_count):\n # convert training data\n data_train = tf.data.Dataset.from_tensor_slices((partner.minibatched_x_train[minibatch_index],\n partner.minibatched_y_train[minibatch_index]))\n data_train = data_train.shuffle(len(partner.minibatched_x_train[minibatch_index]))\n data_train = data_train.batch(partner.batch_size)\n data_train = data_train.prefetch(1)\n self.partners_training_data[partner.id].append(data_train)\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n self.minibatch_index = i\n\n self.local_steps_index = 0\n self.local_steps_index_t = np.random.randint(0, self.local_steps - 1)\n\n logger.info(\n f\"Active partner in this round \"\n f\"{['#'+str(active_partner.id) for active_partner in self.active_partners_list]} \"\n f\"according to lambda vector > {self.lambda_vector}\")\n logger.info(f\"Local step index t > {self.local_steps_index_t}\")\n\n self.fit_minibatch()\n\n # update partner participations\n self.partners_participation[self.epoch_index][self.minibatch_index][[p.id for p\n in self.active_partners_list]] = 1\n\n self.update_lambda()\n self.update_active_partners_list()\n self.log_partners_participation_rate()\n\n self.minibatch_index = 0\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a distributionally robust federated averaging approach\"\"\"\n\n # Starting model for each partner is the aggregated model from the previous mini-batch iteration\n logger.info(f\"(drfa) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init each partner's models with a copy of the global model\")\n\n for partner in self.partners_list:\n partner.model_weights = self.model_weights\n\n # Evaluate and store accuracy of mini-batch start model\n self.eval_and_log_model_val_perf()\n\n # Iterate over partners for training\n for partner_index, partner in enumerate(self.active_partners_list):\n partner_model = partner.build_model()\n # loop through each partner's minibatch\n minibatched_x_y = self.partners_training_data[partner.id][self.minibatch_index]\n for idx, batch_x_y in enumerate(minibatched_x_y):\n with tf.GradientTape() as tape:\n p_pred = partner_model(batch_x_y[0])\n loss = partner_model.compiled_loss(batch_x_y[1], p_pred)\n\n partner_model.optimizer.minimize(loss, partner_model.trainable_weights, tape=tape)\n\n self.local_steps_index += 1\n if self.local_steps_index == self.local_steps_index_t:\n # save model weights for each partner at local step t\n 
self.model_weights_at_index_t.append(partner.model_weights)\n\n partner.model_weights = partner_model.get_weights()\n self.local_steps_index = 0\n\n # aggregate final global model weights\n self.model_weights = self.aggregate_model_weights(self.active_partners_list)\n\n # build the model for each partner using weights gathered at index t\n for active_partner, weights_t in zip(self.active_partners_list, self.model_weights_at_index_t):\n active_partner.model_weights = weights_t\n\n # aggregate global model weights at index t\n self.global_model_at_index_t = self.aggregate_model_weights(self.active_partners_list)\n\n # sample a new subset of partners of size active_partners_count\n subset_index = random.sample(range(self.partners_count), self.active_partners_count)\n self.subset_u_partners = [self.partners_list[index] for index in subset_index]\n logger.info(\n f\"Subset of partners chosen for lambda update \"\n f\"{['#'+ str(partner.id) for partner in self.subset_u_partners]}\")\n\n # compute losses over a random batch using the global model at index t\n for partner, index in zip(self.subset_u_partners, subset_index):\n random_minibatch_index = np.random.randint(0, self.minibatch_count - 1)\n random_minibatch = self.partners_training_data[partner.id][random_minibatch_index]\n random_batch_index = np.random.randint(0, len(random_minibatch) - 1)\n random_batch = list(random_minibatch)[random_batch_index]\n partner_model = self.build_model_from_weights(self.global_model_at_index_t)\n loss = partner_model.compiled_loss(random_batch[1], partner_model(random_batch[0]))\n # compute (n/m)*loss and add it to the loss vector\n # n is the total number of partners, m is the number of active partners\n self.loss_for_model_at_index_t[index] = \\\n ((self.partners_count / self.active_partners_count) * np.mean(loss.numpy()))\n\n def init_lambda(self):\n \"\"\"\n - initialize lambda vector according to each partner's dataset size\n - this is a probability vector of size partners_count\n \"\"\"\n return np.array(self.amounts_per_partner)\n\n def update_lambda(self):\n \"\"\"\n The update rule for lambda is : lambda_vector(i) =\n Projection(lambda_vector(i-1) + (local_step_index_t * lambda_learning_rate * local_losses_at_index_t))\n \"\"\"\n self.lambda_vector += (self.local_steps_index_t * self.lambda_learning_rate * self.loss_for_model_at_index_t)\n self.lambda_vector = project_onto_the_simplex(self.lambda_vector)\n\n # The projection can produce zero probabilities for certain partners which prevents them from\n # participating in the training. 
To avoid this, we assign 1e-3 to each probability smaller than this value.\n if any(self.lambda_vector < 1e-3):\n self.lambda_vector[self.lambda_vector < 1e-3] = 1e-3\n # normalize the probability vector\n self.lambda_vector = self.lambda_vector / np.sum(self.lambda_vector)\n\n def update_active_partners_list(self):\n \"\"\"\n Update the active partners list according to lambda vector\n \"\"\"\n active_partners_indices = (-self.lambda_vector).argsort()[:self.active_partners_count]\n self.active_partners_list = [self.partners_list[index] for index in active_partners_indices]\n\n def initialize_participation_dict(self):\n participation = {}\n for epoch_index in range(self.epoch_count):\n participation[epoch_index] = {}\n for minibatch_index in range(self.minibatch_count):\n participation[epoch_index][minibatch_index] = np.zeros(self.partners_count)\n return participation\n\n def log_partners_participation_rate(self):\n epoch_participation_vector = np.zeros(self.partners_count)\n percentages = []\n for minibatch_index, vect in self.partners_participation[self.epoch_index].items():\n epoch_participation_vector += vect\n percentages = [str(np.round(p_v / self.minibatch_count, 2) * 100) + ' %'\n for p_v in list(epoch_participation_vector)]\n logger.info(f\"Partners {['#' + str(p.id) for p in self.partners_list]} \"\n f\"have the following participation rates, respectively : \"\n f\"{percentages} \"\n f\"at the end of Epoch > {self.epoch_index}\")\n\n final_participation_vector = np.zeros(self.partners_count)\n if self.epoch_index == self.epoch_count - 1:\n for epoch_index in range(self.epoch_count):\n for minibatch_index, vect in self.partners_participation[epoch_index].items():\n final_participation_vector += vect\n percentages = [str(np.round(f_p_v / (self.minibatch_count * self.epoch_count), 2) * 100) + '%'\n for f_p_v in list(final_participation_vector)]\n logger.info(f\"Partners {['#' + str(p.id) for p in self.partners_list]} \"\n f\"have the following participation rates : \"\n f\"{percentages} \"\n f\"during the training\")\n\n @staticmethod\n def aggregate_model_weights(partners_list):\n \"\"\" This method is identical to the one in the aggregator class with few modifications.\n I couldn't use the original aggregator method since it operates on the entire list of partners and\n DRFA requires model aggregation over a subset of partners list only\n \"\"\"\n aggregation_weights = np.ones(len(partners_list), dtype='float32')\n weights_per_layer = list(zip(*[partner.model_weights for partner in partners_list]))\n new_weights = list()\n\n for weights_for_layer in weights_per_layer:\n avg_weights_for_layer = np.average(\n np.array(weights_for_layer), axis=0, weights=aggregation_weights\n )\n new_weights.append(avg_weights_for_layer)\n\n return new_weights\n\n\nclass SequentialLearning(MultiPartnerLearning): # seq-pure\n name = 'Sequential learning'\n\n def __init__(self, scenario, **kwargs):\n super(SequentialLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n self.minibatch_index = i\n logger.info(f\"(seq-pure) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}\")\n self.fit_minibatch()\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a sequential averaging approach\"\"\"\n\n logger.debug(\"Start new seq collaborative round ...\")\n\n model_for_round = self.build_model()\n\n # Evaluate and store accuracy of mini-batch start model\n self.eval_and_log_model_val_perf()\n # Iterate over partners for training each individual model\n shuffled_indexes = np.random.permutation(self.partners_count)\n logger.debug(f\"(seq) Shuffled order for this seqavg collaborative round: {shuffled_indexes}\")\n for idx, partner_index in enumerate(shuffled_indexes):\n partner = self.partners_list[partner_index]\n\n # Train on partner local data set\n if self.val_set == 'global':\n history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index],\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=self.val_data)\n elif self.val_set == 'local':\n history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index],\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=(partner.x_val, partner.y_val))\n else:\n raise ValueError(\"validation set should be 'local' or 'global', not {self.val_set}\")\n\n # Log results\n self.log_partner_perf(partner.id, idx, history.history)\n\n # Save the partner's model in the models' list\n partner.model_weights = model_for_round.get_weights()\n self.model_weights = model_for_round.get_weights()\n\n logger.debug(\"End of seq collaborative round.\")\n\n\nclass SequentialWithFinalAggLearning(SequentialLearning):\n name = 'Sequential learning with final aggregation'\n\n def __init__(self, scenario, **kwargs):\n super(SequentialWithFinalAggLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n logger.info(f\"(seq-final-agg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init model with a copy of the global model\")\n self.minibatch_index = i\n self.fit_minibatch()\n\n # At the end of each epoch, aggregate the models\n self.model_weights = self.aggregator.aggregate_model_weights()\n\n\nclass SequentialAverageLearning(SequentialLearning):\n name = 'Sequential averaged learning'\n\n def __init__(self, scenario, **kwargs):\n super(SequentialAverageLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n logger.info(f\"(seqavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init model with a copy of the global model\")\n self.minibatch_index = i\n self.fit_minibatch()\n\n # At the end of each minibatch, aggregate the models\n self.model_weights = self.aggregator.aggregate_model_weights()\n\n\nclass FedAvgSmodel(FederatedAverageLearning):\n name = 'Federated learning with label flipping'\n\n def __init__(self, scenario, pretrain_epochs=0, epsilon=0.5, **kwargs):\n super(FedAvgSmodel, self).__init__(scenario, **kwargs)\n self.pretrain_epochs = pretrain_epochs\n self.epsilon = epsilon\n if pretrain_epochs > 0:\n self.pretrain_mpl = FederatedAverageLearning(scenario=scenario,\n epoch_count=self.pretrain_epochs,\n is_save_data=False)\n\n def fit(self):\n if self.pretrain_epochs > 0:\n logger.info('Start pre-train...')\n self.pretrain_mpl.fit()\n pretrain_model = self.pretrain_mpl.build_model()\n for p in self.partners_list:\n confusion = confusion_matrix(np.argmax(p.y_train, axis=1),\n np.argmax(pretrain_model.predict(p.x_train), axis=1),\n normalize='pred')\n p.noise_layer_weights = [np.log(confusion.T + 1e-8)]\n self.model_weights[:-1] = self.pretrain_mpl.model_weights[:-1]\n else:\n for p in self.partners_list:\n confusion = np.identity(10) * (1 - self.epsilon) + (self.epsilon / 10)\n p.noise_layer_weights = [np.log(confusion + 1e-8)]\n super(FedAvgSmodel, self).fit()\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a S-Model federated averaging approach\"\"\"\n\n logger.debug(\"Start new S-Model collaborative round ...\")\n\n # Starting model for each partner is the aggregated model from the previous mini-batch iteration\n logger.info(f\"(S-Model) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init each partner's models with a copy of the global model\")\n\n for partner in self.partners_list:\n partner.model_weights = self.model_weights\n\n # Evaluate and store accuracy of mini-batch start model\n self.eval_and_log_model_val_perf()\n\n # Iterate over partners for training each individual model\n for partner_index, partner in enumerate(self.partners_list):\n # Reference the partner's model\n partner_model = partner.build_model()\n x_batch = partner.minibatched_x_train[self.minibatch_index]\n y_batch = partner.minibatched_y_train[self.minibatch_index]\n\n model_input = Input(shape=self.dataset.input_shape)\n x = partner_model(model_input)\n outputs = NoiseAdaptationChannel(weights=partner.noise_layer_weights, name='s-model')(x)\n full_model = Model(inputs=model_input, outputs=outputs, name=f\"full_model_partner_{partner_index}\")\n\n full_model.compile(\n loss=partner_model.loss,\n optimizer=partner_model.optimizer,\n metrics='accuracy',\n )\n\n # Train on partner local data set\n history = full_model.fit(x_batch,\n y_batch,\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=self.val_data)\n\n # Log results of the round\n self.log_partner_perf(partner.id, partner_index, history.history)\n\n # Update the partner's model in the models' list\n partner.noise_layer_weights = full_model.get_layer('s-model').get_weights()\n partner.model_weights = partner_model.get_weights()\n\n logger.debug(\"End of S-Model collaborative 
round.\")\n\n\nclass FederatedGradients(MultiPartnerLearning):\n def __init__(self, scenario, **kwargs):\n super(FederatedGradients, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')\n self.model = self.build_model()\n\n def fit_epoch(self):\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n self.minibatch_index = i\n self.fit_minibatch()\n\n self.minibatch_index = 0\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a federated averaging approach\"\"\"\n\n logger.debug(\"Start new gradients fusion collaborative round ...\")\n\n # Starting model for each partner is the aggregated model from the previous mini-batch iteration\n logger.info(f\"(gradient fusion) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init each partner's models with a copy of the global model\")\n\n for partner in self.partners_list:\n # Evaluate and store accuracy of mini-batch start model\n partner.model_weights = self.model_weights\n self.eval_and_log_model_val_perf()\n\n # Iterate over partners for training each individual model\n for partner_index, partner in enumerate(self.partners_list):\n with tf.GradientTape() as tape:\n loss = self.model.loss(partner.minibatched_y_train[self.minibatch_index],\n self.model(partner.minibatched_x_train[self.minibatch_index]))\n partner.grads = tape.gradient(loss, self.model.trainable_weights)\n\n global_grad = self.aggregator.aggregate_gradients()\n self.model.optimizer.apply_gradients(zip(global_grad, self.model.trainable_weights))\n self.model_weights = self.model.get_weights()\n\n for partner_index, partner in enumerate(self.partners_list):\n val_history = self.model.evaluate(self.val_data[0], self.val_data[1], verbose=False)\n history = self.model.evaluate(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index], verbose=False)\n history = {\n \"loss\": [history[0]],\n 'accuracy': [history[1]],\n 'val_loss': [val_history[0]],\n 'val_accuracy': [val_history[1]]\n }\n\n # Log results of the round\n self.log_partner_perf(partner.id, partner_index, history)\n\n logger.debug(\"End of grads-fusion collaborative round.\")\n\n\nclass EnsemblePredictions(MultiPartnerLearning):\n \"\"\"\n Ensemble (average) prediction of several input models\n This approach can only be used with the EnsemblePredictionsModel\n \"\"\"\n\n def __init__(self, scenario, **kwargs):\n super(EnsemblePredictions, self).__init__(scenario, **kwargs)\n\n # First, if only one partner, fall back to dedicated single partner function\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n\n partner_model_list = [self.dataset.generate_new_model() for _ in range(self.partners_count)]\n self.model = EnsemblePredictionsModel(partner_model_list)\n\n for partner in self.partners_list:\n partner.model_weights = deepcopy(self.model_weights)\n print(id(partner.model_weights))\n\n logger.info(\"Init EnsemblePredictionsModel model\")\n\n def build_model(self):\n partner_model_list = [partner.build_model() for partner in self.partners_list]\n return EnsemblePredictionsModel(partner_model_list)\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n self.eval_and_log_model_val_perf()\n\n for partner_index, partner in enumerate(self.partners_list):\n\n partner_model = partner.build_model()\n\n # Train on partner local data set\n history = partner_model.fit(partner.x_train,\n partner.y_train,\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=self.val_data)\n\n # Log results of the round\n self.log_partner_perf(partner.id, partner_index, history.history)\n\n # Update the partner's model in the models' list\n partner.model_weights = partner_model.get_weights()\n\n def fit_minibatch(self):\n pass\n" ]
[ [ "numpy.log", "tensorflow.keras.Input", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.Model", "numpy.round", "tensorflow.GradientTape", "numpy.random.permutation", "tensorflow.keras.backend.clear_session", "numpy.argmax", "numpy.identity", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.random.randint" ] ]
aws-samples/amazon-sagemaker-predict-training-resource-usage
[ "a2926c7b5727197e2123679ddc8a6993425df2ec" ]
[ "Canary_Training/quick_start_example_notebooks/3_bert_fine_tuning_canary_train_example/code/.ipynb_checkpoints/train_deploy-checkpoint.py" ]
[ "import argparse\nimport json\nimport logging\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.distributed as dist\nimport torch.utils.data\nimport torch.utils.data.distributed\nfrom torch.utils.data import DataLoader, RandomSampler, TensorDataset\nfrom transformers import AdamW, BertForSequenceClassification, BertTokenizer\nimport glob\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\nMAX_LEN = 64 # this is the max length of the sentence\n\nprint(\"Loading BERT tokenizer...\")\ntokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\n\ndef flat_accuracy(preds, labels):\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\n\ndef _get_train_data_loader(batch_size, training_dir, is_distributed):\n logger.info(\"Get train data loader\")\n\n dataset = pd.concat(map(pd.read_csv, glob.glob(os.path.join(training_dir, \"*.csv\")))) #current dir and all csvs\n sentences = dataset.sentence.values\n labels = dataset.label.values\n\n input_ids = []\n for sent in sentences:\n encoded_sent = tokenizer.encode(sent, add_special_tokens=True)\n input_ids.append(encoded_sent)\n\n # pad shorter sentences\n input_ids_padded = []\n for i in input_ids:\n while len(i) < MAX_LEN:\n i.append(0)\n input_ids_padded.append(i)\n input_ids = input_ids_padded\n\n # mask; 0: added, 1: otherwise\n attention_masks = []\n # For each sentence...\n for sent in input_ids:\n att_mask = [int(token_id > 0) for token_id in sent]\n attention_masks.append(att_mask)\n\n # convert to PyTorch data types.\n train_inputs = torch.tensor(input_ids)\n train_labels = torch.tensor(labels)\n train_masks = torch.tensor(attention_masks)\n\n train_data = TensorDataset(train_inputs, train_masks, train_labels)\n if is_distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n else:\n train_sampler = RandomSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)\n\n return train_dataloader\n\n\ndef _get_test_data_loader(test_batch_size, training_dir):\n dataset = pd.concat(map(pd.read_csv, glob.glob(os.path.join(training_dir, \"*.csv\")))) #current dir and all csvs\n sentences = dataset.sentence.values\n labels = dataset.label.values\n\n input_ids = []\n for sent in sentences:\n encoded_sent = tokenizer.encode(sent, add_special_tokens=True)\n input_ids.append(encoded_sent)\n\n # pad shorter sentences\n input_ids_padded = []\n for i in input_ids:\n while len(i) < MAX_LEN:\n i.append(0)\n input_ids_padded.append(i)\n input_ids = input_ids_padded\n\n # mask; 0: added, 1: otherwise\n attention_masks = []\n # For each sentence...\n for sent in input_ids:\n att_mask = [int(token_id > 0) for token_id in sent]\n attention_masks.append(att_mask)\n\n # convert to PyTorch data types.\n train_inputs = torch.tensor(input_ids)\n train_labels = torch.tensor(labels)\n train_masks = torch.tensor(attention_masks)\n\n train_data = TensorDataset(train_inputs, train_masks, train_labels)\n train_sampler = RandomSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=test_batch_size)\n\n return train_dataloader\n\n\ndef train(args):\n is_distributed = len(args.hosts) > 1 and args.backend is not None\n logger.debug(\"Distributed training - %s\", is_distributed)\n use_cuda = args.num_gpus > 0\n 
logger.debug(\"Number of gpus available - %d\", args.num_gpus)\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n if is_distributed:\n # Initialize the distributed environment.\n world_size = len(args.hosts)\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n host_rank = args.hosts.index(args.current_host)\n os.environ[\"RANK\"] = str(host_rank)\n dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)\n logger.info(\n \"Initialized the distributed environment: '%s' backend on %d nodes. \"\n \"Current host rank is %d. Number of gpus: %d\",\n args.backend, dist.get_world_size(),\n dist.get_rank(), args.num_gpus\n )\n\n # set the seed for generating random numbers\n torch.manual_seed(args.seed)\n if use_cuda:\n torch.cuda.manual_seed(args.seed)\n\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir, is_distributed)\n test_loader = _get_test_data_loader(args.test_batch_size, args.test)\n\n logger.debug(\n \"Processes {}/{} ({:.0f}%) of train data\".format(\n len(train_loader.sampler),\n len(train_loader.dataset),\n 100.0 * len(train_loader.sampler) / len(train_loader.dataset),\n )\n )\n\n logger.debug(\n \"Processes {}/{} ({:.0f}%) of test data\".format(\n len(test_loader.sampler),\n len(test_loader.dataset),\n 100.0 * len(test_loader.sampler) / len(test_loader.dataset),\n )\n )\n\n logger.info(\"Starting BertForSequenceClassification\\n\")\n model = BertForSequenceClassification.from_pretrained(\n \"bert-base-uncased\", # Use the 12-layer BERT model, with an uncased vocab.\n num_labels=args.num_labels, # The number of output labels--2 for binary classification.\n output_attentions=False, # Whether the model returns attentions weights.\n output_hidden_states=False, # Whether the model returns all hidden-states.\n )\n\n model = model.to(device)\n if is_distributed and use_cuda:\n # multi-machine multi-gpu case\n model = torch.nn.parallel.DistributedDataParallel(model)\n else:\n # single-machine multi-gpu case or single-machine or multi-machine cpu case\n model = torch.nn.DataParallel(model)\n optimizer = AdamW(\n model.parameters(),\n lr=2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5\n eps=1e-8, # args.adam_epsilon - default is 1e-8.\n )\n\n logger.info(\"End of defining BertForSequenceClassification\\n\")\n for epoch in range(1, args.epochs + 1):\n total_loss = 0\n model.train()\n for step, batch in enumerate(train_loader):\n b_input_ids = batch[0].to(device)\n b_input_mask = batch[1].to(device)\n b_labels = batch[2].to(device)\n model.zero_grad()\n\n outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)\n loss = outputs[0]\n\n total_loss += loss.item()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n # modified based on their gradients, the learning rate, etc.\n optimizer.step()\n if step % args.log_interval == 0:\n logger.info(\n \"Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}\".format(\n epoch,\n step * len(batch[0]),\n len(train_loader.sampler),\n 100.0 * step / len(train_loader),\n loss.item(),\n )\n )\n\n logger.info(\"Average training loss: %f\\n\", total_loss / len(train_loader))\n\n test(model, test_loader, device)\n\n logger.info(\"Saving tuned model.\")\n model_2_save = model.module if hasattr(model, \"module\") else model\n model_2_save.save_pretrained(save_directory=args.model_dir)\n\n\ndef test(model, test_loader, device):\n model.eval()\n _, eval_accuracy = 0, 0\n\n with torch.no_grad():\n for batch in test_loader:\n b_input_ids = 
b_input_ids = batch[0].to(device)\n b_input_mask = batch[1].to(device)\n b_labels = batch[2].to(device)\n\n outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)\n logits = outputs[0]\n logits = logits.detach().cpu().numpy()\n label_ids = b_labels.to(\"cpu\").numpy()\n tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n eval_accuracy += tmp_eval_accuracy\n\n logger.info(\"Test set: Accuracy: %f\\n\", eval_accuracy / len(test_loader))\n\n\ndef model_fn(model_dir):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"================ objects in model_dir ===================\")\n print(os.listdir(model_dir))\n model = BertForSequenceClassification.from_pretrained(model_dir)\n print(\"================ model loaded ===========================\")\n return model.to(device)\n\n\ndef input_fn(request_body, request_content_type):\n \"\"\"Deserialize a JSON request body into padded input ids and an attention mask.\"\"\"\n if request_content_type == \"application/json\":\n data = json.loads(request_body)\n print(\"================ input sentences ===============\")\n print(data)\n \n if isinstance(data, str):\n data = [data]\n elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], str):\n pass\n else:\n raise ValueError(\"Unsupported input type. Input type can be a string or a non-empty list. \\\n I got {}\".format(data))\n \n #encoded = [tokenizer.encode(x, add_special_tokens=True) for x in data]\n #encoded = tokenizer(data, add_special_tokens=True) \n \n # for backward compatibility use the following way to encode \n # https://github.com/huggingface/transformers/issues/5580\n input_ids = [tokenizer.encode(x, add_special_tokens=True) for x in data]\n \n print(\"================ encoded sentences ==============\")\n print(input_ids)\n\n # pad shorter sentences\n padded = torch.zeros(len(input_ids), MAX_LEN) \n for i, p in enumerate(input_ids):\n padded[i, :len(p)] = torch.tensor(p)\n \n # create mask\n mask = (padded != 0)\n \n print(\"================= padded input and attention mask ================\")\n print(padded, '\\n', mask)\n\n return padded.long(), mask.long()\n raise ValueError(\"Unsupported content type: {}\".format(request_content_type))\n \n\ndef predict_fn(input_data, model):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n model.eval()\n\n input_id, input_mask = input_data\n input_id = input_id.to(device)\n input_mask = input_mask.to(device)\n print(\"============== encoded data =================\")\n print(input_id, input_mask)\n with torch.no_grad():\n y = model(input_id, attention_mask=input_mask)[0]\n print(\"=============== inference result =================\")\n print(y)\n return y\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Data and model checkpoints directories\n parser.add_argument(\n \"--num_labels\", type=int, default=2, metavar=\"N\", help=\"number of output labels for classification (default: 2)\"\n )\n\n parser.add_argument(\n \"--batch-size\", type=int, default=64, metavar=\"N\", help=\"input batch size for training (default: 64)\"\n )\n parser.add_argument(\n \"--test-batch-size\", type=int, default=1000, metavar=\"N\", help=\"input batch size for testing (default: 1000)\"\n )\n parser.add_argument(\"--epochs\", type=int, default=2, metavar=\"N\", help=\"number of epochs to train (default: 2)\")\n parser.add_argument(\"--lr\", type=float, default=0.01, metavar=\"LR\", help=\"learning rate (default: 0.01)\")\n parser.add_argument(\"--momentum\", type=float, default=0.5, metavar=\"M\", 
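\n # NOTE: --lr and --momentum are parsed but not used by this script;\n # the AdamW optimizer above is configured with fixed hyperparameters.\n 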
help=\"SGD momentum (default: 0.5)\")\n parser.add_argument(\"--seed\", type=int, default=1, metavar=\"S\", help=\"random seed (default: 1)\")\n parser.add_argument(\n \"--log-interval\",\n type=int,\n default=50,\n metavar=\"N\",\n help=\"how many batches to wait before logging training status\",\n )\n parser.add_argument(\n \"--backend\",\n type=str,\n default=None,\n help=\"backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)\",\n )\n\n # Container environment\n parser.add_argument(\"--hosts\", type=list, default=json.loads(os.environ[\"SM_HOSTS\"]))\n parser.add_argument(\"--current-host\", type=str, default=os.environ[\"SM_CURRENT_HOST\"])\n parser.add_argument(\"--model-dir\", type=str, default=os.environ[\"SM_MODEL_DIR\"])\n parser.add_argument(\"--data-dir\", type=str, default=os.environ[\"SM_CHANNEL_TRAIN\"])\n parser.add_argument(\"--test\", type=str, default=os.environ[\"SM_CHANNEL_TESTING\"])\n parser.add_argument(\"--num-gpus\", type=int, default=os.environ[\"SM_NUM_GPUS\"])\n\n train(parser.parse_args())\n" ]
[ [ "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.cuda.manual_seed", "torch.manual_seed", "torch.utils.data.TensorDataset", "torch.utils.data.RandomSampler", "torch.utils.data.DataLoader", "torch.tensor", "numpy.argmax", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.distributed.get_rank", "torch.nn.DataParallel", "numpy.sum", "torch.nn.parallel.DistributedDataParallel", "torch.distributed.get_world_size" ] ]
dabreegster/RAMP-UA
[ "04b7473aed441080ee10b6f68eb8b9135dac6879" ]
[ "tests/opencl/test_summary.py" ]
[ "import numpy as np\r\n\r\nfrom microsim.opencl.ramp.summary import Summary\r\nfrom microsim.opencl.ramp.snapshot import Snapshot\r\n\r\n\r\ndef test_summary_update():\r\n npeople = 50 + 34 + 101 + 551\r\n summary = Summary(snapshot=Snapshot.random(nplaces=10, npeople=npeople, nslots=10), max_time=20)\r\n\r\n time = 10\r\n\r\n statuses = np.concatenate((\r\n np.full(50, 0),\r\n np.full(34, 1),\r\n np.full(101, 4),\r\n np.full(551, 6),\r\n ))\r\n np.random.shuffle(statuses)\r\n\r\n summary.update(time, statuses)\r\n\r\n assert summary.total_counts[0][time] == 50\r\n assert summary.total_counts[1][time] == 34\r\n assert summary.total_counts[2][time] == 0\r\n assert summary.total_counts[3][time] == 0\r\n assert summary.total_counts[4][time] == 101\r\n assert summary.total_counts[5][time] == 0\r\n assert summary.total_counts[6][time] == 551\r\n" ]
[ [ "numpy.random.shuffle", "numpy.full" ] ]
vcfgv/mars
[ "ef9e2282208798a5a82e9f9a19538ac92bafee8d" ]
[ "mars/dataframe/datasource/tests/test_datasource_execution.py" ]
[ "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\nimport time\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom string import printable\n\nimport numpy as np\nimport pandas as pd\nimport pytest\ntry:\n import pyarrow as pa\nexcept ImportError: # pragma: no cover\n pa = None\ntry:\n import fastparquet\nexcept ImportError: # pragma: no cover\n fastparquet = None\ntry:\n import sqlalchemy\nexcept ImportError: # pragma: no cover\n sqlalchemy = None\n\nfrom .... import tensor as mt\nfrom .... import dataframe as md\nfrom ....config import option_context\nfrom ....tests.core import require_cudf\nfrom ....utils import arrow_array_to_objects\nfrom ..dataframe import from_pandas as from_pandas_df\nfrom ..series import from_pandas as from_pandas_series\nfrom ..index import from_pandas as from_pandas_index, from_tileable\nfrom ..from_tensor import dataframe_from_tensor, dataframe_from_1d_tileables\nfrom ..from_records import from_records\n\n\ndef test_from_pandas_dataframe_execution(setup):\n # test empty DataFrame\n pdf = pd.DataFrame()\n df = from_pandas_df(pdf)\n\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n pdf = pd.DataFrame(columns=list('ab'))\n df = from_pandas_df(pdf)\n\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n pdf = pd.DataFrame(np.random.rand(20, 30), index=[np.arange(20), np.arange(20, 0, -1)])\n df = from_pandas_df(pdf, chunk_size=(13, 21))\n\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n\ndef test_from_pandas_series_execution(setup):\n # test empty Series\n ps = pd.Series(name='a')\n series = from_pandas_series(ps, chunk_size=13)\n\n result = series.execute().fetch()\n pd.testing.assert_series_equal(ps, result)\n\n series = from_pandas_series(ps)\n\n result = series.execute().fetch()\n pd.testing.assert_series_equal(ps, result)\n\n ps = pd.Series(np.random.rand(20), index=[np.arange(20), np.arange(20, 0, -1)], name='a')\n series = from_pandas_series(ps, chunk_size=13)\n\n result = series.execute().fetch()\n pd.testing.assert_series_equal(ps, result)\n\n\ndef test_from_pandas_index_execution(setup):\n pd_index = pd.timedelta_range('1 days', periods=10)\n index = from_pandas_index(pd_index, chunk_size=7)\n\n result = index.execute().fetch()\n pd.testing.assert_index_equal(pd_index, result)\n\n\ndef test_index_execution(setup):\n rs = np.random.RandomState(0)\n pdf = pd.DataFrame(rs.rand(20, 10), index=np.arange(20, 0, -1),\n columns=['a' + str(i) for i in range(10)])\n df = from_pandas_df(pdf, chunk_size=13)\n\n # test df.index\n result = df.index.execute().fetch()\n pd.testing.assert_index_equal(result, pdf.index)\n\n result = df.columns.execute().fetch()\n pd.testing.assert_index_equal(result, pdf.columns)\n\n # df has unknown chunk shape on axis 0\n df = df[df.a1 < 0.5]\n\n # test df.index\n result = df.index.execute().fetch()\n pd.testing.assert_index_equal(result, pdf[pdf.a1 
< 0.5].index)\n\n s = pd.Series(pdf['a1'], index=pd.RangeIndex(20))\n series = from_pandas_series(s, chunk_size=13)\n\n # test series.index which has value\n result = series.index.execute().fetch()\n pd.testing.assert_index_equal(result, s.index)\n\n s = pdf['a2']\n series = from_pandas_series(s, chunk_size=13)\n\n # test series.index\n result = series.index.execute().fetch()\n pd.testing.assert_index_equal(result, s.index)\n\n # test tensor\n raw = rs.random(20)\n t = mt.tensor(raw, chunk_size=13)\n\n result = from_tileable(t).execute().fetch()\n pd.testing.assert_index_equal(result, pd.Index(raw))\n\n\ndef test_initializer_execution(setup):\n arr = np.random.rand(20, 30)\n\n pdf = pd.DataFrame(arr, index=[np.arange(20), np.arange(20, 0, -1)])\n df = md.DataFrame(pdf, chunk_size=(15, 10))\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n df = md.DataFrame(arr, index=md.date_range('2020-1-1', periods=20))\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(\n result, pd.DataFrame(arr, index=pd.date_range('2020-1-1', periods=20)))\n\n df = md.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n index=md.date_range('1/1/2010', periods=6, freq='D'))\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(\n result, pd.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n index=pd.date_range('1/1/2010', periods=6, freq='D')))\n\n s = np.random.rand(20)\n\n ps = pd.Series(s, index=[np.arange(20), np.arange(20, 0, -1)], name='a')\n series = md.Series(ps, chunk_size=7)\n result = series.execute().fetch()\n pd.testing.assert_series_equal(ps, result)\n\n series = md.Series(s, index=md.date_range('2020-1-1', periods=20))\n result = series.execute().fetch()\n pd.testing.assert_series_equal(\n result, pd.Series(s, index=pd.date_range('2020-1-1', periods=20)))\n\n pi = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])\n index = md.Index(md.Index(pi))\n result = index.execute().fetch()\n pd.testing.assert_index_equal(pi, result)\n\n\ndef test_index_only(setup):\n df = md.DataFrame(index=[1, 2, 3])\n pd.testing.assert_frame_equal(df.execute().fetch(),\n pd.DataFrame(index=[1, 2, 3]))\n\n s = md.Series(index=[1, 2, 3])\n pd.testing.assert_series_equal(s.execute().fetch(),\n pd.Series(index=[1, 2, 3]))\n\n df = md.DataFrame(index=md.Index([1, 2, 3]))\n pd.testing.assert_frame_equal(df.execute().fetch(),\n pd.DataFrame(index=[1, 2, 3]))\n\n s = md.Series(index=md.Index([1, 2, 3]), dtype=object)\n pd.testing.assert_series_equal(s.execute().fetch(),\n pd.Series(index=[1, 2, 3], dtype=object))\n\n\ndef test_series_from_tensor(setup):\n data = np.random.rand(10)\n series = md.Series(mt.tensor(data), name='a')\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(data, name='a'))\n\n series = md.Series(mt.tensor(data, chunk_size=3))\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(data))\n\n series = md.Series(mt.ones((10,), chunk_size=4))\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(np.ones(10,)))\n\n index_data = np.random.rand(10)\n series = md.Series(mt.tensor(data, chunk_size=3), name='a',\n index=mt.tensor(index_data, chunk_size=4))\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(data, name='a', index=index_data))\n\n series = md.Series(mt.tensor(data, chunk_size=3), name='a',\n index=md.date_range('2020-1-1', periods=10))\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(data, name='a', index=pd.date_range('2020-1-1', 
periods=10)))\n\n\ndef test_from_tensor_execution(setup):\n tensor = mt.random.rand(10, 10, chunk_size=5)\n df = dataframe_from_tensor(tensor)\n tensor_res = tensor.execute().fetch()\n pdf_expected = pd.DataFrame(tensor_res)\n df_result = df.execute().fetch()\n pd.testing.assert_index_equal(df_result.index, pd.RangeIndex(0, 10))\n pd.testing.assert_index_equal(df_result.columns, pd.RangeIndex(0, 10))\n pd.testing.assert_frame_equal(df_result, pdf_expected)\n\n # test from tensor with unknown shape\n tensor2 = tensor[tensor[:, 0] < 0.9]\n df = dataframe_from_tensor(tensor2)\n df_result = df.execute().fetch()\n tensor_res = tensor2.execute().fetch()\n pdf_expected = pd.DataFrame(tensor_res)\n pd.testing.assert_frame_equal(df_result.reset_index(drop=True), pdf_expected)\n\n # test converted with specified index_value and columns\n tensor2 = mt.random.rand(2, 2, chunk_size=1)\n df2 = dataframe_from_tensor(tensor2, index=pd.Index(['a', 'b']), columns=pd.Index([3, 4]))\n df_result = df2.execute().fetch()\n pd.testing.assert_index_equal(df_result.index, pd.Index(['a', 'b']))\n pd.testing.assert_index_equal(df_result.columns, pd.Index([3, 4]))\n\n # test converted from 1-d tensor\n tensor3 = mt.array([1, 2, 3])\n df3 = dataframe_from_tensor(tensor3)\n result3 = df3.execute().fetch()\n pdf_expected = pd.DataFrame(np.array([1, 2, 3]))\n pd.testing.assert_frame_equal(pdf_expected, result3)\n\n # test converted from identical chunks\n tensor4 = mt.ones((10, 10), chunk_size=3)\n df4 = dataframe_from_tensor(tensor4)\n result4 = df4.execute().fetch()\n pdf_expected = pd.DataFrame(tensor4.execute().fetch())\n pd.testing.assert_frame_equal(pdf_expected, result4)\n\n # from tensor with given index\n tensor5 = mt.ones((10, 10), chunk_size=3)\n df5 = dataframe_from_tensor(tensor5, index=np.arange(0, 20, 2))\n result5 = df5.execute().fetch()\n pdf_expected = pd.DataFrame(tensor5.execute().fetch(),\n index=np.arange(0, 20, 2))\n pd.testing.assert_frame_equal(pdf_expected, result5)\n\n # from tensor with given index that is a tensor\n raw7 = np.random.rand(10, 10)\n tensor7 = mt.tensor(raw7, chunk_size=3)\n index_raw7 = np.random.rand(10)\n index7 = mt.tensor(index_raw7, chunk_size=4)\n df7 = dataframe_from_tensor(tensor7, index=index7)\n result7 = df7.execute().fetch()\n pdf_expected = pd.DataFrame(raw7, index=index_raw7)\n pd.testing.assert_frame_equal(pdf_expected, result7)\n\n # from tensor with given index is a md.Index\n raw10 = np.random.rand(10, 10)\n tensor10 = mt.tensor(raw10, chunk_size=3)\n index10 = md.date_range('2020-1-1', periods=10, chunk_size=3)\n df10 = dataframe_from_tensor(tensor10, index=index10)\n result10 = df10.execute().fetch()\n pdf_expected = pd.DataFrame(raw10, index=pd.date_range('2020-1-1', periods=10))\n pd.testing.assert_frame_equal(pdf_expected, result10)\n\n # from tensor with given columns\n tensor6 = mt.ones((10, 10), chunk_size=3)\n df6 = dataframe_from_tensor(tensor6, columns=list('abcdefghij'))\n result6 = df6.execute().fetch()\n pdf_expected = pd.DataFrame(tensor6.execute().fetch(),\n columns=list('abcdefghij'))\n pd.testing.assert_frame_equal(pdf_expected, result6)\n\n # from 1d tensors\n raws8 = [('a', np.random.rand(8)), ('b', np.random.randint(10, size=8)),\n ('c', [''.join(np.random.choice(list(printable), size=6)) for _ in range(8)])]\n tensors8 = OrderedDict((r[0], mt.tensor(r[1], chunk_size=3)) for r in raws8)\n raws8.append(('d', 1))\n raws8.append(('e', pd.date_range('2020-1-1', periods=8)))\n tensors8['d'] = 1\n tensors8['e'] = raws8[-1][1]\n df8 = 
dataframe_from_1d_tileables(tensors8, columns=[r[0] for r in raws8])\n result = df8.execute().fetch()\n pdf_expected = pd.DataFrame(OrderedDict(raws8))\n pd.testing.assert_frame_equal(result, pdf_expected)\n\n # from 1d tensors and specify index with a tensor\n index_raw9 = np.random.rand(8)\n index9 = mt.tensor(index_raw9, chunk_size=4)\n df9 = dataframe_from_1d_tileables(tensors8, columns=[r[0] for r in raws8],\n index=index9)\n result = df9.execute().fetch()\n pdf_expected = pd.DataFrame(OrderedDict(raws8), index=index_raw9)\n pd.testing.assert_frame_equal(result, pdf_expected)\n\n # from 1d tensors and specify index\n df11 = dataframe_from_1d_tileables(tensors8, columns=[r[0] for r in raws8],\n index=md.date_range('2020-1-1', periods=8))\n result = df11.execute().fetch()\n pdf_expected = pd.DataFrame(OrderedDict(raws8),\n index=pd.date_range('2020-1-1', periods=8))\n pd.testing.assert_frame_equal(result, pdf_expected)\n\n\ndef test_from_records_execution(setup):\n dtype = np.dtype([('x', 'int'), ('y', 'double'), ('z', '<U16')])\n\n ndarr = np.ones((10,), dtype=dtype)\n pdf_expected = pd.DataFrame.from_records(ndarr, index=pd.RangeIndex(10))\n\n # from structured array of mars\n tensor = mt.ones((10,), dtype=dtype, chunk_size=3)\n df1 = from_records(tensor)\n df1_result = df1.execute().fetch()\n pd.testing.assert_frame_equal(df1_result, pdf_expected)\n\n # from structured array of numpy\n df2 = from_records(ndarr)\n df2_result = df2.execute().fetch()\n pd.testing.assert_frame_equal(df2_result, pdf_expected)\n\n\ndef test_read_csv_execution(setup):\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64), columns=['a', 'b', 'c'])\n df.to_csv(file_path)\n\n pdf = pd.read_csv(file_path, index_col=0)\n r = md.read_csv(file_path, index_col=0)\n mdf = r.execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n # size_res = self.executor.execute_dataframe(r, mock=True)\n # assert sum(s[0] for s in size_res) == os.stat(file_path).st_size\n\n mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=10).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n mdf = md.read_csv(file_path, index_col=0, nrows=1).execute().fetch()\n pd.testing.assert_frame_equal(df[:1], mdf)\n\n # test names and usecols\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64),\n columns=['a', 'b', 'c'])\n df.to_csv(file_path, index=False)\n\n mdf = md.read_csv(file_path, usecols=['c', 'b']).execute().fetch()\n pd.testing.assert_frame_equal(\n pd.read_csv(file_path, usecols=['c', 'b']), mdf)\n\n mdf = md.read_csv(file_path, names=['a', 'b', 'c'],\n usecols=['c', 'b']).execute().fetch()\n pd.testing.assert_frame_equal(\n pd.read_csv(file_path, names=['a', 'b', 'c'], usecols=['c', 'b']), mdf)\n\n mdf = md.read_csv(file_path, names=['a', 'b', 'c'],\n usecols=['a', 'c']).execute().fetch()\n pd.testing.assert_frame_equal(\n pd.read_csv(file_path, names=['a', 'b', 'c'], usecols=['a', 'c']), mdf)\n\n mdf = md.read_csv(file_path, usecols=['a', 'c']).execute().fetch()\n pd.testing.assert_frame_equal(\n pd.read_csv(file_path, usecols=['a', 'c']), mdf)\n\n # test sep\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=['a', 'b', 'c'])\n df.to_csv(file_path, 
sep=';')\n\n pdf = pd.read_csv(file_path, sep=';', index_col=0)\n mdf = md.read_csv(file_path, sep=';', index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, sep=';', index_col=0, chunk_bytes=10).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n # test missing value\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame({'c1': [np.nan, 'a', 'b', 'c'], 'c2': [1, 2, 3, np.nan],\n 'c3': [np.nan, np.nan, 3.4, 2.2]})\n df.to_csv(file_path)\n\n pdf = pd.read_csv(file_path, index_col=0)\n mdf = md.read_csv(file_path, index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=12).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n index = pd.date_range(start='1/1/2018', periods=100)\n df = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n }, index=index)\n df.to_csv(file_path)\n\n pdf = pd.read_csv(file_path, index_col=0)\n mdf = md.read_csv(file_path, index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=100).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n # test nan\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame({\n 'col1': np.random.rand(100, ),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n })\n df.iloc[20:, :] = pd.NA\n df.to_csv(file_path)\n\n pdf = pd.read_csv(file_path, index_col=0)\n mdf = md.read_csv(file_path, index_col=0, head_lines=10, chunk_bytes=200)\n result = mdf.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n # dtypes is inferred as expected\n pd.testing.assert_series_equal(mdf.dtypes, pd.Series(['float64', 'object', 'int64'],\n index=df.columns))\n\n # test compression\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.gzip')\n\n index = pd.date_range(start='1/1/2018', periods=100)\n df = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n }, index=index)\n df.to_csv(file_path, compression='gzip')\n\n pdf = pd.read_csv(file_path, compression='gzip', index_col=0)\n mdf = md.read_csv(file_path, compression='gzip', index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, compression='gzip', index_col=0,\n chunk_bytes='1k').execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n # test multiple files\n with tempfile.TemporaryDirectory() as tempdir:\n df = pd.DataFrame(np.random.rand(300, 3), columns=['a', 'b', 'c'])\n\n file_paths = [os.path.join(tempdir, f'test{i}.csv') for i in range(3)]\n df[:100].to_csv(file_paths[0])\n df[100:200].to_csv(file_paths[1])\n df[200:].to_csv(file_paths[2])\n\n mdf = md.read_csv(file_paths, index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf)\n\n mdf2 = md.read_csv(file_paths, index_col=0, chunk_bytes=50).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf2)\n\n # test wildcards in path\n with tempfile.TemporaryDirectory() as tempdir:\n df = pd.DataFrame(np.random.rand(300, 3), columns=['a', 'b', 'c'])\n\n file_paths = [os.path.join(tempdir, 
f'test{i}.csv') for i in range(3)]\n df[:100].to_csv(file_paths[0])\n df[100:200].to_csv(file_paths[1])\n df[200:].to_csv(file_paths[2])\n\n # As we can not guarantee the order in which these files are processed,\n # the result may not keep the original order.\n mdf = md.read_csv(f'{tempdir}/*.csv', index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf.sort_index())\n\n mdf2 = md.read_csv(f'{tempdir}/*.csv', index_col=0, chunk_bytes=50).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf2.sort_index())\n\n # test read directory\n with tempfile.TemporaryDirectory() as tempdir:\n testdir = os.path.join(tempdir, 'test_dir')\n os.makedirs(testdir, exist_ok=True)\n\n df = pd.DataFrame(np.random.rand(300, 3), columns=['a', 'b', 'c'])\n\n file_paths = [os.path.join(testdir, f'test{i}.csv') for i in range(3)]\n df[:100].to_csv(file_paths[0])\n df[100:200].to_csv(file_paths[1])\n df[200:].to_csv(file_paths[2])\n\n # As we can not guarantee the order in which these files are processed,\n # the result may not keep the original order.\n mdf = md.read_csv(testdir, index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf.sort_index())\n\n mdf2 = md.read_csv(testdir, index_col=0, chunk_bytes=50).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf2.sort_index())\n\n\[email protected](pa is None, reason='pyarrow not installed')\ndef test_read_csv_use_arrow_dtype(setup):\n rs = np.random.RandomState(0)\n df = pd.DataFrame({\n 'col1': rs.rand(100),\n 'col2': rs.choice(['a' * 2, 'b' * 3, 'c' * 4], (100,)),\n 'col3': np.arange(100)\n })\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n df.to_csv(file_path, index=False)\n\n pdf = pd.read_csv(file_path)\n mdf = md.read_csv(file_path, use_arrow_dtype=True)\n result = mdf.execute().fetch()\n assert isinstance(mdf.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), pdf)\n\n with tempfile.TemporaryDirectory() as tempdir:\n with option_context({'dataframe.use_arrow_dtype': True}):\n file_path = os.path.join(tempdir, 'test.csv')\n df.to_csv(file_path, index=False)\n\n pdf = pd.read_csv(file_path)\n mdf = md.read_csv(file_path)\n result = mdf.execute().fetch()\n assert isinstance(mdf.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), pdf)\n\n # test compression\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.gzip')\n df.to_csv(file_path, compression='gzip', index=False)\n\n pdf = pd.read_csv(file_path, compression='gzip')\n mdf = md.read_csv(file_path, compression='gzip', use_arrow_dtype=True)\n result = mdf.execute().fetch()\n assert isinstance(mdf.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), pdf)\n\n\n@require_cudf\ndef test_read_csv_gpu_execution(setup_gpu):\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n })\n df.to_csv(file_path, index=False)\n\n pdf = pd.read_csv(file_path)\n mdf = md.read_csv(file_path, gpu=True).execute().fetch()\n pd.testing.assert_frame_equal(pdf.reset_index(drop=True), 
mdf.to_pandas().reset_index(drop=True))\n\n mdf2 = md.read_csv(file_path, gpu=True, chunk_bytes=200).execute().fetch()\n pd.testing.assert_frame_equal(pdf.reset_index(drop=True), mdf2.to_pandas().reset_index(drop=True))\n\n\ndef test_read_csv_without_index(setup):\n # test csv file without storing index\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=['a', 'b', 'c'])\n df.to_csv(file_path, index=False)\n\n pdf = pd.read_csv(file_path)\n mdf = md.read_csv(file_path).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, chunk_bytes=10).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n file_path2 = os.path.join(tempdir, 'test.csv')\n df = pd.DataFrame(np.random.RandomState(0).rand(100, 10),\n columns=[f'col{i}' for i in range(10)])\n df.to_csv(file_path2, index=False)\n\n mdf3 = md.read_csv(file_path2, chunk_bytes=os.stat(file_path2).st_size / 5)\n result = mdf3.execute().fetch()\n expected = pd.read_csv(file_path2)\n pd.testing.assert_frame_equal(result, expected)\n\n # test incremental_index = False\n mdf4 = md.read_csv(file_path2, chunk_bytes=os.stat(file_path2).st_size / 5,\n incremental_index=False)\n result = mdf4.execute().fetch()\n assert not result.index.is_monotonic_increasing\n expected = pd.read_csv(file_path2)\n pd.testing.assert_frame_equal(result.reset_index(drop=True), expected)\n\n\[email protected](sqlalchemy is None, reason='sqlalchemy not installed')\ndef test_read_sql_execution(setup):\n import sqlalchemy as sa\n\n rs = np.random.RandomState(0)\n test_df = pd.DataFrame({'a': np.arange(10).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(10)],\n 'c': rs.rand(10),\n 'd': [datetime.fromtimestamp(time.time() + 3600 * (i - 5))\n for i in range(10)]})\n\n with tempfile.TemporaryDirectory() as d:\n table_name = 'test'\n table_name2 = 'test2'\n uri = 'sqlite:///' + os.path.join(d, 'test.db')\n\n test_df.to_sql(table_name, uri, index=False)\n\n # test read with table name\n r = md.read_sql_table('test', uri, chunk_size=4)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df)\n\n # test read with sql string and offset method\n r = md.read_sql_query('select * from test where c > 0.5', uri,\n parse_dates=['d'], chunk_size=4)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df[test_df.c > 0.5].reset_index(drop=True))\n\n # test read with sql string and partition method with integer cols\n r = md.read_sql('select * from test where b > \\'s5\\'', uri,\n parse_dates=['d'], partition_col='a', num_partitions=3)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df[test_df.b > 's5'].reset_index(drop=True))\n\n # test read with sql string and partition method with datetime cols\n r = md.read_sql_query('select * from test where b > \\'s5\\'', uri,\n parse_dates={'d': '%Y-%m-%d %H:%M:%S'},\n partition_col='d', num_partitions=3)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df[test_df.b > 's5'].reset_index(drop=True))\n\n # test read with sql string and partition method with datetime cols\n r = md.read_sql_query('select * from test where b > \\'s5\\'', uri,\n parse_dates=['d'], partition_col='d', num_partitions=3,\n index_col='d')\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df[test_df.b > 's5'].set_index('d'))\n\n # test SQL that return no result\n r = 
md.read_sql_query('select * from test where a > 1000', uri)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, pd.DataFrame(columns=test_df.columns))\n\n engine = sa.create_engine(uri)\n m = sa.MetaData()\n try:\n # test index_col and columns\n r = md.read_sql_table('test', engine.connect(), chunk_size=4,\n index_col='a', columns=['b', 'd'])\n result = r.execute().fetch()\n expected = test_df.copy(deep=True)\n expected.set_index('a', inplace=True)\n del expected['c']\n pd.testing.assert_frame_equal(result, expected)\n\n # do not specify chunk_size\n r = md.read_sql_table('test', engine.connect(),\n index_col='a', columns=['b', 'd'])\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, expected)\n\n table = sa.Table(table_name, m, autoload=True,\n autoload_with=engine)\n r = md.read_sql_table(table, engine, chunk_size=4,\n index_col=[table.columns['a'], table.columns['b']],\n columns=[table.columns['c'], 'd'])\n result = r.execute().fetch()\n expected = test_df.copy(deep=True)\n expected.set_index(['a', 'b'], inplace=True)\n pd.testing.assert_frame_equal(result, expected)\n\n # test table with primary key\n sa.Table(table_name2, m,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('a', sa.Integer),\n sa.Column('b', sa.String),\n sa.Column('c', sa.Float),\n sa.Column('d', sa.DateTime))\n m.create_all(engine)\n test_df = test_df.copy(deep=True)\n test_df.index.name = 'id'\n test_df.to_sql(table_name2, uri, if_exists='append')\n\n r = md.read_sql_table(table_name2, engine, chunk_size=4, index_col='id')\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df)\n finally:\n engine.dispose()\n\n\[email protected](pa is None, reason='pyarrow not installed')\ndef test_read_sql_use_arrow_dtype(setup):\n rs = np.random.RandomState(0)\n test_df = pd.DataFrame({'a': np.arange(10).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(10)],\n 'c': rs.rand(10),\n 'd': [datetime.fromtimestamp(time.time() + 3600 * (i - 5))\n for i in range(10)]})\n\n with tempfile.TemporaryDirectory() as d:\n table_name = 'test'\n uri = 'sqlite:///' + os.path.join(d, 'test.db')\n\n test_df.to_sql(table_name, uri, index=False)\n\n r = md.read_sql_table('test', uri, chunk_size=4, use_arrow_dtype=True)\n result = r.execute().fetch()\n assert isinstance(r.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), test_df)\n\n # test read with sql string and offset method\n r = md.read_sql_query('select * from test where c > 0.5', uri,\n parse_dates=['d'], chunk_size=4,\n use_arrow_dtype=True)\n result = r.execute().fetch()\n assert isinstance(r.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result),\n test_df[test_df.c > 0.5].reset_index(drop=True))\n\n\ndef test_date_range_execution(setup):\n for closed in [None, 'left', 'right']:\n # start, periods, freq\n dr = md.date_range('2020-1-1', periods=10, chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', periods=10, closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # end, periods, freq\n dr = md.date_range(end='2020-1-10', periods=10, chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range(end='2020-1-10', periods=10, closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # 
start, end, freq\n dr = md.date_range('2020-1-1', '2020-1-10', chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', '2020-1-10', closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # start, end and periods\n dr = md.date_range('2020-1-1', '2020-1-10', periods=19,\n chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', '2020-1-10', periods=19,\n closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # start, end and freq\n dr = md.date_range('2020-1-1', '2020-1-10', freq='12H',\n chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', '2020-1-10', freq='12H',\n closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # test timezone\n dr = md.date_range('2020-1-1', periods=10, tz='Asia/Shanghai', chunk_size=7)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', periods=10, tz='Asia/Shanghai')\n pd.testing.assert_index_equal(result, expected)\n\n # test periods=0\n dr = md.date_range('2020-1-1', periods=0)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', periods=0)\n pd.testing.assert_index_equal(result, expected)\n\n # test start == end\n dr = md.date_range('2020-1-1', '2020-1-1', periods=1)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', '2020-1-1', periods=1)\n pd.testing.assert_index_equal(result, expected)\n\n # test normalize=True\n dr = md.date_range('2020-1-1', periods=10, normalize=True, chunk_size=4)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', periods=10, normalize=True)\n pd.testing.assert_index_equal(result, expected)\n\n # test freq\n dr = md.date_range(start='1/1/2018', periods=5, freq='M', chunk_size=3)\n\n result = dr.execute().fetch()\n expected = pd.date_range(start='1/1/2018', periods=5, freq='M')\n pd.testing.assert_index_equal(result, expected)\n\n\[email protected](pa is None, reason='pyarrow not installed')\ndef test_read_parquet_arrow(setup):\n test_df = pd.DataFrame({'a': np.arange(10).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(10)],\n 'c': np.random.rand(10), })\n\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n test_df.to_parquet(file_path)\n\n df = md.read_parquet(file_path)\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df)\n # size_res = self.executor.execute_dataframe(df, mock=True)\n # assert sum(s[0] for s in size_res) > test_df.memory_usage(deep=True).sum()\n\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.parquet')\n test_df.to_parquet(file_path, row_group_size=3)\n\n df = md.read_parquet(file_path, groups_as_chunks=True, columns=['a', 'b'])\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(result.reset_index(drop=True), test_df[['a', 'b']])\n\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.parquet')\n test_df.to_parquet(file_path, row_group_size=5)\n\n df = md.read_parquet(file_path, groups_as_chunks=True,\n use_arrow_dtype=True,\n incremental_index=True)\n result = df.execute().fetch()\n assert isinstance(df.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), test_df)\n\n # test wildcards in path\n with tempfile.TemporaryDirectory() as tempdir:\n df = pd.DataFrame({'a': 
np.arange(300).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(300)],\n 'c': np.random.rand(300), })\n\n file_paths = [os.path.join(tempdir, f'test{i}.parquet') for i in range(3)]\n df[:100].to_parquet(file_paths[0], row_group_size=50)\n df[100:200].to_parquet(file_paths[1], row_group_size=30)\n df[200:].to_parquet(file_paths[2])\n\n mdf = md.read_parquet(f'{tempdir}/*.parquet')\n r = mdf.execute().fetch()\n pd.testing.assert_frame_equal(df, r.sort_values('a').reset_index(drop=True))\n\n mdf = md.read_parquet(f'{tempdir}/*.parquet', groups_as_chunks=True)\n r = mdf.execute().fetch()\n pd.testing.assert_frame_equal(df, r.sort_values('a').reset_index(drop=True))\n\n\[email protected](fastparquet is None, reason='fastparquet not installed')\ndef test_read_parquet_fast_parquet(setup):\n test_df = pd.DataFrame({'a': np.arange(10).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(10)],\n 'c': np.random.rand(10), })\n\n # test fastparquet engine\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n test_df.to_parquet(file_path, compression=None)\n\n df = md.read_parquet(file_path, engine='fastparquet')\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df)\n # size_res = self.executor.execute_dataframe(df, mock=True)\n # assert sum(s[0] for s in size_res) > test_df.memory_usage(deep=True).sum()\n" ]
[ [ "pandas.testing.assert_series_equal", "pandas.Series", "pandas.RangeIndex", "pandas.DataFrame", "numpy.dtype", "pandas.testing.assert_frame_equal", "numpy.random.randint", "pandas.read_csv", "numpy.arange", "pandas.Index", "pandas.testing.assert_index_equal", "numpy.random.choice", "numpy.random.rand", "pandas.date_range", "numpy.array", "numpy.random.RandomState", "pandas.timedelta_range", "pandas.IntervalIndex.from_tuples", "numpy.ones" ] ]
less-lab-uva/CS4501-Website
[ "7583e2d800c4450192ea5c22e8e815f6d2ab7edb" ]
[ "labs/images/lab5/train_model.py" ]
[ "# Thanks: https://machinelearningmastery.com/how-to-develop-a-cnn-from-scratch-for-fashion-mnist-clothing-classification/\n\n# model with double the filters for the fashion mnist dataset\nimport cv2\nimport glob\nimport argparse\nimport numpy as np\n\nfrom numpy import mean\nfrom numpy import std\nfrom numpy import argmax\nfrom matplotlib import pyplot\nfrom sklearn.model_selection import KFold\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.layers import Conv2D, Dropout, MaxPooling2D, Dense, Flatten\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=50)\nparser.add_argument('--batch_size', type=int, default=64)\nparser.add_argument('--h', type=int, default=48)\nparser.add_argument('--w', type=int, default=48)\nargs = parser.parse_args()\n\n# define dnn model (simple)\ndef define_model(number_classes):\n model = Sequential()\n # model.add(Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='he_uniform', input_shape=(args.h, args.w, 1)))\n # model.add(MaxPooling2D((2, 2)))\n model.add(Flatten(input_shape=(args.h, args.w, 1)))\n model.add(Dense(500, activation='relu', kernel_initializer='he_uniform'))\n # model.add(Dropout(0.2))\n model.add(Dense(500, activation='relu', kernel_initializer='he_uniform'))\n # model.add(Dropout(0.2))\n model.add(Dense(number_classes, activation='softmax'))\n opt = Adam(lr=0.0001)\n # compile model\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\n# get the classes\ndef get_classes(dataset):\n # Get the class names from the folder names\n classes = glob.glob(dataset)\n classes.sort()\n for i in range(len(classes)):\n classes[i] = classes[i][:-1]\n pos = classes[i].rfind('/')\n classes[i] = classes[i][pos+1:]\n return classes\n\n# load and prepare the image\ndef load_image(filename):\n # load the image\n img = load_img(filename, grayscale=True, target_size=(args.h, args.w))\n # convert to array\n img = img_to_array(img)\n # reshape into a single sample with 1 channel\n img = img.reshape(1, args.h, args.w, 1)\n # prepare pixel data\n img = img.astype('float32')\n img = img / 255.0\n return img\n\n# convert a folder to an array\ndef folder_to_array(file_names, classes):\n x = []\n y = []\n for f in file_names:\n # Create data\n image = load_image(f)\n x.append(image)\n # Create label\n label = []\n # Get the subfolder\n folder_name = f\n pos = folder_name.rfind('/')\n folder_name = folder_name[:pos]\n pos = folder_name.rfind('/')\n folder_name = folder_name[pos+1:]\n # Check if the name is in the subfolder\n for c in classes:\n if c in folder_name:\n label.append(1)\n else:\n label.append(0)\n y.append(label)\n\n x = np.array(x, dtype='float64')\n y = np.array(y, dtype='int64')\n\n return x, y\n\n# load the dataset from the folders\ndef load_dataset():\n\n # Get the classes\n classes = get_classes(\"./training_data/*/\")\n print(\"Classes: \" + str(classes))\n\n # Create the training data\n training_files = glob.glob (\"./training_data/*/*.jp*\") # your image path\n trainX, trainY = folder_to_array(training_files, classes)\n\n # Create the testing data\n testing_files = glob.glob (\"./testing_data/*/*.jp*\") # your image path\n testX, testY = 
trainX = trainX.reshape((trainX.shape[0], args.h, args.w, 1))\n testX = testX.reshape((testX.shape[0], args.h, args.w, 1))\n\n print(\"Training data shape: \" + str(trainX.shape))\n print(\"Training label shape: \" + str(trainY.shape))\n\n print(\"Test data shape: \" + str(testX.shape))\n print(\"Test label shape: \" + str(testY.shape))\n\n\n return trainX, trainY, testX, testY\n\n# plot diagnostic learning curves\ndef summarize_diagnostics(history):\n # plot accuracy\n pyplot.subplot(111)\n pyplot.title('Classification Accuracy')\n pyplot.plot(history.history['acc'], color='blue', label='training accuracy')\n pyplot.plot(history.history['val_acc'], color='orange', label='validation accuracy')\n pyplot.legend()\n pyplot.show()\n\n# summarize model performance\ndef summarize_performance(scores):\n # print summary\n print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))\n # box and whisker plots of results\n pyplot.boxplot(scores)\n pyplot.show()\n\n# run the training and save a model\ndef run_training():\n # load dataset\n trainX, trainY, testX, testY = load_dataset()\n # define model\n model = define_model(number_classes=len(testY[0]))\n # Define early stopping\n callback = EarlyStopping(monitor=\"val_acc\", patience=250)\n # fit model\n history = model.fit(trainX, trainY, epochs=args.epochs, batch_size=args.batch_size, verbose=1, validation_split=0.1, shuffle=True, callbacks=[callback])\n # save model\n model.summary()\n model.save('marine_model.h5')\n # Display the training curves\n summarize_diagnostics(history)\n\n# run for evaluating a model\ndef run_testing():\n # load dataset\n trainX, trainY, testX, testY = load_dataset()\n # load model\n model = load_model('marine_model.h5')\n # evaluate model on test dataset\n _, acc = model.evaluate(testX, testY, verbose=1)\n print('Test Accuracy: ' + str(acc * 100.0))\n\n# load an image and predict the class\ndef run_single_image():\n classes = get_classes(\"./training_data/*/\")\n # load model\n model = load_model('marine_model.h5')\n # For all images directly under testing_data\n sample_images = glob.glob(\"./testing_data/*.jp*\")\n for img_name in sample_images:\n # Load the image\n image = load_image(img_name)\n # predict the class\n prediction = model.predict(image)\n result = argmax(prediction, axis=-1)\n print('Single image class (' + img_name + '): ' + str(classes[result[0]]))\n\n# Running the code\nrun_training()\nrun_testing()\nrun_single_image()\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.boxplot", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "numpy.std", "matplotlib.pyplot.subplot", "numpy.argmax", "numpy.mean", "numpy.array", "matplotlib.pyplot.show" ] ]
TescaF/point_cloud_io
[ "a5848d48f341b88b43f6b28b88d8b048eeefcf8a" ]
[ "src/pub_pose.py" ]
[ "#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion\nimport numpy as np\nimport math\n\ndef publish():\n pub = rospy.Publisher('pose_truth', PoseStamped, queue_size=10)\n rospy.init_node('talker', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n #pt = [0.21,-0.011,0.4,0.3,-0.6,-0.01]\n # Sciossors_01_28 pt = [0.21,-0.011,0.4,0.3,-0.6,-0.01]\n #Shears_02_01 pt = [0.189,-0.015,0.4,-0.4,-0.6,-0.01]\n pt = [0.188,-0.015,0.4,-0.45,-0.6,-0.01]\n # Scissors_08_01 pt = [0.2,-0.012,0.4,0,-1,0]\n\n ests = [['scissors_01_00000027', [0.024235617160797116,-0.011359463453292846,0.019534289836883545]], \n['scissors_01_00000060', [0.0011834951639175398,-0.013148486614227295,-0.005846852660179138]], \n['scissors_01_00000003', [0.024251672744750975,-0.011589790105819703,0.0003066921234130859]], \n['shears_01_00000009', [-0.009251792550086976,-0.017923964738845825,0.010005302429199218]], \n['shears_01_00000033', [-0.027354883074760434,-0.012586298942565919,0.031511585712432864]], \n['shears_01_00000090', [-0.03358910477161407,-0.013879684925079346,-0.014482853412628173]]] \n pt = ests[0][1] + [0,0,1]\n #pt[2] += 0.05\n\n pos = pose_from_vec(pt)\n pose = PoseStamped()\n pose.pose = pos\n pose.header.frame_id = \"base_link\"\n\n while not rospy.is_shutdown():\n pub.publish(pose)\n rate.sleep()\n\ndef pose_from_vec(waypoint):\n pose = Pose()\n pose.position.x = waypoint[0]\n pose.position.y = waypoint[1]\n pose.position.z = waypoint[2] \n\n u = [1,0,0]\n norm = np.linalg.norm(np.array(waypoint[3:]))\n v = np.array(waypoint[3:])/norm \n if (np.array_equal(u, v)):\n pose.orientation.w = 1\n pose.orientation.x = 0\n pose.orientation.y = 0\n pose.orientation.z = 0\n elif (np.array_equal(u, np.negative(v))):\n pose.orientation.w = 0\n pose.orientation.x = 0\n pose.orientation.y = 0\n pose.orientation.z = 1\n else:\n half = [u[0]+v[0], u[1]+v[1], u[2]+v[2]]\n pose.orientation.w = np.dot(u, half)\n temp = np.cross(u, half)\n pose.orientation.x = temp[0]\n pose.orientation.y = temp[1]\n pose.orientation.z = temp[2]\n norm = math.sqrt(pose.orientation.x*pose.orientation.x + pose.orientation.y*pose.orientation.y + \n pose.orientation.z*pose.orientation.z + pose.orientation.w*pose.orientation.w)\n if norm == 0:\n norm = 1\n pose.orientation.x /= norm\n pose.orientation.y /= norm\n pose.orientation.z /= norm\n pose.orientation.w /= norm\n return pose\n\nif __name__ == '__main__':\n try:\n publish()\n except rospy.ROSInterruptException:\n pass\n" ]
[ [ "numpy.dot", "numpy.array_equal", "numpy.cross", "numpy.negative", "numpy.array" ] ]
d3netxer/peartree
[ "577b077c169c7f102d5947b5f9f273fc965eb41f" ]
[ "peartree/paths.py" ]
[ "from typing import Any, Dict, List\n\nimport networkx as nx\nimport numpy as np\nimport partridge as ptg\n\nfrom .graph import (generate_empty_md_graph, generate_summary_graph_elements,\n make_synthetic_system_network, populate_graph)\nfrom .synthetic import SyntheticTransitNetwork\nfrom .toolkit import generate_random_name\nfrom .utilities import generate_nodes_gdf_from_graph, log\n\nFALLBACK_STOP_COST_DEFAULT = (30 * 60) # 30 minutes, converted to seconds\n\n\nclass InvalidGTFS(Exception):\n # Let's have a custom exception for when we read in GTFS files\n pass\n\n\nclass InvalidTimeBracket(Exception):\n pass\n\n\ndef _calculate_means_default(\n target_time_start: float,\n target_time_end: float,\n arrival_times: List) -> float:\n # This is the default method that is provided to the load feed operation\n # and applied to the observed arrival times at a given stop. From this\n # array of arrival times, the average delay between stops is calcualted\n if len(arrival_times) < 2:\n return np.nan\n\n # Make sure that values are in ascending order (also converts to list)\n arrival_times = np.array(arrival_times)\n arrival_times.sort()\n\n # Recast as numpy array\n first = arrival_times[1:]\n second = arrival_times[:-1]\n wait_seconds = list(first - second)\n\n # Recast arrival times as just a python list\n arrival_times = list(arrival_times)\n\n # Also ensure that both the first and last trip include context\n # framed by the evaluation time period\n from_start_time_to_first_arrival = arrival_times[0] - target_time_start\n wait_seconds.append(from_start_time_to_first_arrival)\n\n from_last_arrival_to_end_time = target_time_end - arrival_times[-1]\n wait_seconds.append(from_last_arrival_to_end_time)\n\n # Note: Can implement something more substantial here that takes into\n # account divergent/erratic performance or intentional timing\n # clusters that are not evenly dispersed in a custom method that\n # would replace this default method\n na = np.array(wait_seconds)\n\n # Prune 0-second delays as these excessively reduce wait-time estimates\n na_no_zeroes = na[na > 0]\n\n # Naive implementation: halve the headway to get average wait time\n average_wait = na_no_zeroes.mean() / 2\n return average_wait\n\n\ndef get_representative_feed(file_loc: str,\n day_type: str='busiest') -> ptg.gtfs.Feed:\n \"\"\"\n Given a filepath, extract a partridge feed object, holding a \\\n representative set of schedule patterns, extracted from the GTFS zip \\\n file, as a set of pandas DataFrames.\n\n Parameters\n ----------\n file_loc : str\n The location (filepath) of the GTFS zip file.\n day_type : str\n The name of the type of representative feed desired. Currently, only \\\n one type is supported, busiest. This extracts the schedule pattern \\\n for a day that has the most service on it. 
\"\"\"\n\n # Extract service ids and then trip counts by those dates\n try:\n service_ids_by_date = ptg.read_service_ids_by_date(file_loc)\n trip_counts_by_date = ptg.read_trip_counts_by_date(file_loc)\n\n # Raised by partridge if no valid dates returned\n except AssertionError:\n # Make sure we have some valid values returned in trips\n raise InvalidGTFS('No valid trip counts by date '\n 'were identified in GTFS.')\n\n # TODO: Due to partridge's assertion error being raised, this\n # check may no longer be needed.\n if not len(trip_counts_by_date.items()):\n # Otherwise, error out\n raise InvalidGTFS('No valid trip counts by date '\n 'were identified in GTFS.')\n\n # At this point, different methods can be implemented to help select how\n # to pick which date/schedule id to use\n if day_type == 'busiest':\n # Choose the service id that has the most trips associated with it\n (selected_date,\n trip_count) = max(trip_counts_by_date.items(), key=lambda p: p[1])\n else:\n raise NotImplementedError('Unsupported day type string supplied.')\n\n log('Selected_date: {}'.format(selected_date))\n log('Number of trips on that date: {}'.format(trip_count))\n\n all_service_ids = '\\n\\t'.join(service_ids_by_date[selected_date])\n log('\\nAll related service IDs: \\n\\t{}'.format(all_service_ids))\n\n sub = service_ids_by_date[selected_date]\n feed_query = {'trips.txt': {'service_id': sub}}\n return ptg.load_feed(file_loc, view=feed_query)\n\n\ndef load_feed_as_graph(feed: ptg.gtfs.Feed,\n start_time: int,\n end_time: int,\n name: str=None,\n existing_graph: nx.MultiDiGraph=None,\n connection_threshold: float=50.0,\n walk_speed_kmph: float=4.5,\n stop_cost_method: Any=_calculate_means_default,\n fallback_stop_cost: float=FALLBACK_STOP_COST_DEFAULT,\n interpolate_times: bool=True,\n impute_walk_transfers: bool=False,\n use_multiprocessing: bool=False,\n add_trips_per_edge: bool=False):\n \"\"\"\n Convert a feed object into a NetworkX Graph, or connect to an existing \\\n NetworkX graph if one is supplied.\n\n Parameters\n ----------\n feed : ptg.gtfs.Feed\n A feed object from Partridge holding a representation of the \\\n desired schedule ids and their related schedule data from an \\\n operator GTFS\n start_time : int\n Represented in seconds after midnight; indicates the start time \\\n with which to take the subset of the target feed schedule \\\n to be used to measure impedance between stops along \\\n the route, as well as cost (wait time) to board at each stop\n end_time : int\n Represented in seconds after midnight; indicates the end time \\\n with which to take the subset of the target feed schedule \\\n to be used to measure impedance between stops along \\\n the route, as well as cost (wait time) to board at each stop\n
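 (For example, a 7:00-9:00 AM analysis window is start_time=7*60*60 \\\n and end_time=9*60*60.)\n 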
name : str\n Name of the operator, which is used to create a unique ID for each \\\n of the stops, routes, etc. in the feed being supplied\n existing_graph : networkx.Graph\n An existing graph containing other operator or schedule data\n connection_threshold : float\n Threshold by which to create a connection with an existing stop \\\n in the existing_graph graph, measured in meters\n walk_speed_kmph : float\n Walk speed in km/h, that is used to determine the cost in time when \\\n walking between two nodes that get an internal connection created\n stop_cost_method : Any\n A method is passed in here that handles an arrival time numpy array \\\n and, from that array, calculates a representative average wait time \\\n value, in seconds, for that stop.\n fallback_stop_cost: float\n Cost in seconds to board a line at a stop if no other data is able \\\n to be calculated from schedule data for that stop to determine \\\n what wait time is. Example of this situation would be when \\\n there is only one scheduled stop time found for the stop id.\n interpolate_times : bool\n A boolean flag to indicate whether or not to infill intermediary \\\n stops that do not have all intermediary stop arrival times specified \\\n in the GTFS schedule.\n impute_walk_transfers : bool\n A flag to indicate whether to add in walk connections between nodes \\\n that are close enough, as measured using connection_threshold\n use_multiprocessing: bool\n A flag to indicate whether or not to leverage multiprocessing where \\\n available to attempt to speed up trivially parallelizable operations\n\n Returns\n -------\n G : nx.MultiDiGraph\n networkx.Graph, the loaded, combined representation of the schedule \\\n data from the feed subset by the time parameters provided\n \"\"\"\n # Generate a random name for name if it is None\n if not name:\n name = generate_random_name()\n\n # Some sanity checking, to make sure only positive values are provided\n if (start_time < 0) or (end_time < 0):\n raise InvalidTimeBracket('Invalid start or end target times provided.')\n\n if end_time <= start_time:\n raise InvalidTimeBracket('Invalid ordering: Start time '\n 'is greater than end time.')\n\n (summary_edge_costs,\n wait_times_by_stop) = generate_summary_graph_elements(feed,\n start_time,\n end_time,\n fallback_stop_cost,\n interpolate_times,\n stop_cost_method,\n use_multiprocessing)\n\n 
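# summary_edge_costs holds the averaged travel time for each stop pair and\n # wait_times_by_stop the per-stop boarding cost from stop_cost_method;\n # both are consumed by populate_graph below.\n\n 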
transit FeatureCollection, generate a \\\n directed networ graph (or add to an existing one), based off of features \\\n included in the reference_geojson parameter.\n\n Parameters\n ———————\n reference_geojson : dict\n The TransitJSON; a specifically formatted GeoJSON\n name : str\n The name of the graph\n existing_graph : nx.MultiDiGraph\n An existing, populated transit NetworkX graph generated from peartree\n connection_threshold : float\n Distance in meters within which a nearby transit stops should be \\\n deemed acceptably close for a walk transfer to be also added\n walk_speed_kmph : float\n Speed in kilometers per hour to be used as the reference walk speed \\\n for calculating cost (impedance in time) of walk transfers\n impute_walk_transfers : bool\n A flag to indicate whether or not walk transfers should be calculated\n wait_time_cost_method: Any\n Function that, given a headway float value, produces a wait time value\n\n Returns\n ——\n G : nx.MultiDiGraph\n The muti-directed graph\n \"\"\"\n\n # Generate a random name for name if it is None\n if not name:\n name = generate_random_name()\n\n # This is a flag used to check if we need to run any additional steps\n # after the feed is returned to ensure that new nodes and edge can connect\n # with existing ones (if they exist/a graph is passed in)\n existing_graph_supplied = bool(existing_graph)\n\n # G is either a new MultiDiGraph or one pass from before\n if existing_graph_supplied:\n # TODO: If passed from before we should run some checks to ensure\n # it is valid as well as set a flag to create join points with\n # other feeds so that they can be linked when the next is added.\n G = existing_graph\n existing_graph_nodes = generate_nodes_gdf_from_graph(\n G, to_epsg_crs=2163)\n else:\n G = generate_empty_md_graph(name)\n existing_graph_nodes = None\n\n # First, instantiate whole TransitJSON as a SyntheticTransitNetwork object;\n # will provide necessory validation prior to synthetic network construction\n as_synthetic_network = SyntheticTransitNetwork(\n reference_geojson,\n wait_time_cost_method,\n existing_graph_nodes)\n\n return make_synthetic_system_network(\n G,\n name,\n as_synthetic_network,\n connection_threshold,\n walk_speed_kmph,\n impute_walk_transfers)\n" ]
[ [ "numpy.array" ] ]
plertvilai/birdCam_jetson
[ "8e74bbc81c289b3e0158edbd471fda0f3ed2b9fb" ]
[ "python/birdVid_ML/JetsonYolo.py" ]
[ "import cv2\nimport numpy as np\nfrom elements.yolo import OBJ_DETECTION\n\nObject_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',\n 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',\n 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',\n 'hair drier', 'toothbrush' ]\n\nObject_colors = list(np.random.rand(80,3)*255)\nObject_detector = OBJ_DETECTION('weights/yolov5s.pt', Object_classes)\n\ndef gstreamer_pipeline(\n capture_width=1280,\n capture_height=720,\n display_width=1280,\n display_height=720,\n framerate=60,\n flip_method=0,\n):\n return (\n \"nvarguscamerasrc ! \"\n \"video/x-raw(memory:NVMM), \"\n \"width=(int)%d, height=(int)%d, \"\n \"format=(string)NV12, framerate=(fraction)%d/1 ! \"\n \"nvvidconv flip-method=%d ! \"\n \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\n \"videoconvert ! \"\n \"video/x-raw, format=(string)BGR ! appsink\"\n % (\n capture_width,\n capture_height,\n framerate,\n flip_method,\n display_width,\n display_height,\n )\n )\n\n\n# To flip the image, modify the flip_method parameter (0 and 2 are the most common)\nprint(gstreamer_pipeline(flip_method=0))\n\n# cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)\ncap = cv2.VideoCapture(\"1627775013.mp4\")\nif cap.isOpened():\n window_handle = cv2.namedWindow(\"CSI Camera\", cv2.WINDOW_AUTOSIZE)\n # Window\n while cv2.getWindowProperty(\"CSI Camera\", 0) >= 0:\n ret, frame = cap.read()\n if ret and not(frame is None):\n # detection process\n objs = Object_detector.detect(frame)\n\n # plotting\n for obj in objs:\n # print(obj)\n label = obj['label']\n score = obj['score']\n [(xmin,ymin),(xmax,ymax)] = obj['bbox']\n color = Object_colors[Object_classes.index(label)]\n frame = cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), color, 2) \n frame = cv2.putText(frame, f'{label} ({str(score)})', (xmin,ymin), cv2.FONT_HERSHEY_SIMPLEX , 0.75, color, 1, cv2.LINE_AA)\n else:\n break\n cv2.imshow(\"CSI Camera\", frame)\n keyCode = cv2.waitKey(30)\n if keyCode == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\nelse:\n print(\"Unable to open camera\")\n" ]
[ [ "numpy.random.rand" ] ]
andyljones/ray
[ "52dfde1cbb7131fd62ebcb00f5a2b22ced7321ad" ]
[ "test/runtest.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport logging\nimport os\nimport random\nimport re\nimport setproctitle\nimport shutil\nimport socket\nimport string\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\nfrom collections import defaultdict, namedtuple, OrderedDict\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport numpy as np\nimport pickle\nimport pytest\n\nimport ray\nimport ray.test.cluster_utils\nimport ray.test.test_utils\nfrom ray.utils import _random_string\n\nlogger = logging.getLogger(__name__)\n\n\ndef assert_equal(obj1, obj2):\n module_numpy = (type(obj1).__module__ == np.__name__\n or type(obj2).__module__ == np.__name__)\n if module_numpy:\n empty_shape = ((hasattr(obj1, \"shape\") and obj1.shape == ())\n or (hasattr(obj2, \"shape\") and obj2.shape == ()))\n if empty_shape:\n # This is a special case because currently np.testing.assert_equal\n # fails because we do not properly handle different numerical\n # types.\n assert obj1 == obj2, (\"Objects {} and {} are \"\n \"different.\".format(obj1, obj2))\n else:\n np.testing.assert_equal(obj1, obj2)\n elif hasattr(obj1, \"__dict__\") and hasattr(obj2, \"__dict__\"):\n special_keys = [\"_pytype_\"]\n assert (set(list(obj1.__dict__.keys()) + special_keys) == set(\n list(obj2.__dict__.keys()) + special_keys)), (\"Objects {} \"\n \"and {} are \"\n \"different.\".format(\n obj1, obj2))\n for key in obj1.__dict__.keys():\n if key not in special_keys:\n assert_equal(obj1.__dict__[key], obj2.__dict__[key])\n elif type(obj1) is dict or type(obj2) is dict:\n assert_equal(obj1.keys(), obj2.keys())\n for key in obj1.keys():\n assert_equal(obj1[key], obj2[key])\n elif type(obj1) is list or type(obj2) is list:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are lists with \"\n \"different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif type(obj1) is tuple or type(obj2) is tuple:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are tuples with \"\n \"different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif (ray.serialization.is_named_tuple(type(obj1))\n or ray.serialization.is_named_tuple(type(obj2))):\n assert len(obj1) == len(obj2), (\"Objects {} and {} are named tuples \"\n \"with different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n else:\n assert obj1 == obj2, \"Objects {} and {} are different.\".format(\n obj1, obj2)\n\n\nif sys.version_info >= (3, 0):\n long_extras = [0, np.array([[\"hi\", u\"hi\"], [1.3, 1]])]\nelse:\n\n long_extras = [\n long(0), # noqa: E501,F821\n np.array([\n [\"hi\", u\"hi\"],\n [1.3, long(1)] # noqa: E501,F821\n ])\n ]\n\nPRIMITIVE_OBJECTS = [\n 0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], \"a\",\n string.printable, \"\\u262F\", u\"hello world\", u\"\\xff\\xfe\\x9c\\x001\\x000\\x00\",\n None, True, False, [], (), {},\n np.int8(3),\n np.int32(4),\n np.int64(5),\n np.uint8(3),\n np.uint32(4),\n np.uint64(5),\n np.float32(1.9),\n np.float64(1.9),\n np.zeros([100, 100]),\n np.random.normal(size=[100, 100]),\n np.array([\"hi\", 3]),\n np.array([\"hi\", 3], dtype=object)\n] + long_extras\n\nCOMPLEX_OBJECTS = [\n [[[[[[[[[[[[]]]]]]]]]]]],\n {\"obj{}\".format(i): np.random.normal(size=[100, 100])\n for i in range(10)},\n # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {\n # (): {(): 
{}}}}}}}}}}}}},\n (\n (((((((((), ), ), ), ), ), ), ), ), ),\n {\n \"a\": {\n \"b\": {\n \"c\": {\n \"d\": {}\n }\n }\n }\n }\n]\n\n\nclass Foo(object):\n def __init__(self, value=0):\n self.value = value\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n return other.value == self.value\n\n\nclass Bar(object):\n def __init__(self):\n for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):\n setattr(self, \"field{}\".format(i), val)\n\n\nclass Baz(object):\n def __init__(self):\n self.foo = Foo()\n self.bar = Bar()\n\n def method(self, arg):\n pass\n\n\nclass Qux(object):\n def __init__(self):\n self.objs = [Foo(), Bar(), Baz()]\n\n\nclass SubQux(Qux):\n def __init__(self):\n Qux.__init__(self)\n\n\nclass CustomError(Exception):\n pass\n\n\nPoint = namedtuple(\"Point\", [\"x\", \"y\"])\nNamedTupleExample = namedtuple(\"Example\",\n \"field1, field2, field3, field4, field5\")\n\nCUSTOM_OBJECTS = [\n Exception(\"Test object.\"),\n CustomError(),\n Point(11, y=22),\n Foo(),\n Bar(),\n Baz(), # Qux(), SubQux(),\n NamedTupleExample(1, 1.0, \"hi\", np.zeros([3, 5]), [1, 2, 3])\n]\n\nBASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS\n\nLIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]\nTUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]\n# The check that type(obj).__module__ != \"numpy\" should be unnecessary, but\n# otherwise this seems to fail on Mac OS X on Travis.\nDICT_OBJECTS = (\n [{\n obj: obj\n } for obj in PRIMITIVE_OBJECTS\n if (obj.__hash__ is not None and type(obj).__module__ != \"numpy\")] + [{\n 0: obj\n } for obj in BASE_OBJECTS] + [{\n Foo(123): Foo(456)\n }])\n\nRAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS\n\n\[email protected]\ndef ray_start():\n # Start the Ray processes.\n ray.init(num_cpus=1)\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\[email protected]\ndef shutdown_only():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\ndef test_passing_arguments_by_value(ray_start):\n @ray.remote\n def f(x):\n return x\n\n # Check that we can pass arguments by value to remote functions and\n # that they are uncorrupted.\n for obj in RAY_TEST_OBJECTS:\n assert_equal(obj, ray.get(f.remote(obj)))\n\n\ndef test_ray_recursive_objects(ray_start):\n class ClassA(object):\n pass\n\n # Make a list that contains itself.\n lst = []\n lst.append(lst)\n # Make an object that contains itself as a field.\n a1 = ClassA()\n a1.field = a1\n # Make two objects that contain each other as fields.\n a2 = ClassA()\n a3 = ClassA()\n a2.field = a3\n a3.field = a2\n # Make a dictionary that contains itself.\n d1 = {}\n d1[\"key\"] = d1\n # Create a list of recursive objects.\n recursive_objects = [lst, a1, a2, a3, d1]\n\n # Check that exceptions are thrown when we serialize the recursive\n # objects.\n for obj in recursive_objects:\n with pytest.raises(Exception):\n ray.put(obj)\n\n\ndef test_passing_arguments_by_value_out_of_the_box(ray_start):\n @ray.remote\n def f(x):\n return x\n\n # Test passing lambdas.\n\n def temp():\n return 1\n\n assert ray.get(f.remote(temp))() == 1\n assert ray.get(f.remote(lambda x: x + 1))(3) == 4\n\n # Test sets.\n assert ray.get(f.remote(set())) == set()\n s = {1, (1, 2, \"hi\")}\n assert ray.get(f.remote(s)) == s\n\n # Test types.\n assert ray.get(f.remote(int)) == int\n assert ray.get(f.remote(float)) == float\n assert ray.get(f.remote(str)) == str\n\n class Foo(object):\n def __init__(self):\n pass\n\n # Make 
sure that we can put and get a custom type. Note that the result\n # won't be \"equal\" to Foo.\n ray.get(ray.put(Foo))\n\n\ndef test_putting_object_that_closes_over_object_id(ray_start):\n # This test is here to prevent a regression of\n # https://github.com/ray-project/ray/issues/1317.\n\n class Foo(object):\n def __init__(self):\n self.val = ray.put(0)\n\n def method(self):\n f\n\n f = Foo()\n ray.put(f)\n\n\ndef test_put_get(shutdown_only):\n ray.init(num_cpus=0)\n\n for i in range(100):\n value_before = i * 10**6\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = i * 10**6 * 1.0\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = \"h\" * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = [1] * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n\ndef test_custom_serializers(shutdown_only):\n ray.init(num_cpus=1)\n\n class Foo(object):\n def __init__(self):\n self.x = 3\n\n def custom_serializer(obj):\n return 3, \"string1\", type(obj).__name__\n\n def custom_deserializer(serialized_obj):\n return serialized_obj, \"string2\"\n\n ray.register_custom_serializer(\n Foo, serializer=custom_serializer, deserializer=custom_deserializer)\n\n assert ray.get(ray.put(Foo())) == ((3, \"string1\", Foo.__name__), \"string2\")\n\n class Bar(object):\n def __init__(self):\n self.x = 3\n\n ray.register_custom_serializer(\n Bar, serializer=custom_serializer, deserializer=custom_deserializer)\n\n @ray.remote\n def f():\n return Bar()\n\n assert ray.get(f.remote()) == ((3, \"string1\", Bar.__name__), \"string2\")\n\n\ndef test_serialization_final_fallback(ray_start):\n pytest.importorskip(\"catboost\")\n # This test will only run when \"catboost\" is installed.\n from catboost import CatBoostClassifier\n\n model = CatBoostClassifier(\n iterations=2,\n depth=2,\n learning_rate=1,\n loss_function=\"Logloss\",\n logging_level=\"Verbose\")\n\n reconstructed_model = ray.get(ray.put(model))\n assert set(model.get_params().items()) == set(\n reconstructed_model.get_params().items())\n\n\ndef test_register_class(shutdown_only):\n ray.init(num_cpus=2)\n\n # Check that putting an object of a class that has not been registered\n # throws an exception.\n class TempClass(object):\n pass\n\n ray.get(ray.put(TempClass()))\n\n # Test subtypes of dictionaries.\n value_before = OrderedDict([(\"hello\", 1), (\"world\", 2)])\n object_id = ray.put(value_before)\n assert value_before == ray.get(object_id)\n\n value_before = defaultdict(lambda: 0, [(\"hello\", 1), (\"world\", 2)])\n object_id = ray.put(value_before)\n assert value_before == ray.get(object_id)\n\n value_before = defaultdict(lambda: [], [(\"hello\", 1), (\"world\", 2)])\n object_id = ray.put(value_before)\n assert value_before == ray.get(object_id)\n\n # Test passing custom classes into remote functions from the driver.\n @ray.remote\n def f(x):\n return x\n\n foo = ray.get(f.remote(Foo(7)))\n assert foo == Foo(7)\n\n regex = re.compile(r\"\\d+\\.\\d*\")\n new_regex = ray.get(f.remote(regex))\n # This seems to fail on the system Python 3 that comes with\n # Ubuntu, so it is commented out for now:\n # assert regex == new_regex\n # Instead, we do this:\n assert regex.pattern == new_regex.pattern\n\n # Test returning 
custom classes created on workers.\n @ray.remote\n def g():\n return SubQux(), Qux()\n\n subqux, qux = ray.get(g.remote())\n assert subqux.objs[2].foo.value == 0\n\n # Test exporting custom class definitions from one worker to another\n # when the worker is blocked in a get.\n class NewTempClass(object):\n def __init__(self, value):\n self.value = value\n\n @ray.remote\n def h1(x):\n return NewTempClass(x)\n\n @ray.remote\n def h2(x):\n return ray.get(h1.remote(x))\n\n assert ray.get(h2.remote(10)).value == 10\n\n # Test registering multiple classes with the same name.\n @ray.remote(num_return_vals=3)\n def j():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = []\n for _ in range(5):\n results += j.remote()\n for i in range(len(results) // 3):\n c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])\n\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n @ray.remote\n def k():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = ray.get([k.remote() for _ in range(5)])\n for c0, c1, c2 in results:\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n\ndef test_keyword_args(shutdown_only):\n @ray.remote\n def keyword_fct1(a, b=\"hello\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct2(a=\"hello\", b=\"world\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct3(a, b, c=\"hello\", d=\"world\"):\n return \"{} {} {} {}\".format(a, b, c, d)\n\n ray.init(num_cpus=1)\n\n x = keyword_fct1.remote(1)\n assert ray.get(x) == \"1 hello\"\n x = keyword_fct1.remote(1, \"hi\")\n assert ray.get(x) == \"1 hi\"\n x = keyword_fct1.remote(1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n x = keyword_fct1.remote(a=1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n\n x = keyword_fct2.remote(a=\"w\", b=\"hi\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(b=\"hi\", a=\"w\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(a=\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(b=\"hi\")\n assert ray.get(x) == \"hello hi\"\n x = keyword_fct2.remote(\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(\"w\", \"hi\")\n assert ray.get(x) == \"w hi\"\n\n x = keyword_fct3.remote(0, 1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(a=0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, d=\"hi\", c=\"w\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, c=\"w\")\n assert ray.get(x) == \"0 1 w world\"\n x = keyword_fct3.remote(0, 1, d=\"hi\")\n assert ray.get(x) == \"0 1 hello hi\"\n x = keyword_fct3.remote(0, 1)\n assert 
ray.get(x) == \"0 1 hello world\"\n    x = keyword_fct3.remote(a=0, b=1)\n    assert ray.get(x) == \"0 1 hello world\"\n\n    # Check that we cannot pass invalid keyword arguments to functions.\n    @ray.remote\n    def f1():\n        return\n\n    @ray.remote\n    def f2(x, y=0, z=0):\n        return\n\n    # Make sure we get an exception if too many arguments are passed in.\n    with pytest.raises(Exception):\n        f1.remote(3)\n\n    with pytest.raises(Exception):\n        f1.remote(x=3)\n\n    with pytest.raises(Exception):\n        f2.remote(0, w=0)\n\n    with pytest.raises(Exception):\n        f2.remote(3, x=3)\n\n    # Make sure we get an exception if too many arguments are passed in.\n    with pytest.raises(Exception):\n        f2.remote(1, 2, 3, 4)\n\n    @ray.remote\n    def f3(x):\n        return x\n\n    assert ray.get(f3.remote(4)) == 4\n\n\ndef test_variable_number_of_args(shutdown_only):\n    @ray.remote\n    def varargs_fct1(*a):\n        return \" \".join(map(str, a))\n\n    @ray.remote\n    def varargs_fct2(a, *b):\n        return \" \".join(map(str, b))\n\n    try:\n\n        @ray.remote\n        def kwargs_throw_exception(**c):\n            return ()\n\n        kwargs_exception_thrown = False\n    except Exception:\n        kwargs_exception_thrown = True\n\n    ray.init(num_cpus=1)\n\n    x = varargs_fct1.remote(0, 1, 2)\n    assert ray.get(x) == \"0 1 2\"\n    x = varargs_fct2.remote(0, 1, 2)\n    assert ray.get(x) == \"1 2\"\n\n    assert kwargs_exception_thrown\n\n    @ray.remote\n    def f1(*args):\n        return args\n\n    @ray.remote\n    def f2(x, y, *args):\n        return x, y, args\n\n    assert ray.get(f1.remote()) == ()\n    assert ray.get(f1.remote(1)) == (1, )\n    assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)\n    with pytest.raises(Exception):\n        f2.remote()\n    with pytest.raises(Exception):\n        f2.remote(1)\n    assert ray.get(f2.remote(1, 2)) == (1, 2, ())\n    assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))\n    assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))\n\n    def testNoArgs(self):\n        @ray.remote\n        def no_op():\n            pass\n\n        self.init_ray()\n\n        ray.get(no_op.remote())\n\n\ndef test_defining_remote_functions(shutdown_only):\n    ray.init(num_cpus=3)\n\n    # Test that we can define a remote function in the shell.\n    @ray.remote\n    def f(x):\n        return x + 1\n\n    assert ray.get(f.remote(0)) == 1\n\n    # Test that we can redefine the remote function.\n    @ray.remote\n    def f(x):\n        return x + 10\n\n    while True:\n        val = ray.get(f.remote(0))\n        assert val in [1, 10]\n        if val == 10:\n            break\n        else:\n            logger.info(\"Still using old definition of f, trying again.\")\n\n    # Test that we can close over plain old data.\n    data = [\n        np.zeros([3, 5]), (1, 2, \"a\"), [0.0, 1.0, 1 << 62], 1 << 60, {\n            \"a\": np.zeros(3)\n        }\n    ]\n\n    @ray.remote\n    def g():\n        return data\n\n    ray.get(g.remote())\n\n    # Test that we can close over modules.\n    @ray.remote\n    def h():\n        return np.zeros([3, 5])\n\n    assert_equal(ray.get(h.remote()), np.zeros([3, 5]))\n\n    @ray.remote\n    def j():\n        return time.time()\n\n    ray.get(j.remote())\n\n    # Test that we can define remote functions that call other remote\n    # functions.\n    @ray.remote\n    def k(x):\n        return x + 1\n\n    @ray.remote\n    def k2(x):\n        return ray.get(k.remote(x))\n\n    @ray.remote\n    def m(x):\n        return ray.get(k2.remote(x))\n\n    assert ray.get(k.remote(1)) == 2\n    assert ray.get(k2.remote(1)) == 2\n    assert ray.get(m.remote(1)) == 2\n\n\ndef test_submit_api(shutdown_only):\n    ray.init(num_cpus=1, num_gpus=1, resources={\"Custom\": 1})\n\n    @ray.remote\n    def f(n):\n        return list(range(n))\n\n    @ray.remote\n    def g():\n        return ray.get_gpu_ids()\n\n    assert f._remote([0], num_return_vals=0) is None\n    id1 = f._remote(args=[1], num_return_vals=1)\n    assert ray.get(id1) == [0]\n    id1, id2 = f._remote(args=[2], num_return_vals=2)\n    assert ray.get([id1, id2]) == [0, 1]\n    id1, id2, id3 = f._remote(args=[3], num_return_vals=3)\n    assert ray.get([id1, id2, id3]) == [0, 1, 2]\n    assert ray.get(\n        g._remote(\n            args=[], num_cpus=1, num_gpus=1,\n            resources={\"Custom\": 1})) == [0]\n    infeasible_id = g._remote(args=[], resources={\"NonexistentCustom\": 1})\n    ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)\n    assert len(ready_ids) == 0\n    assert len(remaining_ids) == 1\n\n    @ray.remote\n    class Actor(object):\n        def __init__(self, x, y=0):\n            self.x = x\n            self.y = y\n\n        def method(self, a, b=0):\n            return self.x, self.y, a, b\n\n        def gpu_ids(self):\n            return ray.get_gpu_ids()\n\n    a = Actor._remote(\n        args=[0], kwargs={\"y\": 1}, num_gpus=1, resources={\"Custom\": 1})\n\n    id1, id2, id3, id4 = a.method._remote(\n        args=[\"test\"], kwargs={\"b\": 2}, num_return_vals=4)\n    assert ray.get([id1, id2, id3, id4]) == [0, 1, \"test\", 2]\n\n\ndef test_get_multiple(shutdown_only):\n    ray.init(num_cpus=1)\n    object_ids = [ray.put(i) for i in range(10)]\n    assert ray.get(object_ids) == list(range(10))\n\n    # Get a random choice of object IDs with duplicates.\n    indices = list(np.random.choice(range(10), 5))\n    indices += indices\n    results = ray.get([object_ids[i] for i in indices])\n    assert results == indices\n\n\ndef test_get_multiple_experimental(shutdown_only):\n    ray.init(num_cpus=1)\n    object_ids = [ray.put(i) for i in range(10)]\n\n    object_ids_tuple = tuple(object_ids)\n    assert ray.experimental.get(object_ids_tuple) == list(range(10))\n\n    object_ids_nparray = np.array(object_ids)\n    assert ray.experimental.get(object_ids_nparray) == list(range(10))\n\n\ndef test_get_dict(shutdown_only):\n    ray.init(num_cpus=1)\n    d = {str(i): ray.put(i) for i in range(5)}\n    for i in range(5, 10):\n        d[str(i)] = i\n    result = ray.experimental.get(d)\n    expected = {str(i): i for i in range(10)}\n    assert result == expected\n\n\ndef test_wait(shutdown_only):\n    ray.init(num_cpus=1)\n\n    @ray.remote\n    def f(delay):\n        time.sleep(delay)\n        return 1\n\n    objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n    ready_ids, remaining_ids = ray.wait(objectids)\n    assert len(ready_ids) == 1\n    assert len(remaining_ids) == 3\n    ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)\n    assert set(ready_ids) == set(objectids)\n    assert remaining_ids == []\n\n    objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n    start_time = time.time()\n    ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)\n    assert time.time() - start_time < 2\n    assert len(ready_ids) == 3\n    assert len(remaining_ids) == 1\n    ray.wait(objectids)\n    objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n    start_time = time.time()\n    ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)\n    assert time.time() - start_time < 5\n    assert len(ready_ids) == 1\n    assert len(remaining_ids) == 3\n\n    # Verify that calling wait with duplicate object IDs throws an\n    # exception.\n    x = ray.put(1)\n    with pytest.raises(Exception):\n        ray.wait([x, x])\n\n    # Make sure it is possible to call wait with an empty list.\n    ready_ids, remaining_ids = ray.wait([])\n    assert ready_ids == []\n    assert remaining_ids == []\n\n    # Test semantics of num_returns with no timeout.\n    oids = [ray.put(i) for i in range(10)]\n    (found, rest) = ray.wait(oids, num_returns=2)\n    assert len(found) == 2\n    assert len(rest) == 8\n\n    # Verify that incorrect usage raises a TypeError.\n    x = ray.put(1)\n    with pytest.raises(TypeError):\n        
ray.wait(x)\n with pytest.raises(TypeError):\n ray.wait(1)\n with pytest.raises(TypeError):\n ray.wait([1])\n\n\ndef test_wait_iterables(shutdown_only):\n ray.init(num_cpus=1)\n\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n objectids = np.array(\n [f.remote(1.0),\n f.remote(0.5),\n f.remote(0.5),\n f.remote(0.5)])\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n\ndef test_multiple_waits_and_gets(shutdown_only):\n # It is important to use three workers here, so that the three tasks\n # launched in this experiment can run at the same time.\n ray.init(num_cpus=3)\n\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n @ray.remote\n def g(l):\n # The argument l should be a list containing one object ID.\n ray.wait([l[0]])\n\n @ray.remote\n def h(l):\n # The argument l should be a list containing one object ID.\n ray.get(l[0])\n\n # Make sure that multiple wait requests involving the same object ID\n # all return.\n x = f.remote(1)\n ray.get([g.remote([x]), g.remote([x])])\n\n # Make sure that multiple get requests involving the same object ID all\n # return.\n x = f.remote(1)\n ray.get([h.remote([x]), h.remote([x])])\n\n\ndef test_caching_functions_to_run(shutdown_only):\n # Test that we export functions to run on all workers before the driver\n # is connected.\n def f(worker_info):\n sys.path.append(1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def f(worker_info):\n sys.path.append(2)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def g(worker_info):\n sys.path.append(3)\n\n ray.worker.global_worker.run_function_on_all_workers(g)\n\n def f(worker_info):\n sys.path.append(4)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n ray.init(num_cpus=1)\n\n @ray.remote\n def get_state():\n time.sleep(1)\n return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]\n\n res1 = get_state.remote()\n res2 = get_state.remote()\n assert ray.get(res1) == (1, 2, 3, 4)\n assert ray.get(res2) == (1, 2, 3, 4)\n\n # Clean up the path on the workers.\n def f(worker_info):\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n\ndef test_running_function_on_all_workers(shutdown_only):\n ray.init(num_cpus=1)\n\n def f(worker_info):\n sys.path.append(\"fake_directory\")\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n @ray.remote\n def get_path1():\n return sys.path\n\n assert \"fake_directory\" == ray.get(get_path1.remote())[-1]\n\n def f(worker_info):\n sys.path.pop(-1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n # Create a second remote function to guarantee that when we call\n # get_path2.remote(), the second function to run will have been run on\n # the worker.\n @ray.remote\n def get_path2():\n return sys.path\n\n assert \"fake_directory\" not in ray.get(get_path2.remote())\n\n\ndef test_profiling_api(shutdown_only):\n ray.init(num_cpus=2)\n\n @ray.remote\n def f():\n with ray.profile(\n \"custom_event\",\n extra_data={\"name\": \"custom name\"}) as ray_prof:\n ray_prof.set_attribute(\"key\", \"value\")\n\n ray.put(1)\n object_id = f.remote()\n ray.wait([object_id])\n ray.get(object_id)\n\n # Wait until all of the profiling information appears in 
the profile\n # table.\n timeout_seconds = 20\n start_time = time.time()\n while True:\n if time.time() - start_time > timeout_seconds:\n raise Exception(\"Timed out while waiting for information in \"\n \"profile table.\")\n profile_data = ray.global_state.chrome_tracing_dump()\n event_types = {event[\"cat\"] for event in profile_data}\n expected_types = [\n \"worker_idle\",\n \"task\",\n \"task:deserialize_arguments\",\n \"task:execute\",\n \"task:store_outputs\",\n \"wait_for_function\",\n \"ray.get\",\n \"ray.put\",\n \"ray.wait\",\n \"submit_task\",\n \"fetch_and_run_function\",\n \"register_remote_function\",\n \"custom_event\", # This is the custom one from ray.profile.\n ]\n\n if all(expected_type in event_types\n for expected_type in expected_types):\n break\n\n\[email protected]()\ndef ray_start_cluster():\n cluster = ray.test.cluster_utils.Cluster()\n yield cluster\n\n # The code after the yield will run as teardown code.\n ray.shutdown()\n cluster.shutdown()\n\n\ndef test_object_transfer_dump(ray_start_cluster):\n cluster = ray_start_cluster\n\n num_nodes = 3\n # Set the inline object size to 0 to force all objects to be written to\n # plasma.\n config = json.dumps({\"inline_object_max_size_bytes\": 0})\n for i in range(num_nodes):\n cluster.add_node(\n resources={str(i): 1},\n object_store_memory=10**9,\n _internal_config=config)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f(x):\n return\n\n # These objects will live on different nodes.\n object_ids = [\n f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)\n ]\n\n # Broadcast each object from each machine to each other machine.\n for object_id in object_ids:\n ray.get([\n f._remote(args=[object_id], resources={str(i): 1})\n for i in range(num_nodes)\n ])\n\n # The profiling information only flushes once every second.\n time.sleep(1.1)\n\n transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()\n # Make sure the transfer dump can be serialized with JSON.\n json.loads(json.dumps(transfer_dump))\n assert len(transfer_dump) >= num_nodes**2\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_receive\"\n }) == num_nodes\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_send\"\n }) == num_nodes\n\n\ndef test_identical_function_names(shutdown_only):\n # Define a bunch of remote functions and make sure that we don't\n # accidentally call an older version.\n ray.init(num_cpus=1)\n\n num_calls = 200\n\n @ray.remote\n def f():\n return 1\n\n results1 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 2\n\n results2 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 3\n\n results3 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 4\n\n results4 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 5\n\n results5 = [f.remote() for _ in range(num_calls)]\n\n assert ray.get(results1) == num_calls * [1]\n assert ray.get(results2) == num_calls * [2]\n assert ray.get(results3) == num_calls * [3]\n assert ray.get(results4) == num_calls * [4]\n assert ray.get(results5) == num_calls * [5]\n\n @ray.remote\n def g():\n return 1\n\n @ray.remote # noqa: F811\n def g():\n return 2\n\n @ray.remote # noqa: F811\n def g():\n return 3\n\n @ray.remote # noqa: F811\n def g():\n return 4\n\n @ray.remote # noqa: F811\n def g():\n return 5\n\n result_values = ray.get([g.remote() for _ in range(num_calls)])\n assert 
result_values == num_calls * [5]\n\n\ndef test_illegal_api_calls(shutdown_only):\n    ray.init(num_cpus=1)\n\n    # Verify that we cannot call put on an ObjectID.\n    x = ray.put(1)\n    with pytest.raises(Exception):\n        ray.put(x)\n    # Verify that we cannot call get on a regular value.\n    with pytest.raises(Exception):\n        ray.get(3)\n\n\ndef test_multithreading(shutdown_only):\n    # This test requires at least 2 CPUs to finish since the worker does not\n    # release resources when joining the threads.\n    ray.init(num_cpus=2)\n\n    def run_test_in_multi_threads(test_case, num_threads=20, num_repeats=50):\n        \"\"\"A helper function that runs test cases in multiple threads.\"\"\"\n\n        def wrapper():\n            for _ in range(num_repeats):\n                test_case()\n                time.sleep(random.randint(0, 10) / 1000.0)\n            return \"ok\"\n\n        executor = ThreadPoolExecutor(max_workers=num_threads)\n        futures = [executor.submit(wrapper) for _ in range(num_threads)]\n        for future in futures:\n            assert future.result() == \"ok\"\n\n    @ray.remote\n    def echo(value, delay_ms=0):\n        if delay_ms > 0:\n            time.sleep(delay_ms / 1000.0)\n        return value\n\n    @ray.remote\n    class Echo(object):\n        def echo(self, value):\n            return value\n\n    def test_api_in_multi_threads():\n        \"\"\"Test using Ray api in multiple threads.\"\"\"\n\n        # Test calling remote functions in multiple threads.\n        def test_remote_call():\n            value = random.randint(0, 1000000)\n            result = ray.get(echo.remote(value))\n            assert value == result\n\n        run_test_in_multi_threads(test_remote_call)\n\n        # Test multiple threads calling one actor.\n        actor = Echo.remote()\n\n        def test_call_actor():\n            value = random.randint(0, 1000000)\n            result = ray.get(actor.echo.remote(value))\n            assert value == result\n\n        run_test_in_multi_threads(test_call_actor)\n\n        # Test put and get.\n        def test_put_and_get():\n            value = random.randint(0, 1000000)\n            result = ray.get(ray.put(value))\n            assert value == result\n\n        run_test_in_multi_threads(test_put_and_get)\n\n        # Test multiple threads waiting for objects.\n        num_wait_objects = 10\n        objects = [\n            echo.remote(i, delay_ms=10) for i in range(num_wait_objects)\n        ]\n\n        def test_wait():\n            ready, _ = ray.wait(\n                objects,\n                num_returns=len(objects),\n                timeout=1000.0,\n            )\n            assert len(ready) == num_wait_objects\n            assert ray.get(ready) == list(range(num_wait_objects))\n\n        run_test_in_multi_threads(test_wait, num_repeats=1)\n\n    # Run tests in a driver.\n    test_api_in_multi_threads()\n\n    # Run tests in a worker.\n    @ray.remote\n    def run_tests_in_worker():\n        test_api_in_multi_threads()\n        return \"ok\"\n\n    assert ray.get(run_tests_in_worker.remote()) == \"ok\"\n\n    # Test actor that runs background threads.\n    @ray.remote\n    class MultithreadedActor(object):\n        def __init__(self):\n            self.lock = threading.Lock()\n            self.thread_results = []\n\n        def background_thread(self, wait_objects):\n            try:\n                # Test wait\n                ready, _ = ray.wait(\n                    wait_objects,\n                    num_returns=len(wait_objects),\n                    timeout=1000.0,\n                )\n                assert len(ready) == len(wait_objects)\n                for _ in range(50):\n                    num = 20\n                    # Test remote call\n                    results = [echo.remote(i) for i in range(num)]\n                    assert ray.get(results) == list(range(num))\n                    # Test put and get\n                    objects = [ray.put(i) for i in range(num)]\n                    assert ray.get(objects) == list(range(num))\n                    time.sleep(random.randint(0, 10) / 1000.0)\n            except Exception as e:\n                with self.lock:\n                    self.thread_results.append(e)\n            else:\n                with self.lock:\n                    self.thread_results.append(\"ok\")\n\n        def spawn(self):\n            wait_objects = [echo.remote(i, delay_ms=10) for i in range(20)]\n            self.threads = [\n                threading.Thread(\n                    
target=self.background_thread, args=(wait_objects, ))\n                for _ in range(20)\n            ]\n            [thread.start() for thread in self.threads]\n\n        def join(self):\n            [thread.join() for thread in self.threads]\n            assert self.thread_results == [\"ok\"] * len(self.threads)\n            return \"ok\"\n\n    actor = MultithreadedActor.remote()\n    actor.spawn.remote()\n    assert ray.get(actor.join.remote()) == \"ok\"\n\n\ndef test_free_objects_multi_node(ray_start_cluster):\n    # This test will do following:\n    # 1. Create 3 raylets that each hold an actor.\n    # 2. Each actor creates an object which is the deletion target.\n    # 3. Invoke 64 methods on each actor to flush plasma client.\n    # 4. After flushing, the plasma client releases the targets.\n    # 5. Check that the deletion targets have been deleted.\n    # Caution: if remote functions are used instead of actor methods,\n    # one raylet may create more than one worker to execute the\n    # tasks, so the flushing operations may be executed in different\n    # workers and the plasma client holding the deletion target\n    # may not be flushed.\n    cluster = ray_start_cluster\n    config = json.dumps({\"object_manager_repeated_push_delay_ms\": 1000})\n    for i in range(3):\n        cluster.add_node(\n            num_cpus=1,\n            resources={\"Custom{}\".format(i): 1},\n            _internal_config=config)\n    ray.init(redis_address=cluster.redis_address)\n\n    @ray.remote(resources={\"Custom0\": 1})\n    class ActorOnNode0(object):\n        def get(self):\n            return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"Custom1\": 1})\n    class ActorOnNode1(object):\n        def get(self):\n            return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"Custom2\": 1})\n    class ActorOnNode2(object):\n        def get(self):\n            return ray.worker.global_worker.plasma_client.store_socket_name\n\n    def create(actors):\n        a = actors[0].get.remote()\n        b = actors[1].get.remote()\n        c = actors[2].get.remote()\n        (l1, l2) = ray.wait([a, b, c], num_returns=3)\n        assert len(l1) == 3\n        assert len(l2) == 0\n        return (a, b, c)\n\n    def flush(actors):\n        # Flush the Release History.\n        # Current Plasma Client Cache will maintain 64-item list.\n        # If the number changed, this will fail.\n        logger.info(\"Start Flush!\")\n        for i in range(64):\n            ray.get([actor.get.remote() for actor in actors])\n        logger.info(\"Flush finished!\")\n\n    def run_one_test(actors, local_only):\n        (a, b, c) = create(actors)\n        # The three objects should be generated on different object stores.\n        assert ray.get(a) != ray.get(b)\n        assert ray.get(a) != ray.get(c)\n        assert ray.get(c) != ray.get(b)\n        ray.internal.free([a, b, c], local_only=local_only)\n        flush(actors)\n        return (a, b, c)\n\n    actors = [\n        ActorOnNode0.remote(),\n        ActorOnNode1.remote(),\n        ActorOnNode2.remote()\n    ]\n    # Case 1: run this local_only=False. All 3 objects will be deleted.\n    (a, b, c) = run_one_test(actors, False)\n    (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)\n    # All the objects are deleted.\n    assert len(l1) == 0\n    assert len(l2) == 3\n    # Case 2: run this local_only=True. 
Only 1 object will be deleted.\n (a, b, c) = run_one_test(actors, True)\n (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)\n # One object is deleted and 2 objects are not.\n assert len(l1) == 2\n assert len(l2) == 1\n # The deleted object will have the same store with the driver.\n local_return = ray.worker.global_worker.plasma_client.store_socket_name\n for object_id in l1:\n assert ray.get(object_id) != local_return\n\n\ndef test_local_mode(shutdown_only):\n @ray.remote\n def local_mode_f():\n return np.array([0, 0])\n\n @ray.remote\n def local_mode_g(x):\n x[0] = 1\n return x\n\n ray.init(local_mode=True)\n\n @ray.remote\n def f():\n return np.ones([3, 4, 5])\n\n xref = f.remote()\n # Remote functions should return by value.\n assert_equal(xref, np.ones([3, 4, 5]))\n # Check that ray.get is the identity.\n assert_equal(xref, ray.get(xref))\n y = np.random.normal(size=[11, 12])\n # Check that ray.put is the identity.\n assert_equal(y, ray.put(y))\n\n # Make sure objects are immutable, this example is why we need to copy\n # arguments before passing them into remote functions in python mode\n aref = local_mode_f.remote()\n assert_equal(aref, np.array([0, 0]))\n bref = local_mode_g.remote(aref)\n # Make sure local_mode_g does not mutate aref.\n assert_equal(aref, np.array([0, 0]))\n assert_equal(bref, np.array([1, 0]))\n\n # wait should return the first num_returns values passed in as the\n # first list and the remaining values as the second list\n num_returns = 5\n object_ids = [ray.put(i) for i in range(20)]\n ready, remaining = ray.wait(\n object_ids, num_returns=num_returns, timeout=None)\n assert_equal(ready, object_ids[:num_returns])\n assert_equal(remaining, object_ids[num_returns:])\n\n # Test actors in LOCAL_MODE.\n\n @ray.remote\n class LocalModeTestClass(object):\n def __init__(self, array):\n self.array = array\n\n def set_array(self, array):\n self.array = array\n\n def get_array(self):\n return self.array\n\n def modify_and_set_array(self, array):\n array[0] = -1\n self.array = array\n\n test_actor = LocalModeTestClass.remote(np.arange(10))\n # Remote actor functions should return by value\n assert_equal(test_actor.get_array.remote(), np.arange(10))\n\n test_array = np.arange(10)\n # Remote actor functions should not mutate arguments\n test_actor.modify_and_set_array.remote(test_array)\n assert_equal(test_array, np.arange(10))\n # Remote actor functions should keep state\n test_array[0] = -1\n assert_equal(test_array, test_actor.get_array.remote())\n\n # Check that actor handles work in Python mode.\n\n @ray.remote\n def use_actor_handle(handle):\n array = np.ones(10)\n handle.set_array.remote(array)\n assert np.alltrue(array == ray.get(handle.get_array.remote()))\n\n ray.get(use_actor_handle.remote(test_actor))\n\n\ndef test_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=2)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n time_buffer = 0.3\n\n # At most 10 copies of this can run at once.\n @ray.remote(num_cpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(10)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(11)])\n duration 
= time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_cpus=3)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_gpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(2)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_multi_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=10)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n @ray.remote(num_cpus=1, num_gpus=9)\n def f(n):\n time.sleep(n)\n\n @ray.remote(num_cpus=9, num_gpus=1)\n def g(n):\n time.sleep(n)\n\n time_buffer = 0.3\n\n start_time = time.time()\n ray.get([f.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_gpu_ids(shutdown_only):\n num_gpus = 10\n ray.init(num_cpus=10, num_gpus=num_gpus)\n\n @ray.remote(num_gpus=0)\n def f0():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n @ray.remote(num_gpus=1)\n def f1():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n @ray.remote(num_gpus=2)\n def f2():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 2\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n @ray.remote(num_gpus=3)\n def f3():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 3\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n 
@ray.remote(num_gpus=4)\n    def f4():\n        time.sleep(0.1)\n        gpu_ids = ray.get_gpu_ids()\n        assert len(gpu_ids) == 4\n        assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n            [str(i) for i in gpu_ids]))\n        for gpu_id in gpu_ids:\n            assert gpu_id in range(num_gpus)\n        return gpu_ids\n\n    @ray.remote(num_gpus=5)\n    def f5():\n        time.sleep(0.1)\n        gpu_ids = ray.get_gpu_ids()\n        assert len(gpu_ids) == 5\n        assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n            [str(i) for i in gpu_ids]))\n        for gpu_id in gpu_ids:\n            assert gpu_id in range(num_gpus)\n        return gpu_ids\n\n    # Wait for all workers to start up.\n    @ray.remote\n    def f():\n        time.sleep(0.1)\n        return os.getpid()\n\n    start_time = time.time()\n    while True:\n        if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:\n            break\n        if time.time() > start_time + 10:\n            raise Exception(\"Timed out while waiting for workers to start \"\n                            \"up.\")\n\n    list_of_ids = ray.get([f0.remote() for _ in range(10)])\n    assert list_of_ids == 10 * [[]]\n\n    list_of_ids = ray.get([f1.remote() for _ in range(10)])\n    set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}\n    assert set_of_ids == {(i, ) for i in range(10)}\n\n    list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])\n    all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]\n    assert set(all_ids) == set(range(10))\n\n    remaining = [f5.remote() for _ in range(20)]\n    for _ in range(10):\n        t1 = time.time()\n        ready, remaining = ray.wait(remaining, num_returns=2)\n        t2 = time.time()\n        # There are only 10 GPUs, and each task uses 5 GPUs, so there\n        # should only be 2 tasks scheduled at a given time, so if we wait\n        # for 2 tasks to finish, then it should take at least 0.1 seconds\n        # for each pair of tasks to finish.\n        assert t2 - t1 > 0.09\n        list_of_ids = ray.get(ready)\n        all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]\n        # Commenting out the below assert because it seems to fail a lot.\n        # assert set(all_ids) == set(range(10))\n\n    # Test that actors have CUDA_VISIBLE_DEVICES set properly.\n\n    @ray.remote\n    class Actor0(object):\n        def __init__(self):\n            gpu_ids = ray.get_gpu_ids()\n            assert len(gpu_ids) == 0\n            assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n                [str(i) for i in gpu_ids]))\n            # Set self.x to make sure that we got here.\n            self.x = 1\n\n        def test(self):\n            gpu_ids = ray.get_gpu_ids()\n            assert len(gpu_ids) == 0\n            assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n                [str(i) for i in gpu_ids]))\n            return self.x\n\n    @ray.remote(num_gpus=1)\n    class Actor1(object):\n        def __init__(self):\n            gpu_ids = ray.get_gpu_ids()\n            assert len(gpu_ids) == 1\n            assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n                [str(i) for i in gpu_ids]))\n            # Set self.x to make sure that we got here.\n            self.x = 1\n\n        def test(self):\n            gpu_ids = ray.get_gpu_ids()\n            assert len(gpu_ids) == 1\n            assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n                [str(i) for i in gpu_ids]))\n            return self.x\n\n    a0 = Actor0.remote()\n    ray.get(a0.test.remote())\n\n    a1 = Actor1.remote()\n    ray.get(a1.test.remote())\n\n\ndef test_zero_cpus(shutdown_only):\n    ray.init(num_cpus=0)\n\n    @ray.remote(num_cpus=0)\n    def f():\n        return 1\n\n    # The task should be able to execute.\n    ray.get(f.remote())\n\n\ndef test_zero_cpus_actor(ray_start_cluster):\n    cluster = ray_start_cluster\n    cluster.add_node(num_cpus=0)\n    cluster.add_node(num_cpus=2)\n    ray.init(redis_address=cluster.redis_address)\n\n    local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote\n    class Foo(object):\n        def 
method(self):\n            return ray.worker.global_worker.plasma_client.store_socket_name\n\n    # Make sure tasks and actors run on the remote local scheduler.\n    a = Foo.remote()\n    assert ray.get(a.method.remote()) != local_plasma\n\n\ndef test_fractional_resources(shutdown_only):\n    ray.init(num_cpus=6, num_gpus=3, resources={\"Custom\": 1})\n\n    @ray.remote(num_gpus=0.5)\n    class Foo1(object):\n        def method(self):\n            gpu_ids = ray.get_gpu_ids()\n            assert len(gpu_ids) == 1\n            return gpu_ids[0]\n\n    foos = [Foo1.remote() for _ in range(6)]\n    gpu_ids = ray.get([f.method.remote() for f in foos])\n    for i in range(3):\n        assert gpu_ids.count(i) == 2\n    del foos\n\n    @ray.remote\n    class Foo2(object):\n        def method(self):\n            pass\n\n    # Create an actor that requires 0.7 of the custom resource.\n    f1 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n    ray.get(f1.method.remote())\n    # Make sure that we cannot create an actor that requires 0.7 of the\n    # custom resource. TODO(rkn): Re-enable this once ray.wait is\n    # implemented.\n    f2 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n    ready, _ = ray.wait([f2.method.remote()], timeout=0.5)\n    assert len(ready) == 0\n    # Make sure we can start an actor that requires only 0.3 of the custom\n    # resource.\n    f3 = Foo2._remote([], {}, resources={\"Custom\": 0.3})\n    ray.get(f3.method.remote())\n\n    del f1, f3\n\n    # Make sure that we get exceptions if we submit tasks that require a\n    # fractional number of resources greater than 1.\n\n    @ray.remote(num_cpus=1.5)\n    def test():\n        pass\n\n    with pytest.raises(ValueError):\n        test.remote()\n\n    with pytest.raises(ValueError):\n        Foo2._remote([], {}, resources={\"Custom\": 1.5})\n\n\ndef test_multiple_local_schedulers(ray_start_cluster):\n    # This test will define a bunch of tasks that can only be assigned to\n    # specific local schedulers, and we will check that they are assigned\n    # to the correct local schedulers.\n    cluster = ray_start_cluster\n    cluster.add_node(num_cpus=11, num_gpus=0)\n    cluster.add_node(num_cpus=5, num_gpus=5)\n    cluster.add_node(num_cpus=10, num_gpus=1)\n    ray.init(redis_address=cluster.redis_address)\n    cluster.wait_for_nodes()\n\n    # Define a bunch of remote functions that all return the socket name of\n    # the plasma store. 
Since there is a one-to-one correspondence between\n # plasma stores and local schedulers (at least right now), this can be\n # used to identify which local scheduler the task was assigned to.\n\n # This must be run on the zeroth local scheduler.\n @ray.remote(num_cpus=11)\n def run_on_0():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the first local scheduler.\n @ray.remote(num_gpus=2)\n def run_on_1():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the second local scheduler.\n @ray.remote(num_cpus=6, num_gpus=1)\n def run_on_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This can be run anywhere.\n @ray.remote(num_cpus=0, num_gpus=0)\n def run_on_0_1_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the first or second local scheduler.\n @ray.remote(num_gpus=1)\n def run_on_1_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the zeroth or second local scheduler.\n @ray.remote(num_cpus=8)\n def run_on_0_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n def run_lots_of_tasks():\n names = []\n results = []\n for i in range(100):\n index = np.random.randint(6)\n if index == 0:\n names.append(\"run_on_0\")\n results.append(run_on_0.remote())\n elif index == 1:\n names.append(\"run_on_1\")\n results.append(run_on_1.remote())\n elif index == 2:\n names.append(\"run_on_2\")\n results.append(run_on_2.remote())\n elif index == 3:\n names.append(\"run_on_0_1_2\")\n results.append(run_on_0_1_2.remote())\n elif index == 4:\n names.append(\"run_on_1_2\")\n results.append(run_on_1_2.remote())\n elif index == 5:\n names.append(\"run_on_0_2\")\n results.append(run_on_0_2.remote())\n return names, results\n\n client_table = ray.global_state.client_table()\n store_names = []\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"][\"GPU\"] == 0\n ]\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"][\"GPU\"] == 5\n ]\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"][\"GPU\"] == 1\n ]\n assert len(store_names) == 3\n\n def validate_names_and_results(names, results):\n for name, result in zip(names, ray.get(results)):\n if name == \"run_on_0\":\n assert result in [store_names[0]]\n elif name == \"run_on_1\":\n assert result in [store_names[1]]\n elif name == \"run_on_2\":\n assert result in [store_names[2]]\n elif name == \"run_on_0_1_2\":\n assert (result in [\n store_names[0], store_names[1], store_names[2]\n ])\n elif name == \"run_on_1_2\":\n assert result in [store_names[1], store_names[2]]\n elif name == \"run_on_0_2\":\n assert result in [store_names[0], store_names[2]]\n else:\n raise Exception(\"This should be unreachable.\")\n assert set(ray.get(results)) == set(store_names)\n\n names, results = run_lots_of_tasks()\n validate_names_and_results(names, results)\n\n # Make sure the same thing works when this is nested inside of a task.\n\n @ray.remote\n def run_nested1():\n names, results = run_lots_of_tasks()\n return names, results\n\n @ray.remote\n def run_nested2():\n names, results = ray.get(run_nested1.remote())\n return names, results\n\n names, results = ray.get(run_nested2.remote())\n validate_names_and_results(names, results)\n\n\ndef test_custom_resources(ray_start_cluster):\n cluster = 
ray_start_cluster\n cluster.add_node(num_cpus=3, resources={\"CustomResource\": 0})\n cluster.add_node(num_cpus=3, resources={\"CustomResource\": 1})\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource\": 1})\n def g():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource\": 1})\n def h():\n ray.get([f.remote() for _ in range(5)])\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # The f tasks should be scheduled on both local schedulers.\n assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n\n local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n # The g tasks should be scheduled only on the second local scheduler.\n local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))\n assert len(local_scheduler_ids) == 1\n assert list(local_scheduler_ids)[0] != local_plasma\n\n # Make sure that resource bookkeeping works when a task that uses a\n # custom resources gets blocked.\n ray.get([h.remote() for _ in range(5)])\n\n\ndef test_two_custom_resources(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 1,\n \"CustomResource2\": 2\n })\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 3,\n \"CustomResource2\": 4\n })\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote(resources={\"CustomResource1\": 1})\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource2\": 1})\n def g():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource1\": 1, \"CustomResource2\": 3})\n def h():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource1\": 4})\n def j():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource3\": 1})\n def k():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # The f and g tasks should be scheduled on both local schedulers.\n assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2\n\n local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n # The h tasks should be scheduled only on the second local scheduler.\n local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))\n assert len(local_scheduler_ids) == 1\n assert list(local_scheduler_ids)[0] != local_plasma\n\n # Make sure that tasks with unsatisfied custom resource requirements do\n # not get scheduled.\n ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)\n assert ready_ids == []\n\n\ndef test_many_custom_resources(shutdown_only):\n num_custom_resources = 10000\n total_resources = {\n str(i): np.random.randint(1, 7)\n for i in range(num_custom_resources)\n }\n ray.init(num_cpus=5, resources=total_resources)\n\n def f():\n return 1\n\n remote_functions = []\n for _ in range(20):\n num_resources = np.random.randint(0, num_custom_resources + 1)\n permuted_resources = np.random.permutation(\n num_custom_resources)[:num_resources]\n random_resources = {\n str(i): total_resources[str(i)]\n 
for i in permuted_resources\n }\n remote_function = ray.remote(resources=random_resources)(f)\n remote_functions.append(remote_function)\n\n remote_functions.append(ray.remote(f))\n remote_functions.append(ray.remote(resources=total_resources)(f))\n\n results = []\n for remote_function in remote_functions:\n results.append(remote_function.remote())\n results.append(remote_function.remote())\n results.append(remote_function.remote())\n\n ray.get(results)\n\n\n@pytest.fixture\ndef save_gpu_ids_shutdown_only():\n # Record the current value of this environment variable so that we can\n # reset it after the test.\n original_gpu_ids = os.environ.get(\"CUDA_VISIBLE_DEVICES\", None)\n\n yield None\n\n # The code after the yield will run as teardown code.\n ray.shutdown()\n # Reset the environment variable.\n if original_gpu_ids is not None:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = original_gpu_ids\n else:\n del os.environ[\"CUDA_VISIBLE_DEVICES\"]\n\n\ndef test_specific_gpus(save_gpu_ids_shutdown_only):\n allowed_gpu_ids = [4, 5, 6]\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(\n [str(i) for i in allowed_gpu_ids])\n ray.init(num_gpus=3)\n\n @ray.remote(num_gpus=1)\n def f():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert gpu_ids[0] in allowed_gpu_ids\n\n @ray.remote(num_gpus=2)\n def g():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 2\n assert gpu_ids[0] in allowed_gpu_ids\n assert gpu_ids[1] in allowed_gpu_ids\n\n ray.get([f.remote() for _ in range(100)])\n ray.get([g.remote() for _ in range(100)])\n\n\ndef test_blocking_tasks(shutdown_only):\n ray.init(num_cpus=1)\n\n @ray.remote\n def f(i, j):\n return (i, j)\n\n @ray.remote\n def g(i):\n # Each instance of g submits and blocks on the result of another\n # remote task.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.get(object_ids)\n\n @ray.remote\n def h(i):\n # Each instance of h submits and blocks on the result of another\n # remote task using ray.wait.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.wait(object_ids, num_returns=len(object_ids))\n\n ray.get([h.remote(i) for i in range(4)])\n\n @ray.remote\n def _sleep(i):\n time.sleep(0.01)\n return (i)\n\n @ray.remote\n def sleep():\n # Each instance of sleep submits and blocks on the result of\n # another remote task, which takes some time to execute.\n ray.get([_sleep.remote(i) for i in range(10)])\n\n ray.get(sleep.remote())\n\n\ndef test_max_call_tasks(shutdown_only):\n ray.init(num_cpus=1)\n\n @ray.remote(max_calls=1)\n def f():\n return os.getpid()\n\n pid = ray.get(f.remote())\n ray.test.test_utils.wait_for_pid_to_exit(pid)\n\n @ray.remote(max_calls=2)\n def f():\n return os.getpid()\n\n pid1 = ray.get(f.remote())\n pid2 = ray.get(f.remote())\n assert pid1 == pid2\n ray.test.test_utils.wait_for_pid_to_exit(pid1)\n\n\ndef attempt_to_load_balance(remote_function,\n args,\n total_tasks,\n num_nodes,\n minimum_count,\n num_attempts=100):\n attempts = 0\n while attempts < num_attempts:\n locations = ray.get(\n [remote_function.remote(*args) for _ in range(total_tasks)])\n names = set(locations)\n counts = [locations.count(name) for name in names]\n logger.info(\"Counts are {}.\".format(counts))\n if (len(names) == num_nodes\n and all(count >= minimum_count for count in counts)):\n break\n attempts += 1\n assert attempts < num_attempts\n\n\ndef test_load_balancing(ray_start_cluster):\n # This test ensures that tasks are being assigned to all local\n # schedulers in a roughly equal manner.\n cluster = ray_start_cluster\n 
num_nodes = 3\n num_cpus = 7\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=num_cpus)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f():\n time.sleep(0.01)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n attempt_to_load_balance(f, [], 100, num_nodes, 10)\n attempt_to_load_balance(f, [], 1000, num_nodes, 100)\n\n\ndef test_load_balancing_with_dependencies(ray_start_cluster):\n # This test ensures that tasks are being assigned to all local\n # schedulers in a roughly equal manner even when the tasks have\n # dependencies.\n cluster = ray_start_cluster\n num_nodes = 3\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=1)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f(x):\n time.sleep(0.010)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This object will be local to one of the local schedulers. Make sure\n # this doesn't prevent tasks from being scheduled on other local\n # schedulers.\n x = ray.put(np.zeros(1000000))\n\n attempt_to_load_balance(f, [x], 100, num_nodes, 25)\n\n\ndef wait_for_num_tasks(num_tasks, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.global_state.task_table()) >= num_tasks:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for global state.\")\n\n\ndef wait_for_num_objects(num_objects, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.global_state.object_table()) >= num_objects:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for global state.\")\n\n\n@pytest.mark.skipif(\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_global_state_api(shutdown_only):\n with pytest.raises(Exception):\n ray.global_state.object_table()\n\n with pytest.raises(Exception):\n ray.global_state.task_table()\n\n with pytest.raises(Exception):\n ray.global_state.client_table()\n\n with pytest.raises(Exception):\n ray.global_state.function_table()\n\n ray.init(num_cpus=5, num_gpus=3, resources={\"CustomResource\": 1})\n\n resources = {\"CPU\": 5, \"GPU\": 3, \"CustomResource\": 1}\n assert ray.global_state.cluster_resources() == resources\n\n assert ray.global_state.object_table() == {}\n\n driver_id = ray.experimental.state.binary_to_hex(\n ray.worker.global_worker.worker_id)\n driver_task_id = ray.worker.global_worker.current_task_id.hex()\n\n # One task is put in the task table which corresponds to this driver.\n wait_for_num_tasks(1)\n task_table = ray.global_state.task_table()\n assert len(task_table) == 1\n assert driver_task_id == list(task_table.keys())[0]\n task_spec = task_table[driver_task_id][\"TaskSpec\"]\n nil_id_hex = ray.ObjectID.nil().hex()\n\n assert task_spec[\"TaskID\"] == driver_task_id\n assert task_spec[\"ActorID\"] == nil_id_hex\n assert task_spec[\"Args\"] == []\n assert task_spec[\"DriverID\"] == driver_id\n assert task_spec[\"FunctionID\"] == nil_id_hex\n assert task_spec[\"ReturnObjectIDs\"] == []\n\n client_table = ray.global_state.client_table()\n node_ip_address = ray.worker.global_worker.node_ip_address\n\n assert len(client_table) == 1\n assert client_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n @ray.remote\n def f(*xs):\n return 1\n\n x_id = ray.put(1)\n result_id = f.remote(1, \"hi\", x_id)\n\n # Wait for one additional task to complete.\n wait_for_num_tasks(1 + 1)\n task_table = ray.global_state.task_table()\n assert len(task_table) == 1 + 
1\n task_id_set = set(task_table.keys())\n task_id_set.remove(driver_task_id)\n task_id = list(task_id_set)[0]\n\n function_table = ray.global_state.function_table()\n task_spec = task_table[task_id][\"TaskSpec\"]\n assert task_spec[\"ActorID\"] == nil_id_hex\n assert task_spec[\"Args\"] == [1, \"hi\", x_id]\n assert task_spec[\"DriverID\"] == driver_id\n assert task_spec[\"ReturnObjectIDs\"] == [result_id]\n function_table_entry = function_table[task_spec[\"FunctionID\"]]\n assert function_table_entry[\"Name\"] == \"runtest.f\"\n assert function_table_entry[\"DriverID\"] == driver_id\n assert function_table_entry[\"Module\"] == \"runtest\"\n\n assert task_table[task_id] == ray.global_state.task_table(task_id)\n\n # Wait for two objects, one for the x_id and one for result_id.\n wait_for_num_objects(2)\n\n def wait_for_object_table():\n timeout = 10\n start_time = time.time()\n while time.time() - start_time < timeout:\n object_table = ray.global_state.object_table()\n tables_ready = (object_table[x_id][\"ManagerIDs\"] is not None and\n object_table[result_id][\"ManagerIDs\"] is not None)\n if tables_ready:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for object table to \"\n \"update.\")\n\n object_table = ray.global_state.object_table()\n assert len(object_table) == 2\n\n assert object_table[x_id][\"IsEviction\"][0] is False\n\n assert object_table[result_id][\"IsEviction\"][0] is False\n\n assert object_table[x_id] == ray.global_state.object_table(x_id)\n object_table_entry = ray.global_state.object_table(result_id)\n assert object_table[result_id] == object_table_entry\n\n\n# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we\n# should use those, but they seem to conflict with Ray's use of faulthandler.\nclass CaptureOutputAndError(object):\n \"\"\"Capture stdout and stderr of some span.\n\n This can be used as follows.\n\n captured = {}\n with CaptureOutputAndError(captured):\n # Do stuff.\n # Access captured[\"out\"] and captured[\"err\"].\n \"\"\"\n\n def __init__(self, captured_output_and_error):\n if sys.version_info >= (3, 0):\n import io\n self.output_buffer = io.StringIO()\n self.error_buffer = io.StringIO()\n else:\n import cStringIO\n self.output_buffer = cStringIO.StringIO()\n self.error_buffer = cStringIO.StringIO()\n self.captured_output_and_error = captured_output_and_error\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n self.old_stdout = sys.stdout\n self.old_stderr = sys.stderr\n sys.stdout = self.output_buffer\n sys.stderr = self.error_buffer\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout = self.old_stdout\n sys.stderr = self.old_stderr\n self.captured_output_and_error[\"out\"] = self.output_buffer.getvalue()\n self.captured_output_and_error[\"err\"] = self.error_buffer.getvalue()\n\n\ndef test_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=True)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n error_lines = captured[\"err\"]\n for i in range(200):\n assert str(i) in error_lines\n\n\ndef test_not_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=False)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, 
file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n error_lines = captured[\"err\"]\n assert len(error_lines) == 0\n\n\n@pytest.mark.skipif(\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_workers(shutdown_only):\n num_workers = 3\n ray.init(redirect_worker_output=True, num_cpus=num_workers)\n\n @ray.remote\n def f():\n return id(ray.worker.global_worker), os.getpid()\n\n # Wait until all of the workers have started.\n worker_ids = set()\n while len(worker_ids) != num_workers:\n worker_ids = set(ray.get([f.remote() for _ in range(10)]))\n\n worker_info = ray.global_state.workers()\n assert len(worker_info) >= num_workers\n for worker_id, info in worker_info.items():\n assert \"node_ip_address\" in info\n assert \"plasma_store_socket\" in info\n assert \"stderr_file\" in info\n assert \"stdout_file\" in info\n\n\ndef test_specific_driver_id():\n dummy_driver_id = ray.DriverID(b\"00112233445566778899\")\n ray.init(driver_id=dummy_driver_id)\n\n @ray.remote\n def f():\n return ray.worker.global_worker.task_driver_id.binary()\n\n assert_equal(dummy_driver_id.binary(), ray.worker.global_worker.worker_id)\n\n task_driver_id = ray.get(f.remote())\n assert_equal(dummy_driver_id.binary(), task_driver_id)\n\n ray.shutdown()\n\n\ndef test_object_id_properties():\n id_bytes = b\"00112233445566778899\"\n object_id = ray.ObjectID(id_bytes)\n assert object_id.binary() == id_bytes\n object_id = ray.ObjectID.nil()\n assert object_id.is_nil()\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(id_bytes + b\"1234\")\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(b\"0123456789\")\n object_id = ray.ObjectID(_random_string())\n assert not object_id.is_nil()\n assert object_id.binary() != id_bytes\n id_dumps = pickle.dumps(object_id)\n id_from_dumps = pickle.loads(id_dumps)\n assert id_from_dumps == object_id\n\n\n@pytest.fixture\ndef shutdown_only_with_initialization_check():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n assert not ray.is_initialized()\n\n\ndef test_initialized(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0)\n assert ray.is_initialized()\n\n\ndef test_initialized_local_mode(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0, local_mode=True)\n assert ray.is_initialized()\n\n\ndef test_wait_reconstruction(shutdown_only):\n ray.init(num_cpus=1, object_store_memory=10**8)\n\n @ray.remote\n def f():\n return np.zeros(6 * 10**7, dtype=np.uint8)\n\n x_id = f.remote()\n ray.wait([x_id])\n ray.wait([f.remote()])\n assert not ray.worker.global_worker.plasma_client.contains(\n ray.pyarrow.plasma.ObjectID(x_id.binary()))\n ready_ids, _ = ray.wait([x_id])\n assert len(ready_ids) == 1\n\n\ndef test_inline_objects(shutdown_only):\n config = json.dumps({\"initial_reconstruction_timeout_milliseconds\": 200})\n ray.init(num_cpus=1, object_store_memory=10**7, _internal_config=config)\n\n @ray.remote\n class Actor(object):\n def create_inline_object(self):\n return \"inline\"\n\n def create_non_inline_object(self):\n return 10000 * [1]\n\n def get(self):\n return\n\n a = Actor.remote()\n # Count the number of objects that were successfully inlined.\n inlined = 
0\n for _ in range(100):\n inline_object = a.create_inline_object.remote()\n ray.get(inline_object)\n plasma_id = ray.pyarrow.plasma.ObjectID(inline_object.binary())\n ray.worker.global_worker.plasma_client.delete([plasma_id])\n # Make sure we can still get an inlined object created by an actor even\n # after it has been evicted.\n try:\n value = ray.get(inline_object)\n assert value == \"inline\"\n inlined += 1\n except ray.worker.RayTaskError:\n pass\n # Make sure some objects were inlined. Some of them may not get inlined\n # because we evict the object soon after creating it.\n assert inlined > 0\n\n # Non-inlined objects are not able to be recreated after eviction.\n for _ in range(10):\n non_inline_object = a.create_non_inline_object.remote()\n ray.get(non_inline_object)\n plasma_id = ray.pyarrow.plasma.ObjectID(non_inline_object.binary())\n # This while loop is necessary because sometimes the object is still\n # there immediately after plasma_client.delete.\n while ray.worker.global_worker.plasma_client.contains(plasma_id):\n ray.worker.global_worker.plasma_client.delete([plasma_id])\n # Objects created by an actor that were evicted and larger than the\n # maximum inline object size cannot be retrieved or reconstructed.\n with pytest.raises(ray.worker.RayTaskError):\n ray.get(non_inline_object) == 10000 * [1]\n\n\ndef test_ray_setproctitle(shutdown_only):\n ray.init(num_cpus=2)\n\n @ray.remote\n class UniqueName(object):\n def __init__(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:__init__()\"\n\n def f(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:f()\"\n\n @ray.remote\n def unique_1():\n assert setproctitle.getproctitle() == \"ray_worker:runtest.unique_1()\"\n\n actor = UniqueName.remote()\n ray.get(actor.f.remote())\n ray.get(unique_1.remote())\n\n\ndef test_duplicate_error_messages(shutdown_only):\n ray.init(num_cpus=0)\n\n driver_id = ray.DriverID.nil()\n error_data = ray.gcs_utils.construct_error_message(driver_id, \"test\",\n \"message\", 0)\n\n # Push the same message to the GCS twice (they are the same because we\n # do not include a timestamp).\n\n r = ray.worker.global_worker.redis_client\n\n r.execute_command(\"RAY.TABLE_APPEND\", ray.gcs_utils.TablePrefix.ERROR_INFO,\n ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),\n error_data)\n\n # Before https://github.com/ray-project/ray/pull/3316 this would\n # give an error\n r.execute_command(\"RAY.TABLE_APPEND\", ray.gcs_utils.TablePrefix.ERROR_INFO,\n ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),\n error_data)\n\n\n@pytest.mark.skipif(\n os.getenv(\"TRAVIS\") is None,\n reason=\"This test should only be run on Travis.\")\ndef test_ray_stack(shutdown_only):\n ray.init(num_cpus=2)\n\n def unique_name_1():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_2():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_3():\n unique_name_1()\n\n unique_name_2.remote()\n unique_name_3.remote()\n\n success = False\n start_time = time.time()\n while time.time() - start_time < 30:\n # Attempt to parse the \"ray stack\" call.\n output = ray.utils.decode(subprocess.check_output([\"ray\", \"stack\"]))\n if (\"unique_name_1\" in output and \"unique_name_2\" in output\n and \"unique_name_3\" in output):\n success = True\n break\n\n if not success:\n raise Exception(\"Failed to find necessary information with \"\n \"'ray stack'\")\n\n\ndef test_pandas_parquet_serialization():\n # Only test this if pandas is installed\n pytest.importorskip(\"pandas\")\n\n import pandas as pd\n import 
pyarrow as pa\n import pyarrow.parquet as pq\n\n tempdir = tempfile.mkdtemp()\n filename = os.path.join(tempdir, \"parquet-test\")\n pd.DataFrame({\"col1\": [0, 1], \"col2\": [0, 1]}).to_parquet(filename)\n with open(os.path.join(tempdir, \"parquet-compression\"), \"wb\") as f:\n table = pa.Table.from_arrays([pa.array([1, 2, 3])], [\"hello\"])\n pq.write_table(table, f, compression=\"lz4\")\n # Clean up\n shutil.rmtree(tempdir)\n\n\ndef test_socket_dir_not_existing(shutdown_only):\n random_name = ray.ObjectID(_random_string()).hex()\n temp_raylet_socket_dir = \"/tmp/ray/tests/{}\".format(random_name)\n temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,\n \"raylet_socket\")\n ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)\n\n\ndef test_raylet_is_robust_to_random_messages(shutdown_only):\n\n ray.init(num_cpus=1)\n node_manager_address = None\n node_manager_port = None\n for client in ray.global_state.client_table():\n if \"NodeManagerAddress\" in client:\n node_manager_address = client[\"NodeManagerAddress\"]\n node_manager_port = client[\"NodeManagerPort\"]\n assert node_manager_address\n assert node_manager_port\n # Try to bring down the node manager:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node_manager_address, node_manager_port))\n s.send(1000 * b'asdf')\n\n @ray.remote\n def f():\n return 1\n\n assert ray.get(f.remote()) == 1\n" ]
[ [ "numpy.testing.assert_equal", "numpy.uint32", "numpy.arange", "numpy.uint8", "numpy.int32", "numpy.int8", "pandas.DataFrame", "numpy.ones", "numpy.int64", "numpy.random.normal", "numpy.random.permutation", "numpy.uint64", "numpy.float64", "numpy.float32", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
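A minimal standalone sketch of the stdout-capture pattern that the CaptureOutputAndError helper above implements; this Python-3-only version, its class name, and the closing assertion are illustrative rather than taken from the record:

import io
import sys

class CaptureOutput:
    """Redirect sys.stdout into an in-memory buffer for the span of a with-block."""

    def __init__(self, captured):
        self.captured = captured
        self.buffer = io.StringIO()

    def __enter__(self):
        sys.stdout.flush()
        self.old_stdout = sys.stdout
        sys.stdout = self.buffer  # writes now accumulate in the buffer

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout.flush()
        sys.stdout = self.old_stdout  # always restore the real stream
        self.captured["out"] = self.buffer.getvalue()

captured = {}
with CaptureOutput(captured):
    print("hello")
assert "hello" in captured["out"]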
SimonMcLain/Project_Repository_Programming_and_Scripting_2018
[ "c10769973461d4c30b2c0a9d3a1f0e812049ca44" ]
[ "RoughWork/stddev.py" ]
[ "#Simon McLain 2018-04-25\n# Experimenting with numpy\n# https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html reference to standard deviation \n# calculate the standard deviation of each column\n\nimport numpy\n#imports numpy library providing math functions to operate on them \ndata = numpy.genfromtxt('iris.csv', delimiter=',')\n# Reads the data into an array\ncol1 = data[:,0] \nstdcol1 = numpy.std(data[:,0])\ncol2 =data[:, 1]\nstdcol2 = numpy.std(data[:, 1])\ncol3 =data[:, 2]\nstdcol3 = numpy.std(data[:, 2])\ncol4 =data[:, 3]\nstdcol4 = numpy.std(data[:, 3])\n# Individually looks at each column and returns the standard deviation for that column\n\nprint(\"The standard deviation in sepal length is: \", numpy.around(stdcol1, decimals = 2))\nprint(\"The standard deviation in sepal width is: \", numpy.around(stdcol2, decimals = 2))\nprint(\"The standard deviation in petal length is: \", numpy.around(stdcol3, decimals = 2))\nprint(\"The standard deviation in petal width is: \", numpy.around(stdcol4, decimals = 2))\n" ]
[ [ "numpy.around", "numpy.std", "numpy.genfromtxt" ] ]
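The four per-column numpy.std calls above collapse into a single call with numpy's axis argument; a short sketch assuming the same iris.csv file, with the conventional iris column order taken as an assumption for the printed names:

import numpy as np

# One call instead of four: axis=0 yields one standard deviation per column.
# Assumes iris.csv holds the four numeric iris measurements in the usual order.
data = np.genfromtxt('iris.csv', delimiter=',')
stds = np.around(np.std(data[:, :4], axis=0), decimals=2)
for name, value in zip(["sepal length", "sepal width", "petal length", "petal width"], stds):
    print("The standard deviation in", name, "is:", value)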
paulowiz/AiesecBot
[ "ac77cc5426ed6382772603afa8015208020c0fba", "ac77cc5426ed6382772603afa8015208020c0fba" ]
[ "Get Retroativo/2017_09.py", "Get Retroativo/2018_08_1.py" ]
[ "import psycopg2.extras\nfrom controller import RobotRotine as rr\nfrom api import graphqlconsume, querygraphql\nimport time\nimport datetime\nimport numpy as np\n\"\"\"\ncurrent = np.datetime64(datetime.datetime.now())\ncurrentab = np.datetime64(current) + np.timedelta64(5, 'h')\nlastdate = np.datetime64(currentab) - np.timedelta64(15, 'm')\nprint(lastdate)\nprint(currentab)\nprint('-')\n\n\"\"\"\nrobo5 = rr.RobotRotine()\ni = 0\ndtinit = '2017-09-01T00:00:00'\nwhile i < 31:\n print(dtinit)\n dtfim = np.datetime64(dtinit) + np.timedelta64(24, 'h')\n robo5.ExecutaRotina('created_at', dtinit,\n dtfim, 1)\n i = i+1\n dtinit = np.datetime64(dtinit) + np.timedelta64(24, 'h')\n\nprint('Periodo Executado com sucesso')\n", "import psycopg2.extras\nfrom controller import RobotRotine as rr\nfrom api import graphqlconsume, querygraphql\nimport time\nimport datetime\nimport numpy as np\n\"\"\"\ncurrent = np.datetime64(datetime.datetime.now())\ncurrentab = np.datetime64(current) + np.timedelta64(5, 'h')\nlastdate = np.datetime64(currentab) - np.timedelta64(15, 'm')\nprint(lastdate)\nprint(currentab)\nprint('-')\n\n\"\"\"\nrobo5 = rr.RobotRotine()\ni = 0\ndtinit = '2018-08-01T00:00:00'\nwhile i < 31:\n print(dtinit)\n dtfim = np.datetime64(dtinit) + np.timedelta64(24, 'h')\n robo5.ExecutaRotina('created_at', dtinit,\n dtfim, 1)\n i = i+1\n dtinit = np.datetime64(dtinit) + np.timedelta64(24, 'h')\n\nprint('Periodo Executado com sucesso')\n" ]
[ [ "numpy.timedelta64", "numpy.datetime64" ], [ "numpy.timedelta64", "numpy.datetime64" ] ]
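Both scripts above advance a one-day window across a month with numpy datetime arithmetic; a self-contained sketch of just that arithmetic (RobotRotine and the GraphQL calls are deliberately left out):

import numpy as np

# datetime64 plus timedelta64 advances a [start, end) window one day at a time.
start = np.datetime64('2017-09-01T00:00:00')
for day in range(31):
    window_start = start + np.timedelta64(24 * day, 'h')
    window_end = window_start + np.timedelta64(24, 'h')
    print(window_start, '->', window_end)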
GuilhermeToso/masters-project
[ "01d5acfddaedb3cbf7fa9247a88108530547e155", "01d5acfddaedb3cbf7fa9247a88108530547e155", "01d5acfddaedb3cbf7fa9247a88108530547e155" ]
[ "tests/5 - Models segmentation by sync/5.1 - Hodgkin-Huxley/shh_Nneurons_sync_test.py", "tests/3 - Stochastic Models Synchronization/3.3 - Integrate-and-Fire/sif_couple_var.py", "tests/11 - NSC Data Tests/11.1 - Parameters combination/par_combination.py" ]
[ "\"\"\" \nStochastic Hodgkin-Huxley Neurons\n=================================\n\nAnalysis of 12 Neurons coupled in 3 different groups\n----------------------------------------------------\n\n**Author**: Guilherme M. Toso\n**Title**: shh_Nneurons_sync_test.py\n**Project**: Semi-Supervised Learning Using Competition for Neurons' Synchronization\n\n\n**Description**:\n\n    This script uses the Hodgkin-Huxley Biological Neuron Model with Stochastic terms,\n    and synchronizes 12 neurons in 3 different groups, such that the neurons in the same group\n    are synchronized, while the neurons in different groups are desynchronized. This script plots \n    the 12 stochastic trajectories, as well as their differences (|V\\\:sub:`i` - V\\\:sub:`j`|),\n    the growing phases (\\\phi\\\:sub:`i`, \\\phi\\\:sub:`j`), and the phases difference (|\\\phi\\\:sub:`i` - \\\phi\\\:sub:`j`|)\n\n\"\"\"\n\n\"\"\" Dependencies \"\"\"\nimport sys\nimport os\npath = os.getcwd()\nsys.path.insert(0,path)\nfrom nsc import HodgkinHuxley, SDE, Chemical, Couple\nfrom nsc import unwrap\nfrom nsc import ngplot\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib import colors\nimport sys\nnp.random.seed(0)\n\n\"\"\" Define the total amount of neurons \"\"\"\nneurons = 12\n\n\"\"\" Define the initial parameters \"\"\"\nv_na = np.zeros(neurons) + 115\nv_k = np.zeros(neurons) - 12\nv_l = np.zeros(neurons) + 86\ng_na = np.zeros(neurons) + 120\ng_k = np.zeros(neurons) + 36\ng_l = np.zeros(neurons) + .3\nc = np.ones(neurons)\nsigma = 1.0\nsigma_external = 0.5\n\n\"\"\" Frequency \"\"\" \nfreq = 50.0\n\n\"\"\" Period \"\"\"\nT = 1/freq\n\n\"\"\" Define the step value for every step \"\"\"\nstep = T\n\n\"\"\" Instantiate the Hodgkin-Huxley Class \"\"\"\nhh = HodgkinHuxley(v_na, v_k, v_l, g_na, g_k, g_l, c)\nsde = SDE(step, sigma_external, sigma, v_na, v_k, v_l, g_na, g_k, g_l, c)\n\n\"\"\" Define the Chemical class params and instantiate it \"\"\"\njch = 0.1\nv_reversal = -70\nch = Chemical(jch, v_reversal)\n\n\"\"\" Define the total time \"\"\"\nt_max = 300.0\n\n\"\"\" Define the number of iterations \"\"\"\nn_iter = int(t_max/step)\n\n\n\"\"\" Define the initial time t, and the variables V, M, N, and H \"\"\"\nt = 0.0\nv = np.random.uniform(0.0, 4.0, (neurons))\nm = hh.m0(v)[2]\nn = hh.n0(v)[2]\nh = hh.h0(v)[2]\ny = ch.y0(v)[2]\n\n\n\"\"\" Define the array that will store all Variables (V, M, N and H) of all Neurons at all times. 
\"\"\"\ndata = np.zeros((n_iter,5,neurons))\n\n\"\"\" Initialize the matrix init that contains all Variables of all Neurons at time t \"\"\"\ninit = np.array([v,m,n,h,y])\n\n\"\"\" Create the array of time \"\"\"\ntime = np.zeros((n_iter))\n\n\"\"\" Cluster Amount \"\"\"\ncluster = 3\n\n\"\"\" Determine the coupling force \"\"\"\nk = np.zeros(shape=(neurons,neurons)) + 0.8\nprint(k)\n\n\"\"\" Determine the adjacency matrix that defines which oscillators are coupled:\n    here it forms three groups of four mutually coupled neurons \"\"\"\nadjacency = np.array([\\\n    [0,0,0,0,1,0,0,0,1,0,0,1],\\\n    [0,0,1,0,0,1,0,0,0,0,1,0],\\\n    [0,1,0,0,0,1,0,0,0,0,1,0],\\\n    [0,0,0,0,0,0,1,1,0,1,0,0],\\\n    [1,0,0,0,0,0,0,0,1,0,0,1],\\\n    [0,1,1,0,0,0,0,0,0,0,1,0],\\\n    [0,0,0,1,0,0,0,1,0,1,0,0],\\\n    [0,0,0,1,0,0,1,0,0,1,0,0],\\\n    [1,0,0,0,1,0,0,0,0,0,0,1],\\\n    [0,0,0,1,0,0,1,1,0,0,0,0],\\\n    [0,1,1,0,0,1,0,0,0,0,0,0],\\\n    [1,0,0,0,1,0,0,0,1,0,0,0]])\n\nk = k*adjacency\n\n\"\"\" Instantiate the Couple class \"\"\"\ncouple = Couple()\n\n\"\"\" Begin the Iteration Process \"\"\"\nfor i in range(len(time)):\n    \n    \"\"\" Stores the matrix init at the data array in the time i \"\"\"\n    data[i] = init\n    \n    \"\"\" The array time at iteration i receives the value of t \"\"\"\n    time[i] = t\n    \n    \"\"\" Define the initial Variables \"\"\"\n    v = init[0]\n    m = init[1]\n    n = init[2]\n    h = init[3]\n    y = init[4]\n    \n    \"\"\" Set the electrical current I \"\"\"\n    current = 20\n\n    couple.data = v\n\n    next_v = v + sde.membrane_potential(v,m,n,h,current) - ch.synapse(y,v)*step - couple.synapse(k)\n    next_m = m + sde.stochastic_sodium_activate(m,v)\n    next_h = h + sde.stochastic_sodium_deactivate(h,v)\n    next_n = n + sde.stochastic_potassium_activate(n,v)\n    next_y = y + sde.stochastic_chemical_transmitter(y,v)\n\n    init[0] = next_v\n    init[1] = next_m\n    init[2] = next_n\n    init[3] = next_h\n    init[4] = next_y\n\n    \n    \"\"\" Update Time \"\"\"\n    t = t + step\n\n\n\n\"\"\" Transpose the data array \"\"\"\ndata1 = np.transpose(data,(1,2,0))\n\n\n\"\"\" Calculate the Sodium and Potassium Conductances \"\"\"\ngna1 = 120*(data1[1]**3)*data1[3]\ngk1 = 36*(data1[2]**4)\n\nred_colors = [\"orangered\", 'darkred', 'firebrick', 'red']\ngreen_colors = [\"limegreen\", 'forestgreen', 'darkgreen', 'green']\nblue_colors = [\"royalblue\", \"midnightblue\",\"mediumblue\", \"blue\"]\n\n\"\"\" Total colors \"\"\"\ncolors = red_colors + green_colors + blue_colors\n\n\"\"\" Organize the data \"\"\"\ndata2 = np.array([data1[0][0], data1[0][4], data1[0][8], data1[0][11],\\\n    data1[0][1], data1[0][2], data1[0][5], data1[0][10], \\\n    data1[0][3], data1[0][6], data1[0][7], data1[0][9]])\n\n\n\"\"\" Get the peak indexes, times and the periods between them \"\"\"\ninds, times, pers = unwrap.get_peaks_indexes(data2[:,:].T, 40, T)\n#print(inds)\n\nneurons_array = []\nfor i in range(len(times)):\n\n    neurons_array.append(np.zeros(times[i].size)+i)\n\ncols = ['r','g','b']\nlabeled = {'Grupo 1':[0,1,2,3], 'Grupo 2':[4,5,6,7], 'Grupo 3':[8,9,10,11]}\nngplot.neural_activity(times, neurons_array,t_max, colors = cols, labeled=labeled)\n\n\"\"\" Get the phases \"\"\"\nphases = unwrap.unwrap_static_2(data2.shape[1], inds, T, model='HH')\n\n\"\"\" Plot phases \"\"\"\nngplot.phases(phases, colors, T)\n\n\n\"\"\" Plot the trajectories \"\"\"\n\nplt.plot(time, data1[0][0], c = red_colors[0])\nplt.plot(time, data1[0][4], c = red_colors[1])\nplt.plot(time, data1[0][8], c = red_colors[2])\nplt.plot(time, data1[0][11], c = red_colors[3])\nplt.xlabel('t [ms]',fontsize=34, labelpad=10)\nplt.ylabel('V [mV]',fontsize=34, 
labelpad=30)\nplt.yticks(fontsize=34)\nplt.xticks(fontsize=34)\nplt.grid(True)\nplt.title(\"Neurônios Sincronizados pertencentes ao Grupo 1\", fontsize = 24)\nplt.show()\n\n\nplt.plot(time, data1[0][1], c = green_colors[0])\nplt.plot(time, data1[0][2], c = green_colors[1])\nplt.plot(time, data1[0][5], c = green_colors[2])\nplt.plot(time, data1[0][10], c = green_colors[3])\nplt.xlabel('t [ms]',fontsize=34, labelpad=10)\nplt.ylabel('V [mV]',fontsize=34, labelpad=30)\nplt.yticks(fontsize=34)\nplt.xticks(fontsize=34)\nplt.grid(True)\nplt.title(\"Neurônios Sincronizados pertencentes ao Grupo 2\", fontsize = 24)\nplt.show()\n\nplt.plot(time, data1[0][3], c = blue_colors[0])\nplt.plot(time, data1[0][6], c = blue_colors[1])\nplt.plot(time, data1[0][7], c = blue_colors[2])\nplt.plot(time, data1[0][9], c = blue_colors[3])\nplt.xlabel('t [ms]',fontsize=34, labelpad=10)\nplt.ylabel('V [mV]',fontsize=34, labelpad=30)\nplt.yticks(fontsize=34)\nplt.xticks(fontsize=34)\nplt.grid(True)\nplt.title(\"Neurônios Sincronizados pertencentes ao Grupo 3\", fontsize = 24)\nplt.show()\n\nplt.plot(time, data1[0][11], c = red_colors[3])\nplt.plot(time, data1[0][10], c = green_colors[3])\nplt.plot(time, data1[0][3], c = blue_colors[0])\nplt.plot(time, data1[0][6], c = blue_colors[1])\nplt.plot(time, data1[0][7], c = blue_colors[2])\nplt.plot(time, data1[0][9], c = blue_colors[3])\nplt.xlabel('t [ms]',fontsize=34, labelpad=10)\nplt.ylabel('V [mV]',fontsize=34, labelpad=30)\nplt.yticks(fontsize=34)\nplt.xticks(fontsize=34)\nplt.grid(True)\nplt.title(\"Neurônios Dessincronizados dos grupos 1, 2 e 3\", fontsize = 24)\nplt.show()\n\n\"\"\" Get the Phases difference with group1 as reference\"\"\"\nngplot.phases_diff_3D(0, phases, T)\n\n\"\"\" Get the Phases difference with group2 as reference\"\"\"\nngplot.phases_diff_3D(4, phases, T)\n\n\"\"\" Get the Phases difference with group3 as reference\"\"\"\nngplot.phases_diff_3D(8, phases, T)\n\n\"\"\" Get the Trajectories difference with group1 as reference\"\"\"\nngplot.trajecs_diff_3D(0, data2, T)\n\n\"\"\" Get the Trajectories difference with group2 as reference\"\"\"\nngplot.trajecs_diff_3D(4, data2, T)\n\n\"\"\" Get the Trajectories difference with group3 as reference\"\"\"\nngplot.trajecs_diff_3D(8, data2, T)", "\"\"\" \nStochastic Integrate-and-Fire Neurons\n=================================\n\nCoupling Force Variation\n------------------------\n\n**Author**: Guilherme M. Toso\n**Title**: sif_couple_var.py\n**Project**: Semi-Supervised Learning Using Competition for Neurons' Synchronization\n\n**Description**:\n\n    This script uses the Integrate-and-Fire Biological Neuron Model with Stochastic terms,\n    it uses two neurons and then tries to synchronize them by varying the coupling force k. 
This script plots \n    the differences (|V\\\:sub:`i` - V\\\:sub:`j`|) and the phases difference (|\\\phi\\\:sub:`i` - \\\phi\\\:sub:`j`|) of the two trajectories\n    of every k value.\n\n\"\"\"\n\nimport sys\nimport os\npath = os.getcwd()\nsys.path.insert(0,path)\nfrom nsc import IntegrateAndFire, Couple, ngplot, unwrap\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nfrom tqdm import tqdm\n\n#np.random.seed(0)\n\n\"\"\" Integrate and Fire Parameters \"\"\"\nvrest = 0.0\nr = 1.0\ntau = 10.0\nthreshold = 1.0\nI = 2.5\n\n\"\"\" Instantiates the Integrate and Fire Model Class \"\"\"\nIF = IntegrateAndFire(vrest,r,tau,threshold)\n\n\"\"\" Neurons amount \"\"\"\nneurons = 2\n\"\"\" Time Properties \"\"\"\ntotal = 200\n\n\"\"\" Coupling Force Vector \"\"\"\nk = np.linspace(0,1.2,num=100)\ndef force_variation(k, neurons, decimals = 2):\n\n    num = k.size\n    k = np.repeat(k,repeats=neurons**2)\n    k = np.reshape(k,(num,neurons,neurons))\n    k[:,np.arange(neurons), np.arange(neurons)] = 0\n    k = np.around(k,decimals=decimals)\n\n    return k\nforce = force_variation(k,neurons)\n\ncouple = Couple()\n\n\"\"\" Create the data structure to store the trajectories differences and the phases \"\"\"\ndiff_data = np.zeros((k.size, total+1))\nphases_data = np.zeros((k.size, total+1))\n\nsigma = 0.3\n\nfor i in tqdm(range(k.size)):\n\n    \n    time = np.linspace(0,total,(total+1))\n\n    \"\"\" Data array \"\"\"\n    data = np.zeros((total+1,neurons))\n\n    u = np.random.uniform(0,0.5,size=neurons)\n\n\n    for j in tqdm(range(time.size)):\n\n        data[j] = u\n\n        couple.data = u\n\n        next_u = u + IF.lif(u,I) + sigma*u*np.random.normal(0,0.2,size=neurons) - couple.synapse(force[i])\n\n        u = IF.reset(data[j],next_u)\n\n    \"\"\" Store the trajecs difference data with the ith element of coupling force k \"\"\"\n    diff_data[i] = np.abs(data[:,0] - data[:,1])\n\n    \"\"\" Get the peak indexes, times and the periods between them \"\"\"\n    inds, times, pers = unwrap.get_peaks_indexes(data[:,:], threshold, 1)\n    \"\"\" Get the phases \"\"\"\n    phases = unwrap.unwrap_static_2(total+1, inds, 1,model='IAF')\n\n    \"\"\" Store the phases difference data with the ith element of coupling force k \"\"\"\n    phases_data[i] = np.abs(phases[0] - phases[1])\n\nngplot.coupling(diff_data, phases_data, k, time)\n", "\"\"\" \nAccuracy per Parameter Combinations\n===================================\n\n**Author**: Guilherme M. Toso\n**Title**: par_combination.py\n**Project**: Semi-Supervised Learning Using Competition for Neurons' Synchronization\n\n**Description**:\n\n    This script takes 5 artificial datasets and applies them in the Neuron Synchronization Competition\n    algorithm, by varying the parameter combinations of the limit connections amount (Neighbors),\n    the speed of the search sphere radius (Expand), and the total amount of initial classified\n    samples (Seeds). 
\n\n\"\"\"\n\n\n\nimport sys\nimport os\npath = os.getcwd()\nsys.path.insert(0,path)\nfrom nsc import NSC, ngplot, unwrap\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nfrom tqdm import tqdm\nimport sklearn.datasets as datasets\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.semi_supervised import LabelPropagation, LabelSpreading\nimport multiprocessing\n\ndef concat_data(x,y):\n\n    x_pd = pd.DataFrame(x,columns=['X','Y'])\n    y_pd = pd.DataFrame(y,columns=['target'])\n\n    df = pd.concat([x_pd,y_pd],axis=1)\n\n    return df\n\n\"\"\" Define the range of the params \"\"\"\n\ndef multi_get_params(data,name):\n\n    print(\"Process ID: \", os.getpid())\n\n    seeds = [2,5,10,12,15,20,22,25,30,32,35,40,42,45,50]\n    expand = [2,3,4,5,10,20,30,40,50,75,100,200,300,400,500]\n    neighbors = [5,10,15,20,25,50]\n\n    epochs = 600\n\n    features = data.drop(columns=['target'],axis=1)\n\n    numeric = features.select_dtypes(include=np.number)\n\n    numeric_names = numeric.columns\n\n    data.loc[:,numeric_names] = (numeric-numeric.mean())/numeric.std()\n\n\n    # Create the model instance\n    ng = NSC(data=data, target='target', similarity='Euclidean',\n        model='Izhikevic', alpha = 0.1, w_step = 0.3, time_step=0.5, print_info=False,\n        print_steps=False, beta=2.0, gamma=1.5)\n\n    labels = np.unique(ng.data['target'])\n\n    accuracy = np.zeros(shape=(len(neighbors), len(labels), len(seeds),len(expand)))\n    accuracy_mean = np.zeros(shape=(50,len(labels)))\n    \n    # For each neighbors value\n    for n in tqdm(range(len(neighbors))):\n        print(\"{} data neighbors: {}/6\".format(name,n))\n        ng.neighbors = neighbors[n]\n        # For each seeds value\n        for s in tqdm(range(len(seeds))):\n            print(\"{} data seeds: {}/15\".format(name,s))\n            # For each expand value\n            for e in tqdm(range(len(expand))):\n                print(\"{} data expand: {}/15\".format(name,e))\n                ng.search_expand = expand[e]\n                \n                # For 50 iterations\n                for i in range(50):\n                    \n                    # Preprocess the data\n                    ng.preprocess_data(not_null=seeds[s], standarlize=False)\n                    \n                    # Fit\n                    ng.fit(epochs,ng.numerical)\n                    \n                    diag = np.diagonal(ng.confusion_matrix)\n                    if diag.size==len(ng.labels_list):\n                        accuracy_mean[i,:] = diag\n                    elif diag.size > len(ng.labels_list):\n                        accuracy_mean[i,:] = diag[-len(ng.labels_list):]\n                    \n                    ng.data = data\n                    \n                    ng.y_predicted = -np.ones(shape=ng.neurons)\n\n                    ng.degree_out = np.zeros(shape=(ng.neurons,len(ng.labels_list)+1))\n                    ng.labels_array = -np.ones(shape=(ng.neurons,ng.neurons))\n                    ng.incident_degree = np.zeros(shape=(ng.neurons,len(ng.labels_list)))\n                    ng.inner_degree = {}\n                    ng.disputed = np.array([])\n                    ng.capacity = np.zeros(shape=(ng.neurons))\n\n                accuracy[n,:,s,e] = np.mean(accuracy_mean,axis=0)\n\n    np.save(name,accuracy)\n\nif __name__ == \"__main__\":\n    \n\n    x_clusters_09,y_clusters_09 = datasets.make_blobs(n_samples=400,centers=3,n_features=2, cluster_std=0.9)\n    clusters_09 = concat_data(x_clusters_09,y_clusters_09)\n    \n    x_clusters_12,y_clusters_12 = datasets.make_blobs(n_samples=400,centers=3,n_features=2, cluster_std=1.2)\n    clusters_12 = concat_data(x_clusters_12,y_clusters_12)\n    \n    x_circles,y_circles = datasets.make_circles(n_samples=400,noise=0.05,factor=0.5)\n    circles = concat_data(x_circles, y_circles)\n\n    x_moons,y_moons = datasets.make_moons(n_samples=400,noise=0.1)\n    moons = concat_data(x_moons, y_moons)\n\n    x_classification, y_classification = datasets.make_classification(n_samples=400,n_features=2,n_informative=2,n_redundant=0,n_clusters_per_class=1)\n    classification = 
concat_data(x_classification,y_classification)\n\n dic_of_data = {'Cluster_09':clusters_09, 'Cluster_12':clusters_12, 'Circles':circles, 'Moons':moons, 'Classification':classification}\n\n keys = list(dic_of_data.keys())\n print(keys)\n datas = list(dic_of_data.values())\n\n processes = []\n\n for i in tqdm(range(5)):\n\n p = multiprocessing.Process(target=multi_get_params,args=(datas[i],keys[i],))\n p.start()\n processes.append(p)\n \n for p in processes:\n p.join()" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "numpy.transpose", "numpy.random.uniform", "matplotlib.pyplot.yticks", "numpy.array", "numpy.zeros", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel" ], [ "numpy.abs", "numpy.linspace", "numpy.reshape", "numpy.around", "numpy.arange", "numpy.random.normal", "numpy.random.uniform", "numpy.repeat", "numpy.zeros" ], [ "pandas.concat", "sklearn.datasets.make_classification", "numpy.unique", "sklearn.datasets.make_moons", "numpy.save", "pandas.DataFrame", "numpy.ones", "sklearn.datasets.make_circles", "numpy.mean", "numpy.array", "numpy.zeros", "numpy.diagonal", "sklearn.datasets.make_blobs" ] ]
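The force_variation helper in sif_couple_var.py above expands each scalar coupling force into a neurons-by-neurons matrix with a zeroed diagonal so no oscillator couples to itself; a standalone sketch with a small sanity check:

import numpy as np

def force_variation(k, neurons, decimals=2):
    # Broadcast each scalar k into a neurons x neurons block ...
    num = k.size
    k = np.repeat(k, repeats=neurons**2).reshape(num, neurons, neurons)
    # ... and zero the diagonal so no oscillator couples to itself.
    k[:, np.arange(neurons), np.arange(neurons)] = 0
    return np.around(k, decimals=decimals)

forces = force_variation(np.linspace(0, 1.2, num=5), neurons=2)
assert forces.shape == (5, 2, 2)
print(forces[-1])  # [[0.  1.2] [1.2 0. ]]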
InkToYou/TreeWasserstein
[ "b3f26dd50cc5f06a40e076b2e68f6e5c83786e7b" ]
[ "tests/test_treewasserstein.py" ]
[ "import numpy as np\nimport pytest\n\nimport networkx as nx\nimport ot\nimport tw\n\n\nclass TestBuildValidTreeMetric(object):\n @pytest.mark.parametrize(\n \"num_node, edges\",\n [\n (5, [(i % 5, (i + 1) % 5, i + 1) for i in range(5)]),\n (3, [(i % 3, (i + 1) % 3, i + 1) for i in range(3)]),\n ],\n )\n def test_invalid_tree(self, num_node, edges):\n with pytest.raises(ValueError):\n first_prob = np.zeros(num_node)\n second_prob = np.zeros(num_node)\n first_prob[0] = 1.0\n second_prob[-1] = 1.0\n tw.distance(first_prob, second_prob, edges)\n\n\nclass TestTreeWasserstein(object):\n def test_tree_wasserstein(self):\n for i in range(100):\n num_node = np.random.randint(10, 200)\n G = nx.generators.random_tree(num_node)\n edges = [(fr, to, 1) for (fr, to) in list(G.edges())]\n first_prob = np.random.rand(num_node)\n first_prob = first_prob / first_prob.sum()\n second_prob = np.random.rand(num_node)\n second_prob = second_prob / second_prob.sum()\n twd = tw.distance(first_prob, second_prob, edges)\n\n adj_dict = dict(nx.all_pairs_shortest_path_length(G))\n metric = np.array(\n [[adj_dict[i][j] for i in range(num_node)] for j in range(num_node)]\n )\n ans = ot.lp.emd2(first_prob, second_prob, metric)\n\n assert np.allclose([twd], [ans]), f\"i: {i}, TW : {twd}, WD : {ans}\"\n" ]
[ [ "numpy.allclose", "numpy.zeros", "numpy.random.rand", "numpy.random.randint" ] ]
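A tiny worked instance of what the randomized test above checks, using only POT's exact solver (tw.distance should agree on the same path graph): moving all mass from node 0 to node 2 along 0-1-2 with unit edges costs 2.

import numpy as np
import ot

# Shortest-path distances on the path graph 0-1-2 with unit edge weights.
metric = np.array([[0.0, 1.0, 2.0],
                   [1.0, 0.0, 1.0],
                   [2.0, 1.0, 0.0]])
first_prob = np.array([1.0, 0.0, 0.0])   # all mass on node 0
second_prob = np.array([0.0, 0.0, 1.0])  # all mass on node 2
print(ot.lp.emd2(first_prob, second_prob, metric))  # 2.0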
reveriel/depconv
[ "4f50d8651655c3a275f15422559eac82879704da" ]
[ "second/pytorch/models/voxelnet.py" ]
[ "import time\nfrom enum import Enum\nfrom functools import reduce\nimport contextlib\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nimport torchplus\nfrom second.pytorch.core import box_torch_ops\nfrom second.pytorch.core.losses import (WeightedSigmoidClassificationLoss,\n WeightedSmoothL1LocalizationLoss,\n WeightedSoftmaxClassificationLoss)\nfrom second.pytorch.models import middle, pointpillars, rpn, voxel_encoder\nfrom torchplus import metrics\nfrom second.pytorch.utils import torch_timer\n\nfrom second.sphere.model import DepConvNet3, ConvNet\n\ndef _get_pos_neg_loss(cls_loss, labels):\n # cls_loss: [N, num_anchors, num_class]\n # labels: [N, num_anchors]\n batch_size = cls_loss.shape[0]\n if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:\n cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_neg_loss = (labels == 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_pos_loss = cls_pos_loss.sum() / batch_size\n cls_neg_loss = cls_neg_loss.sum() / batch_size\n else:\n cls_pos_loss = cls_loss[..., 1:].sum() / batch_size\n cls_neg_loss = cls_loss[..., 0].sum() / batch_size\n return cls_pos_loss, cls_neg_loss\n\nREGISTERED_NETWORK_CLASSES = {}\n\ndef register_voxelnet(cls, name=None):\n global REGISTERED_NETWORK_CLASSES\n if name is None:\n name = cls.__name__\n assert name not in REGISTERED_NETWORK_CLASSES, f\"exist class: {REGISTERED_NETWORK_CLASSES}\"\n REGISTERED_NETWORK_CLASSES[name] = cls\n return cls\n\ndef get_voxelnet_class(name):\n global REGISTERED_NETWORK_CLASSES\n assert name in REGISTERED_NETWORK_CLASSES, f\"available class: {REGISTERED_NETWORK_CLASSES}\"\n return REGISTERED_NETWORK_CLASSES[name]\n\nclass LossNormType(Enum):\n NormByNumPositives = \"norm_by_num_positives\"\n NormByNumExamples = \"norm_by_num_examples\"\n NormByNumPosNeg = \"norm_by_num_pos_neg\"\n DontNorm = \"dont_norm\"\n\n@register_voxelnet\nclass VoxelNet(nn.Module):\n def __init__(self,\n output_shape,\n num_class=2,\n num_input_features=4,\n vfe_class_name=\"VoxelFeatureExtractor\",\n vfe_num_filters=[32, 128],\n with_distance=False,\n middle_class_name=\"SparseMiddleExtractor\",\n middle_num_input_features=-1,\n middle_num_filters_d1=[64],\n middle_num_filters_d2=[64, 64],\n rpn_class_name=\"RPN\",\n rpn_num_input_features=-1,\n rpn_layer_nums=[3, 5, 5],\n rpn_layer_strides=[2, 2, 2],\n rpn_num_filters=[128, 128, 256],\n rpn_upsample_strides=[1, 2, 4],\n rpn_num_upsample_filters=[256, 256, 256],\n use_norm=True,\n use_groupnorm=False,\n num_groups=32,\n use_direction_classifier=True,\n use_sigmoid_score=False,\n encode_background_as_zeros=True,\n use_rotate_nms=True,\n multiclass_nms=False,\n nms_score_thresholds=None,\n nms_pre_max_sizes=None,\n nms_post_max_sizes=None,\n nms_iou_thresholds=None,\n target_assigner=None,\n cls_loss_weight=1.0,\n loc_loss_weight=1.0,\n pos_cls_weight=1.0,\n neg_cls_weight=1.0,\n direction_loss_weight=1.0,\n loss_norm_type=LossNormType.NormByNumPositives,\n encode_rad_error_by_sin=False,\n loc_loss_ftor=None,\n cls_loss_ftor=None,\n measure_time=False,\n voxel_generator=None,\n post_center_range=None,\n dir_offset=0.0,\n sin_error_factor=1.0,\n nms_class_agnostic=False,\n num_direction_bins=2,\n direction_limit_offset=0,\n name='voxelnet'):\n super().__init__()\n self.name = name\n self._sin_error_factor = sin_error_factor\n self._num_class = num_class\n self._use_rotate_nms = use_rotate_nms\n self._multiclass_nms = multiclass_nms\n self._nms_score_thresholds = 
nms_score_thresholds\n self._nms_pre_max_sizes = nms_pre_max_sizes\n self._nms_post_max_sizes = nms_post_max_sizes\n self._nms_iou_thresholds = nms_iou_thresholds\n self._use_sigmoid_score = use_sigmoid_score\n self._encode_background_as_zeros = encode_background_as_zeros\n self._use_direction_classifier = use_direction_classifier\n self._num_input_features = num_input_features\n self._box_coder = target_assigner.box_coder\n self.target_assigner = target_assigner\n self.voxel_generator = voxel_generator\n self._pos_cls_weight = pos_cls_weight\n self._neg_cls_weight = neg_cls_weight\n self._encode_rad_error_by_sin = encode_rad_error_by_sin\n self._loss_norm_type = loss_norm_type\n self._dir_loss_ftor = WeightedSoftmaxClassificationLoss()\n self._diff_loc_loss_ftor = WeightedSmoothL1LocalizationLoss()\n self._dir_offset = dir_offset\n self._loc_loss_ftor = loc_loss_ftor\n self._cls_loss_ftor = cls_loss_ftor\n self._direction_loss_weight = direction_loss_weight\n self._cls_loss_weight = cls_loss_weight\n self._loc_loss_weight = loc_loss_weight\n self._post_center_range = post_center_range or []\n self.measure_time = measure_time\n self._nms_class_agnostic = nms_class_agnostic\n self._num_direction_bins = num_direction_bins\n self._dir_limit_offset = direction_limit_offset\n self.voxel_feature_extractor = voxel_encoder.get_vfe_class(vfe_class_name)(\n num_input_features,\n use_norm,\n num_filters=vfe_num_filters,\n with_distance=with_distance,\n voxel_size=self.voxel_generator.voxel_size,\n pc_range=self.voxel_generator.point_cloud_range,\n )\n self.middle_feature_extractor = middle.get_middle_class(middle_class_name)(\n output_shape,\n use_norm,\n num_input_features=middle_num_input_features,\n num_filters_down1=middle_num_filters_d1,\n num_filters_down2=middle_num_filters_d2)\n # self.feature_extractor = DepConvNet3(5)\n # self.feature_extractor = ConvNet(5)\n\n self.rpn = rpn.get_rpn_class(rpn_class_name)(\n use_norm=True,\n num_class=num_class,\n layer_nums=rpn_layer_nums,\n layer_strides=rpn_layer_strides,\n num_filters=rpn_num_filters,\n upsample_strides=rpn_upsample_strides,\n num_upsample_filters=rpn_num_upsample_filters,\n num_input_features=rpn_num_input_features,\n num_anchor_per_loc=target_assigner.num_anchors_per_location,\n encode_background_as_zeros=encode_background_as_zeros,\n use_direction_classifier=use_direction_classifier,\n use_groupnorm=use_groupnorm,\n num_groups=num_groups,\n box_code_size=target_assigner.box_coder.code_size,\n num_direction_bins=self._num_direction_bins)\n self.rpn_acc = metrics.Accuracy(\n dim=-1, encode_background_as_zeros=encode_background_as_zeros)\n self.rpn_precision = metrics.Precision(dim=-1)\n self.rpn_recall = metrics.Recall(dim=-1)\n self.rpn_metrics = metrics.PrecisionRecall(\n dim=-1,\n thresholds=[0.1, 0.3, 0.5, 0.7, 0.8, 0.9, 0.95],\n use_sigmoid_score=use_sigmoid_score,\n encode_background_as_zeros=encode_background_as_zeros)\n\n self.rpn_cls_loss = metrics.Scalar()\n self.rpn_loc_loss = metrics.Scalar()\n self.rpn_total_loss = metrics.Scalar()\n self.register_buffer(\"global_step\", torch.LongTensor(1).zero_())\n\n self._time_dict = {}\n self._time_total_dict = {}\n self._time_count_dict = {}\n\n def start_timer(self, *names):\n if not self.measure_time:\n return\n torch.cuda.synchronize()\n for name in names:\n self._time_dict[name] = time.time()\n\n def end_timer(self, name):\n if not self.measure_time:\n return\n torch.cuda.synchronize()\n time_elapsed = time.time() - self._time_dict[name]\n if name not in self._time_count_dict:\n 
self._time_count_dict[name] = 1\n self._time_total_dict[name] = time_elapsed\n else:\n self._time_count_dict[name] += 1\n self._time_total_dict[name] += time_elapsed\n self._time_dict[name] = 0\n\n def clear_timer(self):\n self._time_count_dict.clear()\n self._time_dict.clear()\n self._time_total_dict.clear()\n\n @contextlib.contextmanager\n def profiler(self):\n old_measure_time = self.measure_time\n self.measure_time = True\n yield\n self.measure_time = old_measure_time\n\n def get_avg_time_dict(self):\n ret = {}\n for name, val in self._time_total_dict.items():\n count = self._time_count_dict[name]\n ret[name] = val / max(1, count)\n return ret\n\n def update_global_step(self):\n self.global_step += 1\n\n def get_global_step(self):\n return int(self.global_step.cpu().numpy()[0])\n\n def clear_global_step(self):\n self.global_step.zero_()\n\n def loss(self, example, preds_dict):\n box_preds = preds_dict[\"box_preds\"]\n cls_preds = preds_dict[\"cls_preds\"]\n batch_size_dev = cls_preds.shape[0]\n self.start_timer(\"loss forward\")\n labels = example['labels']\n reg_targets = example['reg_targets']\n importance = example['importance']\n self.start_timer(\"prepare weight forward\")\n cls_weights, reg_weights, cared = prepare_loss_weights(\n labels,\n pos_cls_weight=self._pos_cls_weight,\n neg_cls_weight=self._neg_cls_weight,\n loss_norm_type=self._loss_norm_type,\n dtype=box_preds.dtype)\n\n cls_targets = labels * cared.type_as(labels)\n cls_targets = cls_targets.unsqueeze(-1)\n self.end_timer(\"prepare weight forward\")\n self.start_timer(\"create_loss forward\")\n loc_loss, cls_loss = create_loss(\n self._loc_loss_ftor,\n self._cls_loss_ftor,\n box_preds=box_preds,\n cls_preds=cls_preds,\n cls_targets=cls_targets,\n cls_weights=cls_weights * importance,\n reg_targets=reg_targets,\n reg_weights=reg_weights * importance,\n num_class=self._num_class,\n encode_rad_error_by_sin=self._encode_rad_error_by_sin,\n encode_background_as_zeros=self._encode_background_as_zeros,\n box_code_size=self._box_coder.code_size,\n sin_error_factor=self._sin_error_factor,\n num_direction_bins=self._num_direction_bins,\n )\n loc_loss_reduced = loc_loss.sum() / batch_size_dev\n loc_loss_reduced *= self._loc_loss_weight\n cls_pos_loss, cls_neg_loss = _get_pos_neg_loss(cls_loss, labels)\n cls_pos_loss /= self._pos_cls_weight\n cls_neg_loss /= self._neg_cls_weight\n cls_loss_reduced = cls_loss.sum() / batch_size_dev\n cls_loss_reduced *= self._cls_loss_weight\n loss = loc_loss_reduced + cls_loss_reduced\n self.end_timer(\"create_loss forward\")\n if self._use_direction_classifier:\n dir_targets = get_direction_target(\n example['anchors'],\n reg_targets,\n dir_offset=self._dir_offset,\n num_bins=self._num_direction_bins)\n dir_logits = preds_dict[\"dir_cls_preds\"].view(\n batch_size_dev, -1, self._num_direction_bins)\n weights = (labels > 0).type_as(dir_logits) * importance\n weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)\n dir_loss = self._dir_loss_ftor(\n dir_logits, dir_targets, weights=weights)\n dir_loss = dir_loss.sum() / batch_size_dev\n loss += dir_loss * self._direction_loss_weight\n self.end_timer(\"loss forward\")\n res = {\n \"loss\": loss,\n \"cls_loss\": cls_loss,\n \"loc_loss\": loc_loss,\n \"cls_pos_loss\": cls_pos_loss,\n \"cls_neg_loss\": cls_neg_loss,\n \"cls_preds\": cls_preds,\n \"cls_loss_reduced\": cls_loss_reduced,\n \"loc_loss_reduced\": loc_loss_reduced,\n \"cared\": cared,\n }\n if self._use_direction_classifier:\n res[\"dir_loss_reduced\"] = dir_loss\n return res\n\n # 
def network_forward(self, feature, batch_size):\n def network_forward(self, voxels, num_points, coors, batch_size):\n \"\"\"this function is used for subclass.\n you can add custom network architecture by subclass VoxelNet class\n and override this function.\n Returns:\n preds_dict: {\n box_preds: ...\n cls_preds: ...\n dir_cls_preds: ...\n }\n \"\"\"\n self.start_timer(\"voxel_feature_extractor\")\n voxel_features = self.voxel_feature_extractor(voxels, num_points,\n coors)\n self.end_timer(\"voxel_feature_extractor\")\n self.start_timer(\"middle forward\")\n spatial_features = self.middle_feature_extractor(\n voxel_features, coors, batch_size)\n # spatial_features = self.feature_extractor(feature)\n self.end_timer(\"middle forward\")\n self.start_timer(\"rpn forward\")\n preds_dict = self.rpn(spatial_features)\n self.end_timer(\"rpn forward\")\n return preds_dict\n\n def forward(self, example):\n \"\"\"module's forward should always accept dict and return loss.\n \"\"\"\n voxels = example[\"voxels\"]\n num_points = example[\"num_points\"]\n coors = example[\"coordinates\"]\n # feature = example[\"feature\"]\n # feature = torch.tensor(feature, device=\"cuda\", dtype=torch.float32)\n if len(num_points.shape) == 2: # multi-gpu\n num_voxel_per_batch = example[\"num_voxels\"].cpu().numpy().reshape(\n -1)\n voxel_list = []\n num_points_list = []\n coors_list = []\n for i, num_voxel in enumerate(num_voxel_per_batch):\n voxel_list.append(voxels[i, :num_voxel])\n num_points_list.append(num_points[i, :num_voxel])\n coors_list.append(coors[i, :num_voxel])\n voxels = torch.cat(voxel_list, dim=0)\n num_points = torch.cat(num_points_list, dim=0)\n coors = torch.cat(coors_list, dim=0)\n coors[:,0] -= coors[:,0].min()\n # print(\"voxels shape = \", voxels.shape)\n # print(\"coors shape = \", coors.shape)\n # print(\"num_points shape = \", num_points.shape)\n batch_anchors = example[\"anchors\"]\n # print(\"batch anchor shpae\", batch_anchors.shape)\n batch_size_dev = batch_anchors.shape[0]\n # features: [num_voxels, max_num_points_per_voxel, 7]\n # num_points: [num_voxels]\n # coors: [num_voxels, 4]\n preds_dict = self.network_forward(voxels, num_points, coors, batch_size_dev)\n # preds_dict = self.network_forward(feature, batch_size_dev)\n\n # need to check size.\n box_preds = preds_dict[\"box_preds\"].view(batch_size_dev, -1, self._box_coder.code_size)\n err_msg = f\"num_anchors={batch_anchors.shape[1]}, but num_output={box_preds.shape[1]}. 
please check size\"\n assert batch_anchors.shape[1] == box_preds.shape[1], err_msg\n if self.training:\n return self.loss(example, preds_dict)\n else:\n self.start_timer(\"predict\")\n with torch.no_grad():\n res = self.predict(example, preds_dict)\n self.end_timer(\"predict\")\n return res\n\n def predict(self, example, preds_dict):\n \"\"\"start with v1.6.0, this function don't contain any kitti-specific code.\n Returns:\n predict: list of pred_dict.\n pred_dict: {\n box3d_lidar: [N, 7] 3d box.\n scores: [N]\n label_preds: [N]\n metadata: meta-data which contains dataset-specific information.\n for kitti, it contains image idx (label idx),\n for nuscenes, sample_token is saved in it.\n }\n \"\"\"\n batch_size = example['anchors'].shape[0]\n if \"metadata\" not in example or len(example[\"metadata\"]) == 0:\n meta_list = [None] * batch_size\n else:\n meta_list = example[\"metadata\"]\n batch_anchors = example[\"anchors\"].view(batch_size, -1,\n example[\"anchors\"].shape[-1])\n if \"anchors_mask\" not in example:\n batch_anchors_mask = [None] * batch_size\n else:\n batch_anchors_mask = example[\"anchors_mask\"].view(batch_size, -1)\n\n t = time.time()\n batch_box_preds = preds_dict[\"box_preds\"]\n batch_cls_preds = preds_dict[\"cls_preds\"]\n batch_box_preds = batch_box_preds.view(batch_size, -1,\n self._box_coder.code_size)\n num_class_with_bg = self._num_class\n if not self._encode_background_as_zeros:\n num_class_with_bg = self._num_class + 1\n\n batch_cls_preds = batch_cls_preds.view(batch_size, -1,\n num_class_with_bg)\n batch_box_preds = self._box_coder.decode_torch(batch_box_preds,\n batch_anchors)\n if self._use_direction_classifier:\n batch_dir_preds = preds_dict[\"dir_cls_preds\"]\n batch_dir_preds = batch_dir_preds.view(batch_size, -1,\n self._num_direction_bins)\n else:\n batch_dir_preds = [None] * batch_size\n\n predictions_dicts = []\n post_center_range = None\n if len(self._post_center_range) > 0:\n post_center_range = torch.tensor(\n self._post_center_range,\n dtype=batch_box_preds.dtype,\n device=batch_box_preds.device).float()\n for box_preds, cls_preds, dir_preds, a_mask, meta in zip(\n batch_box_preds, batch_cls_preds, batch_dir_preds,\n batch_anchors_mask, meta_list):\n if a_mask is not None:\n box_preds = box_preds[a_mask]\n cls_preds = cls_preds[a_mask]\n box_preds = box_preds.float()\n cls_preds = cls_preds.float()\n if self._use_direction_classifier:\n if a_mask is not None:\n dir_preds = dir_preds[a_mask]\n dir_labels = torch.max(dir_preds, dim=-1)[1]\n if self._encode_background_as_zeros:\n # this don't support softmax\n assert self._use_sigmoid_score is True\n total_scores = torch.sigmoid(cls_preds)\n else:\n # encode background as first element in one-hot vector\n if self._use_sigmoid_score:\n total_scores = torch.sigmoid(cls_preds)[..., 1:]\n else:\n total_scores = F.softmax(cls_preds, dim=-1)[..., 1:]\n # Apply NMS in birdeye view\n if self._use_rotate_nms:\n nms_func = box_torch_ops.rotate_nms\n else:\n nms_func = box_torch_ops.nms\n feature_map_size_prod = batch_box_preds.shape[\n 1] // self.target_assigner.num_anchors_per_location\n if self._multiclass_nms:\n assert self._encode_background_as_zeros is True\n boxes_for_nms = box_preds[:, [0, 1, 3, 4, 6]]\n if not self._use_rotate_nms:\n box_preds_corners = box_torch_ops.center_to_corner_box2d(\n boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],\n boxes_for_nms[:, 4])\n boxes_for_nms = box_torch_ops.corner_to_standup_nd(\n box_preds_corners)\n\n selected_boxes, selected_labels, selected_scores = [], [], []\n 
selected_dir_labels = []\n\n scores = total_scores\n boxes = boxes_for_nms\n selected_per_class = []\n score_threshs = self._nms_score_thresholds\n pre_max_sizes = self._nms_pre_max_sizes\n post_max_sizes = self._nms_post_max_sizes\n iou_thresholds = self._nms_iou_thresholds\n for class_idx, score_thresh, pre_ms, post_ms, iou_th in zip(\n range(self._num_class),\n score_threshs,\n pre_max_sizes, post_max_sizes, iou_thresholds):\n if self._nms_class_agnostic:\n class_scores = total_scores.view(\n feature_map_size_prod, -1,\n self._num_class)[..., class_idx]\n class_scores = class_scores.contiguous().view(-1)\n class_boxes_nms = boxes.view(-1,\n boxes_for_nms.shape[-1])\n class_boxes = box_preds\n class_dir_labels = dir_labels\n else:\n anchors_range = self.target_assigner.anchors_range(class_idx)\n class_scores = total_scores.view(\n -1,\n self._num_class)[anchors_range[0]:anchors_range[1], class_idx]\n class_boxes_nms = boxes.view(-1,\n boxes_for_nms.shape[-1])[anchors_range[0]:anchors_range[1], :]\n class_scores = class_scores.contiguous().view(-1)\n class_boxes_nms = class_boxes_nms.contiguous().view(\n -1, boxes_for_nms.shape[-1])\n class_boxes = box_preds.view(-1,\n box_preds.shape[-1])[anchors_range[0]:anchors_range[1], :]\n class_boxes = class_boxes.contiguous().view(\n -1, box_preds.shape[-1])\n if self._use_direction_classifier:\n class_dir_labels = dir_labels.view(-1)[anchors_range[0]:anchors_range[1]]\n class_dir_labels = class_dir_labels.contiguous(\n ).view(-1)\n if score_thresh > 0.0:\n class_scores_keep = class_scores >= score_thresh\n if class_scores_keep.shape[0] == 0:\n selected_per_class.append(None)\n continue\n class_scores = class_scores[class_scores_keep]\n if class_scores.shape[0] != 0:\n if score_thresh > 0.0:\n class_boxes_nms = class_boxes_nms[\n class_scores_keep]\n class_boxes = class_boxes[class_scores_keep]\n class_dir_labels = class_dir_labels[\n class_scores_keep]\n keep = nms_func(class_boxes_nms, class_scores, pre_ms,\n post_ms, iou_th)\n if keep.shape[0] != 0:\n selected_per_class.append(keep)\n else:\n selected_per_class.append(None)\n else:\n selected_per_class.append(None)\n selected = selected_per_class[-1]\n\n if selected is not None:\n selected_boxes.append(class_boxes[selected])\n selected_labels.append(\n torch.full([class_boxes[selected].shape[0]],\n class_idx,\n dtype=torch.int64,\n device=box_preds.device))\n if self._use_direction_classifier:\n selected_dir_labels.append(\n class_dir_labels[selected])\n selected_scores.append(class_scores[selected])\n selected_boxes = torch.cat(selected_boxes, dim=0)\n selected_labels = torch.cat(selected_labels, dim=0)\n selected_scores = torch.cat(selected_scores, dim=0)\n if self._use_direction_classifier:\n selected_dir_labels = torch.cat(selected_dir_labels, dim=0)\n else:\n # get highest score per prediction, than apply nms\n # to remove overlapped box.\n if num_class_with_bg == 1:\n top_scores = total_scores.squeeze(-1)\n top_labels = torch.zeros(\n total_scores.shape[0],\n device=total_scores.device,\n dtype=torch.long)\n else:\n top_scores, top_labels = torch.max(\n total_scores, dim=-1)\n if self._nms_score_thresholds[0] > 0.0:\n top_scores_keep = top_scores >= self._nms_score_thresholds[0]\n top_scores = top_scores.masked_select(top_scores_keep)\n\n if top_scores.shape[0] != 0:\n if self._nms_score_thresholds[0] > 0.0:\n box_preds = box_preds[top_scores_keep]\n if self._use_direction_classifier:\n dir_labels = dir_labels[top_scores_keep]\n top_labels = top_labels[top_scores_keep]\n boxes_for_nms = 
box_preds[:, [0, 1, 3, 4, 6]]\n if not self._use_rotate_nms:\n box_preds_corners = box_torch_ops.center_to_corner_box2d(\n boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],\n boxes_for_nms[:, 4])\n boxes_for_nms = box_torch_ops.corner_to_standup_nd(\n box_preds_corners)\n # the nms in 3d detection just remove overlap boxes.\n selected = nms_func(\n boxes_for_nms,\n top_scores,\n pre_max_size=self._nms_pre_max_sizes[0],\n post_max_size=self._nms_post_max_sizes[0],\n iou_threshold=self._nms_iou_thresholds[0],\n )\n else:\n selected = []\n # if selected is not None:\n selected_boxes = box_preds[selected]\n if self._use_direction_classifier:\n selected_dir_labels = dir_labels[selected]\n selected_labels = top_labels[selected]\n selected_scores = top_scores[selected]\n # finally generate predictions.\n if selected_boxes.shape[0] != 0:\n box_preds = selected_boxes\n scores = selected_scores\n label_preds = selected_labels\n if self._use_direction_classifier:\n dir_labels = selected_dir_labels\n period = (2 * np.pi / self._num_direction_bins)\n dir_rot = box_torch_ops.limit_period(\n box_preds[..., 6] - self._dir_offset,\n self._dir_limit_offset, period)\n box_preds[\n ...,\n 6] = dir_rot + self._dir_offset + period * dir_labels.to(\n box_preds.dtype)\n final_box_preds = box_preds\n final_scores = scores\n final_labels = label_preds\n if post_center_range is not None:\n mask = (final_box_preds[:, :3] >=\n post_center_range[:3]).all(1)\n mask &= (final_box_preds[:, :3] <=\n post_center_range[3:]).all(1)\n predictions_dict = {\n \"box3d_lidar\": final_box_preds[mask],\n \"scores\": final_scores[mask],\n \"label_preds\": label_preds[mask],\n \"metadata\": meta,\n }\n else:\n predictions_dict = {\n \"box3d_lidar\": final_box_preds,\n \"scores\": final_scores,\n \"label_preds\": label_preds,\n \"metadata\": meta,\n }\n else:\n dtype = batch_box_preds.dtype\n device = batch_box_preds.device\n predictions_dict = {\n \"box3d_lidar\":\n torch.zeros([0, box_preds.shape[-1]],\n dtype=dtype,\n device=device),\n \"scores\":\n torch.zeros([0], dtype=dtype, device=device),\n \"label_preds\":\n torch.zeros([0], dtype=top_labels.dtype, device=device),\n \"metadata\":\n meta,\n }\n predictions_dicts.append(predictions_dict)\n return predictions_dicts\n\n def metrics_to_float(self):\n self.rpn_acc.float()\n self.rpn_metrics.float()\n self.rpn_cls_loss.float()\n self.rpn_loc_loss.float()\n self.rpn_total_loss.float()\n\n def update_metrics(self, cls_loss, loc_loss, cls_preds, labels, sampled):\n batch_size = cls_preds.shape[0]\n num_class = self._num_class\n if not self._encode_background_as_zeros:\n num_class += 1\n cls_preds = cls_preds.view(batch_size, -1, num_class)\n rpn_acc = self.rpn_acc(labels, cls_preds, sampled).numpy()[0]\n prec, recall = self.rpn_metrics(labels, cls_preds, sampled)\n prec = prec.numpy()\n recall = recall.numpy()\n rpn_cls_loss = self.rpn_cls_loss(cls_loss).numpy()[0]\n rpn_loc_loss = self.rpn_loc_loss(loc_loss).numpy()[0]\n ret = {\n \"loss\": {\n \"cls_loss\": float(rpn_cls_loss),\n \"cls_loss_rt\": float(cls_loss.data.cpu().numpy()),\n 'loc_loss': float(rpn_loc_loss),\n \"loc_loss_rt\": float(loc_loss.data.cpu().numpy()),\n },\n \"rpn_acc\": float(rpn_acc),\n \"pr\": {},\n }\n for i, thresh in enumerate(self.rpn_metrics.thresholds):\n ret[\"pr\"][f\"prec@{int(thresh*100)}\"] = float(prec[i])\n ret[\"pr\"][f\"rec@{int(thresh*100)}\"] = float(recall[i])\n return ret\n\n def clear_metrics(self):\n self.rpn_acc.clear()\n self.rpn_metrics.clear()\n self.rpn_cls_loss.clear()\n 
self.rpn_loc_loss.clear()\n self.rpn_total_loss.clear()\n\n @staticmethod\n def convert_norm_to_float(net):\n '''\n BatchNorm layers to have parameters in single precision.\n Find all layers and convert them back to float. This can't\n be done with built in .apply as that function will apply\n fn to all modules, parameters, and buffers. Thus we wouldn't\n be able to guard the float conversion based on the module type.\n '''\n if isinstance(net, torch.nn.modules.batchnorm._BatchNorm):\n net.float()\n for child in net.children():\n VoxelNet.convert_norm_to_float(child)\n return net\n\n\ndef add_sin_difference(boxes1, boxes2, boxes1_rot, boxes2_rot, factor=1.0):\n if factor != 1.0:\n boxes1_rot = factor * boxes1_rot\n boxes2_rot = factor * boxes2_rot\n rad_pred_encoding = torch.sin(boxes1_rot) * torch.cos(boxes2_rot)\n rad_tg_encoding = torch.cos(boxes1_rot) * torch.sin(boxes2_rot)\n boxes1 = torch.cat([boxes1[..., :6], rad_pred_encoding, boxes1[..., 7:]],\n dim=-1)\n boxes2 = torch.cat([boxes2[..., :6], rad_tg_encoding, boxes2[..., 7:]],\n dim=-1)\n return boxes1, boxes2\n\n\ndef create_loss(loc_loss_ftor,\n cls_loss_ftor,\n box_preds,\n cls_preds,\n cls_targets,\n cls_weights,\n reg_targets,\n reg_weights,\n num_class,\n encode_background_as_zeros=True,\n encode_rad_error_by_sin=True,\n sin_error_factor=1.0,\n box_code_size=7,\n num_direction_bins=2):\n batch_size = int(box_preds.shape[0])\n box_preds = box_preds.view(batch_size, -1, box_code_size)\n if encode_background_as_zeros:\n cls_preds = cls_preds.view(batch_size, -1, num_class)\n else:\n cls_preds = cls_preds.view(batch_size, -1, num_class + 1)\n cls_targets = cls_targets.squeeze(-1)\n one_hot_targets = torchplus.nn.one_hot(\n cls_targets, depth=num_class + 1, dtype=box_preds.dtype)\n if encode_background_as_zeros:\n one_hot_targets = one_hot_targets[..., 1:]\n if encode_rad_error_by_sin:\n # sin(a - b) = sinacosb-cosasinb\n # reg_tg_rot = box_torch_ops.limit_period(\n # reg_targets[..., 6:7], 0.5, 2 * np.pi / num_direction_bins)\n box_preds, reg_targets = add_sin_difference(box_preds, reg_targets,\n box_preds[..., 6:7], reg_targets[..., 6:7], sin_error_factor)\n\n loc_losses = loc_loss_ftor(\n box_preds, reg_targets, weights=reg_weights) # [N, M]\n cls_losses = cls_loss_ftor(\n cls_preds, one_hot_targets, weights=cls_weights) # [N, M]\n return loc_losses, cls_losses\n\n\ndef prepare_loss_weights(labels,\n pos_cls_weight=1.0,\n neg_cls_weight=1.0,\n loss_norm_type=LossNormType.NormByNumPositives,\n dtype=torch.float32):\n \"\"\"get cls_weights and reg_weights from labels.\n \"\"\"\n cared = labels >= 0\n # cared: [N, num_anchors]\n positives = labels > 0\n negatives = labels == 0\n negative_cls_weights = negatives.type(dtype) * neg_cls_weight\n cls_weights = negative_cls_weights + pos_cls_weight * positives.type(dtype)\n reg_weights = positives.type(dtype)\n if loss_norm_type == LossNormType.NormByNumExamples:\n num_examples = cared.type(dtype).sum(1, keepdim=True)\n num_examples = torch.clamp(num_examples, min=1.0)\n cls_weights /= num_examples\n bbox_normalizer = positives.sum(1, keepdim=True).type(dtype)\n reg_weights /= torch.clamp(bbox_normalizer, min=1.0)\n elif loss_norm_type == LossNormType.NormByNumPositives: # for focal loss\n pos_normalizer = positives.sum(1, keepdim=True).type(dtype)\n reg_weights /= torch.clamp(pos_normalizer, min=1.0)\n cls_weights /= torch.clamp(pos_normalizer, min=1.0)\n elif loss_norm_type == LossNormType.NormByNumPosNeg:\n pos_neg = torch.stack([positives, negatives], dim=-1).type(dtype)\n 
normalizer = pos_neg.sum(1, keepdim=True) # [N, 1, 2]\n cls_normalizer = (pos_neg * normalizer).sum(-1) # [N, M]\n cls_normalizer = torch.clamp(cls_normalizer, min=1.0)\n # cls_normalizer will be pos_or_neg_weight/num_pos_or_neg\n normalizer = torch.clamp(normalizer, min=1.0)\n reg_weights /= normalizer[:, 0:1, 0]\n cls_weights /= cls_normalizer\n elif loss_norm_type == LossNormType.DontNorm: # support ghm loss\n pos_normalizer = positives.sum(1, keepdim=True).type(dtype)\n reg_weights /= torch.clamp(pos_normalizer, min=1.0)\n else:\n raise ValueError(\n f\"unknown loss norm type. available: {list(LossNormType)}\")\n return cls_weights, reg_weights, cared\n\n\ndef assign_weight_to_each_class(labels,\n weight_per_class,\n norm_by_num=True,\n dtype=torch.float32):\n weights = torch.zeros(labels.shape, dtype=dtype, device=labels.device)\n for label, weight in weight_per_class:\n positives = (labels == label).type(dtype)\n weight_class = weight * positives\n if norm_by_num:\n normalizer = positives.sum()\n normalizer = torch.clamp(normalizer, min=1.0)\n weight_class /= normalizer\n weights += weight_class\n return weights\n\n\ndef get_direction_target(anchors,\n reg_targets,\n one_hot=True,\n dir_offset=0,\n num_bins=2):\n batch_size = reg_targets.shape[0]\n anchors = anchors.view(batch_size, -1, anchors.shape[-1])\n rot_gt = reg_targets[..., 6] + anchors[..., 6]\n offset_rot = box_torch_ops.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)\n dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()\n dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)\n if one_hot:\n dir_cls_targets = torchplus.nn.one_hot(\n dir_cls_targets, num_bins, dtype=anchors.dtype)\n return dir_cls_targets\n" ]
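A minimal standalone sketch of the direction-bin assignment that `get_direction_target` above performs, assuming the same `limit_period`-style wrapping (val - floor(val / period) * period for offset 0); the name `direction_bin` is illustrative, not part of the record:

import math
import torch

def direction_bin(rot_gt: torch.Tensor, dir_offset: float = 0.0, num_bins: int = 2) -> torch.Tensor:
    # Wrap the angle into [0, 2*pi), then floor-divide by the bin width.
    period = 2 * math.pi
    shifted = rot_gt - dir_offset
    offset_rot = shifted - torch.floor(shifted / period) * period
    targets = torch.floor(offset_rot / (period / num_bins)).long()
    return torch.clamp(targets, min=0, max=num_bins - 1)

print(direction_bin(torch.tensor([0.1, math.pi + 0.1])))  # tensor([0, 1])

With two bins this simply distinguishes "facing forward" from "facing backward", which is exactly the ambiguity the sin-difference regression below cannot resolve on its own.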
[ [ "torch.cuda.synchronize", "torch.sigmoid", "torch.LongTensor", "torch.floor", "torch.max", "torch.sin", "torch.zeros", "torch.cat", "torch.nn.functional.softmax", "torch.full", "torch.tensor", "torch.no_grad", "torch.stack", "torch.clamp", "torch.cos" ] ]
admariner/NeMo
[ "e542d7f9063a40afa4119a3b94de4c2c636a37bb", "e542d7f9063a40afa4119a3b94de4c2c636a37bb" ]
[ "nemo/collections/asr/parts/utils/vad_utils.py", "tests/collections/asr/test_asr_ctc_encoder_model_bpe.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport json\nimport math\nimport multiprocessing\nimport os\nimport shutil\nfrom itertools import repeat\nfrom typing import Dict, Tuple\n\nimport IPython.display as ipd\nimport librosa\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom pyannote.core import Annotation, Segment\nfrom pyannote.metrics import detection\nfrom sklearn.model_selection import ParameterGrid\n\nfrom nemo.collections.asr.models import EncDecClassificationModel\nfrom nemo.utils import logging\n\ntry:\n from torch.cuda.amp import autocast\nexcept ImportError:\n from contextlib import contextmanager\n\n @contextmanager\n def autocast(enabled=None):\n yield\n\n\n\"\"\"\nThis file contains all the utility functions required for voice activity detection. \n\"\"\"\n\n\ndef prepare_manifest(config: dict) -> str:\n \"\"\"\n Perform VAD on long audio snippet might cause CUDA out of memory issue. \n Automatically split manifest entry by split_duration to avoid the potential memory issue.\n \"\"\"\n if 'prepared_manifest_vad_input' in config and config['prepared_manifest_vad_input']:\n manifest_vad_input = config['prepared_manifest_vad_input']\n else:\n manifest_vad_input = \"manifest_vad_input.json\"\n\n # input_list is a list of variable ['audio_filepath': i, \"offset\": xxx, \"duration\": xxx])\n if type(config['input']) == str:\n input_list = []\n with open(config['input'], 'r', encoding='utf-8') as manifest:\n for line in manifest.readlines():\n input_list.append(json.loads(line.strip()))\n elif type(config['input']) == list:\n input_list = config['input']\n else:\n raise ValueError(\n \"The input for manifest preparation would either be a string of the filepath to manifest or a list of {'audio_filepath': i, 'offset': 0, 'duration': null} \"\n )\n\n args_func = {\n 'label': 'infer',\n 'split_duration': config['split_duration'],\n 'window_length_in_sec': config['window_length_in_sec'],\n }\n\n if config.get('num_workers') is not None and config['num_workers'] > 1:\n p = multiprocessing.Pool(processes=config['num_workers'])\n results = p.starmap(write_vad_infer_manifest, zip(input_list, repeat(args_func)))\n p.close()\n else:\n results = [write_vad_infer_manifest(input_el, args_func) for input_el in input_list]\n\n if os.path.exists(manifest_vad_input):\n logging.info(\"The prepared manifest file exists. 
Overwriting!\")\n os.remove(manifest_vad_input)\n\n with open(manifest_vad_input, 'a', encoding='utf-8') as fout:\n for res in results:\n for r in res:\n json.dump(r, fout)\n fout.write('\\n')\n fout.flush()\n return manifest_vad_input\n\n\ndef write_vad_infer_manifest(file: dict, args_func: dict) -> list:\n \"\"\"\n Used by prepare_manifest.\n Given a list of files, split them with maximum split_duration and write them to the manifest.\n Args:\n files (dict) : file to be processed\n args_func:\n label (str): label for audio snippet.y\n split_duration (float): max duration of each audio clip (each line in json)\n window_length_in_sec (float) : length of window for generating the frame. Used for taking care of joint. \n Returns:\n res (list) : list of generated metadata line of json for file\n \"\"\"\n res = []\n label = args_func['label']\n split_duration = args_func['split_duration']\n window_length_in_sec = args_func['window_length_in_sec']\n filepath = file['audio_filepath']\n in_duration = file.get('duration', None)\n in_offset = file.get('offset', 0)\n\n try:\n sr = 16000\n x, _sr = librosa.load(filepath, sr=sr, offset=in_offset, duration=in_duration)\n duration = librosa.get_duration(y=x, sr=sr)\n left = duration\n current_offset = in_offset\n\n status = 'single'\n while left > 0:\n if left <= split_duration:\n if status == 'single':\n write_duration = left\n current_offset = 0\n else:\n status = 'end'\n write_duration = left + window_length_in_sec\n current_offset -= window_length_in_sec\n offset_inc = left\n left = 0\n else:\n if status == 'start' or status == 'next':\n status = 'next'\n else:\n status = 'start'\n\n if status == 'start':\n write_duration = split_duration\n offset_inc = split_duration\n else:\n write_duration = split_duration + window_length_in_sec\n current_offset -= window_length_in_sec\n offset_inc = split_duration + window_length_in_sec\n\n left -= split_duration\n\n metadata = {\n 'audio_filepath': filepath,\n 'duration': write_duration,\n 'label': label,\n 'text': '_',\n 'offset': current_offset,\n }\n res.append(metadata)\n\n current_offset += offset_inc\n\n except Exception as e:\n err_file = \"error.log\"\n with open(err_file, 'w', encoding='utf-8') as fout:\n fout.write(filepath + \":\" + str(e))\n return res\n\n\ndef get_vad_stream_status(data: list) -> list:\n \"\"\"\n Generate a list of status for each snippet in manifest. A snippet should be in single, start, next or end status. 
\n Used for concatenating to full audio file.\n Args:\n data (list): list of filepath of audio snippet\n Returns:\n status (list): list of status of each snippet.\n \"\"\"\n if len(data) == 1:\n return ['single']\n\n status = [None] * len(data)\n for i in range(len(data)):\n if i == 0:\n status[i] = 'start' if data[i] == data[i + 1] else 'single'\n elif i == len(data) - 1:\n status[i] = 'end' if data[i] == data[i - 1] else 'single'\n else:\n if data[i] != data[i - 1] and data[i] == data[i + 1]:\n status[i] = 'start'\n elif data[i] == data[i - 1] and data[i] == data[i + 1]:\n status[i] = 'next'\n elif data[i] == data[i - 1] and data[i] != data[i + 1]:\n status[i] = 'end'\n else:\n status[i] = 'single'\n return status\n\n\ndef load_tensor_from_file(filepath: str) -> Tuple[torch.Tensor, str]:\n \"\"\"\n Load torch.Tensor and the name from file\n \"\"\"\n frame = []\n with open(filepath, \"r\", encoding='utf-8') as f:\n for line in f.readlines():\n frame.append(float(line))\n\n name = filepath.split(\"/\")[-1].rsplit(\".\", 1)[0]\n return torch.tensor(frame), name\n\n\ndef generate_overlap_vad_seq(\n frame_pred_dir: str,\n smoothing_method: str,\n overlap: float,\n window_length_in_sec: float,\n shift_length_in_sec: float,\n num_workers: int,\n out_dir: str = None,\n) -> str:\n \"\"\"\n Generate predictions with overlapping input windows/segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple windows. \n Two common smoothing filters are supported: majority vote (median) and average (mean).\n This function uses multiprocessing to speed up. \n Args:\n frame_pred_dir (str): Directory of frame prediction file to be processed.\n smoothing_method (str): median or mean smoothing filter.\n overlap (float): amounts of overlap of adjacent windows.\n window_length_in_sec (float): length of window for generating the frame.\n shift_length_in_sec (float): amount of shift of window for generating the frame.\n out_dir (str): directory of generated predictions.\n num_workers(float): number of process for multiprocessing\n Returns:\n overlap_out_dir(str): directory of the generated predictions.\n \"\"\"\n\n frame_filepathlist = glob.glob(frame_pred_dir + \"/*.frame\")\n if out_dir:\n overlap_out_dir = out_dir\n else:\n overlap_out_dir = frame_pred_dir + \"/overlap_smoothing_output\" + \"_\" + smoothing_method + \"_\" + str(overlap)\n\n if not os.path.exists(overlap_out_dir):\n os.mkdir(overlap_out_dir)\n\n per_args = {\n \"overlap\": overlap,\n \"window_length_in_sec\": window_length_in_sec,\n \"shift_length_in_sec\": shift_length_in_sec,\n \"out_dir\": overlap_out_dir,\n \"smoothing_method\": smoothing_method,\n }\n if num_workers is not None and num_workers > 1:\n p = multiprocessing.Pool(processes=num_workers)\n p.starmap(generate_overlap_vad_seq_per_file, zip(frame_filepathlist, repeat(per_args)))\n p.close()\n p.join()\n else:\n for frame_filepath in frame_filepathlist:\n generate_overlap_vad_seq_per_file(frame_filepath, per_args)\n\n return overlap_out_dir\n\n\[email protected]\ndef generate_overlap_vad_seq_per_tensor(\n frame: torch.Tensor, per_args: Dict[str, float], smoothing_method: str\n) -> torch.Tensor:\n \"\"\"\n Use generated frame prediction (generated by shifting window of shift_length_in_sec (10ms)) to generate prediction with overlapping input window/segments\n See description in generate_overlap_vad_seq.\n Use this for single instance pipeline. 
\n \"\"\"\n # This function will be refactor for vectorization but this is okay for now\n\n overlap = per_args['overlap']\n window_length_in_sec = per_args['window_length_in_sec']\n shift_length_in_sec = per_args['shift_length_in_sec']\n frame_len = per_args.get('frame_len', 0.01)\n\n shift = int(shift_length_in_sec / frame_len) # number of units of shift\n seg = int((window_length_in_sec / frame_len + 1)) # number of units of each window/segment\n\n jump_on_target = int(seg * (1 - overlap)) # jump on target generated sequence\n jump_on_frame = int(jump_on_target / shift) # jump on input frame sequence\n\n if jump_on_frame < 1:\n raise ValueError(\n f\"Note we jump over frame sequence to generate overlapping input segments. \\n \\\n Your input makes jump_on_frame={jump_on_frame} < 1 which is invalid because it cannot jump and will stuck.\\n \\\n Please try different window_length_in_sec, shift_length_in_sec and overlap choices. \\n \\\n jump_on_target = int(seg * (1 - overlap)) \\n \\\n jump_on_frame = int(jump_on_frame/shift) \"\n )\n\n target_len = int(len(frame) * shift)\n\n if smoothing_method == 'mean':\n preds = torch.zeros(target_len)\n pred_count = torch.zeros(target_len)\n\n for i, og_pred in enumerate(frame):\n if i % jump_on_frame != 0:\n continue\n start = i * shift\n end = start + seg\n preds[start:end] = preds[start:end] + og_pred\n pred_count[start:end] = pred_count[start:end] + 1\n\n preds = preds / pred_count\n last_non_zero_pred = preds[pred_count != 0][-1]\n preds[pred_count == 0] = last_non_zero_pred\n\n elif smoothing_method == 'median':\n preds = [torch.empty(0) for _ in range(target_len)]\n for i, og_pred in enumerate(frame):\n if i % jump_on_frame != 0:\n continue\n\n start = i * shift\n end = start + seg\n for j in range(start, end):\n if j <= target_len - 1:\n preds[j] = torch.cat((preds[j], og_pred.unsqueeze(0)), 0)\n\n preds = torch.stack([torch.nanquantile(l, q=0.5) for l in preds])\n nan_idx = torch.isnan(preds)\n last_non_nan_pred = preds[~nan_idx][-1]\n preds[nan_idx] = last_non_nan_pred\n\n else:\n raise ValueError(\"smoothing_method should be either mean or median\")\n\n return preds\n\n\ndef generate_overlap_vad_seq_per_file(frame_filepath: str, per_args: dict) -> str:\n \"\"\"\n A wrapper for generate_overlap_vad_seq_per_tensor.\n \"\"\"\n\n out_dir = per_args['out_dir']\n smoothing_method = per_args['smoothing_method']\n frame, name = load_tensor_from_file(frame_filepath)\n\n per_args_float: Dict[str, float] = {}\n for i in per_args:\n if type(per_args[i]) == float or type(per_args[i]) == int:\n per_args_float[i] = per_args[i]\n\n preds = generate_overlap_vad_seq_per_tensor(frame, per_args_float, smoothing_method)\n\n overlap_filepath = os.path.join(out_dir, name + \".\" + smoothing_method)\n with open(overlap_filepath, \"w\", encoding='utf-8') as f:\n for pred in preds:\n f.write(f\"{pred:.4f}\\n\")\n\n return overlap_filepath\n\n\[email protected]\ndef merge_overlap_segment(segments: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Merged the given overlapped segments.\n For example:\n torch.Tensor([[0, 1.5], [1, 3.5]]) -> torch.Tensor([0, 3.5])\n \"\"\"\n if (\n segments.shape == torch.Size([0])\n or segments.shape == torch.Size([0, 2])\n or segments.shape == torch.Size([1, 2])\n ):\n return segments\n\n segments = segments[segments[:, 0].sort()[1]]\n merge_boundary = segments[:-1, 1] >= segments[1:, 0]\n head_padded = torch.nn.functional.pad(merge_boundary, [1, 0], mode='constant', value=0.0)\n head = segments[~head_padded, 0]\n tail_padded = 
torch.nn.functional.pad(merge_boundary, [0, 1], mode='constant', value=0.0)\n tail = segments[~tail_padded, 1]\n merged = torch.stack((head, tail), dim=1)\n return merged\n\n\[email protected]\ndef filter_short_segments(segments: torch.Tensor, threshold: float) -> torch.Tensor:\n \"\"\"\n Remove segments which duration is smaller than a threshold.\n For example,\n torch.Tensor([[0, 1.5], [1, 3.5], [4, 7]]) and threshold = 2.0\n -> \n torch.Tensor([[1, 3.5], [4, 7]])\n \"\"\"\n return segments[segments[:, 1] - segments[:, 0] >= threshold]\n\n\ndef percentile(data: torch.Tensor, perc: int) -> float:\n \"\"\"\n Calculate percentile given data\n \"\"\"\n size = len(data)\n return float(sorted(data)[int(math.ceil((size * perc) / 100)) - 1])\n\n\ndef cal_vad_onset_offset(\n scale: str, onset: float, offset: float, sequence: torch.Tensor = None\n) -> Tuple[float, float]:\n \"\"\"\n Calculate onset and offset threshold given different scale.\n \"\"\"\n if scale == \"absolute\":\n mini = 0\n maxi = 1\n elif scale == \"relative\":\n mini = min(sequence)\n maxi = max(sequence)\n elif scale == \"percentile\":\n mini = percentile(sequence, 1)\n maxi = percentile(sequence, 99)\n\n onset = mini + onset * (maxi - mini)\n offset = mini + offset * (maxi - mini)\n return float(onset), float(offset)\n\n\[email protected]\ndef binarization(sequence: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:\n \"\"\"\n Binarize predictions to speech and non-speech\n\n Reference\n Paper: Gregory Gelly and Jean-Luc Gauvain. \"Minimum Word Error Training of RNN-based Voice Activity Detection\", InterSpeech 2015. \n Implementation: https://github.com/pyannote/pyannote-audio/blob/master/pyannote/audio/utils/signal.py \n\n Args:\n sequence (torch.Tensor) : A tensor of frame level predictions.\n per_args:\n onset (float): onset threshold for detecting the beginning and end of a speech \n offset (float): offset threshold for detecting the end of a speech. \n pad_onset (float): adding durations before each speech segment\n pad_offset (float): adding durations after each speech segment;\n shift_length_in_sec (float): amount of shift of window for generating the frame.\n \n Returns:\n speech_segments(torch.Tensor): A tensor of speech segment in torch.Tensor([[start1, end1], [start2, end2]]) format. 
\n \"\"\"\n shift_length_in_sec = per_args.get('shift_length_in_sec', 0.01)\n\n onset = per_args.get('onset', 0.5)\n offset = per_args.get('offset', 0.5)\n pad_onset = per_args.get('pad_onset', 0.0)\n pad_offset = per_args.get('pad_offset', 0.0)\n\n speech = False\n start = 0.0\n i = 0\n\n speech_segments = torch.empty(0)\n\n for i in range(1, len(sequence)):\n # Current frame is speech\n if speech:\n # Switch from speech to non-speech\n if sequence[i] < offset:\n if i * shift_length_in_sec + pad_offset > max(0, start - pad_onset):\n new_seg = torch.tensor(\n [max(0, start - pad_onset), i * shift_length_in_sec + pad_offset]\n ).unsqueeze(0)\n speech_segments = torch.cat((speech_segments, new_seg), 0)\n\n start = i * shift_length_in_sec\n speech = False\n\n # Current frame is non-speech\n else:\n # Switch from non-speech to speech\n if sequence[i] > onset:\n start = i * shift_length_in_sec\n speech = True\n\n # if it's speech at the end, add final segment\n if speech:\n new_seg = torch.tensor([max(0, start - pad_onset), i * shift_length_in_sec + pad_offset]).unsqueeze(0)\n speech_segments = torch.cat((speech_segments, new_seg), 0)\n\n # Merge the overlapped speech segments due to padding\n speech_segments = merge_overlap_segment(speech_segments) # not sorted\n return speech_segments\n\n\[email protected]\ndef remove_segments(original_segments: torch.Tensor, to_be_removed_segments: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Remove speech segments list in to_be_removed_segments from original_segments.\n For example, \n remove torch.Tensor([[start2, end2],[start4, end4]]) from torch.Tensor([[start1, end1],[start2, end2],[start3, end3], [start4, end4]]),\n -> \n torch.Tensor([[start1, end1],[start3, end3]])\n \"\"\"\n for y in to_be_removed_segments:\n original_segments = original_segments[original_segments.eq(y).all(dim=1).logical_not()]\n return original_segments\n\n\[email protected]\ndef get_gap_segments(segments: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Get the gap segments. \n For example,\n torch.Tensor([[start1, end1], [start2, end2], [start3, end3]]) -> torch.Tensor([[end1, start2], [end2, start3]])\n \"\"\"\n segments = segments[segments[:, 0].sort()[1]]\n return torch.column_stack((segments[:-1, 1], segments[1:, 0]))\n\n\[email protected]\ndef filtering(speech_segments: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:\n\n \"\"\"\n Filter out short non_speech and speech segments.\n\n Reference\n Paper: Gregory Gelly and Jean-Luc Gauvain. \"Minimum Word Error Training of RNN-based Voice Activity Detection\", InterSpeech 2015. \n Implementation: https://github.com/pyannote/pyannote-audio/blob/master/pyannote/audio/utils/signal.py \n Args:\n speech_segments (torch.Tensor): A tensor of speech segment in torch.Tensor([[start1, end1], [start2, end2]]) format. \n per_args:\n min_duration_on (float): threshold for small non_speech deletion\n min_duration_off (float): threshold for short speech segment deletion\n filter_speech_first (float): Whether to perform short speech segment deletion first. Use 1.0 to represent True. \n\n Returns:\n speech_segments(torch.Tensor): A tensor of filtered speech segment in torch.Tensor([[start1, end1], [start2, end2]]) format. 
\n \"\"\"\n if speech_segments.shape == torch.Size([0]):\n return speech_segments\n\n min_duration_on = per_args.get('min_duration_on', 0.0)\n min_duration_off = per_args.get('min_duration_off', 0.0)\n filter_speech_first = per_args.get('filter_speech_first', 1.0)\n\n if filter_speech_first == 1.0:\n # Filter out the shorter speech segments\n if min_duration_on > 0.0:\n speech_segments = filter_short_segments(speech_segments, min_duration_on)\n # Filter out the shorter non-speech segments and return to be as speech segments\n if min_duration_off > 0.0:\n # Find non-speech segments\n non_speech_segments = get_gap_segments(speech_segments)\n # Find shorter non-speech segments\n short_non_speech_segments = remove_segments(\n non_speech_segments, filter_short_segments(non_speech_segments, min_duration_off)\n )\n # Return shorter non-speech segments to be as speech segments\n speech_segments = torch.cat((speech_segments, short_non_speech_segments), 0)\n\n # Merge the overlapped speech segments\n speech_segments = merge_overlap_segment(speech_segments)\n else:\n if min_duration_off > 0.0:\n # Find non-speech segments\n non_speech_segments = get_gap_segments(speech_segments)\n # Find shorter non-speech segments\n short_non_speech_segments = remove_segments(\n non_speech_segments, filter_short_segments(non_speech_segments, min_duration_off)\n )\n\n speech_segments = torch.cat((speech_segments, short_non_speech_segments), 0)\n\n # Merge the overlapped speech segments\n speech_segments = merge_overlap_segment(speech_segments)\n if min_duration_on > 0.0:\n speech_segments = filter_short_segments(speech_segments, min_duration_on)\n\n return speech_segments\n\n\ndef prepare_gen_segment_table(sequence: torch.Tensor, per_args: dict) -> Tuple[str, dict]:\n \"\"\"\n Preparing for generating segment table. \n \"\"\"\n out_dir = per_args.get('out_dir', None)\n\n # calculate onset offset based on scale selection\n per_args['onset'], per_args['offset'] = cal_vad_onset_offset(\n per_args.get('scale', 'absolute'), per_args['onset'], per_args['offset'], sequence\n )\n\n # cast 'filter_speech_first' for torch.jit.script\n if 'filter_speech_first' in per_args:\n if per_args['filter_speech_first']:\n per_args['filter_speech_first'] = 1.0\n else:\n per_args['filter_speech_first'] = 0.0\n\n per_args_float: Dict[str, float] = {}\n for i in per_args:\n if type(per_args[i]) == float or type(per_args[i]) == int:\n per_args_float[i] = per_args[i]\n\n return out_dir, per_args_float\n\n\[email protected]\ndef generate_vad_segment_table_per_tensor(sequence: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:\n \"\"\"\n See description in generate_overlap_vad_seq.\n Use this for single instance pipeline. 
\n \"\"\"\n\n shift_length_in_sec = per_args['shift_length_in_sec']\n speech_segments = binarization(sequence, per_args)\n speech_segments = filtering(speech_segments, per_args)\n\n if speech_segments.shape == torch.Size([0]):\n return speech_segments\n\n speech_segments, _ = torch.sort(speech_segments, 0)\n\n dur = speech_segments[:, 1:2] - speech_segments[:, 0:1] + shift_length_in_sec\n speech_segments = torch.column_stack((speech_segments, dur))\n\n return speech_segments\n\n\ndef generate_vad_segment_table_per_file(pred_filepath: str, per_args: dict) -> str:\n \"\"\"\n A wrapper for generate_vad_segment_table_per_tensor\n \"\"\"\n sequence, name = load_tensor_from_file(pred_filepath)\n out_dir, per_args_float = prepare_gen_segment_table(sequence, per_args)\n\n preds = generate_vad_segment_table_per_tensor(sequence, per_args_float)\n save_name = name + \".txt\"\n save_path = os.path.join(out_dir, save_name)\n\n if preds.shape == torch.Size([0]):\n with open(save_path, \"w\", encoding='utf-8') as fp:\n fp.write(f\"0 0 speech\\n\")\n\n else:\n with open(save_path, \"w\", encoding='utf-8') as fp:\n for i in preds:\n fp.write(f\"{i[0]:.4f} {i[2]:.4f} speech\\n\")\n\n return save_path\n\n\ndef generate_vad_segment_table(\n vad_pred_dir: str, postprocessing_params: dict, shift_length_in_sec: float, num_workers: int, out_dir: str = None,\n) -> str:\n \"\"\"\n Convert frame level prediction to speech segment in start and end times format.\n And save to csv file in rttm-like format\n 0, 10, speech\n 17,18, speech\n Args:\n vad_pred_dir (str): directory of prediction files to be processed.\n postprocessing_params (dict): dictionary of thresholds for prediction score. See details in binarization and filtering.\n shift_length_in_sec (float): amount of shift of window for generating the frame.\n out_dir (str): output dir of generated table/csv file.\n num_workers(float): number of process for multiprocessing\n Returns:\n table_out_dir(str): directory of the generated table.\n \"\"\"\n\n suffixes = (\"frame\", \"mean\", \"median\")\n vad_pred_filepath_list = [os.path.join(vad_pred_dir, x) for x in os.listdir(vad_pred_dir) if x.endswith(suffixes)]\n\n if out_dir:\n table_out_dir = out_dir\n else:\n table_out_dir_name = \"table_output_tmp_\"\n for key in postprocessing_params:\n table_out_dir_name = table_out_dir_name + str(key) + str(postprocessing_params[key]) + \"_\"\n\n table_out_dir = os.path.join(vad_pred_dir, table_out_dir_name)\n\n if not os.path.exists(table_out_dir):\n os.mkdir(table_out_dir)\n\n per_args = {\n \"shift_length_in_sec\": shift_length_in_sec,\n \"out_dir\": table_out_dir,\n }\n per_args = {**per_args, **postprocessing_params}\n\n if num_workers is not None and num_workers > 1:\n p = multiprocessing.Pool(processes=num_workers)\n p.starmap(generate_vad_segment_table_per_file, zip(vad_pred_filepath_list, repeat(per_args)))\n p.close()\n p.join()\n else:\n for vad_pred_filepath in vad_pred_filepath_list:\n generate_vad_segment_table_per_file(vad_pred_filepath, per_args)\n\n return table_out_dir\n\n\ndef vad_construct_pyannote_object_per_file(\n vad_table_filepath: str, groundtruth_RTTM_file: str\n) -> Tuple[Annotation, Annotation]:\n \"\"\"\n Construct a Pyannote object for evaluation.\n Args:\n vad_table_filepath(str) : path of vad rttm-like table.\n groundtruth_RTTM_file(str): path of groundtruth rttm file.\n Returns:\n reference(pyannote.Annotation): groundtruth\n hypothesis(pyannote.Annotation): prediction\n \"\"\"\n\n pred = pd.read_csv(vad_table_filepath, sep=\" \", 
header=None)\n label = pd.read_csv(groundtruth_RTTM_file, sep=\" \", delimiter=None, header=None)\n label = label.rename(columns={3: \"start\", 4: \"dur\", 7: \"speaker\"})\n\n # construct reference\n reference = Annotation()\n for index, row in label.iterrows():\n reference[Segment(row['start'], row['start'] + row['dur'])] = row['speaker']\n\n # construct hypothesis\n hypothesis = Annotation()\n for index, row in pred.iterrows():\n hypothesis[Segment(float(row[0]), float(row[0]) + float(row[1]))] = 'Speech'\n return reference, hypothesis\n\n\ndef get_parameter_grid(params: dict) -> list:\n \"\"\"\n Get the parameter grid given a dictionary of parameters.\n \"\"\"\n has_filter_speech_first = False\n if 'filter_speech_first' in params:\n filter_speech_first = params['filter_speech_first']\n has_filter_speech_first = True\n params.pop(\"filter_speech_first\")\n\n params_grid = list(ParameterGrid(params))\n\n if has_filter_speech_first:\n for i in params_grid:\n i['filter_speech_first'] = filter_speech_first\n return params_grid\n\n\ndef vad_tune_threshold_on_dev(\n params: dict,\n vad_pred: str,\n groundtruth_RTTM: str,\n result_file: str = \"res\",\n vad_pred_method: str = \"frame\",\n focus_metric: str = \"DetER\",\n shift_length_in_sec: float = 0.01,\n num_workers: int = 20,\n) -> Tuple[dict, dict]:\n \"\"\"\n Tune thresholds on the dev set. Returns the best thresholds, which give the lowest detection error rate (DetER) among the candidates.\n Args:\n params (dict): dictionary of parameters to be tuned on.\n vad_pred_method (str): suffix of the prediction files, used to locate them. Should be one of \"frame\", \"mean\" or \"median\".\n groundtruth_RTTM (str): directory of ground-truth rttm files, or a file containing the paths to them.\n focus_metric (str): the metric we care about most when tuning thresholds. 
Should be either in \"DetER\", \"FA\", \"MISS\"\n Returns:\n best_threshold (float): threshold that gives lowest DetER.\n \"\"\"\n min_score = 100\n all_perf = {}\n try:\n check_if_param_valid(params)\n except:\n raise ValueError(\"Please check if the parameters are valid\")\n\n paired_filenames, groundtruth_RTTM_dict, vad_pred_dict = pred_rttm_map(vad_pred, groundtruth_RTTM, vad_pred_method)\n metric = detection.DetectionErrorRate()\n params_grid = get_parameter_grid(params)\n\n for param in params_grid:\n for i in param:\n if type(param[i]) == np.float64 or type(param[i]) == np.int64:\n param[i] = float(param[i])\n try:\n # Generate speech segments by performing binarization on the VAD prediction according to param.\n # Filter speech segments according to param and write the result to rttm-like table.\n vad_table_dir = generate_vad_segment_table(\n vad_pred, param, shift_length_in_sec=shift_length_in_sec, num_workers=num_workers\n )\n # add reference and hypothesis to metrics\n for filename in paired_filenames:\n groundtruth_RTTM_file = groundtruth_RTTM_dict[filename]\n vad_table_filepath = os.path.join(vad_table_dir, filename + \".txt\")\n reference, hypothesis = vad_construct_pyannote_object_per_file(\n vad_table_filepath, groundtruth_RTTM_file\n )\n metric(reference, hypothesis) # accumulation\n\n # delete tmp table files\n shutil.rmtree(vad_table_dir, ignore_errors=True)\n\n report = metric.report(display=False)\n DetER = report.iloc[[-1]][('detection error rate', '%')].item()\n FA = report.iloc[[-1]][('false alarm', '%')].item()\n MISS = report.iloc[[-1]][('miss', '%')].item()\n\n assert (\n focus_metric == \"DetER\" or focus_metric == \"FA\" or focus_metric == \"MISS\"\n ), \"Metric we care most should be only in 'DetER', 'FA' or 'MISS'!\"\n all_perf[str(param)] = {'DetER (%)': DetER, 'FA (%)': FA, 'MISS (%)': MISS}\n logging.info(f\"parameter {param}, {all_perf[str(param)] }\")\n\n score = all_perf[str(param)][focus_metric + ' (%)']\n\n del report\n metric.reset() # reset internal accumulator\n\n # save results for analysis\n with open(result_file + \".txt\", \"a\", encoding='utf-8') as fp:\n fp.write(f\"{param}, {all_perf[str(param)] }\\n\")\n\n if score < min_score:\n best_threshold = param\n optimal_scores = all_perf[str(param)]\n min_score = score\n print(\"Current best\", best_threshold, optimal_scores)\n\n except RuntimeError as e:\n print(f\"Pass {param}, with error {e}\")\n except pd.errors.EmptyDataError as e1:\n print(f\"Pass {param}, with error {e1}\")\n\n return best_threshold, optimal_scores\n\n\ndef check_if_param_valid(params: dict) -> bool:\n \"\"\"\n Check if the parameters are valid.\n \"\"\"\n for i in params:\n if i == \"filter_speech_first\":\n if not type(params[\"filter_speech_first\"]) == bool:\n raise ValueError(\"Invalid inputs! filter_speech_first should be either True or False!\")\n elif i == \"pad_onset\":\n continue\n elif i == \"pad_offset\":\n continue\n else:\n for j in params[i]:\n if not j >= 0:\n raise ValueError(\n \"Invalid inputs! All float parameters except pad_onset and pad_offset should be larger than 0!\"\n )\n\n if not (all(i <= 1 for i in params['onset']) and all(i <= 1 for i in params['offset'])):\n raise ValueError(\"Invalid inputs! 
The onset and offset thresholds should be in range [0, 1]!\")\n\n return True\n\n\ndef pred_rttm_map(vad_pred: str, groundtruth_RTTM: str, vad_pred_method: str = \"frame\") -> Tuple[set, dict, dict]:\n \"\"\"\n Find paired files in vad_pred and groundtruth_RTTM\n \"\"\"\n groundtruth_RTTM_dict = {}\n if os.path.isfile(groundtruth_RTTM):\n with open(groundtruth_RTTM, \"r\", encoding='utf-8') as fp:\n groundtruth_RTTM_files = fp.read().splitlines()\n elif os.path.isdir(groundtruth_RTTM):\n groundtruth_RTTM_files = glob.glob(os.path.join(groundtruth_RTTM, \"*.rttm\"))\n else:\n raise ValueError(\n \"groundtruth_RTTM should either be a directory contains rttm files or a file contains paths to them!\"\n )\n for f in groundtruth_RTTM_files:\n filename = os.path.basename(f).rsplit(\".\", 1)[0]\n groundtruth_RTTM_dict[filename] = f\n\n vad_pred_dict = {}\n if os.path.isfile(vad_pred):\n with open(vad_pred, \"r\", encoding='utf-8') as fp:\n vad_pred_files = fp.read().splitlines()\n elif os.path.isdir(vad_pred):\n vad_pred_files = glob.glob(os.path.join(vad_pred, \"*.\" + vad_pred_method))\n else:\n raise ValueError(\n \"vad_pred should either be a directory containing vad pred files or a file contains paths to them!\"\n )\n for f in vad_pred_files:\n filename = os.path.basename(f).rsplit(\".\", 1)[0]\n vad_pred_dict[filename] = f\n\n paired_filenames = groundtruth_RTTM_dict.keys() & vad_pred_dict.keys()\n return paired_filenames, groundtruth_RTTM_dict, vad_pred_dict\n\n\ndef plot(\n path2audio_file: str,\n path2_vad_pred: str,\n path2ground_truth_label: str = None,\n offset: float = 0,\n duration: float = None,\n threshold: float = None,\n per_args: dict = None,\n) -> ipd.Audio:\n \"\"\"\n Plot VAD outputs for demonstration in tutorial\n Args:\n path2audio_file (str): path to audio file.\n path2_vad_pred (str): path to vad prediction file,\n path2ground_truth_label(str): path to groundtruth label file.\n threshold (float): threshold for prediction score (from 0 to 1).\n per_args(dict): a dict that stores the thresholds for postprocessing.\n \"\"\"\n plt.figure(figsize=[20, 2])\n FRAME_LEN = 0.01\n\n audio, sample_rate = librosa.load(path=path2audio_file, sr=16000, mono=True, offset=offset, duration=duration)\n dur = librosa.get_duration(y=audio, sr=sample_rate)\n\n time = np.arange(offset, offset + dur, FRAME_LEN)\n frame, _ = load_tensor_from_file(path2_vad_pred)\n frame_snippet = frame[int(offset / FRAME_LEN) : int((offset + dur) / FRAME_LEN)]\n\n len_pred = len(frame_snippet)\n ax1 = plt.subplot()\n ax1.plot(np.arange(audio.size) / sample_rate, audio, 'gray')\n ax1.set_xlim([0, int(dur) + 1])\n ax1.tick_params(axis='y', labelcolor='b')\n ax1.set_ylabel('Signal')\n ax1.set_ylim([-1, 1])\n ax2 = ax1.twinx()\n\n if threshold and per_args:\n raise ValueError(\"threshold and per_args cannot be used at same time!\")\n if not threshold and not per_args:\n raise ValueError(\"One and only one of threshold and per_args must have been used!\")\n\n if threshold:\n pred_snippet = np.where(frame_snippet >= threshold, 1, 0)\n if per_args:\n _, per_args_float = prepare_gen_segment_table(\n frame, per_args\n ) # take whole frame here for calculating onset and offset\n speech_segments = generate_vad_segment_table_per_tensor(frame, per_args_float)\n pred = gen_pred_from_speech_segments(speech_segments, frame)\n pred_snippet = pred[int(offset / FRAME_LEN) : int((offset + dur) / FRAME_LEN)]\n\n if path2ground_truth_label:\n label = extract_labels(path2ground_truth_label, time)\n ax2.plot(np.arange(len_pred) * 
FRAME_LEN, label, 'r', label='label')\n\n ax2.plot(np.arange(len_pred) * FRAME_LEN, pred_snippet, 'b', label='pred')\n ax2.plot(np.arange(len_pred) * FRAME_LEN, frame_snippet, 'g--', label='speech prob')\n ax2.tick_params(axis='y', labelcolor='r')\n ax2.legend(loc='lower right', shadow=True)\n ax2.set_ylabel('Preds and Probas')\n ax2.set_ylim([-0.1, 1.1])\n return ipd.Audio(audio, rate=16000)\n\n\ndef gen_pred_from_speech_segments(\n speech_segments: torch.Tensor, prob: float, shift_length_in_sec: float = 0.01\n) -> np.array:\n \"\"\"\n Generate prediction arrays like 000111000... from speech segments {[0,1][2,4]} \n \"\"\"\n pred = np.zeros(prob.shape)\n speech_segments = [list(i) for i in speech_segments]\n speech_segments.sort(key=lambda x: x[0])\n\n for seg in speech_segments:\n start = int(seg[0] / shift_length_in_sec)\n end = int(seg[1] / shift_length_in_sec)\n pred[start:end] = 1\n return pred\n\n\ndef extract_labels(path2ground_truth_label: str, time: list) -> list:\n \"\"\"\n Extract ground-truth label for given time period.\n path2ground_truth_label (str): path of groundtruth label file \n time (list) : a list of array representing time period.\n \"\"\"\n\n data = pd.read_csv(path2ground_truth_label, sep=\" \", delimiter=None, header=None)\n data = data.rename(columns={3: \"start\", 4: \"dur\", 7: \"speaker\"})\n labels = []\n for pos in time:\n line = data[(data[\"start\"] <= pos) & (data[\"start\"] + data[\"dur\"] > pos)]\n if len(line) >= 1:\n labels.append(1)\n else:\n labels.append(0)\n return labels\n\n\ndef generate_vad_frame_pred(\n vad_model, window_length_in_sec: float, shift_length_in_sec: float, manifest_vad_input: str, out_dir: str\n) -> str:\n \"\"\"\n Generate VAD frame level prediction and write to out_dir\n \"\"\"\n time_unit = int(window_length_in_sec / shift_length_in_sec)\n trunc = int(time_unit / 2)\n trunc_l = time_unit - trunc\n all_len = 0\n\n data = []\n for line in open(manifest_vad_input, 'r', encoding='utf-8'):\n file = json.loads(line)['audio_filepath'].split(\"/\")[-1]\n data.append(file.split(\".wav\")[0])\n logging.info(f\"Inference on {len(data)} audio files/json lines!\")\n\n status = get_vad_stream_status(data)\n for i, test_batch in enumerate(vad_model.test_dataloader()):\n test_batch = [x.to(vad_model.device) for x in test_batch]\n with autocast():\n log_probs = vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1])\n probs = torch.softmax(log_probs, dim=-1)\n pred = probs[:, 1]\n\n if status[i] == 'start':\n to_save = pred[:-trunc]\n elif status[i] == 'next':\n to_save = pred[trunc:-trunc_l]\n elif status[i] == 'end':\n to_save = pred[trunc_l:]\n else:\n to_save = pred\n\n all_len += len(to_save)\n outpath = os.path.join(out_dir, data[i] + \".frame\")\n with open(outpath, \"a\", encoding='utf-8') as fout:\n for f in range(len(to_save)):\n fout.write('{0:0.4f}\\n'.format(to_save[f]))\n\n del test_batch\n if status[i] == 'end' or status[i] == 'single':\n logging.debug(f\"Overall length of prediction of {data[i]} is {all_len}!\")\n all_len = 0\n return out_dir\n\n\ndef init_vad_model(model_path: str):\n \"\"\"\n Initiate VAD model with model path\n \"\"\"\n if model_path.endswith('.nemo'):\n logging.info(f\"Using local VAD model from {model_path}\")\n vad_model = EncDecClassificationModel.restore_from(restore_path=model_path)\n elif model_path.endswith('.ckpt'):\n vad_model = EncDecClassificationModel.load_from_checkpoint(checkpoint_path=model_path)\n else:\n logging.info(f\"Using NGC cloud VAD model {model_path}\")\n vad_model 
= EncDecClassificationModel.from_pretrained(model_name=model_path)\n return vad_model\n\n\ndef stitch_segmented_asr_output(\n segmented_output_manifest: str,\n speech_segments_tensor_dir: str = \"speech_segments\",\n stitched_output_manifest: str = \"asr_stitched_output_manifest.json\",\n) -> str:\n \"\"\"\n Stitch the prediction of speech segments.\n \"\"\"\n if not os.path.exists(speech_segments_tensor_dir):\n os.mkdir(speech_segments_tensor_dir)\n\n segmented_output = []\n for line in open(segmented_output_manifest, 'r', encoding='utf-8'):\n file = json.loads(line)\n segmented_output.append(file)\n\n with open(stitched_output_manifest, 'w', encoding='utf-8') as fout:\n speech_segments = torch.Tensor()\n all_pred_text = \"\"\n if len(segmented_output) > 1:\n for i in range(1, len(segmented_output)):\n start, end = (\n segmented_output[i - 1]['offset'],\n segmented_output[i - 1]['offset'] + segmented_output[i - 1]['duration'],\n )\n new_seg = torch.tensor([start, end]).unsqueeze(0)\n speech_segments = torch.cat((speech_segments, new_seg), 0)\n pred_text = segmented_output[i - 1]['pred_text']\n all_pred_text += pred_text\n name = segmented_output[i - 1]['audio_filepath'].split(\"/\")[-1].rsplit(\".\", 1)[0]\n\n if segmented_output[i - 1]['audio_filepath'] != segmented_output[i]['audio_filepath']:\n\n speech_segments_tensor_path = os.path.join(speech_segments_tensor_dir, name + '.pt')\n torch.save(speech_segments, speech_segments_tensor_path)\n meta = {\n 'audio_filepath': segmented_output[i - 1]['audio_filepath'],\n 'speech_segments_filepath': speech_segments_tensor_path,\n 'pred_text': all_pred_text,\n }\n\n json.dump(meta, fout)\n fout.write('\\n')\n fout.flush()\n speech_segments = torch.Tensor()\n all_pred_text = \"\"\n else:\n all_pred_text += \" \"\n else:\n i = -1\n\n start, end = segmented_output[i]['offset'], segmented_output[i]['offset'] + segmented_output[i]['duration']\n new_seg = torch.tensor([start, end]).unsqueeze(0)\n speech_segments = torch.cat((speech_segments, new_seg), 0)\n pred_text = segmented_output[i]['pred_text']\n all_pred_text += pred_text\n name = segmented_output[i]['audio_filepath'].split(\"/\")[-1].rsplit(\".\", 1)[0]\n speech_segments_tensor_path = os.path.join(speech_segments_tensor_dir, name + '.pt')\n torch.save(speech_segments, speech_segments_tensor_path)\n\n meta = {\n 'audio_filepath': segmented_output[i]['audio_filepath'],\n 'speech_segments_filepath': speech_segments_tensor_path,\n 'pred_text': all_pred_text,\n }\n json.dump(meta, fout)\n fout.write('\\n')\n fout.flush()\n\n logging.info(\n f\"Finish stitch segmented ASR output to {stitched_output_manifest}, the speech segments info has been stored in directory {speech_segments_tensor_dir}\"\n )\n return stitched_output_manifest\n\n\ndef construct_manifest_eval(\n input_manifest: str, stitched_output_manifest: str, aligned_vad_asr_output_manifest: str = \"vad_asr_out.json\"\n) -> str:\n\n \"\"\"\n Generate aligned manifest for evaluation.\n Because some pure noise samples might not appear in stitched_output_manifest.\n \"\"\"\n stitched_output = dict()\n for line in open(stitched_output_manifest, 'r', encoding='utf-8'):\n file = json.loads(line)\n stitched_output[file[\"audio_filepath\"]] = file\n\n out = []\n for line in open(input_manifest, 'r', encoding='utf-8'):\n file = json.loads(line)\n sample = file[\"audio_filepath\"]\n if sample in stitched_output:\n file[\"pred_text\"] = stitched_output[sample][\"pred_text\"]\n file[\"speech_segments_filepath\"] = 
stitched_output[sample][\"speech_segments_filepath\"]\n else:\n file[\"pred_text\"] = \"\"\n file[\"speech_segments_filepath\"] = \"\"\n\n out.append(file)\n\n with open(aligned_vad_asr_output_manifest, 'w', encoding='utf-8') as fout:\n for i in out:\n json.dump(i, fout)\n fout.write('\\n')\n fout.flush()\n\n return aligned_vad_asr_output_manifest\n", "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport os\nimport shutil\nimport tempfile\n\nimport pytest\nimport torch\nfrom omegaconf import DictConfig\n\nfrom nemo.collections.asr.data import audio_to_text\nfrom nemo.collections.asr.metrics.wer_bpe import CTCBPEDecoding, CTCBPEDecodingConfig\nfrom nemo.collections.asr.models import configs\nfrom nemo.collections.asr.models.ctc_bpe_models import EncDecCTCModelBPE\nfrom nemo.collections.common import tokenizers\nfrom nemo.utils.config_utils import assert_dataclass_signature_match\n\n\[email protected]()\ndef asr_model(test_data_dir):\n preprocessor = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}\n encoder = {\n '_target_': 'nemo.collections.asr.modules.ConvASREncoder',\n 'feat_in': 64,\n 'activation': 'relu',\n 'conv_mask': True,\n 'jasper': [\n {\n 'filters': 1024,\n 'repeat': 1,\n 'kernel': [1],\n 'stride': [1],\n 'dilation': [1],\n 'dropout': 0.0,\n 'residual': False,\n 'separable': True,\n 'se': True,\n 'se_context_size': -1,\n }\n ],\n }\n\n decoder = {\n '_target_': 'nemo.collections.asr.modules.ConvASRDecoder',\n 'feat_in': 1024,\n 'num_classes': -1,\n 'vocabulary': None,\n }\n\n tokenizer = {'dir': os.path.join(test_data_dir, \"asr\", \"tokenizers\", \"an4_wpe_128\"), 'type': 'wpe'}\n\n modelConfig = DictConfig(\n {\n 'preprocessor': DictConfig(preprocessor),\n 'encoder': DictConfig(encoder),\n 'decoder': DictConfig(decoder),\n 'tokenizer': DictConfig(tokenizer),\n }\n )\n\n model_instance = EncDecCTCModelBPE(cfg=modelConfig)\n return model_instance\n\n\nclass TestEncDecCTCModel:\n @pytest.mark.with_downloads()\n @pytest.mark.unit\n def test_constructor(self, asr_model):\n asr_model.train()\n # TODO: make proper config and assert correct number of weights\n # Check to/from config_dict:\n confdict = asr_model.to_config_dict()\n instance2 = EncDecCTCModelBPE.from_config_dict(confdict)\n assert isinstance(instance2, EncDecCTCModelBPE)\n\n @pytest.mark.with_downloads()\n @pytest.mark.unit\n def test_forward(self, asr_model):\n asr_model = asr_model.eval()\n\n asr_model.preprocessor.featurizer.dither = 0.0\n asr_model.preprocessor.featurizer.pad_to = 0\n\n input_signal = torch.randn(size=(4, 512))\n length = torch.randint(low=161, high=500, size=[4])\n\n with torch.no_grad():\n # batch size 1\n logprobs_instance = []\n for i in range(input_signal.size(0)):\n logprobs_ins, _, _ = asr_model.forward(\n input_signal=input_signal[i : i + 1], input_signal_length=length[i : i + 1]\n )\n logprobs_instance.append(logprobs_ins)\n print(len(logprobs_ins))\n logprobs_instance = 
torch.cat(logprobs_instance, 0)\n\n # batch size 4\n logprobs_batch, _, _ = asr_model.forward(input_signal=input_signal, input_signal_length=length)\n\n assert logprobs_instance.shape == logprobs_batch.shape\n diff = torch.mean(torch.abs(logprobs_instance - logprobs_batch))\n assert diff <= 1e-6\n diff = torch.max(torch.abs(logprobs_instance - logprobs_batch))\n assert diff <= 1e-6\n\n @pytest.mark.with_downloads()\n @pytest.mark.unit\n def test_save_restore_artifact(self, asr_model):\n with tempfile.TemporaryDirectory() as tmpdir:\n save_path = os.path.join(tmpdir, 'ctc_bpe.nemo')\n asr_model.train()\n asr_model.save_to(save_path)\n\n new_model = EncDecCTCModelBPE.restore_from(save_path)\n assert isinstance(new_model, type(asr_model))\n assert new_model.vocab_path.endswith('_vocab.txt')\n\n assert len(new_model.tokenizer.tokenizer.get_vocab()) == 128\n\n @pytest.mark.with_downloads()\n @pytest.mark.unit\n def test_save_restore_artifact_spe(self, asr_model, test_data_dir):\n with tempfile.TemporaryDirectory() as tmpdir:\n tokenizer_dir = os.path.join(test_data_dir, \"asr\", \"tokenizers\", \"an4_spe_128\")\n asr_model.change_vocabulary(new_tokenizer_dir=tokenizer_dir, new_tokenizer_type='bpe')\n\n save_path = os.path.join(tmpdir, 'ctc_bpe.nemo')\n asr_model.train()\n asr_model.save_to(save_path)\n\n new_model = EncDecCTCModelBPE.restore_from(save_path)\n assert isinstance(new_model, type(asr_model))\n assert isinstance(new_model.tokenizer, tokenizers.SentencePieceTokenizer)\n assert new_model.model_path.endswith('_tokenizer.model')\n assert new_model.vocab_path.endswith('_vocab.txt')\n assert new_model.spe_vocab_path.endswith('_tokenizer.vocab')\n\n assert new_model.tokenizer.tokenizer.vocab_size == 128\n assert len(new_model.tokenizer.tokenizer.get_vocab()) == 128\n\n @pytest.mark.with_downloads()\n @pytest.mark.unit\n def test_save_restore_artifact_agg(self, asr_model, test_data_dir):\n tokenizer_dir = os.path.join(test_data_dir, \"asr\", \"tokenizers\", \"an4_spe_128\")\n tok_en = {\"dir\": tokenizer_dir, \"type\": \"wpe\"}\n # the below is really an english tokenizer but we pretend it is spanish\n tok_es = {\"dir\": tokenizer_dir, \"type\": \"wpe\"}\n tcfg = DictConfig({\"type\": \"agg\", \"langs\": {\"en\": tok_en, \"es\": tok_es}})\n with tempfile.TemporaryDirectory() as tmpdir:\n asr_model.change_vocabulary(new_tokenizer_dir=tcfg, new_tokenizer_type=\"agg\")\n\n save_path = os.path.join(tmpdir, \"ctc_agg.nemo\")\n asr_model.train()\n asr_model.save_to(save_path)\n\n new_model = EncDecCTCModelBPE.restore_from(save_path)\n assert isinstance(new_model, type(asr_model))\n assert isinstance(new_model.tokenizer, tokenizers.AggregateTokenizer)\n\n # should be double\n assert new_model.tokenizer.tokenizer.vocab_size == 254\n assert len(new_model.tokenizer.tokenizer.get_vocab()) == 254\n\n @pytest.mark.with_downloads()\n @pytest.mark.unit\n def test_vocab_change(self, test_data_dir, asr_model):\n old_vocab = copy.deepcopy(asr_model.decoder.vocabulary)\n\n with tempfile.TemporaryDirectory() as save_dir:\n save_path = os.path.join(save_dir, 'temp.nemo')\n\n with tempfile.TemporaryDirectory() as tmpdir:\n old_tmpdir_path = tmpdir\n\n old_tokenizer_dir = os.path.join(test_data_dir, \"asr\", \"tokenizers\", \"an4_wpe_128\", 'vocab.txt')\n new_tokenizer_dir = os.path.join(tmpdir, 'tokenizer')\n\n os.makedirs(new_tokenizer_dir, exist_ok=True)\n shutil.copy2(old_tokenizer_dir, new_tokenizer_dir)\n\n nw1 = asr_model.num_weights\n asr_model.change_vocabulary(new_tokenizer_dir=new_tokenizer_dir, 
new_tokenizer_type='wpe')\n # No change\n assert nw1 == asr_model.num_weights\n\n with open(os.path.join(new_tokenizer_dir, 'vocab.txt'), 'a+') as f:\n f.write(\"!\\n\")\n f.write('$\\n')\n f.write('@\\n')\n\n asr_model.change_vocabulary(new_tokenizer_dir=new_tokenizer_dir, new_tokenizer_type='wpe')\n # fully connected + bias\n assert asr_model.num_weights == nw1 + 3 * (asr_model.decoder._feat_in + 1)\n\n new_vocab = copy.deepcopy(asr_model.decoder.vocabulary)\n assert len(old_vocab) != len(new_vocab)\n\n # save the model (after change of vocabulary)\n asr_model.save_to(save_path)\n assert os.path.exists(save_path)\n # delete copied version of the vocabulary from nested tmpdir (by scope)\n\n # assert copied vocab no longer exists\n assert not os.path.exists(os.path.join(old_tmpdir_path, 'tokenizer', 'vocab.txt'))\n\n # make a copy of the tokenizer before renaming\n try:\n os.rename(old_tokenizer_dir, old_tokenizer_dir + '.bkp')\n assert not os.path.exists(old_tokenizer_dir)\n\n # restore model from .nemo\n asr_model2 = EncDecCTCModelBPE.restore_from(save_path)\n assert isinstance(asr_model2, EncDecCTCModelBPE)\n\n # Check if vocabulary size is same\n assert asr_model.tokenizer.tokenizer.vocab_size == asr_model2.tokenizer.tokenizer.vocab_size\n\n # Make a copy of the tokenizer\n new_tokenizer_dir = os.path.join(save_dir, 'tokenizer')\n\n os.makedirs(new_tokenizer_dir, exist_ok=True)\n new_tokenizer_path = os.path.join(new_tokenizer_dir, 'vocab.txt')\n with open(new_tokenizer_path, 'w') as f:\n for v in asr_model2.tokenizer.tokenizer.get_vocab():\n f.write(f\"{v}\\n\")\n\n # Add some new tokens too\n f.write(\"^\\n\")\n f.write(\"^^\\n\")\n f.write(\"^^^\\n\")\n\n assert os.path.exists(new_tokenizer_path)\n\n # change vocabulary\n asr_model2.change_vocabulary(new_tokenizer_dir, new_tokenizer_type='wpe')\n assert asr_model.tokenizer.vocab_size != asr_model2.tokenizer.vocab_size\n\n new_save_path = os.path.join(save_dir, 'temp2.nemo')\n asr_model2.save_to(new_save_path)\n\n asr_model3 = EncDecCTCModelBPE.restore_from(new_save_path)\n assert isinstance(asr_model3, EncDecCTCModelBPE)\n\n # Check if vocabulary size is same\n assert asr_model2.tokenizer.tokenizer.vocab_size == asr_model3.tokenizer.tokenizer.vocab_size\n assert asr_model2.vocab_path != asr_model3.vocab_path\n\n # Model PT level checks\n assert len(asr_model2.artifacts) == 1\n\n finally:\n os.rename(old_tokenizer_dir + '.bkp', old_tokenizer_dir)\n\n @pytest.mark.unit\n def test_decoding_change(self, asr_model):\n assert asr_model.decoding is not None\n assert isinstance(asr_model.decoding, CTCBPEDecoding)\n assert asr_model.decoding.cfg.strategy == \"greedy\"\n assert asr_model.decoding.preserve_alignments is False\n assert asr_model.decoding.compute_timestamps is False\n\n cfg = CTCBPEDecodingConfig(preserve_alignments=True, compute_timestamps=True)\n asr_model.change_decoding_strategy(cfg)\n\n assert asr_model.decoding.preserve_alignments is True\n assert asr_model.decoding.compute_timestamps is True\n\n @pytest.mark.unit\n def test_ASRDatasetConfig_for_AudioToBPEDataset(self):\n # ignore some additional arguments as dataclass is generic\n IGNORE_ARGS = [\n 'is_tarred',\n 'num_workers',\n 'batch_size',\n 'tarred_audio_filepaths',\n 'shuffle',\n 'pin_memory',\n 'drop_last',\n 'tarred_shard_strategy',\n 'shuffle_n',\n 'parser',\n 'normalize',\n 'unk_index',\n 'pad_id',\n 'bos_id',\n 'eos_id',\n 'blank_index',\n 'bucketing_batch_size',\n 'bucketing_strategy',\n ]\n\n REMAP_ARGS = {'trim_silence': 'trim', 'labels': 'tokenizer'}\n\n 
result = assert_dataclass_signature_match(\n audio_to_text.AudioToBPEDataset, configs.ASRDatasetConfig, ignore_args=IGNORE_ARGS, remap_args=REMAP_ARGS,\n )\n signatures_match, cls_subset, dataclass_subset = result\n\n assert signatures_match\n assert cls_subset is None\n assert dataclass_subset is None\n\n @pytest.mark.unit\n def test_ASRDatasetConfig_for_TarredAudioToBPEDataset(self):\n # ignore some additional arguments as dataclass is generic\n IGNORE_ARGS = [\n 'is_tarred',\n 'num_workers',\n 'batch_size',\n 'shuffle',\n 'pin_memory',\n 'drop_last',\n 'parser',\n 'normalize',\n 'unk_index',\n 'pad_id',\n 'bos_id',\n 'eos_id',\n 'blank_index',\n 'global_rank',\n 'world_size',\n 'bucketing_batch_size',\n 'bucketing_strategy',\n ]\n\n REMAP_ARGS = {\n 'trim_silence': 'trim',\n 'tarred_audio_filepaths': 'audio_tar_filepaths',\n 'tarred_shard_strategy': 'shard_strategy',\n 'shuffle_n': 'shuffle',\n 'labels': 'tokenizer',\n }\n\n result = assert_dataclass_signature_match(\n audio_to_text.TarredAudioToBPEDataset,\n configs.ASRDatasetConfig,\n ignore_args=IGNORE_ARGS,\n remap_args=REMAP_ARGS,\n )\n signatures_match, cls_subset, dataclass_subset = result\n\n assert signatures_match\n assert cls_subset is None\n assert dataclass_subset is None\n" ]
[ [ "torch.zeros", "torch.cat", "torch.cuda.amp.autocast", "numpy.where", "torch.save", "torch.Size", "pandas.read_csv", "torch.softmax", "numpy.arange", "torch.tensor", "matplotlib.pyplot.subplot", "torch.sort", "numpy.zeros", "torch.nn.functional.pad", "matplotlib.pyplot.figure", "torch.column_stack", "torch.empty", "torch.nanquantile", "sklearn.model_selection.ParameterGrid", "torch.stack", "torch.Tensor", "torch.isnan" ], [ "torch.abs", "torch.randint", "torch.cat", "torch.randn", "torch.no_grad" ] ]
qwang70/PreSumm
[ "b2c3aee0ada7f5fa8754dffd44355b956fe0d45b" ]
[ "src/train_extractive.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n Main training workflow\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport glob\nimport os\nimport random\nimport signal\nimport time\n\nimport torch\n\nimport distributed\nfrom models import data_loader, model_builder\nfrom models.data_loader import load_dataset\nfrom models.model_builder import ExtSummarizer\nfrom models.trainer_ext import build_trainer\nfrom others.logging import logger, init_logger\n\nimport pdb\n\nmodel_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n\n\ndef train_multi_ext(args):\n \"\"\" Spawns 1 process per GPU \"\"\"\n init_logger()\n\n nb_gpu = args.world_size\n mp = torch.multiprocessing.get_context('spawn')\n\n # Create a thread to listen for errors in the child processes.\n error_queue = mp.SimpleQueue()\n error_handler = ErrorHandler(error_queue)\n\n # Train with multiprocessing.\n procs = []\n for i in range(nb_gpu):\n device_id = i\n procs.append(mp.Process(target=run, args=(args,\n device_id, error_queue,), daemon=True))\n procs[i].start()\n logger.info(\" Starting process pid: %d \" % procs[i].pid)\n error_handler.add_child(procs[i].pid)\n for p in procs:\n p.join()\n\n\ndef run(args, device_id, error_queue):\n \"\"\" run process \"\"\"\n setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])\n\n try:\n gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)\n print('gpu_rank %d' % gpu_rank)\n if gpu_rank != args.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n\n train_single_ext(args, device_id)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\ndef validate_ext(args, device_id):\n timestep = 0\n FILE_PATH = 'model_step_*.pt'\n #FILE_PATH = 'bertext_cnndm_transformer*.pt'\n if (args.test_all):\n cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = validate(args, device_id, cp, step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if (i - max_step > 10):\n 
break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info('PPL %s' % str(xent_lst))\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n test_ext(args, device_id, cp, step)\n else:\n while (True):\n cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))\n cp_files.sort(key=os.path.getmtime)\n if (cp_files):\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if (not os.path.getsize(cp) > 0):\n print(\"will sleep 60\", os.path.getsize(cp))\n time.sleep(60)\n continue\n if (time_of_cp > timestep):\n timestep = time_of_cp\n step = 0\n step = int(cp.split('.')[-2].split('_')[-1])\n validate(args, device_id, cp, step)\n test_ext(args, device_id, cp, step)\n\n cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))\n cp_files.sort(key=os.path.getmtime)\n if (cp_files):\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if (time_of_cp > timestep):\n continue\n return\n else:\n print(\"will sleep 300\", cp_files)\n time.sleep(300)\n\n\ndef validate(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n logger.info('Loading checkpoint from %s' % test_from)\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n model = ExtSummarizer(args, device, checkpoint)\n model.eval()\n\n valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),\n args.batch_size, device,\n shuffle=False, is_test=False)\n trainer = build_trainer(args, device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n\ndef test_ext(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n logger.info('Loading checkpoint from %s' % test_from)\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n model = ExtSummarizer(args, device, checkpoint)\n model.eval()\n\n test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),\n args.test_batch_size, device,\n shuffle=False, is_test=True)\n trainer = build_trainer(args, device_id, model, None)\n trainer.test(test_iter, step)\n\ndef train_ext(args, device_id):\n if (args.world_size > 1):\n train_multi_ext(args)\n else:\n train_single_ext(args, device_id)\n\n\ndef train_single_ext(args, device_id):\n init_logger(args.log_file)\n\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n logger.info('Device ID %d' % device_id)\n logger.info('Device %s' % device)\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n torch.cuda.manual_seed(args.seed)\n\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n\n if args.train_from != '':\n logger.info('Loading checkpoint from %s' % args.train_from)\n checkpoint = torch.load(args.train_from,\n map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n else:\n checkpoint = None\n\n def train_iter_fct():\n return 
data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,\n shuffle=True, is_test=False)\n\n model = ExtSummarizer(args, device, checkpoint)\n optim = model_builder.build_optim(args, model, checkpoint)\n\n logger.info(model)\n\n trainer = build_trainer(args, device_id, model, optim)\n trainer.train(train_iter_fct, args.train_steps)\n" ]
[ [ "torch.cuda.manual_seed", "torch.cuda.set_device", "torch.load", "torch.manual_seed", "torch.multiprocessing.get_context" ] ]
Melimet/DAP2020
[ "0854fe4ce8ace6abf6dc0bbcf71984595ff6d42a" ]
[ "hy-data-analysis-with-python-spring-2020/part05-e08_bicycle_timeseries/test/test_bicycle_timeseries.py" ]
[ "#!/usr/bin/env python3\n\nimport unittest\nfrom unittest.mock import patch, MagicMock\nimport pandas as pd\nimport numpy as np\n\n\nfrom tmc import points\n\nfrom tmc.utils import load, get_out, patch_helper\n\nmodule_name=\"src.bicycle_timeseries\"\nbicycle_timeseries = load(module_name, \"bicycle_timeseries\")\nmain = load(module_name, \"main\")\nph = patch_helper(module_name)\n\n\n@points('p05-08.1')\nclass BicycleTimeseries(unittest.TestCase):\n\n # @classmethod\n # def setUpClass(cls):\n # cls.df = bicycle_timeseries()\n\n def setUp(self):\n self.df = bicycle_timeseries()\n \n def test_shape(self):\n self.assertEqual(self.df.shape, (37128, 20), msg=\"Incorrect shape!\")\n\n def test_columns(self):\n cols = ['Auroransilta', 'Eteläesplanadi', 'Huopalahti (asema)',\n 'Kaisaniemi/Eläintarhanlahti', 'Kaivokatu', 'Kulosaaren silta et.',\n 'Kulosaaren silta po. ', 'Kuusisaarentie', 'Käpylä, Pohjoisbaana',\n 'Lauttasaaren silta eteläpuoli', 'Merikannontie',\n 'Munkkiniemen silta eteläpuoli', 'Munkkiniemi silta pohjoispuoli',\n 'Heperian puisto/Ooppera', 'Pitkäsilta itäpuoli',\n 'Pitkäsilta länsipuoli', 'Lauttasaaren silta pohjoispuoli',\n 'Ratapihantie', 'Viikintie', 'Baana']\n np.testing.assert_array_equal(self.df.columns, cols, err_msg=\"Incorrect columns!\")\n\n def test_index(self):\n self.assertIsInstance(self.df.index[0], pd.Timestamp,\n msg=\"Expected index to have type timestamp!\")\n self.assertEqual(self.df.index[0], pd.to_datetime(\"2014-1-1 00:00\"),\n msg=\"Incorrect first index!\")\n \n self.assertEqual(self.df.index[1], pd.to_datetime(\"2014-1-1 01:00\"),\n msg=\"Incorrect second index!\")\n\n def test_calls(self):\n with patch(ph(\"bicycle_timeseries\"), wraps=bicycle_timeseries) as pbts,\\\n patch(ph(\"pd.read_csv\"), wraps=pd.read_csv) as prc,\\\n patch(ph(\"pd.to_datetime\"), wraps=pd.to_datetime) as pdatetime:\n main()\n pbts.assert_called_once()\n prc.assert_called_once()\n pdatetime.assert_called()\n \nif __name__ == '__main__':\n unittest.main()\n \n" ]
[ [ "numpy.testing.assert_array_equal", "pandas.to_datetime" ] ]