repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
normster/vissl | [
"fc914adc9bc3209d7861c13922a2a1c76d5bddde"
]
| [
"extra_scripts/datasets/create_resisc45_data_files.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport os\nimport random\nimport shutil\n\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\nRESISC45_URL = \"https://1drv.ms/u/s!AmgKYzARBl5ca3HNaHIlzp_IXjs\"\n\n\ndef get_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\",\n \"--input\",\n type=str,\n help=\"Path to the expanded NWPU-RESISC45.rar archive (download from: {})\".format(RESISC45_URL),\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n type=str,\n help=\"Folder where the classification dataset will be written\",\n )\n parser.add_argument(\n \"-d\",\n \"--download\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"To download the original dataset and decompress it in the input folder\",\n )\n return parser\n\n\nclass _RESISC45:\n \"\"\"\n Dataset used to parallelize the transformation of the dataset via a DataLoader\n \"\"\"\n\n TRAIN_SPLIT_PERCENT = .8\n TEST_SPLIT_PERCENT = .2\n\n def __init__(self, input_path: str, output_path: str, train: bool):\n self.input_path = input_path\n self.output_path = output_path\n self.train = train\n self.images = []\n self.targets = []\n self.labels = sorted(os.listdir(self.input_path))\n split_generator = random.Random(42)\n\n # There is no train/val split in the RESISC45 dataset, so we have to create it\n for i, label in enumerate(self.labels):\n label_path = os.path.join(self.input_path, label)\n files = sorted(os.listdir(label_path))\n files = split_generator.shuffle(files)\n train_samples = int(self.TRAIN_SPLIT_PERCENT * len(files))\n test_samples = int(self.TEST_SPLIT_PERCENT * len(files))\n if train:\n self.images.extend(files[: train_samples])\n self.targets.extend([i] * train_samples)\n else:\n self.images.extend(\n files[train_samples: train_samples + test_samples]\n )\n self.targets.extend([i] * test_samples)\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, idx: int) -> bool:\n image_name = self.images[idx]\n target = self.labels[self.targets[idx]]\n image_path = os.path.join(self.input_path, target, image_name)\n split_name = \"train\" if self.train else \"test\"\n shutil.copy(\n image_path, os.path.join(self.output_path, split_name, target, image_name)\n )\n return True\n\n\ndef create_disk_folder_split(dataset: _RESISC45, split_path: str):\n \"\"\"\n Create one split (example: \"train\" or \"test\") of the disk_folder hierarchy\n \"\"\"\n for label in dataset.labels:\n os.makedirs(os.path.join(split_path, label), exist_ok=True)\n loader = DataLoader(dataset, num_workers=8, batch_size=1, collate_fn=lambda x: x[0])\n with tqdm(total=len(dataset)) as progress_bar:\n for _ in loader:\n progress_bar.update(1)\n\n\ndef create_resisc_disk_folder(input_path: str, output_path: str):\n \"\"\"\n Read the RESISC45 dataset at 'input_path' and transform it to a disk folder at 'output_path'\n \"\"\"\n print(\"Creating the training split...\")\n create_disk_folder_split(\n dataset=_RESISC45(input_path, output_path=output_path, train=True),\n split_path=os.path.join(output_path, \"train\"),\n )\n print(\"Creating the validation split...\")\n create_disk_folder_split(\n dataset=_RESISC45(input_path, output_path=output_path, train=False),\n split_path=os.path.join(output_path, \"test\"),\n )\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Example usage:\n\n ```\n python 
extra_scripts/datasets/create_resisc45_data_files.py -i /path/to/resisc45 -o /output_path/to/resisc45\n ```\n \"\"\"\n args = get_argument_parser().parse_args()\n if args.download:\n raise Exception(\"Cannot automatically download RESISC45. You can manually download the archive at {}\".format(RESISC45_URL))\n create_resisc_disk_folder(args.input, args.output)\n"
]
| [
[
"torch.utils.data.DataLoader"
]
]
|
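Below is a minimal sketch (hypothetical class name and an empty job list, not part of the repo) of the pattern the script above relies on: using `torch.utils.data.DataLoader` purely as a multi-process work queue, where each `__getitem__` call performs a file copy as a side effect and an identity `collate_fn` keeps the loader from batching the return values.

```python
import shutil
from torch.utils.data import DataLoader, Dataset


class _CopyJobs(Dataset):
    """Each item copies one file; the DataLoader workers do the copying."""

    def __init__(self, jobs):
        self.jobs = jobs                      # list of (src, dst) path pairs

    def __len__(self):
        return len(self.jobs)

    def __getitem__(self, idx):
        src, dst = self.jobs[idx]
        shutil.copy(src, dst)                 # the side effect is the whole point
        return True


jobs = []                                     # fill with real (src, dst) pairs
loader = DataLoader(_CopyJobs(jobs), num_workers=8, batch_size=1,
                    collate_fn=lambda x: x[0])
for _ in loader:                              # draining the loader runs every copy
    pass
```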
arkilpatel/SVAMP | [
"81e568ca3f1b1f4026758db24c05acd4ce2eeb9c"
]
| [
"code/rnn_seq2seq/src/utils/sentence_processing.py"
]
| [
"import logging\r\nimport pdb\r\nimport torch\r\nfrom glob import glob\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\n# Ignore warnings\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\ndef sent_to_idx(voc, sent, max_length):\r\n\tidx_vec = []\r\n\tfor w in sent.split(' '):\r\n\t\ttry:\r\n\t\t\tidx = voc.get_id(w)\r\n\t\t\tidx_vec.append(idx)\r\n\t\texcept:\r\n\t\t\tidx_vec.append(voc.get_id('unk'))\r\n\t# idx_vec.append(voc.get_id('</s>'))\r\n\tif len(idx_vec) < max_length-1:\r\n\t\tidx_vec.append(voc.get_id('</s>'))\r\n\treturn idx_vec\r\n\r\n\r\ndef sents_to_idx(voc, sents, max_length):\r\n\tall_indexes = []\r\n\tfor sent in sents:\r\n\t\tall_indexes.append(sent_to_idx(voc, sent, max_length))\r\n\treturn all_indexes\r\n\r\n\r\ndef sent_to_tensor(voc, sentence, device, max_length):\r\n\tindexes = sent_to_idx(voc, sentence, max_length)\r\n\treturn torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)\r\n\r\n\r\ndef batch_to_tensor(voc, sents, device, max_length):\r\n\tbatch_sent = []\r\n\t# batch_label = []\r\n\tfor sent in sents:\r\n\t\tsent_id = sent_to_tensor(voc, sent, device, max_length)\r\n\t\tbatch_sent.append(sent_id)\r\n\r\n\treturn batch_sent\r\n\r\n\r\ndef idx_to_sent(voc, tensor, no_eos=False):\r\n\tsent_word_list = []\r\n\tfor idx in tensor:\r\n\t\tword = voc.get_word(idx.item())\r\n\t\tif no_eos:\r\n\t\t\tif word != '</s>':\r\n\t\t\t\tsent_word_list.append(word)\r\n\t\t\t# else:\r\n\t\t\t# \tbreak\r\n\t\telse:\r\n\t\t\tsent_word_list.append(word)\r\n\treturn sent_word_list\r\n\r\n\r\ndef idx_to_sents(voc, tensors, no_eos=False):\r\n\ttensors = tensors.transpose(0, 1)\r\n\tbatch_word_list = []\r\n\tfor tensor in tensors:\r\n\t\tbatch_word_list.append(idx_to_sent(voc, tensor, no_eos))\r\n\r\n\treturn batch_word_list\r\n\r\n\r\ndef pad_seq(seq, max_length, voc):\r\n\tseq += [voc.get_id('</s>') for i in range(max_length - len(seq))]\r\n\treturn seq\r\n\r\n# def process_single(sent, label, voc, device):\r\n\r\ndef sort_by_len(seqs, input_len, device=None, dim=1):\r\n\torig_idx = list(range(seqs.size(dim)))\r\n\t# pdb.set_trace()\r\n\r\n\t# Index by which sorting needs to be done\r\n\tsorted_idx = sorted(orig_idx, key=lambda k: input_len[k], reverse=True)\r\n\tsorted_idx= torch.LongTensor(sorted_idx)\r\n\tif device:\r\n\t\tsorted_idx = sorted_idx.to(device)\r\n\r\n\tsorted_seqs = seqs.index_select(1, sorted_idx)\r\n\tsorted_lens= [input_len[i] for i in sorted_idx]\r\n\r\n\t# For restoring original order\r\n\torig_idx = sorted(orig_idx, key=lambda k: sorted_idx[k])\r\n\torig_idx = torch.LongTensor(orig_idx)\r\n\tif device:\r\n\t\torig_idx = orig_idx.to(device)\r\n\treturn sorted_seqs, sorted_lens, orig_idx\r\n\r\n\r\ndef restore_order(seqs, input_len, orig_idx):\r\n\torig_seqs= [seqs[i] for i in orig_idx]\r\n\torig_lens= [input_len[i] for i in orig_idx]\r\n\treturn orig_seqs, orig_lens\r\n\r\n\r\ndef process_batch(sent1s, sent2s, voc1, voc2, device):\r\n\tinput_len1 = [len(s) for s in sent1s]\r\n\tinput_len2 = [len(s) for s in sent2s]\r\n\tmax_length_1 = max(input_len1)\r\n\tmax_length_2 = max(input_len2)\r\n\r\n\tsent1s_padded = [pad_seq(s, max_length_1, voc1) for s in sent1s]\r\n\tsent2s_padded = [pad_seq(s, max_length_2, voc2) for s in sent2s]\r\n\r\n\t# Convert to [Max_len X Batch]\r\n\tsent1_var = Variable(torch.LongTensor(sent1s_padded)).transpose(0, 1)\r\n\tsent2_var = Variable(torch.LongTensor(sent2s_padded)).transpose(0, 1)\r\n\r\n\tsent1_var = sent1_var.to(device)\r\n\tsent2_var = sent2_var.to(device)\r\n\r\n\treturn 
sent1_var, sent2_var, input_len1, input_len2\r\n"
]
| [
[
"torch.LongTensor",
"torch.tensor"
]
]
|
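A minimal sketch of the pad-and-transpose step in `process_batch` above, with made-up token ids; the real code pads with the vocabulary's `</s>` id and wraps the result in `Variable` before moving it to the device.

```python
import torch

eos_id = 0                                         # stand-in for voc.get_id('</s>')
batch = [[5, 3, 9], [7, 2]]                        # token-id sequences of unequal length
max_len = max(len(s) for s in batch)
padded = [s + [eos_id] * (max_len - len(s)) for s in batch]

# Convert to [max_len x batch], the layout the RNN encoder expects
sent_var = torch.LongTensor(padded).transpose(0, 1)
print(sent_var.shape)                              # torch.Size([3, 2])
```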
pradeep-vishnu/pcv | [
"b88e706f8c7a8649b29ea848ba8c7fbb5dfde359"
]
| [
"panoptic/pcv/gaussian_smooth/vis.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom ipywidgets import Output\nfrom matplotlib.patches import Circle\n\nfrom panoptic.pcv.gaussian_smooth.prob_tsr import MakeProbTsr\nfrom panoptic.pcv.components.snake import Snake\n\nfrom panoptic.vis import Visualizer as BaseVisualizer\n\n\nclass Plot():\n def __init__(self, ax, bin_center_yx, vote_mask, spatial_prob):\n self.ax = ax\n self.bin_center_yx = bin_center_yx\n self.vote_mask = vote_mask\n self.spatial_prob = spatial_prob\n\n # self.pressed_xy = None\n self.dot = None\n self.texts = None\n self.init_artists()\n self.render_visual()\n\n def init_artists(self):\n if self.dot is not None:\n self.dot.remove()\n\n if self.texts is not None:\n assert isinstance(self.texts, (tuple, list))\n for elem in self.texts:\n elem.remove()\n\n self.dot = None\n self.texts = None\n\n def render_visual(self):\n self.ax.imshow(self.vote_mask)\n\n def press_coord(self, x, y, button):\n del button # ignoring button for now\n # self.pressed_xy = x, y\n self.init_artists()\n self.render_single_dot(x, y)\n self.render_prob_dist(x, y)\n\n def render_prob_dist(self, x, y):\n thresh = 0\n dist = self.spatial_prob[y, x]\n inds = np.where(dist > thresh)[0]\n probs = dist[inds] * 100\n # print(probs)\n bin_centers = self.bin_center_yx[inds]\n\n acc = []\n for cen, p in zip(bin_centers, probs):\n y, x = cen\n _a = self.ax.text(\n x, y, s='{:.2f}'.format(p), fontsize='small', color='r'\n )\n acc.append(_a)\n self.texts = acc\n\n def query_coord(self, x, y, button):\n pass\n\n def motion_coord(self, x, y):\n self.press_coord(x, y, None)\n\n def render_single_dot(self, x, y):\n cir = Circle((x, y), radius=0.5, color='white')\n self.ax.add_patch(cir)\n self.dot = cir\n\n\nclass Visualizer(BaseVisualizer):\n def __init__(self):\n spec = [ # 243, 233 bins\n (3, 4), (9, 3), (27, 3)\n ]\n # spec = [\n # (3, 3), (7, 3), (21, 4)\n # ]\n diam, grid_spec = Snake.flesh_out_grid_spec(spec)\n vote_mask = Snake.paint_trail_mask(diam, grid_spec)\n maker = MakeProbTsr(spec, diam, grid_spec, vote_mask)\n spatial_prob = maker.compute_voting_prob_tsr()\n\n self.vote_mask = vote_mask\n self.spatial_prob = spatial_prob\n\n radius = (diam - 1) // 2\n center = np.array((radius, radius))\n self.bin_center_yx = grid_spec[:, :2] + center\n\n self.output_widget = Output()\n self.init_state()\n self.pressed = False\n np.set_printoptions(\n formatter={'float': lambda x: \"{:.3f}\".format(x)}\n )\n\n def vis(self):\n fig = plt.figure(figsize=(10, 10), constrained_layout=True)\n self.fig = fig\n self.canvas = fig.canvas\n self.plots = dict()\n\n key = 'spatial prob dist'\n ax = fig.add_subplot(111)\n ax.set_title(key)\n self.plots[key] = Plot(\n ax, self.bin_center_yx, self.vote_mask, self.spatial_prob\n )\n self.connect()\n\n\ndef test():\n # spec = [ # 243, 233 bins\n # (1, 1), (3, 4), (9, 3), (27, 3)\n # ]\n spec = [ # 243, 233 bins\n (3, 1), (9, 1)\n ]\n diam, grid_spec = Snake.flesh_out_grid_spec(spec)\n vote_mask = Snake.paint_trail_mask(diam, grid_spec)\n maker = MakeProbTsr(spec, diam, grid_spec, vote_mask)\n\n\nif __name__ == \"__main__\":\n test()\n"
]
| [
[
"numpy.where",
"numpy.array",
"matplotlib.patches.Circle",
"matplotlib.pyplot.figure"
]
]
|
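A minimal, non-interactive sketch of the annotation step in `Plot.render_prob_dist` above; the vote distribution and bin centers here are illustrative stand-ins for the tensors produced by `MakeProbTsr`.

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle

dist = np.array([0.0, 0.6, 0.0, 0.4])              # per-bin vote probabilities (made up)
bin_center_yx = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

inds = np.where(dist > 0)[0]                        # keep only bins above the threshold
probs = dist[inds] * 100

fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(111)
ax.add_patch(Circle((0.5, 0.5), radius=0.05, color='k'))    # the queried pixel
for (y, x), p in zip(bin_center_yx[inds], probs):
    ax.text(x, y, '{:.2f}'.format(p), color='r')             # probability label per bin
```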
winderai/pachyderm-seldon | [
"03e9affbdfaa2bdde4b20c3197f1620e99fcea9e"
]
| [
"repo/ExplainerTraining/explainer_test.py"
]
| [
"import argparse\n\nimport dill\nimport numpy as np\nimport pandas as pd\nimport joblib\n\nnp.random.seed(112)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('data_file_path', type=str)\nparser.add_argument('explainer_path', type=str)\nparser.add_argument('detector_path', type=str)\nargs = parser.parse_args()\n\nwith open(\"{}/explainer.dill\".format(args.explainer_path), 'rb') as in_strm:\n explainer = dill.load(in_strm)\n\nprint(explainer)\n\ndf = pd.read_csv(args.data_file_path, index_col=0)\nX_test = df.drop(columns='target').values\n\n\nclf = joblib.load(args.detector_path)\nprint(clf)\nprint()\n\nfor row in X_test[:10]:\n explanation = explainer.explain(\n row, \n threshold=0.85, \n coverage_samples=10, \n batch_size=10\n )\n print('Anchor: %s' % (' AND '.join(explanation.anchor)))"
]
| [
[
"numpy.random.seed",
"pandas.read_csv"
]
]
|
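A self-contained sketch of the data-loading step above; the in-memory frame stands in for `pd.read_csv(args.data_file_path, index_col=0)`, and the `target` column name mirrors the script.

```python
import numpy as np
import pandas as pd

np.random.seed(112)                                 # the script pins the seed for reproducibility

# Stand-in for pd.read_csv(args.data_file_path, index_col=0)
df = pd.DataFrame({"f0": [0.1, 0.7], "f1": [1.2, 0.3], "target": [0, 1]})
X_test = df.drop(columns="target").values           # features only, target column dropped
print(X_test.shape)                                 # (2, 2)
```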
imoneoi/fixed_tianshou | [
"c0bc8e00cad25739ec80a52afd5e0ff296b11dd4"
]
| [
"examples/mujoco/mujoco_npg.py"
]
| [
"#!/usr/bin/env python3\n\nimport os\nimport gym\nimport torch\nimport pprint\nimport datetime\nimport argparse\nimport numpy as np\nfrom torch import nn\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.distributions import Independent, Normal\n\nfrom tianshou.policy import NPGPolicy\nfrom tianshou.utils import BasicLogger\nfrom tianshou.env import SubprocVectorEnv\nfrom tianshou.utils.net.common import Net\nfrom tianshou.trainer import onpolicy_trainer\nfrom tianshou.utils.net.continuous import ActorProb, Critic\nfrom tianshou.data import Collector, ReplayBuffer, VectorReplayBuffer\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--task', type=str, default='HalfCheetah-v3')\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--buffer-size', type=int, default=4096)\n parser.add_argument('--hidden-sizes', type=int, nargs='*',\n default=[64, 64]) # baselines [32, 32]\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--epoch', type=int, default=100)\n parser.add_argument('--step-per-epoch', type=int, default=30000)\n parser.add_argument('--step-per-collect', type=int, default=1024)\n parser.add_argument('--repeat-per-collect', type=int, default=1)\n # batch-size >> step-per-collect means calculating all data in one singe forward.\n parser.add_argument('--batch-size', type=int, default=99999)\n parser.add_argument('--training-num', type=int, default=16)\n parser.add_argument('--test-num', type=int, default=10)\n # npg special\n parser.add_argument('--rew-norm', type=int, default=True)\n parser.add_argument('--gae-lambda', type=float, default=0.95)\n parser.add_argument('--bound-action-method', type=str, default=\"clip\")\n parser.add_argument('--lr-decay', type=int, default=True)\n parser.add_argument('--logdir', type=str, default='log')\n parser.add_argument('--render', type=float, default=0.)\n parser.add_argument('--norm-adv', type=int, default=1)\n parser.add_argument('--optim-critic-iters', type=int, default=20)\n parser.add_argument('--actor-step-size', type=float, default=0.1)\n parser.add_argument(\n '--device', type=str,\n default='cuda' if torch.cuda.is_available() else 'cpu')\n parser.add_argument('--resume-path', type=str, default=None)\n parser.add_argument('--watch', default=False, action='store_true',\n help='watch the play of pre-trained policy only')\n return parser.parse_args()\n\n\ndef test_npg(args=get_args()):\n env = gym.make(args.task)\n args.state_shape = env.observation_space.shape or env.observation_space.n\n args.action_shape = env.action_space.shape or env.action_space.n\n args.max_action = env.action_space.high[0]\n print(\"Observations shape:\", args.state_shape)\n print(\"Actions shape:\", args.action_shape)\n print(\"Action range:\", np.min(env.action_space.low),\n np.max(env.action_space.high))\n # train_envs = gym.make(args.task)\n train_envs = SubprocVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.training_num)],\n norm_obs=True)\n # test_envs = gym.make(args.task)\n test_envs = SubprocVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.test_num)],\n norm_obs=True, obs_rms=train_envs.obs_rms, update_obs_rms=False)\n\n # seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n train_envs.seed(args.seed)\n test_envs.seed(args.seed)\n # model\n net_a = Net(args.state_shape, hidden_sizes=args.hidden_sizes,\n activation=nn.Tanh, 
device=args.device)\n actor = ActorProb(net_a, args.action_shape, max_action=args.max_action,\n unbounded=True, device=args.device).to(args.device)\n net_c = Net(args.state_shape, hidden_sizes=args.hidden_sizes,\n activation=nn.Tanh, device=args.device)\n critic = Critic(net_c, device=args.device).to(args.device)\n torch.nn.init.constant_(actor.sigma_param, -0.5)\n for m in list(actor.modules()) + list(critic.modules()):\n if isinstance(m, torch.nn.Linear):\n # orthogonal initialization\n torch.nn.init.orthogonal_(m.weight, gain=np.sqrt(2))\n torch.nn.init.zeros_(m.bias)\n # do last policy layer scaling, this will make initial actions have (close to)\n # 0 mean and std, and will help boost performances,\n # see https://arxiv.org/abs/2006.05990, Fig.24 for details\n for m in actor.mu.modules():\n if isinstance(m, torch.nn.Linear):\n torch.nn.init.zeros_(m.bias)\n m.weight.data.copy_(0.01 * m.weight.data)\n\n optim = torch.optim.Adam(critic.parameters(), lr=args.lr)\n lr_scheduler = None\n if args.lr_decay:\n # decay learning rate to 0 linearly\n max_update_num = np.ceil(\n args.step_per_epoch / args.step_per_collect) * args.epoch\n\n lr_scheduler = LambdaLR(\n optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num)\n\n def dist(*logits):\n return Independent(Normal(*logits), 1)\n\n policy = NPGPolicy(actor, critic, optim, dist, discount_factor=args.gamma,\n gae_lambda=args.gae_lambda,\n reward_normalization=args.rew_norm, action_scaling=True,\n action_bound_method=args.bound_action_method,\n lr_scheduler=lr_scheduler, action_space=env.action_space,\n advantage_normalization=args.norm_adv,\n optim_critic_iters=args.optim_critic_iters,\n actor_step_size=args.actor_step_size)\n\n # load a previous policy\n if args.resume_path:\n policy.load_state_dict(torch.load(args.resume_path, map_location=args.device))\n print(\"Loaded agent from: \", args.resume_path)\n\n # collector\n if args.training_num > 1:\n buffer = VectorReplayBuffer(args.buffer_size, len(train_envs))\n else:\n buffer = ReplayBuffer(args.buffer_size)\n train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)\n test_collector = Collector(policy, test_envs)\n # log\n t0 = datetime.datetime.now().strftime(\"%m%d_%H%M%S\")\n log_file = f'seed_{args.seed}_{t0}-{args.task.replace(\"-\", \"_\")}_npg'\n log_path = os.path.join(args.logdir, args.task, 'npg', log_file)\n writer = SummaryWriter(log_path)\n writer.add_text(\"args\", str(args))\n logger = BasicLogger(writer, update_interval=100, train_interval=100)\n\n def save_fn(policy):\n torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))\n\n if not args.watch:\n # trainer\n result = onpolicy_trainer(\n policy, train_collector, test_collector, args.epoch, args.step_per_epoch,\n args.repeat_per_collect, args.test_num, args.batch_size,\n step_per_collect=args.step_per_collect, save_fn=save_fn, logger=logger,\n test_in_train=False)\n pprint.pprint(result)\n\n # Let's watch its performance!\n policy.eval()\n test_envs.seed(args.seed)\n test_collector.reset()\n result = test_collector.collect(n_episode=args.test_num, render=args.render)\n print(f'Final reward: {result[\"rews\"].mean()}, length: {result[\"lens\"].mean()}')\n\n\nif __name__ == '__main__':\n test_npg()\n"
]
| [
[
"numpy.max",
"numpy.ceil",
"torch.nn.init.constant_",
"numpy.random.seed",
"torch.distributions.Normal",
"numpy.min",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load",
"numpy.sqrt",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.init.zeros_",
"torch.utils.tensorboard.SummaryWriter"
]
]
|
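A minimal sketch of two setup details from the script above: orthogonal initialization of every `Linear` layer with gain sqrt(2), and a `LambdaLR` schedule that decays the learning rate linearly to zero over the planned number of updates. The tiny network is illustrative, not the actual actor/critic.

```python
import numpy as np
import torch
from torch import nn

net = nn.Sequential(nn.Linear(4, 64), nn.Tanh(), nn.Linear(64, 2))
for m in net.modules():
    if isinstance(m, nn.Linear):
        torch.nn.init.orthogonal_(m.weight, gain=np.sqrt(2))   # orthogonal weight init
        torch.nn.init.zeros_(m.bias)

optim = torch.optim.Adam(net.parameters(), lr=1e-3)
max_update_num = np.ceil(30000 / 1024) * 100         # step_per_epoch / step_per_collect * epoch
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num)
```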
brunzema/uitvbo | [
"3d71f94cfe452678ff2c9c8fbea1eef4d8a233f5"
]
| [
"src/kernels/costum_kernels.py"
]
| [
"import gpytorch\nimport torch\nfrom gpytorch.constraints import Interval\nimport time\n\n\nclass TemporalKernelB2P(gpytorch.kernels.Kernel):\n is_stationary = True\n\n def __init__(self, epsilon=0.08, **kwargs): # 0.01\n super().__init__(**kwargs)\n\n self.epsilon = epsilon\n\n # this is the kernel function\n def forward(self, x1, x2, **params):\n base = 1 - self.epsilon\n # calculate the distance between inputs\n exp = torch.abs(self.covar_dist(x1, x2, square_dist=False)) / 2\n out = torch.pow(base, exp)\n return out\n\n\nclass WienerKernel(gpytorch.kernels.Kernel): # vorlesung von Phillip Henning\n is_stationary = False\n\n def __init__(self, c0, sigma_hat_squared=0.5, out_max=2, **kwargs):\n super().__init__(**kwargs)\n\n self.max_var = out_max\n self.sigma_hat_squared = sigma_hat_squared\n self.c0 = c0\n\n # this is the kernel function\n def forward(self, x1, x2, **params):\n # d will always be 1, as it is the time dimenaion! Therefore we can squeeze the inputs\n if x1.ndim == 2: # 'normal' mode\n x1, x2 = x1.squeeze(x1.ndim - 1), x2.squeeze(x2.ndim - 1)\n meshed_x1, meshed_x2 = torch.meshgrid(x1, x2)\n return self.evaluate_kernel(meshed_x1, meshed_x2)\n\n else: # 'batch' mode\n # old\n # x1squeezed, x2squeezed = x1.squeeze(x1.ndim - 1), x2.squeeze(x2.ndim - 1)\n # t0 = time.time()\n # out = torch.empty((1, x1squeezed.shape[1], x2squeezed.shape[1]))\n # for batch in range(x1squeezed.shape[0]):\n # x1_batch = x1squeezed[batch, :]\n # x2_batch = x2squeezed[batch, :]\n #\n # meshed_x1, meshed_x2 = torch.meshgrid(x1_batch, x2_batch)\n # new_out = self.evaluate_kernel(meshed_x1, meshed_x2).unsqueeze(0)\n #\n # out = torch.cat((out, new_out), dim=0)\n # out1 = out[1:, :, :]\n # print('Loop:', time.time() - t0)\n\n # t0 = time.time()\n meshed_x1 = torch.tile(x1, (1, 1, x2.shape[1]))\n meshed_x2 = torch.tile(x2.transpose(dim0=-2, dim1=-1), (1, x1.shape[1], 1))\n out = self.evaluate_kernel(meshed_x1, meshed_x2)\n return out\n\n def evaluate_kernel(self, meshed_x1, meshed_x2):\n step = torch.min(meshed_x1, meshed_x2) - self.c0\n out = step * self.sigma_hat_squared\n return out\n\n\nclass ConstantKernel(gpytorch.kernels.Kernel):\n is_stationary = False\n\n def __init__(self, constant=1, **kwargs):\n super().__init__(**kwargs)\n self.constant = constant\n\n def forward(self, x1, x2, diag=False, last_dim_is_batch=False, **params):\n return torch.ones_like(self.covar_dist(x1, x2))\n\n\n# defined only for t>=0\nclass GeometricWienerKernel(gpytorch.kernels.Kernel):\n is_stationary = False\n\n def __init__(self, t, sigma=0.5, **kwargs):\n super().__init__(**kwargs)\n\n self.sigma = sigma\n self.c0 = t\n\n # this is the kernel function\n def forward(self, x1, x2, **params):\n # d will always be 1, as it is the time dimenaion! 
Therefore we can squeeze it\n x1, x2 = x1.squeeze(x1.ndim - 1), x2.squeeze(x2.ndim - 1)\n\n if x1.ndim == 1: # 'normal' mode\n meshed_x1, meshed_x2 = torch.meshgrid(x1, x2)\n return self.evaluate_kernel(meshed_x1, meshed_x2)\n else: # batch mode\n\n out = torch.empty((1, x1.shape[1], x2.shape[1]))\n for batch in range(x1.shape[0]):\n x1_batch = x1[batch, :]\n x2_batch = x2[batch, :]\n\n meshed_x1, meshed_x2 = torch.meshgrid(x1_batch, x2_batch)\n new_out = self.evaluate_kernel(meshed_x1, meshed_x2).unsqueeze(0)\n\n out = torch.cat((out, new_out), dim=0)\n return out[1:, :, :]\n\n def evaluate_kernel(self, meshed_x1, meshed_x2):\n step = torch.min(meshed_x1, meshed_x2) - self.c0\n out = step * self.sigma ** 2\n return out\n\n\nclass TemporalKernelUI(gpytorch.kernels.Kernel):\n is_stationary = True\n\n def __init__(self, epsilon_prior=0.08, **kwargs): # 0.01\n super().__init__(**kwargs)\n\n self.epsilon = epsilon_prior\n\n # this is the kernel function\n def forward(self, x1, x2, **params):\n base = 1 - self.epsilon\n # calculate the distance between inputs\n exponent = torch.abs(self.covar_dist(x1, x2, square_dist=False)) / -2.\n out = torch.exp(exponent)\n return out\n"
]
| [
[
"torch.cat",
"torch.min",
"torch.tile",
"torch.meshgrid",
"torch.empty",
"torch.exp",
"torch.pow"
]
]
|
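A minimal sketch of the covariance computed by `WienerKernel.evaluate_kernel` above, k(t1, t2) = sigma_hat^2 * (min(t1, t2) - c0), evaluated on a meshgrid of illustrative time points.

```python
import torch

t = torch.tensor([1.0, 2.0, 3.0])                   # time inputs (illustrative)
c0, sigma_hat_squared = 0.0, 0.5

m1, m2 = torch.meshgrid(t, t)                        # newer PyTorch suggests indexing='ij'
K = (torch.min(m1, m2) - c0) * sigma_hat_squared     # elementwise min, then scale
print(K)                                             # K[i, j] = 0.5 * min(t[i], t[j])
```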
gokejian/Multi-Agent-DQJL | [
"1da9ea647db369c10904fc8580c3515e66f264d6"
]
| [
"roadEnv.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"Author: Haoran Su\nEmail: [email protected]\n\"\"\"\nimport vehicle_env\nimport numpy as np\nimport generate\n\n# Action is now defined that index of vehicle start to yield at given step\n# Under this scheme, only one vehicle can yield at each step\n# action = -1 if no vehicle is yielding at the end of this step\n\n# # Utility function\n# def decode_action(s_action):\n# res = []\n# str_list = f'{s_action:015b}'.split()\n# for char in str_list[0]:\n# res.append(int(char))\n# return res\n\n\ndef lane_change(observation):\n \"\"\"\n :param observation:\n :return: a new observation putting all possible lane change into position\n \"\"\"\n # Find all vehicles who are ready to change lane:\n veh_lower_lane = [veh for veh in observation if veh[1] == 1]\n po_index = []\n # index for vehicle who needs to pull over:\n for i in range(len(observation)):\n if (observation[i][1] == 0) and (observation[i][2] == 0):\n po_index.append(i)\n\n for elem in po_index:\n can_pull_over = True\n for veh_1 in veh_lower_lane:\n if observation[elem][0] >= veh_1[0]:\n leading_veh = observation[elem]\n following_veh = veh_1\n else:\n leading_veh = veh_1\n following_veh = observation[elem]\n\n if leading_veh[0] - leading_veh[3] < following_veh[0]:\n can_pull_over = False\n if can_pull_over:\n observation[elem][1] = 1\n\n return observation\n\n\ndef mapped_state(state):\n \"\"\"\n Mapped a given state into the state as neural network input, insert trivial vehicles until vehs_num = 18\n :param state: given state\n :return: new state\n \"\"\"\n\n num_diff = 18 - len(state)\n for i in range(num_diff):\n state.append([0, 0, 0, 0, 0, 2])\n return state\n\n\ndef random_deceleration(most_comfortable_deceleration, lane_pos):\n \"\"\"\n Return a deceleration based on given attribute of the vehicle\n :param most_comfortable_deceleration: the given attribute of the vehicle\n :param lane_pos: y\n :return: the deceleration adopted by human driver\n \"\"\"\n if lane_pos:\n sigma = 0.3\n else:\n sigma = 0.5\n return np.random.normal(most_comfortable_deceleration, sigma)\n\n\n# Calculating rewards depends only on the state:\ndef calculate_reward(state, l_gap=0.25, road_length=200):\n \"\"\"\n Calculate reward for the given state. Notice that this function doesnt account for status inconsistency, but it gets\n covered in the state_transition function.\n :param road_length: segment length\n :param l_gap: minimum safety gap\n :param state: given state\n :return: reward for this state\n \"\"\"\n\n # First get the number of valid vehicles:\n num_valid_vehs = 0\n for veh in state:\n if veh[5] != 2:\n num_valid_vehs += 1\n\n # Initialize reward for this step\n reward = -1\n\n # Initialize collision indicator:\n has_collision = False\n\n # Initialize process completed:\n has_done = True\n\n # For the new state:\n # First, we want to check collision:\n for i in range(num_valid_vehs):\n for j in range(i + 1, num_valid_vehs):\n\n # Determine which vehicle is the leading vehicle by their front position:\n if state[i][0] >= state[j][0]:\n leading_veh = state[i]\n following_veh = state[j]\n else:\n leading_veh = state[j]\n following_veh = state[i]\n\n # Find out the back of leading vehicle and front of following vehicle:\n back_pos = leading_veh[0] - leading_veh[3]\n front_pos = following_veh[0]\n\n # Collision check: 1. 
both vehicles are on the same lane, 2: two vehicles have overlapped with minimum\n # safety gap.\n if (back_pos < 200) and (leading_veh[1] != 2) and (following_veh[1] != 2) and (leading_veh[1] == following_veh[1]) \\\n and (back_pos - l_gap < front_pos):\n has_collision = True\n\n # # If any vehicle is on lane 0 and vehicle position has not exceed the roadway length:\n # for veh in state[:num_valid_vehs - 1]:\n # if veh[1] == 0 and veh[0] - veh[3] <= road_length:\n # has_cleared = False\n\n # Summarize reward:\n # If there is collision, apply collision penalty and end the process:\n if not has_collision:\n vehs_left_counter = 0\n for veh in state:\n if (veh[1] == 0) and (veh[0] - veh[3]) < road_length:\n vehs_left_counter += 1\n has_done = False\n reward -= vehs_left_counter\n else:\n reward -= 2000\n has_done = True\n return has_done, reward\n\n\n# Road Environment\nclass RoadEnv:\n def __init__(self, road_length=200, l_gap=0, delta_t=0.5):\n # self.state = mapped_state(vehicle_env.generate_env_nparray())\n self.state = mapped_state(generate.generate_road_env_nonOO())\n self.done = False\n self.road_length = road_length # Length of the roadway segment\n self.l_gap = l_gap # Minimum safety gap\n self.delta_t = delta_t # length of the timestamp interval\n\n def reset(self):\n # new_state = vehicle_env.generate_env_nparray()\n new_state = generate.generate_road_env_nonOO()\n self.state = mapped_state(new_state)\n\n return self.state\n\n def step(self, observation, action):\n \"\"\"\n State Transition function to compute the next state\n :param action: now is a integer representing the index of which vehicle needs to yield in this step\n :param observation: s(t)\n :return: observation_,(next_state) reward, done(whether the process has completed)\n \"\"\"\n # Initialize next state with only valid vehicles\n observation_ = []\n\n # Initialize a repetitive yielding instruction error\n # repetitive_yielding = False\n\n # Find the number of valid vehicles and only iterate through valid vehicles\n num_valid_vehs = 0\n for veh in observation:\n if veh[5] != 2:\n num_valid_vehs += 1\n\n # print(\"number of valid vehicles is :\"+str(num_valid_vehs))\n\n # Iterate through all valid vehicles:\n for i in range(num_valid_vehs):\n\n # # extract corresponding action\n if action == i:\n action_i = 1\n else:\n action_i = 0\n\n # Model vehicle kinetics here:\n\n old_x = observation[i][0]\n old_y = observation[i][1]\n old_velocity = observation[i][2]\n veh_len = observation[i][3]\n most_comfortable_deceleration = observation[i][4]\n old_status = observation[i][5]\n\n # Determine new_status and b_actual based on old_status and action_i:\n if (not old_status) and (not action_i):\n new_status = 0\n # b_actual = 0\n elif (not old_status) and action_i:\n new_status = 1\n # b_actual = random_deceleration(most_comfortable_deceleration, old_y)\n elif old_status and (not action_i):\n new_status = 1\n # b_actual = random_deceleration(most_comfortable_deceleration, old_y)\n else:\n new_status = 1\n # b_actual = random_deceleration(most_comfortable_deceleration, old_y)\n repetitive_yielding = True\n\n if new_status:\n # b_actual = random_deceleration(most_comfortable_deceleration, old_y)\n # b_actual = random_deceleration(most_comfortable_deceleration, old_y)\n b_actual = random_deceleration(most_comfortable_deceleration, old_y)\n else:\n b_actual = 0\n\n new_x = old_x + old_velocity * self.delta_t\n new_y = old_y\n if action_i and (old_y == 0):\n new_y = 2\n\n new_v = max(0, old_velocity - b_actual * self.delta_t)\n\n\n # 
If the vehicle has left the lane, we add it to a virtually\n # Assign new vehicle information and add it to the new observation\n new_veh = [new_x, int(new_y), new_v, veh_len, most_comfortable_deceleration, new_status]\n observation_.append(new_veh)\n\n # print(\"Before lane change the state is :\" + str(observation_))\n # observation_ = lane_change(observation_)\n\n # Calculate reward without considering action-status inconsistency\n # At this step, observation_ only contains valid vehicles, and reward is only calculated for that:\n # Check if the process also completes in this step:\n done, reward = calculate_reward(observation_, road_length=self.road_length, l_gap=self.l_gap)\n\n # Additionally, if there is any repetitive yielding instruction, we applies penalty and end the game:\n # if repetitive_yielding:\n # reward -= 1\n\n # # If there is a yielding instruction to valid vehicle, we should encourage it:\n # if action <= num_valid_vehs - 1:\n # reward += 10\n\n # pad the next state to consistent state length:\n observation_ = mapped_state(observation_)\n\n return observation_, reward, done\n"
]
| [
[
"numpy.random.normal"
]
]
|
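The braking model in `random_deceleration` above reduces to sampling around the driver's comfortable deceleration, with a spread that depends on the lane position; a runnable restatement with an illustrative call:

```python
import numpy as np


def random_deceleration(most_comfortable_deceleration, lane_pos):
    sigma = 0.3 if lane_pos else 0.5                 # tighter spread when lane_pos is truthy
    return np.random.normal(most_comfortable_deceleration, sigma)


print(random_deceleration(2.5, lane_pos=0))
```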
YixiaoZhang/Accuracy-and-Robustness | [
"4ed2b8ee72af4dad892b0d5187625a4bbd2150fa"
]
| [
"cifar.py"
]
| [
"import tensorflow as tf\nimport numpy as np\nimport os\nimport pickle\nimport gzip\nimport pickle\nimport urllib.request\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras.models import load_model\n\ndef load_batch(fpath, label_key='labels'):\n f = open(fpath, 'rb')\n d = pickle.load(f, encoding=\"bytes\")\n for k, v in d.items():\n del(d[k])\n d[k.decode(\"utf8\")] = v\n f.close()\n data = d[\"data\"]\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32)\n final = np.zeros((data.shape[0], 32, 32, 3),dtype=np.float32)\n final[:,:,:,0] = data[:,0,:,:]\n final[:,:,:,1] = data[:,1,:,:]\n final[:,:,:,2] = data[:,2,:,:]\n\n final /= 255\n final -= .5\n labels2 = np.zeros((len(labels), 10))\n labels2[np.arange(len(labels2)), labels] = 1\n\n return final, labels\n\ndef load_batch(fpath):\n f = open(fpath,\"rb\").read()\n size = 32*32*3+1\n labels = []\n images = []\n for i in range(10000):\n arr = np.fromstring(f[i*size:(i+1)*size],dtype=np.uint8)\n lab = np.identity(10)[arr[0]]\n img = arr[1:].reshape((3,32,32)).transpose((1,2,0))\n\n labels.append(lab)\n images.append((img/255)-.5)\n return np.array(images),np.array(labels)\n \n\nclass CIFAR:\n def __init__(self):\n train_data = []\n train_labels = []\n \n if not os.path.exists(\"cifar-10-batches-bin\"):\n urllib.request.urlretrieve(\"https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz\",\n \"cifar-data.tar.gz\")\n os.popen(\"tar -xzf cifar-data.tar.gz\").read()\n \n\n for i in range(5):\n r,s = load_batch(\"cifar-10-batches-bin/data_batch_\"+str(i+1)+\".bin\")\n train_data.extend(r)\n train_labels.extend(s)\n \n train_data = np.array(train_data,dtype=np.float32)\n train_labels = np.array(train_labels)\n \n self.test_data, self.test_labels = load_batch(\"cifar-10-batches-bin/test_batch.bin\")\n \n VALIDATION_SIZE = 5000\n \n self.validation_data = train_data[:VALIDATION_SIZE, :, :, :]\n self.validation_labels = train_labels[:VALIDATION_SIZE]\n self.train_data = train_data[VALIDATION_SIZE:, :, :, :]\n self.train_labels = train_labels[VALIDATION_SIZE:]\n\nclass CIFARModel:\n def __init__(self, restore, session=None):\n self.num_channels = 3\n self.image_size = 32\n self.num_labels = 10\n\n model = Sequential()\n\n model.add(Conv2D(64, (3, 3),\n input_shape=(32, 32, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Conv2D(128, (3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(128, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Flatten())\n model.add(Dense(256))\n model.add(Activation('relu'))\n model.add(Dense(256))\n model.add(Activation('relu'))\n model.add(Dense(10))\n\n model.load_weights(restore)\n\n self.model = model\n\n def predict(self, data):\n return self.model(data)\n \n \n"
]
| [
[
"numpy.identity",
"numpy.array",
"numpy.fromstring",
"numpy.zeros"
]
]
|
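A minimal sketch of how one record of the CIFAR-10 binary format is parsed in the second `load_batch` above, using a fabricated 3073-byte record; `np.frombuffer` is used here since `np.fromstring` (as in the original) is deprecated for this purpose.

```python
import numpy as np

record = bytes([3]) + bytes(32 * 32 * 3)             # 1 label byte + 3072 pixel bytes (fabricated)
arr = np.frombuffer(record, dtype=np.uint8)          # the original uses the deprecated np.fromstring
lab = np.identity(10)[arr[0]]                        # one-hot label vector
img = arr[1:].reshape((3, 32, 32)).transpose((1, 2, 0))   # CHW -> HWC
print(lab.argmax(), img.shape)                       # 3 (32, 32, 3)
```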
BlueBrain/sonata | [
"f6040cd4fdccd9e5536e57322f11e6ce5805e773"
]
| [
"src/pysonata/sonata/io/population.py"
]
| [
"# Copyright 2017. Allen Institute. All rights reserved\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\nimport pandas as pd\nimport h5py\nimport numpy as np\n\nfrom .utils import range_itr, get_attribute_h5\nfrom .node import Node, NodeSet\nfrom .edge import Edge, EdgeSet\nfrom .group import NodeGroup, EdgeGroup\n\n\nclass Population(object):\n def __init__(self, pop_name, pop_group, types_table):\n self._pop_name = pop_name\n self._pop_group = pop_group\n self._types_table = types_table\n self._nrows = 0\n\n # For storing individual groups\n self._group_map = {} # grp-id --> h5py.Group object\n self._find_groups()\n self._group_cache = {} # grp-id --> soneta.io.Group() object\n\n # Refrences to most of the population's primary dataset\n self._type_id_ds = pop_group[self.type_ids_column]\n self._group_id_ds = pop_group[self.group_id_column]\n self._group_index_ds = pop_group[self.group_index_column]\n\n self._group_indicies = {} # grp-id --> list of rows indicies\n self._group_indicies_cache_built = False\n\n @property\n def name(self):\n \"\"\"name of current population\"\"\"\n return self._pop_name\n\n @property\n def group_ids(self):\n \"\"\"List of all group_ids belonging to population\"\"\"\n return list(self._group_map.keys())\n\n @property\n def groups(self):\n \"\"\"Returns a list of sonata.Group objects\"\"\"\n return [self.get_group(name) for name in self._group_map.keys()]\n\n @property\n def types_table(self):\n return self._types_table\n\n @property\n def group_id_ds(self):\n return self._group_id_ds\n\n @property\n def group_index_ds(self):\n return self._group_index_ds\n\n @property\n def group_id_column(self):\n raise NotImplementedError\n\n @property\n def group_index_column(self):\n raise NotImplementedError\n\n @property\n def type_ids_column(self):\n raise NotImplementedError\n\n def to_dataframe(self):\n \"\"\"Convert Population to dataframe\"\"\"\n raise NotImplementedError\n\n def get_group(self, group_id):\n if group_id in self._group_cache:\n return self._group_cache[group_id]\n else:\n grp_h5 = self._group_map[group_id]\n grp_obj = 
self._build_group(group_id, grp_h5)\n self._group_cache[group_id] = grp_obj\n return grp_obj\n\n def group_indicies(self, group_id, build_cache=False):\n \"\"\"Returns a list of all the population row index that maps onto the given group.\n\n Used for iterating or searching within a Group\n\n :param group_id: id of a given group\n :param build_cache: Will cache indicies for all groups. Will be faster if making multiple calls but requires\n more memory (default False)\n :return: A (possibly empty) list of row indicies (non-contiguous, but unique)\n \"\"\"\n if self._group_indicies_cache_built:\n return self._group_indicies.get(group_id, [])\n\n else:\n tmp_index = pd.DataFrame()\n # TODO: Need to check the memory overhead, especially for edges. See if an iterative search is just as fast\n tmp_index['grp_id'] = pd.Series(self._group_id_ds, dtype=self._group_id_ds.dtype)\n tmp_index['row_indx'] = pd.Series(range_itr(self._nrows), dtype=np.uint32)\n if build_cache:\n # save all indicies as arrays\n self._group_indicies = {grp_id: np.array(subset['row_indx'])\n for grp_id, subset in tmp_index.groupby(by='grp_id')}\n self._group_indicies_cache_built = True\n return self._group_indicies.get(group_id, [])\n else:\n # TODO: Manually del tmp_index to clear out the memory?\n tmp_index = tmp_index[tmp_index['grp_id'] == group_id]\n return np.array(tmp_index['row_indx'])\n\n def igroup_ids(self, row_indicies):\n return self._group_id_ds[list(row_indicies)]\n\n def igroup_indicies(self, row_indicies):\n return self._group_index_ds[list(row_indicies)]\n\n def _find_groups(self):\n \"\"\"Create a map between group-id and h5py.Group reference\"\"\"\n for grp_key, grp_h5 in self._pop_group.items():\n if grp_key.isdigit():\n grp_id = int(grp_key)\n self._group_map[grp_id] = grp_h5\n else:\n # TODO: Should we put a warning if an unrecognized group exists?\n pass\n\n def _build_group(self, group_id, group_h5):\n raise NotImplementedError\n\n def __len__(self):\n return self._nrows\n\n\nclass NodePopulation(Population):\n def __init__(self, pop_name, pop_group, node_types_tables):\n super(NodePopulation, self).__init__(pop_name=pop_name, pop_group=pop_group, types_table=node_types_tables)\n\n # TODO: node_ids can be implicit\n self._node_id_ds = pop_group['node_id']\n self._nrows = len(self._node_id_ds)\n\n # TODO: This isn't necessary if only using iterator. 
Delay building index until get_node() is called.\n self._index_nid2row = None # A lookup from node_id --> h5 row number\n self._node_id_index_built = False\n self._build_node_id_index()\n\n # indicies for gid <--> node_id map\n self._has_gids = False\n self._index_gid2row = None # gid --> row (for searching by gid)\n self._index_row2gid = None # row --> gid (for iterator or searching by node-id)\n self._gid_lookup_fnc = lambda _: None # for looking up gid by row, use fnc pointer rather than conditional\n\n self.__itr_index = 0 # for iterator\n\n @property\n def group_id_column(self):\n return 'node_group_id'\n\n @property\n def group_index_column(self):\n return 'node_group_index'\n\n @property\n def type_ids_column(self):\n return 'node_type_id'\n\n @property\n def has_gids(self):\n return self._has_gids\n\n @property\n def node_ids(self):\n return np.array(self._node_id_ds)\n\n @property\n def gids(self):\n if self.has_gids:\n return np.array(self._index_gid2row.index)\n else:\n return None\n\n @property\n def node_types_table(self):\n return self._types_table\n\n @property\n def index_column_name(self):\n return 'node_id'\n\n @property\n def node_types_table(self):\n return self.types_table\n\n def add_gids(self, gid_map_df, force=False):\n if self.has_gids and not force:\n # TODO: not sure if it's best to return an exception or just continue on in silence?\n raise Exception('Node population {} already has gids mapped onto node-ids.'.format(self.name))\n # return\n\n # Create map from gid --> node_id --> row #\n self._build_node_id_index()\n tmp_df = pd.DataFrame()\n tmp_df['row_id'] = self._index_nid2row.index\n tmp_df['node_id'] = self._index_nid2row\n gid_map_df = gid_map_df.merge(tmp_df, how='left', left_on='node_id', right_on='node_id')\n gid_map_df = gid_map_df.drop(['node_id', 'population'], axis=1)\n self._index_gid2row = gid_map_df.set_index('gid')\n self._index_row2gid = gid_map_df.set_index('row_id')\n self._gid_lookup_fnc = lambda row_indx: self._index_row2gid.loc[row_indx]['gid']\n self._has_gids = True\n\n def to_dataframe(self):\n raise NotImplementedError\n\n def get_row(self, row_indx):\n # TODO: Use helper function so we don't have to lookup gid/node_id twice\n # Note: I'm not cacheing the nodes for memory purposes, but it might be beneificial too.\n node_id = self._node_id_ds[row_indx]\n node_type_id = self._type_id_ds[row_indx]\n node_group_id = self._group_id_ds[row_indx]\n node_group_index = self._group_index_ds[row_indx]\n\n node_type_props = self.node_types_table[node_type_id]\n node_group_props = self.get_group(node_group_id)[node_group_index]\n node_gid = self._gid_lookup_fnc(row_indx)\n\n return Node(node_id, node_type_id, node_type_props, node_group_props, None, gid=node_gid)\n\n def get_rows(self, row_indicies):\n \"\"\"Returns a set of all nodes based on list of row indicies.\n\n Warning: currently due to the use of h5py, the list must be ordered and cannot contain duplicates.\n\n :param row_indicies: A list of row indicies\n :return: An iterable NodeSet of nodes in the specified indicies\n \"\"\"\n # TODO: Check that row_indicies is unsigned and the max (which will be the last value) < n_rows\n # TODO: Check order and check for duplicates in list\n return NodeSet(row_indicies, self)\n\n def inode_ids(self, row_indicies):\n # You get errors if row_indicies is a numpy array or panda series so convert to python list\n # TODO: list conversion can be expensive, see if h5py will work with np arrays natively.\n return self._node_id_ds[list(row_indicies)]\n\n def 
igids(self, row_indicies):\n gids = self._gid_lookup_fnc(row_indicies)\n if gids is not None:\n gids = np.array(gids)\n return gids\n\n def inode_type_ids(self, row_indicies):\n # self._node_type_id_ds\n return self._type_id_ds[list(row_indicies)]\n\n def get_node_id(self, node_id):\n row_indx = self._index_nid2row.iloc[node_id]\n return self.get_row(row_indx)\n\n def get_gid(self, gid):\n # assert(self.has_gids)\n row_indx = self._index_gid2row.iloc[gid]['row_id']\n return self.get_row(row_indx)\n\n def _build_node_id_index(self, force=False):\n if self._node_id_index_built and not force:\n return\n\n self._index_nid2row = pd.Series(range_itr(self._nrows), index=self._node_id_ds, dtype=self._node_id_ds.dtype)\n self._node_id_index_built = True\n\n def _build_group(self, group_id, group_h5):\n return NodeGroup(group_id, group_h5, self)\n\n def __iter__(self):\n self.__itr_index = 0\n return self\n\n def next(self):\n return self.__next__()\n\n def __next__(self):\n if self.__itr_index >= self._nrows:\n raise StopIteration\n\n nxt_node = self.get_row(self.__itr_index)\n self.__itr_index += 1\n return nxt_node\n\n\nclass EdgePopulation(Population):\n class __IndexStruct(object):\n \"\"\"Class sto store indicies subgroup\"\"\"\n # TODO: Use collections.namedtuple\n def __init__(self, lookup_table, edge_table):\n self.lookup_table = lookup_table\n self.edge_table = edge_table\n\n def __init__(self, pop_name, pop_group, edge_types_tables):\n super(EdgePopulation, self).__init__(pop_name=pop_name, pop_group=pop_group, types_table=edge_types_tables)\n\n # keep reference to source and target datasets\n self._source_node_id_ds = pop_group['source_node_id']\n self._target_node_id_ds = pop_group['target_node_id']\n\n self._nrows = len(self._source_node_id_ds)\n\n # TODO: Throw an error/warning if missing\n self._source_population = EdgePopulation.get_source_population(pop_group)\n self._target_population = EdgePopulation.get_target_population(pop_group)\n\n self.__itr_index = 0\n\n # TODO: use a function pointer for get_index so it doesn't have to run a conditional every time\n # TODO: add property and/or property so user can determine what indicies exists.\n self._targets_index = None\n self._has_target_index = False\n self._sources_index = None\n self._has_source_index = False\n self.build_indicies()\n\n @property\n def group_id_column(self):\n return 'edge_group_id'\n\n @property\n def group_index_column(self):\n return 'edge_group_index'\n\n @property\n def type_ids_column(self):\n return 'edge_type_id'\n\n @property\n def source_population(self):\n return self._source_population\n\n @property\n def target_population(self):\n return self._target_population\n\n @staticmethod\n def get_source_population(pop_group_h5):\n return get_attribute_h5(pop_group_h5['source_node_id'], 'network', None)\n\n @staticmethod\n def get_target_population(pop_group_h5):\n return get_attribute_h5(pop_group_h5['target_node_id'], 'network', None)\n\n @property\n def edge_types_table(self):\n return self._types_table\n\n def to_dataframe(self):\n raise NotImplementedError\n\n def build_indicies(self):\n if 'indicies' in self._pop_group:\n indicies_grp = self._pop_group['indicies']\n for index_name, index_grp in indicies_grp.items():\n # TODO: Let __IndexStruct build the indicies\n # Make sure subgroup has the correct datasets\n if not isinstance(index_grp, h5py.Group):\n continue\n\n if 'node_id_to_range' not in index_grp:\n # TODO: make this more general, i.e 'id_to_range' thus we can index on gids, edge_types, etc\n # 
TODO: Check that there are two columns in dataset\n raise Exception('index {} in {} edges is missing column {}.'.format(index_name, self.name,\n 'node_id_to_range'))\n if 'range_to_edge_id' not in index_grp:\n # TODO: make this more general, i.e 'id_to_range' thus we can index on gids, edge_types, etc\n raise Exception('index {} in {} edges is missing column {}.'.format(index_name, self.name,\n 'range_to_edge_id'))\n\n # Cache the index\n targets_lookup = index_grp['node_id_to_range']\n edges_range = index_grp['range_to_edge_id']\n index_obj = self.__IndexStruct(targets_lookup, edges_range)\n\n # Determine the type of index\n if index_name == 'source_to_target':\n self._sources_index = index_obj\n self._has_source_index = True\n elif index_name == 'target_to_source':\n self._targets_index = index_obj\n self._has_target_index = True\n else:\n # TODO: Need to send this to a logger rather than stdout\n print('Unrecognized index {}. Ignoring.'.format(index_name))\n\n def _build_group(self, group_id, group_h5):\n return EdgeGroup(group_id, group_h5, self)\n\n def group_indicies(self, group_id, build_cache=False):\n # For nodes it's safe to just keep a list of all indicies that map onto a given group. For edges bc there are\n # many more rows (and typically a lot less groups), We want to build an index like for source/target ids\n if len(self._group_map) == 1:\n return len(self), [[0, len(self)]]\n\n grp_indicies = super(EdgePopulation, self).group_indicies(group_id, build_cache=False)\n if len(grp_indicies) == 0:\n # Return an index with no ranges\n return 0, []\n\n # cluster into ranges. Naively implement, there is probably a faster way to cluster an ordered array!\n range_beg = grp_indicies[0]\n ranges = []\n for i in range_itr(1, len(grp_indicies)):\n if (grp_indicies[i-1]+1) != grp_indicies[i]:\n ranges.append([range_beg, grp_indicies[i-1]+1])\n range_beg = grp_indicies[i]\n ranges.append([range_beg, grp_indicies[-1]+1])\n return len(grp_indicies), np.array(ranges, dtype=np.uint32)\n\n '''\n def _get_target_index(self):\n # TODO: Do only once\n if self._targets_index is not None:\n return self._targets_index\n\n if 'incidies' in self._pop_group:\n if 'target_to_source' in self._pop_group['incidies']:\n targets_lookup = self._pop_group['incidies']['target_to_source']['node_id_to_range']\n edges_range = self._pop_group['incidies']['target_to_source']['range_to_edge_id']\n self._targets_index = self.__IndexStruct(targets_lookup, edges_range)\n return self._targets_index\n\n # TODO: What to do if index doesn't exist?\n raise NotImplementedError\n '''\n\n def get_row(self, index):\n src_node = self._source_node_id_ds[index]\n trg_node = self._target_node_id_ds[index]\n edge_type_id = self._type_id_ds[index]\n edge_types_props = self.edge_types_table[edge_type_id]\n\n edge_group_id = self._group_id_ds[index]\n edge_group_index = self._group_index_ds[index]\n edge_group_props = self.get_group(edge_group_id)[edge_group_index]\n return Edge(trg_node_id=trg_node, src_node_id=src_node, source_pop=self.source_population,\n target_pop=self.target_population, group_props=edge_group_props, edge_types_props=edge_types_props)\n\n def filter(self, **filter_props):\n\n selected_edge_types = set(self.edge_types_table.edge_type_ids)\n types_filter = False # Do we need to filter results by edge_type_id\n if 'edge_type_id' in filter_props:\n # TODO: Make sure the edge_type_id is valid\n selected_edge_types = set([filter_props['edge_type_id']])\n del filter_props['edge_type_id']\n types_filter = True\n\n 
selected_groups = set(self._group_map.keys()) # list of grp_id's that will be used\n group_prop_filter = {} # list of actual query statements\n group_filter = False # do we need to filter results by group_id\n\n # Go through filter key==value pairs, create filters for groups and edge_types\n for filter_key, filter_val in filter_props.items():\n # Find out what groups, if any, the column should search in.\n group_query = False # If it's querying a group property don't look in edge_types\n types_query = False\n for grp_id, grp_h5 in self._group_map.items():\n if filter_key in grp_h5:\n # TODO: Need to check the dtype's match\n selected_groups &= set([grp_id])\n group_prop_filter[filter_key] = filter_val\n group_query = True\n group_filter = True\n\n if (not group_query) and filter_key in self.edge_types_table.columns:\n # Presearch the edge types and get only those edge_type_ids which match key==val\n selected_edge_types &= set(self.edge_types_table.find(filter_key, filter_val))\n types_filter = True\n types_query = True\n\n if not (group_query or types_query):\n # Property key neither exists in a group or the edge_types_table\n raise Exception('Could not find property {}'.format(filter_key))\n\n # Iterate through all nodes, only returning those that match the filter\n for indx in range_itr(self._nrows):\n # Filter by edge_type_id\n if types_filter:\n # TODO: Invert the selected_edge_types, it will be faster to fail immeditely than search the entire list\n if self._type_id_ds[indx] not in selected_edge_types:\n continue\n\n # Filter by group properties\n if group_filter:\n # TODO: Invert group search\n grp_id = self._group_id_ds[indx]\n if grp_id not in selected_groups:\n continue\n\n grp_index = self._group_index_ds[indx]\n search_failed = True\n for prop_key, prop_val in group_prop_filter.items():\n if prop_val != self._group_map[grp_id][prop_key][grp_index]:\n break\n else:\n search_failed = False\n\n if search_failed:\n continue\n\n yield self.get_row(indx)\n\n def get_target(self, target_node_id):\n # TODO: Raise an exception, or call find() and log a warning that the index is not available\n # TODO: check validity of target_node_id (non-negative integer and smaller than index range)\n assert(self._has_target_index)\n return self._get_index(self._targets_index, target_node_id)\n\n def get_targets(self, target_node_ids):\n # TODO: verify input is iterable\n assert(self._has_target_index)\n trg_index = self._targets_index\n for trg_id in target_node_ids:\n for edge in self._get_index(trg_index, trg_id):\n yield edge\n\n def get_source(self, source_node_id):\n assert(self._has_source_index)\n return self._get_index(self._sources_index, source_node_id)\n\n def get_sources(self, source_node_ids):\n assert(self._has_target_index)\n trg_index = self._sources_index\n for src_id in source_node_ids:\n for edge in self._get_index(trg_index, src_id):\n yield edge\n\n def _get_index(self, index_struct, lookup_id):\n # TODO: Use a EdgeSet instead\n edges_table = index_struct.edge_table\n\n lookup_beg, lookup_end = index_struct.lookup_table[lookup_id]\n for i in range_itr(lookup_beg, lookup_end):\n edge_indx_beg, edge_indx_end = edges_table[i]\n for edge_indx in range_itr(edge_indx_beg, edge_indx_end):\n yield self.get_row(edge_indx)\n\n def __iter__(self):\n self.__itr_index = 0\n return self\n\n def __next__(self):\n if self.__itr_index >= self._nrows:\n raise StopIteration\n\n next_edge = self.get_row(self.__itr_index)\n self.__itr_index += 1\n return next_edge\n\n def next(self):\n return 
self.__next__()\n"
]
| [
[
"pandas.DataFrame",
"numpy.array",
"pandas.Series"
]
]
|
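A minimal sketch of the group-index construction in `Population.group_indicies` above (the cached branch): build a frame of (group id, row index) pairs and collect the row indices for each group. The group ids here are illustrative.

```python
import numpy as np
import pandas as pd

group_ids = np.array([0, 0, 1, 0, 1], dtype=np.uint32)     # per-row group ids (illustrative)

tmp_index = pd.DataFrame()
tmp_index['grp_id'] = pd.Series(group_ids, dtype=group_ids.dtype)
tmp_index['row_indx'] = pd.Series(range(len(group_ids)), dtype=np.uint32)

# One array of row indices per group id, as cached by the population object
indices_by_group = {grp_id: np.array(subset['row_indx'])
                    for grp_id, subset in tmp_index.groupby(by='grp_id')}
print(indices_by_group)        # {0: array([0, 1, 3]), 1: array([2, 4])}
```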
bradh/tk_builder | [
"15bde02b93a67acf46ad485e7cc345cfe3bcd540"
]
| [
"tk_builder/widgets/image_canvas.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nThis module provides functionality for\n\"\"\"\n\nimport PIL.Image\nfrom PIL import ImageTk\nimport platform\nimport time\nimport tkinter\nimport tkinter.colorchooser as colorchooser\nfrom typing import Union, Tuple, List, Dict\n\nimport numpy\nfrom scipy.linalg import norm\n\nfrom tk_builder.base_elements import BooleanDescriptor, IntegerDescriptor, \\\n IntegerTupleDescriptor, StringDescriptor, TypedDescriptor, FloatDescriptor\nfrom tk_builder.widgets import basic_widgets\nfrom tk_builder.utils.color_utils.hex_color_palettes import SeabornHexPalettes\nfrom tk_builder.utils.color_utils import color_utils\nfrom tk_builder.image_readers.image_reader import ImageReader\nfrom tk_builder.utils.geometry_utils import polygon_utils\n\nif platform.system() == \"Linux\":\n import pyscreenshot as ImageGrab\nelse:\n from PIL import ImageGrab\n\n\nclass CanvasImage(object):\n \"\"\"\n The canvas image object.\n \"\"\"\n\n image_reader = TypedDescriptor(\n 'image_reader', ImageReader,\n docstring='The image reader object.') # type: ImageReader\n canvas_decimated_image = TypedDescriptor(\n 'canvas_decimated_image', numpy.ndarray,\n docstring='The canvas decimated image data.') # type: numpy.ndarray\n display_image = TypedDescriptor(\n 'display_image', numpy.ndarray,\n docstring='The display image data.') # type: numpy.ndarray\n decimation_factor = IntegerDescriptor(\n 'decimation_factor', default_value=1,\n docstring='The decimation factor.') # type: int\n display_rescaling_factor = FloatDescriptor(\n 'display_rescaling_factor', default_value=1.0,\n docstring='The display resclaing factor.') # type: float\n canvas_full_image_upper_left_yx = IntegerTupleDescriptor(\n 'canvas_full_image_upper_left_yx', length=2, default_value=(0, 0),\n docstring='The upper left corner of the full image canvas in '\n 'yx order.') # type: tuple\n canvas_ny = IntegerDescriptor(\n 'canvas_ny',\n docstring='') # type: int\n canvas_nx = IntegerDescriptor(\n 'canvas_nx',\n docstring='') # type: int\n scale_to_fit_canvas = BooleanDescriptor(\n 'scale_to_fit_canvas', default_value=True,\n docstring='Scale the image to fit the canvas?') # type: bool\n\n def __init__(self, image_reader, canvas_nx, canvas_ny):\n \"\"\"\n\n Parameters\n ----------\n image_reader : ImageReader\n canvas_nx : int\n canvas_ny : int\n \"\"\"\n\n self.drop_bands = [] # type: List\n self.image_reader = image_reader\n self.canvas_nx = canvas_nx\n self.canvas_ny = canvas_ny\n self.update_canvas_display_image_from_full_image()\n\n def get_decimated_image_data_in_full_image_rect(self, full_image_rect, decimation):\n \"\"\"\n Get decimated data.\n\n Parameters\n ----------\n full_image_rect : Tuple|List\n decimation : int\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n y_start = full_image_rect[0]\n y_end = full_image_rect[2]\n x_start = full_image_rect[1]\n x_end = full_image_rect[3]\n decimated_data = self.image_reader[y_start:y_end:decimation, x_start:x_end:decimation]\n return decimated_data\n\n def get_scaled_display_data(self, decimated_image):\n \"\"\"\n Gets scaled data for display.\n\n Parameters\n ----------\n decimated_image : numpy.ndarray\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n scale_factor = self.compute_display_scale_factor(decimated_image)\n new_nx = int(decimated_image.shape[1] * scale_factor)\n new_ny = int(decimated_image.shape[0] * scale_factor)\n if new_nx > self.canvas_nx:\n new_nx = self.canvas_nx\n if new_ny > self.canvas_ny:\n new_ny = self.canvas_ny\n if len(self.drop_bands) != 0:\n 
zeros_image = numpy.zeros_like(decimated_image[:, :, 0])\n for drop_band in self.drop_bands:\n decimated_image[:, :, drop_band] = zeros_image\n pil_image = PIL.Image.fromarray(decimated_image)\n display_image = pil_image.resize((new_nx, new_ny))\n return numpy.array(display_image)\n\n def decimated_image_coords_to_display_image_coords(self, decimated_image_yx_cords):\n \"\"\"\n Convert from decimated image coordinates to display coordinates.\n\n Parameters\n ----------\n decimated_image_yx_cords : List[Tuple[float, float]]\n\n Returns\n -------\n List[Tuple[float, float]]\n \"\"\"\n\n scale_factor = self.compute_display_scale_factor(self.canvas_decimated_image)\n return [(coord[0]*scale_factor, coord[1]*scale_factor) for coord in decimated_image_yx_cords]\n\n def display_image_coords_to_decimated_image_coords(self, display_image_yx_coords):\n \"\"\"\n Convert from display coordinates to decimated image coordinates.\n\n Parameters\n ----------\n display_image_yx_coords : List[Tuple[float, float]]\n\n Returns\n -------\n List[Tuple[float, float]]\n \"\"\"\n\n scale_factor = self.compute_display_scale_factor(self.canvas_decimated_image)\n return [(coord[0]/scale_factor, coord[1]/scale_factor) for coord in display_image_yx_coords]\n\n @staticmethod\n def display_image_coords_to_canvas_coords(display_image_yx_coords):\n \"\"\"\n Converts display image coordinates to canvas coordinates. This is just a\n axis switch operation.\n\n Parameters\n ----------\n display_image_yx_coords : List[Tuple[float, float]]\n\n Returns\n -------\n List[Tuple[float, float]]\n \"\"\"\n\n return [(yx[1], yx[0]) for yx in display_image_yx_coords]\n\n def compute_display_scale_factor(self, decimated_image):\n \"\"\"\n Computes the nominal scale factor.\n\n Parameters\n ----------\n decimated_image : numpy.ndarray\n\n Returns\n -------\n float\n \"\"\"\n\n # TODO: division may not work as expected in Python 2 (int versus float)\n # what is the intent here?\n decimated_image_nx = decimated_image.shape[1]\n decimated_image_ny = decimated_image.shape[0]\n scale_factor_1 = self.canvas_nx/decimated_image_nx\n scale_factor_2 = self.canvas_ny/decimated_image_ny\n scale_factor = min(scale_factor_1, scale_factor_2)\n return scale_factor\n\n def get_decimated_image_data_in_canvas_rect(self, canvas_rect, decimation=None):\n \"\"\"\n Gets the decimated image from the image rectangle.\n\n Parameters\n ----------\n canvas_rect : Tuple|List\n decimation : None|int\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect)\n if decimation is None:\n decimation = self.get_decimation_from_canvas_rect(canvas_rect)\n return self.get_decimated_image_data_in_full_image_rect(full_image_rect, decimation)\n\n def update_canvas_display_image_from_full_image(self):\n \"\"\"\n Update the image in the canvas.\n\n Returns\n -------\n None\n \"\"\"\n\n full_image_rect = (0, 0, self.image_reader.full_image_ny, self.image_reader.full_image_nx)\n self.update_canvas_display_image_from_full_image_rect(full_image_rect)\n\n def update_canvas_display_image_from_full_image_rect(self, full_image_rect):\n \"\"\"\n Update the canvas to the given image rectangle.\n\n Parameters\n ----------\n full_image_rect : Tuple|List\n\n Returns\n -------\n None\n \"\"\"\n\n self.set_decimation_from_full_image_rect(full_image_rect)\n decimated_image_data = self.get_decimated_image_data_in_full_image_rect(full_image_rect, self.decimation_factor)\n self.update_canvas_display_from_numpy_array(decimated_image_data)\n 
self.canvas_full_image_upper_left_yx = (full_image_rect[0], full_image_rect[1])\n\n def update_canvas_display_image_from_canvas_rect(self, canvas_rect):\n \"\"\"\n Update the canvas to the given camvas rectangle.\n\n Parameters\n ----------\n canvas_rect : Tuple|List\n\n Returns\n -------\n None\n \"\"\"\n\n full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect)\n full_image_rect = (int(round(full_image_rect[0])),\n int(round(full_image_rect[1])),\n int(round(full_image_rect[2])),\n int(round(full_image_rect[3])))\n self.update_canvas_display_image_from_full_image_rect(full_image_rect)\n\n def update_canvas_display_from_numpy_array(self, image_data):\n \"\"\"\n Update the canvas from a numpy array.\n\n Parameters\n ----------\n image_data : numpy.ndarray\n\n Returns\n -------\n None\n \"\"\"\n\n if len(self.drop_bands) > 0:\n zeros_image = numpy.zeros_like(image_data[:, :, 0])\n for drop_band in self.drop_bands:\n image_data[:, :, drop_band] = zeros_image\n self.canvas_decimated_image = image_data\n if self.scale_to_fit_canvas:\n scale_factor = self.compute_display_scale_factor(image_data)\n self.display_rescaling_factor = scale_factor\n self.display_image = self.get_scaled_display_data(image_data)\n else:\n self.display_image = image_data\n\n def get_decimation_factor_from_full_image_rect(self, full_image_rect):\n \"\"\"\n Get the decimation factor from the rectangle size.\n\n Parameters\n ----------\n full_image_rect : Tuple|List\n\n Returns\n -------\n int\n \"\"\"\n\n ny = full_image_rect[2] - full_image_rect[0]\n nx = full_image_rect[3] - full_image_rect[1]\n decimation_y = ny / self.canvas_ny\n decimation_x = nx / self.canvas_nx\n decimation_factor = max(decimation_y, decimation_x)\n decimation_factor = int(decimation_factor)\n if decimation_factor < 1:\n decimation_factor = 1\n return decimation_factor\n\n def get_decimation_from_canvas_rect(self, canvas_rect):\n \"\"\"\n Get the decimation factor from the canvas rectangle size.\n\n Parameters\n ----------\n canvas_rect : Tuple|List\n\n Returns\n -------\n int\n \"\"\"\n\n full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect)\n return self.get_decimation_factor_from_full_image_rect(full_image_rect)\n\n def set_decimation_from_full_image_rect(self, full_image_rect):\n \"\"\"\n Sets the decimation from the image rectangle.\n\n Parameters\n ----------\n full_image_rect : Tuple|List\n\n Returns\n -------\n None\n \"\"\"\n\n decimation_factor = self.get_decimation_factor_from_full_image_rect(full_image_rect)\n self.decimation_factor = decimation_factor\n\n def canvas_coords_to_full_image_yx(self, canvas_coords):\n \"\"\"\n Gets full coordinates in yx order from canvas coordinates.\n\n Parameters\n ----------\n canvas_coords : Tuple|List\n\n Returns\n -------\n List\n \"\"\"\n\n decimation_factor = self.decimation_factor\n if self.scale_to_fit_canvas:\n decimation_factor = decimation_factor/self.display_rescaling_factor\n siz = int(len(canvas_coords)/2)\n out = []\n for i in range(siz):\n out.extend(\n (canvas_coords[2*i+1]*decimation_factor + self.canvas_full_image_upper_left_yx[0],\n canvas_coords[2 * i] * decimation_factor + self.canvas_full_image_upper_left_yx[1]))\n return out\n\n def canvas_rect_to_full_image_rect(self, canvas_rect):\n \"\"\"\n Gets the full image coordinates from the canvas coordinates.\n\n Parameters\n ----------\n canvas_rect : Tuple|List\n\n Returns\n -------\n Tuple\n \"\"\"\n\n image_y1, image_x1 = self.canvas_coords_to_full_image_yx([canvas_rect[0], canvas_rect[1]])\n image_y2, 
image_x2 = self.canvas_coords_to_full_image_yx([canvas_rect[2], canvas_rect[3]])\n\n if image_x1 < 0:\n image_x1 = 0\n if image_y1 < 0:\n image_y1 = 0\n if image_x2 > self.image_reader.full_image_nx:\n image_x2 = self.image_reader.full_image_nx\n if image_y2 > self.image_reader.full_image_ny:\n image_y2 = self.image_reader.full_image_ny\n\n return image_y1, image_x1, image_y2, image_x2\n\n def full_image_yx_to_canvas_coords(self, full_image_yx):\n \"\"\"\n Gets the canvas coordinates from full image coordinates in yx order.\n\n Parameters\n ----------\n full_image_yx : Tuple|List\n\n Returns\n -------\n List\n \"\"\"\n\n decimation_factor = self.decimation_factor\n if self.scale_to_fit_canvas:\n decimation_factor = decimation_factor / self.display_rescaling_factor\n\n siz = int(len(full_image_yx)/2)\n out = []\n for i in range(siz):\n out.extend(\n (float(full_image_yx[2*i+1] - self.canvas_full_image_upper_left_yx[1]) / decimation_factor,\n float(full_image_yx[2 * i] - self.canvas_full_image_upper_left_yx[0]) / decimation_factor))\n return out\n\n\nclass VectorObject(object):\n def __init__(self, vector_type,\n tkinter_options,\n image_drag_limits=None):\n self.type = vector_type\n self.tkinter_options = tkinter_options\n self.image_coords = None\n self.point_size = None\n self.image_drag_limits = image_drag_limits\n if vector_type == SHAPE_TYPES.RECT or vector_type == SHAPE_TYPES.POLYGON:\n self.color = tkinter_options['outline']\n elif vector_type == SHAPE_TYPES.LINE or vector_type == SHAPE_TYPES.ARROW:\n self.color = tkinter_options['fill']\n\n\nclass AppVariables(object):\n \"\"\"\n The canvas image application variables.\n \"\"\"\n\n canvas_height = IntegerDescriptor(\n 'canvas_height', default_value=200,\n docstring='The default canvas height, in pixels.') # type: int\n canvas_width = IntegerDescriptor(\n 'canvas_width', default_value=300,\n docstring='The default canvas width, in pixels.') # type: int\n rect_border_width = IntegerDescriptor(\n 'rect_border_width', default_value=2,\n docstring='The (margin) rectangular border width, in pixels.') # type: int\n line_width = IntegerDescriptor(\n 'line_width', default_value=2,\n docstring='The line width, in pixels.') # type: int\n point_size = IntegerDescriptor(\n 'point_size', default_value=3,\n docstring='The point size, in pixels.') # type: int\n poly_border_width = IntegerDescriptor(\n 'poly_border_width', default_value=2,\n docstring='The polygon border width, in pixels.') # type: int\n poly_fill = StringDescriptor(\n 'poly_fill',\n docstring='The polygon fill color(named or hexidecimal string).') # type: Union[None, str]\n foreground_color = StringDescriptor(\n 'foreground_color', default_value='red',\n docstring='The foreground color (named or hexidecimal string).') # type: str\n image_id = IntegerDescriptor(\n 'image_id',\n docstring='The image id.') # type: int\n current_shape_id = IntegerDescriptor(\n 'current_shape_id',\n docstring='The current shape id.') # type: int\n current_shape_canvas_anchor_point_xy = IntegerTupleDescriptor(\n 'current_shape_canvas_anchor_point_xy', length=2,\n docstring='The current shape canvas anchor point, in xy order.') # type: Union[None, tuple]\n pan_anchor_point_xy = IntegerTupleDescriptor(\n 'pan_anchor_point_xy', length=2, default_value=(0, 0),\n docstring='The pan anchor point, in xy order.') # type: Union[None, tuple]\n canvas_image_object = TypedDescriptor(\n 'canvas_image_object', CanvasImage,\n docstring='The canvas image object.') # type: CanvasImage\n _tk_im = TypedDescriptor(\n 
'_tk_im', ImageTk.PhotoImage,\n docstring='The Tkinter Image.') # type: ImageTk.PhotoImage\n # zoom rectangle properties\n zoom_rect_id = IntegerDescriptor(\n 'zoom_rect_id',\n docstring='The zoom rectangle id.') # type: int\n zoom_rect_color = StringDescriptor(\n 'zoom_rect_color', default_value='cyan',\n docstring='The zoom rectangle color (named or hexidecimal).') # type: str\n zoom_rect_border_width = IntegerDescriptor(\n 'zoom_rect_border_width', default_value=2,\n docstring='The zoom rectangle border width, in pixels.') # type: int\n # selection rectangle properties\n select_rect_id = IntegerDescriptor(\n 'select_rect_id',\n docstring='The select rectangle id.') # type: int\n select_rect_color = StringDescriptor(\n 'select_rect_color', default_value='red',\n docstring='The select rectangle color (named or hexidecimal).') # type: str\n select_rect_border_width = IntegerDescriptor(\n 'select_rect_border_width', default_value=2,\n docstring='The select rectangle border width, in pixels.') # type: int\n # animation properties\n animate_zoom = BooleanDescriptor(\n 'animate_zoom', default_value=True,\n docstring='Specifies whether to animate zooming.') # type: bool\n n_zoom_animations = IntegerDescriptor(\n 'n_zoom_animations', default_value=5,\n docstring='The number of zoom frames.') # type: int\n animate_pan = BooleanDescriptor(\n 'animate_pan', default_value=False,\n docstring='Specifies whether to animate panning.') # type: bool\n animation_time_in_seconds = FloatDescriptor(\n 'animation_time_in_seconds', default_value=0.3,\n docstring='The animation time in seconds.') # type: float\n # tool identifiers\n active_tool = StringDescriptor(\n 'active_tool',\n docstring='The active tool name.') # type: str\n current_tool = StringDescriptor(\n 'current_tool',\n docstring='The current tool name.') # type: str\n # some configuration properties\n vertex_selector_pixel_threshold = FloatDescriptor(\n 'vertex_selector_pixel_threshold', default_value=10.0,\n docstring='The pixel threshold for vertex selection.') # type: float\n mouse_wheel_zoom_percent_per_event = FloatDescriptor(\n 'mouse_wheel_zoom_percent_per_event', default_value=1.5,\n docstring='The percent to zoom in/out on mouse wheel detection.') # type: float\n highlight_n_colors_cycle = IntegerDescriptor(\n 'highlight_n_colors_cycle', default_value=10,\n docstring='The length of highlight colors cycle.') # type: int\n zoom_on_wheel = BooleanDescriptor(\n 'zoom_on_wheel', default_value=True,\n docstring='Zoom on the mouse wheel operation?') # type: bool\n rescale_image_to_fit_canvas = BooleanDescriptor(\n 'rescale_image_to_fit_canvas', default_value=True,\n docstring='Rescale the image to fit the canvas?') # type: bool\n scale_dynamic_range = BooleanDescriptor(\n 'scale_dynamic_range', default_value=False,\n docstring='Scale the dynamic range of the image?') # type: bool\n # some state properties\n the_canvas_is_currently_zooming = BooleanDescriptor(\n 'the_canvas_is_currently_zooming', default_value=False,\n docstring='Is the canvas object currently zooming?') # type: bool\n actively_drawing_shape = BooleanDescriptor(\n 'actively_drawing_shape', default_value=False,\n docstring='Is the canvas object actively drawing a shape?') # type: bool\n tmp_closest_coord_index = IntegerDescriptor(\n 'tmp_closest_coord_index', default_value=0,\n docstring='') # type: int\n\n def __init__(self):\n\n self.shape_ids = [] # type: [int]\n self.vector_objects = {} # type: {VectorObject}\n self.shape_properties = {}\n self.shape_drag_image_coord_limits = 
{} # type: dict\n self.highlight_color_palette = SeabornHexPalettes.blues # type: List[str]\n self.tmp_points = None # type: [int]\n\n\nclass ToolConstants:\n ZOOM_IN_TOOL = \"zoom in\"\n ZOOM_OUT_TOOL = \"zoom out\"\n DRAW_RECT_BY_DRAGGING = \"draw rect by dragging\"\n DRAW_RECT_BY_CLICKING = \"draw rect by clicking\"\n DRAW_ELLIPSE_BY_DRAGGING = \"draw ellipse by dragging\"\n DRAW_LINE_BY_DRAGGING = \"draw line by dragging\"\n DRAW_LINE_BY_CLICKING = \"draw line by clicking\"\n DRAW_ARROW_BY_DRAGGING = \"draw arrow by dragging\"\n DRAW_ARROW_BY_CLICKING = \"draw arrow by clicking\"\n DRAW_POLYGON_BY_CLICKING = \"draw polygon by clicking\"\n DRAW_POINT_BY_CLICKING = \"draw point by clicking\"\n SELECT_TOOL = \"select tool\"\n SELECT_CLOSEST_SHAPE_TOOL = \"select closest shape\"\n PAN_TOOL = \"pan tool\"\n TRANSLATE_SHAPE_TOOL = \"translate shape tool\"\n EDIT_SHAPE_COORDS_TOOL = \"edit shape coords tool\"\n EDIT_SHAPE_TOOL = \"edit shape tool\"\n\n\nclass ShapePropertyConstants:\n SHAPE_TYPE = \"shape type\"\n CANVAS_COORDS = \"canvas coords\"\n IMAGE_COORDS = \"image coords\"\n POINT_SIZE = \"point size\"\n COLOR = \"color\"\n\n\nclass ShapeTypeConstants:\n POINT = \"point\"\n LINE = \"line\"\n RECT = \"rect\"\n ELLIPSE = \"ellipse\"\n ARROW = \"arrow\"\n POLYGON = \"polygon\"\n TEXT = \"text\"\n\n\nSHAPE_PROPERTIES = ShapePropertyConstants()\nSHAPE_TYPES = ShapeTypeConstants()\nTOOLS = ToolConstants()\n\n\nclass ImageCanvas(basic_widgets.Canvas):\n def __init__(self, primary):\n \"\"\"\n\n Parameters\n ----------\n primary\n The primary widget.\n \"\"\"\n osplat = platform.system()\n if osplat == \"Windows\":\n import ctypes\n user32 = ctypes.windll.user32\n user32.SetProcessDPIAware()\n\n basic_widgets.Canvas.__init__(self, primary, highlightthickness=0)\n self.pack(fill=tkinter.BOTH, expand=tkinter.NO)\n\n self.variables = AppVariables()\n\n self.variables.zoom_rect_id = self.create_new_rect((0, 0, 1, 1), outline=self.variables.zoom_rect_color, width=self.variables.zoom_rect_border_width)\n self.variables.select_rect_id = self.create_new_rect((0, 0, 1, 1), outline=self.variables.select_rect_color, width=self.variables.select_rect_border_width)\n\n # hide the shapes we initialize\n self.hide_shape(self.variables.select_rect_id)\n self.hide_shape(self.variables.zoom_rect_id)\n\n self.on_left_mouse_click(self.callback_handle_left_mouse_click)\n self.on_left_mouse_motion(self.callback_handle_left_mouse_motion)\n self.on_left_mouse_release(self.callback_handle_left_mouse_release)\n self.on_right_mouse_click(self.callback_handle_right_mouse_click)\n self.on_mouse_motion(self.callback_handle_mouse_motion)\n\n self.on_mouse_wheel(self.callback_mouse_zoom)\n\n self.variables.active_tool = None\n self.variables.current_shape_id = None\n\n def _set_image_reader(self, image_reader):\n \"\"\"\n Set the image reader.\n\n Parameters\n ----------\n image_reader : ImageReader\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.canvas_image_object = CanvasImage(\n image_reader, self.variables.canvas_width, self.variables.canvas_height)\n if self.variables.rescale_image_to_fit_canvas:\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n else:\n self.set_image_from_numpy_array(self.variables.canvas_image_object.canvas_decimated_image)\n\n def get_vector_object(self, vector_id):\n \"\"\"\n\n Parameters\n ----------\n vector_id : int\n\n Returns\n -------\n VectorObject\n \"\"\"\n return self.variables.vector_objects[str(vector_id)]\n\n def get_canvas_line_length(self, 
line_id):\n \"\"\"\n Gets the canvas line length.\n\n Parameters\n ----------\n line_id : int\n\n Returns\n -------\n int\n \"\"\"\n\n line_coords = self.coords(line_id)\n x1 = line_coords[0]\n y1 = line_coords[1]\n x2 = line_coords[2]\n y2 = line_coords[3]\n length = numpy.sqrt(numpy.square(x2-x1) + numpy.square(y2-y1))\n return length\n\n def get_image_line_length(self, line_id):\n \"\"\"\n Gest the image line length.\n\n Parameters\n ----------\n line_id : int\n\n Returns\n -------\n int\n \"\"\"\n\n canvas_line_length = self.get_canvas_line_length(line_id)\n return canvas_line_length * self.variables.canvas_image_object.decimation_factor\n\n def hide_shape(self, shape_id):\n \"\"\"\n Hide the shape specified by the provided id.\n\n Parameters\n ----------\n shape_id : int\n\n Returns\n -------\n None\n \"\"\"\n\n if shape_id:\n self.itemconfigure(shape_id, state=\"hidden\")\n\n def show_shape(self, shape_id):\n \"\"\"\n Show or un-hide the shape specified by the provided id.\n\n Parameters\n ----------\n shape_id : int\n\n Returns\n -------\n None\n \"\"\"\n\n if shape_id:\n self.itemconfigure(shape_id, state=\"normal\")\n\n def callback_mouse_zoom(self, event):\n \"\"\"\n The mouse zoom callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n if self.variables.zoom_on_wheel:\n delta = event.delta\n single_delta = 120\n\n # handle case where platform is linux:\n if platform.system() == \"Linux\":\n delta = single_delta\n if event.num == 5:\n delta = delta*-1\n\n zoom_in_box_half_width = int(self.variables.canvas_width / self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_out_box_half_width = int(self.variables.canvas_width * self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_in_box_half_height = int(self.variables.canvas_height / self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_out_box_half_height = int(self.variables.canvas_height * self.variables.mouse_wheel_zoom_percent_per_event / 2)\n\n x = event.x\n y = event.y\n\n after_zoom_x_offset = (self.variables.canvas_width/2 - x)/self.variables.mouse_wheel_zoom_percent_per_event\n after_zoom_y_offset = (self.variables.canvas_height/2 - y)/self.variables.mouse_wheel_zoom_percent_per_event\n\n x_offset_point = x + after_zoom_x_offset\n y_offset_point = y + after_zoom_y_offset\n\n zoom_in_box = (x_offset_point - zoom_in_box_half_width,\n y_offset_point - zoom_in_box_half_height,\n x_offset_point + zoom_in_box_half_width,\n y_offset_point + zoom_in_box_half_height)\n\n zoom_out_box = (x_offset_point - zoom_out_box_half_width,\n y_offset_point - zoom_out_box_half_height,\n x_offset_point + zoom_out_box_half_width,\n y_offset_point + zoom_out_box_half_height)\n\n if self.variables.the_canvas_is_currently_zooming:\n pass\n else:\n if delta > 0:\n self.zoom_to_selection(zoom_in_box, self.variables.animate_zoom)\n else:\n self.zoom_to_selection(zoom_out_box, self.variables.animate_zoom)\n else:\n pass\n\n def animate_with_numpy_frame_sequence(self, numpy_frame_sequence, frames_per_second=15):\n \"\"\"\n Animate with a sequence of numpy arrays.\n\n Parameters\n ----------\n numpy_frame_sequence : List[numpy.ndarray]\n frames_per_second : float\n\n Returns\n -------\n None\n \"\"\"\n\n sleep_time = 1/frames_per_second\n for animation_frame in numpy_frame_sequence:\n tic = time.time()\n self.set_image_from_numpy_array(animation_frame)\n self.update()\n toc = time.time()\n frame_generation_time = toc-tic\n if frame_generation_time < sleep_time:\n new_sleep_time = sleep_time - 
frame_generation_time\n time.sleep(new_sleep_time)\n else:\n pass\n\n def animate_with_pil_frame_sequence(self, pil_frame_sequence, frames_per_second=15):\n \"\"\"\n Animate with a sequence of PIL images.\n\n Parameters\n ----------\n pil_frame_sequence : List[PIL.Image]\n frames_per_second : float\n\n Returns\n -------\n None\n \"\"\"\n\n sleep_time = 1/frames_per_second\n for animation_frame in pil_frame_sequence:\n tic = time.time()\n self._set_image_from_pil_image(animation_frame)\n self.update()\n toc = time.time()\n frame_generation_time = toc-tic\n if frame_generation_time < sleep_time:\n new_sleep_time = sleep_time - frame_generation_time\n time.sleep(new_sleep_time)\n else:\n pass\n\n def callback_handle_left_mouse_click(self, event):\n \"\"\"\n Left mouse click callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n\n \"\"\"\n\n if self.variables.active_tool == TOOLS.PAN_TOOL:\n self.variables.pan_anchor_point_xy = event.x, event.y\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:\n closest_coord_index = self.find_closest_shape_coord(self.variables.current_shape_id, event.x, event.y)\n self.variables.tmp_closest_coord_index = closest_coord_index\n elif self.variables.active_tool == TOOLS.SELECT_CLOSEST_SHAPE_TOOL:\n closest_shape_id = self.find_closest_shape(event.x, event.y)\n self.variables.current_shape_id = closest_shape_id\n self.highlight_existing_shape(self.variables.current_shape_id)\n else:\n start_x = self.canvasx(event.x)\n start_y = self.canvasy(event.y)\n\n self.variables.current_shape_canvas_anchor_point_xy = (start_x, start_y)\n if self.variables.current_shape_id not in self.variables.shape_ids:\n coords = (start_x, start_y, start_x + 1, start_y + 1)\n if self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:\n self.create_new_line(coords)\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:\n self.create_new_line(coords)\n self.variables.actively_drawing_shape = True\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:\n self.create_new_arrow(coords)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.create_new_arrow(coords)\n self.variables.actively_drawing_shape = True\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:\n self.create_new_rect(coords)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:\n self.create_new_rect(coords)\n self.variables.actively_drawing_shape = True\n elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:\n self.create_new_ellipse(coords)\n elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:\n self.create_new_point((start_x, start_y))\n elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.create_new_polygon(coords)\n self.variables.actively_drawing_shape = True\n else:\n print(\"no tool selected\")\n else:\n if self.variables.current_shape_id in self.variables.shape_ids:\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if vector_object.type == SHAPE_TYPES.POINT:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,\n (start_x, start_y))\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:\n self.event_click_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.event_click_line(event)\n elif 
self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.event_click_polygon(event)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:\n if self.variables.actively_drawing_shape:\n self.variables.actively_drawing_shape = False\n else:\n self.variables.actively_drawing_shape = True\n\n def callback_handle_left_mouse_release(self, event):\n \"\"\"\n Left mouse release callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n if self.variables.active_tool == TOOLS.PAN_TOOL:\n self._pan(event)\n if self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:\n rect_coords = self.coords(self.variables.zoom_rect_id)\n self.zoom_to_selection(rect_coords, self.variables.animate_zoom)\n self.hide_shape(self.variables.zoom_rect_id)\n if self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:\n rect_coords = self.coords(self.variables.zoom_rect_id)\n x1 = -rect_coords[0]\n x2 = self.variables.canvas_width + rect_coords[2]\n y1 = -rect_coords[1]\n y2 = self.variables.canvas_height + rect_coords[3]\n zoom_rect = (x1, y1, x2, y2)\n self.zoom_to_selection(zoom_rect, self.variables.animate_zoom)\n self.hide_shape(self.variables.zoom_rect_id)\n\n def callback_handle_mouse_motion(self, event):\n \"\"\"\n Mouse motion callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n if self.variables.actively_drawing_shape:\n if self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:\n self.event_drag_multipoint_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.event_drag_multipoint_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.event_drag_multipoint_polygon(event)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:\n self.event_drag_line(event)\n elif self.variables.current_tool == TOOLS.EDIT_SHAPE_TOOL:\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if vector_object.type == SHAPE_TYPES.RECT or vector_object.type == SHAPE_TYPES.ELLIPSE:\n select_x1, select_y1, select_x2, select_y2 = self.get_shape_canvas_coords(\n self.variables.current_shape_id)\n select_xul = min(select_x1, select_x2)\n select_xlr = max(select_x1, select_x2)\n select_yul = min(select_y1, select_y2)\n select_ylr = max(select_y1, select_y2)\n\n distance_to_ul = numpy.sqrt(numpy.square(event.x - select_xul) + numpy.square(event.y - select_yul))\n distance_to_ur = numpy.sqrt(numpy.square(event.x - select_xlr) + numpy.square(event.y - select_yul))\n distance_to_lr = numpy.sqrt(numpy.square(event.x - select_xlr) + numpy.square(event.y - select_ylr))\n distance_to_ll = numpy.sqrt(numpy.square(event.x - select_xul) + numpy.square(event.y - select_ylr))\n\n if distance_to_ul < self.variables.vertex_selector_pixel_threshold:\n self.config(cursor=\"top_left_corner\")\n self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL\n elif distance_to_ur < self.variables.vertex_selector_pixel_threshold:\n self.config(cursor=\"top_right_corner\")\n self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL\n elif distance_to_lr < self.variables.vertex_selector_pixel_threshold:\n self.config(cursor=\"bottom_right_corner\")\n self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL\n elif distance_to_ll < self.variables.vertex_selector_pixel_threshold:\n self.config(cursor=\"bottom_left_corner\")\n self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL\n elif select_xul < event.x < select_xlr and select_yul < event.y < select_ylr:\n self.config(cursor=\"fleur\")\n 
self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL\n else:\n self.config(cursor=\"arrow\")\n self.variables.active_tool = None\n elif vector_object.type == SHAPE_TYPES.LINE or vector_object.type == SHAPE_TYPES.ARROW:\n canvas_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n x_coords = canvas_coords[0::2]\n y_coords = canvas_coords[1::2]\n distance_to_vertex = numpy.sqrt(numpy.square(event.x - x_coords[0]) +\n numpy.square(event.y - y_coords[0]))\n p2 = numpy.asarray((x_coords[1], y_coords[1]))\n p1 = numpy.asarray((x_coords[0], y_coords[0]))\n p3 = numpy.asarray((event.x, event.y))\n distance_to_line = norm(numpy.cross(p2 - p1, p1 - p3)) / norm(p2 - p1)\n for xy in zip(x_coords, y_coords):\n vertex_distance = numpy.sqrt(numpy.square(event.x - xy[0]) + numpy.square(event.y - xy[1]))\n if vertex_distance < distance_to_vertex:\n distance_to_vertex = vertex_distance\n\n if distance_to_vertex < self.variables.vertex_selector_pixel_threshold:\n self.config(cursor=\"target\")\n self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL\n elif distance_to_line < self.variables.vertex_selector_pixel_threshold:\n self.config(cursor=\"fleur\")\n self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL\n else:\n self.config(cursor=\"arrow\")\n self.variables.active_tool = None\n elif vector_object.type == SHAPE_TYPES.LINE or vector_object.type == SHAPE_TYPES.POLYGON:\n canvas_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n x_coords = canvas_coords[0::2]\n y_coords = canvas_coords[1::2]\n xy_points = [xy for xy in zip(x_coords, y_coords)]\n distance_to_vertex = numpy.sqrt(numpy.square(event.x - x_coords[0]) +\n numpy.square(event.y - y_coords[0]))\n for xy in zip(x_coords, y_coords):\n vertex_distance = numpy.sqrt(numpy.square(event.x - xy[0]) + numpy.square(event.y - xy[1]))\n if vertex_distance < distance_to_vertex:\n distance_to_vertex = vertex_distance\n\n if distance_to_vertex < self.variables.vertex_selector_pixel_threshold:\n self.config(cursor=\"target\")\n self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL\n elif polygon_utils.point_inside_polygon(event.x, event.y, xy_points):\n self.config(cursor=\"fleur\")\n self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL\n else:\n self.config(cursor=\"arrow\")\n self.variables.active_tool = None\n elif vector_object.type == SHAPE_TYPES.POINT:\n canvas_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n distance_to_point = numpy.sqrt(numpy.square(event.x - canvas_coords[0]) +\n numpy.square(event.y - canvas_coords[1]))\n if distance_to_point < self.variables.vertex_selector_pixel_threshold:\n self.config(cursor=\"fleur\")\n self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL\n\n def callback_handle_left_mouse_motion(self, event):\n \"\"\"\n Left mouse motion callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n # TODO: update this for the case where there is no current shape id\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if self.variables.active_tool == TOOLS.PAN_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n self.move(self.variables.image_id, x_dist, y_dist)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n t_coords = 
self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = numpy.asarray(t_coords) + x_dist\n new_coords_y = numpy.asarray(t_coords) + y_dist\n new_coords[1::2] = new_coords_y[1::2]\n if vector_object.image_drag_limits:\n canvas_limits = self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n x_vertices = new_coords[0::2]\n y_vertices = new_coords[1::2]\n within_x_limits = True\n within_y_limits = True\n for x_vertex in x_vertices:\n if canvas_limits[2] < x_vertex or x_vertex < canvas_limits[0]:\n within_x_limits = False\n for y_vertex in y_vertices:\n if y_vertex < canvas_limits[1] or y_vertex > canvas_limits[3]:\n within_y_limits = False\n if not within_x_limits:\n new_coords[0::2] = t_coords[0::2]\n if not within_y_limits:\n new_coords[1::2] = t_coords[1::2]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,\n new_coords,\n update_pixel_coords=True)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:\n previous_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n coord_x_index = self.variables.tmp_closest_coord_index*2\n coord_y_index = coord_x_index + 1\n new_coords = list(previous_coords)\n new_coords[coord_x_index] = event.x\n new_coords[coord_y_index] = event.y\n if vector_object.image_drag_limits:\n drag_x_lim_1, drag_y_lim_1, drag_x_lim_2, drag_y_lim_2 = \\\n self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n if new_coords[coord_x_index] < drag_x_lim_1:\n new_coords[coord_x_index] = drag_x_lim_1\n if new_coords[coord_x_index] > drag_x_lim_2:\n new_coords[coord_x_index] = drag_x_lim_2\n if new_coords[coord_y_index] < drag_y_lim_1:\n new_coords[coord_y_index] = drag_y_lim_1\n if new_coords[coord_y_index] > drag_y_lim_2:\n new_coords[coord_y_index] = drag_y_lim_2\n\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, tuple(new_coords))\n elif self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.SELECT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (event.x, event.y))\n\n def highlight_existing_shape(self, shape_id):\n \"\"\"\n Highlights an existing shape, according to provided id.\n\n Parameters\n ----------\n shape_id : int\n\n Returns\n -------\n None\n \"\"\"\n\n original_color = self.get_vector_object(shape_id).color\n colors = color_utils.get_full_hex_palette(self.variables.highlight_color_palette, self.variables.highlight_n_colors_cycle)\n for color in colors:\n self.change_shape_color(shape_id, color)\n time.sleep(0.001)\n self.update()\n colors.reverse()\n for color in colors:\n self.change_shape_color(shape_id, color)\n time.sleep(0.001)\n self.update()\n self.change_shape_color(shape_id, original_color)\n\n # noinspection PyUnusedLocal\n def callback_handle_right_mouse_click(self, event):\n \"\"\"\n 
Callback for right mouse click.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n if self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:\n self.variables.actively_drawing_shape = False\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.variables.actively_drawing_shape = False\n elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.variables.actively_drawing_shape = False\n\n def set_image_from_numpy_array(self, numpy_data):\n \"\"\"\n This is the default way to set and display image data. All other methods\n to update images should ultimately call this.\n\n Parameters\n ----------\n numpy_data : numpy.ndarray\n\n Returns\n -------\n None\n \"\"\"\n\n if self.variables.scale_dynamic_range:\n min_data = numpy.min(numpy_data)\n dynamic_range = numpy.max(numpy_data) - min_data\n numpy_data = numpy.asanyarray(\n 255*(numpy_data - min_data)/dynamic_range, dtype=numpy.uint8)\n pil_image = PIL.Image.fromarray(numpy_data)\n self._set_image_from_pil_image(pil_image)\n\n def set_canvas_size(self, width_npix, height_npix):\n \"\"\"\n Set the canvas size.\n\n Parameters\n ----------\n width_npix : int|float\n height_npix : int|float\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.canvas_width = width_npix\n self.variables.canvas_height = height_npix\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.canvas_nx = width_npix\n self.variables.canvas_image_object.canvas_ny = height_npix\n self.config(width=width_npix, height=height_npix)\n\n def modify_existing_shape_using_canvas_coords(self, shape_id, new_coords, update_pixel_coords=True):\n \"\"\"\n Modify an existing shape.\n\n Parameters\n ----------\n shape_id : int\n new_coords : Tuple|List\n update_pixel_coords : bool\n\n Returns\n -------\n None\n \"\"\"\n vector_object = self.get_vector_object(shape_id)\n if vector_object.type == SHAPE_TYPES.POINT:\n point_size = vector_object.point_size\n x1, y1 = (new_coords[0] - point_size), (new_coords[1] - point_size)\n x2, y2 = (new_coords[0] + point_size), (new_coords[1] + point_size)\n canvas_drawing_coords = (x1, y1, x2, y2)\n else:\n canvas_drawing_coords = tuple(new_coords)\n self.coords(shape_id, canvas_drawing_coords)\n if update_pixel_coords:\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, new_coords)\n\n def modify_existing_shape_using_image_coords(self, shape_id, image_coords):\n \"\"\"\n Modify an existing shape.\n\n Parameters\n ----------\n shape_id : int\n image_coords : Tuple|List\n\n Returns\n -------\n None\n \"\"\"\n\n self.set_shape_pixel_coords(shape_id, image_coords)\n canvas_coords = self.image_coords_to_canvas_coords(image_coords)\n self.modify_existing_shape_using_canvas_coords(shape_id, canvas_coords, update_pixel_coords=False)\n\n def event_drag_multipoint_line(self, event):\n \"\"\"\n Drag multipoint line callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n if self.variables.current_shape_id:\n self.show_shape(self.variables.current_shape_id)\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n coords = self.coords(self.variables.current_shape_id)\n new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if vector_object.type == SHAPE_TYPES.ARROW or vector_object.type == SHAPE_TYPES.LINE:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n else:\n pass\n\n def 
event_drag_multipoint_polygon(self, event):\n \"\"\"\n Drag a polygon callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n if self.variables.current_shape_id:\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n if drag_lims:\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n\n self.show_shape(self.variables.current_shape_id)\n coords = self.coords(self.variables.current_shape_id)\n new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n else:\n pass\n\n def event_drag_line(self, event):\n \"\"\"\n Drag a line callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n if self.variables.current_shape_id:\n self.show_shape(self.variables.current_shape_id)\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n if self.get_vector_object(self.variables.current_shape_id).image_drag_limits:\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n self.modify_existing_shape_using_canvas_coords(\n self.variables.current_shape_id,\n (self.variables.current_shape_canvas_anchor_point_xy[0],\n self.variables.current_shape_canvas_anchor_point_xy[1],\n event_x_pos, event_y_pos))\n\n def event_click_line(self, event):\n \"\"\"\n Click a line callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n if self.get_vector_object(self.variables.current_shape_id).image_drag_limits:\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n if self.variables.actively_drawing_shape:\n old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = tuple(list(old_coords) + [event_x_pos, event_y_pos])\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n else:\n new_coords = (event_x_pos, event_y_pos, event_x_pos + 1, event_y_pos + 1)\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n self.variables.actively_drawing_shape = True\n\n def delete_shape(self, shape_id):\n \"\"\"\n Deletes a shape by its id.\n\n Parameters\n ----------\n shape_id : int\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.shape_ids.remove(shape_id)\n del self.variables.vector_objects[str(shape_id)]\n self.delete(shape_id)\n if shape_id == 
self.variables.current_shape_id:\n self.variables.current_shape_id = None\n\n def event_click_polygon(self, event):\n \"\"\"\n Click a polygon callback.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n if drag_lims:\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n\n if self.variables.actively_drawing_shape:\n old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = list(old_coords) + [event_x_pos, event_y_pos]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n # re-initialize shape if we're not actively drawing\n else:\n new_coords = (event.x, event.y, event_x_pos+1, event_y_pos+1)\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n self.variables.actively_drawing_shape = True\n\n def create_new_text(self, *args, **kw):\n \"\"\"Create text with coordinates x1,y1.\"\"\"\n shape_id = self._create('text', args, kw)\n self.variables.shape_ids.append(shape_id)\n canvas_coords = args[0]\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.TEXT, None)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)\n self.variables.current_shape_id = shape_id\n return shape_id\n\n def create_new_rect(self, canvas_coords, **options):\n \"\"\"\n Create a new rectangle.\n\n Parameters\n ----------\n canvas_coords : Tuple|List\n options\n Optional Keyword arguments.\n\n Returns\n -------\n int\n \"\"\"\n\n if 'outline' not in options:\n options['outline'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.rect_border_width\n shape_id = self.create_rectangle(*canvas_coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.RECT, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)\n self.variables.current_shape_id = shape_id\n return shape_id\n\n def create_new_ellipse(self, canvas_coords, **options):\n \"\"\"\n Create a new rectangle.\n\n Parameters\n ----------\n canvas_coords : Tuple|List\n options\n Optional Keyword arguments.\n\n Returns\n -------\n int\n \"\"\"\n\n if 'outline' not in options:\n options['outline'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.rect_border_width\n shape_id = self.create_oval(*canvas_coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.RECT, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)\n self.variables.current_shape_id = shape_id\n return shape_id\n\n def create_new_polygon(self, coords, **options):\n \"\"\"\n Create a new polygon.\n\n Parameters\n ----------\n coords : Tuple|List\n options\n Optional keyword arguments.\n\n Returns\n -------\n int\n \"\"\"\n\n if 'outline' not in options:\n options['outline'] = self.variables.foreground_color\n if 'width' not in options:\n 
options['width'] = self.variables.poly_border_width\n if 'fill' not in options:\n options['fill'] = ''\n\n shape_id = self.create_polygon(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POLYGON, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id\n\n def create_new_arrow(self, coords, **options):\n \"\"\"\n Create a new arrow.\n\n Parameters\n ----------\n coords : Tuple|List\n options\n Optional keyword arguments.\n\n Returns\n -------\n int\n \"\"\"\n\n if 'fill' not in options:\n options['fill'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.line_width\n if 'arrow' not in options:\n options['arrow'] = tkinter.LAST\n\n shape_id = self.create_line(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.ARROW, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id\n\n def create_new_line(self, coords, **options):\n \"\"\"\n Create a new line.\n\n Parameters\n ----------\n coords : Tuple|List\n options\n Optional keyword arguments.\n\n Returns\n -------\n int\n \"\"\"\n\n if 'fill' not in options:\n options['fill'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.line_width\n\n shape_id = self.create_line(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.LINE, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id\n\n def create_new_point(self, coords, **options):\n \"\"\"\n Create a new point.\n\n Parameters\n ----------\n coords : Tuple|List\n options\n Optional keyword arguments.\n\n Returns\n -------\n int\n \"\"\"\n\n if 'fill' not in options:\n options['fill'] = self.variables.foreground_color\n\n x1, y1 = (coords[0] - self.variables.point_size), (coords[1] - self.variables.point_size)\n x2, y2 = (coords[0] + self.variables.point_size), (coords[1] + self.variables.point_size)\n shape_id = self.create_oval(x1, y1, x2, y2, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POINT, options)\n self.variables.vector_objects[str(shape_id)].point_size = self.variables.point_size\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id\n\n def change_shape_color(self, shape_id, color):\n \"\"\"\n Change the shape color.\n\n Parameters\n ----------\n shape_id : int\n color : str\n\n Returns\n -------\n None\n \"\"\"\n vector_object = self.get_vector_object(shape_id)\n shape_type = vector_object.type\n if shape_type == SHAPE_TYPES.RECT or shape_type == SHAPE_TYPES.POLYGON:\n self.itemconfig(shape_id, outline=color)\n vector_object.tkinter_options['outline'] = color\n else:\n self.itemconfig(shape_id, fill=color)\n vector_object.tkinter_options['fill'] = color\n\n def set_shape_pixel_coords_from_canvas_coords(self, shape_id, coords):\n \"\"\"\n Sets the shape pixel coordinates from the canvas coordinates.\n\n Parameters\n ----------\n shape_id : int\n\n Returns\n -------\n None\n \"\"\"\n\n if self.variables.canvas_image_object:\n image_coords 
= self.canvas_coords_to_image_coords(coords)\n self.set_shape_pixel_coords(shape_id, image_coords)\n\n def set_shape_pixel_coords(self, shape_id, image_coords):\n \"\"\"\n Set the pixel coordinates for the given shape.\n\n Parameters\n ----------\n shape_id : int\n image_coords : Tuple|List\n\n Returns\n -------\n None\n \"\"\"\n\n vector_object = self.get_vector_object(shape_id)\n vector_object.image_coords = image_coords\n\n def canvas_coords_to_image_coords(self, canvas_coords):\n \"\"\"\n Converts the canvas coordinates to image coordinates.\n\n Parameters\n ----------\n canvas_coords : tuple\n\n Returns\n -------\n tuple\n \"\"\"\n\n return self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_coords)\n\n def get_shape_canvas_coords(self, shape_id):\n \"\"\"\n Fetches the canvas coordinates for the shape.\n\n Parameters\n ----------\n shape_id : int\n\n Returns\n -------\n Tuple\n \"\"\"\n\n return self.image_coords_to_canvas_coords(self.get_vector_object(shape_id).image_coords)\n\n def get_shape_image_coords(self, shape_id):\n \"\"\"\n Fetches the image coordinates for the shape.\n\n Parameters\n ----------\n shape_id : int\n\n Returns\n -------\n Tuple\n \"\"\"\n\n return self.get_vector_object(shape_id).image_coords\n\n def shape_image_coords_to_canvas_coords(self, shape_id):\n \"\"\"\n Converts the image coordinates to the shapoe coordinates.\n\n Parameters\n ----------\n shape_id : int\n\n Returns\n -------\n Tuple\n \"\"\"\n\n image_coords = self.get_shape_image_coords(shape_id)\n return self.variables.canvas_image_object.full_image_yx_to_canvas_coords(image_coords)\n\n def image_coords_to_canvas_coords(self, image_coords):\n \"\"\"\n Converts the image coordinates to the shapoe coordinates.\n\n Parameters\n ----------\n image_coords : tuple\n\n Returns\n -------\n Tuple\n \"\"\"\n\n return self.variables.canvas_image_object.full_image_yx_to_canvas_coords(image_coords)\n\n def get_image_data_in_canvas_rect_by_id(self, rect_id, decimation=None):\n \"\"\"\n Fetches the image data.\n\n Parameters\n ----------\n rect_id : int\n decimation : None|int\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n image_coords = self.get_shape_image_coords(rect_id)\n tmp_image_coords = list(image_coords)\n if image_coords[0] > image_coords[2]:\n tmp_image_coords[0] = image_coords[2]\n tmp_image_coords[2] = image_coords[0]\n if image_coords[1] > image_coords[3]:\n tmp_image_coords[1] = image_coords[3]\n tmp_image_coords[3] = image_coords[1]\n if decimation is None:\n decimation = self.variables.canvas_image_object.\\\n get_decimation_factor_from_full_image_rect(tmp_image_coords)\n tmp_image_coords = (int(tmp_image_coords[0]), int(tmp_image_coords[1]), int(tmp_image_coords[2]), int(tmp_image_coords[3]))\n image_data_in_rect = self.variables.canvas_image_object.\\\n get_decimated_image_data_in_full_image_rect(tmp_image_coords, decimation)\n return image_data_in_rect\n\n def zoom_to_selection(self, canvas_rect, animate=False):\n \"\"\"\n Zoom to the selection using canvas coordinates.\n\n Parameters\n ----------\n canvas_rect : Tuple|List\n animate : bool\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.the_canvas_is_currently_zooming = True\n # fill up empty canvas space due to inconsistent ratios between the canvas rect and the canvas dimensions\n image_coords = self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_rect)\n self.zoom_to_full_image_selection(image_coords, animate=animate)\n\n def zoom_to_full_image_selection(self, image_rect, 
animate=False):\n \"\"\"\n Zoom to the selection using image coordinates.\n\n Parameters\n ----------\n image_rect_rect : Tuple|List\n animate : bool\n\n Returns\n -------\n None\n \"\"\"\n zoomed_image_height = image_rect[2] - image_rect[0]\n zoomed_image_width = image_rect[3] - image_rect[1]\n\n canvas_height_width_ratio = self.variables.canvas_height / self.variables.canvas_width\n zoomed_image_height_width_ratio = zoomed_image_height / zoomed_image_width\n\n new_image_width = zoomed_image_height / canvas_height_width_ratio\n new_image_height = zoomed_image_width * canvas_height_width_ratio\n\n if zoomed_image_height_width_ratio > canvas_height_width_ratio:\n image_zoom_point_center = (image_rect[3] + image_rect[1]) / 2\n image_rect[1] = image_zoom_point_center - new_image_width / 2\n image_rect[3] = image_zoom_point_center + new_image_width / 2\n else:\n image_zoom_point_center = (image_rect[2] + image_rect[0]) / 2\n image_rect[0] = image_zoom_point_center - new_image_height / 2\n image_rect[2] = image_zoom_point_center + new_image_height / 2\n\n # keep the rect within the image bounds\n image_y_ul = max(image_rect[0], 0)\n image_x_ul = max(image_rect[1], 0)\n image_y_br = min(image_rect[2], self.variables.canvas_image_object.image_reader.full_image_ny)\n image_x_br = min(image_rect[3], self.variables.canvas_image_object.image_reader.full_image_nx)\n\n # re-adjust if we ran off one of the edges\n if image_x_ul == 0:\n image_rect[3] = new_image_width\n if image_x_br == self.variables.canvas_image_object.image_reader.full_image_nx:\n image_rect[1] = self.variables.canvas_image_object.image_reader.full_image_nx - new_image_width\n if image_y_ul == 0:\n image_rect[2] = new_image_height\n if image_y_br == self.variables.canvas_image_object.image_reader.full_image_ny:\n image_rect[0] = self.variables.canvas_image_object.image_reader.full_image_ny - new_image_height\n\n # keep the rect within the image bounds\n image_y_ul = max(image_rect[0], 0)\n image_x_ul = max(image_rect[1], 0)\n image_y_br = min(image_rect[2], self.variables.canvas_image_object.image_reader.full_image_ny)\n image_x_br = min(image_rect[3], self.variables.canvas_image_object.image_reader.full_image_nx)\n\n new_canvas_rect = self.variables.canvas_image_object.full_image_yx_to_canvas_coords(\n (image_y_ul, image_x_ul, image_y_br, image_x_br))\n new_canvas_rect = (\n int(new_canvas_rect[0]), int(new_canvas_rect[1]), int(new_canvas_rect[2]), int(new_canvas_rect[3]))\n\n background_image = self.variables.canvas_image_object.display_image\n self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(new_canvas_rect)\n if self.variables.rescale_image_to_fit_canvas:\n new_image = PIL.Image.fromarray(self.variables.canvas_image_object.display_image)\n else:\n new_image = PIL.Image.fromarray(self.variables.canvas_image_object.canvas_decimated_image)\n if animate is True:\n # create frame sequence\n n_animations = self.variables.n_zoom_animations\n background_image = background_image / 2\n background_image = numpy.asarray(background_image, dtype=numpy.uint8)\n canvas_x1, canvas_y1, canvas_x2, canvas_y2 = new_canvas_rect\n display_x_ul = min(canvas_x1, canvas_x2)\n display_x_br = max(canvas_x1, canvas_x2)\n display_y_ul = min(canvas_y1, canvas_y2)\n display_y_br = max(canvas_y1, canvas_y2)\n x_diff = new_image.width - (display_x_br - display_x_ul)\n y_diff = new_image.height - (display_y_br - display_y_ul)\n pil_background_image = PIL.Image.fromarray(background_image)\n frame_sequence = []\n for i in 
range(n_animations):\n new_x_ul = int(display_x_ul * (1 - i / (n_animations - 1)))\n new_y_ul = int(display_y_ul * (1 - i / (n_animations - 1)))\n new_size_x = int((display_x_br - display_x_ul) + x_diff * (i / (n_animations - 1)))\n new_size_y = int((display_y_br - display_y_ul) + y_diff * (i / (n_animations - 1)))\n resized_zoom_image = new_image.resize((new_size_x, new_size_y))\n animation_image = pil_background_image.copy()\n animation_image.paste(resized_zoom_image, (new_x_ul, new_y_ul))\n frame_sequence.append(animation_image)\n fps = n_animations / self.variables.animation_time_in_seconds\n self.animate_with_pil_frame_sequence(frame_sequence, frames_per_second=fps)\n if self.variables.rescale_image_to_fit_canvas:\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n else:\n self.set_image_from_numpy_array(self.variables.canvas_image_object.canvas_decimated_image)\n self.update()\n self.redraw_all_shapes()\n self.variables.the_canvas_is_currently_zooming = False\n\n def update_current_image(self):\n \"\"\"\n Updates the current image.\n\n Returns\n -------\n None\n \"\"\"\n\n rect = (0, 0, self.variables.canvas_width, self.variables.canvas_height)\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(rect)\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n self.update()\n\n def redraw_all_shapes(self):\n \"\"\"\n Redraw all the shapes.\n\n Returns\n -------\n None\n \"\"\"\n\n for shape_id in self.variables.shape_ids:\n pixel_coords = self.get_vector_object(shape_id).image_coords\n if pixel_coords:\n new_canvas_coords = self.shape_image_coords_to_canvas_coords(shape_id)\n self.modify_existing_shape_using_canvas_coords(shape_id, new_canvas_coords, update_pixel_coords=False)\n\n def set_current_tool_to_select_closest_shape(self):\n \"\"\"\n Sets the tool to the closest shape.\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.active_tool = TOOLS.SELECT_CLOSEST_SHAPE_TOOL\n self.variables.current_tool = TOOLS.SELECT_CLOSEST_SHAPE_TOOL\n\n def set_current_tool_to_zoom_out(self):\n \"\"\"\n Sets the current tool to zoom out.\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = self.variables.zoom_rect_id\n self.variables.active_tool = TOOLS.ZOOM_OUT_TOOL\n self.variables.current_tool = TOOLS.ZOOM_OUT_TOOL\n\n def set_current_tool_to_zoom_in(self):\n \"\"\"\n Sets the current tool to zoom in.\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = self.variables.zoom_rect_id\n self.variables.active_tool = TOOLS.ZOOM_IN_TOOL\n self.variables.current_tool = TOOLS.ZOOM_IN_TOOL\n\n def set_current_tool_to_draw_rect(self, rect_id=None):\n \"\"\"\n Sets the current tool to draw rectangle.\n\n Parameters\n ----------\n rect_id : int|None\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = rect_id\n self.show_shape(rect_id)\n self.variables.active_tool = TOOLS.DRAW_RECT_BY_DRAGGING\n self.variables.current_tool = TOOLS.DRAW_RECT_BY_DRAGGING\n\n def set_current_tool_to_draw_ellipse(self, ellipse_id=None):\n \"\"\"\n Sets the current tool to draw rectangle.\n\n Parameters\n ----------\n rect_id : int|None\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = ellipse_id\n self.show_shape(ellipse_id)\n self.variables.active_tool = TOOLS.DRAW_ELLIPSE_BY_DRAGGING\n self.variables.current_tool = TOOLS.DRAW_ELLIPSE_BY_DRAGGING\n\n def 
set_current_tool_to_draw_rect_by_clicking(self, rect_id=None):\n \"\"\"\n Sets the current tool to draw rectangle by clicking.\n\n Parameters\n ----------\n rect_id : None|int\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = rect_id\n self.show_shape(rect_id)\n self.variables.active_tool = TOOLS.DRAW_RECT_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_RECT_BY_CLICKING\n\n def set_current_tool_to_selection_tool(self):\n \"\"\"\n Sets the current tool to the selection tool.\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = self.variables.select_rect_id\n self.variables.active_tool = TOOLS.SELECT_TOOL\n self.variables.current_tool = TOOLS.SELECT_TOOL\n\n def set_current_tool_to_draw_line_by_dragging(self, line_id=None):\n \"\"\"\n Sets the current tool to draw line by dragging.\n\n Parameters\n ----------\n line_id : None|int\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = line_id\n self.show_shape(line_id)\n self.variables.active_tool = TOOLS.DRAW_LINE_BY_DRAGGING\n self.variables.current_tool = TOOLS.DRAW_LINE_BY_DRAGGING\n\n def set_current_tool_to_draw_line_by_clicking(self, line_id=None):\n \"\"\"\n Sets the current tool to draw line by clicking.\n\n Parameters\n ----------\n line_id : None|int\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = line_id\n self.show_shape(line_id)\n self.variables.active_tool = TOOLS.DRAW_LINE_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_LINE_BY_CLICKING\n\n def set_current_tool_to_draw_arrow_by_dragging(self, arrow_id=None):\n \"\"\"\n Sets the current tool to draw arrow by dragging.\n\n Parameters\n ----------\n arrow_id : None|int\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = arrow_id\n self.show_shape(arrow_id)\n self.variables.active_tool = TOOLS.DRAW_ARROW_BY_DRAGGING\n self.variables.current_tool = TOOLS.DRAW_ARROW_BY_DRAGGING\n\n def set_current_tool_to_draw_arrow_by_clicking(self, arrow_id=None):\n \"\"\"\n Sets the current tool to draw arrow by clicking.\n\n Parameters\n ----------\n arrow_id : None|int\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = arrow_id\n self.show_shape(arrow_id)\n self.variables.active_tool = TOOLS.DRAW_ARROW_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_ARROW_BY_CLICKING\n\n def set_current_tool_to_draw_polygon_by_clicking(self, polygon_id=None):\n \"\"\"\n Sets the current tool to draw polygon by clicking.\n\n Parameters\n ----------\n polygon_id : None|int\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = polygon_id\n self.show_shape(polygon_id)\n self.variables.active_tool = TOOLS.DRAW_POLYGON_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_POLYGON_BY_CLICKING\n\n def set_current_tool_to_draw_point(self, point_id=None):\n \"\"\"\n Sets the current tool to draw point.\n\n Parameters\n ----------\n point_id : None|int\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.current_shape_id = point_id\n self.show_shape(point_id)\n self.variables.active_tool = TOOLS.DRAW_POINT_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_POINT_BY_CLICKING\n\n def set_current_tool_to_translate_shape(self):\n \"\"\"\n Sets the current tool to translate shape.\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL\n self.variables.current_tool = TOOLS.TRANSLATE_SHAPE_TOOL\n\n def set_current_tool_to_none(self):\n \"\"\"\n Sets the current tool to None.\n\n Returns\n 
-------\n None\n \"\"\"\n\n self.variables.active_tool = None\n self.variables.current_tool = None\n\n def set_current_tool_to_edit_shape(self):\n \"\"\"\n Sets the current tool to edit shape.\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.active_tool = TOOLS.EDIT_SHAPE_TOOL\n self.variables.current_tool = TOOLS.EDIT_SHAPE_TOOL\n\n def set_current_tool_to_edit_shape_coords(self):\n \"\"\"\n Sets the current tool to edit shape coordinates.\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL\n self.variables.current_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL\n\n def set_current_tool_to_pan(self):\n \"\"\"\n Sets the current tool to pan.\n\n Returns\n -------\n None\n \"\"\"\n\n self.variables.active_tool = TOOLS.PAN_TOOL\n self.variables.current_tool = TOOLS.PAN_TOOL\n\n def _set_image_from_pil_image(self, pil_image):\n \"\"\"\n Set image from a PIL image.\n\n Parameters\n ----------\n pil_image : PIL.Image\n\n Returns\n -------\n None\n \"\"\"\n\n nx_pix, ny_pix = pil_image.size\n self.config(scrollregion=(0, 0, nx_pix, ny_pix))\n self.variables._tk_im = ImageTk.PhotoImage(pil_image)\n self.variables.image_id = self.create_image(0, 0, anchor=\"nw\", image=self.variables._tk_im)\n self.tag_lower(self.variables.image_id)\n\n def _pan(self, event):\n \"\"\"\n A pan event.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n new_canvas_x_ul = self.variables.pan_anchor_point_xy[0] - event.x\n new_canvas_y_ul = self.variables.pan_anchor_point_xy[1] - event.y\n new_canvas_x_br = new_canvas_x_ul + self.variables.canvas_width\n new_canvas_y_br = new_canvas_y_ul + self.variables.canvas_height\n canvas_coords = (new_canvas_x_ul, new_canvas_y_ul, new_canvas_x_br, new_canvas_y_br)\n image_coords = self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_coords)\n image_y_ul = image_coords[0]\n image_x_ul = image_coords[1]\n image_y_br = image_coords[2]\n image_x_br = image_coords[3]\n # TODO: fix this, it just snaps back to the original position if the x or y coords are less than zero\n if image_y_ul < 0:\n new_canvas_y_ul = 0\n new_canvas_y_br = self.variables.canvas_height\n if image_x_ul < 0:\n new_canvas_x_ul = 0\n new_canvas_x_br = self.variables.canvas_width\n if image_y_br > self.variables.canvas_image_object.image_reader.full_image_ny:\n image_y_br = self.variables.canvas_image_object.image_reader.full_image_ny\n new_canvas_x_br, new_canvas_y_br = self.variables.canvas_image_object.full_image_yx_to_canvas_coords(\n (image_y_br, image_x_br))\n new_canvas_x_ul, new_canvas_y_ul = int(new_canvas_x_br - self.variables.canvas_width), int(\n new_canvas_y_br - self.variables.canvas_height)\n if image_x_br > self.variables.canvas_image_object.image_reader.full_image_nx:\n image_x_br = self.variables.canvas_image_object.image_reader.full_image_nx\n new_canvas_x_br, new_canvas_y_br = self.variables.canvas_image_object.full_image_yx_to_canvas_coords(\n (image_y_br, image_x_br))\n new_canvas_x_ul, new_canvas_y_ul = int(new_canvas_x_br - self.variables.canvas_width), int(\n new_canvas_y_br - self.variables.canvas_height)\n\n canvas_rect = (new_canvas_x_ul, new_canvas_y_ul, new_canvas_x_br, new_canvas_y_br)\n self.zoom_to_selection(canvas_rect, self.variables.animate_pan)\n self.hide_shape(self.variables.zoom_rect_id)\n\n def config_do_not_scale_image_to_fit(self):\n \"\"\"\n Set configuration to not scale image to fit.\n\n Returns\n -------\n None\n \"\"\"\n # establish scrollbars\n self.sbarv = tkinter.Scrollbar(self, 
orient=tkinter.VERTICAL)\n self.sbarh = tkinter.Scrollbar(self, orient=tkinter.HORIZONTAL)\n self.sbarv.config(command=self.yview)\n self.sbarh.config(command=self.xview)\n\n self.config(yscrollcommand=self.sbarv.set)\n self.config(xscrollcommand=self.sbarh.set)\n self.sbarv.grid(row=0, column=1, stick=tkinter.N+tkinter.S)\n self.sbarh.grid(row=1, column=0, sticky=tkinter.E+tkinter.W)\n\n # TODO: this should have png -> image or image_file.\n # It's not the full canvas? This is confusing.\n def save_full_canvas_as_png(self, output_fname):\n \"\"\"\n Save the canvas as a image file.\n\n Parameters\n ----------\n output_fname : str\n The path of the output file.\n\n Returns\n -------\n None\n \"\"\"\n\n # put a sleep in here in case there is a dialog covering the screen\n # before this method is called.\n time.sleep(0.1)\n # TODO: are we missing a PIL.Image conversion here?\n im = self.save_currently_displayed_canvas_to_numpy_array()\n im.save(output_fname)\n\n # TODO: figure out proper offsets, the current solution is close but not perfect\n def save_currently_displayed_canvas_to_numpy_array(self):\n \"\"\"\n Export the currently displayed canvas as a numpy array.\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n x_ul = self.winfo_rootx() + 1\n y_ul = self.winfo_rooty() + 1\n\n x_lr = x_ul + self.variables.canvas_width\n y_lr = y_ul + self.variables.canvas_height\n im = ImageGrab.grab()\n im = im.crop((x_ul, y_ul, x_lr, y_lr))\n return im\n\n # noinspection PyUnusedLocal\n def activate_color_selector(self, event):\n \"\"\"\n The activate color selector callback function.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n color = colorchooser.askcolor()[1]\n self.variables.foreground_color = color\n self.change_shape_color(self.variables.current_shape_id, color)\n\n def find_closest_shape_coord(self, shape_id, canvas_x, canvas_y):\n \"\"\"\n Finds the closest shape to the provided coordinates, and returns its id.\n\n Parameters\n ----------\n shape_id : int\n canvas_x : int\n canvas_y : int\n\n Returns\n -------\n int\n \"\"\"\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n coords = self.get_shape_canvas_coords(shape_id)\n if vector_object.type == SHAPE_TYPES.RECT:\n select_x1, select_y1, select_x2, select_y2 = coords\n select_xul = min(select_x1, select_x2)\n select_xlr = max(select_x1, select_x2)\n select_yul = min(select_y1, select_y2)\n select_ylr = max(select_y1, select_y2)\n\n ul = (select_xul, select_yul)\n ur = (select_xlr, select_yul)\n lr = (select_xlr, select_ylr)\n ll = (select_xul, select_ylr)\n\n rect_coords = [(select_x1, select_y1), (select_x2, select_y2)]\n\n all_coords = [ul, ur, lr, ll]\n\n squared_distances = []\n for corner_coord in all_coords:\n coord_x, coord_y = corner_coord\n d = (coord_x - canvas_x)**2 + (coord_y - canvas_y)**2\n squared_distances.append(d)\n closest_coord_index = numpy.where(squared_distances == numpy.min(squared_distances))[0][0]\n closest_coord = all_coords[closest_coord_index]\n if closest_coord not in rect_coords:\n if closest_coord == ul:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ul[0], ul[1], lr[0], lr[1]))\n if closest_coord == ur:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ur[0], ur[1], ll[0], ll[1]))\n if closest_coord == lr:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ul[0], ul[1], lr[0], lr[1]))\n if closest_coord == ll:\n 
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ll[0], ll[1], ur[0], ur[1]))\n\n coords = self.get_shape_canvas_coords(shape_id)\n\n squared_distances = []\n coord_indices = numpy.arange(0, len(coords), step=2)\n for i in coord_indices:\n coord_x, coord_y = coords[i], coords[i+1]\n d = (coord_x - canvas_x)**2 + (coord_y - canvas_y)**2\n squared_distances.append(d)\n closest_coord_index = numpy.where(squared_distances == numpy.min(squared_distances))[0][0]\n return closest_coord_index\n\n def find_closest_shape(self, canvas_x, canvas_y):\n \"\"\"\n Finds the closest shape to the provided canvas coordinates, and returns its id.\n\n Parameters\n ----------\n canvas_x : float\n canvas_y : float\n\n Returns\n -------\n int\n \"\"\"\n\n # TODO: improve this. Right now it finds closest shape just based on distance to corners.\n # Improvements should include:\n # finding a closest point if the x/y coordinate is inside a polygon.\n # finding closest distance to each line of a polygon.\n\n non_tool_shape_ids = self.get_non_tool_shape_ids()\n closest_distances = []\n for shape_id in non_tool_shape_ids:\n coords = self.get_shape_canvas_coords(shape_id)\n squared_distances = []\n coord_indices = numpy.arange(0, len(coords), step=2)\n for i in coord_indices:\n coord_x, coord_y = coords[i], coords[i + 1]\n d = (coord_x - canvas_x) ** 2 + (coord_y - canvas_y) ** 2\n squared_distances.append(d)\n closest_distances.append(numpy.min(squared_distances))\n closest_shape_id = non_tool_shape_ids[numpy.where(closest_distances == numpy.min(closest_distances))[0][0]]\n return closest_shape_id\n\n def get_non_tool_shape_ids(self):\n \"\"\"\n Gets the shape ids for the everything except shapes assigned to tools, such as the zoom and selection shapes\n\n Returns\n -------\n List\n \"\"\"\n\n all_shape_ids = self.variables.shape_ids\n tool_shape_ids = self.get_tool_shape_ids()\n return list(numpy.setdiff1d(all_shape_ids, tool_shape_ids))\n\n def get_tool_shape_ids(self):\n \"\"\"\n Gets the shape ids for the zoom rectangle and select rectangle.\n\n Returns\n -------\n List\n \"\"\"\n\n tool_shape_ids = [self.variables.zoom_rect_id,\n self.variables.select_rect_id]\n return tool_shape_ids\n"
]
| [
[
"numpy.square",
"numpy.max",
"numpy.zeros_like",
"numpy.array",
"numpy.asarray",
"numpy.setdiff1d",
"numpy.min",
"scipy.linalg.norm",
"numpy.cross",
"numpy.asanyarray"
]
]
|
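The canvas-widget code in the row above picks the vertex nearest a mouse click by minimizing squared distances over flattened (x, y) coordinates, and filters out tool-owned shapes with numpy.setdiff1d (both calls appear in its API list). A minimal, self-contained sketch of that pattern; the coordinates, click position and shape ids below are made up for illustration:

    import numpy as np

    # Flattened canvas coordinates (x1, y1, x2, y2, ...) of one shape -- hypothetical values.
    coords = np.array([10.0, 12.0, 48.0, 12.0, 48.0, 40.0, 10.0, 40.0])
    canvas_x, canvas_y = 45.0, 38.0  # hypothetical click position

    # Squared distance from the click to every (x, y) vertex.
    xs, ys = coords[0::2], coords[1::2]
    squared_distances = (xs - canvas_x) ** 2 + (ys - canvas_y) ** 2

    # Index of the closest vertex, mirroring the numpy.where(... == numpy.min(...)) idiom above.
    closest_coord_index = int(np.where(squared_distances == np.min(squared_distances))[0][0])
    print(closest_coord_index)  # -> 2, the vertex at (48, 40)

    # Excluding tool shapes, as get_non_tool_shape_ids does with numpy.setdiff1d.
    all_shape_ids, tool_shape_ids = [1, 2, 3, 4], [2, 4]  # hypothetical ids
    non_tool_ids = list(np.setdiff1d(all_shape_ids, tool_shape_ids))  # [1, 3]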
mozga-intel/tvm | [
"544724439efb9a795c92bd7ec9f7929e41c843c6"
]
| [
"tests/python/unittest/test_target_texture_codegen_opencl.py"
]
| [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# 'License'); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport sys\n\nimport numpy as np\nimport pytest\n\nimport tvm\nfrom tvm import autotvm\nfrom tvm import te\nfrom tvm.topi import testing\nfrom tvm.topi.utils import get_const_tuple, simplify\nfrom tvm.topi import nn\n\n\ndef compute_plus_one_rank3(shape):\n X = te.placeholder(shape, name=\"X\", dtype=\"float32\")\n Y = te.compute(shape, lambda i, j, k: X[i, j, k] + 1, name=\"Compute_Y\")\n return X, Y\n\n\ndef schedule_plus_one_rank3(X, Y):\n s = te.create_schedule(Y.op)\n # Xt = s.cache_read(X, \"texture\", [Y])\n # Xt = s.cache_read(X, \"global\", [Y])\n Xt = s.cache_read(X, \"global.texture\", [Y])\n\n # copy to texture stage\n x, y, c = s[Xt].op.axis\n s[Xt].bind(x, te.thread_axis(\"blockIdx.x\"))\n s[Xt].bind(y, te.thread_axis(\"threadIdx.x\"))\n s[Xt].vectorize(c)\n\n # the compute stage\n x, y, c = s[Y].op.axis\n xo, yo, xi, yi = s[Y].tile(x, y, 4, 4)\n s[Y].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[Y].bind(yo, te.thread_axis(\"threadIdx.x\"))\n s[Y].vectorize(c)\n return s\n\n\ndef compute_plus_one_rank5(shape):\n X = te.placeholder(shape, name=\"X\", dtype=\"float32\")\n Y = te.compute(shape, lambda i, j, k, l, m: X[i, j, k, l, m] + 1, name=\"Compute_Y\")\n return X, Y\n\n\ndef schedule_plus_one_rank5(X, Y):\n s = te.create_schedule(Y.op)\n Xt = s.cache_read(X, \"global.texture\", [Y])\n\n # copy to texture stage\n a, b, c, d, e = s[Xt].op.axis\n abc = s[Xt].fuse(a, b, c)\n s[Xt].bind(abc, te.thread_axis(\"blockIdx.x\"))\n s[Xt].bind(d, te.thread_axis(\"threadIdx.x\"))\n s[Xt].vectorize(e)\n\n # the compute stage\n a, b, c, d, e = s[Y].op.axis\n abc = s[Y].fuse(a, b, c)\n xo, yo, xi, yi = s[Y].tile(abc, d, 4, 4)\n s[Y].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[Y].bind(yo, te.thread_axis(\"threadIdx.x\"))\n s[Y].vectorize(e)\n return s\n\n\ndef compute_matmul(shape):\n A = te.placeholder(shape, name=\"A\", dtype=\"float32\")\n B = te.placeholder(shape, name=\"B\", dtype=\"float32\")\n k = te.reduce_axis((0, shape[1]), name=\"k\")\n C = te.compute(\n (shape[0] * shape[2], shape[0] * shape[2]),\n lambda i, j: te.sum(\n A[i // shape[2], k, i % shape[2]].astype(\"float32\")\n * B[j // shape[2], k, j % shape[2]].astype(\"float32\"),\n axis=[k],\n ),\n name=\"Compute_MatMul\",\n )\n return A, B, C\n\n\ndef schedule_matmul(A, B, C, local=False):\n s = te.create_schedule(C.op)\n At = s.cache_read(A, \"global.texture\", [C])\n Bt = s.cache_read(B, \"global.texture\", [C])\n if local:\n Al = s.cache_read(At, \"local\", [C])\n Bl = s.cache_read(Bt, \"local\", [C])\n Cl = s.cache_write(C, \"local\")\n\n bx = te.thread_axis(\"blockIdx.x\")\n tx = te.thread_axis(\"threadIdx.x\")\n\n def copy_to_texture(stage):\n _io, _k, _ii = s[stage].op.axis\n s[stage].vectorize(_ii)\n s[stage].bind(_io, bx)\n 
s[stage].bind(_k, tx)\n\n copy_to_texture(At)\n copy_to_texture(Bt)\n\n # copy to global stage\n _i, _j = s[C].op.axis\n xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)\n s[C].unroll(xi)\n s[C].vectorize(yi)\n s[C].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[C].bind(yo, te.thread_axis(\"threadIdx.x\"))\n\n # the compute stage\n s[Cl].compute_at(s[C], yo)\n (_k,) = Cl.op.reduce_axis\n _x, _y = s[Cl].op.axis\n s[Cl].reorder(_k, _x, _y)\n s[Cl].unroll(_x)\n s[Cl].vectorize(_y)\n\n if local:\n s[Al].compute_at(s[Cl], _k)\n s[Al].vectorize(s[Al].op.axis[-1])\n s[Bl].compute_at(s[Cl], _k)\n s[Bl].vectorize(s[Bl].op.axis[-1])\n\n return s\n\n\ndef compute_matmul_inner(shape):\n A = te.placeholder(shape, name=\"A\", dtype=\"float32\")\n B = te.placeholder(shape, name=\"B\", dtype=\"float32\")\n k = te.reduce_axis((0, shape[1] * shape[2]), name=\"k\")\n # (M, K) x (N, K)\n # (32, 256) x (32, 256)\n # (32, 64, 4) x (32, 64, 4)\n C = te.compute(\n (shape[0], shape[0]),\n lambda i, j: te.sum(\n A[i, k // shape[2], k % shape[2]].astype(\"float32\")\n * B[j, k // shape[2], k % shape[2]].astype(\"float32\"),\n axis=[k],\n ),\n name=\"Compute_MatMul\",\n )\n return A, B, C\n\n\ndef schedule_matmul_inner(A, B, C, local=False):\n s = te.create_schedule(C.op)\n At = s.cache_read(A, \"global.texture\", [C])\n Bt = s.cache_read(B, \"global.texture\", [C])\n if local:\n Al = s.cache_read(At, \"local\", [C])\n Bl = s.cache_read(Bt, \"local\", [C])\n Cl = s.cache_write(C, \"local\")\n\n bx = te.thread_axis(\"blockIdx.x\")\n tx = te.thread_axis(\"threadIdx.x\")\n\n def copy_to_texture(stage):\n _i, _ko, _ki = s[stage].op.axis\n s[stage].vectorize(_ki)\n s[stage].bind(_i, bx)\n s[stage].bind(_ko, tx)\n\n copy_to_texture(At)\n copy_to_texture(Bt)\n\n # copy to global stage\n _i, _j = s[C].op.axis\n xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)\n s[C].unroll(xi)\n s[C].vectorize(yi)\n s[C].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[C].bind(yo, te.thread_axis(\"threadIdx.x\"))\n\n # the compute stage\n s[Cl].compute_at(s[C], yo)\n (_k,) = Cl.op.reduce_axis\n _x, _y = s[Cl].op.axis\n s[Cl].reorder(_x, _y, _k)\n s[Cl].unroll(_x)\n # TODO(csullivan): consider whether the below error is worth resolving\n # s[Cl].vectorize(_y) # error\n\n if local:\n s[Al].compute_at(s[Cl], _x)\n s[Al].vectorize(s[Al].op.axis[-1])\n s[Bl].compute_at(s[Cl], _x)\n s[Bl].vectorize(s[Bl].op.axis[-1])\n\n return s\n\n\ndef compute_matmul_vector_accumulator(shapeA, shapeB):\n # A x B\n # (K/4, M, K%4) x (K, N/4, N%4) = (M, N)\n # (32, 64, 4) x (128, 16, 4) = (64, 64)\n A = te.placeholder(shapeA, name=\"A\", dtype=\"float32\")\n B = te.placeholder(shapeB, name=\"B\", dtype=\"float32\")\n k = te.reduce_axis((0, shapeB[0]), name=\"k\")\n C = te.compute(\n (shapeA[1], shapeB[1] * shapeB[2]),\n lambda i, j: te.sum(\n A[k // shapeA[-1], i, k % shapeA[-1]].astype(\"float32\")\n * B[k, j // shapeB[-1], j % shapeB[-1]].astype(\"float32\"),\n axis=[k],\n ),\n name=\"Compute_MatMul\",\n )\n return A, B, C\n\n\ndef schedule_matmul_vector_accumulator(A, B, C, local=False):\n s = te.create_schedule(C.op)\n At = s.cache_read(A, \"global.texture\", [C])\n Bt = s.cache_read(B, \"global.texture\", [C])\n if local:\n Al = s.cache_read(At, \"local\", [C])\n Bl = s.cache_read(Bt, \"local\", [C])\n Cl = s.cache_write(C, \"local\")\n\n def copy_to_texture(stage):\n _y, _x, _v = s[stage].op.axis\n # TODO(csullivan): removing this vectorize results in numerical errors, autovectorize\n s[stage].vectorize(_v)\n s[stage].bind(_y, te.thread_axis(\"blockIdx.x\"))\n s[stage].bind(_x, 
te.thread_axis(\"threadIdx.x\"))\n\n copy_to_texture(At)\n copy_to_texture(Bt)\n\n # copy to global stage\n _i, _j = s[C].op.axis\n xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)\n s[C].unroll(xi)\n s[C].vectorize(yi)\n s[C].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[C].bind(yo, te.thread_axis(\"threadIdx.x\"))\n\n # the compute stage\n s[Cl].compute_at(s[C], yo)\n (_k,) = Cl.op.reduce_axis\n _a, _b = s[Cl].op.axis\n _ko, _ki = s[Cl].split(_k, factor=4)\n s[Cl].reorder(_ko, _a, _ki, _b)\n s[Cl].unroll(_ki)\n s[Cl].unroll(_a)\n s[Cl].vectorize(_b)\n\n if local:\n s[Al].compute_at(s[Cl], _a)\n _aa, _ka, _ba = s[Al].op.axis\n # TODO(csullivan)[BEFORE PR]: removing this vectorize command causes a crash. This needs to be autovectorized.\n s[Al].vectorize(_ba)\n s[Bl].compute_at(s[Cl], _ko)\n _ab, _kb, _bb = s[Bl].op.axis\n s[Bl].vectorize(_bb)\n s[Bl].unroll(_ab)\n\n return s\n\n\ndef compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):\n # conv2d( [N, C, H, W, c] , [1, 1, C, K, k]\n data = te.placeholder(input_shape, name=\"data\", dtype=\"float32\")\n filt = te.placeholder(filter_shape, name=\"filter\", dtype=\"float32\")\n c = te.reduce_axis((0, input_shape[1]), name=\"C\")\n c4 = te.reduce_axis((0, input_shape[-1]), name=\"c4\")\n kh = te.reduce_axis((0, filter_shape[0]), name=\"kh\")\n kw = te.reduce_axis((0, filter_shape[1]), name=\"kw\")\n conv = te.compute(\n (input_shape[0], filter_shape[-2], input_shape[2], input_shape[3], filter_shape[-1]),\n lambda n, ko, i, j, ki: te.sum(\n data[n, c, i, j, c4].astype(\"float32\")\n * filt[kh, kw, c * input_shape[-1] + c4, ko, ki].astype(\"float32\"),\n axis=[kh, kw, c, c4],\n ),\n # name=\"Compute_conv2d_1x1_NCHWc_RSCKk\",\n name=\"conv2d_1x1\",\n )\n return data, filt, conv\n\n\ndef schedule_conv2d_1x1_NCHWc_RSCKk(data, filt, conv):\n # inputs: (1, 128//4, 56, 56, 4), (1, 1, 128, 128//4, 4)\n # outputs:\n s = te.create_schedule(conv.op)\n A, B, C = data, filt, conv\n At = s.cache_read(A, \"global.texture\", [C])\n Bt = s.cache_read(B, \"global.texture\", [C])\n Al = s.cache_read(At, \"local\", [C])\n Bl = s.cache_read(Bt, \"local\", [C])\n Cl = s.cache_write(C, \"local\")\n\n def copy_to_texture(stage):\n axes = s[stage].op.axis\n fused = s[stage].fuse(*axes[:-1])\n block, thread = s[stage].split(fused, factor=32)\n s[stage].vectorize(axes[-1])\n s[stage].bind(block, te.thread_axis(\"blockIdx.x\"))\n s[stage].bind(thread, te.thread_axis(\"threadIdx.x\"))\n\n copy_to_texture(At)\n copy_to_texture(Bt)\n\n _n, _ko, _h, _w, _ki = s[C].op.axis\n s[C].vectorize(_ki)\n s[C].bind(_n, te.thread_axis(\"blockIdx.x\"))\n s[C].bind(_ko, te.thread_axis(\"threadIdx.x\"))\n\n s[Cl].compute_at(s[C], _w)\n _nl, _kol, _hl, _wl, _kil = s[Cl].op.axis\n _khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis\n _clo, _cli = s[Cl].split(_cl, factor=4)\n s[Cl].reorder(_clo, _cli, _cl4, _kil)\n s[Cl].unroll(_cli)\n s[Cl].unroll(_cl4)\n s[Cl].vectorize(_kil)\n\n s[Al].compute_at(s[Cl], _cli)\n s[Al].vectorize(s[Al].op.axis[-1])\n s[Bl].compute_at(s[Cl], _kwl)\n s[Bl].vectorize(s[Bl].op.axis[-1])\n\n return s\n\n\ndef compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):\n # input_shape = [W, C, H, N, c] -> [W, C, H*N, c]\n # filter_shape = [C, R, S, K, k] -> [C, R*S*K, k]\n # output_shape: [WK, HN, k] -> [W, K, H, N, k]\n data = te.placeholder(input_shape, name=\"data\", dtype=\"float32\")\n filt = te.placeholder(filter_shape, name=\"filter\", dtype=\"float32\")\n\n packed_data = te.compute(\n (input_shape[0], input_shape[1], input_shape[2] * input_shape[3], input_shape[4]),\n 
lambda i, j, k, l: data[i, j, k // input_shape[3], k % input_shape[3], l],\n name=\"packed_data\",\n )\n\n # Logical transformation of Nd -> 3d tensor\n # CRSKk -> C|RSK|k\n # r = rsk // SK\n # sk = rsk % SK\n # s = sk // K == (rsk % SK) // K == (rsk // K) % S\n # k = sk % K == (rsk % SK) % K == rsk % K\n packed_filter = te.compute(\n (filter_shape[0], filter_shape[1] * filter_shape[2] * filter_shape[3], filter_shape[4]),\n lambda i, j, k: filt[\n i,\n j // (filter_shape[3] * filter_shape[2]),\n (j // filter_shape[3]) % filter_shape[2],\n j % filter_shape[3],\n k,\n ],\n name=\"packed_filter\",\n )\n\n c = te.reduce_axis((0, input_shape[1]), name=\"C\")\n c4 = te.reduce_axis((0, input_shape[-1]), name=\"c4\")\n r = te.reduce_axis((0, filter_shape[1]), name=\"r\")\n s = te.reduce_axis((0, filter_shape[2]), name=\"s\")\n\n conv = te.compute(\n (input_shape[0], filter_shape[3], input_shape[2], input_shape[3], filter_shape[4]),\n lambda w, ko, h, n, ki: te.sum(\n packed_data[w, c, h * input_shape[3] + n, c4].astype(\"float32\")\n * packed_filter[\n c * input_shape[-1] + c4, ((r * filter_shape[2]) + s) * filter_shape[3] + ko, ki\n ].astype(\"float32\"),\n axis=[r, s, c, c4],\n ),\n name=\"conv2d_1x1\",\n )\n return data, filt, packed_data, packed_filter, conv\n\n\ndef schedule_conv2d_1x1_WCHNc_CRSKk(data, filt, packed_data, packed_filter, conv):\n # data: [W, C, H*N, c]\n # filter: [C, R*S*K, k]\n # output: [W, K, H, N, k]\n\n # conv2d( [N, C, H, W, c] , [1, 1, C, K, k]\n # inputs: (1, 128//4, 56, 56, 4), (1, 1, 128, 128//4, 4)\n\n # data: (56, 128//4, 56*1, 4) = (56, 32, 56, 4)\n # filt: (128, 1*1*128//4, 4) = (128, 32, 4)\n # conv: (56, 32, 56, 1, 4)\n\n s = te.create_schedule(conv.op)\n cfg = autotvm.get_config()\n\n s[packed_data].compute_inline()\n s[packed_filter].compute_inline()\n A, B, C = packed_data, packed_filter, conv\n At = s.cache_read(A, \"global.texture\", [C])\n Bt = s.cache_read(B, \"global.texture\", [C])\n Al = s.cache_read(At, \"local\", [C])\n Bl = s.cache_read(Bt, \"local\", [C])\n Cl = s.cache_write(C, \"local\")\n\n def copy_to_texture(stage):\n axes = s[stage].op.axis\n fused = s[stage].fuse(*axes[:-1])\n block, thread = s[stage].split(fused, factor=32)\n s[stage].vectorize(axes[-1])\n s[stage].bind(block, te.thread_axis(\"blockIdx.x\"))\n s[stage].bind(thread, te.thread_axis(\"threadIdx.x\"))\n\n copy_to_texture(At)\n copy_to_texture(Bt)\n\n _w, _ko, _h, _n, _ki = s[C].op.axis\n kernel_scope, _n = s[C].split(_n, nparts=1)\n\n cfg.define_split(\"tile_f\", _ko, num_outputs=4)\n cfg.define_split(\"tile_w\", _w, num_outputs=4)\n cfg.define_split(\"tile_h\", _h, num_outputs=4)\n cfg.define_knob(\"auto_unroll_max_step\", [0, 512, 1500])\n\n bk, vk, tk, ki = cfg[\"tile_f\"].apply(s, C, _ko)\n bw, vw, tw, wi = cfg[\"tile_w\"].apply(s, C, _w)\n bh, vh, th, hi = cfg[\"tile_h\"].apply(s, C, _h)\n s[C].reorder(bh, _n, vh, th, hi)\n bhn = s[C].fuse(bh, _n)\n\n s[C].bind(bk, te.thread_axis(\"blockIdx.z\"))\n s[C].bind(bhn, te.thread_axis(\"blockIdx.y\"))\n s[C].bind(bw, te.thread_axis(\"blockIdx.x\"))\n s[C].bind(vk, te.thread_axis(\"vthread\"))\n s[C].bind(vh, te.thread_axis(\"vthread\"))\n s[C].bind(vw, te.thread_axis(\"vthread\"))\n s[C].bind(tk, te.thread_axis(\"threadIdx.z\"))\n s[C].bind(th, te.thread_axis(\"threadIdx.y\"))\n s[C].bind(tw, te.thread_axis(\"threadIdx.x\"))\n s[C].reorder(bw, bk, bhn, vw, vk, vh, tw, tk, th, ki, hi, wi, _ki)\n s[C].vectorize(_ki)\n\n # TODO(csullivan): Try uneven workgroup split\n # _wo, _wi = s[C].split(_w, factor=4)\n # #_hno, _hni = 
s[C].split(_hn, factor=8)\n # #s[C].reorder(_wo, _wi, _ko, _hno, _hni, _ki)\n # s[C].reorder(_wo, _ko, _hn, _ki, _wi)\n # s[C].unroll(_wi)\n\n # # mace:\n # # const int out_ch_blk = get_global_id(0);\n # # const int out_w_blk = get_global_id(1);\n # # const int out_hb = get_global_id(2);\n\n # bx = te.thread_axis(\"blockIdx.x\")\n # by = te.thread_axis(\"blockIdx.y\")\n # bz = te.thread_axis(\"blockIdx.z\")\n # s[C].bind(_ko, bx)\n # s[C].bind(_wo, by)\n # s[C].bind(_hn, bz)\n\n # s[Cl].compute_at(s[C], _hn)\n s[Cl].compute_at(s[C], th)\n\n _wl, _kol, _hl, _nl, _kil = s[Cl].op.axis\n _khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis\n\n cfg.define_split(\"tile_c\", _cl, num_outputs=2)\n cfg.define_split(\"tile_kh\", _khl, num_outputs=2)\n cfg.define_split(\"tile_kw\", _kwl, num_outputs=2)\n\n _clo, _cli = cfg[\"tile_c\"].apply(s, Cl, _cl)\n _khlo, _khli = cfg[\"tile_kh\"].apply(s, Cl, _khl)\n _kwlo, _kwli = cfg[\"tile_kw\"].apply(s, Cl, _kwl)\n # s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)\n s[Cl].reorder(_clo, _khlo, _kwlo, _cli, _cl4, _khli, _kwli, _kol, _hl, _nl, _kil, _wl)\n # s[Cl].reorder(_clo, _khlo, _kwlo, _cli, _cl4, _khli, _kwli)\n # s[Cl].reorder(_cl, _cl4, _kil, _wl)\n s[Cl].unroll(_cl4)\n s[Cl].unroll(_wl)\n s[Cl].vectorize(_kil)\n\n _wla, _cla, _hnla, _cl4a = s[Al].op.axis\n s[Al].compute_at(s[Cl], _cli)\n s[Al].vectorize(_cl4a)\n s[Al].unroll(_wla)\n\n _clb, _rskolb, _kilb = s[Bl].op.axis\n s[Bl].compute_at(s[Cl], _cli)\n s[Bl].vectorize(_kilb)\n s[Bl].unroll(_clb)\n\n s[C].pragma(kernel_scope, \"auto_unroll_max_step\", cfg[\"auto_unroll_max_step\"].val)\n\n WO, K, HO, N, K4 = get_const_tuple(C.shape)\n RSC, _, _ = get_const_tuple(B.shape)\n cfg.add_flop(2 * N * K * K4 * HO * WO * RSC)\n\n return s\n\n\ndef compute_conv2d_NCHWc_KCRSk(Input, Filter, stride, padding, dilation, out_dtype=None):\n \"\"\"Convolution operator in NCHWc layout. 
\"\"\"\n\n if out_dtype is None:\n out_dtype = Input.dtype\n assert isinstance(stride, int) or len(stride) == 2\n assert isinstance(dilation, int) or len(dilation) == 2\n if isinstance(stride, int):\n stride_h = stride_w = stride\n else:\n stride_h, stride_w = stride\n\n if isinstance(dilation, int):\n dilation_h = dilation_w = dilation\n else:\n dilation_h, dilation_w = dilation\n\n batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape\n num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape\n # compute the output shape\n dilated_kernel_h = (kernel_h - 1) * dilation_h + 1\n dilated_kernel_w = (kernel_w - 1) * dilation_w + 1\n pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(\n padding, (dilated_kernel_h, dilated_kernel_w)\n )\n\n out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)\n out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)\n # compute graph\n pad_before = [0, 0, pad_top, pad_left, 0]\n pad_after = [0, 0, pad_down, pad_right, 0]\n temp = nn.pad(Input, pad_before, pad_after, name=\"pad_temp\")\n\n rcc = te.reduce_axis((0, in_channel_chunk), name=\"rc\")\n rcb = te.reduce_axis((0, in_channel_block), name=\"rc\")\n ry = te.reduce_axis((0, kernel_h), name=\"ry\")\n rx = te.reduce_axis((0, kernel_w), name=\"rx\")\n\n # NCHWc x KCRSk\n # texture: NCH|W|c\n # texture: K|CRS|k\n # c = crs//RS\n # rs = crs % RS\n # r = rs // W == (crs // S) % R\n # s = rs % W == crs % S\n Filter = te.compute(\n (num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),\n lambda ffc, crs, ffb: Filter[\n ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb\n ],\n name=\"packed_filter\",\n )\n return te.compute(\n (batch, num_filter_chunk, out_height, out_width, num_filter_block),\n lambda nn, ffc, yy, xx, ffb: te.sum(\n temp[\n nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb\n ].astype(out_dtype)\n * Filter[\n ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb\n ].astype(out_dtype),\n axis=[rcc, rcb, ry, rx],\n ),\n tag=\"conv2d_nchwc_kcrsk_texture\",\n )\n\n\ndef schedule_conv2d_NCHWc_KCRSk(cfg, s, conv):\n \"\"\"schedule optimized for batch size = 1\"\"\"\n\n ##### space definition begin #####\n n, fc, y, x, fb = s[conv].op.axis\n rcc, rcb, ry, rx = s[conv].op.reduce_axis\n cfg.define_split(\"tile_fc\", fc, num_outputs=4)\n cfg.define_split(\"tile_y\", y, num_outputs=4)\n cfg.define_split(\"tile_x\", x, num_outputs=4)\n cfg.define_split(\"tile_rcc\", rcc, num_outputs=2)\n cfg.define_split(\"tile_ry\", ry, num_outputs=2)\n cfg.define_split(\"tile_rx\", rx, num_outputs=2)\n cfg.define_knob(\"auto_unroll_max_step\", [0, 512, 1500])\n\n pad_data, flattened_kernel = s[conv].op.input_tensors\n kernel = s[flattened_kernel].op.input_tensors[0]\n s[flattened_kernel].compute_inline()\n\n s[pad_data].compute_inline()\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n kernel = flattened_kernel\n\n if conv.op in s.outputs:\n output = conv\n OL = s.cache_write(conv, \"local\")\n else:\n output = s.outputs[0].output(0)\n s[conv].set_scope(\"local\")\n OL = conv\n\n # create cache stage\n AT = s.cache_read(pad_data, \"global.texture\", [OL])\n WT = s.cache_read(kernel, \"global.texture\", [OL])\n\n def copy_to_texture(stage):\n axes = s[stage].op.axis\n fused = s[stage].fuse(*axes[:-1])\n block, thread = s[stage].split(fused, 
factor=32)\n s[stage].vectorize(axes[-1])\n s[stage].bind(block, te.thread_axis(\"blockIdx.x\"))\n s[stage].bind(thread, te.thread_axis(\"threadIdx.x\"))\n\n copy_to_texture(AT)\n copy_to_texture(WT)\n\n AA = s.cache_read(AT, \"shared\", [OL])\n WW = s.cache_read(WT, \"shared\", [OL])\n\n # tile and bind spatial axes\n n, fc, y, x, fb = s[output].op.axis\n\n kernel_scope, n = s[output].split(n, nparts=1)\n\n bf, vf, tf, fi = cfg[\"tile_fc\"].apply(s, output, fc)\n by, vy, ty, yi = cfg[\"tile_y\"].apply(s, output, y)\n bx, vx, tx, xi = cfg[\"tile_x\"].apply(s, output, x)\n\n bf = s[output].fuse(n, bf)\n s[output].bind(bf, te.thread_axis(\"blockIdx.z\"))\n s[output].bind(by, te.thread_axis(\"blockIdx.y\"))\n s[output].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(vf, te.thread_axis(\"vthread\"))\n s[output].bind(vy, te.thread_axis(\"vthread\"))\n s[output].bind(vx, te.thread_axis(\"vthread\"))\n s[output].bind(tf, te.thread_axis(\"threadIdx.z\"))\n s[output].bind(ty, te.thread_axis(\"threadIdx.y\"))\n s[output].bind(tx, te.thread_axis(\"threadIdx.x\"))\n s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)\n s[output].vectorize(fb)\n s[OL].compute_at(s[output], tx)\n\n # tile reduction axes\n n, fc, y, x, fb = s[OL].op.axis\n\n rcc, rcb, ry, rx = s[OL].op.reduce_axis\n rco, rci = cfg[\"tile_rcc\"].apply(s, OL, rcc)\n ryo, ryi = cfg[\"tile_ry\"].apply(s, OL, ry)\n rxo, rxi = cfg[\"tile_rx\"].apply(s, OL, rx)\n\n # TODO(csullivan): check position of rcb\n s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)\n s[OL].vectorize(fb)\n s[OL].unroll(rcb)\n\n s[AA].compute_at(s[OL], rxo)\n s[WW].compute_at(s[OL], rxo)\n # cooperative fetching\n for load in [AA, WW]:\n if load == WW:\n n, fyx, v = s[load].op.axis\n fused = s[load].fuse(n, fyx)\n else:\n n, f, y, x, v = s[load].op.axis\n fused = s[load].fuse(n, f, y, x)\n tz, fused = s[load].split(fused, nparts=cfg[\"tile_fc\"].size[2])\n ty, fused = s[load].split(fused, nparts=cfg[\"tile_y\"].size[2])\n tx, fused = s[load].split(fused, nparts=cfg[\"tile_x\"].size[2])\n s[load].bind(tz, te.thread_axis(\"threadIdx.z\"))\n s[load].bind(ty, te.thread_axis(\"threadIdx.y\"))\n s[load].bind(tx, te.thread_axis(\"threadIdx.x\"))\n s[load].vectorize(v)\n\n # unroll\n s[output].pragma(kernel_scope, \"auto_unroll_max_step\", cfg[\"auto_unroll_max_step\"].val)\n\n N, OCC, OH, OW, OCB = get_const_tuple(output.shape)\n _, ICKHKW, _ = get_const_tuple(kernel.shape)\n\n if isinstance(N, int):\n cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)\n\n\ndef compute_conv2d_NCHWc_KCRSk_acc32(Input, Filter, stride, padding, dilation, out_dtype=None):\n \"\"\"Convolution operator in NCHWc layout. 
\"\"\"\n\n if out_dtype is None:\n out_dtype = Input.dtype\n assert isinstance(stride, int) or len(stride) == 2\n assert isinstance(dilation, int) or len(dilation) == 2\n if isinstance(stride, int):\n stride_h = stride_w = stride\n else:\n stride_h, stride_w = stride\n\n if isinstance(dilation, int):\n dilation_h = dilation_w = dilation\n else:\n dilation_h, dilation_w = dilation\n\n batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape\n num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape\n # compute the output shape\n dilated_kernel_h = (kernel_h - 1) * dilation_h + 1\n dilated_kernel_w = (kernel_w - 1) * dilation_w + 1\n pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(\n padding, (dilated_kernel_h, dilated_kernel_w)\n )\n\n out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)\n out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)\n # compute graph\n pad_before = [0, 0, pad_top, pad_left, 0]\n pad_after = [0, 0, pad_down, pad_right, 0]\n temp = nn.pad(Input, pad_before, pad_after, name=\"pad_temp\")\n\n rcc = te.reduce_axis((0, in_channel_chunk), name=\"rc\")\n rcb = te.reduce_axis((0, in_channel_block), name=\"rc\")\n ry = te.reduce_axis((0, kernel_h), name=\"ry\")\n rx = te.reduce_axis((0, kernel_w), name=\"rx\")\n\n # NCHWc x KCRSk\n # texture: NCH|W|c\n # texture: K|CRS|k\n # c = crs//RS\n # rs = crs % RS\n # r = rs // W == (crs // S) % R\n # s = rs % W == crs % S\n Filter = te.compute(\n (num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),\n lambda ffc, crs, ffb: Filter[\n ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb\n ],\n name=\"packed_filter\",\n )\n conv = te.compute(\n (batch, num_filter_chunk, out_height, out_width, num_filter_block),\n lambda nn, ffc, yy, xx, ffb: te.sum(\n (\n temp[nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb]\n * Filter[ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb]\n ).astype(out_dtype),\n axis=[rcc, rcb, ry, rx],\n ),\n tag=\"conv2d_nchwc_kcrsk_texture\",\n )\n output = te.compute(conv.shape, lambda n, fc, y, x, fb: conv[n, fc, y, x, fb].astype(\"float32\"))\n return output\n\n\ndef schedule_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):\n \"\"\"schedule optimized for batch size = 1\"\"\"\n\n conv = output.op.input_tensors[0]\n\n ##### space definition begin #####\n n, fc, y, x, fb = s[conv].op.axis\n rcc, rcb, ry, rx = s[conv].op.reduce_axis\n cfg.define_split(\"tile_fc\", fc, num_outputs=4)\n cfg.define_split(\"tile_y\", y, num_outputs=4)\n cfg.define_split(\"tile_x\", x, num_outputs=4)\n cfg.define_split(\"tile_rcc\", rcc, num_outputs=2)\n cfg.define_split(\"tile_ry\", ry, num_outputs=2)\n cfg.define_split(\"tile_rx\", rx, num_outputs=2)\n cfg.define_knob(\"auto_unroll_max_step\", [0, 512, 1500])\n\n pad_data, flattened_kernel = s[conv].op.input_tensors\n kernel = s[flattened_kernel].op.input_tensors[0]\n s[flattened_kernel].compute_inline()\n\n s[pad_data].compute_inline()\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n kernel = flattened_kernel\n\n if conv.op in s.outputs:\n output = conv\n OL = s.cache_write(conv, \"local\")\n else:\n output = s.outputs[0].output(0)\n s[conv].set_scope(\"local\")\n OL = conv\n\n # create cache stage\n AT = s.cache_read(pad_data, \"global.texture\", [OL])\n WT = s.cache_read(kernel, \"global.texture\", [OL])\n\n 
def copy_to_texture(stage):\n axes = s[stage].op.axis\n fused = s[stage].fuse(*axes[:-1])\n block, thread = s[stage].split(fused, factor=32)\n s[stage].vectorize(axes[-1])\n s[stage].bind(block, te.thread_axis(\"blockIdx.x\"))\n s[stage].bind(thread, te.thread_axis(\"threadIdx.x\"))\n\n copy_to_texture(AT)\n copy_to_texture(WT)\n\n AA = s.cache_read(AT, \"shared\", [OL])\n WW = s.cache_read(WT, \"shared\", [OL])\n\n # tile and bind spatial axes\n n, fc, y, x, fb = s[output].op.axis\n\n kernel_scope, n = s[output].split(n, nparts=1)\n\n bf, vf, tf, fi = cfg[\"tile_fc\"].apply(s, output, fc)\n by, vy, ty, yi = cfg[\"tile_y\"].apply(s, output, y)\n bx, vx, tx, xi = cfg[\"tile_x\"].apply(s, output, x)\n\n bf = s[output].fuse(n, bf)\n s[output].bind(bf, te.thread_axis(\"blockIdx.z\"))\n s[output].bind(by, te.thread_axis(\"blockIdx.y\"))\n s[output].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(vf, te.thread_axis(\"vthread\"))\n s[output].bind(vy, te.thread_axis(\"vthread\"))\n s[output].bind(vx, te.thread_axis(\"vthread\"))\n s[output].bind(tf, te.thread_axis(\"threadIdx.z\"))\n s[output].bind(ty, te.thread_axis(\"threadIdx.y\"))\n s[output].bind(tx, te.thread_axis(\"threadIdx.x\"))\n s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)\n s[output].vectorize(fb)\n\n s[OL].compute_at(s[output], tx)\n\n # tile reduction axes\n n, fc, y, x, fb = s[OL].op.axis\n\n rcc, rcb, ry, rx = s[OL].op.reduce_axis\n rco, rci = cfg[\"tile_rcc\"].apply(s, OL, rcc)\n ryo, ryi = cfg[\"tile_ry\"].apply(s, OL, ry)\n rxo, rxi = cfg[\"tile_rx\"].apply(s, OL, rx)\n\n # TODO(csullivan): check position of rcb\n s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)\n s[OL].vectorize(fb)\n s[OL].unroll(rcb)\n\n s[AA].compute_at(s[OL], rxo)\n s[WW].compute_at(s[OL], rxo)\n # cooperative fetching\n for load in [AA, WW]:\n if load == WW:\n n, fyx, v = s[load].op.axis\n fused = s[load].fuse(n, fyx)\n else:\n n, f, y, x, v = s[load].op.axis\n fused = s[load].fuse(n, f, y, x)\n tz, fused = s[load].split(fused, nparts=cfg[\"tile_fc\"].size[2])\n ty, fused = s[load].split(fused, nparts=cfg[\"tile_y\"].size[2])\n tx, fused = s[load].split(fused, nparts=cfg[\"tile_x\"].size[2])\n s[load].bind(tz, te.thread_axis(\"threadIdx.z\"))\n s[load].bind(ty, te.thread_axis(\"threadIdx.y\"))\n s[load].bind(tx, te.thread_axis(\"threadIdx.x\"))\n s[load].vectorize(v)\n\n # unroll\n s[output].pragma(kernel_scope, \"auto_unroll_max_step\", cfg[\"auto_unroll_max_step\"].val)\n\n N, OCC, OH, OW, OCB = get_const_tuple(output.shape)\n _, ICKHKW, _ = get_const_tuple(kernel.shape)\n\n if isinstance(N, int):\n cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)\n\n\ndef compute_depthwise_conv2d_NCHWc_KCRSk_acc32(\n Input, Filter, stride, padding, dilation, out_dtype=None\n):\n \"\"\"Depthwise convolution operator in NCHWc layout. 
\"\"\"\n if out_dtype is None:\n out_dtype = Input.dtype\n assert isinstance(stride, int) or len(stride) == 2\n assert isinstance(dilation, int) or len(dilation) == 2\n\n if isinstance(stride, int):\n stride_h = stride_w = stride\n else:\n stride_h, stride_w = stride\n\n if isinstance(dilation, int):\n dilation_h = dilation_w = dilation\n else:\n dilation_h, dilation_w = dilation\n\n batch, channel_chunk, in_height, in_width, channel_block = Input.shape\n _, channel_multiplier, kernel_h, kernel_w, _ = Filter.shape\n\n # compute the output shape\n dilated_kernel_h = (kernel_h - 1) * dilation_h + 1\n dilated_kernel_w = (kernel_w - 1) * dilation_w + 1\n pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(\n padding, (dilated_kernel_h, dilated_kernel_w)\n )\n out_channel_chunk = simplify(channel_chunk * channel_multiplier)\n out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)\n out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)\n # compute graph\n pad_before = [0, 0, pad_top, pad_left, 0]\n pad_after = [0, 0, pad_down, pad_right, 0]\n temp = nn.pad(Input, pad_before, pad_after, name=\"pad_temp\")\n\n ry = te.reduce_axis((0, kernel_h), name=\"ry\")\n rx = te.reduce_axis((0, kernel_w), name=\"rx\")\n\n # NCHWc x CMRSc = [N,(C//4)M,OH,OW, 4c]\n # NCHWc x CMRS\n # texture: NCH|W|c\n # texture: C|MRS|c\n # output: N\n # m = mrs//RS\n # rs = mrs % RS\n # r = rs // W == (mrs // S) % R\n # s = rs % W == mrs % S\n Filter = te.compute(\n (channel_chunk, channel_multiplier * kernel_h * kernel_w, channel_block),\n lambda ffc, mrs, ffb: Filter[\n ffc, mrs // (kernel_h * kernel_w), (mrs // kernel_w) % kernel_h, mrs % kernel_w, ffb\n ],\n name=\"packed_filter\",\n )\n\n conv = te.compute(\n (batch, out_channel_chunk, out_height, out_width, channel_block),\n lambda nn, ffc, yy, xx, ffb: te.sum(\n (\n temp[\n nn,\n ffc // channel_multiplier,\n yy * stride_h + ry * dilation_h,\n xx * stride_w + rx * dilation_w,\n ffb,\n ]\n * Filter[\n ffc // channel_multiplier,\n ((ffc % channel_multiplier) * kernel_h + ry) * kernel_w + rx,\n ffb,\n ]\n ).astype(out_dtype),\n axis=[ry, rx],\n ),\n tag=\"depthwise_conv2d_nchwc_kcrsk_texture\",\n )\n return te.compute(\n conv.shape, lambda n, ffc, y, x, ffb: conv[n, ffc, y, x, ffb].astype(\"float32\")\n )\n\n\ndef schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):\n \"\"\"schedule optimized for batch size = 1\"\"\"\n\n conv = output.op.input_tensors[0]\n\n ##### space definition begin #####\n n, fc, y, x, fb = s[conv].op.axis\n ry, rx = s[conv].op.reduce_axis\n cfg.define_split(\"tile_fc\", fc, num_outputs=4)\n cfg.define_split(\"tile_y\", y, num_outputs=4)\n cfg.define_split(\"tile_x\", x, num_outputs=4)\n cfg.define_split(\"tile_ry\", ry, num_outputs=2)\n cfg.define_split(\"tile_rx\", rx, num_outputs=2)\n cfg.define_knob(\"auto_unroll_max_step\", [0, 512, 1500])\n\n pad_data, flattened_kernel = s[conv].op.input_tensors\n kernel = s[flattened_kernel].op.input_tensors[0]\n s[flattened_kernel].compute_inline()\n\n s[pad_data].compute_inline()\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n kernel = flattened_kernel\n\n if conv.op in s.outputs:\n output = conv\n OL = s.cache_write(conv, \"local\")\n else:\n output = s.outputs[0].output(0)\n s[conv].set_scope(\"local\")\n OL = conv\n\n # create cache stage\n AT = s.cache_read(pad_data, \"global.texture\", [OL])\n WT = s.cache_read(kernel, \"global.texture\", [OL])\n\n def 
copy_to_texture(stage):\n axes = s[stage].op.axis\n fused = s[stage].fuse(*axes[:-1])\n block, thread = s[stage].split(fused, factor=32)\n s[stage].vectorize(axes[-1])\n s[stage].bind(block, te.thread_axis(\"blockIdx.x\"))\n s[stage].bind(thread, te.thread_axis(\"threadIdx.x\"))\n\n copy_to_texture(AT)\n copy_to_texture(WT)\n\n AA = s.cache_read(AT, \"shared\", [OL])\n WW = s.cache_read(WT, \"shared\", [OL])\n\n # tile and bind spatial axes\n n, fc, y, x, fb = s[output].op.axis\n\n kernel_scope, n = s[output].split(n, nparts=1)\n\n bf, vf, tf, fi = cfg[\"tile_fc\"].apply(s, output, fc)\n by, vy, ty, yi = cfg[\"tile_y\"].apply(s, output, y)\n bx, vx, tx, xi = cfg[\"tile_x\"].apply(s, output, x)\n\n bf = s[output].fuse(n, bf)\n s[output].bind(bf, te.thread_axis(\"blockIdx.z\"))\n s[output].bind(by, te.thread_axis(\"blockIdx.y\"))\n s[output].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(vf, te.thread_axis(\"vthread\"))\n s[output].bind(vy, te.thread_axis(\"vthread\"))\n s[output].bind(vx, te.thread_axis(\"vthread\"))\n s[output].bind(tf, te.thread_axis(\"threadIdx.z\"))\n s[output].bind(ty, te.thread_axis(\"threadIdx.y\"))\n s[output].bind(tx, te.thread_axis(\"threadIdx.x\"))\n s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)\n s[output].vectorize(fb)\n\n s[OL].compute_at(s[output], tx)\n\n # tile reduction axes\n n, fc, y, x, fb = s[OL].op.axis\n\n ry, rx = s[OL].op.reduce_axis\n ryo, ryi = cfg[\"tile_ry\"].apply(s, OL, ry)\n rxo, rxi = cfg[\"tile_rx\"].apply(s, OL, rx)\n\n s[OL].reorder(ryo, rxo, ryi, rxi, n, fc, y, x, fb)\n s[OL].vectorize(fb)\n # s[OL].unroll()\n\n s[AA].compute_at(s[OL], rxo)\n s[WW].compute_at(s[OL], rxo)\n # cooperative fetching\n for load in [AA, WW]:\n if load == WW:\n n, fyx, v = s[load].op.axis\n fused = s[load].fuse(n, fyx)\n else:\n n, f, y, x, v = s[load].op.axis\n fused = s[load].fuse(n, f, y, x)\n tz, fused = s[load].split(fused, nparts=cfg[\"tile_fc\"].size[2])\n ty, fused = s[load].split(fused, nparts=cfg[\"tile_y\"].size[2])\n tx, fused = s[load].split(fused, nparts=cfg[\"tile_x\"].size[2])\n s[load].bind(tz, te.thread_axis(\"threadIdx.z\"))\n s[load].bind(ty, te.thread_axis(\"threadIdx.y\"))\n s[load].bind(tx, te.thread_axis(\"threadIdx.x\"))\n s[load].vectorize(v)\n\n # unroll\n s[output].pragma(kernel_scope, \"auto_unroll_max_step\", cfg[\"auto_unroll_max_step\"].val)\n\n N, OCC, OH, OW, OCB = get_const_tuple(output.shape)\n ICC, MKHKW, ICB = get_const_tuple(kernel.shape)\n M = (OCC * OCB) // (ICC * ICB)\n KHKW = MKHKW // M\n\n if isinstance(N, int):\n cfg.add_flop(2 * N * OH * OW * OCC * OCB * KHKW)\n\n\ndef scheduler(compute, schedule, *args, **kwargs):\n placeholders = compute(*args)\n s = schedule(*placeholders, **kwargs)\n return s, placeholders\n\n\ndef conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):\n placeholders = compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape)\n s = schedule_conv2d_1x1_NCHWc_RSCKk(*placeholders)\n return s, placeholders\n\n\ndef conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):\n placeholders = compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape)\n s = schedule_conv2d_1x1_WCHNc_CRSKk(*placeholders)\n return s, (placeholders[0], placeholders[1], placeholders[-1])\n\n\ndef conv2d_NCHWc_KCRSk(input_shape, filter_shape):\n data = te.placeholder(input_shape, name=\"data\", dtype=\"float32\")\n filt = te.placeholder(filter_shape, name=\"filter\", dtype=\"float32\")\n conv = compute_conv2d_NCHWc_KCRSk(data, filt, [1, 1], [0, 0], [1, 1], \"float32\")\n cfg = autotvm.get_config()\n s 
= te.create_schedule([x.op for x in [conv]])\n schedule_conv2d_NCHWc_KCRSk(cfg, s, conv)\n return s, (data, filt, conv)\n\n\ndef conv2d_NCHWc_KCRSk_fp32_acc(input_shape, filter_shape):\n data = te.placeholder(input_shape, name=\"data\", dtype=\"float32\")\n filt = te.placeholder(filter_shape, name=\"filter\", dtype=\"float32\")\n output = compute_conv2d_NCHWc_KCRSk_acc32(data, filt, [1, 1], [0, 0], [1, 1], \"float32\")\n cfg = autotvm.get_config()\n s = te.create_schedule([x.op for x in [output]])\n schedule_conv2d_NCHWc_KCRSk_acc32(cfg, s, output)\n return s, (data, filt, output)\n\n\ndef depthwise_conv2d_NCHWc_KCRSk_acc32(input_shape, filter_shape):\n data = te.placeholder(input_shape, name=\"data\", dtype=\"float32\")\n filt = te.placeholder(filter_shape, name=\"filter\", dtype=\"float32\")\n output = compute_depthwise_conv2d_NCHWc_KCRSk_acc32(\n data, filt, [1, 1], [0, 0], [1, 1], \"float32\"\n )\n cfg = autotvm.get_config()\n s = te.create_schedule([x.op for x in [output]])\n schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(cfg, s, output)\n return s, (data, filt, output)\n\n\ndef ref_convolution(data, kernel, stride, pad):\n import mxnet as mx\n\n groups = 1\n kernel_size = (kernel.shape[2], kernel.shape[3])\n num_filter = kernel.shape[0]\n ref_res = mx.nd.Convolution(\n data=mx.nd.array(data),\n weight=mx.nd.array(kernel),\n bias=None,\n no_bias=True,\n kernel=kernel_size,\n stride=stride,\n pad=pad,\n num_filter=num_filter,\n num_group=groups,\n )\n return ref_res.asnumpy()\n\n\ndef ref_depthwise_convolution(data, kernel, stride, pad):\n import mxnet as mx\n\n groups = kernel.shape[0]\n kernel_size = (kernel.shape[2], kernel.shape[3])\n num_filter = kernel.shape[0]\n multiplier = kernel.shape[1]\n ref_res = mx.nd.Convolution(\n data=mx.nd.array(data),\n weight=mx.nd.array(kernel),\n bias=None,\n no_bias=True,\n kernel=kernel_size,\n stride=stride,\n pad=pad,\n num_filter=num_filter,\n num_group=groups,\n )\n return ref_res.asnumpy()\n\n\ndef validate(workload, target, dev, input_shapes, *args, **kwargs):\n s, placeholders = workload(*input_shapes, *args, **kwargs)\n func = tvm.driver.build(s, [*placeholders], target=target, name=\"TestFunction\")\n\n args_tvm = []\n args_np = []\n for var in placeholders[:-1]:\n var_np = np.random.uniform(size=[i.value for i in var.shape]).astype(var.dtype)\n args_np.append(var_np)\n args_tvm.append(tvm.nd.array(var_np, dev))\n args_tvm.append(\n tvm.nd.array(\n np.zeros([i.value for i in placeholders[-1].shape], dtype=placeholders[-1].dtype), dev\n )\n )\n func(*args_tvm)\n\n if \"plus_one\" in workload.__name__:\n np_result = args_np[0] + 1.0\n elif \"matmul\" in workload.__name__:\n if \"inner\" in workload.__name__:\n np_result = np.matmul(\n args_np[0].reshape(32, 256), args_np[1].reshape(32, 256).transpose(1, 0)\n )\n elif \"accum\" in workload.__name__:\n np_result = np.matmul(\n args_np[0].transpose((1, 0, 2)).reshape(64, 128), args_np[1].reshape(128, 64)\n )\n else:\n np_result = np.matmul(\n args_np[0].transpose((0, 2, 1)).reshape(128, 64),\n args_np[1].transpose(1, 0, 2).reshape(64, 128),\n )\n elif \"conv2d_1x1_NCHWc_RSCKk\" in workload.__name__:\n vec_length = args_np[1].shape[-1]\n # nchwc -> nchw\n args_np[0] = (\n args_np[0]\n .transpose((0, 1, 4, 2, 3))\n .reshape(\n args_np[0].shape[0],\n args_np[0].shape[1] * args_np[0].shape[-1],\n args_np[0].shape[2],\n args_np[0].shape[3],\n )\n )\n # rsckk -> rsck -> kcrs\n args_np[1] = (\n args_np[1]\n .reshape(\n args_np[1].shape[0],\n args_np[1].shape[1],\n args_np[1].shape[2],\n 
args_np[1].shape[3] * args_np[1].shape[4],\n )\n .transpose((3, 2, 0, 1))\n )\n np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)\n # nkhw -> nkhwk\n np_result = np_result.reshape(\n np_result.shape[0],\n np_result.shape[1] // vec_length,\n vec_length,\n np_result.shape[2],\n np_result.shape[3],\n ).transpose(0, 1, 3, 4, 2)\n elif \"conv2d_1x1_WCHNc_CRSKk\" in workload.__name__:\n vec_length = args_np[1].shape[-1]\n # wchnc -> nchw\n args_np[0] = (\n args_np[0]\n .transpose((3, 1, 4, 2, 0))\n .reshape(\n args_np[0].shape[3],\n args_np[0].shape[1] * args_np[0].shape[-1],\n args_np[0].shape[2],\n args_np[0].shape[0],\n )\n )\n # crskk -> crsk -> kcrs\n args_np[1] = (\n args_np[1]\n .reshape(\n args_np[1].shape[0],\n args_np[1].shape[1],\n args_np[1].shape[2],\n args_np[1].shape[3] * args_np[1].shape[4],\n )\n .transpose((3, 0, 1, 2))\n )\n np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)\n # nkhw -> nkkhw -> wkhnk\n np_result = np_result.reshape(\n np_result.shape[0],\n np_result.shape[1] // vec_length,\n vec_length,\n np_result.shape[2],\n np_result.shape[3],\n ).transpose(4, 1, 3, 0, 2)\n elif \"NCHW_KCRS\" in workload.__name__:\n np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)\n elif \"NCHWc_KCRSk\" in workload.__name__:\n vec_length = args_np[1].shape[-1]\n # nchwc -> nchw\n args_np[0] = (\n args_np[0]\n .transpose((0, 1, 4, 2, 3))\n .reshape(\n args_np[0].shape[0],\n args_np[0].shape[1] * args_np[0].shape[-1],\n args_np[0].shape[2],\n args_np[0].shape[3],\n )\n )\n # kcrsk/cmrsc -> kcrs/cmrs\n args_np[1] = (\n args_np[1]\n .transpose((0, 4, 1, 2, 3))\n .reshape(\n args_np[1].shape[0] * args_np[1].shape[4],\n args_np[1].shape[1],\n args_np[1].shape[2],\n args_np[1].shape[3],\n )\n )\n if \"depthwise\" in workload.__name__:\n # np_result = testing.depthwise_conv2d_python_nchw(args_np[0], args_np[1], 1, \"VALID\")\n np_result = ref_depthwise_convolution(args_np[0], args_np[1], [], [])\n else:\n # np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)\n np_result = ref_convolution(args_np[0], args_np[1], [], [])\n # nkhw -> nkhwk\n np_result = np_result.reshape(\n np_result.shape[0],\n np_result.shape[1] // vec_length,\n vec_length,\n np_result.shape[2],\n np_result.shape[3],\n ).transpose(0, 1, 3, 4, 2)\n np.testing.assert_allclose(args_tvm[-1].asnumpy(), np_result, rtol=1e-2, atol=1e-2)\n\n\nclass BaseSingleShapeValidator:\n @tvm.testing.parametrize_targets(\"opencl\")\n def test_unary(self, test_func, input_shape, target, dev):\n validate(test_func, target, dev, [input_shape])\n\n\nclass TestPlusOneRank3(BaseSingleShapeValidator):\n input_shape = tvm.testing.parameter((32, 32, 4))\n\n def plus_one(input_shape):\n return scheduler(compute_plus_one_rank3, schedule_plus_one_rank3, input_shape)\n\n test_func = tvm.testing.parameter(plus_one)\n\n\nclass TestPlusOneRank5(BaseSingleShapeValidator):\n input_shape = tvm.testing.parameter((32, 2, 4, 4, 4))\n\n def plus_one(input_shape):\n return scheduler(compute_plus_one_rank5, schedule_plus_one_rank5, input_shape)\n\n test_func = tvm.testing.parameter(plus_one)\n\n\nclass TestMatmul:\n input_shape = tvm.testing.parameter((32, 64, 4))\n local = tvm.testing.parameter(False, True)\n\n def matmul(input_shape, local):\n return scheduler(compute_matmul, schedule_matmul, input_shape, local=local)\n\n def matmul_inner(input_shape, local):\n return scheduler(compute_matmul_inner, schedule_matmul_inner, input_shape, local=local)\n\n test_func = tvm.testing.parameter(matmul, 
matmul_inner)\n\n @tvm.testing.parametrize_targets(\"opencl\")\n def test_matmul(self, test_func, input_shape, local, target, dev):\n validate(test_func, target, dev, [input_shape], local=local)\n\n\nclass TestMatmulVectorAccumulator:\n shapeA = tvm.testing.parameter((32, 64, 4))\n shapeB = tvm.testing.parameter((128, 16, 4))\n local = tvm.testing.parameter(False, True)\n\n def matmul_vector_accumulator(shapeA, shapeB, local):\n return scheduler(\n compute_matmul_vector_accumulator,\n schedule_matmul_vector_accumulator,\n shapeA,\n shapeB,\n local=local,\n )\n\n test_func = tvm.testing.parameter(matmul_vector_accumulator)\n\n @tvm.testing.parametrize_targets(\"opencl\")\n def test_matmul_vec_acc(self, test_func, shapeA, shapeB, local, target, dev):\n validate(test_func, target, dev, [shapeA, shapeB], local=local)\n\n\nclass BaseConv2DValidator:\n @tvm.testing.parametrize_targets(\"opencl\")\n def test_conv2d(self, test_func, input_shapes, target, dev):\n validate(test_func, target, dev, input_shapes)\n\n\nclass TestConv2dNCHWcRSCKk(BaseConv2DValidator):\n input_shapes = tvm.testing.parameter([(1, 32, 56, 56, 4), (1, 1, 128, 32, 4)])\n test_func = tvm.testing.parameter(conv2d_1x1_NCHWc_RSCKk)\n\n\nclass TestConv2dWCHNcCRSKk(BaseConv2DValidator):\n input_shapes = tvm.testing.parameter([(56, 32, 56, 1, 4), (128, 1, 1, 32, 4)])\n test_func = tvm.testing.parameter(conv2d_1x1_WCHNc_CRSKk)\n\n\nclass TestConv2dNCHWcKCRSk(BaseConv2DValidator):\n input_shapes = tvm.testing.parameter(\n [(1, 32, 56, 56, 4), (32, 128, 1, 1, 4)], [(1, 32, 112, 112, 4), (32, 128, 3, 3, 4)]\n )\n test_func = tvm.testing.parameter(conv2d_NCHWc_KCRSk, conv2d_NCHWc_KCRSk_fp32_acc)\n\n\nclass TestDepthwiseConv2dNCHWcKCRSk(BaseConv2DValidator):\n input_shapes = tvm.testing.parameter([(1, 24, 257, 257, 4), (24, 1, 3, 3, 4)])\n test_func = tvm.testing.parameter(depthwise_conv2d_NCHWc_KCRSk_acc32)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main(sys.argv))\n"
]
| [
[
"numpy.random.uniform",
"numpy.zeros"
]
]
|
JSybrandt/HypergraphEmbedding | [
"4ca8b5156d45e49f2de7f3af48a3cfe3c69b8a26"
]
| [
"utilities/visualize_hg.py"
]
| [
"#!/usr/bin/env python3\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport argparse\nfrom pathlib import Path\nfrom hypergraph_embedding import Hypergraph\nfrom hypergraph_embedding.hypergraph_util import ToBlockDiagonal\nfrom hypergraph_embedding.hypergraph_util import CompressRange\nfrom hypergraph_embedding.hypergraph_util import ToCsrMatrix\nimport cv2\nimport numpy as np\nfrom statistics import median\n\n\ndef ParseArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-s\",\n \"--sort\",\n action=\"store_true\",\n help=\"If set, rearrange hypergraph into block diagonal\")\n parser.add_argument(\n \"--adj-img\",\n type=str,\n help=\"Path to resulting img. Black pixels represent node-edge adj.\"),\n parser.add_argument(\n \"--distribution-img\",\n type=str,\n help=\"Path to resulting node/edge degree distribution.\")\n parser.add_argument(\n \"-g\", \"--hypergraph\", type=str, help=\"Hypergraph proto message\"),\n\n args = parser.parse_args()\n assert args.hypergraph is not None\n return args\n\n\ndef PlotDegreeDistributions(hypergraph, path):\n fig = plt.figure()\n node_dist = fig.add_subplot(211)\n edge_dist = fig.add_subplot(212)\n\n node_dist.hist([len(n.edges) for _, n in hypergraph.node.items()])\n node_dist.set_title(\"Node Size Distribution #N={}\".format(\n len(hypergraph.node)))\n node_dist.set_yscale(\"log\")\n\n edge_dist.hist([len(e.nodes) for _, e in hypergraph.edge.items()])\n edge_dist.set_title(\"Edge Size Distribution #E={}\".format(\n len(hypergraph.edge)))\n edge_dist.set_yscale(\"log\")\n\n fig.suptitle(hypergraph.name)\n fig.tight_layout()\n fig.subplots_adjust(top=0.85)\n fig.savefig(path)\n\n\ndef printDetails(hypergraph):\n print(\"Name:\", hypergraph.name)\n print(\"Num Nodes:\", len(hypergraph.node))\n print(\"Num Edges:\", len(hypergraph.edge))\n node_degs = [len(n.edges) for _, n in hypergraph.node.items()]\n print(\"Min Node Degree:\", min(node_degs))\n print(\"Median Node Degree:\", median(node_degs))\n print(\"Max Node Degree:\", max(node_degs))\n edge_degs = [len(e.nodes) for _, e in hypergraph.edge.items()]\n print(\"Min Edge Degree:\", min(edge_degs))\n print(\"Median Edge Degree:\", median(edge_degs))\n print(\"Max Edge Degree:\", max(edge_degs))\n\n\nif __name__ == \"__main__\":\n args = ParseArgs()\n hypergraph = Hypergraph()\n with open(args.hypergraph, \"rb\") as proto:\n hypergraph.ParseFromString(proto.read())\n printDetails(hypergraph)\n if args.sort:\n hypergraph, _, _ = ToBlockDiagonal(hypergraph)\n else:\n hypergraph, _, _ = CompressRange(hypergraph)\n\n if args.distribution_img is not None:\n PlotDegreeDistributions(hypergraph, args.distribution_img)\n\n if args.adj_img is not None:\n img = 1 - ToCsrMatrix(hypergraph).astype(np.float32).todense()\n img *= 255\n\n img = cv2.resize(img, (500, 1000))\n cv2.imwrite(args.adj_img, img)\n"
]
| [
[
"matplotlib.use",
"matplotlib.pyplot.figure"
]
]
|
MariaMich/Hacktoberfest2019-2 | [
"a1a1756fa4594ab9965405e0361a5125b1d4dd48"
]
| [
"Python/Enigma_Logo/enigma_logo_encrypt.py"
]
| [
"import numpy as np \r\nimport cv2\r\nimport random\r\n \r\nimg = cv2.imread('[email protected]', cv2.IMREAD_UNCHANGED)\r\nheight, width, a = img.shape\r\n#print(img.shape)\r\n#img = cv2.resize(img, (int(width * .6),int(height * .6)), interpolation = cv2.INTER_AREA)\r\n#height, width, a = img.shape\r\n\r\nframe_height = 830\r\nframe_width = 1580\r\nframe = np.zeros((frame_height, frame_width, 4), np.uint8)\r\nval = np.zeros((frame_height, frame_width, 2), np.int)\r\nfor x in range( 0,(frame_width-1),6):\r\n for y in range(0,(frame_height-1),12):\r\n val[y][x][0]=random.randrange(2)\r\n val[y][x][1]=val[y][x][0]\r\n\r\ny_low = int((frame_width/2)-(width/2))\r\ny_high = int((frame_width/2)+(width/2))\r\nx_low = int((frame_height/2)-(height/2))\r\nx_high = int((frame_height/2)+(height/2))\r\nprint(height,width)\r\n\r\nfor y in range(y_low, y_high):\r\n for x in range(x_low, x_high):\r\n if((x-x_low) < width and (y-y_low) < height):\r\n frame[x][y]=img[x - x_low][y - y_low]\r\n #if(img[x - x_low][y - y_low][0] != 0):\r\n #print(x,y,frame[x][y])\r\n\r\ncv2.imwrite('enigma_logo_big.png',frame)\r\n\r\nwhile(1):\r\n frame = cv2.imread('enigma_logo_big.png', cv2.IMREAD_UNCHANGED)\r\n for x in range( 0,(frame_width-1),10):\r\n val[0][x][0] = random.randrange(3)\r\n for x in range( 0,(frame_width-1),10):\r\n for y in range(0,(frame_height-1),15):\r\n val[y][x][1]=val[y][x][0]\r\n val[y][x][0] = val[y-15][x][1]\r\n X = x + 6\r\n Y = y + 3\r\n if(X < frame_width and Y < frame_height and frame[Y][X][0] == 0 and frame[y][x][0] == 0 and frame[y-3][x][0] == 0 and val[y][x][0] < 2):\r\n cv2.putText(frame, str(val[y][x][0]), (x, y),cv2.FONT_HERSHEY_SIMPLEX, 0.4, (67, 204, 52), 1, cv2.LINE_AA)\r\n #if(X < frame_width and Y < frame_height and frame[Y][X][0] == 0 and frame[y][x][0] == 0 and frame[y-3][x][0] == 0 and val[y][x][0] == 3):\r\n #cv2.putText(frame, str(random.randrange(3)), (x, y+6),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 100, 0), 1, cv2.LINE_AA)\r\n\r\n## for y in range(y_low, y_high):\r\n## for x in range(x_low, x_high):\r\n## #frame[x][y][0] = 255\r\n## if((x-x_low) < width and (y-y_low) < height):\r\n## if(img[x - x_low][y - y_low][1] > 0):\r\n## frame[x][y]=img[x - x_low][y - y_low]\r\n #else:\r\n #print(img[x - x_low][y - y_low][0])\r\n\r\n cv2.line(frame, (y_low, x_low), (y_high, x_high), (67, 204, 52), 10)\r\n cv2.imshow('image', frame)\r\n\r\n k = cv2.waitKey(1) & 0xFF\r\n \r\n if k == 27:\r\n break\r\n \r\n frame = cv2.imread('enigma_logo_big.png', cv2.IMREAD_UNCHANGED)\r\n for x in range( 0,(frame_width-1),10):\r\n for y in range(0,(frame_height-1),15):\r\n X = x + 6\r\n Y = y + 3\r\n if(X < frame_width and Y < frame_height and frame[Y][X][0] == 0 and frame[y][x][0] == 0 and frame[y-3][x][0] == 0 and val[y][x][0] < 2):\r\n cv2.putText(frame, str(val[y][x][0]), (x, y+5),cv2.FONT_HERSHEY_SIMPLEX, 0.4, (67, 204, 52), 1, cv2.LINE_AA)\r\n #if(X < frame_width and Y < frame_height and frame[Y][X][0] == 0 and frame[y][x][0] == 0 and frame[y-3][x][0] == 0 and val[y][x][0] == 3):\r\n #cv2.putText(frame, str(random.randrange(3)), (x, y+6),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 100, 0), 1, cv2.LINE_AA)\r\n\r\n## for y in range(y_low, y_high):\r\n## for x in range(x_low, x_high):\r\n## #frame[x][y][0] = 255\r\n## if((x-x_low) < width and (y-y_low) < height):\r\n## if(img[x - x_low][y - y_low][1] > 0):\r\n## frame[x][y]=img[x - x_low][y - y_low]\r\n cv2.line(frame, (y_low, x_low), (y_high, x_high), (67, 204, 52), 10)\r\n cv2.imshow('image', frame)\r\n\r\n k = cv2.waitKey(1) & 0xFF\r\n \r\n if k == 27:\r\n 
break\r\n \r\n frame = cv2.imread('enigma_logo_big.png', cv2.IMREAD_UNCHANGED)\r\n for x in range( 0,(frame_width-1),10):\r\n for y in range(0,(frame_height-1),15):\r\n X = x + 6\r\n Y = y + 3\r\n if(X < frame_width and Y < frame_height and frame[Y][X][0] == 0 and frame[y][x][0] == 0 and frame[y-3][x][0] == 0 and val[y][x][0] < 2):\r\n cv2.putText(frame, str(val[y][x][0]), (x, y+10),cv2.FONT_HERSHEY_SIMPLEX, 0.4, (67, 204, 52), 1, cv2.LINE_AA)\r\n #if(X < frame_width and Y < frame_height and frame[Y][X][0] == 0 and frame[y][x][0] == 0 and frame[y-3][x][0] == 0 and val[y][x][0] == 3):\r\n #cv2.putText(frame, str(random.randrange(3)), (x, y+6),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 100, 0), 1, cv2.LINE_AA)\r\n\r\n## for y in range(y_low, y_high):\r\n## for x in range(x_low, x_high):\r\n## #frame[x][y][0] = 255\r\n## if((x-x_low) < width and (y-y_low) < height):\r\n## if(img[x - x_low][y - y_low][1] > 0):\r\n## frame[x][y]=img[x - x_low][y - y_low]\r\n cv2.line(frame, (y_low, x_low), (y_high, x_high), (67, 204, 52), 10)\r\n cv2.imshow('image', frame)\r\n \r\n k = cv2.waitKey(1) & 0xFF\r\n \r\n if k == 27:\r\n break\r\n \r\ncv2.destroyAllWindows() \r\nexit(0)\r\n"
]
| [
[
"numpy.zeros"
]
]
|
rxwsxn/imantics | [
"76d81036d8f92854d63ad9938dd76c718f8b482e"
]
| [
"imantics/dataset.py"
]
| [
"import random\nimport numpy as np\n\nfrom .annotation import Annotation\nfrom .category import Category\nfrom .basic import Semantic\nfrom .image import Image\n\n\nclass Dataset(Semantic):\n @classmethod\n def from_xml(cls, xml_folder, name=\"XML Dataset\"):\n extensions = (\"jpg\",\"JPG\",\"png\")\n\n from xmljson import badgerfish as bf\n from xml.etree.ElementTree import fromstring\n \"\"\"\n Generates a dataset from a folder with XML and corresponding images\n\n :param xml_folder: \n :type xml_folder: pathlib.Path\n :raise ImportError: Raised if xml_folder is a `pathlib.Path`\n object and it cannot be imported\n \"\"\"\n dataset = cls(name)\n xml_list = []\n id_counter = 0\n\n for ext in extensions:\n xml_list += list(xml_folder.glob(f\"*.{ext}\"))\n categories = []\n for idx, imgp in enumerate(xml_list):\n xml = bf.data(fromstring(open(imgp.with_suffix(\".xml\"),\"r\").read()))\n if \"object\" in xml[\"annotation\"].keys():\n if type(xml[\"annotation\"][\"object\"]) is not list:\n cat = xml[\"annotation\"][\"object\"][\"name\"][\"$\"]\n categories.append(cat)\n else:\n for ann in xml[\"annotation\"][\"object\"]:\n cat = ann[\"name\"][\"$\"]\n categories.append(cat)\n\n categories = list(set(categories))\n\n xml_categories = {cat: Category(cat,id=idx+1) for idx,cat in enumerate(categories)}\n\n for idx, imgp in enumerate(xml_list):\n image = Image.from_path(str(imgp))\n image.id = idx\n image.dataset = name\n\n\n xml = bf.data(fromstring(open(imgp.with_suffix(\".xml\"),\"r\").read()))\n if \"object\" in xml[\"annotation\"].keys():\n\n # Handle single object case\n if type(xml[\"annotation\"][\"object\"]) is not list:\n xml[\"annotation\"][\"object\"] = [xml[\"annotation\"][\"object\"]]\n\n for ann in xml[\"annotation\"][\"object\"]:\n i = ann[\"bndbox\"]\n cat = ann[\"name\"][\"$\"]\n\n x,y,xx,yy = (int(i[\"xmin\"][\"$\"]), int(i[\"ymin\"][\"$\"]),int(i[\"xmax\"][\"$\"]),int(i[\"ymax\"][\"$\"]))\n bbox = [x,y,xx,yy]\n\n fin_ann = Annotation(id=id_counter, image=image, bbox=bbox,category=xml_categories[cat])\n id_counter += 1\n\n image.add(fin_ann)\n dataset.add(image)\n return dataset\n\n\n @classmethod\n def from_coco(cls, coco_obj, name=\"COCO Datset\"):\n \"\"\"\n Generates a dataset from a COCO object or python dict\n\n :param coco_obj:\n :type coco_obj: dict, pycocotools.coco.COCO\n :raise ImportError: Raised if coco_obj is a `pycocotools.coco.COCO`\n object and it cannot be imported\n \"\"\"\n if isinstance(coco_obj, dict):\n dataset = cls(name)\n\n coco_info = coco_obj.get('info', [])\n coco_annotations = coco_obj.get('annotations', [])\n coco_images = coco_obj.get('images', [])\n coco_categories = coco_obj.get('categories', [])\n\n index_categories = {}\n for category in coco_categories:\n category = Category.from_coco(category)\n index_categories[category.id] = category\n\n for image in coco_images:\n image = Image.from_coco(image, dataset=dataset)\n dataset.add(image)\n\n for annotation in coco_annotations:\n\n image_id = annotation.get('image_id')\n category_id = annotation.get('category_id')\n\n image = dataset.images[image_id]\n category = index_categories[category_id]\n segmentation = annotation.get('segmentation')\n metadata = annotation.get('metadata', {})\n\n # color can be stored in the metadata\n color = annotation.get('color', metadata.get('color'))\n\n annotation = Annotation(image, category, polygons=segmentation,\\\n color=color, metadata=metadata)\n dataset.add(annotation)\n\n return dataset\n\n from pycocotools.coco import COCO\n if isinstance(coco_obj, 
COCO):\n pass\n\n return None\n\n def __init__(self, name, images=[], id=0, metadata={}):\n self.annotations = {}\n self.categories = {}\n self.images = {}\n self.name = name\n\n for image in images:\n image.index(self)\n\n super(Dataset, self).__init__(id, metadata)\n\n def add(self, image):\n \"\"\"\n Adds image(s) to the current dataset\n\n :param image: list, object or path to add to dataset\n :type image: :class:`Image` :class:`Annotation`, list, typle, path\n \"\"\"\n\n if isinstance(image, (list, tuple)):\n for img in image:\n img.index(self)\n return\n\n if isinstance(image, Annotation):\n annotation = image\n image = self.images.get(annotation.image.id)\n\n annotation.index(self)\n image.add(annotation)\n return\n\n if isinstance(image, str):\n image = Image.from_path(image)\n\n image.index(self)\n\n def iter_images(self):\n \"\"\"\n Generator to iterate over all images\n \"\"\"\n for _, image in self.images.items():\n yield image\n\n def iter_annotations(self):\n \"\"\"\n Generator to iterate over all annotations\n \"\"\"\n for key, annotation in self.annotations.items():\n if isinstance(key, int):\n yield annotation\n\n def iter_categories(self):\n \"\"\"\n Generator to iterate over all categories\n \"\"\"\n for _, category in self.categories.items():\n yield category\n\n def split(self, ratios, random=False):\n \"\"\"\n Splits dataset images into mutiple sub datasets of the given ratios\n\n If a tuple of (1, 1, 2) was passed in the result would return 3 dataset\n objects of 25%, 25% and 50% of the images.\n\n .. code-block:: python\n\n percents = ratios / ratios.sum()\n\n :param ratios: ratios to split dataset into\n :type ratios: tuple, list\n :param random: randomize the images before spliting\n :returns: tuple of datasets with length of the number of ratios\n :rtype: tuple\n \"\"\"\n\n if len(ratios) >= len(self.images):\n raise ValueError(\"Too many values in ratio array compared to dataset size\")\n\n ratios = np.array(ratios)\n percents = ratios / ratios.sum()\n\n if percents.sum() != 1:\n raise ValueError(\"Percents don't add up to 100%\")\n\n percents = percents[:-1] # don't need last percent, just take what is left\n percents *= len(self.images) # how many images in each dataset\n percents = percents.round().astype(np.int) # prepare where we split\n\n if random:\n im = random.sample(list(self.images.keys()))\n else:\n im = list(self.images.keys())\n\n splits = np.split(im, percents)\n\n datasets = []\n for idx, split in enumerate(splits):\n tmp_images = []\n\n for key in split:\n # get all images corresponding to the split's keys\n tmp_images.append(self.images.get(key))\n\n dataset = Dataset(\"split\" + str(idx), images=tmp_images)\n datasets.append(dataset)\n\n return datasets\n\n def coco(self):\n coco = {\n 'info': {},\n 'categories': [c.coco(include=False) for c in self.iter_categories()],\n 'images': [i.coco(include=False) for i in self.iter_images()],\n 'annotations': [a.coco(include=False) for a in self.iter_annotations()]\n }\n\n return coco\n\n def yolo(self):\n yolo = {}\n\n for image in self.iter_images():\n yolo[image.path] = image.yolo()\n \n return yolo\n\n__all__ = [\"Dataset\"]"
]
| [
[
"numpy.array",
"numpy.split"
]
]
|
JessikaSmith/language_model | [
"9b0765c9b3daeb331b48dc5b12a7d1130276022a"
]
| [
"classification/classification_xgboost.py"
]
| [
"from language_model import QRNN\n\nimport sklearn\nimport hyperopt\nfrom hyperopt import STATUS_OK, fmin, hp, tpe\nimport xgboost as xgb\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nfrom metrics import calculate_all_metrics\n\nfrom sklearn.metrics import roc_auc_score\n\nfrom matplotlib import pyplot as plt\n\nimport tensorflow as tf\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.visible_device_list = \"0\"\n# config.gpu_options.per_process_gpu_memory_fraction = 0.4\nconfig.allow_soft_placement = True\nconfig.gpu_options.allow_growth = True\n\nsession = tf.Session(config=config)\n\n\ndef score(params):\n n_estimators = int(params['n_estimators'])\n del params['n_estimators']\n dtrain = xgb.DMatrix(train_features, label=y_train)\n dvalid = xgb.DMatrix(test_features, label=y_test)\n watchlist = [(dvalid, 'eval'), (dtrain, 'train')]\n gbm_model = xgb.train(params, dtrain, n_estimators,\n evals=watchlist,\n verbose_eval=True)\n predictions = gbm_model.predict(dvalid,\n ntree_limit=gbm_model.best_iteration + 1)\n score = roc_auc_score(y_test, predictions)\n print(\"\\tScore {0}\\n\\n\".format(score))\n loss = 1 - score\n return {'loss': loss, 'status': STATUS_OK}\n\n\ndef score_model(params):\n n_estimators = int(params['n_estimators'])\n del params['n_estimators']\n dtrain = xgb.DMatrix(train_features, label=y_train)\n dvalid = xgb.DMatrix(test_features, label=y_test)\n watchlist = [(dvalid, 'eval'), (dtrain, 'train')]\n gbm_model = xgb.train(params, dtrain, n_estimators,\n evals=watchlist,\n verbose_eval=True)\n predictions = gbm_model.predict(dvalid,\n ntree_limit=gbm_model.best_iteration + 1)\n score = roc_auc_score(y_test, predictions)\n print(\"\\tScore {0}\\n\\n\".format(score))\n loss = 1 - score\n return gbm_model\n\n\n# 0.80767599266\ndef run_xgboost_experiments(seed=42):\n space = {\n # 'booster': hp.choice()\n 'n_estimators': hp.quniform('n_estimators', 100, 500, 1),\n 'eta': hp.quniform('eta', 0.025, 0.5, 0.025), # step size shrinkage to prevent overfitting [0, 1]\n 'gamma': hp.quniform('gamma', 0.5, 1, 0.05), # minimum loss reduction [0, inf]\n 'max_depth': hp.choice('max_depth', np.arange(1, 15, dtype=int)), # max depth of the tree [0,inf]\n 'min_child_weight': hp.quniform('min_child_weight', 1, 10, 1),\n # minimum sim of instance weight needed for a child [0,inf]\n # 'max_delta_step': hp.quniform('max_delta_step', 0, 10, 1) # maximum delta step [0, inf]\n 'subsample': hp.quniform('subsample', 0.2, 0.8, 0.1), # subsample ratio of the training instances (0,1]\n 'colsample_bytree': hp.quniform('colsample_bytree', 0.2, 0.8, 0.1),\n # subsample ratio of columns whtn constructing each tree (0,1]\n 'colsample_bylevel': hp.quniform('colsample_bylevel', 0.2, 0.8, 0.1),\n # subsample ratio of columns for each shlit in each level\n 'lambda': hp.quniform('lambda', 1, 2, 0.2), # L2 regularization term on weights\n 'alpha': hp.quniform('alpha', 0, 1, 0.2), # L1 regularization term on weights\n # 'tree_method': hp.choice('tree_method', ['auto', 'approx', 'gpu_exact', 'gpu_hist']),\n # the tree construction algorithm\n # 'sketch_eps':hp.quniform('sketch_eps', 0.001, 0.03, 0.001), # for tree_method='approx', number of bins (0,1)\n 'scale_pos_weight': hp.quniform('scale_pos_weights', 0.5, 1.5, 0.1),\n # control the balance of positive and negative instances\n # 'updater'\n # 'grow_policy': hp.choice('grow_policy', ['depthwise', 'lossguide']), #\n 'seed': 42\n }\n best = fmin(score, space, algo=tpe.suggest, max_evals=50)\n return best\n\n\ndef plot_importance():\n pass\n\n\n# 
woring with elmo embeddings\nprint('Train')\ntrain_features = pickle.load(open('/mnt/shdstorage/for_classification/elmo/elmo_sent_train_v7.pkl', 'rb'))\ny_train = pd.read_csv('/mnt/shdstorage/for_classification/train_v7.csv')['label'].values.tolist()\n# y_train = [[val] for val in y_train]\n\nprint('Test')\ntest_features = pickle.load(open('/mnt/shdstorage/for_classification/elmo/elmo_sent_test_v7.pkl', 'rb'))\ny_test = pd.read_csv('/mnt/shdstorage/for_classification/test_v7.csv')['label'].values.tolist()\n# y_test = [[val] for val in y_test]\n\n\n# train_features = pd.read_csv()\n# print('Preparing train...')\n# theta = pd.read_csv('/mnt/shdstorage/tmp/classif_tmp/theta_50.csv')\n# theta_trans = theta.T\n# indexing = theta_trans.index.values.tolist()[1:]\n# train_features = theta_trans.values[1:]\n# indexing = [int(x) for x in indexing]\n#\n# print('Preparing test...')\n# theta_test = pd.read_csv('/mnt/shdstorage/tmp/classif_tmp/theta_test_50.csv')\n# theta_test_trans = theta_test.T\n# indexing_test = theta_test_trans.index.values.tolist()[1:]\n# valid_features = theta_test_trans.values[1:]\n# indexing_test = [int(x) for x in indexing_test]\n#\n# print('Preparing y train...')\n# y_train = []\n# y_train_tmp = pd.read_csv('/mnt/shdstorage/tmp/classif_tmp/y_train.csv', header=None).values.tolist()\n# for ix in indexing:\n# y_train += [y_train_tmp[ix][0]]\n# print(y_train[:10])\n#\n# print('Preparing y test...')\n# y_valid = []\n# y_valid_tmp = pd.read_csv('/mnt/shdstorage/tmp/classif_tmp/y_test.csv', header=None).values.tolist()\n# for ix in indexing_test:\n# y_valid += [y_valid_tmp[int(ix)][0]]\n# print(y_valid[:10])\n#\n# print('Preparing verification')\n\n# best_hyperparams = run_xgboost_experiments()\n# print(\"The best hyperparameters are: \", \"\\n\")\n# print(best_hyperparams)\n\n# {'alpha': 0.8, 'colsample_bylevel': 0.6000000000000001, 'colsample_bytree': 0.7000000000000001, 'eta': 0.07500000000000001, 'gamma': 0.55, 'lambda': 1.6, 'max_depth': 7, 'min_child_weight': 9.0, 'n_estimators': 490.0, 'scale_pos_weights': 1.5, 'subsample': 0.7000000000000001}\n\n# {'alpha': 0.0, 'colsample_bylevel': 0.4, 'colsample_bytree': 0.6000000000000001, 'eta': 0.15000000000000002, 'gamma': 0.7000000000000001, 'lambda': 1.6, 'max_depth': 11, 'min_child_weight': 2.0, 'n_estimators': 434.0, 'scale_pos_weights': 1.0, 'subsample': 0.8}\n#\n\nparams = {\n 'alpha': 0.8,\n 'colsample_bylevel': 0.4,\n 'colsample_bytree': 0.6,\n 'eta': 0.075,\n 'gamma': 0.55,\n 'lambda': 1.6,\n 'max_depth': 7,\n 'min_child_weight': 9,\n 'n_estimators': 490,\n 'scale_pos_weights': 1.5,\n 'subsample': 0.7\n}\n\n# model = score_model(params)\n#\n# with open('/mnt/shdstorage/for_classification/xgboost_model.pickle', 'wb') as handle:\n# pickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('/mnt/shdstorage/for_classification/xgboost_model.pickle', 'rb') as handle:\n model = pickle.load(handle)\n\n\n# print('set 3')\n# train_features = pickle.load(open('/mnt/shdstorage/for_classification/elmo/elmo_ver_set_3.pkl', 'rb'))\n# dtrain = xgb.DMatrix(train_features)\n# data = model.predict(dtrain)\n# print(data)\n# prediction = np.array(data)\n# print(prediction)\n# prediction = prediction >= 0.5\n# prediction = prediction.astype(int)\n# set_3 = pd.read_csv('/mnt/shdstorage/for_classification/labeled/stated_workers-users-comments-tokens_gt_25_posts_gt_2_and_lt_100_labeled.csv')\n# set_3['elmo_label'] = prediction\n# 
set_3.to_csv('/mnt/shdstorage/for_classification/labeled/stated_workers-users-comments-tokens_gt_25_posts_gt_2_and_lt_100_labeled.csv')\n\n\n# print('set 1')\n# train_features = pickle.load(open('/mnt/shdstorage/for_classification/elmo/elmo_ver_set_1.pkl', 'rb'))\n# dtrain = xgb.DMatrix(train_features)\n# data = model.predict(dtrain)\n# print(data)\n# prediction = np.array(data)\n# print(prediction)\n# prediction = prediction >= 0.5\n# prediction = prediction.astype(int)\n# set_1 = pd.read_csv('/mnt/shdstorage/for_classification/labeled/stated_workers-users-comments-tokens_gt_10_posts_gt_2_and_lt_100_labeled.csv')\n# set_1['elmo_label'] = prediction\n# set_1.to_csv('/mnt/shdstorage/for_classification/labeled/stated_workers-users-comments-tokens_gt_10_posts_gt_2_and_lt_100_labeled.csv')\n\n# print('set 2')\n# train_features = pickle.load(open('/mnt/shdstorage/for_classification/elmo/elmo_ver_set_2.pkl', 'rb'))\n# dtrain = xgb.DMatrix(train_features)\n# data = model.predict(dtrain)\n# print(data)\n# prediction = np.array(data)\n# print(prediction)\n# prediction = prediction >= 0.5\n# prediction = prediction.astype(int)\n# set_2 = pd.read_csv('/mnt/shdstorage/for_classification/labeled/stated_workers-users-comments-tokens_gt_5_posts_gt_2_and_lt_100_labeled.csv')\n# set_2['elmo_label'] = prediction\n# set_2.to_csv('/mnt/shdstorage/for_classification/labeled/stated_workers-users-comments-tokens_gt_5_posts_gt_2_and_lt_100_labeled.csv')\n\n# print('set 1')\n# test_features = pickle.load(open('/mnt/shdstorage/for_classification/elmo/elmo_sent_test_v7.pkl', 'rb'))\n# label = pd.read_csv('/mnt/shdstorage/for_classification/test_v7.csv')['label'].values.tolist()\n# dtest = xgb.DMatrix(test_features)\n# data = model.predict(dtest)\n# print(data)\n# prediction = np.array(data)\n# print(prediction)\n# prediction = prediction >= 0.5\n# prediction = prediction.astype(int)\n#\n# test_1, test_0 = calculate_all_metrics(label, prediction, 'TEST')\n#\n# print('set 2')\n\nprint('Ver')\nver_features = pickle.load(open('/mnt/shdstorage/for_classification/elmo/elmo_sent_ver.pkl', 'rb'))\nlabel = pd.read_csv('/mnt/shdstorage/for_classification/new_test.csv')['negative'].values.tolist()\ndver = xgb.DMatrix(ver_features)\ndata = model.predict(dver)\nprint(data)\nprediction = np.array(data)\nprint(prediction)\nprediction = prediction >= 0.5\nprediction = prediction.astype(int)\n\nverif_1, verif_0 = calculate_all_metrics(label, prediction, 'VERIFICATION')\n\n"
]
| [
[
"numpy.array",
"tensorflow.Session",
"tensorflow.ConfigProto",
"numpy.arange",
"pandas.read_csv",
"sklearn.metrics.roc_auc_score"
]
]
|
tjdbsrud/vissl | [
"b647c256447af7ea66655811849be1f642377db8"
]
| [
"extra_scripts/datasets/create_stanford_cars_data_files.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport os\nimport shutil\n\nfrom scipy import io\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets.utils import download_and_extract_archive, download_url\nfrom tqdm import tqdm\n\n\ndef get_argument_parser():\n \"\"\"\n List of arguments supported by the script\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\",\n \"--input\",\n type=str,\n help=\"Path to the folder containing the expanded archives from http://imagenet.stanford.edu/internal/car196\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n type=str,\n help=\"Folder where the classification dataset will be written\",\n )\n parser.add_argument(\n \"-d\",\n \"--download\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"To download the original dataset and decompress it in the input folder\",\n )\n return parser\n\n\nSTANFORD_URL = \"http://imagenet.stanford.edu/internal/car196/\"\nTRAIN_IMAGE_URL = STANFORD_URL + \"cars_train.tgz\"\nTRAIN_ANNOT_URL = \"https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz\"\nTEST_IMAGE_URL = STANFORD_URL + \"cars_test.tgz\"\nTEST_ANNOT_URL = STANFORD_URL + \"cars_test_annos_withlabels.mat\"\n\n\ndef download_dataset(root: str):\n \"\"\"\n Download the Standford Cars dataset archives and expand them in the folder provided as parameter\n \"\"\"\n download_and_extract_archive(url=TRAIN_IMAGE_URL, download_root=root)\n download_and_extract_archive(url=TRAIN_ANNOT_URL, download_root=root)\n download_and_extract_archive(url=TEST_IMAGE_URL, download_root=root)\n download_url(url=TEST_ANNOT_URL, root=root)\n\n\nclass StanfordCars:\n \"\"\"\n The StanfordCars dataset, mapping images to their respective class\n \"\"\"\n\n def __init__(self, root: str, split: str):\n assert split in {\"train\", \"test\"}\n self.root = root\n self.split = split\n self.annotations = self._open_annotations()\n self.image_folder = os.path.join(self.root, f\"cars_{split}\")\n self.class_names = self._get_class_names()\n\n def _open_annotations(self):\n annotations = None\n if self.split == \"train\":\n annotations = io.loadmat(\n os.path.join(self.root, \"devkit/cars_train_annos.mat\")\n )\n elif self.split == \"test\":\n annotations = io.loadmat(\n os.path.join(self.root, \"cars_test_annos_withlabels.mat\")\n )\n return annotations[\"annotations\"][0]\n\n def _get_class_names(self):\n meta_data = io.loadmat(os.path.join(self.root, \"devkit/cars_meta.mat\"))\n class_names = meta_data[\"class_names\"][0]\n return [\n \"{:03}\".format(i) + \"_\" + class_name[0].replace(\" \", \"_\")\n for i, class_name in enumerate(class_names)\n ]\n\n def __len__(self):\n return self.annotations.shape[0]\n\n def __getitem__(self, idx: int):\n image_name = self.annotations[idx][5][0]\n target_id = self.annotations[idx][4][0, 0]\n image_path = os.path.join(self.image_folder, image_name)\n # Beware: Stanford cars targets starts at 1\n target_name = self.class_names[target_id - 1]\n return image_path, target_name\n\n\nclass StandfordCarsMapper:\n \"\"\"\n Dataset used to parallelize the transformation of the dataset via a DataLoader\n \"\"\"\n\n def __init__(self, dataset: StanfordCars, output_path: str):\n self.dataset = dataset\n self.output_path = output_path\n\n def init_folders(self):\n for class_name in self.dataset.class_names:\n os.makedirs(\n os.path.join(self.output_path, 
self.dataset.split, class_name),\n exist_ok=True,\n )\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, idx: int) -> bool:\n image_path, target_name = self.dataset[idx]\n image_name = os.path.split(image_path)[-1]\n shutil.copy(\n image_path,\n os.path.join(self.output_path, self.dataset.split, target_name, image_name),\n )\n return True\n\n\ndef create_dataset(input_folder: str, output_folder: str):\n \"\"\"\n Read the dSprites dataset and split it into a training split and a validation split\n which follows the disk_folder format of VISSL\n \"\"\"\n for split in [\"train\", \"test\"]:\n print(f\"Processing '{split}' split...\")\n dataset = StanfordCars(root=input_folder, split=split)\n mapper = StandfordCarsMapper(dataset, output_path=output_folder)\n mapper.init_folders()\n loader = DataLoader(\n mapper, num_workers=8, batch_size=1, collate_fn=lambda x: x[0]\n )\n with tqdm(total=len(dataset)) as progress_bar:\n for _ in loader:\n progress_bar.update(1)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Example usage:\n\n ```\n python extra_scripts/datasets/create_stanford_cars_data_files.py -i /path/to/cars/ -o /output_path/to/cars -d\n ```\n \"\"\"\n args = get_argument_parser().parse_args()\n if args.download:\n download_dataset(args.input)\n create_dataset(input_folder=args.input, output_folder=args.output)\n"
]
| [
[
"torch.utils.data.DataLoader"
]
]
|
gokart23/allennlp | [
"0cf98c27d7e6fd1244e7335115e3fc64c9d5850a"
]
| [
"allennlp/modules/elmo.py"
]
| [
"import json\nimport logging\nimport warnings\nfrom typing import Any, Dict, List, Union\n\nimport numpy\nimport torch\nfrom overrides import overrides\nfrom torch.nn.modules import Dropout\n\nfrom allennlp.common import FromParams\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.file_utils import cached_path\nfrom allennlp.common.util import lazy_groups_of\nfrom allennlp.data import Instance, Token, Vocabulary\nfrom allennlp.data.batch import Batch\nfrom allennlp.data.fields import TextField\nfrom allennlp.data.token_indexers.elmo_indexer import (\n ELMoCharacterMapper,\n ELMoTokenCharactersIndexer,\n)\nfrom allennlp.modules.elmo_lstm import ElmoLstm\nfrom allennlp.modules.highway import Highway\nfrom allennlp.modules.scalar_mix import ScalarMix\nfrom allennlp.nn.util import (\n add_sentence_boundary_token_ids,\n get_device_of,\n remove_sentence_boundaries,\n)\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n import h5py\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Elmo(torch.nn.Module, FromParams):\n \"\"\"\n Compute ELMo representations using a pre-trained bidirectional language model.\n\n See \"Deep contextualized word representations\", Peters et al. for details.\n\n This module takes character id input and computes `num_output_representations` different layers\n of ELMo representations. Typically `num_output_representations` is 1 or 2. For example, in\n the case of the SRL model in the above paper, `num_output_representations=1` where ELMo was included at\n the input token representation layer. In the case of the SQuAD model, `num_output_representations=2`\n as ELMo was also included at the GRU output layer.\n\n In the implementation below, we learn separate scalar weights for each output layer,\n but only run the biLM once on each input sequence for efficiency.\n\n # Parameters\n\n options_file : `str`, required.\n ELMo JSON options file\n weight_file : `str`, required.\n ELMo hdf5 weight file\n num_output_representations : `int`, required.\n The number of ELMo representation to output with\n different linear weighted combination of the 3 layers (i.e.,\n character-convnet output, 1st lstm output, 2nd lstm output).\n requires_grad : `bool`, optional\n If True, compute gradient of ELMo parameters for fine tuning.\n do_layer_norm : `bool`, optional, (default = False).\n Should we apply layer normalization (passed to `ScalarMix`)?\n dropout : `float`, optional, (default = 0.5).\n The dropout to be applied to the ELMo representations.\n vocab_to_cache : `List[str]`, optional, (default = None).\n A list of words to pre-compute and cache character convolutions\n for. If you use this option, Elmo expects that you pass word\n indices of shape (batch_size, timesteps) to forward, instead\n of character indices. If you use this option and pass a word which\n wasn't pre-cached, this will break.\n keep_sentence_boundaries : `bool`, optional, (default = False)\n If True, the representation of the sentence boundary tokens are\n not removed.\n scalar_mix_parameters : `List[float]`, optional, (default = None)\n If not `None`, use these scalar mix parameters to weight the representations\n produced by different layers. These mixing weights are not updated during\n training. The mixing weights here should be the unnormalized (i.e., pre-softmax)\n weights. 
So, if you wanted to use only the 1st layer of a 2-layer ELMo,\n you can set this to [-9e10, 1, -9e10 ].\n module : `torch.nn.Module`, optional, (default = None).\n If provided, then use this module instead of the pre-trained ELMo biLM.\n If using this option, then pass `None` for both `options_file`\n and `weight_file`. The module must provide a public attribute\n `num_layers` with the number of internal layers and its `forward`\n method must return a `dict` with `activations` and `mask` keys\n (see `_ElmoBilm` for an example). Note that `requires_grad` is also\n ignored with this option.\n \"\"\"\n\n def __init__(\n self,\n options_file: str,\n weight_file: str,\n num_output_representations: int,\n requires_grad: bool = False,\n do_layer_norm: bool = False,\n dropout: float = 0.5,\n vocab_to_cache: List[str] = None,\n keep_sentence_boundaries: bool = False,\n scalar_mix_parameters: List[float] = None,\n module: torch.nn.Module = None,\n ) -> None:\n super().__init__()\n\n logger.info(\"Initializing ELMo\")\n if module is not None:\n if options_file is not None or weight_file is not None:\n raise ConfigurationError(\"Don't provide options_file or weight_file with module\")\n self._elmo_lstm = module\n else:\n self._elmo_lstm = _ElmoBiLm(\n options_file,\n weight_file,\n requires_grad=requires_grad,\n vocab_to_cache=vocab_to_cache,\n )\n self._has_cached_vocab = vocab_to_cache is not None\n self._keep_sentence_boundaries = keep_sentence_boundaries\n self._dropout = Dropout(p=dropout)\n self._scalar_mixes: Any = []\n for k in range(num_output_representations):\n scalar_mix = ScalarMix(\n self._elmo_lstm.num_layers,\n do_layer_norm=do_layer_norm,\n initial_scalar_parameters=scalar_mix_parameters,\n trainable=scalar_mix_parameters is None,\n )\n self.add_module(\"scalar_mix_{}\".format(k), scalar_mix)\n self._scalar_mixes.append(scalar_mix)\n\n def get_output_dim(self):\n return self._elmo_lstm.get_output_dim()\n\n def forward(\n self, inputs: torch.Tensor, word_inputs: torch.Tensor = None\n ) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:\n \"\"\"\n # Parameters\n\n inputs : `torch.Tensor`, required.\n Shape `(batch_size, timesteps, 50)` of character ids representing the current batch.\n word_inputs : `torch.Tensor`, required.\n If you passed a cached vocab, you can in addition pass a tensor of shape\n `(batch_size, timesteps)`, which represent word ids which have been pre-cached.\n\n # Returns\n\n Dict with keys:\n `'elmo_representations'` : `List[torch.Tensor]`\n A `num_output_representations` list of ELMo representations for the input sequence.\n Each representation is shape `(batch_size, timesteps, embedding_dim)`\n `'mask'`: `torch.Tensor`\n Shape `(batch_size, timesteps)` long tensor with sequence mask.\n \"\"\"\n # reshape the input if needed\n original_shape = inputs.size()\n if len(original_shape) > 3:\n timesteps, num_characters = original_shape[-2:]\n reshaped_inputs = inputs.view(-1, timesteps, num_characters)\n else:\n reshaped_inputs = inputs\n\n if word_inputs is not None:\n original_word_size = word_inputs.size()\n if self._has_cached_vocab and len(original_word_size) > 2:\n reshaped_word_inputs = word_inputs.view(-1, original_word_size[-1])\n elif not self._has_cached_vocab:\n logger.warning(\n \"Word inputs were passed to ELMo but it does not have a cached vocab.\"\n )\n reshaped_word_inputs = None\n else:\n reshaped_word_inputs = word_inputs\n else:\n reshaped_word_inputs = word_inputs\n\n # run the biLM\n bilm_output = self._elmo_lstm(reshaped_inputs, 
reshaped_word_inputs)\n layer_activations = bilm_output[\"activations\"]\n mask_with_bos_eos = bilm_output[\"mask\"]\n\n # compute the elmo representations\n representations = []\n for i in range(len(self._scalar_mixes)):\n scalar_mix = getattr(self, \"scalar_mix_{}\".format(i))\n representation_with_bos_eos = scalar_mix(layer_activations, mask_with_bos_eos)\n if self._keep_sentence_boundaries:\n processed_representation = representation_with_bos_eos\n processed_mask = mask_with_bos_eos\n else:\n representation_without_bos_eos, mask_without_bos_eos = remove_sentence_boundaries(\n representation_with_bos_eos, mask_with_bos_eos\n )\n processed_representation = representation_without_bos_eos\n processed_mask = mask_without_bos_eos\n representations.append(self._dropout(processed_representation))\n\n # reshape if necessary\n if word_inputs is not None and len(original_word_size) > 2:\n mask = processed_mask.view(original_word_size)\n elmo_representations = [\n representation.view(original_word_size + (-1,))\n for representation in representations\n ]\n elif len(original_shape) > 3:\n mask = processed_mask.view(original_shape[:-1])\n elmo_representations = [\n representation.view(original_shape[:-1] + (-1,))\n for representation in representations\n ]\n else:\n mask = processed_mask\n elmo_representations = representations\n\n return {\"elmo_representations\": elmo_representations, \"mask\": mask}\n\n\ndef batch_to_ids(batch: List[List[str]]) -> torch.Tensor:\n \"\"\"\n Converts a batch of tokenized sentences to a tensor representing the sentences with encoded characters\n (len(batch), max sentence length, max word length).\n\n # Parameters\n\n batch : `List[List[str]]`, required\n A list of tokenized sentences.\n\n # Returns\n\n A tensor of padded character ids.\n \"\"\"\n instances = []\n indexer = ELMoTokenCharactersIndexer()\n for sentence in batch:\n tokens = [Token(token) for token in sentence]\n field = TextField(tokens, {\"character_ids\": indexer})\n instance = Instance({\"elmo\": field})\n instances.append(instance)\n\n dataset = Batch(instances)\n vocab = Vocabulary()\n dataset.index_instances(vocab)\n return dataset.as_tensor_dict()[\"elmo\"][\"character_ids\"][\"tokens\"]\n\n\nclass _ElmoCharacterEncoder(torch.nn.Module):\n \"\"\"\n Compute context insensitive token representation using pretrained biLM.\n\n This embedder has input character ids of size (batch_size, sequence_length, 50)\n and returns (batch_size, sequence_length + 2, embedding_dim), where embedding_dim\n is specified in the options file (typically 512).\n\n We add special entries at the beginning and end of each sequence corresponding\n to <S> and </S>, the beginning and end of sentence tokens.\n\n Note: this is a lower level class useful for advanced usage. Most users should\n use `ElmoTokenEmbedder` or `allennlp.modules.Elmo` instead.\n\n # Parameters\n\n options_file : `str`\n ELMo JSON options file\n weight_file : `str`\n ELMo hdf5 weight file\n requires_grad : `bool`, optional, (default = False).\n If True, compute gradient of ELMo parameters for fine tuning.\n\n The relevant section of the options file is something like:\n .. example-code::\n\n .. 
code-block:: python\n\n {'char_cnn': {\n 'activation': 'relu',\n 'embedding': {'dim': 4},\n 'filters': [[1, 4], [2, 8], [3, 16], [4, 32], [5, 64]],\n 'max_characters_per_token': 50,\n 'n_characters': 262,\n 'n_highway': 2\n }\n }\n \"\"\"\n\n def __init__(self, options_file: str, weight_file: str, requires_grad: bool = False) -> None:\n super().__init__()\n\n with open(cached_path(options_file), \"r\") as fin:\n self._options = json.load(fin)\n self._weight_file = weight_file\n\n self.output_dim = self._options[\"lstm\"][\"projection_dim\"]\n self.requires_grad = requires_grad\n\n self._load_weights()\n\n # Cache the arrays for use in forward -- +1 due to masking.\n self._beginning_of_sentence_characters = torch.from_numpy(\n numpy.array(ELMoCharacterMapper.beginning_of_sentence_characters) + 1\n )\n self._end_of_sentence_characters = torch.from_numpy(\n numpy.array(ELMoCharacterMapper.end_of_sentence_characters) + 1\n )\n\n def get_output_dim(self):\n return self.output_dim\n\n @overrides\n def forward(self, inputs: torch.Tensor) -> Dict[str, torch.Tensor]:\n \"\"\"\n Compute context insensitive token embeddings for ELMo representations.\n\n # Parameters\n\n inputs : `torch.Tensor`\n Shape `(batch_size, sequence_length, 50)` of character ids representing the\n current batch.\n\n # Returns\n\n Dict with keys:\n `'token_embedding'` : `torch.Tensor`\n Shape `(batch_size, sequence_length + 2, embedding_dim)` tensor with context\n insensitive token representations.\n `'mask'`: `torch.Tensor`\n Shape `(batch_size, sequence_length + 2)` long tensor with sequence mask.\n \"\"\"\n # Add BOS/EOS\n mask = ((inputs > 0).long().sum(dim=-1) > 0).long()\n character_ids_with_bos_eos, mask_with_bos_eos = add_sentence_boundary_token_ids(\n inputs, mask, self._beginning_of_sentence_characters, self._end_of_sentence_characters\n )\n\n # the character id embedding\n max_chars_per_token = self._options[\"char_cnn\"][\"max_characters_per_token\"]\n # (batch_size * sequence_length, max_chars_per_token, embed_dim)\n character_embedding = torch.nn.functional.embedding(\n character_ids_with_bos_eos.view(-1, max_chars_per_token), self._char_embedding_weights\n )\n\n # run convolutions\n cnn_options = self._options[\"char_cnn\"]\n if cnn_options[\"activation\"] == \"tanh\":\n activation = torch.tanh\n elif cnn_options[\"activation\"] == \"relu\":\n activation = torch.nn.functional.relu\n else:\n raise ConfigurationError(\"Unknown activation\")\n\n # (batch_size * sequence_length, embed_dim, max_chars_per_token)\n character_embedding = torch.transpose(character_embedding, 1, 2)\n convs = []\n for i in range(len(self._convolutions)):\n conv = getattr(self, \"char_conv_{}\".format(i))\n convolved = conv(character_embedding)\n # (batch_size * sequence_length, n_filters for this width)\n convolved, _ = torch.max(convolved, dim=-1)\n convolved = activation(convolved)\n convs.append(convolved)\n\n # (batch_size * sequence_length, n_filters)\n token_embedding = torch.cat(convs, dim=-1)\n\n # apply the highway layers (batch_size * sequence_length, n_filters)\n token_embedding = self._highways(token_embedding)\n\n # final projection (batch_size * sequence_length, embedding_dim)\n token_embedding = self._projection(token_embedding)\n\n # reshape to (batch_size, sequence_length, embedding_dim)\n batch_size, sequence_length, _ = character_ids_with_bos_eos.size()\n\n return {\n \"mask\": mask_with_bos_eos,\n \"token_embedding\": token_embedding.view(batch_size, sequence_length, -1),\n }\n\n def _load_weights(self):\n 
self._load_char_embedding()\n self._load_cnn_weights()\n self._load_highway()\n self._load_projection()\n\n def _load_char_embedding(self):\n with h5py.File(cached_path(self._weight_file), \"r\") as fin:\n char_embed_weights = fin[\"char_embed\"][...]\n\n weights = numpy.zeros(\n (char_embed_weights.shape[0] + 1, char_embed_weights.shape[1]), dtype=\"float32\"\n )\n weights[1:, :] = char_embed_weights\n\n self._char_embedding_weights = torch.nn.Parameter(\n torch.FloatTensor(weights), requires_grad=self.requires_grad\n )\n\n def _load_cnn_weights(self):\n cnn_options = self._options[\"char_cnn\"]\n filters = cnn_options[\"filters\"]\n char_embed_dim = cnn_options[\"embedding\"][\"dim\"]\n\n convolutions = []\n for i, (width, num) in enumerate(filters):\n conv = torch.nn.Conv1d(\n in_channels=char_embed_dim, out_channels=num, kernel_size=width, bias=True\n )\n # load the weights\n with h5py.File(cached_path(self._weight_file), \"r\") as fin:\n weight = fin[\"CNN\"][\"W_cnn_{}\".format(i)][...]\n bias = fin[\"CNN\"][\"b_cnn_{}\".format(i)][...]\n\n w_reshaped = numpy.transpose(weight.squeeze(axis=0), axes=(2, 1, 0))\n if w_reshaped.shape != tuple(conv.weight.data.shape):\n raise ValueError(\"Invalid weight file\")\n conv.weight.data.copy_(torch.FloatTensor(w_reshaped))\n conv.bias.data.copy_(torch.FloatTensor(bias))\n\n conv.weight.requires_grad = self.requires_grad\n conv.bias.requires_grad = self.requires_grad\n\n convolutions.append(conv)\n self.add_module(\"char_conv_{}\".format(i), conv)\n\n self._convolutions = convolutions\n\n def _load_highway(self):\n\n # the highway layers have same dimensionality as the number of cnn filters\n cnn_options = self._options[\"char_cnn\"]\n filters = cnn_options[\"filters\"]\n n_filters = sum(f[1] for f in filters)\n n_highway = cnn_options[\"n_highway\"]\n\n # create the layers, and load the weights\n self._highways = Highway(n_filters, n_highway, activation=torch.nn.functional.relu)\n for k in range(n_highway):\n # The AllenNLP highway is one matrix multplication with concatenation of\n # transform and carry weights.\n with h5py.File(cached_path(self._weight_file), \"r\") as fin:\n # The weights are transposed due to multiplication order assumptions in tf\n # vs pytorch (tf.matmul(X, W) vs pytorch.matmul(W, X))\n w_transform = numpy.transpose(fin[\"CNN_high_{}\".format(k)][\"W_transform\"][...])\n # -1.0 since AllenNLP is g * x + (1 - g) * f(x) but tf is (1 - g) * x + g * f(x)\n w_carry = -1.0 * numpy.transpose(fin[\"CNN_high_{}\".format(k)][\"W_carry\"][...])\n weight = numpy.concatenate([w_transform, w_carry], axis=0)\n self._highways._layers[k].weight.data.copy_(torch.FloatTensor(weight))\n self._highways._layers[k].weight.requires_grad = self.requires_grad\n\n b_transform = fin[\"CNN_high_{}\".format(k)][\"b_transform\"][...]\n b_carry = -1.0 * fin[\"CNN_high_{}\".format(k)][\"b_carry\"][...]\n bias = numpy.concatenate([b_transform, b_carry], axis=0)\n self._highways._layers[k].bias.data.copy_(torch.FloatTensor(bias))\n self._highways._layers[k].bias.requires_grad = self.requires_grad\n\n def _load_projection(self):\n cnn_options = self._options[\"char_cnn\"]\n filters = cnn_options[\"filters\"]\n n_filters = sum(f[1] for f in filters)\n\n self._projection = torch.nn.Linear(n_filters, self.output_dim, bias=True)\n with h5py.File(cached_path(self._weight_file), \"r\") as fin:\n weight = fin[\"CNN_proj\"][\"W_proj\"][...]\n bias = fin[\"CNN_proj\"][\"b_proj\"][...]\n self._projection.weight.data.copy_(torch.FloatTensor(numpy.transpose(weight)))\n 
self._projection.bias.data.copy_(torch.FloatTensor(bias))\n\n self._projection.weight.requires_grad = self.requires_grad\n self._projection.bias.requires_grad = self.requires_grad\n\n\nclass _ElmoBiLm(torch.nn.Module):\n \"\"\"\n Run a pre-trained bidirectional language model, outputting the activations at each\n layer for weighting together into an ELMo representation (with\n `allennlp.modules.seq2seq_encoders.Elmo`). This is a lower level class, useful\n for advanced uses, but most users should use `allennlp.modules.Elmo` directly.\n\n # Parameters\n\n options_file : `str`\n ELMo JSON options file\n weight_file : `str`\n ELMo hdf5 weight file\n requires_grad : `bool`, optional, (default = False).\n If True, compute gradient of ELMo parameters for fine tuning.\n vocab_to_cache : `List[str]`, optional, (default = None).\n A list of words to pre-compute and cache character convolutions\n for. If you use this option, _ElmoBiLm expects that you pass word\n indices of shape (batch_size, timesteps) to forward, instead\n of character indices. If you use this option and pass a word which\n wasn't pre-cached, this will break.\n \"\"\"\n\n def __init__(\n self,\n options_file: str,\n weight_file: str,\n requires_grad: bool = False,\n vocab_to_cache: List[str] = None,\n ) -> None:\n super().__init__()\n\n self._token_embedder = _ElmoCharacterEncoder(\n options_file, weight_file, requires_grad=requires_grad\n )\n\n self._requires_grad = requires_grad\n if requires_grad and vocab_to_cache:\n logging.warning(\n \"You are fine tuning ELMo and caching char CNN word vectors. \"\n \"This behaviour is not guaranteed to be well defined, particularly. \"\n \"if not all of your inputs will occur in the vocabulary cache.\"\n )\n # This is an embedding, used to look up cached\n # word vectors built from character level cnn embeddings.\n self._word_embedding = None\n self._bos_embedding: torch.Tensor = None\n self._eos_embedding: torch.Tensor = None\n if vocab_to_cache:\n logging.info(\"Caching character cnn layers for words in vocabulary.\")\n # This sets 3 attributes, _word_embedding, _bos_embedding and _eos_embedding.\n # They are set in the method so they can be accessed from outside the\n # constructor.\n self.create_cached_cnn_embeddings(vocab_to_cache)\n\n with open(cached_path(options_file), \"r\") as fin:\n options = json.load(fin)\n if not options[\"lstm\"].get(\"use_skip_connections\"):\n raise ConfigurationError(\"We only support pretrained biLMs with residual connections\")\n self._elmo_lstm = ElmoLstm(\n input_size=options[\"lstm\"][\"projection_dim\"],\n hidden_size=options[\"lstm\"][\"projection_dim\"],\n cell_size=options[\"lstm\"][\"dim\"],\n num_layers=options[\"lstm\"][\"n_layers\"],\n memory_cell_clip_value=options[\"lstm\"][\"cell_clip\"],\n state_projection_clip_value=options[\"lstm\"][\"proj_clip\"],\n requires_grad=requires_grad,\n )\n self._elmo_lstm.load_weights(weight_file)\n # Number of representation layers including context independent layer\n self.num_layers = options[\"lstm\"][\"n_layers\"] + 1\n\n def get_output_dim(self):\n return 2 * self._token_embedder.get_output_dim()\n\n def forward(\n self, inputs: torch.Tensor, word_inputs: torch.Tensor = None\n ) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:\n \"\"\"\n # Parameters\n\n inputs : `torch.Tensor`, required.\n Shape `(batch_size, timesteps, 50)` of character ids representing the current batch.\n word_inputs : `torch.Tensor`, required.\n If you passed a cached vocab, you can in addition pass a tensor of shape 
`(batch_size, timesteps)`,\n which represent word ids which have been pre-cached.\n\n # Returns\n\n Dict with keys:\n\n `'activations'` : `List[torch.Tensor]`\n A list of activations at each layer of the network, each of shape\n `(batch_size, timesteps + 2, embedding_dim)`\n `'mask'`: `torch.Tensor`\n Shape `(batch_size, timesteps + 2)` long tensor with sequence mask.\n\n Note that the output tensors all include additional special begin and end of sequence\n markers.\n \"\"\"\n if self._word_embedding is not None and word_inputs is not None:\n try:\n mask_without_bos_eos = (word_inputs > 0).long()\n # The character cnn part is cached - just look it up.\n embedded_inputs = self._word_embedding(word_inputs) # type: ignore\n # shape (batch_size, timesteps + 2, embedding_dim)\n type_representation, mask = add_sentence_boundary_token_ids(\n embedded_inputs, mask_without_bos_eos, self._bos_embedding, self._eos_embedding\n )\n except RuntimeError:\n # Back off to running the character convolutions,\n # as we might not have the words in the cache.\n token_embedding = self._token_embedder(inputs)\n mask = token_embedding[\"mask\"]\n type_representation = token_embedding[\"token_embedding\"]\n else:\n token_embedding = self._token_embedder(inputs)\n mask = token_embedding[\"mask\"]\n type_representation = token_embedding[\"token_embedding\"]\n lstm_outputs = self._elmo_lstm(type_representation, mask)\n\n # Prepare the output. The first layer is duplicated.\n # Because of minor differences in how masking is applied depending\n # on whether the char cnn layers are cached, we'll be defensive and\n # multiply by the mask here. It's not strictly necessary, as the\n # mask passed on is correct, but the values in the padded areas\n # of the char cnn representations can change.\n output_tensors = [\n torch.cat([type_representation, type_representation], dim=-1)\n * mask.float().unsqueeze(-1)\n ]\n for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0):\n output_tensors.append(layer_activations.squeeze(0))\n\n return {\"activations\": output_tensors, \"mask\": mask}\n\n def create_cached_cnn_embeddings(self, tokens: List[str]) -> None:\n \"\"\"\n Given a list of tokens, this method precomputes word representations\n by running just the character convolutions and highway layers of elmo,\n essentially creating uncontextual word vectors. On subsequent forward passes,\n the word ids are looked up from an embedding, rather than being computed on\n the fly via the CNN encoder.\n\n This function sets 3 attributes:\n\n _word_embedding : `torch.Tensor`\n The word embedding for each word in the tokens passed to this method.\n _bos_embedding : `torch.Tensor`\n The embedding for the BOS token.\n _eos_embedding : `torch.Tensor`\n The embedding for the EOS token.\n\n # Parameters\n\n tokens : `List[str]`, required.\n A list of tokens to precompute character convolutions for.\n \"\"\"\n tokens = [ELMoCharacterMapper.bos_token, ELMoCharacterMapper.eos_token] + tokens\n timesteps = 32\n batch_size = 32\n chunked_tokens = lazy_groups_of(iter(tokens), timesteps)\n\n all_embeddings = []\n device = get_device_of(next(self.parameters()))\n for batch in lazy_groups_of(chunked_tokens, batch_size):\n # Shape (batch_size, timesteps, 50)\n batched_tensor = batch_to_ids(batch)\n # NOTE: This device check is for when a user calls this method having\n # already placed the model on a device. If this is called in the\n # constructor, it will probably happen on the CPU. 
This isn't too bad,\n # because it's only a few convolutions and will likely be very fast.\n if device >= 0:\n batched_tensor = batched_tensor.cuda(device)\n output = self._token_embedder(batched_tensor)\n token_embedding = output[\"token_embedding\"]\n mask = output[\"mask\"]\n token_embedding, _ = remove_sentence_boundaries(token_embedding, mask)\n all_embeddings.append(token_embedding.view(-1, token_embedding.size(-1)))\n full_embedding = torch.cat(all_embeddings, 0)\n\n # We might have some trailing embeddings from padding in the batch, so\n # we clip the embedding and lookup to the right size.\n full_embedding = full_embedding[: len(tokens), :]\n embedding = full_embedding[2 : len(tokens), :]\n vocab_size, embedding_dim = list(embedding.size())\n\n from allennlp.modules.token_embedders import Embedding # type: ignore\n\n self._bos_embedding = full_embedding[0, :]\n self._eos_embedding = full_embedding[1, :]\n self._word_embedding = Embedding( # type: ignore\n num_embeddings=vocab_size,\n embedding_dim=embedding_dim,\n weight=embedding.data,\n trainable=self._requires_grad,\n padding_index=0,\n )\n"
]
| [
[
"torch.nn.Linear",
"numpy.concatenate",
"torch.cat",
"numpy.array",
"numpy.zeros",
"torch.max",
"torch.nn.Conv1d",
"torch.nn.modules.Dropout",
"torch.FloatTensor",
"numpy.transpose",
"torch.transpose"
]
]
|
eric-czech/codex | [
"23e880d483bf4c57809ebbd214640f6246c8fd37"
]
| [
"python/pipeline/codex/exec/pipeline.py"
]
| [
"\"\"\"CODEX preprocessing pipeline core logic\n\nThis is not intended to be run directly but rather used by mutliple external\ninterfaces to implement the core process that comprises CODEX processing.\n\"\"\"\nimport os, logging, itertools, queue\nimport numpy as np\nfrom os import path as osp\nfrom threading import Thread\nfrom timeit import default_timer as timer\nfrom codex import io as codex_io\nfrom codex import config as codex_config\nfrom codex.ops import op\nfrom codex.ops import tile_generator\nfrom codex.ops import tile_crop\nfrom codex.ops import drift_compensation\nfrom codex.ops import best_focus\nfrom codex.ops import deconvolution\nfrom dask.distributed import Client, LocalCluster\nlogger = logging.getLogger(__name__)\n\n# Set 1 hour time limit on tile loading/reading operations\nTIMEOUT = 1 * 60 * 60\n\n\nclass TaskConfig(object):\n\n def __init__(self, pipeline_config, region_indexes, tile_indexes, gpu, tile_prefetch_capacity=2, run_best_focus=True, n_iter_decon=25):\n self.region_indexes = region_indexes\n self.tile_indexes = tile_indexes\n self.config_dir = pipeline_config.config_dir\n self.data_dir = pipeline_config.data_dir\n self.output_dir = pipeline_config.output_dir\n self.gpu = gpu\n self.tile_prefetch_capacity = tile_prefetch_capacity\n self.run_best_focus = run_best_focus\n self.n_iter_decon = n_iter_decon\n self.exp_config = pipeline_config.exp_config\n\n if len(self.region_indexes) != len(self.tile_indexes):\n raise ValueError(\n 'Region and tile index lists must have same length (region indexes = {}, tile indexes = {})'\n .format(self.region_indexes, self.tile_indexes)\n )\n\n @property\n def n_tiles(self):\n return len(self.tile_indexes)\n\n def __str__(self):\n return str({k: v for k, v in self.__dict__.items() if k != 'exp_config'})\n\n __repr__ = __str__\n\n\nclass PipelineConfig(object):\n\n def __init__(self, region_indexes, tile_indexes, config_dir, data_dir, output_dir, n_workers,\n gpus, memory_limit, **task_kwargs):\n self.region_idx = region_indexes\n self.tile_idx = tile_indexes\n self.config_dir = config_dir\n self.data_dir = data_dir\n self.output_dir = output_dir\n self.n_workers = n_workers\n self.gpus = gpus\n self.memory_limit = memory_limit\n self.task_kwargs = task_kwargs\n\n # Load experiment configuration in order to determine defaults\n self.exp_config = codex_config.load(config_dir)\n\n # Default region and tile index list to that in experiment configuration if not provided explicitly\n if self.region_idx is None:\n # Convert back to 1-based index to conform to 1-based-into-configs convention\n self.region_idx = [i + 1 for i in self.exp_config.region_indexes]\n if self.tile_idx is None:\n self.tile_idx = list(range(1, self.exp_config.n_tiles_per_region + 1))\n\n # Validate that only 1-based indexes are provided\n if any([i <= 0 for i in self.region_idx]):\n raise ValueError('Region indexes must be specified as 1-based index (indexes given = {})'.format(self.region_idx))\n if any([i <= 0 for i in self.tile_idx]):\n raise ValueError('Tile indexes must be specified as 1-based index (indexes given = {})'.format(self.tile_idx))\n\n def __str__(self):\n return str({\n k:v for k, v in self.__dict__.items() \n if k not in ['exp_config', 'tile_idx', 'region_idx']\n })\n\n __repr__ = __str__\n\n def get_task_config(self, region_indexes, tile_indexes, gpu):\n return TaskConfig(\n pipeline_config=self,\n region_indexes=region_indexes,\n tile_indexes=tile_indexes,\n gpu=gpu,\n **self.task_kwargs\n )\n\n @property\n def region_indexes(self):\n 
\"\"\"Get 0-based region index array\"\"\"\n return np.array(self.region_idx) - 1\n\n @property\n def tile_indexes(self):\n \"\"\"Get 0-based tile index array\"\"\"\n return np.array(self.tile_idx) - 1\n\n @property\n def region_tiles(self):\n \"\"\"Get 0-based pairs of region and tile indexes to process\"\"\"\n # Compute cartesian product of region and tile (0-based) index list \n return np.array(list(itertools.product(*(self.region_indexes, self.tile_indexes))))\n\n\ndef load_tiles(q, task_config):\n for region_index, tile_index in zip(task_config.region_indexes, task_config.tile_indexes):\n with tile_generator.CodexTileGenerator(task_config.exp_config, task_config.data_dir, region_index, tile_index) as op:\n tile = op.run()\n logger.info('Loaded tile %s for region %s [shape = %s]', tile_index + 1, region_index + 1, tile.shape)\n q.put((tile, region_index, tile_index), block=True, timeout=TIMEOUT)\n\n\ndef init_dirs(output_dir):\n for path in [output_dir]:\n if not osp.exists(path):\n os.makedirs(path, exist_ok=True)\n\n\ndef _initialize(task_config):\n if task_config.gpu is not None:\n if op.get_gpu_device() is None:\n logger.debug('Setting gpu device {}'.format(task_config.gpu))\n op.set_gpu_device(task_config.gpu)\n else:\n logger.debug('GPU device already set to {}'.format(op.get_gpu_device()))\n\n\ndef run_pipeline_task(task_config):\n _initialize(task_config)\n\n exp_config = task_config.exp_config\n init_dirs(task_config.output_dir)\n\n tile_queue = queue.Queue(maxsize=task_config.tile_prefetch_capacity)\n load_thread = Thread(target=load_tiles, args=(tile_queue, task_config))\n load_thread.start()\n\n if task_config.run_best_focus:\n focus_op = best_focus.CodexFocalPlaneSelector(exp_config).initialize()\n\n times = []\n with drift_compensation.CodexDriftCompensator(exp_config) as align_op, \\\n tile_crop.CodexTileCrop(exp_config) as crop_op, \\\n deconvolution.CodexDeconvolution(exp_config, n_iter=task_config.n_iter_decon) as decon_op:\n n_tiles = task_config.n_tiles\n for i in range(n_tiles):\n start = timer()\n tile, region_index, tile_index = tile_queue.get(block=True, timeout=TIMEOUT)\n tx, ty = exp_config.get_tile_coordinates(tile_index)\n\n def log(msg, res=None):\n details = [\n 'tile {} of {} ({:.2f}%)'.format(i + 1, n_tiles, 100*(i+1)/n_tiles),\n 'reg/x/y = {}/{}/{}'.format(region_index + 1, tx + 1, ty + 1)\n ]\n if res is not None:\n details.append('shape {} / dtype {}'.format(res.shape, res.dtype))\n logger.info(msg + ' [' + ' | '.join(details) + ']')\n\n align_tile = align_op.run(tile)\n log('Drift compensation complete', align_tile)\n\n crop_tile = crop_op.run(align_tile)\n log('Tile overlap crop complete', crop_tile)\n\n if task_config.n_iter_decon:\n decon_tile = decon_op.run(crop_tile)\n log('Deconvolution complete', decon_tile)\n else:\n decon_tile = crop_tile\n log('Skipping deconvolution')\n\n if task_config.run_best_focus:\n best_z, classifications, probabilities = focus_op.run(tile)\n focus_tile = decon_tile[:, [best_z], :, :, :]\n\n log('Best focus classifications: {}'.format(classifications), focus_tile)\n img_path = codex_io.get_best_focus_img_path(region_index, tx, ty, best_z)\n\n log('Saving best focus tile to path \"{}\"'.format(img_path), focus_tile)\n codex_io.save_tile(osp.join(task_config.output_dir, img_path), focus_tile)\n\n res_tile = decon_tile\n res_path = codex_io.get_processor_img_path(region_index, tx, ty)\n log('Saving result to path \"{}\"'.format(res_path), res_tile)\n codex_io.save_tile(osp.join(task_config.output_dir, res_path), 
res_tile)\n\n log('Processing complete')\n stop = timer()\n times.append((region_index, tile_index, stop - start))\n\n if task_config.run_best_focus:\n focus_op.shutdown()\n\n return np.array(times)\n\n\ndef run(pl_conf, logging_init_fn=None):\n start = timer()\n\n # Initialize local dask cluster\n logger.info('Initializing pipeline tasks for %s workers', pl_conf.n_workers)\n logger.debug('Pipeline configuration: %s', pl_conf)\n cluster = LocalCluster(\n n_workers=pl_conf.n_workers, threads_per_worker=1,\n processes=True, memory_limit=pl_conf.memory_limit\n )\n client = Client(cluster)\n\n # Split total region + tile indexes to process into separate lists for each worker \n # (by indexes of those index combinations)\n tiles = pl_conf.region_tiles\n idx_batches = np.array_split(np.arange(len(tiles)), pl_conf.n_workers)\n\n # Assign gpus to tasks in round-robin fashion\n def get_gpu(i):\n if pl_conf.gpus is None:\n return None\n return pl_conf.gpus[i % len(pl_conf.gpus)]\n\n # Generate a single task configuration for each worker\n tasks = [\n pl_conf.get_task_config(region_indexes=tiles[idx_batch, 0], tile_indexes=tiles[idx_batch, 1], gpu=get_gpu(i))\n for i, idx_batch in enumerate(idx_batches)\n ]\n\n logger.info('Starting pipeline for %s tasks', len(tasks))\n logger.debug('Task definitions:\\n\\t%s', '\\n\\t'.join([str(t) for t in tasks]))\n try:\n # Passing logging initialization operation, if given, to workers now\n # running in separate processes\n if logging_init_fn:\n client.run(logging_init_fn)\n\n # Disable the \"auto_restart\" feature of dask workers which is of no use in this context\n for worker in cluster.workers:\n worker.auto_restart = False\n\n # Pass tasks to each worker to execute in parallel\n res = client.map(run_pipeline_task, tasks)\n res = [r.result() for r in res]\n if len(res) != len(tasks):\n raise ValueError('Parallel execution returned {} results but {} were expected'.format(len(res), len(tasks)))\n stop = timer()\n if logger.isEnabledFor(logging.DEBUG):\n from scipy.stats import describe\n times = np.concatenate([np.array(t)[2] for t in res], 0)\n logger.debug('Per-tile execution time summary (all in seconds): %s', describe(times))\n logger.info('Pipeline execution completed in %s seconds', stop - start)\n finally:\n client.close()\n cluster.close()\n\n\n\n\n"
]
| [
[
"numpy.array",
"scipy.stats.describe"
]
]
|
zhuangdizhu/ReAgent | [
"e6d1e5c8f8c9f0896b957c06d6fb089dc76dd17b",
"e6d1e5c8f8c9f0896b957c06d6fb089dc76dd17b"
]
| [
"reagent/training/ranking/seq2slate_trainer.py",
"reagent/training/dqn_trainer_base.py"
]
| [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\nimport logging\nfrom typing import List, Optional, Tuple\n\nimport reagent.core.types as rlt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom reagent.core.dataclasses import field\nfrom reagent.core.parameters import Seq2SlateParameters\nfrom reagent.evaluation.evaluation_data_page import EvaluationDataPage\nfrom reagent.model_utils.seq2slate_utils import Seq2SlateMode\nfrom reagent.models.seq2slate import BaselineNet, Seq2SlateTransformerNet\nfrom reagent.optimizer.union import Optimizer__Union\nfrom reagent.training.ranking.helper import ips_clamp\nfrom reagent.training.reagent_lightning_module import ReAgentLightningModule\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Seq2SlateTrainer(ReAgentLightningModule):\n def __init__(\n self,\n seq2slate_net: Seq2SlateTransformerNet,\n params: Seq2SlateParameters = field( # noqa: B008\n default_factory=Seq2SlateParameters\n ),\n baseline_net: Optional[BaselineNet] = None,\n baseline_warmup_num_batches: int = 0,\n policy_optimizer: Optimizer__Union = field( # noqa: B008\n default_factory=Optimizer__Union.default\n ),\n baseline_optimizer: Optimizer__Union = field( # noqa: B008\n default_factory=Optimizer__Union.default\n ),\n policy_gradient_interval: int = 1,\n print_interval: int = 100,\n calc_cpe: bool = False,\n reward_network: Optional[nn.Module] = None,\n ) -> None:\n super().__init__()\n self.seq2slate_net = seq2slate_net\n self.params = params\n self.policy_gradient_interval = policy_gradient_interval\n self.print_interval = print_interval\n\n self.baseline_net = baseline_net\n self.baseline_warmup_num_batches = baseline_warmup_num_batches\n\n self.rl_opt = policy_optimizer\n if self.baseline_net:\n self.baseline_opt = baseline_optimizer\n\n # use manual optimization to get more flexibility\n self.automatic_optimization = False\n\n assert not calc_cpe or reward_network is not None\n self.calc_cpe = calc_cpe\n self.reward_network = reward_network\n\n def configure_optimizers(self):\n optimizers = []\n optimizers.append(\n self.rl_opt.make_optimizer_scheduler(self.seq2slate_net.parameters())\n )\n if self.baseline_net:\n optimizers.append(\n self.baseline_opt.make_optimizer_scheduler(\n self.baseline_net.parameters()\n )\n )\n return optimizers\n\n def _compute_impt_smpl(\n self, model_propensities, logged_propensities\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n logged_propensities = logged_propensities.reshape(-1, 1)\n assert (\n model_propensities.shape == logged_propensities.shape\n and len(model_propensities.shape) == 2\n and model_propensities.shape[1] == 1\n ), f\"{model_propensities.shape} {logged_propensities.shape}\"\n\n impt_smpl = model_propensities / logged_propensities\n clamped_impt_smpl = ips_clamp(impt_smpl, self.params.ips_clamp)\n return impt_smpl, clamped_impt_smpl\n\n # pyre-fixme [14]: overrides method defined in `ReAgentLightningModule` inconsistently\n def training_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int):\n assert type(batch) is rlt.PreprocessedRankingInput\n\n batch_size = batch.state.float_features.shape[0]\n\n reward = batch.slate_reward\n assert reward is not None\n\n optimizers = self.optimizers()\n if self.baseline_net:\n assert len(optimizers) == 2\n baseline_opt = optimizers[1]\n else:\n assert len(optimizers) == 1\n rl_opt = optimizers[0]\n\n if self.baseline_net:\n # Train baseline\n # pyre-fixme[29]: `Optional[BaselineNet]` is not a function.\n b = 
self.baseline_net(batch)\n baseline_loss = 1.0 / batch_size * torch.sum((b - reward) ** 2)\n baseline_opt.zero_grad()\n self.manual_backward(baseline_loss)\n baseline_opt.step()\n else:\n b = torch.zeros_like(reward)\n baseline_loss = torch.zeros(1)\n\n # Train Seq2Slate using REINFORCE\n # log probs of tgt seqs\n model_propensities = torch.exp(\n self.seq2slate_net(\n batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE\n ).log_probs\n )\n b = b.detach()\n assert (\n b.shape == reward.shape == model_propensities.shape\n ), f\"{b.shape} {reward.shape} {model_propensities.shape}\"\n\n impt_smpl, clamped_impt_smpl = self._compute_impt_smpl(\n model_propensities, batch.tgt_out_probs\n )\n assert (\n impt_smpl.shape == clamped_impt_smpl.shape == reward.shape\n ), f\"{impt_smpl.shape} {clamped_impt_smpl.shape} {reward.shape}\"\n # gradient is only w.r.t model_propensities\n assert (\n not reward.requires_grad\n # pyre-fixme[16]: `Optional` has no attribute `requires_grad`.\n and not batch.tgt_out_probs.requires_grad\n and impt_smpl.requires_grad\n and clamped_impt_smpl.requires_grad\n and not b.requires_grad\n )\n # add negative sign because we take gradient descent but we want to\n # maximize rewards\n batch_obj_loss = -clamped_impt_smpl * (reward - b)\n obj_loss = torch.mean(batch_obj_loss)\n\n # condition to perform policy gradient update:\n # 1. no baseline\n # 2. or baseline is present and it passes the warm up stage\n # 3. the last policy gradient was performed policy_gradient_interval minibatches ago\n if (\n self.baseline_net is None\n or (self.all_batches_processed + 1) >= self.baseline_warmup_num_batches\n ):\n self.manual_backward(obj_loss)\n if (self.all_batches_processed + 1) % self.policy_gradient_interval == 0:\n rl_opt.step()\n rl_opt.zero_grad()\n else:\n logger.info(\"Not update RL model because now is baseline warmup phase\")\n\n ips_loss = torch.mean(-impt_smpl * reward).cpu().detach().numpy()\n clamped_ips_loss = (\n torch.mean(-clamped_impt_smpl * reward).cpu().detach().numpy()\n )\n baseline_loss = baseline_loss.detach().cpu().numpy().item()\n advantage = (reward - b).detach().cpu().numpy()\n logged_slate_rank_probs = model_propensities.detach().cpu().numpy()\n\n if (self.all_batches_processed + 1) % self.print_interval == 0:\n logger.info(\n \"{} batch: ips_loss={}, clamped_ips_loss={}, baseline_loss={}, max_ips={}, mean_ips={}, grad_update={}\".format(\n self.all_batches_processed + 1,\n ips_loss,\n clamped_ips_loss,\n baseline_loss,\n torch.max(impt_smpl),\n torch.mean(impt_smpl),\n (self.all_batches_processed + 1) % self.policy_gradient_interval\n == 0,\n )\n )\n self.reporter.log(\n train_ips_score=torch.tensor(ips_loss).reshape(1),\n train_clamped_ips_score=torch.tensor(clamped_ips_loss).reshape(1),\n train_baseline_loss=torch.tensor(baseline_loss).reshape(1),\n train_logged_slate_rank_probs=torch.FloatTensor(logged_slate_rank_probs),\n train_ips_ratio=impt_smpl,\n train_clamped_ips_ratio=clamped_impt_smpl,\n train_advantages=advantage,\n )\n\n # pyre-ignore inconsistent override because lightning doesn't use types\n def validation_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int):\n seq2slate_net = self.seq2slate_net\n\n assert seq2slate_net.training is False\n\n logged_slate_rank_prob = torch.exp(\n seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE)\n .log_probs.detach()\n .flatten()\n .cpu()\n )\n\n eval_baseline_loss = torch.tensor([0.0]).reshape(1)\n if self.baseline_net:\n baseline_net = self.baseline_net\n # pyre-fixme[29]: 
`Optional[reagent.models.seq2slate.BaselineNet]` is\n # not a function.\n b = baseline_net(batch).detach()\n eval_baseline_loss = F.mse_loss(b, batch.slate_reward).cpu().reshape(1)\n else:\n b = torch.zeros_like(batch.slate_reward)\n\n eval_advantage = (\n # pyre-fixme[58]: `-` is not supported for operand types\n # `Optional[torch.Tensor]` and `Any`.\n (batch.slate_reward - b)\n .flatten()\n .cpu()\n )\n\n ranked_slate_output = seq2slate_net(batch, Seq2SlateMode.RANK_MODE, greedy=True)\n ranked_slate_rank_prob = ranked_slate_output.ranked_per_seq_probs.cpu()\n\n self.reporter.log(\n eval_baseline_loss=eval_baseline_loss,\n eval_advantages=eval_advantage,\n logged_slate_rank_probs=logged_slate_rank_prob,\n ranked_slate_rank_probs=ranked_slate_rank_prob,\n )\n\n if not self.calc_cpe:\n return\n\n edp_g = EvaluationDataPage.create_from_tensors_seq2slate(\n seq2slate_net,\n self.reward_network,\n batch,\n eval_greedy=True,\n )\n\n edp_ng = EvaluationDataPage.create_from_tensors_seq2slate(\n seq2slate_net,\n self.reward_network,\n batch,\n eval_greedy=False,\n )\n\n return edp_g, edp_ng\n\n # pyre-fixme[14]: Inconsistent override\n def validation_epoch_end(\n self, outputs: Optional[List[Tuple[EvaluationDataPage, EvaluationDataPage]]]\n ):\n if self.calc_cpe:\n assert outputs is not None\n eval_data_pages_g, eval_data_pages_ng = None, None\n for edp_g, edp_ng in outputs:\n if eval_data_pages_g is None and eval_data_pages_ng is None:\n eval_data_pages_g = edp_g\n eval_data_pages_ng = edp_ng\n else:\n # pyre-fixme[16]: `Optional` has no attribute `append`\n eval_data_pages_g.append(edp_g)\n eval_data_pages_ng.append(edp_ng)\n self.reporter.log(\n eval_data_pages_g=eval_data_pages_g,\n eval_data_pages_ng=eval_data_pages_ng,\n )\n",
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport logging\nfrom typing import List, Optional\n\nimport reagent.core.types as rlt\nimport torch\nimport torch.nn.functional as F\nfrom reagent.core.parameters import EvaluationParameters, RLParameters\nfrom reagent.core.torch_utils import masked_softmax\nfrom reagent.evaluation.evaluation_data_page import EvaluationDataPage\nfrom reagent.evaluation.evaluator import Evaluator\nfrom reagent.optimizer import Optimizer__Union\nfrom reagent.training.reagent_lightning_module import ReAgentLightningModule\nfrom reagent.training.rl_trainer_pytorch import RLTrainerMixin\n\nlogger = logging.getLogger(__name__)\n\n\nclass DQNTrainerMixin:\n # Q-value for action that is not possible. Guaranteed to be worse than any\n # legitimate action\n ACTION_NOT_POSSIBLE_VAL = -1e9\n\n def get_max_q_values(self, q_values, possible_actions_mask):\n return self.get_max_q_values_with_target(\n q_values, q_values, possible_actions_mask\n )\n\n def get_max_q_values_with_target(\n self, q_values, q_values_target, possible_actions_mask\n ):\n \"\"\"\n Used in Q-learning update.\n\n :param q_values: PyTorch tensor with shape (batch_size, state_dim). Each row\n contains the list of Q-values for each possible action in this state.\n\n :param q_values_target: PyTorch tensor with shape (batch_size, state_dim). Each row\n contains the list of Q-values from the target network\n for each possible action in this state.\n\n :param possible_actions_mask: PyTorch tensor with shape (batch_size, action_dim).\n possible_actions[i][j] = 1 iff the agent can take action j from\n state i.\n\n Returns a tensor of maximum Q-values for every state in the batch\n and also the index of the corresponding action (which is used in\n evaluation_data_page.py, in create_from_tensors_dqn()).\n\n \"\"\"\n\n # The parametric DQN can create flattened q values so we reshape here.\n q_values = q_values.reshape(possible_actions_mask.shape)\n q_values_target = q_values_target.reshape(possible_actions_mask.shape)\n # Set q-values of impossible actions to a very large negative number.\n inverse_pna = 1 - possible_actions_mask\n impossible_action_penalty = self.ACTION_NOT_POSSIBLE_VAL * inverse_pna\n q_values = q_values + impossible_action_penalty\n q_values_target = q_values_target + impossible_action_penalty\n\n if self.double_q_learning:\n # Use indices of the max q_values from the online network to select q-values\n # from the target network. 
This prevents overestimation of q-values.\n # The torch.gather function selects the entry from each row that corresponds\n # to the max_index in that row.\n max_q_values, max_indicies = torch.max(q_values, dim=1, keepdim=True)\n max_q_values_target = torch.gather(q_values_target, 1, max_indicies)\n else:\n max_q_values_target, max_indicies = torch.max(\n q_values_target, dim=1, keepdim=True\n )\n\n return max_q_values_target, max_indicies\n\n\nclass DQNTrainerBaseLightning(DQNTrainerMixin, RLTrainerMixin, ReAgentLightningModule):\n def __init__(\n self,\n rl_parameters: RLParameters,\n metrics_to_score=None,\n actions: Optional[List[str]] = None,\n evaluation_parameters: Optional[EvaluationParameters] = None,\n ):\n super().__init__()\n self.rl_parameters = rl_parameters\n self.time_diff_unit_length = rl_parameters.time_diff_unit_length\n self.tensorboard_logging_freq = rl_parameters.tensorboard_logging_freq\n self.calc_cpe_in_training = (\n evaluation_parameters and evaluation_parameters.calc_cpe_in_training\n )\n assert actions is not None\n self._actions: List[str] = actions\n\n if rl_parameters.q_network_loss == \"mse\":\n self.q_network_loss = F.mse_loss\n elif rl_parameters.q_network_loss == \"huber\":\n self.q_network_loss = F.smooth_l1_loss\n else:\n raise Exception(\n \"Q-Network loss type {} not valid loss.\".format(\n rl_parameters.q_network_loss\n )\n )\n\n if metrics_to_score:\n self.metrics_to_score = metrics_to_score + [\"reward\"]\n else:\n self.metrics_to_score = [\"reward\"]\n\n def _check_input(self, training_batch: rlt.DiscreteDqnInput):\n assert isinstance(training_batch, rlt.DiscreteDqnInput)\n assert training_batch.not_terminal.dim() == training_batch.reward.dim() == 2\n assert (\n training_batch.not_terminal.shape[1] == training_batch.reward.shape[1] == 1\n )\n assert training_batch.action.dim() == training_batch.next_action.dim() == 2\n assert (\n training_batch.action.shape[1]\n == training_batch.next_action.shape[1]\n == self.num_actions\n )\n\n @property\n def num_actions(self) -> int:\n assert self._actions is not None, \"Not a discrete action DQN\"\n return len(self._actions)\n\n # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because\n # its type `no_grad` is not callable.\n @torch.no_grad()\n def boost_rewards(\n self, rewards: torch.Tensor, actions: torch.Tensor\n ) -> torch.Tensor:\n # Apply reward boost if specified\n reward_boosts = torch.sum(\n actions.float() * self.reward_boosts,\n dim=1,\n keepdim=True,\n )\n return rewards + reward_boosts\n\n def _initialize_cpe(\n self,\n reward_network,\n q_network_cpe,\n q_network_cpe_target,\n optimizer: Optimizer__Union,\n ) -> None:\n if not self.calc_cpe_in_training:\n # pyre-fixme[16]: `DQNTrainerBase` has no attribute `reward_network`.\n self.reward_network = None\n return\n\n assert reward_network is not None, \"reward_network is required for CPE\"\n self.reward_network = reward_network\n # pyre-fixme[16]: `DQNTrainerBase` has no attribute `reward_network_optimizer`.\n self.reward_network_optimizer = optimizer\n assert (\n q_network_cpe is not None and q_network_cpe_target is not None\n ), \"q_network_cpe and q_network_cpe_target are required for CPE\"\n # pyre-fixme[16]: `DQNTrainerBase` has no attribute `q_network_cpe`.\n self.q_network_cpe = q_network_cpe\n # pyre-fixme[16]: `DQNTrainerBase` has no attribute `q_network_cpe_target`.\n self.q_network_cpe_target = q_network_cpe_target\n # pyre-fixme[16]: `DQNTrainerBase` has no attribute `q_network_cpe_optimizer`.\n 
self.q_network_cpe_optimizer = optimizer\n num_output_nodes = len(self.metrics_to_score) * self.num_actions\n reward_idx_offsets = torch.arange(\n 0,\n num_output_nodes,\n self.num_actions,\n dtype=torch.long,\n )\n self.register_buffer(\"reward_idx_offsets\", reward_idx_offsets)\n\n reward_stripped_metrics_to_score = (\n self.metrics_to_score[:-1] if len(self.metrics_to_score) > 1 else None\n )\n # pyre-fixme[16]: `DQNTrainerBase` has no attribute `evaluator`.\n self.evaluator = Evaluator(\n self._actions,\n self.rl_parameters.gamma,\n self,\n metrics_to_score=reward_stripped_metrics_to_score,\n )\n\n def _calculate_cpes(\n self,\n training_batch,\n states,\n next_states,\n all_action_scores,\n all_next_action_scores,\n logged_action_idxs,\n discount_tensor,\n not_done_mask,\n ):\n if not self.calc_cpe_in_training:\n return\n if training_batch.extras.metrics is None:\n metrics_reward_concat_real_vals = training_batch.reward\n else:\n metrics_reward_concat_real_vals = torch.cat(\n (training_batch.reward, training_batch.extras.metrics), dim=1\n )\n\n model_propensities_next_states = masked_softmax(\n all_next_action_scores,\n training_batch.possible_next_actions_mask\n if self.maxq_learning\n else training_batch.next_action,\n self.rl_temperature,\n )\n\n ######### Train separate reward network for CPE evaluation #############\n reward_estimates = self.reward_network(states)\n reward_estimates_for_logged_actions = reward_estimates.gather(\n 1, self.reward_idx_offsets + logged_action_idxs\n )\n reward_loss = F.mse_loss(\n reward_estimates_for_logged_actions, metrics_reward_concat_real_vals\n )\n yield reward_loss\n\n ######### Train separate q-network for CPE evaluation #############\n metric_q_values = self.q_network_cpe(states).gather(\n 1, self.reward_idx_offsets + logged_action_idxs\n )\n all_metrics_target_q_values = torch.chunk(\n self.q_network_cpe_target(next_states).detach(),\n len(self.metrics_to_score),\n dim=1,\n )\n target_metric_q_values = []\n for i, per_metric_target_q_values in enumerate(all_metrics_target_q_values):\n per_metric_next_q_values = torch.sum(\n per_metric_target_q_values * model_propensities_next_states,\n 1,\n keepdim=True,\n )\n per_metric_next_q_values = per_metric_next_q_values * not_done_mask\n per_metric_target_q_values = metrics_reward_concat_real_vals[\n :, i : i + 1\n ] + (discount_tensor * per_metric_next_q_values)\n target_metric_q_values.append(per_metric_target_q_values)\n\n target_metric_q_values = torch.cat(target_metric_q_values, dim=1)\n metric_q_value_loss = self.q_network_loss(\n metric_q_values, target_metric_q_values\n )\n\n # The model_propensities computed below are not used right now. The CPE graphs in the Outputs\n # tab use model_propensities computed in the function create_from_tensors_dqn() in evaluation_data_page.py,\n # which is called on the eval_table_sample in the gather_eval_data() function below.\n model_propensities = masked_softmax(\n all_action_scores,\n training_batch.possible_actions_mask\n if self.maxq_learning\n else training_batch.action,\n self.rl_temperature,\n )\n # Extract rewards predicted by the reward_network. 
The other columns will\n # give predicted values for other metrics, if such were specified.\n model_rewards = reward_estimates[\n :,\n torch.arange(\n self.reward_idx_offsets[0],\n self.reward_idx_offsets[0] + self.num_actions,\n ),\n ]\n\n self.reporter.log(\n reward_loss=reward_loss,\n model_propensities=model_propensities,\n model_rewards=model_rewards,\n )\n\n yield metric_q_value_loss\n\n def gather_eval_data(self, validation_step_outputs):\n was_on_gpu = self.on_gpu\n self.cpu()\n eval_data = None\n for edp in validation_step_outputs:\n if eval_data is None:\n eval_data = edp\n else:\n eval_data = eval_data.append(edp)\n if eval_data and eval_data.mdp_id is not None:\n eval_data = eval_data.sort()\n eval_data = eval_data.compute_values(self.gamma)\n eval_data.validate()\n if was_on_gpu:\n self.cuda()\n return eval_data\n\n def validation_step(self, batch, batch_idx):\n # HACK: Move to cpu in order to hold more batches in memory\n # This is only needed when trainers need in-memory\n # EvaluationDataPages of the full evaluation dataset\n return EvaluationDataPage.create_from_training_batch(batch, self).cpu()\n\n def validation_epoch_end(self, valid_step_outputs):\n # As explained in the comments to the validation_step function in\n # pytorch_lightning/core/lightning.py, this function is generally used as follows:\n # val_outs = []\n # for val_batch in val_data:\n # out = validation_step(val_batch)\n # val_outs.append(out)\n # validation_epoch_end(val_outs)\n\n # The input arguments of validation_epoch_end() is a list of EvaluationDataPages,\n # which matches the way it is used in gather_eval_data() above.\n\n eval_data = self.gather_eval_data(valid_step_outputs)\n if eval_data and eval_data.mdp_id is not None:\n cpe_details = self.evaluator.evaluate_post_training(eval_data)\n self.reporter.log(cpe_details=cpe_details)\n"
]
| [
[
"torch.zeros",
"torch.max",
"torch.FloatTensor",
"torch.nn.functional.mse_loss",
"torch.tensor",
"torch.zeros_like",
"torch.mean",
"torch.sum"
],
[
"torch.cat",
"torch.arange",
"torch.max",
"torch.gather",
"torch.no_grad",
"torch.nn.functional.mse_loss",
"torch.sum"
]
]
|
JoaoCarabetta/SimulaCovid | [
"36961cfe2b7fb3545e7653c499add4fb36757d7d"
]
| [
"src/models/seir.py"
]
| [
"from scipy.integrate import odeint\nimport pandas as pd\nimport numpy as np\n\n# The SIR model differential equations.\ndef SEIR(y, t, N, beta, gamma, sigma):\n\n S, E, I, R = y\n dSdt = - beta * S * I / N\n dEdt = beta * S * I / N - sigma * E\n dIdt = sigma * E - gamma * I\n dRdt = gamma * I\n \n return dSdt, dEdt, dIdt, dRdt\n\ndef entrypoint(current_state, model_parameters):\n \"\"\"\n model_parameters:\n seir:\n sick_days:\n i2_percentage: Severe infection \n i3_percentage: Critical infection \n days_from_t0: Days of simulation\n sigma: rate of progression from the exposed to infected class\n \n current_state:\n N: Poupulation\n S: Initial Susceptible individuals\n E: Initial Exposed\n I: Initial Infected\n R: Initial Recovered individuals, who have recovered from disease and are now immune\n MODEL PARAMETERS:\n * beta: rate at which infected individuals in class I contact Susceptibles and Infect them \n * gamma: rate at which infected individuals in class I Recover from disease and become immune\n * sigma: rate of progression from the exposed to infected\n\n OBS: TODO\n Exposed is being estimated given diseases growth of 33% per day.\n \"\"\"\n args = {\n 'y0': (current_state['suceptible'], # S\n current_state['exposed'], # E\n current_state['current_infected'], # I \n current_state['recovered'] # R \n ),\n 't': np.linspace(0, model_parameters['days_from_t0'], model_parameters['days_from_t0']+1),\n 'args': (current_state['population'], # N\n model_parameters['R0'] / model_parameters['sick_days'], # beta\n 1. / model_parameters['sick_days'], # gamma\n model_parameters['sigma'], # sigma\n )\n } \n\n result = odeint(SEIR, **args)\n result = pd.DataFrame(result, columns=['S' ,'E' ,'I', 'R'])\n\n result['days'] = args['t']\n result['I2'] = result['I'] * model_parameters['i2_percentage'] / 100 # severe cases\n result['I3'] = result['I'] * model_parameters['i3_percentage'] / 100 # critical cases\n result['I1'] = result['I'] - result['I2'] - result['I3'] # mild cases\n\n return result "
]
| [
[
"pandas.DataFrame",
"scipy.integrate.odeint",
"numpy.linspace"
]
]
|
nicoloabrate/serpent-tools | [
"682fa6be3155c233b5f13aa4d106fe2b1a5b1b09"
]
| [
"serpentTools/parsers/microxs.py"
]
| [
"\"\"\"Parser responsible for reading the ``mdx[i].m`` files\"\"\"\n\nfrom collections import namedtuple\n\nfrom numpy import array\n\nfrom serpentTools.engines import KeywordParser\nfrom serpentTools.utils import (\n splitValsUncs, str2vec,\n VEC_REGEX, SCALAR_REGEX, FIRST_WORD_REGEX,\n)\nfrom serpentTools.parsers.base import BaseReader\n\nfrom serpentTools.messages import SerpentToolsException\n\n\nMicroXSTuple = namedtuple(\"MicroXSTuple\", [\"zai\", \"mt\", \"metastable\"])\n\ntry:\n MicroXSTuple.__doc__ = \"\"\"Convenient indexer for microscopic cross sections\n\nUsing attributes is recommended over positions, although both are\nidentical. The former is more likely to be consistent across future\nversions.\n\nAttributes\n----------\nzai : int\n Isotope ZZAAAI identifier, e.g. ``922350``\nmt : int\n Reaction MT, e.g. ``18``\nmetastable : int\n 0 if reaction results in a metastable isotope, 1 otherwise\n\nExample\n-------\n\n>>> mx = MicroXSTuple(922380, 18, 0)\n>>> mx.zai\n922390\n>>> mx.mt == mx[1]\nTrue\n\n\"\"\"\nexcept AttributeError:\n # can't set namedtuple docs for PY2\n pass\n\n\nclass MicroXSReader(BaseReader):\n \"\"\"\n Parser responsible for reading and working with micro-xs (mdx) files.\n\n Parameters\n ----------\n filePath : str\n path to the ``*mdx[n].m`` file\n\n Attributes\n ------------\n nfy : dict\n Nested dictionary with tuples (parent, energy) as keys.\n Parent is the isotope undergoing fission, e.g. 922350\n and energy is the impending neutron energy causing fission\n in MeV.\n The values are nested dictionaries with the following structure::\n\n \"fissProd\": list of fission product ZAI ids, e.g. [541350, 551350, ...]\n \"indYield\": list of independent yields\n \"cumYield\": list of cumulative yields\n\n fluxRatio : dict\n Dictionary with universes id as keys and the\n corresponding group-wise flux values.\n e.g., ``fluxRatio['0'] = [9.91938E+14, 1.81729E+15]``\n fluxUnc : dict\n Dictionary with universes id as keys and the\n corresponding group-wise flux uncertainty values.\n e.g., ``fluxRatio['0'] = [0.00023, 0.00042]``\n xsVal : dict\n Expected value on microscopic cross sections, sorted by\n universe then by isotope, reaction, and metastable flag.\n Nested dictionary with universes as keys, e.g. '0'.\n The values are nested dictionary with :class:`MicroXSTuple`\n as keys (isotope, reaction, flag) and group xs as values.\n e.g., ``(922350, 18, 0)``\n xsUnc : dict\n Uncertainties on microscopic cross sections, sorted by\n universe then by isotope, reaction, and metastable flag.\n Nested dictionary with universes as keys, e.g. 
'0'.\n The values are nested dictionary with :class:`MicroXSTuple`\n as keys (isotope, reaction, flag) and group xs as values.\n e.g., ``(922350, 18, 0)``\n\n Raises\n ------\n SerpentToolsException\n No results exist in the file, or no results are collected\n\n \"\"\"\n\n def __init__(self, filePath):\n BaseReader.__init__(self, filePath, 'microxs')\n self._energyFY = []\n self.nfy = {}\n self.xsVal, self.xsUnc = {}, {}\n self.fluxRatio, self.fluxUnc = {}, {}\n\n def _read(self):\n \"\"\"read through the results file and store requested data.\"\"\"\n keys = ['NFY', 'XS', 'FLUX']\n separators = ['\\n', '];', '\\r\\n']\n with KeywordParser(self.filePath, keys, separators) as parser:\n for chunk in parser.yieldChunks():\n if 'NFY' in chunk[0] and self.settings['getFY']:\n self._storeFissionYields(chunk)\n elif 'FLUX' in chunk[0] and self.settings['getFlx']:\n self._storeFluxRatio(chunk)\n elif 'XS' in chunk[0] and self.settings['getXS']:\n self._storeMicroXS(chunk)\n\n def _storeFissionYields(self, chunk):\n \"\"\"store fission yields data\"\"\"\n fissProd, indYield, cumYield = [], [], []\n currVar = FIRST_WORD_REGEX.search(chunk[0]).group() # NFY_902270_1\n # Obtain the parent ID: AAZZZ0/1, e.g., 922350\n parentFY = int(str2vec(currVar.split('_')[-2]))\n if 'E' in currVar.split('_')[-1]: # e.g., NFY_902270_1E\n sclVal = SCALAR_REGEX.search(chunk[0])\n # energy must be stored on the reader\n self._energyFY = float(str2vec(\n chunk[0][sclVal.span()[0] + 1:sclVal.span()[1] - 2]))\n return # thermal/epi/fast\n for tline in chunk:\n if '[' in tline or ']' in tline:\n continue\n tline = tline[:tline.find('%')]\n if len(tline.split()) == 3:\n val1, val2, val3 = str2vec(tline, out=list)\n fissProd.append(val1)\n indYield.append(val2)\n cumYield.append(val3)\n self.nfy[(parentFY, self._energyFY)] = {'fissProd': array(fissProd),\n 'indYield': array(indYield),\n 'cumYield': array(cumYield)}\n\n def _storeFluxRatio(self, chunk):\n \"\"\"store flux ratios\"\"\"\n chunk0 = chunk[0]\n currVar = FIRST_WORD_REGEX.search(chunk0).group()\n # obtain the universe id\n univ = currVar.split('_')[-1]\n search = VEC_REGEX.search(chunk0) # group flux values\n vals = str2vec(chunk0[search.span()[0] + 1:search.span()[1] - 2])\n self.fluxRatio[univ], self.fluxUnc[univ] = splitValsUncs(vals)\n\n def _storeMicroXS(self, chunk):\n \"\"\"store micro cross-section and uncertainty values\"\"\"\n currXS, currUnc = {}, {}\n currVar = FIRST_WORD_REGEX.search(chunk[0]).group()\n # obtain the universe id\n univ = currVar.split('_')[-1]\n for tline in chunk:\n if '[' in tline or ']' in tline:\n continue\n if '%' in tline:\n tline = tline[:tline.index('%')]\n if len(tline.split()) > 3:\n values = str2vec(tline)\n # isotope, reaction type and isomeric state\n reactionKey = MicroXSTuple(*(int(x) for x in values[:3]))\n currXS[reactionKey], currUnc[reactionKey] = splitValsUncs(\n values[3:])\n self.xsVal[univ] = currXS\n self.xsUnc[univ] = currUnc\n\n def _precheck(self):\n \"\"\"do a quick scan to ensure this looks like mdx file.\"\"\"\n with open(self.filePath) as fid:\n if fid is None:\n raise IOError(\"Attempting to read on a closed file.\\n\"\n \"Parser: {}\\nFile: {}\".format(self,\n self.filePath))\n for tline in fid:\n if 'NFY' in tline:\n return\n raise SerpentToolsException(\"Fission yields values were not \"\n \"found in {}\".format(self.filePath))\n\n def _postcheck(self):\n \"\"\"ensure the parser grabbed expected results.\"\"\"\n if not self.xsVal and not self.fluxRatio and not self.nfy:\n raise 
SerpentToolsException(\"xs, fluxes and fission yields are all\"\n \" empty in {}\".format(self.filePath))\n\n def getFY(self, parent, energy, daughter, flagEnergy=True):\n \"\"\"\n Return a specific fission yield given the parent ID, neutron energy\n and daughter ID\n\n If the energy does not exist in the results, the fission yield\n corresponding to the closest energy is returned\n\n Parameters\n ----------\n parent: int or float\n ID of the parent undergoing fission\n energy: float\n neutron energy in MeV\n daughter: int or float\n ID of the fission product\n flagEnergy: boolean\n If set to true, the function will return the fission yield\n that matches to the closest energy given by the user\n\n Returns\n -------\n indYield: float\n Independent fission yield\n cumYield: float\n Cumulative fission yield\n\n Raises\n ------\n SerpentToolsException:\n If energy is a negative number\n If parent or fission product are not found\n \"\"\"\n if energy < 0:\n raise SerpentToolsException(\"Energy entry {0} must be positive\"\n \" in {1}\".format(energy,\n self.filePath))\n if flagEnergy:\n # obtain the different energies for a specific parent\n eneList = [items[1] for items in self.nfy.keys() if\n parent == items[0]]\n # obtain the closest energy value\n energy = min(eneList,\n key=lambda x: abs(x - energy)) if eneList else energy\n if (parent, energy) not in self.nfy.keys():\n raise SerpentToolsException(\"There is no parent {0} with energy \"\n \"{1} in {2}\".format(parent, energy,\n self.filePath))\n FP = self.nfy[(parent, energy)]['fissProd']\n IndYield = self.nfy[(parent, energy)]['indYield']\n CumYield = self.nfy[(parent, energy)]['cumYield']\n if daughter in FP:\n return (float(IndYield[FP == daughter]),\n float(CumYield[FP == daughter]))\n raise SerpentToolsException(\n \"There is no fission product {0} for parent {1} at energy {2} in \"\n \"{3}\".format(daughter, parent, energy, self.filePath))\n\n def getXS(self, universe, isotope, reaction, isomeric=0):\n \"\"\"\n Return the group-wise micro cross-sections for a\n specific isotope, and reaction\n\n Parameters\n ----------\n universe: string\n universe ID, e.g., `0`\n isotope: int or float\n ID of the isotope (ZZAAA0/1)\n reaction: int\n MT reaction, e.g., 102 --> (n,gamma)\n Special flag: int or float\n Isomeric state or fission yield distribution number\n Default is zero\n\n Returns\n -------\n xsVal: numpy.ndarray\n Group-wise cross-section values\n xsUnc: numpy.ndarray\n Group-wise uncertainty values\n\n Raises\n ------\n SerpentToolsException:\n If the universe does not exist\n If the isotope's format is incorrect (not ZZAAA0/1)\n \"\"\"\n if universe not in self.xsVal.keys():\n raise SerpentToolsException(\"Universe {0} does not exist in {1}\"\n .format(universe, self.filePath))\n if isotope < 10010 or isotope > 1000000:\n raise SerpentToolsException(\"Isotope {0} is not properly formatted\"\n \" ZZAAA0/1 in {1}\"\n .format(isotope, self.filePath))\n key = (isotope, reaction, isomeric)\n if key in self.xsVal[universe]:\n return (self.xsVal[universe][key],\n self.xsUnc[universe][key])\n raise SerpentToolsException(\"There is no isotope {0} with reaction {1}\"\n \" and isomeric flag {2} in {3}\"\n .format(isotope, reaction, isomeric,\n self.filePath))\n"
]
| [
[
"numpy.array"
]
]
|
aimagelab/camel | [
"67cb06210165b7953248ef38c36fc51d7240fb5e"
]
| [
"models/transformer/attention.py"
]
| [
"import numpy as np\nimport torch\nfrom torch import nn\nfrom models.containers import Module\n\n\nclass ScaledDotProductAttention(nn.Module):\n \"\"\"\n Scaled dot-product attention\n \"\"\"\n\n def __init__(self, d_model, d_k, d_v, h):\n '''\n :param d_model: Output dimensionality of the model\n :param d_k: Dimensionality of queries and keys\n :param d_v: Dimensionality of values\n :param h: Number of heads\n '''\n super(ScaledDotProductAttention, self).__init__()\n self.fc_q = nn.Linear(d_model, h * d_k)\n self.fc_k = nn.Linear(d_model, h * d_k)\n self.fc_v = nn.Linear(d_model, h * d_v)\n self.fc_o = nn.Linear(h * d_v, d_model)\n\n self.d_model = d_model\n self.d_k = d_k\n self.d_v = d_v\n self.h = h\n\n self.init_weights()\n\n def init_weights(self):\n nn.init.xavier_uniform_(self.fc_q.weight)\n nn.init.xavier_uniform_(self.fc_k.weight)\n nn.init.xavier_uniform_(self.fc_v.weight)\n nn.init.xavier_uniform_(self.fc_o.weight)\n nn.init.constant_(self.fc_q.bias, 0)\n nn.init.constant_(self.fc_k.bias, 0)\n nn.init.constant_(self.fc_v.bias, 0)\n nn.init.constant_(self.fc_o.bias, 0)\n\n def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):\n \"\"\"\n Computes\n :param queries: Queries (b_s, nq, d_model)\n :param keys: Keys (b_s, nk, d_model)\n :param values: Values (b_s, nk, d_model)\n :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking.\n :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk).\n :return:\n \"\"\"\n b_s, nq = queries.shape[:2]\n nk = keys.shape[1]\n q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) # (b_s, h, nq, d_k)\n k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) # (b_s, h, d_k, nk)\n v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) # (b_s, h, nk, d_v)\n\n att = torch.matmul(q, k) / np.sqrt(self.d_k) # (b_s, h, nq, nk)\n if attention_weights is not None:\n att = att * attention_weights\n if attention_mask is not None:\n att = att.masked_fill(attention_mask, -np.inf)\n att = torch.softmax(att, -1)\n out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) # (b_s, nq, h*d_v)\n out = self.fc_o(out) # (b_s, nq, d_model)\n return out\n\n\nclass ScaledDotProductAttentionMemory(nn.Module):\n \"\"\"\n Scaled dot-product attention with memory\n \"\"\"\n\n def __init__(self, d_model, d_k, d_v, h, m):\n \"\"\"\n :param d_model: Output dimensionality of the model\n :param d_k: Dimensionality of queries and keys\n :param d_v: Dimensionality of values\n :param h: Number of heads\n :param m: Number of memory slots\n \"\"\"\n super(ScaledDotProductAttentionMemory, self).__init__()\n self.fc_q = nn.Linear(d_model, h * d_k)\n self.fc_k = nn.Linear(d_model, h * d_k)\n self.fc_v = nn.Linear(d_model, h * d_v)\n self.fc_o = nn.Linear(h * d_v, d_model)\n self.d_model = d_model\n self.d_k = d_k\n self.d_v = d_v\n self.h = h\n self.m = m\n \n if self.m > 0:\n self.m_k = nn.Parameter(torch.FloatTensor(1, m, h * d_k))\n self.m_v = nn.Parameter(torch.FloatTensor(1, m, h * d_v))\n\n self.init_weights()\n\n def init_weights(self):\n nn.init.xavier_uniform_(self.fc_q.weight)\n nn.init.xavier_uniform_(self.fc_k.weight)\n nn.init.xavier_uniform_(self.fc_v.weight)\n nn.init.xavier_uniform_(self.fc_o.weight)\n nn.init.constant_(self.fc_q.bias, 0)\n nn.init.constant_(self.fc_k.bias, 0)\n nn.init.constant_(self.fc_v.bias, 0)\n nn.init.constant_(self.fc_o.bias, 0)\n\n if self.m > 0:\n 
nn.init.normal_(self.m_k, 0, 1 / self.d_k)\n nn.init.normal_(self.m_v, 0, 1 / self.m)\n\n def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):\n \"\"\"\n Computes\n :param queries: Queries (b_s, nq, d_model)\n :param keys: Keys (b_s, nk, d_model)\n :param values: Values (b_s, nk, d_model)\n :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking.\n :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk).\n :return:\n \"\"\"\n b_s, nq = queries.shape[:2]\n nk = keys.shape[1]\n\n q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) # (b_s, h, nq, d_k)\n\n if self.m > 0:\n m_k = np.sqrt(self.d_k) * self.m_k.expand(b_s, self.m, self.h * self.d_k)\n m_v = np.sqrt(self.m) * self.m_v.expand(b_s, self.m, self.h * self.d_v)\n k = torch.cat([self.fc_k(keys), m_k], 1)\n v = torch.cat([self.fc_v(values), m_v], 1)\n else:\n k = self.fc_k(keys)\n v = self.fc_v(values)\n\n k = k.view(b_s, nk + self.m, self.h, self.d_k).permute(0, 2, 3, 1) # (b_s, h, d_k, nk)\n v = v.view(b_s, nk + self.m, self.h, self.d_v).permute(0, 2, 1, 3) # (b_s, h, nk, d_v)\n\n att = torch.matmul(q, k) / np.sqrt(self.d_k) # (b_s, h, nq, nk)\n if attention_weights is not None:\n att = torch.cat([att[:, :, :, :nk] * attention_weights, att[:, :, :, nk:]], -1)\n if attention_mask is not None:\n att[:, :, :, :nk] = att[:, :, :, :nk].masked_fill(attention_mask, -np.inf)\n att = torch.softmax(att, -1)\n out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) # (b_s, nq, h*d_v)\n out = self.fc_o(out) # (b_s, nq, d_model)\n return out\n\n\nclass MultiHeadAttention(Module):\n \"\"\"\n Multi-head attention layer with Dropout and Layer Normalization.\n \"\"\"\n\n def __init__(self, d_model, d_k, d_v, h, dropout=.1, identity_map_reordering=False, can_be_stateful=False,\n attention_module=None, attention_module_kwargs=None):\n super(MultiHeadAttention, self).__init__()\n self.identity_map_reordering = identity_map_reordering\n if attention_module is not None:\n if attention_module_kwargs is not None:\n self.attention = attention_module(d_model=d_model, d_k=d_k, d_v=d_v, h=h, **attention_module_kwargs)\n else:\n self.attention = attention_module(d_model=d_model, d_k=d_k, d_v=d_v, h=h)\n else:\n self.attention = ScaledDotProductAttention(d_model=d_model, d_k=d_k, d_v=d_v, h=h)\n self.dropout = nn.Dropout(p=dropout)\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.can_be_stateful = can_be_stateful\n if self.can_be_stateful:\n self.register_state('running_keys', None)\n self.register_state('running_values', None)\n\n def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):\n if self.can_be_stateful and self._is_stateful:\n if self.running_keys is None:\n self.running_keys = keys\n self.running_values = values\n else:\n self.running_keys = torch.cat([self.running_keys, keys], 1)\n self.running_values = torch.cat([self.running_values, values], 1)\n keys = self.running_keys\n values = self.running_values\n\n if self.identity_map_reordering:\n q_norm = self.layer_norm(queries)\n k_norm = self.layer_norm(keys)\n v_norm = self.layer_norm(values)\n out = self.attention(q_norm, k_norm, v_norm, attention_mask, attention_weights)\n out = queries + self.dropout(torch.relu(out))\n else:\n out = self.attention(queries, keys, values, attention_mask, attention_weights)\n out = self.dropout(out)\n out = self.layer_norm(queries + out)\n return out\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.relu",
"torch.nn.init.constant_",
"torch.FloatTensor",
"torch.nn.init.xavier_uniform_",
"torch.softmax",
"torch.nn.init.normal_",
"numpy.sqrt",
"torch.matmul"
]
]
|
haofanwang/cleverhans | [
"1c92191f076d90b9dd0b4e072f819d855ccc9109"
]
| [
"cleverhans/attacks/elastic_net_method.py"
]
| [
"\"\"\"The ElasticNetMethod attack.\n\"\"\"\n\nimport logging\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom cleverhans.attacks.attack import Attack\nfrom cleverhans.compat import reduce_sum, reduce_max\nfrom cleverhans.model import Model, CallableModelWrapper, wrapper_warning_logits\nfrom cleverhans import utils\n\nnp_dtype = np.dtype('float32')\ntf_dtype = tf.as_dtype('float32')\n\n_logger = utils.create_logger(\"cleverhans.attacks.elastic_net_method\")\n_logger.setLevel(logging.INFO)\n\n\ndef ZERO():\n return np.asarray(0., dtype=np_dtype)\n\n\nclass ElasticNetMethod(Attack):\n \"\"\"\n This attack features L1-oriented adversarial examples and includes\n the C&W L2 attack as a special case (when beta is set to 0).\n Adversarial examples attain similar performance to those\n generated by the C&W L2 attack in the white-box case,\n and more importantly, have improved transferability properties\n and complement adversarial training.\n Paper link: https://arxiv.org/abs/1709.04114\n\n :param model: cleverhans.model.Model\n :param sess: tf.Session\n :param dtypestr: dtype of the data\n :param kwargs: passed through to super constructor\n \"\"\"\n\n def __init__(self, model, sess, dtypestr='float32', **kwargs):\n \"\"\"\n Note: the model parameter should be an instance of the\n cleverhans.model.Model abstraction provided by CleverHans.\n \"\"\"\n if not isinstance(model, Model):\n wrapper_warning_logits()\n model = CallableModelWrapper(model, 'logits')\n\n super(ElasticNetMethod, self).__init__(model, sess, dtypestr, **kwargs)\n\n self.feedable_kwargs = ('y', 'y_target')\n\n self.structural_kwargs = [\n 'beta', 'decision_rule', 'batch_size', 'confidence',\n 'targeted', 'learning_rate', 'binary_search_steps',\n 'max_iterations', 'abort_early', 'initial_const', 'clip_min',\n 'clip_max'\n ]\n\n def generate(self, x, **kwargs):\n \"\"\"\n Return a tensor that constructs adversarial examples for the given\n input. Generate uses tf.py_func in order to operate over tensors.\n\n :param x: (required) A tensor with the inputs.\n :param kwargs: See `parse_params`\n \"\"\"\n assert self.sess is not None, \\\n 'Cannot use `generate` when no `sess` was provided'\n self.parse_params(**kwargs)\n\n labels, nb_classes = self.get_or_guess_labels(x, kwargs)\n\n attack = EAD(self.sess, self.model, self.beta,\n self.decision_rule, self.batch_size, self.confidence,\n 'y_target' in kwargs, self.learning_rate,\n self.binary_search_steps, self.max_iterations,\n self.abort_early, self.initial_const, self.clip_min,\n self.clip_max, nb_classes,\n x.get_shape().as_list()[1:])\n\n def ead_wrap(x_val, y_val):\n return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)\n\n wrap = tf.py_func(ead_wrap, [x, labels], self.tf_dtype)\n wrap.set_shape(x.get_shape())\n\n return wrap\n\n def parse_params(self,\n y=None,\n y_target=None,\n beta=1e-2,\n decision_rule='EN',\n batch_size=1,\n confidence=0,\n learning_rate=1e-2,\n binary_search_steps=9,\n max_iterations=1000,\n abort_early=False,\n initial_const=1e-3,\n clip_min=0,\n clip_max=1):\n \"\"\"\n :param y: (optional) A tensor with the true labels for an untargeted\n attack. If None (and y_target is None) then use the\n original labels the classifier assigns.\n :param y_target: (optional) A tensor with the target labels for a\n targeted attack.\n :param beta: Trades off L2 distortion with L1 distortion: higher\n produces examples with lower L1 distortion, at the\n cost of higher L2 (and typically Linf) distortion\n :param decision_rule: EN or L1. 
Select final adversarial example from\n all successful examples based on the least\n elastic-net or L1 distortion criterion.\n :param confidence: Confidence of adversarial examples: higher produces\n examples with larger l2 distortion, but more\n strongly classified as adversarial.\n :param batch_size: Number of attacks to run simultaneously.\n :param learning_rate: The learning rate for the attack algorithm.\n Smaller values produce better results but are\n slower to converge.\n :param binary_search_steps: The number of times we perform binary\n search to find the optimal tradeoff-\n constant between norm of the perturbation\n and confidence of the classification. Set\n 'initial_const' to a large value and fix\n this param to 1 for speed.\n :param max_iterations: The maximum number of iterations. Setting this\n to a larger value will produce lower distortion\n results. Using only a few iterations requires\n a larger learning rate, and will produce larger\n distortion results.\n :param abort_early: If true, allows early abort when the total\n loss starts to increase (greatly speeds up attack,\n but hurts performance, particularly on ImageNet)\n :param initial_const: The initial tradeoff-constant to use to tune the\n relative importance of size of the perturbation\n and confidence of classification.\n If binary_search_steps is large, the initial\n constant is not important. A smaller value of\n this constant gives lower distortion results.\n For computational efficiency, fix\n binary_search_steps to 1 and set this param\n to a large value.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n\n # ignore the y and y_target argument\n self.beta = beta\n self.decision_rule = decision_rule\n self.batch_size = batch_size\n self.confidence = confidence\n self.learning_rate = learning_rate\n self.binary_search_steps = binary_search_steps\n self.max_iterations = max_iterations\n self.abort_early = abort_early\n self.initial_const = initial_const\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n\nclass EAD(object):\n def __init__(self, sess, model, beta, decision_rule, batch_size,\n confidence, targeted, learning_rate, binary_search_steps,\n max_iterations, abort_early, initial_const, clip_min,\n clip_max, num_labels, shape):\n \"\"\"\n EAD Attack\n\n Return a tensor that constructs adversarial examples for the given\n input. Generate uses tf.py_func in order to operate over tensors.\n\n :param sess: a TF session.\n :param model: a cleverhans.model.Model object.\n :param beta: Trades off L2 distortion with L1 distortion: higher\n produces examples with lower L1 distortion, at the\n cost of higher L2 (and typically Linf) distortion\n :param decision_rule: EN or L1. Select final adversarial example from\n all successful examples based on the least\n elastic-net or L1 distortion criterion.\n :param batch_size: Number of attacks to run simultaneously.\n :param confidence: Confidence of adversarial examples: higher produces\n examples with larger l2 distortion, but more\n strongly classified as adversarial.\n :param targeted: boolean controlling the behavior of the adversarial\n examples produced. If set to False, they will be\n misclassified in any wrong class. 
If set to True,\n they will be misclassified in a chosen target class.\n :param learning_rate: The learning rate for the attack algorithm.\n Smaller values produce better results but are\n slower to converge.\n :param binary_search_steps: The number of times we perform binary\n search to find the optimal tradeoff-\n constant between norm of the perturbation\n and confidence of the classification. Set\n 'initial_const' to a large value and fix\n this param to 1 for speed.\n :param max_iterations: The maximum number of iterations. Setting this\n to a larger value will produce lower distortion\n results. Using only a few iterations requires\n a larger learning rate, and will produce larger\n distortion results.\n :param abort_early: If true, allows early abort when the total\n loss starts to increase (greatly speeds up attack,\n but hurts performance, particularly on ImageNet)\n :param initial_const: The initial tradeoff-constant to use to tune the\n relative importance of size of the perturbation\n and confidence of classification.\n If binary_search_steps is large, the initial\n constant is not important. A smaller value of\n this constant gives lower distortion results.\n For computational efficiency, fix\n binary_search_steps to 1 and set this param\n to a large value.\n :param clip_min: (optional float) Minimum input component value.\n :param clip_max: (optional float) Maximum input component value.\n :param num_labels: the number of classes in the model's output.\n :param shape: the shape of the model's input tensor.\n \"\"\"\n\n self.sess = sess\n self.TARGETED = targeted\n self.LEARNING_RATE = learning_rate\n self.MAX_ITERATIONS = max_iterations\n self.BINARY_SEARCH_STEPS = binary_search_steps\n self.ABORT_EARLY = abort_early\n self.CONFIDENCE = confidence\n self.initial_const = initial_const\n self.batch_size = batch_size\n self.clip_min = clip_min\n self.clip_max = clip_max\n self.model = model\n self.decision_rule = decision_rule\n\n self.beta = beta\n self.beta_t = tf.cast(self.beta, tf_dtype)\n\n self.repeat = binary_search_steps >= 10\n\n self.shape = shape = tuple([batch_size] + list(shape))\n\n # these are variables to be more efficient in sending data to tf\n self.timg = tf.Variable(np.zeros(shape), dtype=tf_dtype, name='timg')\n self.newimg = tf.Variable(\n np.zeros(shape), dtype=tf_dtype, name='newimg')\n self.slack = tf.Variable(\n np.zeros(shape), dtype=tf_dtype, name='slack')\n self.tlab = tf.Variable(\n np.zeros((batch_size, num_labels)), dtype=tf_dtype, name='tlab')\n self.const = tf.Variable(\n np.zeros(batch_size), dtype=tf_dtype, name='const')\n\n # and here's what we use to assign them\n self.assign_timg = tf.placeholder(tf_dtype, shape, name='assign_timg')\n self.assign_newimg = tf.placeholder(\n tf_dtype, shape, name='assign_newimg')\n self.assign_slack = tf.placeholder(\n tf_dtype, shape, name='assign_slack')\n self.assign_tlab = tf.placeholder(\n tf_dtype, (batch_size, num_labels), name='assign_tlab')\n self.assign_const = tf.placeholder(\n tf_dtype, [batch_size], name='assign_const')\n\n self.global_step = tf.Variable(0, trainable=False)\n self.global_step_t = tf.cast(self.global_step, tf_dtype)\n\n # Fast Iterative Shrinkage Thresholding\n self.zt = tf.divide(self.global_step_t,\n self.global_step_t + tf.cast(3, tf_dtype))\n cond1 = tf.cast(tf.greater(tf.subtract(self.slack, self.timg),\n self.beta_t), tf_dtype)\n cond2 = tf.cast(tf.less_equal(tf.abs(tf.subtract(self.slack,\n self.timg)),\n self.beta_t), tf_dtype)\n cond3 = 
tf.cast(tf.less(tf.subtract(self.slack, self.timg),\n tf.negative(self.beta_t)), tf_dtype)\n\n upper = tf.minimum(tf.subtract(self.slack, self.beta_t),\n tf.cast(self.clip_max, tf_dtype))\n lower = tf.maximum(tf.add(self.slack, self.beta_t),\n tf.cast(self.clip_min, tf_dtype))\n\n self.assign_newimg = tf.multiply(cond1, upper)\n self.assign_newimg += tf.multiply(cond2, self.timg)\n self.assign_newimg += tf.multiply(cond3, lower)\n\n self.assign_slack = self.assign_newimg\n self.assign_slack += tf.multiply(self.zt,\n self.assign_newimg - self.newimg)\n\n # --------------------------------\n self.setter = tf.assign(self.newimg, self.assign_newimg)\n self.setter_y = tf.assign(self.slack, self.assign_slack)\n\n # prediction BEFORE-SOFTMAX of the model\n self.output = model.get_logits(self.newimg)\n self.output_y = model.get_logits(self.slack)\n\n # distance to the input data\n self.l2dist = reduce_sum(tf.square(self.newimg-self.timg),\n list(range(1, len(shape))))\n self.l2dist_y = reduce_sum(tf.square(self.slack-self.timg),\n list(range(1, len(shape))))\n self.l1dist = reduce_sum(tf.abs(self.newimg-self.timg),\n list(range(1, len(shape))))\n self.l1dist_y = reduce_sum(tf.abs(self.slack-self.timg),\n list(range(1, len(shape))))\n self.elasticdist = self.l2dist + tf.multiply(self.l1dist,\n self.beta_t)\n self.elasticdist_y = self.l2dist_y + tf.multiply(self.l1dist_y,\n self.beta_t)\n if self.decision_rule == 'EN':\n self.crit = self.elasticdist\n self.crit_p = 'Elastic'\n else:\n self.crit = self.l1dist\n self.crit_p = 'L1'\n\n # compute the probability of the label class versus the maximum other\n real = reduce_sum((self.tlab) * self.output, 1)\n real_y = reduce_sum((self.tlab) * self.output_y, 1)\n other = reduce_max((1 - self.tlab) * self.output -\n (self.tlab * 10000), 1)\n other_y = reduce_max((1 - self.tlab) * self.output_y -\n (self.tlab * 10000), 1)\n\n if self.TARGETED:\n # if targeted, optimize for making the other class most likely\n loss1 = tf.maximum(ZERO(), other - real + self.CONFIDENCE)\n loss1_y = tf.maximum(ZERO(), other_y - real_y + self.CONFIDENCE)\n else:\n # if untargeted, optimize for making this class least likely.\n loss1 = tf.maximum(ZERO(), real - other + self.CONFIDENCE)\n loss1_y = tf.maximum(ZERO(), real_y - other_y + self.CONFIDENCE)\n\n # sum up the losses\n self.loss21 = reduce_sum(self.l1dist)\n self.loss21_y = reduce_sum(self.l1dist_y)\n self.loss2 = reduce_sum(self.l2dist)\n self.loss2_y = reduce_sum(self.l2dist_y)\n self.loss1 = reduce_sum(self.const * loss1)\n self.loss1_y = reduce_sum(self.const * loss1_y)\n self.loss_opt = self.loss1_y + self.loss2_y\n self.loss = self.loss1+self.loss2+tf.multiply(self.beta_t, self.loss21)\n\n self.learning_rate = tf.train.polynomial_decay(\n self.LEARNING_RATE,\n self.global_step,\n self.MAX_ITERATIONS,\n 0,\n power=0.5)\n\n # Setup the optimizer and keep track of variables we're creating\n start_vars = set(x.name for x in tf.global_variables())\n optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.train = optimizer.minimize(self.loss_opt,\n var_list=[self.slack],\n global_step=self.global_step)\n end_vars = tf.global_variables()\n new_vars = [x for x in end_vars if x.name not in start_vars]\n\n # these are the variables to initialize when we run\n self.setup = []\n self.setup.append(self.timg.assign(self.assign_timg))\n self.setup.append(self.tlab.assign(self.assign_tlab))\n self.setup.append(self.const.assign(self.assign_const))\n\n var_list = 
[self.global_step]+[self.slack]+[self.newimg]+new_vars\n self.init = tf.variables_initializer(var_list=var_list)\n\n def attack(self, imgs, targets):\n \"\"\"\n Perform the EAD attack on the given instance for the given targets.\n\n If self.targeted is true, then the targets represents the target labels\n If self.targeted is false, then targets are the original class labels\n \"\"\"\n\n batch_size = self.batch_size\n r = []\n for i in range(0, len(imgs) // batch_size):\n _logger.debug(\n (\"Running EAD attack on instance %s of %s\",\n i * batch_size, len(imgs)))\n r.extend(\n self.attack_batch(\n imgs[i * batch_size:(i + 1) * batch_size],\n targets[i * batch_size:(i + 1) * batch_size]))\n if len(imgs) % batch_size != 0:\n last_elements = len(imgs) - (len(imgs) % batch_size)\n _logger.debug(\n (\"Running EAD attack on instance %s of %s\",\n last_elements, len(imgs)))\n temp_imgs = np.zeros((batch_size, ) + imgs.shape[2:])\n temp_targets = np.zeros((batch_size, ) + targets.shape[2:])\n temp_imgs[:(len(imgs) % batch_size)] = imgs[last_elements:]\n temp_targets[:(len(imgs) % batch_size)] = targets[last_elements:]\n temp_data = self.attack_batch(temp_imgs, temp_targets)\n r.extend(temp_data[:(len(imgs) % batch_size)],\n targets[last_elements:])\n return np.array(r)\n\n def attack_batch(self, imgs, labs):\n \"\"\"\n Run the attack on a batch of instance and labels.\n \"\"\"\n\n def compare(x, y):\n if not isinstance(x, (float, int, np.int64)):\n x = np.copy(x)\n if self.TARGETED:\n x[y] -= self.CONFIDENCE\n else:\n x[y] += self.CONFIDENCE\n x = np.argmax(x)\n if self.TARGETED:\n return x == y\n else:\n return x != y\n\n batch_size = self.batch_size\n\n imgs = np.clip(imgs, self.clip_min, self.clip_max)\n\n # set the lower and upper bounds accordingly\n lower_bound = np.zeros(batch_size)\n CONST = np.ones(batch_size) * self.initial_const\n upper_bound = np.ones(batch_size) * 1e10\n\n # placeholders for the best en, score, and instance attack found so far\n o_bestdst = [1e10] * batch_size\n o_bestscore = [-1] * batch_size\n o_bestattack = np.copy(imgs)\n\n for outer_step in range(self.BINARY_SEARCH_STEPS):\n # completely reset the optimizer's internal state.\n self.sess.run(self.init)\n batch = imgs[:batch_size]\n batchlab = labs[:batch_size]\n\n bestdst = [1e10] * batch_size\n bestscore = [-1] * batch_size\n _logger.debug(\" Binary search step %s of %s\",\n outer_step, self.BINARY_SEARCH_STEPS)\n\n # The last iteration (if we run many steps) repeat the search once.\n if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:\n CONST = upper_bound\n\n # set the variables so that we don't have to send them over again\n self.sess.run(\n self.setup, {\n self.assign_timg: batch,\n self.assign_tlab: batchlab,\n self.assign_const: CONST\n })\n self.sess.run(self.setter, {self.assign_newimg: batch})\n self.sess.run(self.setter_y, {self.assign_slack: batch})\n prev = 1e6\n for iteration in range(self.MAX_ITERATIONS):\n # perform the attack\n self.sess.run([self.train])\n self.sess.run([self.setter, self.setter_y])\n l, l2s, l1s, crit, scores, nimg = self.sess.run([self.loss,\n self.l2dist,\n self.l1dist,\n self.crit,\n self.output,\n self.newimg])\n if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:\n _logger.debug((\" Iteration {} of {}: loss={:.3g} \" +\n \"l2={:.3g} l1={:.3g} f={:.3g}\").format(\n iteration, self.MAX_ITERATIONS, l,\n np.mean(l2s), np.mean(l1s),\n np.mean(scores)))\n\n # check if we should abort search if we're getting nowhere.\n if self.ABORT_EARLY and \\\n iteration % 
((self.MAX_ITERATIONS // 10) or 1) == 0:\n if l > prev * .9999:\n msg = \" Failed to make progress; stop early\"\n _logger.debug(msg)\n break\n prev = l\n\n # adjust the best result found so far\n for e, (dst, sc, ii) in enumerate(zip(crit, scores, nimg)):\n lab = np.argmax(batchlab[e])\n if dst < bestdst[e] and compare(sc, lab):\n bestdst[e] = dst\n bestscore[e] = np.argmax(sc)\n if dst < o_bestdst[e] and compare(sc, lab):\n o_bestdst[e] = dst\n o_bestscore[e] = np.argmax(sc)\n o_bestattack[e] = ii\n\n # adjust the constant as needed\n for e in range(batch_size):\n if compare(bestscore[e], np.argmax(batchlab[e])) and \\\n bestscore[e] != -1:\n # success, divide const by two\n upper_bound[e] = min(upper_bound[e], CONST[e])\n if upper_bound[e] < 1e9:\n CONST[e] = (lower_bound[e] + upper_bound[e]) / 2\n else:\n # failure, either multiply by 10 if no solution found yet\n # or do binary search with the known upper bound\n lower_bound[e] = max(lower_bound[e], CONST[e])\n if upper_bound[e] < 1e9:\n CONST[e] = (lower_bound[e] + upper_bound[e]) / 2\n else:\n CONST[e] *= 10\n _logger.debug(\" Successfully generated adversarial examples \" +\n \"on {} of {} instances.\".format(\n sum(upper_bound < 1e9), batch_size))\n o_bestdst = np.array(o_bestdst)\n mean = np.mean(np.sqrt(o_bestdst[o_bestdst < 1e9]))\n _logger.debug(self.crit_p +\n \" Mean successful distortion: {:.4g}\".format(mean))\n\n # return the best solution found\n o_bestdst = np.array(o_bestdst)\n return o_bestattack\n"
]
| [
[
"numpy.copy",
"numpy.mean",
"numpy.dtype",
"tensorflow.cast",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.subtract",
"tensorflow.Variable",
"tensorflow.global_variables",
"tensorflow.negative",
"numpy.argmax",
"numpy.sqrt",
"tensorflow.add",
"tensorflow.abs",
"numpy.array",
"numpy.zeros",
"tensorflow.py_func",
"tensorflow.train.polynomial_decay",
"tensorflow.placeholder",
"numpy.clip",
"tensorflow.multiply",
"tensorflow.assign",
"numpy.asarray",
"tensorflow.as_dtype",
"numpy.ones",
"tensorflow.variables_initializer",
"tensorflow.square"
]
]
|
Chrisa142857/AR_microscope | [
"a3293cbb1301aed801214e06a51be4a49a187dbc"
]
| [
"10X/models.py"
]
| [
"from __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\nfrom utils.parse_config import *\nfrom utils.utils import build_targets, to_cpu, non_max_suppression\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\n\ndef create_modules(module_defs):\n \"\"\"\n Constructs module list of layer blocks from module configuration in module_defs\n \"\"\"\n hyperparams = module_defs.pop(0)\n output_filters = [int(hyperparams[\"channels\"])]\n module_list = nn.ModuleList()\n for module_i, module_def in enumerate(module_defs):\n modules = nn.Sequential()\n\n if module_def[\"type\"] == \"convolutional\":\n bn = int(module_def[\"batch_normalize\"])\n filters = int(module_def[\"filters\"])\n kernel_size = int(module_def[\"size\"])\n pad = (kernel_size - 1) // 2\n modules.add_module(\n f\"conv_{module_i}\",\n nn.Conv2d(\n in_channels=output_filters[-1],\n out_channels=filters,\n kernel_size=kernel_size,\n stride=int(module_def[\"stride\"]),\n padding=pad,\n bias=not bn,\n ),\n )\n if bn:\n modules.add_module(f\"batch_norm_{module_i}\", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))\n if module_def[\"activation\"] == \"leaky\":\n modules.add_module(f\"leaky_{module_i}\", nn.LeakyReLU(0.1))\n\n elif module_def[\"type\"] == \"maxpool\":\n kernel_size = int(module_def[\"size\"])\n stride = int(module_def[\"stride\"])\n if kernel_size == 2 and stride == 1:\n modules.add_module(f\"_debug_padding_{module_i}\", nn.ZeroPad2d((0, 1, 0, 1)))\n maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))\n modules.add_module(f\"maxpool_{module_i}\", maxpool)\n\n elif module_def[\"type\"] == \"upsample\":\n upsample = Upsample(scale_factor=int(module_def[\"stride\"]), mode=\"nearest\")\n modules.add_module(f\"upsample_{module_i}\", upsample)\n\n elif module_def[\"type\"] == \"route\":\n layers = [int(x) for x in module_def[\"layers\"].split(\",\")]\n filters = sum([output_filters[1:][i] for i in layers])\n modules.add_module(f\"route_{module_i}\", EmptyLayer())\n\n elif module_def[\"type\"] == \"shortcut\":\n filters = output_filters[1:][int(module_def[\"from\"])]\n modules.add_module(f\"shortcut_{module_i}\", EmptyLayer())\n\n elif module_def[\"type\"] == \"yolo\":\n anchor_idxs = [int(x) for x in module_def[\"mask\"].split(\",\")]\n # Extract anchors\n anchors = [int(x) for x in module_def[\"anchors\"].split(\",\")]\n anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]\n anchors = [anchors[i] for i in anchor_idxs]\n num_classes = int(module_def[\"classes\"])\n img_size = int(hyperparams[\"height\"])\n # Define detection layer\n yolo_layer = YOLOLayer(anchors, num_classes, img_size)\n modules.add_module(f\"yolo_{module_i}\", yolo_layer)\n # Register module list and number of output filters\n module_list.append(modules)\n output_filters.append(filters)\n\n return hyperparams, module_list\n\n\nclass Upsample(nn.Module):\n \"\"\" nn.Upsample is deprecated \"\"\"\n\n def __init__(self, scale_factor, mode=\"nearest\"):\n super(Upsample, self).__init__()\n self.scale_factor = scale_factor\n self.mode = mode\n\n def forward(self, x):\n x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)\n return x\n\n\nclass EmptyLayer(nn.Module):\n \"\"\"Placeholder for 'route' and 'shortcut' layers\"\"\"\n\n def __init__(self):\n super(EmptyLayer, self).__init__()\n\n\nclass YOLOLayer(nn.Module):\n \"\"\"Detection layer\"\"\"\n\n 
def __init__(self, anchors, num_classes, img_dim=(416, 416)):\n super(YOLOLayer, self).__init__()\n self.anchors = anchors\n self.num_anchors = len(anchors)\n self.num_classes = num_classes\n self.ignore_thres = 0.5\n self.mse_loss = nn.MSELoss()\n self.bce_loss = nn.BCELoss()\n self.obj_scale = 1\n self.noobj_scale = 100\n self.metrics = {}\n self.img_dim = img_dim\n self.grid_size = (0, 0) # grid size\n\n def compute_grid_offsets(self, grid_size, cuda=True):\n self.grid_size = grid_size\n g_x = self.grid_size[1]\n g_y = self.grid_size[0]\n FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n self.stride_w = self.img_dim[1] / g_x\n self.stride_h = self.img_dim[0] / g_y\n # Calculate offsets for each grid\n self.grid_x = torch.arange(g_x).repeat(g_y, 1).view([1, 1, g_y, g_x]).type(FloatTensor)\n self.grid_y = torch.arange(g_y).repeat(g_x, 1).t().view([1, 1, g_y, g_x]).type(FloatTensor)\n self.scaled_anchors = FloatTensor([(a_w / self.stride_w, a_h / self.stride_h) for a_w, a_h in self.anchors])\n self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1))\n self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1))\n\n def forward(self, x, targets=None, img_dim=None):\n\n # Tensors for cuda support\n FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor\n LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor\n ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor\n\n self.img_dim = img_dim\n num_samples = x.size(0)\n grid_size = [x.size(2), x.size(3)]\n\n prediction = (\n x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size[0], grid_size[1])\n .permute(0, 1, 3, 4, 2)\n .contiguous()\n )\n\n # Get outputs\n x = torch.sigmoid(prediction[..., 0]) # Center x\n y = torch.sigmoid(prediction[..., 1]) # Center y\n w = prediction[..., 2] # Width\n h = prediction[..., 3] # Height\n pred_conf = torch.sigmoid(prediction[..., 4]) # Conf\n pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.\n\n # If grid size does not match current we compute new offsets\n if grid_size != self.grid_size:\n self.compute_grid_offsets(grid_size, cuda=x.is_cuda)\n\n # Add offset and scale with anchors\n pred_boxes = FloatTensor(prediction[..., :4].shape)\n pred_boxes[..., 0] = (x.data + self.grid_x) * self.stride_w\n pred_boxes[..., 1] = (y.data + self.grid_y) * self.stride_h\n pred_boxes[..., 2] = (torch.exp(w.data) * self.anchor_w) * self.stride_w\n pred_boxes[..., 3] = (torch.exp(h.data) * self.anchor_h) * self.stride_h\n\n output = torch.cat(\n (\n pred_boxes.view(num_samples, -1, 4),\n pred_conf.view(num_samples, -1, 1),\n pred_cls.view(num_samples, -1, self.num_classes),\n ),\n -1,\n )\n\n if targets is None:\n return output, 0\n else:\n iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets(\n pred_boxes=pred_boxes,\n pred_cls=pred_cls,\n target=targets,\n anchors=self.scaled_anchors,\n ignore_thres=self.ignore_thres,\n )\n\n # Loss : Mask outputs to ignore non-existing objects (except with conf. 
loss)\n loss_x = self.mse_loss(x[obj_mask], tx[obj_mask])\n loss_y = self.mse_loss(y[obj_mask], ty[obj_mask])\n loss_w = self.mse_loss(w[obj_mask], tw[obj_mask])\n loss_h = self.mse_loss(h[obj_mask], th[obj_mask])\n loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask])\n loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask])\n loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj\n loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask])\n total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls\n\n # Metrics\n cls_acc = 100 * class_mask[obj_mask].mean()\n conf_obj = pred_conf[obj_mask].mean()\n conf_noobj = pred_conf[noobj_mask].mean()\n conf50 = (pred_conf > 0.5).float()\n iou50 = (iou_scores > 0.5).float()\n iou75 = (iou_scores > 0.75).float()\n detected_mask = conf50 * class_mask * tconf\n precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16)\n recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16)\n recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16)\n\n self.metrics = {\n \"loss\": to_cpu(total_loss).item(),\n \"x\": to_cpu(loss_x).item(),\n \"y\": to_cpu(loss_y).item(),\n \"w\": to_cpu(loss_w).item(),\n \"h\": to_cpu(loss_h).item(),\n \"conf\": to_cpu(loss_conf).item(),\n \"cls\": to_cpu(loss_cls).item(),\n \"cls_acc\": to_cpu(cls_acc).item(),\n \"recall50\": to_cpu(recall50).item(),\n \"recall75\": to_cpu(recall75).item(),\n \"precision\": to_cpu(precision).item(),\n \"conf_obj\": to_cpu(conf_obj).item(),\n \"conf_noobj\": to_cpu(conf_noobj).item(),\n \"grid_size\": grid_size,\n }\n\n return output, total_loss\n\n\nclass Darknet(nn.Module):\n \"\"\"YOLOv3 object detection model\"\"\"\n\n def __init__(self, config_path, img_size=(416, 416), use_pad=False):\n super(Darknet, self).__init__()\n self.module_defs = parse_model_config(config_path)\n self.hyperparams, self.module_list = create_modules(self.module_defs)\n self.yolo_layers = [layer[0] for layer in self.module_list if hasattr(layer[0], \"metrics\")]\n self.img_size = img_size\n self.seen = 0\n self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)\n self.use_pad = use_pad\n\n def forward(self, x, targets=None):\n img_dim = x.shape[2:4]\n loss = 0\n layer_outputs, yolo_outputs = [], []\n yolo_layer_flag = 0\n for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):\n if module_def[\"type\"] in [\"convolutional\", \"upsample\", \"maxpool\"]:\n x = module(x)\n yolo_layer_flag = 0\n elif module_def[\"type\"] == \"route\":\n cat_layers = []\n pre_shape = None\n for layer_i in module_def[\"layers\"].split(\",\")[::-1]:\n layer = layer_outputs[int(layer_i)]\n # shape = layer.shape[-1]\n # if self.use_pad and pre_shape is not None:\n # if shape != pre_shape:\n # layer = F.pad(layer, (0, int(pre_shape-shape)))\n # pre_shape = shape\n cat_layers.append(layer)\n try:\n x = torch.cat(cat_layers, 1)\n except:\n print(cat_layers[0].shape,cat_layers[1].shape)\n exit()\n yolo_layer_flag = 0\n elif module_def[\"type\"] == \"shortcut\":\n layer_i = int(module_def[\"from\"])\n x = layer_outputs[-1] + layer_outputs[layer_i]\n yolo_layer_flag = 0\n elif module_def[\"type\"] == \"yolo\":\n x, layer_loss = module[0](x, targets, img_dim)\n loss += layer_loss\n yolo_outputs.append(x)\n yolo_layer_flag = 1\n # if not self.use_pad:\n # if (x.shape[-1]/2) % 2 != 0 and yolo_layer_flag == 0:\n # x = x[..., :-2]\n layer_outputs.append(x)\n yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))\n 
return yolo_outputs if targets is None else (loss, yolo_outputs)\n\n def load_darknet_weights(self, weights_path):\n \"\"\"Parses and loads the weights stored in 'weights_path'\"\"\"\n\n # Open the weights file\n with open(weights_path, \"rb\") as f:\n header = np.fromfile(f, dtype=np.int32, count=5) # First five are header values\n self.header_info = header # Needed to write header when saving weights\n self.seen = header[3] # number of images seen during training\n weights = np.fromfile(f, dtype=np.float32) # The rest are weights\n\n # Establish cutoff for loading backbone weights\n cutoff = None\n if \"darknet53.conv.74\" in weights_path:\n cutoff = 75\n\n ptr = 0\n for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):\n if i == cutoff:\n break\n if module_def[\"type\"] == \"convolutional\":\n conv_layer = module[0]\n if module_def[\"batch_normalize\"]:\n # Load BN bias, weights, running mean and running variance\n bn_layer = module[1]\n num_b = bn_layer.bias.numel() # Number of biases\n # Bias\n bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)\n bn_layer.bias.data.copy_(bn_b)\n ptr += num_b\n # Weight\n bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)\n bn_layer.weight.data.copy_(bn_w)\n ptr += num_b\n # Running Mean\n bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)\n bn_layer.running_mean.data.copy_(bn_rm)\n ptr += num_b\n # Running Var\n bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)\n bn_layer.running_var.data.copy_(bn_rv)\n ptr += num_b\n else:\n # Load conv. bias\n num_b = conv_layer.bias.numel()\n conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)\n conv_layer.bias.data.copy_(conv_b)\n ptr += num_b\n # Load conv. weights\n num_w = conv_layer.weight.numel()\n conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)\n conv_layer.weight.data.copy_(conv_w)\n ptr += num_w\n\n def save_darknet_weights(self, path, cutoff=-1):\n \"\"\"\n @:param path - path of the new weights file\n @:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)\n \"\"\"\n fp = open(path, \"wb\")\n self.header_info[3] = self.seen\n self.header_info.tofile(fp)\n\n # Iterate through layers\n for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):\n if module_def[\"type\"] == \"convolutional\":\n conv_layer = module[0]\n # If batch norm, load bn first\n if module_def[\"batch_normalize\"]:\n bn_layer = module[1]\n bn_layer.bias.data.cpu().numpy().tofile(fp)\n bn_layer.weight.data.cpu().numpy().tofile(fp)\n bn_layer.running_mean.data.cpu().numpy().tofile(fp)\n bn_layer.running_var.data.cpu().numpy().tofile(fp)\n # Load conv bias\n else:\n conv_layer.bias.data.cpu().numpy().tofile(fp)\n # Load conv weights\n conv_layer.weight.data.cpu().numpy().tofile(fp)\n\n fp.close()\n"
]
| [
[
"torch.sigmoid",
"numpy.array",
"torch.cat",
"torch.nn.MSELoss",
"torch.nn.ModuleList",
"torch.arange",
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.ZeroPad2d",
"torch.from_numpy",
"torch.nn.BCELoss",
"numpy.fromfile",
"torch.exp",
"torch.sum"
]
]
|
danilopeixoto/feast | [
"57d134355364654a2275b477b3b82b149f0779ca"
]
| [
"sdk/python/tests/integration/registration/test_registry.py"
]
| [
"# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\nfrom datetime import timedelta\nfrom tempfile import mkstemp\n\nimport pandas as pd\nimport pytest\nfrom pytest_lazyfixture import lazy_fixture\n\nfrom feast import FileSource\nfrom feast.data_format import ParquetFormat\nfrom feast.entity import Entity\nfrom feast.feature import Feature\nfrom feast.feature_view import FeatureView\nfrom feast.on_demand_feature_view import RequestDataSource, on_demand_feature_view\nfrom feast.protos.feast.types import Value_pb2 as ValueProto\nfrom feast.registry import Registry\nfrom feast.repo_config import RegistryConfig\nfrom feast.value_type import ValueType\n\n\[email protected]\ndef local_registry():\n fd, registry_path = mkstemp()\n registry_config = RegistryConfig(path=registry_path, cache_ttl_seconds=600)\n return Registry(registry_config, None)\n\n\[email protected]\ndef gcs_registry():\n from google.cloud import storage\n\n storage_client = storage.Client()\n bucket_name = f\"feast-registry-test-{int(time.time() * 1000)}\"\n bucket = storage_client.bucket(bucket_name)\n bucket = storage_client.create_bucket(bucket)\n bucket.add_lifecycle_delete_rule(\n age=14\n ) # delete buckets automatically after 14 days\n bucket.patch()\n bucket.blob(\"registry.db\")\n registry_config = RegistryConfig(\n path=f\"gs://{bucket_name}/registry.db\", cache_ttl_seconds=600\n )\n return Registry(registry_config, None)\n\n\[email protected]\ndef s3_registry():\n registry_config = RegistryConfig(\n path=f\"s3://feast-integration-tests/registries/{int(time.time() * 1000)}/registry.db\",\n cache_ttl_seconds=600,\n )\n return Registry(registry_config, None)\n\n\[email protected](\n \"test_registry\", [lazy_fixture(\"local_registry\")],\n)\ndef test_apply_entity_success(test_registry):\n entity = Entity(\n name=\"driver_car_id\",\n description=\"Car driver id\",\n value_type=ValueType.STRING,\n tags={\"team\": \"matchmaking\"},\n )\n\n project = \"project\"\n\n # Register Entity\n test_registry.apply_entity(entity, project)\n\n entities = test_registry.list_entities(project)\n\n entity = entities[0]\n assert (\n len(entities) == 1\n and entity.name == \"driver_car_id\"\n and entity.value_type == ValueType(ValueProto.ValueType.STRING)\n and entity.description == \"Car driver id\"\n and \"team\" in entity.tags\n and entity.tags[\"team\"] == \"matchmaking\"\n )\n\n entity = test_registry.get_entity(\"driver_car_id\", project)\n assert (\n entity.name == \"driver_car_id\"\n and entity.value_type == ValueType(ValueProto.ValueType.STRING)\n and entity.description == \"Car driver id\"\n and \"team\" in entity.tags\n and entity.tags[\"team\"] == \"matchmaking\"\n )\n\n test_registry.delete_entity(\"driver_car_id\", project)\n entities = test_registry.list_entities(project)\n assert len(entities) == 0\n\n test_registry.teardown()\n\n # Will try to reload registry, which will fail because the file has been deleted\n with pytest.raises(FileNotFoundError):\n 
test_registry._get_registry_proto()\n\n\[email protected]\[email protected](\n \"test_registry\", [lazy_fixture(\"gcs_registry\"), lazy_fixture(\"s3_registry\")],\n)\ndef test_apply_entity_integration(test_registry):\n entity = Entity(\n name=\"driver_car_id\",\n description=\"Car driver id\",\n value_type=ValueType.STRING,\n tags={\"team\": \"matchmaking\"},\n )\n\n project = \"project\"\n\n # Register Entity\n test_registry.apply_entity(entity, project)\n\n entities = test_registry.list_entities(project)\n\n entity = entities[0]\n assert (\n len(entities) == 1\n and entity.name == \"driver_car_id\"\n and entity.value_type == ValueType(ValueProto.ValueType.STRING)\n and entity.description == \"Car driver id\"\n and \"team\" in entity.tags\n and entity.tags[\"team\"] == \"matchmaking\"\n )\n\n entity = test_registry.get_entity(\"driver_car_id\", project)\n assert (\n entity.name == \"driver_car_id\"\n and entity.value_type == ValueType(ValueProto.ValueType.STRING)\n and entity.description == \"Car driver id\"\n and \"team\" in entity.tags\n and entity.tags[\"team\"] == \"matchmaking\"\n )\n\n test_registry.teardown()\n\n # Will try to reload registry, which will fail because the file has been deleted\n with pytest.raises(FileNotFoundError):\n test_registry._get_registry_proto()\n\n\[email protected](\n \"test_registry\", [lazy_fixture(\"local_registry\")],\n)\ndef test_apply_feature_view_success(test_registry):\n # Create Feature Views\n batch_source = FileSource(\n file_format=ParquetFormat(),\n path=\"file://feast/*\",\n event_timestamp_column=\"ts_col\",\n created_timestamp_column=\"timestamp\",\n date_partition_column=\"date_partition_col\",\n )\n\n fv1 = FeatureView(\n name=\"my_feature_view_1\",\n features=[\n Feature(name=\"fs1_my_feature_1\", dtype=ValueType.INT64),\n Feature(name=\"fs1_my_feature_2\", dtype=ValueType.STRING),\n Feature(name=\"fs1_my_feature_3\", dtype=ValueType.STRING_LIST),\n Feature(name=\"fs1_my_feature_4\", dtype=ValueType.BYTES_LIST),\n ],\n entities=[\"fs1_my_entity_1\"],\n tags={\"team\": \"matchmaking\"},\n batch_source=batch_source,\n ttl=timedelta(minutes=5),\n )\n\n project = \"project\"\n\n # Register Feature View\n test_registry.apply_feature_view(fv1, project)\n\n feature_views = test_registry.list_feature_views(project)\n\n # List Feature Views\n assert (\n len(feature_views) == 1\n and feature_views[0].name == \"my_feature_view_1\"\n and feature_views[0].features[0].name == \"fs1_my_feature_1\"\n and feature_views[0].features[0].dtype == ValueType.INT64\n and feature_views[0].features[1].name == \"fs1_my_feature_2\"\n and feature_views[0].features[1].dtype == ValueType.STRING\n and feature_views[0].features[2].name == \"fs1_my_feature_3\"\n and feature_views[0].features[2].dtype == ValueType.STRING_LIST\n and feature_views[0].features[3].name == \"fs1_my_feature_4\"\n and feature_views[0].features[3].dtype == ValueType.BYTES_LIST\n and feature_views[0].entities[0] == \"fs1_my_entity_1\"\n )\n\n feature_view = test_registry.get_feature_view(\"my_feature_view_1\", project)\n assert (\n feature_view.name == \"my_feature_view_1\"\n and feature_view.features[0].name == \"fs1_my_feature_1\"\n and feature_view.features[0].dtype == ValueType.INT64\n and feature_view.features[1].name == \"fs1_my_feature_2\"\n and feature_view.features[1].dtype == ValueType.STRING\n and feature_view.features[2].name == \"fs1_my_feature_3\"\n and feature_view.features[2].dtype == ValueType.STRING_LIST\n and feature_view.features[3].name == \"fs1_my_feature_4\"\n and 
feature_view.features[3].dtype == ValueType.BYTES_LIST\n and feature_view.entities[0] == \"fs1_my_entity_1\"\n )\n\n test_registry.delete_feature_view(\"my_feature_view_1\", project)\n feature_views = test_registry.list_feature_views(project)\n assert len(feature_views) == 0\n\n test_registry.teardown()\n\n # Will try to reload registry, which will fail because the file has been deleted\n with pytest.raises(FileNotFoundError):\n test_registry._get_registry_proto()\n\n\[email protected](\n \"test_registry\", [lazy_fixture(\"local_registry\")],\n)\ndef test_modify_feature_views_success(test_registry):\n # Create Feature Views\n batch_source = FileSource(\n file_format=ParquetFormat(),\n path=\"file://feast/*\",\n event_timestamp_column=\"ts_col\",\n created_timestamp_column=\"timestamp\",\n date_partition_column=\"date_partition_col\",\n )\n\n request_source = RequestDataSource(\n name=\"request_source\", schema={\"my_input_1\": ValueType.INT32}\n )\n\n fv1 = FeatureView(\n name=\"my_feature_view_1\",\n features=[Feature(name=\"fs1_my_feature_1\", dtype=ValueType.INT64)],\n entities=[\"fs1_my_entity_1\"],\n tags={\"team\": \"matchmaking\"},\n batch_source=batch_source,\n ttl=timedelta(minutes=5),\n )\n\n @on_demand_feature_view(\n features=[\n Feature(name=\"odfv1_my_feature_1\", dtype=ValueType.STRING),\n Feature(name=\"odfv1_my_feature_2\", dtype=ValueType.INT32),\n ],\n inputs={\"request_source\": request_source},\n )\n def odfv1(feature_df: pd.DataFrame) -> pd.DataFrame:\n data = pd.DataFrame()\n data[\"odfv1_my_feature_1\"] = feature_df[\"my_input_1\"].astype(\"category\")\n data[\"odfv1_my_feature_2\"] = feature_df[\"my_input_1\"].astype(\"int32\")\n return data\n\n project = \"project\"\n\n # Register Feature Views\n test_registry.apply_feature_view(odfv1, project)\n test_registry.apply_feature_view(fv1, project)\n\n # Modify odfv by changing a single feature dtype\n @on_demand_feature_view(\n features=[\n Feature(name=\"odfv1_my_feature_1\", dtype=ValueType.FLOAT),\n Feature(name=\"odfv1_my_feature_2\", dtype=ValueType.INT32),\n ],\n inputs={\"request_source\": request_source},\n )\n def odfv1(feature_df: pd.DataFrame) -> pd.DataFrame:\n data = pd.DataFrame()\n data[\"odfv1_my_feature_1\"] = feature_df[\"my_input_1\"].astype(\"float\")\n data[\"odfv1_my_feature_2\"] = feature_df[\"my_input_1\"].astype(\"int32\")\n return data\n\n # Apply the modified odfv\n test_registry.apply_feature_view(odfv1, project)\n\n # Check odfv\n on_demand_feature_views = test_registry.list_on_demand_feature_views(project)\n\n assert (\n len(on_demand_feature_views) == 1\n and on_demand_feature_views[0].name == \"odfv1\"\n and on_demand_feature_views[0].features[0].name == \"odfv1_my_feature_1\"\n and on_demand_feature_views[0].features[0].dtype == ValueType.FLOAT\n and on_demand_feature_views[0].features[1].name == \"odfv1_my_feature_2\"\n and on_demand_feature_views[0].features[1].dtype == ValueType.INT32\n )\n request_schema = on_demand_feature_views[0].get_request_data_schema()\n assert (\n list(request_schema.keys())[0] == \"my_input_1\"\n and list(request_schema.values())[0] == ValueType.INT32\n )\n\n feature_view = test_registry.get_on_demand_feature_view(\"odfv1\", project)\n assert (\n feature_view.name == \"odfv1\"\n and feature_view.features[0].name == \"odfv1_my_feature_1\"\n and feature_view.features[0].dtype == ValueType.FLOAT\n and feature_view.features[1].name == \"odfv1_my_feature_2\"\n and feature_view.features[1].dtype == ValueType.INT32\n )\n request_schema = 
feature_view.get_request_data_schema()\n assert (\n list(request_schema.keys())[0] == \"my_input_1\"\n and list(request_schema.values())[0] == ValueType.INT32\n )\n\n # Make sure fv1 is untouched\n feature_views = test_registry.list_feature_views(project)\n\n # List Feature Views\n assert (\n len(feature_views) == 1\n and feature_views[0].name == \"my_feature_view_1\"\n and feature_views[0].features[0].name == \"fs1_my_feature_1\"\n and feature_views[0].features[0].dtype == ValueType.INT64\n and feature_views[0].entities[0] == \"fs1_my_entity_1\"\n )\n\n feature_view = test_registry.get_feature_view(\"my_feature_view_1\", project)\n assert (\n feature_view.name == \"my_feature_view_1\"\n and feature_view.features[0].name == \"fs1_my_feature_1\"\n and feature_view.features[0].dtype == ValueType.INT64\n and feature_view.entities[0] == \"fs1_my_entity_1\"\n )\n\n test_registry.teardown()\n\n # Will try to reload registry, which will fail because the file has been deleted\n with pytest.raises(FileNotFoundError):\n test_registry._get_registry_proto()\n\n\[email protected]\[email protected](\n \"test_registry\", [lazy_fixture(\"gcs_registry\"), lazy_fixture(\"s3_registry\")],\n)\ndef test_apply_feature_view_integration(test_registry):\n # Create Feature Views\n batch_source = FileSource(\n file_format=ParquetFormat(),\n path=\"file://feast/*\",\n event_timestamp_column=\"ts_col\",\n created_timestamp_column=\"timestamp\",\n date_partition_column=\"date_partition_col\",\n )\n\n fv1 = FeatureView(\n name=\"my_feature_view_1\",\n features=[\n Feature(name=\"fs1_my_feature_1\", dtype=ValueType.INT64),\n Feature(name=\"fs1_my_feature_2\", dtype=ValueType.STRING),\n Feature(name=\"fs1_my_feature_3\", dtype=ValueType.STRING_LIST),\n Feature(name=\"fs1_my_feature_4\", dtype=ValueType.BYTES_LIST),\n ],\n entities=[\"fs1_my_entity_1\"],\n tags={\"team\": \"matchmaking\"},\n batch_source=batch_source,\n ttl=timedelta(minutes=5),\n )\n\n project = \"project\"\n\n # Register Feature View\n test_registry.apply_feature_view(fv1, project)\n\n feature_views = test_registry.list_feature_views(project)\n\n # List Feature Views\n assert (\n len(feature_views) == 1\n and feature_views[0].name == \"my_feature_view_1\"\n and feature_views[0].features[0].name == \"fs1_my_feature_1\"\n and feature_views[0].features[0].dtype == ValueType.INT64\n and feature_views[0].features[1].name == \"fs1_my_feature_2\"\n and feature_views[0].features[1].dtype == ValueType.STRING\n and feature_views[0].features[2].name == \"fs1_my_feature_3\"\n and feature_views[0].features[2].dtype == ValueType.STRING_LIST\n and feature_views[0].features[3].name == \"fs1_my_feature_4\"\n and feature_views[0].features[3].dtype == ValueType.BYTES_LIST\n and feature_views[0].entities[0] == \"fs1_my_entity_1\"\n )\n\n feature_view = test_registry.get_feature_view(\"my_feature_view_1\", project)\n assert (\n feature_view.name == \"my_feature_view_1\"\n and feature_view.features[0].name == \"fs1_my_feature_1\"\n and feature_view.features[0].dtype == ValueType.INT64\n and feature_view.features[1].name == \"fs1_my_feature_2\"\n and feature_view.features[1].dtype == ValueType.STRING\n and feature_view.features[2].name == \"fs1_my_feature_3\"\n and feature_view.features[2].dtype == ValueType.STRING_LIST\n and feature_view.features[3].name == \"fs1_my_feature_4\"\n and feature_view.features[3].dtype == ValueType.BYTES_LIST\n and feature_view.entities[0] == \"fs1_my_entity_1\"\n )\n\n test_registry.delete_feature_view(\"my_feature_view_1\", project)\n 
feature_views = test_registry.list_feature_views(project)\n assert len(feature_views) == 0\n\n test_registry.teardown()\n\n # Will try to reload registry, which will fail because the file has been deleted\n with pytest.raises(FileNotFoundError):\n test_registry._get_registry_proto()\n\n\ndef test_commit():\n fd, registry_path = mkstemp()\n registry_config = RegistryConfig(path=registry_path, cache_ttl_seconds=600)\n test_registry = Registry(registry_config, None)\n\n entity = Entity(\n name=\"driver_car_id\",\n description=\"Car driver id\",\n value_type=ValueType.STRING,\n tags={\"team\": \"matchmaking\"},\n )\n\n project = \"project\"\n\n # Register Entity without commiting\n test_registry.apply_entity(entity, project, commit=False)\n\n # Retrieving the entity should still succeed\n entities = test_registry.list_entities(project, allow_cache=True)\n\n entity = entities[0]\n assert (\n len(entities) == 1\n and entity.name == \"driver_car_id\"\n and entity.value_type == ValueType(ValueProto.ValueType.STRING)\n and entity.description == \"Car driver id\"\n and \"team\" in entity.tags\n and entity.tags[\"team\"] == \"matchmaking\"\n )\n\n entity = test_registry.get_entity(\"driver_car_id\", project, allow_cache=True)\n assert (\n entity.name == \"driver_car_id\"\n and entity.value_type == ValueType(ValueProto.ValueType.STRING)\n and entity.description == \"Car driver id\"\n and \"team\" in entity.tags\n and entity.tags[\"team\"] == \"matchmaking\"\n )\n\n # Create new registry that points to the same store\n registry_with_same_store = Registry(registry_config, None)\n\n # Retrieving the entity should fail since the store is empty\n entities = registry_with_same_store.list_entities(project)\n assert len(entities) == 0\n\n # commit from the original registry\n test_registry.commit()\n\n # Reconstruct the new registry in order to read the newly written store\n registry_with_same_store = Registry(registry_config, None)\n\n # Retrieving the entity should now succeed\n entities = registry_with_same_store.list_entities(project)\n\n entity = entities[0]\n assert (\n len(entities) == 1\n and entity.name == \"driver_car_id\"\n and entity.value_type == ValueType(ValueProto.ValueType.STRING)\n and entity.description == \"Car driver id\"\n and \"team\" in entity.tags\n and entity.tags[\"team\"] == \"matchmaking\"\n )\n\n entity = test_registry.get_entity(\"driver_car_id\", project)\n assert (\n entity.name == \"driver_car_id\"\n and entity.value_type == ValueType(ValueProto.ValueType.STRING)\n and entity.description == \"Car driver id\"\n and \"team\" in entity.tags\n and entity.tags[\"team\"] == \"matchmaking\"\n )\n\n test_registry.teardown()\n\n # Will try to reload registry, which will fail because the file has been deleted\n with pytest.raises(FileNotFoundError):\n test_registry._get_registry_proto()\n"
]
| [
[
"pandas.DataFrame"
]
]
|
MKFMIKU/PFFNet | [
"e506010a7cf00a32e77681845bdaf78ba88b027d"
]
| [
"psnr_ssim.py"
]
| [
"#!/usr/bin/env python\nimport argparse\nimport utils\nfrom PIL import Image\nimport numpy as np\nimport scipy.misc\n\n\nparser = argparse.ArgumentParser(description=\"PyTorch DeepDehazing\")\nparser.add_argument(\"--data\", type=str, default=\"output\", help=\"path to load data images\")\nparser.add_argument(\"--gt\", type=str, help=\"path to load gt images\")\n\nopt = parser.parse_args()\nprint(opt)\n\ndatas = utils.load_all_image(opt.data)\ngts = utils.load_all_image(opt.gt)\n\ndatas.sort()\ngts.sort()\n\ndef output_psnr_mse(img_orig, img_out):\n squared_error = np.square(img_orig - img_out)\n mse = np.mean(squared_error)\n psnr = 10 * np.log10(1.0 / mse)\n return psnr\n\npsnrs = []\nfor i in range(len(datas)):\n data = scipy.misc.fromimage(Image.open(datas[i])).astype(float)/255.0\n gt = scipy.misc.fromimage(Image.open(gts[i])).astype(float)/255.0\n\n psnr = output_psnr_mse(data, gt)\n psnrs.append(psnr)\nprint(\"PSNR:\", np.mean(psnrs))\n\n\"\"\"\n75 pth\nrp: 6 PSNR: 22.6392712102\n\n\"\"\"\n"
]
| [
[
"numpy.square",
"numpy.log10",
"numpy.mean"
]
]
|
czbiohub/dotblotr | [
"42418e168e436b935be41638072ebc55a9c2cfbe"
]
| [
"dotblotr/viz/qc.py"
]
| [
"from os import path\nfrom typing import Tuple\n\nimport cv2\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom skimage import io\n\n\ndef plot_detected_dots(\n results_table:pd.DataFrame,\n strip_id:str,\n image_directory:str,\n image_extension:str = '.tif',\n font_scale:float = 1,\n font_color: Tuple[int, int, int] = (255, 255, 255),\n line_thickness:int = 2\n\n) -> Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]:\n \"\"\" plot the detected spot labels overlaid on the strip image.\n\n Parameters\n ----------\n results_table : pd.DataFrame\n the table that is the output of dotblotr.process.process_dir()\n strip_id : str\n the identifier for the strip to be plotted. This is the value from\n the strip_id column of the results_table. strip_id should be passed as a string.\n image_directory : str\n the path to the directory containing the strip images.\n This is the same as used in process_dir() to generate the results_table\n image_extension : str\n The image extension for the strip image. Default value is '.tif'\n font_scale : float\n The multiplier for setting the font size.\n font_color : Tuple[int, int, int]\n RGB tuple for the font color. Default value is (255, 255, 255).\n line_thickness : int\n The line thickness is pixels. Default value is 2.\n\n Returns\n -------\n f : matplotlib.figure.Figure\n matplotlib figure handle for the resulting plot\n ax : matplotlib.axes._subplots.AxesSubplot\n matplot axis handle for the resulting plot\n\n \"\"\"\n\n im_name = strip_id + image_extension\n image_path = path.join(image_directory, im_name)\n im = io.imread(image_path)\n\n strip_results = results_table.loc[results_table['strip_id'] == strip_id]\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n line_type = cv2.LINE_AA\n\n for i, row in strip_results.iterrows():\n well_name = row['dot_name']\n x = row['x']\n y = row['y']\n text_size, baseline = cv2.getTextSize(\n text=well_name,\n fontFace=font,\n fontScale=font_scale,\n thickness=line_thickness\n )\n\n x_centered = int(x - (text_size[0] / 2))\n y_centered = int(y + (text_size[1] / 2))\n\n cv2.putText(\n img=im,\n text=well_name,\n org=(x_centered, y_centered),\n fontFace=font,\n fontScale=font_scale,\n color=font_color,\n lineType=line_type,\n thickness=line_thickness\n )\n\n f, ax = plt.subplots(figsize=(15, 12))\n ax.imshow(im)\n\n return f, ax\n"
]
| [
[
"matplotlib.pyplot.subplots"
]
]
|
ansj11/NormalizeConvBNReLU | [
"f773f2ef65cc3c23786376897ae83347fffa0572"
]
| [
"main.py"
]
| [
"'''Train CIFAR10 with PyTorch.'''\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\nfrom models import *\nfrom utils import progress_bar\nfrom tensorboardX import SummaryWriter\nfrom torchvision.utils import make_grid\nfrom torch.optim import lr_scheduler\nfrom IPython import embed\n\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--lr', default=0.01, type=float, help='learning rate')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nparser.add_argument('--d', dest='device', help='factor of regularization loss', default=0, type=int)\nargs = parser.parse_args()\n\ndevice = args.device if torch.cuda.is_available() else 'cpu'\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n# Data\nprint('==> Preparing data..')\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntrainset = torchvision.datasets.CIFAR10(root='/dataset/cifar10', train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(root='/dataset/cifar10', train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n# Model\nprint('==> Building model..')\n# net = VGG('VGG19')\n# net = ResNet18()\n# net = PreActResNet18()\n# net = GoogLeNet()\n# net = DenseNet121()\n# net = ResNeXt29_2x64d()\n# net = MobileNet()\n# net = MobileNetV2()\n# net = DPN92()\n# net = ShuffleNetG2()\n# net = SENet18()\n# net = ShuffleNetV2(1)\n# net = EfficientNetB0()\nnet = VGG2('VGG16')\nnet = net.cuda(device)\nif device == 'cuda':\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\nif args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./checkpoint/vgg2.pth')\n net.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\nscheduler = lr_scheduler.CosineAnnealingLR(optimizer, 10)\nwriter = SummaryWriter('logs/vgg2')\n\n# Training\ndef train(epoch):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.cuda(device), targets.cuda(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % 
(train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n writer.add_scalar('train/loss', train_loss/(batch_idx+1), epoch)\n writer.add_scalar('train/acc', 100.*correct/total, epoch)\n\ndef test(epoch):\n global best_acc\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.cuda(device), targets.cuda(device)\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n writer.add_scalar('test/loss', test_loss/(batch_idx+1), epoch)\n writer.add_scalar('test/acc', 100.*correct/total, epoch)\n wrong = predicted[predicted != targets]\n for i in range(10):\n images = inputs[predicted != targets][wrong==i]\n try:\n writer.add_image(str(i), make_grid(images, normalize=True,scale_each=True), epoch)\n except:\n continue\n\n # Save checkpoint.\n acc = 100.*correct/total\n if acc > best_acc:\n print('Saving..')\n state = {\n 'net': net.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/vgg2.pth')\n best_acc = acc\n\n\nfor epoch in range(start_epoch, start_epoch+200):\n train(epoch)\n test(epoch)\n scheduler.step()\n"
]
| [
[
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad",
"torch.save",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
]
]
|
mseitzer/DLTK | [
"3237aa6c7ed63aa177ca90eafcc076d144155a34"
]
| [
"dltk/core/modules/activations.py"
]
| [
"from __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom dltk.core.modules.base import AbstractModule\n\n\ndef leaky_relu(x, leakiness):\n \"\"\" Leaky RELU\n\n Parameters\n ----------\n x : tf.Tensor\n input tensor\n leakiness : float\n leakiness of RELU\n\n Returns\n -------\n tf.Tensor\n Tensor with applied leaky RELU\n\n \"\"\"\n return tf.maximum(x, leakiness * x)\n\nclass PReLU(AbstractModule):\n def __init__(self, name='prelu'):\n self._rank = None\n self._shape = None\n super(PReLU, self).__init__(name)\n\n def _build(self, inp):\n if self._rank is None:\n self._rank = len(inp.get_shape().as_list())\n\n assert self._rank == len(inp.get_shape().as_list()), 'Module was initilialised for a different input'\n if self._rank > 2:\n if self._shape is None:\n self._shape = [inp.get_shape().as_list()[-1]]\n assert self._shape[0] == inp.get_shape().as_list()[-1], 'Module was initilialised for a different input'\n else:\n self._shape = []\n\n leakiness = tf.get_variable('leakiness', shape=self._shape, initializer=tf.constant_initializer(0.01),\n collections=self.TRAINABLE_COLLECTIONS)\n return tf.maximum(inp, leakiness * inp)"
]
| [
[
"tensorflow.maximum",
"tensorflow.constant_initializer"
]
]
|
D-Bits/Exchange-Rates-Pipeline | [
"308b1fbb6eca5ec390ec8934f036026bb90dc1e4"
]
| [
"dags/update_rates.py"
]
| [
"from airflow import DAG \nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom datetime import datetime, date\nfrom requests import get\nfrom os import getenv\nimport pandas as pd\n\n\ndefault_args = {\n \"owner\": \"airflow\",\n \"start_date\": datetime(2020, 11, 1),\n \"retries\": 1,\n}\n\n# Run daily at 3pm, but skip weekends\ndag = DAG(\"update_rates\", schedule_interval=\"0 23 * * 1-5\", default_args=default_args)\n\n\ndef extract(**context):\n\n data = get(f\"https://api.exchangeratesapi.io/history?start_at=1999-01-01&end_at={date.today()}&base=USD\").json()\n # Create an XCOM for this task to be used in transform()\n context['ti'].xcom_push(key=\"data\", value=data)\n\n\ndef transform(**context):\n\n # Fetch the JSON data from the above XCOM\n data = context[\"ti\"].xcom_pull(key=\"data\")\n # Load relevant JSON in DataFrame for processing\n df = pd.DataFrame(\n data['rates']).transpose(\n ).reset_index(\n ).rename(columns={\n \"index\": \"dates\"\n }\n ).drop(['USD'], axis=1)\n\n today = df.sort_values(by='dates', ascending=False).head(1)\n\n context['ti'].xcom_push(key=\"df\", value=today)\n\n\ndef load(**context):\n\n df = context[\"ti\"].xcom_pull(key=\"df\")\n db_conn = getenv(\"SQL_ALCHEMY_CONN\")\n df.to_sql(\n 'rates_history', \n db_conn, \n index=False, \n method='multi', \n if_exists='append',\n )\n\n\nwith dag:\n\n t1 = PythonOperator(task_id=\"extract\", python_callable=extract, provide_context=True)\n t2 = PythonOperator(task_id=\"transform\", python_callable=transform, provide_context=True)\n t3 = PythonOperator(task_id=\"load\", python_callable=load, provide_context=True)\n\n t1 >> t2 >> t3 \n "
]
| [
[
"pandas.DataFrame"
]
]
|
Louis1124/keras | [
"4584ed2a120c18cca53ea7cf2a3764dd18421821"
]
| [
"keras/feature_column/dense_features_v2.py"
]
| [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A layer that produces a dense `Tensor` based on given `feature_columns`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.feature_column import feature_column_v2 as fc\nfrom keras.feature_column import base_feature_layer as kfc\nfrom keras.feature_column import dense_features\nfrom keras.utils import tf_contextlib\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.DenseFeatures', v1=[])\nclass DenseFeatures(dense_features.DenseFeatures):\n \"\"\"A layer that produces a dense `Tensor` based on given `feature_columns`.\n\n Generally a single example in training data is described with FeatureColumns.\n At the first layer of the model, this column oriented data should be converted\n to a single `Tensor`.\n\n This layer can be called multiple times with different features.\n\n This is the V2 version of this layer that uses name_scopes to create\n variables instead of variable_scopes. But this approach currently lacks\n support for partitioned variables. In that case, use the V1 version instead.\n\n Example:\n\n ```python\n price = tf.feature_column.numeric_column('price')\n keywords_embedded = tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_hash_bucket(\"keywords\", 10K),\n dimensions=16)\n columns = [price, keywords_embedded, ...]\n feature_layer = tf.keras.layers.DenseFeatures(columns)\n\n features = tf.io.parse_example(\n ..., features=tf.feature_column.make_parse_example_spec(columns))\n dense_tensor = feature_layer(features)\n for units in [128, 64, 32]:\n dense_tensor = tf.keras.layers.Dense(units, activation='relu')(dense_tensor)\n prediction = tf.keras.layers.Dense(1)(dense_tensor)\n ```\n \"\"\"\n\n def __init__(self,\n feature_columns,\n trainable=True,\n name=None,\n **kwargs):\n \"\"\"Creates a DenseFeatures object.\n\n Args:\n feature_columns: An iterable containing the FeatureColumns to use as\n inputs to your model. All items should be instances of classes derived\n from `DenseColumn` such as `numeric_column`, `embedding_column`,\n `bucketized_column`, `indicator_column`. 
If you have categorical\n features, you can wrap them with an `embedding_column` or\n `indicator_column`.\n trainable: Boolean, whether the layer's variables will be updated via\n gradient descent during training.\n name: Name to give to the DenseFeatures.\n **kwargs: Keyword arguments to construct a layer.\n\n Raises:\n ValueError: if an item in `feature_columns` is not a `DenseColumn`.\n \"\"\"\n super(DenseFeatures, self).__init__(\n feature_columns=feature_columns,\n trainable=trainable,\n name=name,\n **kwargs)\n self._state_manager = _StateManagerImplV2(self, self.trainable)\n\n def build(self, _):\n for column in self._feature_columns:\n with tf.name_scope(column.name):\n column.create_state(self._state_manager)\n # We would like to call Layer.build and not _DenseFeaturesHelper.build.\n # pylint: disable=protected-access\n super(kfc._BaseFeaturesLayer, self).build(None) # pylint: disable=bad-super-call\n\n\nclass _StateManagerImplV2(fc._StateManagerImpl): # pylint: disable=protected-access\n \"\"\"Manages the state of DenseFeatures.\"\"\"\n\n def create_variable(self,\n feature_column,\n name,\n shape,\n dtype=None,\n trainable=True,\n use_resource=True,\n initializer=None):\n if name in self._cols_to_vars_map[feature_column]:\n raise ValueError('Variable already exists.')\n\n # We explicitly track these variables since `name` is not guaranteed to be\n # unique and disable manual tracking that the add_weight call does.\n with no_manual_dependency_tracking_scope(self._layer):\n var = self._layer.add_weight(\n name=name,\n shape=shape,\n dtype=dtype,\n initializer=initializer,\n trainable=self._trainable and trainable,\n use_resource=use_resource)\n if isinstance(var, tf.__internal__.tracking.Trackable):\n self._layer._track_trackable(var, feature_column.name + '/' + name) # pylint: disable=protected-access\n self._cols_to_vars_map[feature_column][name] = var\n return var\n\n\n@tf_contextlib.contextmanager\ndef no_manual_dependency_tracking_scope(obj):\n \"\"\"A context that disables manual dependency tracking for the given `obj`.\n\n Sometimes library methods might track objects on their own and we might want\n to disable that and do the tracking on our own. One can then use this context\n manager to disable the tracking the library method does and do your own\n tracking.\n\n For example:\n\n class TestLayer(tf.keras.Layer):\n def build():\n with no_manual_dependency_tracking_scope(self):\n var = self.add_variable(\"name1\") # Creates a var and doesn't track it\n self._track_trackable(\"name2\", var) # We track variable with name `name2`\n\n Args:\n obj: A trackable object.\n\n Yields:\n a scope in which the object doesn't track dependencies manually.\n \"\"\"\n # pylint: disable=protected-access\n previous_value = getattr(obj, '_manual_tracking', True)\n obj._manual_tracking = False\n try:\n yield\n finally:\n obj._manual_tracking = previous_value\n"
]
| [
[
"tensorflow.compat.v2.name_scope",
"tensorflow.python.util.tf_export.keras_export"
]
]
|
safdark/advanced-lane-lines | [
"27edcc444ac532e84749d667fc579970d2059aff"
]
| [
"src/operations/thresholder.py"
]
| [
"'''\nCreated on Dec 23, 2016\n\n@author: safdar\n'''\nfrom operations.baseoperation import Operation\nimport numpy as np \nimport cv2\nfrom utils.utilities import drawlines, extractlanes\nfrom utils.plotter import Image\nfrom utils.plotter import Graph\n\n\n# Supported operations\n# - Color -> InRange -> 0/1\n# - Color HSV/H -> Sobel -> InRange -> 0/1 -> Shift/Negate\n# - Color HLS/S -> Sobel -> InRange -> 0/1\n# - Color HLS/S -> InRange\n# - Color -> OfRange ->\n\n# Operators:\n# - Multi-Channel / Continuous\n# - (OPERATOR) VStack/HStack/DStack\n# - (Term) OfRange\n# - (Term) Roll\n# - (Term) Spread\n# - Single-Channel / Continuous\n# - (Term) Sobel\n# - (Term) InRange\n# - (Term) Roll\n# - Multi-Channel / Binary\n# - VStack/HStack/DStack\n# - InRange\n# - Roll\n# - Single-Channel / Binary\n# - InRange\n# - (Term) Canny\n# - Roll\n\nclass Thresholder(Operation):\n Term_ = 'Term'\n HoughFilter = 'HoughFilter'\n class Term(object):\n ToPlot = 'ToPlot'\n Negate = 'Negate'\n Canny = 'Canny'\n Hough = 'Hough'\n class Expr(Term):\n OR = 'OR'\n AND = 'AND'\n SEQ = \"SEQ\"\n Operands = 'Operands'\n Operator = 'Operator'\n class SobelX(Term):\n _ = 'SobelX'\n Kernel = 'Kernel'\n MinMax = 'MinMax'\n class SobelY(Term):\n _ = 'SobelY'\n Kernel = 'Kernel'\n MinMax = 'MinMax'\n class SobelXY(Term):\n _ = 'SobelXY'\n Kernel = 'Kernel'\n MinMax = 'MinMax'\n class SobelTanXY(Term):\n _ = 'SobelTanXY'\n Kernel = 'Kernel'\n MinMax = 'MinMax'\n class Color(Term):\n _ = 'Color'\n Space = 'Space'\n Channel = 'Channel'\n MinMax = 'MinMax'\n HLS = 'HLS'\n HSV = 'HSV'\n RGB = 'RGB'\n Gray = 'Gray'\n\n def __init__(self, params):\n Operation.__init__(self, params)\n self.__term__ = self.getparam(self.Term_)\n\n def __processupstream__(self, original, latest, data, frame):\n latest = np.uint8(latest)\n return self.__do_threshold__(latest, self.__term__, frame)\n \n def __do_threshold__(self, image, term, frame):\n assert type(term) is dict, \"Every term must be a dictionary:\\n{}\".format(term)\n \n if self.Expr.Operator in term: # Recursive case\n operator = term[self.Expr.Operator]\n operands = term[self.Expr.Operands]\n toplot = term[self.Expr.ToPlot]\n negate = term[self.Term.Negate] if self.Term.Negate in term else 0\n canny = term[self.Term.Canny] if self.Term.Canny in term else None\n hough = self.getparam(self.HoughFilter) if term.get(self.Term.Hough, 0) == 1 else None\n\n if operator == self.Expr.SEQ:\n binary = None\n for term in operands:\n binary = self.__do_threshold__(image, term, frame)\n image = binary\n combined_binary = binary\n elif operator == self.Expr.AND or operator == self.Expr.OR:\n thresholded_binaries = []\n for term in operands:\n thresholded_binaries.append(self.__do_threshold__(image, term, frame))\n \n combined_binary = None\n if operator==self.Expr.OR:\n combined_binary = np.bitwise_or.reduce(thresholded_binaries)\n elif operator==self.Expr.AND:\n combined_binary = np.bitwise_and.reduce(thresholded_binaries)\n else:\n raise \"Operator not supported: {}\".format(operator)\n\n stats = None\n \n title = \">> {}\".format(operator)\n if negate:\n combined_binary = np.absolute(1 - combined_binary)\n title = \"{} >> NOT\".format(title)\n\n self.__plot__(frame, Image(title, combined_binary, 'gray'), toplot=toplot)\n\n if not canny is None:\n combined_binary = cv2.Canny(combined_binary, canny[0], canny[1])\n title = \">> CANNY ({})\".format(canny)\n self.__plot__(frame, Image(title, combined_binary, 'gray'), toplot=toplot)\n\n if not hough is None:\n lines,left,right = 
extractlanes(combined_binary, hough)\n title = \">> HOUGH\".format(hough)\n if left is None or right is None:\n hough_image = drawlines(np.zeros_like(combined_binary), lines)\n else:\n hough_image = drawlines(np.zeros_like(combined_binary), [left,right])\n self.__plot__(frame, Image(title, hough_image, 'gray'), toplot=toplot)\n \n return combined_binary\n else: # Base case\n assert len(term.keys())==1, \"Term setting should have only one key, but has {}\".format(len(term.keys()))\n flavor = list(term.keys())[0]\n termconfig = term[flavor]\n toplot = termconfig[self.Term.ToPlot]\n negate = termconfig[self.Term.Negate] if self.Term.Negate in termconfig else 0\n canny = termconfig[self.Term.Canny] if self.Term.Canny in termconfig else None\n hough = self.getparam(self.HoughFilter) if termconfig.get(self.Term.Hough, 0) == 1 else None\n \n binary_image = None\n if flavor==self.SobelX._:\n binary_image = self.filter_sobel_x(image, termconfig, frame)\n elif flavor==self.SobelY._:\n binary_image = self.filter_sobel_y(image, termconfig, frame)\n elif flavor==self.SobelXY._:\n binary_image = self.filter_sobel_xy(image, termconfig, frame)\n elif flavor==self.SobelTanXY._:\n binary_image = self.filter_sobel_tanxy(image, termconfig, frame)\n elif flavor==self.Color._:\n binary_image = self.filter_color(image, termconfig, frame)\n else:\n raise \"Threshold type not recognized: {}\".format(flavor)\n\n stats = None\n title = \"{}-{}\".format(flavor, self.removekeys(termconfig, [self.Term.ToPlot, self.Term.Negate, self.Term.Canny, self.Term.Hough]))\n \n if negate:\n binary_image = np.absolute(1 - binary_image)\n title = \"NOT ({})\".format(title)\n \n self.__plot__(frame, Image(title, binary_image, 'gray'), toplot=toplot)\n \n if not canny is None:\n binary_image = cv2.Canny(binary_image, canny[0], canny[1])\n title = \"{} >> CANNY ({})\".format(flavor, canny)\n self.__plot__(frame, Image(title, binary_image, 'gray'), toplot=toplot)\n\n if not hough is None:\n lines,left,right = extractlanes(binary_image, hough)\n title = \"{} >> HOUGH\".format(flavor)\n if left is None or right is None:\n hough_image = drawlines(np.zeros_like(binary_image), lines)\n else:\n hough_image = drawlines(np.zeros_like(binary_image), [left,right])\n self.__plot__(frame, Image(title, hough_image, 'gray'), toplot=toplot)\n\n return binary_image\n\n #######################################################################\n \n def removekeys(self, d, keys):\n return {key: d[key] for key in d if key not in keys}\n \n def __makegray__(self, image):\n gray = None\n if ((len(image.shape) < 3) or (image.shape[2] < 3)):\n gray = image\n else:\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return gray\n\n def __binaryforrange__(self, minmax, image):\n binary_sobel = np.zeros_like(image, dtype=np.uint8)\n binary_sobel[(image > minmax[0]) & (image < minmax[1])] = 1\n return binary_sobel\n\n def __scaleimage__(self, image):\n image = np.absolute(image)\n if (np.max(image) > 255):\n image = np.uint8(255 * image / np.max(image))\n return image\n\n def __filter_sobel__(self, image, orientation, term, frame):\n kernel = term[self.SobelTanXY.Kernel]\n minmax = term[self.SobelTanXY.MinMax]\n gray = self.__makegray__(image)\n sobel = None\n assert orientation=='x' or orientation=='y', \"Orientation should be either 'x' or 'y'. 
Got {}\".format(orientation)\n if orientation=='x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel)\n \n abs_sobel = np.absolute(sobel)\n scaled_sobel = self.__scaleimage__(abs_sobel)\n binary_sobel = self.__binaryforrange__(minmax, scaled_sobel)\n return binary_sobel\n\n def filter_sobel_x(self, image, term, frame):\n return self.__filter_sobel__(image, 'x', term, frame)\n\n def filter_sobel_y(self, image, term, frame):\n return self.__filter_sobel__(image, 'y', term, frame)\n\n def filter_sobel_xy(self, image, term, frame):\n kernel = term[self.SobelTanXY.Kernel]\n minmax = term[self.SobelTanXY.MinMax]\n gray = self.__makegray__(image)\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel)\n abs_sobel = np.sqrt(sobelx**2 + sobely**2)\n scaled_sobel = self.__scaleimage__(abs_sobel)\n binary_sobel = self.__binaryforrange__(minmax, scaled_sobel)\n return binary_sobel\n\n def filter_sobel_tanxy(self, image, term, frame):\n kernel = term[self.SobelTanXY.Kernel]\n minmax = term[self.SobelTanXY.MinMax]\n gray = self.__makegray__(image)\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel)\n abs_sobelx = np.absolute(sobelx)\n abs_sobely = np.absolute(sobely)\n tan_sobel = np.arctan2(abs_sobely, abs_sobelx)\n binary_sobel = self.__binaryforrange__(minmax, tan_sobel)\n return binary_sobel\n \n def canny(self, image, term, frame):\n blur = term[self.Canny.Blur]\n lowhigh = term[self.Canny.LowHigh]\n gray = self.__makegray__(image)\n if blur>0:\n gray = cv2.GaussianBlur(gray, (blur, blur), 0)\n canny = cv2.Canny(gray, lowhigh[0], lowhigh[1])\n return canny\n \n# def hough_lines(self, image, term, frame):\n# lines = cv2.HoughLinesP(image, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n \n def filter_color(self, image, term, frame):\n space = term[self.Color.Space]\n channel = term[self.Color.Channel]\n minmax = term[self.Color.MinMax]\n \n component = None\n if space == self.Color.HLS:\n if len(image.shape)<3 | image.shape[2]<3:\n raise \"Image provided with shape {} cannot be used to extract target {}\".format(image.shape, space)\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n component = hls[:,:,channel]\n elif space == self.Color.HSV:\n if len(image.shape)<3 | image.shape[2]<3:\n raise \"Image provided with shape {} cannot be used to extract target {}\".format(image.shape, space)\n hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n component = hsv[:,:,channel]\n elif space == self.Color.RGB:\n if len(image.shape)<3 | image.shape[2]<3:\n raise \"Image provided with shape {} cannot be used to extract target {}\".format(image.shape, space)\n rgb = image # Since we should have converted to RGB earlier\n component = rgb[:,:,channel]\n elif space == self.Color.Gray:\n if len(image.shape)==3 & image.shape[2]==3:\n component = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n else:\n component = image\n else:\n raise \"Unrecognized target space: {}\".format(space)\n \n mask = self.__scaleimage__(component)\n binary = self.__binaryforrange__(minmax, mask)\n return binary\n"
]
| [
[
"numpy.max",
"numpy.zeros_like",
"numpy.uint8",
"numpy.bitwise_or.reduce",
"numpy.bitwise_and.reduce",
"numpy.arctan2",
"numpy.sqrt",
"numpy.absolute"
]
]
|
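The thresholder.py entry above builds its binary lane masks by OR/AND-combining Sobel-gradient and color-channel thresholds (the numpy.bitwise_or.reduce and numpy.bitwise_and.reduce calls in its apis list). Below is a minimal sketch of that combination step, with a hypothetical kernel size and threshold ranges standing in for the file's config-driven Term dictionaries:

    import cv2
    import numpy as np

    def sobel_x_binary(gray, kernel=3, minmax=(20, 100)):
        # Scaled |Sobel_x| gradient thresholded into a 0/1 mask (as in filter_sobel_x)
        sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel))
        scaled = np.uint8(255 * sobel / np.max(sobel))
        return ((scaled > minmax[0]) & (scaled < minmax[1])).astype(np.uint8)

    def s_channel_binary(rgb, minmax=(170, 250)):
        # HLS S-channel thresholded into a 0/1 mask (as in filter_color with HLS, channel 2)
        s = cv2.cvtColor(rgb, cv2.COLOR_RGB2HLS)[:, :, 2]
        return ((s > minmax[0]) & (s < minmax[1])).astype(np.uint8)

    def combine_or(rgb):
        # Fold the individual 0/1 masks into one, as the Expr.OR branch does
        gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
        return np.bitwise_or.reduce([sobel_x_binary(gray), s_channel_binary(rgb)])

np.bitwise_or.reduce over a list of equal-shape 0/1 masks folds them along the first axis into a single mask, which is why the file keeps every intermediate threshold as a uint8 binary image.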
martius-lab/cid-in-rl | [
"005074a82e5cb26dff2a44bab3546af24cc57c2c"
]
| [
"cid/influence_estimation/datasets.py"
]
| [
"from collections import OrderedDict\nfrom typing import Callable, Dict, Sequence, Set\n\nimport gin\nimport numpy as np\nimport torch\nimport torch.utils.data.dataloader\n\n\[email protected](blacklist=['memory'])\nclass ForwardGoalDataset(torch.utils.data.Dataset):\n \"\"\"Dataset using achieved goals of transition target as targets\"\"\"\n def __init__(self,\n memory: Sequence[Dict[str, np.ndarray]],\n include_action=True,\n subtract_action_noise=False,\n use_state_noise=False,\n use_goal_diff_as_target=True,\n use_full_state_as_target=False,\n use_only_random_actions=False,\n use_only_contacts=False,\n use_only_no_contacts=False,\n use_only_actions_smaller_than=None,\n use_only_goal_diffs_larger_than=None,\n state_transform=None,\n target_transform=None,\n target_scale=None,\n state_noise_fn=None,\n ag_noise_fn=None,\n copy_memory=False):\n \"\"\"\n :param memory: Sequence over elements of this dataset\n :param include_action: If `True`, add action to input\n :param subtract_action_noise: If `True`, subtract action noise from\n action\n :param use_state_noise: If `True`, get state noise from memory\n :param use_goal_diff_as_target: If `True`, return difference\n `achieved_goal_next - achieved_goal` as target\n :param use_full_state_as_target: If `True`, use full next state as\n target instead of goals\n :param use_only_random_actions: If `True`, the used sequence should\n only use random actions. This is enforced by returning a\n corresponding `sequence_filter`\n :param use_only_contacts: If `True`, the used sequence should only\n consist of contact transitions. This is enforced by returning a\n corresponding `sequence_filter`\n :param use_only_no_contacts: If `True`, the used sequence should only\n consist of no contact transitions. This is enforced by returning a\n corresponding `sequence_filter`\n :param use_only_actions_smaller_than: If a number, the used sequence\n should only consist of actions whose L2 norm is smaller equal to\n the number. This is enforced by returning a corresponding\n `sequence_filter`\n :param use_only_goal_diffs_larger_than: If a number, the used sequence\n should only consist of transitions where the L2 norm of the goal\n diff is larger equal to the number. 
This is enforced by returning a\n corresponding `sequence_filter`\n :param state_transform: Callable that receives the input state\n and returns a transformed version\n :param target_transform: Callable that receives the target and returns\n a transformed version\n :param state_noise_fn: Callable that receives state and noise and\n returns transformed state\n :param ag_noise_fn: Callable that receives goals and noise and\n returns transformed goal\n :param target_scale: Scalar value that target gets multiplied with\n :param copy_memory: Make a copy of memory to get a contiguous array\n \"\"\"\n super().__init__()\n assert not (use_only_contacts and use_only_no_contacts), \\\n ('`use_only_contacts` and `use_only_no_contacts` can not both be '\n 'set to `True`')\n self._include_action = include_action\n self._subtract_action_noise = subtract_action_noise\n self._use_state_noise = use_state_noise\n self._use_goal_diff_as_target = use_goal_diff_as_target\n self._use_full_state_as_target = use_full_state_as_target\n self._use_only_random_actions = use_only_random_actions\n self._use_only_contacts = use_only_contacts\n self._use_only_no_contacts = use_only_no_contacts\n self._use_only_actions_smaller_than = use_only_actions_smaller_than\n self._use_only_goal_diffs_larger_than = use_only_goal_diffs_larger_than\n self._state_transform = state_transform\n self._target_transform = target_transform\n self._target_scale = target_scale\n if use_state_noise:\n self._state_noise_fn = state_noise_fn\n self._ag_noise_fn = ag_noise_fn\n else:\n self._state_noise_fn = None\n self._ag_noise_fn = None\n\n if copy_memory:\n self._memory = {key: np.stack([memory[idx][key]\n for idx in range(len(memory))])\n for key in self.required_keys\n if len(memory) > 0}\n self._memory_layout_by_index = False\n else:\n self._memory = memory\n self._memory_layout_by_index = True\n\n @property\n def shapes(self):\n example = self[0]\n return example[0].shape, example[1].shape\n\n @property\n def required_keys(self) -> Set[str]:\n keys = {'s'}\n\n if self._use_state_noise:\n keys.add('s_noise0')\n keys.add('s_noise1')\n\n if self._use_full_state_as_target:\n keys.add('s1')\n else:\n keys.add('ag1')\n if self._use_goal_diff_as_target:\n keys.add('ag0')\n\n if self._include_action:\n keys.add('a')\n if self._subtract_action_noise:\n keys.add('action_noise')\n\n return keys\n\n @property\n def sequence_filter(self):\n if (self._use_only_random_actions\n or self._use_only_contacts\n or self._use_only_no_contacts\n or self._use_only_actions_smaller_than is not None\n or self._use_only_goal_diffs_larger_than is not None):\n def _filter(buffers):\n shape = next(iter(buffers.values())).shape\n selection = np.ones(shape[:2], dtype=np.bool)\n if self._use_only_random_actions:\n selection &= (buffers['rand_a'].astype(bool)\n .squeeze(axis=-1))\n if self._use_only_contacts:\n selection &= (buffers['contact'].astype(bool)\n .squeeze(axis=-1))\n if self._use_only_no_contacts:\n selection &= ~(buffers['contact'].astype(bool)\n .squeeze(axis=-1))\n if self._use_only_actions_smaller_than is not None:\n action_norm = np.linalg.norm(buffers['a'], ord=2, axis=-1)\n selection &= (action_norm\n <= self._use_only_actions_smaller_than)\n if self._use_only_goal_diffs_larger_than is not None:\n ag_next = np.roll(buffers['ag'], -1, axis=1)\n goal_diff = ag_next - buffers['ag']\n goal_diff_norm = np.linalg.norm(goal_diff, ord=2, axis=-1)\n selection &= (goal_diff_norm\n >= self._use_only_goal_diffs_larger_than)\n\n return selection\n\n return _filter\n 
else:\n return None\n\n def __len__(self):\n if self._memory_layout_by_index:\n return len(self._memory)\n else:\n return len(self._memory['s'])\n\n def __getitem__(self, idx: int):\n if self._memory_layout_by_index:\n data = self._memory[idx]\n state = data['s']\n if self._use_state_noise:\n state_noise = data['s_noise0'][idx]\n state_next_noise = data['s_noise1'][idx]\n if self._include_action:\n action = data['a']\n if self._subtract_action_noise:\n action_noise = data['action_noise']\n if self._use_full_state_as_target:\n state_next = data['s1']\n else:\n ag1 = data['ag1']\n if self._use_goal_diff_as_target:\n ag0 = data['ag0']\n else:\n state = self._memory['s'][idx]\n if self._use_state_noise:\n state_noise = self._memory['s_noise0'][idx]\n state_next_noise = self._memory['s_noise1'][idx]\n if self._include_action:\n action = self._memory['a'][idx]\n if self._subtract_action_noise:\n action_noise = self._memory['action_noise'][idx]\n if self._use_full_state_as_target:\n state_next = self._memory['s1'][idx]\n else:\n ag1 = self._memory['ag1'][idx]\n if self._use_goal_diff_as_target:\n ag0 = self._memory['ag0'][idx]\n\n if self._state_transform is not None:\n state = self._state_transform(state)\n\n if self._state_noise_fn is not None:\n state = self._state_noise_fn(state, state_noise)\n\n if self._include_action:\n if self._subtract_action_noise:\n action = action - action_noise\n inp = np.concatenate((state, action), axis=-1)\n else:\n inp = state\n\n if self._use_full_state_as_target:\n target1 = state_next\n if self._state_noise_fn is not None:\n target1 = self._state_noise_fn(target1, state_next_noise)\n if self._use_goal_diff_as_target:\n target0 = state\n else:\n target1 = ag1\n if self._ag_noise_fn is not None:\n target1 = self._ag_noise_fn(target1, state_next_noise)\n if self._use_goal_diff_as_target:\n target0 = ag0\n if self._ag_noise_fn is not None:\n target0 = self._ag_noise_fn(target0, state_noise)\n\n if self._use_goal_diff_as_target:\n target = target1 - target0\n else:\n target = target1\n\n if self._target_transform is not None:\n target = self._target_transform(target)\n\n if self._target_scale is not None:\n target = target * self._target_scale\n\n return inp.astype(np.float32), target.astype(np.float32)\n\n\[email protected](blacklist=['memory'])\nclass FactorizedForwardDataset(torch.utils.data.Dataset):\n \"\"\"Dataset with factorized grouping of the state\n\n Uses the full next state as target.\n \"\"\"\n def __init__(self,\n memory: Sequence[Dict[str, np.ndarray]],\n factorizer: Callable[[np.ndarray], Dict[str, np.ndarray]],\n target_factorizer: Callable[[np.ndarray],\n Dict[str, np.ndarray]] = None,\n include_action=True,\n use_state_noise=False,\n use_state_diff_as_target=True,\n use_only_random_actions=False,\n target_keys_postfix='',\n unwrap_target=False,\n target_scale=None,\n state_noise_fn=None,\n copy_memory=False):\n \"\"\"\n :param memory: Sequence over elements of this dataset\n :param factorizer: Callable that returns dictionary of named\n state groups\n :param target_factorizer: Callable that returns dictionary of named\n state groups for target variable. If `None`, use `factorizer`.\n :param include_action: If `True`, add action to input\n :param use_state_noise: If `True`, get state noise from memory\n :param use_state_diff_as_target: If `True`, return difference\n `achieved_goal_next - achieved_goal` as target\n :param use_only_random_actions: If `True`, the used sequence should\n only use random actions. 
This is enforced by returning a\n corresponding `sequence_filter`\n :param target_keys_postfix: String to append to keys of the target\n :param target_scale: Scalar value that target gets multiplied with\n :param unwrap_target: Turn target dictionary into vector\n :param state_noise_fn: Callable that receives state and noise and\n returns transformed state\n :param copy_memory: Make a copy of memory to get a contiguous array\n \"\"\"\n super().__init__()\n self._memory = memory\n self._factorizer = factorizer\n if target_factorizer is not None:\n self._target_factorizer = target_factorizer\n else:\n self._target_factorizer = factorizer\n self._include_action = include_action\n self._use_state_noise = use_state_noise\n self._use_state_diff_as_target = use_state_diff_as_target\n self._use_only_random_actions = use_only_random_actions\n self._target_keys_postfix = target_keys_postfix\n self._unwrap_target = unwrap_target\n self._target_scale = target_scale if target_scale is not None else 1.0\n if use_state_noise:\n self._state_noise_fn = state_noise_fn\n else:\n self._state_noise_fn = None\n\n if copy_memory:\n self._memory = {key: np.stack([memory[idx][key]\n for idx in range(len(memory))])\n for key in self.required_keys\n if len(memory) > 0}\n self._memory_layout_by_index = False\n else:\n self._memory = memory\n self._memory_layout_by_index = True\n\n @property\n def shapes(self):\n example = self[0]\n inp_shapes = {name: val.shape for name, val in example[0].items()}\n\n if self._unwrap_target:\n target_shapes = example[1].shape\n else:\n target_shapes = {name: val.shape\n for name, val in example[1].items()}\n\n return inp_shapes, target_shapes\n\n @property\n def required_keys(self) -> Set[str]:\n keys = {'s0', 's1', 'a'}\n\n if self._use_state_noise:\n keys.add('s_noise0')\n keys.add('s_noise1')\n\n return keys\n\n @property\n def sequence_filter(self):\n if self._use_only_random_actions:\n def _filter(buffers):\n shape = next(iter(buffers.values())).shape\n selection = np.ones(shape[:2], dtype=np.bool)\n if self._use_only_random_actions:\n selection &= (buffers['rand_a'].astype(bool)\n .squeeze(axis=-1))\n\n return selection\n\n return _filter\n else:\n return None\n\n def __len__(self):\n if self._memory_layout_by_index:\n return len(self._memory)\n else:\n return len(self._memory['s0'])\n\n def __getitem__(self, idx: int):\n if self._memory_layout_by_index:\n data = self._memory[idx]\n state = data['s0']\n state_next = data['s1']\n if self._use_state_noise:\n state_noise = data['s_noise0'][idx]\n state_next_noise = data['s_noise1'][idx]\n if self._include_action:\n action = data['a']\n else:\n state = self._memory['s0'][idx]\n state_next = self._memory['s0'][idx]\n if self._use_state_noise:\n state_noise = self._memory['s_noise0'][idx]\n state_next_noise = self._memory['s_noise1'][idx]\n if self._include_action:\n action = self._memory['a'][idx]\n\n if self._state_noise_fn is not None:\n state = self._state_noise_fn(state, state_noise)\n state_next = self._state_noise_fn(state_next, state_next_noise)\n\n state_unfactorized = state\n state = self._factorizer(state)\n state_next = self._target_factorizer(state_next)\n\n if self._use_state_diff_as_target:\n state_as_target = self._target_factorizer(state_unfactorized)\n target = {name + self._target_keys_postfix:\n (self._target_scale\n * (state_next[name] - state_as_target[name]))\n for name in state_next}\n else:\n target = {name + self._target_keys_postfix:\n self._target_scale * value\n for name, value in state_next.items()}\n\n 
if self._include_action:\n state['a'] = action\n\n inp = state\n\n if self._unwrap_target:\n target = np.concatenate([v for v in target.values()], axis=0)\n\n return inp, target\n\n\[email protected](blacklist=['memory'])\nclass FactorizedDataset(torch.utils.data.Dataset):\n \"\"\"Dataset with factorized grouping of the state\n\n Input and target are specified by keys.\n \"\"\"\n def __init__(self,\n memory: Sequence[Dict[str, np.ndarray]],\n inp_keys: Sequence[str],\n target_keys: Sequence[str],\n next_keys_postfix: str,\n factorizer: Callable[[np.ndarray], Dict[str, np.ndarray]],\n next_factorizer: Callable[[np.ndarray],\n Dict[str, np.ndarray]] = None,\n use_only_random_actions=False,\n extra_dataset_keys: Sequence[str]=None,\n unwrap_target=False):\n super().__init__()\n self._memory = memory\n self._inp_keys = frozenset(inp_keys)\n self._target_keys = frozenset(target_keys)\n self._next_keys_postfix = next_keys_postfix\n self._factorizer = factorizer\n self._next_factorizer = next_factorizer\n self._use_only_random_actions = use_only_random_actions\n self._unwrap_target = unwrap_target\n\n required_keys = {'s0', 's1', 'a'}\n if extra_dataset_keys is not None:\n required_keys |= set(extra_dataset_keys)\n self._required_keys = frozenset(required_keys)\n\n @property\n def shapes(self):\n example = self[0]\n assert all(key in example[0] for key in self._inp_keys)\n inp_shapes = {name: val.shape for name, val in example[0].items()}\n\n if self._unwrap_target:\n target_shapes = example[1].shape\n else:\n assert all(key in example[1] for key in self._target_keys)\n target_shapes = {name: val.shape\n for name, val in example[1].items()}\n\n return inp_shapes, target_shapes\n\n @property\n def required_keys(self) -> Set[str]:\n return self._required_keys\n\n @property\n def sequence_filter(self):\n if self._use_only_random_actions:\n def _filter(buffers):\n shape = next(iter(buffers.values())).shape\n selection = np.ones(shape[:2], dtype=np.bool)\n if self._use_only_random_actions:\n selection &= (buffers['rand_a'].astype(bool)\n .squeeze(axis=-1))\n return selection\n return _filter\n else:\n return None\n\n def __len__(self):\n return len(self._memory)\n\n def _build_dict(self, keys, state, state_next, extra_data):\n res = OrderedDict()\n for key, val in state.items():\n if key in keys:\n res[key] = val\n if state_next is not None:\n for key, val in state_next.items():\n next_key = key + self._next_keys_postfix\n if next_key in keys:\n res[next_key] = val\n for key, val in extra_data.items():\n if key in keys:\n res[key] = val\n\n return res\n\n def __getitem__(self, idx: int):\n data = self._memory[idx]\n\n state = self._factorizer(data['s0'])\n\n if self._next_factorizer is not None:\n state_next = self._next_factorizer(data['s1'])\n else:\n state_next = None\n\n inp = self._build_dict(self._inp_keys, state, state_next, data)\n target = self._build_dict(self._target_keys, state, state_next, data)\n\n if self._unwrap_target:\n target = np.concatenate([v for v in target.values()], axis=0)\n\n return inp, target\n\n\nclass SeededRandomSampler(torch.utils.data.sampler.Sampler):\n \"\"\"Sampler that follows Pytorch's RandomSampler, but is seeded\"\"\"\n def __init__(self, data_source, seed=0):\n self.data_source = data_source\n self._num_samples = None\n self._rng = np.random.RandomState(seed)\n\n @property\n def num_samples(self):\n if self._num_samples is None:\n return len(self.data_source)\n return self._num_samples\n\n def __iter__(self):\n n = len(self.data_source)\n return 
iter(self._rng.permutation(n))\n\n def __len__(self):\n return self.num_samples\n\n\nclass SeededWeightedRandomSampler(torch.utils.data.sampler.Sampler):\n \"\"\"Sampler that follows Pytorch's WeightedRandomSampler, but is seeded\"\"\"\n def __init__(self, weights, num_samples, replacement=True, seed=0):\n self._weights = np.array(weights, dtype=np.float64)\n self._weights /= np.sum(self._weights)\n self._num_samples = num_samples\n self._replacement = replacement\n self._rng = np.random.RandomState(seed)\n\n def __iter__(self):\n return iter(self._rng.choice(len(self._weights),\n self._num_samples,\n replace=self._replacement,\n p=self._weights))\n\n def __len__(self):\n return self._num_samples\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.random.RandomState",
"numpy.sum",
"numpy.ones",
"numpy.roll"
]
]
|
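The datasets.py entry above ends with SeededRandomSampler and SeededWeightedRandomSampler, which mirror PyTorch's samplers but draw indices from a numpy.random.RandomState so shuffling is reproducible across runs. A trimmed, standalone version of the seeded sampler, followed by a hypothetical DataLoader usage (the toy TensorDataset is illustrative only):

    import numpy as np
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    class SeededRandomSampler(torch.utils.data.Sampler):
        # Shuffles indices like torch's RandomSampler, but reproducibly via a seeded numpy RNG
        def __init__(self, data_source, seed=0):
            self.data_source = data_source
            self._rng = np.random.RandomState(seed)

        def __iter__(self):
            return iter(self._rng.permutation(len(self.data_source)))

        def __len__(self):
            return len(self.data_source)

    dataset = TensorDataset(torch.arange(10).float().unsqueeze(1))
    loader = DataLoader(dataset, batch_size=4, sampler=SeededRandomSampler(dataset, seed=0))
    for (batch,) in loader:
        print(batch.squeeze(1).tolist())  # same order on every run with the same seed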
ivanwilliammd/I3DR-Net | [
"356d9a3d821d22c375b0bcc42ae488fe6e520e21"
]
| [
"utils/model_utils.py"
]
| [
"#!/usr/bin/env python\n# Official implementation code for \"Lung Nodule Detection and Classification from Thorax CT-Scan Using RetinaNet with Transfer Learning\" and \"Lung Nodule Texture Detection and Classification Using 3D CNN.\"\n# Adapted from of [medicaldetectiontoolkit](https://github.com/pfjaeger/medicaldetectiontoolkit) and [kinetics_i3d_pytorch](https://github.com/hassony2/kinetics_i3d_pytorch)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"\nParts are based on https://github.com/multimodallearning/pytorch-mask-rcnn\npublished under MIT license.\n\"\"\"\n\nimport numpy as np\nimport scipy.misc\nimport scipy.ndimage\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\n\n\n############################################################\n# Bounding Boxes\n############################################################\n\n\ndef compute_iou_2D(box, boxes, box_area, boxes_area):\n \"\"\"Calculates IoU of the given box with the array of the given boxes.\n box: 1D vector [y1, x1, y2, x2] THIS IS THE GT BOX\n boxes: [boxes_count, (y1, x1, y2, x2)]\n box_area: float. the area of 'box'\n boxes_area: array of length boxes_count.\n\n Note: the areas are passed in rather than calculated here for\n efficency. Calculate once in the caller to avoid duplicate work.\n \"\"\"\n # Calculate intersection areas\n y1 = np.maximum(box[0], boxes[:, 0])\n y2 = np.minimum(box[2], boxes[:, 2])\n x1 = np.maximum(box[1], boxes[:, 1])\n x2 = np.minimum(box[3], boxes[:, 3])\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n union = box_area + boxes_area[:] - intersection[:]\n iou = intersection / union\n\n return iou\n\n\n\ndef compute_iou_3D(box, boxes, box_volume, boxes_volume):\n \"\"\"Calculates IoU of the given box with the array of the given boxes.\n box: 1D vector [y1, x1, y2, x2, z1, z2] (typically gt box)\n boxes: [boxes_count, (y1, x1, y2, x2, z1, z2)]\n box_area: float. the area of 'box'\n boxes_area: array of length boxes_count.\n\n Note: the areas are passed in rather than calculated here for\n efficency. Calculate once in the caller to avoid duplicate work.\n \"\"\"\n # Calculate intersection areas\n y1 = np.maximum(box[0], boxes[:, 0])\n y2 = np.minimum(box[2], boxes[:, 2])\n x1 = np.maximum(box[1], boxes[:, 1])\n x2 = np.minimum(box[3], boxes[:, 3])\n z1 = np.maximum(box[4], boxes[:, 4])\n z2 = np.minimum(box[5], boxes[:, 5])\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) * np.maximum(z2 - z1, 0)\n union = box_volume + boxes_volume[:] - intersection[:]\n iou = intersection / union\n\n return iou\n\n\n\ndef compute_overlaps(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)]. 
/ 3D: (z1, z2))\n For better performance, pass the largest set first and the smaller second.\n \"\"\"\n # Areas of anchors and GT boxes\n if boxes1.shape[1] == 4:\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i] #this is the gt box\n overlaps[:, i] = compute_iou_2D(box2, boxes1, area2[i], area1)\n return overlaps\n\n else:\n # Areas of anchors and GT boxes\n volume1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) * (boxes1[:, 5] - boxes1[:, 4])\n volume2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) * (boxes2[:, 5] - boxes2[:, 4])\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i] # this is the gt box\n overlaps[:, i] = compute_iou_3D(box2, boxes1, volume2[i], volume1)\n return overlaps\n\n\n\ndef box_refinement(box, gt_box):\n \"\"\"Compute refinement needed to transform box to gt_box.\n box and gt_box are [N, (y1, x1, y2, x2)] / 3D: (z1, z2))\n \"\"\"\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = torch.log(gt_height / height)\n dw = torch.log(gt_width / width)\n result = torch.stack([dy, dx, dh, dw], dim=1)\n\n if box.shape[1] > 4:\n depth = box[:, 5] - box[:, 4]\n center_z = box[:, 4] + 0.5 * depth\n gt_depth = gt_box[:, 5] - gt_box[:, 4]\n gt_center_z = gt_box[:, 4] + 0.5 * gt_depth\n dz = (gt_center_z - center_z) / depth\n dd = torch.log(gt_depth / depth)\n result = torch.stack([dy, dx, dz, dh, dw, dd], dim=1)\n\n return result\n\n\n\ndef unmold_mask_2D(mask, bbox, image_shape):\n \"\"\"Converts a mask generated by the neural network into a format similar\n to it's original shape.\n mask: [height, width] of type float. A small, typically 28x28 mask.\n bbox: [y1, x1, y2, x2]. The box to fit the mask in.\n\n Returns a binary mask with the same size as the original image.\n \"\"\"\n y1, x1, y2, x2 = bbox\n out_zoom = [y2 - y1, x2 - x1]\n zoom_factor = [i / j for i, j in zip(out_zoom, mask.shape)]\n mask = scipy.ndimage.zoom(mask, zoom_factor, order=1).astype(np.float32)\n\n # Put the mask in the right location.\n full_mask = np.zeros(image_shape[:2])\n full_mask[y1:y2, x1:x2] = mask\n return full_mask\n\n\n\ndef unmold_mask_3D(mask, bbox, image_shape):\n \"\"\"Converts a mask generated by the neural network into a format similar\n to it's original shape.\n mask: [height, width] of type float. A small, typically 28x28 mask.\n bbox: [y1, x1, y2, x2, z1, z2]. 
The box to fit the mask in.\n\n Returns a binary mask with the same size as the original image.\n \"\"\"\n y1, x1, y2, x2, z1, z2 = bbox\n out_zoom = [y2 - y1, x2 - x1, z2 - z1]\n zoom_factor = [i/j for i,j in zip(out_zoom, mask.shape)]\n mask = scipy.ndimage.zoom(mask, zoom_factor, order=1).astype(np.float32)\n\n # Put the mask in the right location.\n full_mask = np.zeros(image_shape[:3])\n full_mask[y1:y2, x1:x2, z1:z2] = mask\n return full_mask\n\n\n############################################################\n# Anchors\n############################################################\n\ndef generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):\n \"\"\"\n scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]\n ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]\n shape: [height, width] spatial shape of the feature map over which\n to generate anchors.\n feature_stride: Stride of the feature map relative to the image in pixels.\n anchor_stride: Stride of anchors on the feature map. For example, if the\n value is 2 then generate anchors for every other feature map pixel.\n \"\"\"\n # Get all combinations of scales and ratios\n scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))\n scales = scales.flatten()\n ratios = ratios.flatten()\n\n # Enumerate heights and widths from scales and ratios\n heights = scales / np.sqrt(ratios)\n widths = scales * np.sqrt(ratios)\n\n # Enumerate shifts in feature space\n shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride\n shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride\n shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)\n\n # Enumerate combinations of shifts, widths, and heights\n box_widths, box_centers_x = np.meshgrid(widths, shifts_x)\n box_heights, box_centers_y = np.meshgrid(heights, shifts_y)\n\n # Reshape to get a list of (y, x) and a list of (h, w)\n box_centers = np.stack(\n [box_centers_y, box_centers_x], axis=2).reshape([-1, 2])\n box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])\n\n # Convert to corner coordinates (y1, x1, y2, x2)\n boxes = np.concatenate([box_centers - 0.5 * box_sizes,\n box_centers + 0.5 * box_sizes], axis=1)\n return boxes\n\n\n\ndef generate_anchors_3D(scales_xy, scales_z, ratios, shape, feature_stride_xy, feature_stride_z, anchor_stride):\n \"\"\"\n scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]\n ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]\n shape: [height, width] spatial shape of the feature map over which\n to generate anchors.\n feature_stride: Stride of the feature map relative to the image in pixels.\n anchor_stride: Stride of anchors on the feature map. 
For example, if the\n value is 2 then generate anchors for every other feature map pixel.\n \"\"\"\n # Get all combinations of scales and ratios\n\n scales_xy, ratios_meshed = np.meshgrid(np.array(scales_xy), np.array(ratios))\n scales_xy = scales_xy.flatten()\n ratios_meshed = ratios_meshed.flatten()\n\n # Enumerate heights and widths from scales and ratios\n heights = scales_xy / np.sqrt(ratios_meshed)\n widths = scales_xy * np.sqrt(ratios_meshed)\n depths = np.tile(np.array(scales_z), len(ratios_meshed)//np.array(scales_z)[..., None].shape[0])\n\n # Enumerate shifts in feature space\n shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride_xy #translate from fm positions to input coords.\n shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride_xy\n shifts_z = np.arange(0, shape[2], anchor_stride) * (feature_stride_z)\n shifts_x, shifts_y, shifts_z = np.meshgrid(shifts_x, shifts_y, shifts_z)\n\n # Enumerate combinations of shifts, widths, and heights\n box_widths, box_centers_x = np.meshgrid(widths, shifts_x)\n box_heights, box_centers_y = np.meshgrid(heights, shifts_y)\n box_depths, box_centers_z = np.meshgrid(depths, shifts_z)\n\n # Reshape to get a list of (y, x, z) and a list of (h, w, d)\n box_centers = np.stack(\n [box_centers_y, box_centers_x, box_centers_z], axis=2).reshape([-1, 3])\n box_sizes = np.stack([box_heights, box_widths, box_depths], axis=2).reshape([-1, 3])\n\n # Convert to corner coordinates (y1, x1, y2, x2, z1, z2)\n boxes = np.concatenate([box_centers - 0.5 * box_sizes,\n box_centers + 0.5 * box_sizes], axis=1)\n\n boxes = np.transpose(np.array([boxes[:, 0], boxes[:, 1], boxes[:, 3], boxes[:, 4], boxes[:, 2], boxes[:, 5]]), axes=(1, 0))\n return boxes\n\n\ndef generate_pyramid_anchors(logger, cf):\n \"\"\"Generate anchors at different levels of a feature pyramid. Each scale\n is associated with a level of the pyramid, but each ratio is used in\n all levels of the pyramid.\n\n from configs:\n :param scales: cf.RPN_ANCHOR_SCALES , e.g. [4, 8, 16, 32]\n :param ratios: cf.RPN_ANCHOR_RATIOS , e.g. [0.5, 1, 2]\n :param feature_shapes: cf.BACKBONE_SHAPES , e.g. [array of shapes per feature map] [80, 40, 20, 10, 5]\n :param feature_strides: cf.BACKBONE_STRIDES , e.g. [2, 4, 8, 16, 32, 64]\n :param anchors_stride: cf.RPN_ANCHOR_STRIDE , e.g. 1\n :return anchors: (N, (y1, x1, y2, x2, (z1), (z2)). All generated anchors in one array. Sorted\n with the same order of the given scales. 
So, anchors of scale[0] come first, then anchors of scale[1], and so on.\n \"\"\"\n scales = cf.rpn_anchor_scales\n ratios = cf.rpn_anchor_ratios\n feature_shapes = cf.backbone_shapes\n anchor_stride = cf.rpn_anchor_stride\n pyramid_levels = cf.pyramid_levels\n feature_strides = cf.backbone_strides\n\n anchors = []\n logger.info(\"feature map shapes: {}\".format(feature_shapes))\n logger.info(\"anchor scales: {}\".format(scales))\n\n expected_anchors = [np.prod(feature_shapes[ii]) * len(ratios) * len(scales['xy'][ii]) for ii in pyramid_levels]\n\n for lix, level in enumerate(pyramid_levels):\n if len(feature_shapes[level]) == 2:\n anchors.append(generate_anchors(scales['xy'][level], ratios, feature_shapes[level],\n feature_strides['xy'][level], anchor_stride))\n else:\n anchors.append(generate_anchors_3D(scales['xy'][level], scales['z'][level], ratios, feature_shapes[level],\n feature_strides['xy'][level], feature_strides['z'][level], anchor_stride))\n\n logger.info(\"level {}: built anchors {} / expected anchors {} ||| total build {} / total expected {}\".format(\n level, anchors[-1].shape, expected_anchors[lix], np.concatenate(anchors).shape, np.sum(expected_anchors)))\n\n out_anchors = np.concatenate(anchors, axis=0)\n return out_anchors\n\n\n\ndef apply_box_deltas_2D(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, 4] where each row is y1, x1, y2, x2\n deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= torch.exp(deltas[:, 2])\n width *= torch.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = torch.stack([y1, x1, y2, x2], dim=1)\n return result\n\n\n\ndef apply_box_deltas_3D(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, 6] where each row is y1, x1, y2, x2, z1, z2\n deltas: [N, 6] where each row is [dy, dx, dz, log(dh), log(dw), log(dd)]\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n depth = boxes[:, 5] - boxes[:, 4]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n center_z = boxes[:, 4] + 0.5 * depth\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n center_z += deltas[:, 2] * depth\n height *= torch.exp(deltas[:, 3])\n width *= torch.exp(deltas[:, 4])\n depth *= torch.exp(deltas[:, 5])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n z1 = center_z - 0.5 * depth\n y2 = y1 + height\n x2 = x1 + width\n z2 = z1 + depth\n result = torch.stack([y1, x1, y2, x2, z1, z2], dim=1)\n return result\n\n\n\ndef clip_boxes_2D(boxes, window):\n \"\"\"\n boxes: [N, 4] each col is y1, x1, y2, x2\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n boxes = torch.stack( \\\n [boxes[:, 0].clamp(float(window[0]), float(window[2])),\n boxes[:, 1].clamp(float(window[1]), float(window[3])),\n boxes[:, 2].clamp(float(window[0]), float(window[2])),\n boxes[:, 3].clamp(float(window[1]), float(window[3]))], 1)\n return boxes\n\ndef clip_boxes_3D(boxes, window):\n \"\"\"\n boxes: [N, 6] each col is y1, x1, y2, x2, z1, z2\n window: [6] in the form y1, x1, y2, x2, z1, 
z2\n \"\"\"\n boxes = torch.stack( \\\n [boxes[:, 0].clamp(float(window[0]), float(window[2])),\n boxes[:, 1].clamp(float(window[1]), float(window[3])),\n boxes[:, 2].clamp(float(window[0]), float(window[2])),\n boxes[:, 3].clamp(float(window[1]), float(window[3])),\n boxes[:, 4].clamp(float(window[4]), float(window[5])),\n boxes[:, 5].clamp(float(window[4]), float(window[5]))], 1)\n return boxes\n\n\n\ndef clip_boxes_numpy(boxes, window):\n \"\"\"\n boxes: [N, 4] each col is y1, x1, y2, x2 / [N, 6] in 3D.\n window: iamge shape (y, x, (z))\n \"\"\"\n if boxes.shape[1] == 4:\n boxes = np.concatenate(\n (np.clip(boxes[:, 0], 0, window[0])[:, None],\n np.clip(boxes[:, 1], 0, window[0])[:, None],\n np.clip(boxes[:, 2], 0, window[1])[:, None],\n np.clip(boxes[:, 3], 0, window[1])[:, None]), 1\n )\n\n else:\n boxes = np.concatenate(\n (np.clip(boxes[:, 0], 0, window[0])[:, None],\n np.clip(boxes[:, 1], 0, window[0])[:, None],\n np.clip(boxes[:, 2], 0, window[1])[:, None],\n np.clip(boxes[:, 3], 0, window[1])[:, None],\n np.clip(boxes[:, 4], 0, window[2])[:, None],\n np.clip(boxes[:, 5], 0, window[2])[:, None]), 1\n )\n\n return boxes\n\n\n\ndef bbox_overlaps_2D(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeate boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeate() so simulate it\n # using tf.tile() and tf.reshape.\n boxes1_repeat = boxes2.size()[0]\n boxes2_repeat = boxes1.size()[0]\n boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,4)\n boxes2 = boxes2.repeat(boxes2_repeat,1)\n\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = boxes1.chunk(4, dim=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = boxes2.chunk(4, dim=1)\n y1 = torch.max(b1_y1, b2_y1)[:, 0]\n x1 = torch.max(b1_x1, b2_x1)[:, 0]\n y2 = torch.min(b1_y2, b2_y2)[:, 0]\n x2 = torch.min(b1_x2, b2_x2)[:, 0]\n zeros = Variable(torch.zeros(y1.size()[0]), requires_grad=False)\n if y1.is_cuda:\n zeros = zeros.cuda()\n intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros)\n\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area[:,0] + b2_area[:,0] - intersection\n\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = iou.view(boxes2_repeat, boxes1_repeat)\n return overlaps\n\n\n\ndef bbox_overlaps_3D(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2, z1, z2)].\n \"\"\"\n # 1. Tile boxes2 and repeate boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeate() so simulate it\n # using tf.tile() and tf.reshape.\n boxes1_repeat = boxes2.size()[0]\n boxes2_repeat = boxes1.size()[0]\n boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,6)\n boxes2 = boxes2.repeat(boxes2_repeat,1)\n\n # 2. 
Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2, b1_z1, b1_z2 = boxes1.chunk(6, dim=1)\n b2_y1, b2_x1, b2_y2, b2_x2, b2_z1, b2_z2 = boxes2.chunk(6, dim=1)\n y1 = torch.max(b1_y1, b2_y1)[:, 0]\n x1 = torch.max(b1_x1, b2_x1)[:, 0]\n y2 = torch.min(b1_y2, b2_y2)[:, 0]\n x2 = torch.min(b1_x2, b2_x2)[:, 0]\n z1 = torch.max(b1_z1, b2_z1)[:, 0]\n z2 = torch.min(b1_z2, b2_z2)[:, 0]\n zeros = Variable(torch.zeros(y1.size()[0]), requires_grad=False)\n if y1.is_cuda:\n zeros = zeros.cuda()\n intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros) * torch.max(z2 - z1, zeros)\n\n # 3. Compute unions\n b1_volume = (b1_y2 - b1_y1) * (b1_x2 - b1_x1) * (b1_z2 - b1_z1)\n b2_volume = (b2_y2 - b2_y1) * (b2_x2 - b2_x1) * (b2_z2 - b2_z1)\n union = b1_volume[:,0] + b2_volume[:,0] - intersection\n\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = iou.view(boxes2_repeat, boxes1_repeat)\n return overlaps\n\n\n\ndef gt_anchor_matching(cf, anchors, gt_boxes, gt_class_ids=None):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2, (z1), (z2))]\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2, (z1), (z2))]\n gt_class_ids (optional): [num_gt_boxes] Integer class IDs for one stage detectors. in RPN case of Mask R-CNN,\n set all positive matches to 1 (foreground)\n\n Returns:\n anchor_class_matches: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n anchor_delta_targets: [N, (dy, dx, (dz), log(dh), log(dw), (log(dd)))] Anchor bbox deltas.\n \"\"\"\n\n anchor_class_matches = np.zeros([anchors.shape[0]], dtype=np.int32)\n anchor_delta_targets = np.zeros((cf.rpn_train_anchors_per_image, 2*cf.dim))\n anchor_matching_iou = cf.anchor_matching_iou\n\n if gt_boxes is None:\n anchor_class_matches = np.full(anchor_class_matches.shape, fill_value=-1)\n return anchor_class_matches, anchor_delta_targets\n\n # for mrcnn: anchor matching is done for RPN loss, so positive labels are all 1 (foreground)\n if gt_class_ids is None:\n gt_class_ids = np.array([1] * len(gt_boxes))\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= anchor_matching_iou then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.1 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.1).\n\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n if anchors.shape[1] == 4:\n anchor_class_matches[(anchor_iou_max < 0.1)] = -1\n elif anchors.shape[1] == 6:\n anchor_class_matches[(anchor_iou_max < 0.01)] = -1\n else:\n raise ValueError('anchor shape wrong {}'.format(anchors.shape))\n\n # 2. Set an anchor for each GT box (regardless of IoU value).\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n for ix, ii in enumerate(gt_iou_argmax):\n anchor_class_matches[ii] = gt_class_ids[ix]\n\n # 3. 
Set anchors with high overlap as positive.\n above_trhesh_ixs = np.argwhere(anchor_iou_max >= anchor_matching_iou)\n anchor_class_matches[above_trhesh_ixs] = gt_class_ids[anchor_iou_argmax[above_trhesh_ixs]]\n\n # Subsample to balance positive anchors.\n ids = np.where(anchor_class_matches > 0)[0]\n extra = len(ids) - (cf.rpn_train_anchors_per_image // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n anchor_class_matches[ids] = 0\n\n # Leave all negative proposals negative now and sample from them in online hard example mining.\n # For positive anchors, compute shift and scale needed to transform them to match the corresponding GT boxes.\n ids = np.where(anchor_class_matches > 0)[0]\n ix = 0 # index into anchor_delta_targets\n for i, a in zip(ids, anchors[ids]):\n # closest gt box (it might have IoU < anchor_matching_iou)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # convert coordinates to center plus width/height.\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n if cf.dim == 2:\n anchor_delta_targets[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n\n else:\n gt_d = gt[5] - gt[4]\n gt_center_z = gt[4] + 0.5 * gt_d\n a_d = a[5] - a[4]\n a_center_z = a[4] + 0.5 * a_d\n\n anchor_delta_targets[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n (gt_center_z - a_center_z) / a_d,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n np.log(gt_d / a_d)\n ]\n\n # normalize.\n anchor_delta_targets[ix] /= cf.rpn_bbox_std_dev\n ix += 1\n\n return anchor_class_matches, anchor_delta_targets\n\n\n\ndef clip_to_window(window, boxes):\n \"\"\"\n window: (y1, x1, y2, x2) / 3D: (z1, z2). The window in the image we want to clip to.\n boxes: [N, (y1, x1, y2, x2)] / 3D: (z1, z2)\n \"\"\"\n boxes[:, 0] = boxes[:, 0].clamp(float(window[0]), float(window[2]))\n boxes[:, 1] = boxes[:, 1].clamp(float(window[1]), float(window[3]))\n boxes[:, 2] = boxes[:, 2].clamp(float(window[0]), float(window[2]))\n boxes[:, 3] = boxes[:, 3].clamp(float(window[1]), float(window[3]))\n\n if boxes.shape[1] > 5:\n boxes[:, 4] = boxes[:, 4].clamp(float(window[4]), float(window[5]))\n boxes[:, 5] = boxes[:, 5].clamp(float(window[4]), float(window[5]))\n\n return boxes\n\n\n############################################################\n# Pytorch Utility Functions\n############################################################\n\n\ndef unique1d(tensor):\n if tensor.size()[0] == 0 or tensor.size()[0] == 1:\n return tensor\n tensor = tensor.sort()[0]\n unique_bool = tensor[1:] != tensor [:-1]\n first_element = Variable(torch.ByteTensor([True]), requires_grad=False)\n if tensor.is_cuda:\n first_element = first_element.cuda()\n unique_bool = torch.cat((first_element, unique_bool),dim=0)\n return tensor[unique_bool.data]\n\n\n\ndef log2(x):\n \"\"\"Implementatin of Log2. 
Pytorch doesn't have a native implemenation.\"\"\"\n ln2 = Variable(torch.log(torch.FloatTensor([2.0])), requires_grad=False)\n if x.is_cuda:\n ln2 = ln2.cuda()\n return torch.log(x) / ln2\n\n\n\ndef intersect1d(tensor1, tensor2):\n aux = torch.cat((tensor1, tensor2), dim=0)\n aux = aux.sort(descending=True)[0]\n return aux[:-1][(aux[1:] == aux[:-1]).data]\n\n\n\ndef shem(roi_probs_neg, negative_count, ohem_poolsize):\n \"\"\"\n stochastic hard example mining: from a list of indices (referring to non-matched predictions),\n determine a pool of highest scoring (worst false positives) of size negative_count*ohem_poolsize.\n Then, sample n (= negative_count) predictions of this pool as negative examples for loss.\n :param roi_probs_neg: tensor of shape (n_predictions, n_classes).\n :param negative_count: int.\n :param ohem_poolsize: int.\n :return: (negative_count). indices refer to the positions in roi_probs_neg. If pool smaller than expected due to\n limited negative proposals availabel, this function will return sampled indices of number < negative_count without\n throwing an error.\n \"\"\"\n # sort according to higehst foreground score.\n probs, order = roi_probs_neg[:, 1:].max(1)[0].sort(descending=True)\n select = torch.tensor((ohem_poolsize * int(negative_count), order.size()[0])).min().int()\n pool_indices = order[:select]\n rand_idx = torch.randperm(pool_indices.size()[0])\n return pool_indices[rand_idx[:negative_count].cuda()]\n\n\n\ndef initialize_weights(net):\n \"\"\"\n Initialize model weights. Current Default in Pytorch (version 0.4.1) is initialization from a uniform distriubtion.\n Will expectably be changed to kaiming_uniform in future versions.\n \"\"\"\n init_type = net.cf.weight_init\n\n for m in [module for module in net.modules() if type(module) in [nn.Conv2d, nn.Conv3d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d,\n nn.Linear]]:\n if init_type == 'xavier_uniform':\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n\n elif init_type == 'xavier_normal':\n nn.init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n\n elif init_type == \"kaiming_uniform\":\n nn.init.kaiming_uniform_(m.weight.data, mode='fan_out', nonlinearity=net.cf.relu, a=0)\n if m.bias is not None:\n fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / np.sqrt(fan_out)\n nn.init.uniform_(m.bias, -bound, bound)\n\n elif init_type == \"kaiming_normal\":\n nn.init.kaiming_normal_(m.weight.data, mode='fan_out', nonlinearity=net.cf.relu, a=0)\n if m.bias is not None:\n fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / np.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)\n\n\n\nclass NDConvGenerator(object):\n \"\"\"\n generic wrapper around conv-layers to avoid 2D vs. 3D distinguishing in code.\n \"\"\"\n def __init__(self, dim):\n self.dim = dim\n\n def __call__(self, c_in, c_out, ks, pad=0, stride=1, norm=None, relu='relu'):\n \"\"\"\n :param c_in: number of in_channels.\n :param c_out: number of out_channels.\n :param ks: kernel size.\n :param pad: pad size.\n :param stride: kernel stride.\n :param norm: string specifying type of feature map normalization. If None, no normalization is applied.\n :param relu: string specifying type of nonlinearity. 
If None, no nonlinearity is applied.\n :return: convolved feature_map.\n \"\"\"\n if self.dim == 2:\n conv = nn.Conv2d(c_in, c_out, kernel_size=ks, padding=pad, stride=stride)\n if norm is not None:\n if norm == 'instance_norm':\n norm_layer = nn.InstanceNorm2d(c_out)\n elif norm == 'batch_norm':\n norm_layer = nn.BatchNorm2d(c_out)\n else:\n raise ValueError('norm type as specified in configs is not implemented...')\n conv = nn.Sequential(conv, norm_layer)\n\n else:\n conv = nn.Conv3d(c_in, c_out, kernel_size=ks, padding=pad, stride=stride)\n if norm is not None:\n if norm == 'instance_norm':\n norm_layer = nn.InstanceNorm3d(c_out)\n elif norm == 'batch_norm':\n norm_layer = nn.BatchNorm3d(c_out)\n else:\n raise ValueError('norm type as specified in configs is not implemented... {}'.format(norm))\n conv = nn.Sequential(conv, norm_layer)\n\n if relu is not None:\n if relu == 'relu':\n relu_layer = nn.ReLU(inplace=True)\n elif relu == 'leaky_relu':\n relu_layer = nn.LeakyReLU(inplace=True)\n else:\n raise ValueError('relu type as specified in configs is not implemented...')\n conv = nn.Sequential(conv, relu_layer)\n\n return conv\n\n\n\ndef get_one_hot_encoding(y, n_classes):\n \"\"\"\n transform a numpy label array to a one-hot array of the same shape.\n :param y: array of shape (b, 1, y, x, (z)).\n :param n_classes: int, number of classes to unfold in one-hot encoding.\n :return y_ohe: array of shape (b, n_classes, y, x, (z))\n \"\"\"\n dim = len(y.shape) - 2\n if dim == 2:\n y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3])).astype('int32')\n if dim ==3:\n y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3], y.shape[4])).astype('int32')\n for cl in range(n_classes):\n y_ohe[:, cl][y[:, 0] == cl] = 1\n return y_ohe\n\n\n\ndef get_dice_per_batch_and_class(pred, y, n_classes):\n '''\n computes dice scores per batch instance and class.\n :param pred: prediction array of shape (b, 1, y, x, (z)) (e.g. softmax prediction with argmax over dim 1)\n :param y: ground truth array of shape (b, 1, y, x, (z)) (contains int [0, ..., n_classes]\n :param n_classes: int\n :return: dice scores of shape (b, c)\n '''\n pred = get_one_hot_encoding(pred, n_classes)\n y = get_one_hot_encoding(y, n_classes)\n axes = tuple(range(2, len(pred.shape)))\n intersect = np.sum(pred*y, axis=axes)\n denominator = np.sum(pred, axis=axes)+np.sum(y, axis=axes) + 1e-8\n dice = 2.0*intersect / denominator\n return dice\n\n\n\ndef sum_tensor(input, axes, keepdim=False):\n axes = np.unique(axes)\n if keepdim:\n for ax in axes:\n input = input.sum(ax, keepdim=True)\n else:\n for ax in sorted(axes, reverse=True):\n input = input.sum(int(ax))\n return input\n\n\n\ndef batch_dice(pred, y, false_positive_weight=1.0, eps=1e-6):\n '''\n compute soft dice over batch. this is a diffrentiable score and can be used as a loss function.\n only dice scores of foreground classes are returned, since training typically\n does not benefit from explicit background optimization. Pixels of the entire batch are considered a pseudo-volume to compute dice scores of.\n This way, single patches with missing foreground classes can not produce faulty gradients.\n :param pred: (b, c, y, x, (z)), softmax probabilities (network output).\n :param y: (b, c, y, x, (z)), one hote encoded segmentation mask.\n :param false_positive_weight: float [0,1]. For weighting of imbalanced classes,\n reduces the penalty for false-positive pixels. 
Can be beneficial sometimes in data with heavy fg/bg imbalances.\n :return: soft dice score (float).This function discards the background score and returns the mena of foreground scores.\n '''\n if len(pred.size()) == 4:\n axes = (0, 2, 3)\n intersect = sum_tensor(pred * y, axes, keepdim=False)\n denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)\n return torch.mean((2 * intersect / (denom + eps))[1:]) #only fg dice here.\n\n if len(pred.size()) == 5:\n axes = (0, 2, 3, 4)\n intersect = sum_tensor(pred * y, axes, keepdim=False)\n denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)\n return torch.mean((2 * intersect / (denom + eps))[1:]) #only fg dice here.\n\n else:\n raise ValueError('wrong input dimension in dice loss')\n\n\n\n\ndef batch_dice_mask(pred, y, mask, false_positive_weight=1.0, eps=1e-6):\n '''\n compute soft dice over batch. this is a diffrentiable score and can be used as a loss function.\n only dice scores of foreground classes are returned, since training typically\n does not benefit from explicit background optimization. Pixels of the entire batch are considered a pseudo-volume to compute dice scores of.\n This way, single patches with missing foreground classes can not produce faulty gradients.\n :param pred: (b, c, y, x, (z)), softmax probabilities (network output).\n :param y: (b, c, y, x, (z)), one hote encoded segmentation mask.\n :param false_positive_weight: float [0,1]. For weighting of imbalanced classes,\n reduces the penalty for false-positive pixels. Can be beneficial sometimes in data with heavy fg/bg imbalances.\n :return: soft dice score (float).This function discards the background score and returns the mena of foreground scores.\n '''\n\n mask = mask.unsqueeze(1).repeat(1, 2, 1, 1)\n\n if len(pred.size()) == 4:\n axes = (0, 2, 3)\n intersect = sum_tensor(pred * y * mask, axes, keepdim=False)\n denom = sum_tensor(false_positive_weight*pred * mask + y * mask, axes, keepdim=False)\n return torch.mean((2 * intersect / (denom + eps))[1:]) #only fg dice here.\n\n if len(pred.size()) == 5:\n axes = (0, 2, 3, 4)\n intersect = sum_tensor(pred * y, axes, keepdim=False)\n denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)\n return torch.mean((2 * intersect / (denom + eps))[1:]) #only fg dice here.\n\n else:\n raise ValueError('wrong input dimension in dice loss')"
]
| [
[
"torch.cat",
"torch.stack",
"numpy.random.choice",
"numpy.minimum",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_",
"torch.nn.BatchNorm2d",
"numpy.where",
"torch.exp",
"torch.nn.BatchNorm3d",
"numpy.concatenate",
"numpy.full",
"numpy.log",
"torch.FloatTensor",
"torch.ByteTensor",
"torch.nn.init.normal_",
"numpy.prod",
"numpy.argmax",
"numpy.arange",
"numpy.sqrt",
"torch.nn.Conv3d",
"torch.nn.init.xavier_normal_",
"numpy.array",
"torch.nn.init.kaiming_uniform_",
"torch.min",
"numpy.zeros",
"torch.max",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"numpy.stack",
"torch.nn.InstanceNorm2d",
"numpy.argwhere",
"numpy.clip",
"torch.nn.init.uniform_",
"torch.log",
"numpy.sum",
"torch.nn.init.xavier_uniform_",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.nn.InstanceNorm3d",
"numpy.meshgrid",
"torch.mean",
"numpy.unique",
"numpy.maximum"
]
]
|
harir91/alpaca-trade-api-python | [
"5bbcee7eaa85a91ffc3484fc08ec2fb4a03e1ce5"
]
| [
"alpaca_trade_api/entity_v2.py"
]
| [
"from enum import Enum\nimport pandas as pd\nfrom .entity import Bar, Entity, Trade, Quote, _NanoTimestamped\nfrom typing import Dict\n\ntrade_mapping_v2 = {\n \"i\": \"id\",\n \"S\": \"symbol\",\n \"c\": \"conditions\",\n \"x\": \"exchange\",\n \"p\": \"price\",\n \"s\": \"size\",\n \"t\": \"timestamp\",\n \"z\": \"tape\"\n}\n\nquote_mapping_v2 = {\n \"S\": \"symbol\",\n \"ax\": \"ask_exchange\",\n \"ap\": \"ask_price\",\n \"as\": \"ask_size\",\n \"bx\": \"bid_exchange\",\n \"bp\": \"bid_price\",\n \"bs\": \"bid_size\",\n \"c\": \"conditions\",\n \"t\": \"timestamp\",\n \"z\": \"tape\"\n}\n\nbar_mapping_v2 = {\n \"S\": \"symbol\",\n \"o\": \"open\",\n \"h\": \"high\",\n \"l\": \"low\",\n \"c\": \"close\",\n \"v\": \"volume\",\n \"t\": \"timestamp\",\n \"n\": \"trade_count\",\n \"vw\": \"vwap\"\n}\n\nstatus_mapping_v2 = {\n \"S\": \"symbol\",\n \"sc\": \"status_code\",\n \"sm\": \"status_message\",\n \"rc\": \"reason_code\",\n \"rm\": \"reason_message\",\n \"t\": \"timestamp\",\n \"z\": \"tape\"\n}\n\nluld_mapping_v2 = {\n \"S\": \"symbol\",\n \"u\": \"limit_up_price\",\n \"d\": \"limit_down_price\",\n \"i\": \"indicator\",\n \"t\": \"timestamp\",\n \"z\": \"tape\"\n}\n\nclass EntityListType(Enum):\n Trade = Trade, trade_mapping_v2\n Quote = Quote, quote_mapping_v2\n Bar = Bar, bar_mapping_v2\n\n\nclass EntityList(list):\n def __init__(self, entity_type: EntityListType, raw):\n entity = entity_type.value[0]\n super().__init__([entity(o) for o in raw])\n self._raw = raw\n self.mapping = entity_type.value[1]\n\n @property\n def df(self):\n if not hasattr(self, '_df'):\n df = pd.DataFrame(\n self._raw,\n )\n\n df.columns = [self.mapping.get(c, c) for c in df.columns]\n if not df.empty:\n df.set_index('timestamp', inplace=True)\n df.index = pd.DatetimeIndex(df.index)\n self._df = df\n return self._df\n\n\nclass Remapped:\n def __init__(self, mapping: Dict[str, str], *args, **kwargs):\n self._reversed_mapping = {\n value: key for (key, value) in mapping.items()}\n super().__init__(*args, **kwargs)\n\n def __getattr__(self, key):\n if key in self._reversed_mapping:\n return super().__getattr__(self._reversed_mapping[key])\n return super().__getattr__(key)\n\n\nclass BarsV2(EntityList):\n def __init__(self, raw):\n super().__init__(EntityListType.Bar, raw)\n\n\nclass TradesV2(EntityList):\n def __init__(self, raw):\n super().__init__(EntityListType.Trade, raw)\n\n\nclass QuotesV2(EntityList):\n def __init__(self, raw):\n super().__init__(EntityListType.Quote, raw)\n\n\nclass TradeV2(Remapped, _NanoTimestamped, Entity):\n _tskeys = ('t',)\n\n def __init__(self, raw):\n super().__init__(trade_mapping_v2, raw)\n\n\nclass QuoteV2(Remapped, _NanoTimestamped, Entity):\n _tskeys = ('t',)\n\n def __init__(self, raw):\n super().__init__(quote_mapping_v2, raw)\n\n\nclass BarV2(Remapped, _NanoTimestamped, Entity):\n _tskeys = ('t',)\n\n def __init__(self, raw):\n super().__init__(bar_mapping_v2, raw)\n\n\nclass StatusV2(Remapped, _NanoTimestamped, Entity):\n _tskeys = ('t',)\n\n def __init__(self, raw):\n super().__init__(status_mapping_v2, raw)\n\nclass LULDV2(Remapped, _NanoTimestamped, Entity):\n _tskeys = ('t',)\n\n def __init__(self, raw):\n super().__init__(luld_mapping_v2, raw)\n\n\nclass SnapshotV2:\n def __init__(self, raw):\n self.latest_trade = _convert_or_none(TradeV2, raw.get('latestTrade'))\n self.latest_quote = _convert_or_none(QuoteV2, raw.get('latestQuote'))\n self.minute_bar = _convert_or_none(BarV2, raw.get('minuteBar'))\n self.daily_bar = _convert_or_none(BarV2, raw.get('dailyBar'))\n 
self.prev_daily_bar = _convert_or_none(BarV2, raw.get('prevDailyBar'))\n\n\nclass SnapshotsV2(dict):\n def __init__(self, raw):\n for k, v in raw.items():\n self[k] = _convert_or_none(SnapshotV2, v)\n\n\ndef _convert_or_none(entityType, value):\n if value:\n return entityType(value)\n return None\n"
]
| [
[
"pandas.DataFrame",
"pandas.DatetimeIndex"
]
]
|
bonchinchi/PinSout | [
"e1f8882e20dcb714faaa4ba350569a6c7315a3aa"
]
| [
"src/sem_seg/model/log_6cls_test16/train.py"
]
| [
"import argparse\nimport math\nimport h5py\nimport numpy as np\nimport tensorflow as tf\nimport socket\n\nimport os\nimport sys\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(BASE_DIR)\nsys.path.append(ROOT_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'utils'))\nimport provider\nimport tf_util\nfrom model import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--log_dir', default='log', help='Log dir [default: log]')\nparser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')\nparser.add_argument('--max_epoch', type=int, default=50, help='Epoch to run [default: 50]')\nparser.add_argument('--batch_size', type=int, default=24, help='Batch Size during training [default: 24]')\nparser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')\nparser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')\nparser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')\nparser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')\nparser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')\nparser.add_argument('--test_area', type=int, default=6, help='Which area to use for test, option: 1-6 [default: 6]')\nFLAGS = parser.parse_args()\n\n\nBATCH_SIZE = FLAGS.batch_size\nNUM_POINT = FLAGS.num_point\nMAX_EPOCH = FLAGS.max_epoch\nNUM_POINT = FLAGS.num_point\nBASE_LEARNING_RATE = FLAGS.learning_rate\nGPU_INDEX = FLAGS.gpu\nMOMENTUM = FLAGS.momentum\nOPTIMIZER = FLAGS.optimizer\nDECAY_STEP = FLAGS.decay_step\nDECAY_RATE = FLAGS.decay_rate\n\nLOG_DIR = FLAGS.log_dir\nif not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)\nos.system('cp model.py %s' % (LOG_DIR)) # bkp of model def\nos.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure\nLOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')\nLOG_FOUT.write(str(FLAGS)+'\\n')\n\nMAX_NUM_POINT = 4096\nNUM_CLASSES = 6\n\nBN_INIT_DECAY = 0.5\nBN_DECAY_DECAY_RATE = 0.5\n#BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)\nBN_DECAY_DECAY_STEP = float(DECAY_STEP)\nBN_DECAY_CLIP = 0.99\n\nHOSTNAME = socket.gethostname()\n\nALL_FILES = provider.getDataFiles('indoor3d_sem_seg_hdf5_data16/all_files.txt')\nroom_filelist = [line.rstrip() for line in open('indoor3d_sem_seg_hdf5_data16/room_filelist.txt')]\n\n# Load ALL data\ndata_batch_list = []\nlabel_batch_list = []\nfor h5_filename in ALL_FILES:\n data_batch, label_batch = provider.loadDataFile(h5_filename)\n data_batch_list.append(data_batch)\n label_batch_list.append(label_batch)\ndata_batches = np.concatenate(data_batch_list, 0)\nlabel_batches = np.concatenate(label_batch_list, 0)\nprint(data_batches.shape)\nprint(label_batches.shape)\n\ntest_area = 'Area_'+str(FLAGS.test_area)\ntrain_idxs = []\ntest_idxs = []\nfor i,room_name in enumerate(room_filelist):\n if test_area in room_name:\n test_idxs.append(i)\n else:\n train_idxs.append(i)\n # train_idxs.append(i)\n\ntrain_data = data_batches[train_idxs,...]\ntrain_label = label_batches[train_idxs]\ntest_data = data_batches[test_idxs,...]\ntest_label = label_batches[test_idxs]\nprint(train_data.shape, train_label.shape)\nprint(test_data.shape, test_label.shape)\n\n\n\n\ndef log_string(out_str):\n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n 
print(out_str)\n\n\ndef get_learning_rate(batch):\n learning_rate = tf.compat.v1.train.exponential_decay(\n BASE_LEARNING_RATE, # Base learning rate.\n batch * BATCH_SIZE, # Current index into the dataset.\n DECAY_STEP, # Decay step.\n DECAY_RATE, # Decay rate.\n staircase=True)\n learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!!\n return learning_rate \n\ndef get_bn_decay(batch):\n bn_momentum = tf.compat.v1.train.exponential_decay(\n BN_INIT_DECAY,\n batch*BATCH_SIZE,\n BN_DECAY_DECAY_STEP,\n BN_DECAY_DECAY_RATE,\n staircase=True)\n bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)\n return bn_decay\n\ndef train():\n with tf.Graph().as_default():\n with tf.device('/gpu:'+str(GPU_INDEX)):\n pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)\n is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())\n \n # Note the global_step=batch parameter to minimize. \n # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.\n batch = tf.Variable(0)\n bn_decay = get_bn_decay(batch)\n tf.compat.v1.summary.scalar('bn_decay', bn_decay)\n\n # Get model and loss \n pred = get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)\n loss = get_loss(pred, labels_pl)\n tf.compat.v1.summary.scalar('loss', loss)\n\n correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))\n accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)\n tf.compat.v1.summary.scalar('accuracy', accuracy)\n\n # Get training operator\n learning_rate = get_learning_rate(batch)\n tf.compat.v1.summary.scalar('learning_rate', learning_rate)\n if OPTIMIZER == 'momentum':\n optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)\n elif OPTIMIZER == 'adam':\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(loss, global_step=batch)\n \n # Add ops to save and restore all the variables.\n saver = tf.compat.v1.train.Saver()\n \n # Create a session\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = True\n sess = tf.compat.v1.Session(config=config)\n\n # Add summary writers\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),\n sess.graph)\n test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))\n\n # Init variables\n init = tf.global_variables_initializer()\n sess.run(init, {is_training_pl:True})\n\n ops = {'pointclouds_pl': pointclouds_pl,\n 'labels_pl': labels_pl,\n 'is_training_pl': is_training_pl,\n 'pred': pred,\n 'loss': loss,\n 'train_op': train_op,\n 'merged': merged,\n 'step': batch}\n\n for epoch in range(MAX_EPOCH):\n log_string('**** EPOCH %03d ****' % (epoch))\n sys.stdout.flush()\n \n train_one_epoch(sess, ops, train_writer)\n eval_one_epoch(sess, ops, test_writer)\n \n # Save the variables to disk.\n if epoch % 10 == 0:\n save_path = saver.save(sess, os.path.join(LOG_DIR, \"model.ckpt\"))\n log_string(\"Model saved in file: %s\" % save_path)\n\n\n\ndef train_one_epoch(sess, ops, train_writer):\n \"\"\" ops: dict mapping from string to tf ops \"\"\"\n is_training = True\n \n log_string('----')\n current_data, current_label, _ = provider.shuffle_data(train_data[:,0:NUM_POINT,:], train_label) \n \n file_size = current_data.shape[0]\n num_batches = file_size // BATCH_SIZE\n \n total_correct = 0\n total_seen = 0\n loss_sum = 0\n \n for batch_idx in range(num_batches):\n if batch_idx % 
100 == 0:\n print('Current batch/total batch num: %d/%d'%(batch_idx,num_batches))\n start_idx = batch_idx * BATCH_SIZE\n end_idx = (batch_idx+1) * BATCH_SIZE\n \n feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training,}\n summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],\n feed_dict=feed_dict)\n train_writer.add_summary(summary, step)\n pred_val = np.argmax(pred_val, 2)\n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n total_correct += correct\n total_seen += (BATCH_SIZE*NUM_POINT)\n loss_sum += loss_val\n \n log_string('mean loss: %f' % (loss_sum / float(num_batches)))\n log_string('accuracy: %f' % (total_correct / float(total_seen)))\n\n \ndef eval_one_epoch(sess, ops, test_writer):\n \"\"\" ops: dict mapping from string to tf ops \"\"\"\n is_training = False\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n total_seen_class = [0 for _ in range(NUM_CLASSES)]\n total_correct_class = [0 for _ in range(NUM_CLASSES)]\n \n log_string('----')\n current_data = test_data[:,0:NUM_POINT,:]\n current_label = np.squeeze(test_label)\n \n file_size = current_data.shape[0]\n num_batches = file_size // BATCH_SIZE\n \n for batch_idx in range(num_batches):\n start_idx = batch_idx * BATCH_SIZE\n end_idx = (batch_idx+1) * BATCH_SIZE\n\n feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training}\n summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred']],\n feed_dict=feed_dict)\n test_writer.add_summary(summary, step)\n pred_val = np.argmax(pred_val, 2)\n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n total_correct += correct\n total_seen += (BATCH_SIZE*NUM_POINT)\n loss_sum += (loss_val*BATCH_SIZE)\n for i in range(start_idx, end_idx):\n for j in range(NUM_POINT):\n l = current_label[i, j]\n total_seen_class[l] += 1\n total_correct_class[l] += (pred_val[i-start_idx, j] == l)\n \n log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT)))\n log_string('eval accuracy: %f'% (total_correct / float(total_seen)))\n log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))\n \n\n\nif __name__ == \"__main__\":\n train()\n LOG_FOUT.close()\n"
]
| [
[
"tensorflow.global_variables_initializer",
"tensorflow.cast",
"numpy.concatenate",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.argmax",
"tensorflow.Variable",
"numpy.argmax",
"numpy.array",
"tensorflow.minimum",
"tensorflow.to_int64",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.summary.merge_all",
"numpy.squeeze",
"numpy.sum",
"tensorflow.Graph",
"tensorflow.train.MomentumOptimizer",
"tensorflow.compat.v1.train.exponential_decay",
"tensorflow.maximum"
]
]
|
allenwang28/lingvo | [
"26d3d6672d3f46d8f281c2aa9f57166ef6296738"
]
| [
"lingvo/core/attention_util_test.py"
]
| [
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for attention_util.\"\"\"\n\nfrom absl.testing import parameterized\n\nfrom lingvo import compat as tf\nfrom lingvo.core import attention_util\nfrom lingvo.core import test_utils\n\nimport numpy as np\n\nFLAGS = tf.flags.FLAGS\n\n\nclass RelPositionBiasTest(test_utils.TestCase, parameterized.TestCase):\n\n def testBasic(self):\n with self.session():\n t = 3\n # [BTNH].\n content = tf.linalg.diag(tf.ones([t]))[None, :, None, :]\n # [LNH].\n abs_pos_emb = tf.reshape(\n tf.range(t * (2 * t - 1), dtype=tf.float32), [2 * t - 1, 1, t])\n tf.logging.info('content=%s abs_pos_emb=%s', content.eval(),\n abs_pos_emb.eval())\n p = attention_util.PositionalAttenLogits.Params().Set(name='rel_pos_bias')\n pos_atten_logits = p.Instantiate()\n self.assertAllClose(\n [[[[6., 3., 0.], [10., 7., 4.], [14., 11., 8.]]]],\n pos_atten_logits.RelPositionBias(content, abs_pos_emb).eval(),\n )\n\n\ndef OracleAttentionLogits(query,\n key,\n abs_pos_emb,\n content_bias,\n positional_bias,\n skip_term_b=False):\n \"\"\"Computes expected attention logits using non-vectorized approach.\"\"\"\n batch, seqlen, num_heads, _ = query.shape\n tgtlen, srclen = seqlen, seqlen\n\n logits = np.zeros((batch, num_heads, tgtlen, srclen))\n\n for b in range(batch):\n for n in range(num_heads):\n for i in range(tgtlen):\n for j in range(srclen):\n offset = seqlen - 1\n pos_emb = abs_pos_emb[i - j + offset]\n logits[b][n][i][j] = np.dot(query[b][i][n], key[b][j][n])\n if not skip_term_b:\n logits[b][n][i][j] += np.dot(query[b][i][n], pos_emb[n])\n if content_bias is not None:\n logits[b][n][i][j] += np.dot(content_bias[n], key[b][j][n])\n if positional_bias is not None:\n logits[b][n][i][j] += np.dot(positional_bias[n], pos_emb[n])\n return logits\n\n\nclass TransformerXLRelativeAttentionTest(test_utils.TestCase,\n parameterized.TestCase):\n\n def setUp(self):\n super().setUp()\n self.input_dim = 32\n self.num_heads = 4\n self.batch = 4\n self.seqlen = 16\n\n def _GetTestInputs(self):\n np.random.seed(FLAGS.test_random_seed)\n query = 3 * np.random.rand(self.batch, self.seqlen, self.num_heads,\n self.input_dim).astype(np.float32)\n key = 5 * np.random.rand(self.batch, self.seqlen, self.num_heads,\n self.input_dim).astype(np.float32)\n abs_pos_emb = 7 * np.random.rand(2 * self.seqlen - 1, self.num_heads,\n self.input_dim).astype(np.float32)\n content_bias = 11 * np.random.rand(self.num_heads, self.input_dim).astype(\n np.float32)\n positional_bias = 13 * np.random.rand(self.num_heads,\n self.input_dim).astype(np.float32)\n return query, key, abs_pos_emb, content_bias, positional_bias\n\n @parameterized.named_parameters(\n ('Base', False),\n ('Lite', True),\n )\n def testTransformerXL(self, skip_term_b):\n (query, key, abs_pos_emb, content_bias,\n positional_bias) = self._GetTestInputs()\n expected = 
OracleAttentionLogits(query, key, abs_pos_emb, content_bias,\n positional_bias, skip_term_b)\n p = attention_util.PositionalAttenLogits.Params().Set(name='transformer_xl')\n pos_atten_logits = p.Instantiate()\n actual_t = pos_atten_logits.AttenLogitsXL(query, key, abs_pos_emb,\n content_bias, positional_bias,\n skip_term_b)\n with self.session() as sess:\n actual = sess.run(actual_t)\n self.assertAllClose(expected, actual)\n\n def testRPE(self):\n (query, key, abs_pos_emb, _, _) = self._GetTestInputs()\n expected = OracleAttentionLogits(query, key, abs_pos_emb, None, None)\n p = attention_util.PositionalAttenLogits.Params().Set(name='rpe')\n pos_atten_logits = p.Instantiate()\n actual_t = pos_atten_logits.AttenLogitsRPE(query, key, abs_pos_emb)\n with self.session() as sess:\n actual = sess.run(actual_t)\n self.assertAllClose(expected, actual)\n\n\nclass BlockUtilsTest(test_utils.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('single_block', 7),\n ('one_frame_block', 1),\n ('two_frame_blocks', 2),\n )\n def testConvertToBlocks(self, block_size):\n x_val = np.random.random([2, 6, 2, 3, 4])\n with self.session() as sess:\n x = tf.convert_to_tensor(x_val, tf.float32)\n x_blocks = attention_util.ConvertToBlocks(x, block_size)\n x_blocks_val = sess.run(x_blocks)\n # Check shape.\n batch_size = x_val.shape[0]\n other_dims = x_val.shape[2:]\n num_blocks = int(np.ceil(x_val.shape[1] / float(block_size)))\n expected_shape = (batch_size, num_blocks, block_size) + other_dims\n self.assertAllEqual(expected_shape, x_blocks_val.shape)\n\n # Check values.\n x_recover = x_blocks_val.reshape((x_blocks_val.shape[0], -1) +\n x_blocks_val.shape[3:])\n x_recover = x_recover[:, :x_val.shape[1], ...]\n self.assertAllClose(x_val, x_recover)\n\n @parameterized.named_parameters(\n ('single_block', 7, 2, 1),\n ('single_frame_context', 1, 1, 0),\n ('other_case_1', 3, 4, 1),\n ('other_case_2', 4, 2, 4),\n )\n def testExtractBlockContext(self, block_size, left_context, right_context):\n x_val = np.random.random([2, 6, 2, 3, 4])\n with self.session() as sess:\n x = tf.convert_to_tensor(x_val, tf.float32)\n x_context = attention_util.ExtractBlockContext(x, block_size,\n left_context,\n right_context)\n x_context_val = sess.run(x_context)\n # Check shape.\n batch_size = x_val.shape[0]\n other_dims = x_val.shape[2:]\n num_blocks = int(np.ceil(x_val.shape[1] / float(block_size)))\n context_size = block_size + left_context - 1 + right_context\n expected_shape = (batch_size, num_blocks, context_size) + other_dims\n self.assertAllEqual(expected_shape, x_context_val.shape)\n\n # Check values block by block.\n for block_idx in range(num_blocks):\n context_start = block_idx * block_size - left_context + 1\n context_end = (block_idx + 1) * block_size + right_context\n slice_start = max(0, context_start)\n slice_end = min(x_val.shape[1], context_end)\n expected_val = x_val[:, slice_start:slice_end, ...]\n actual_val = x_context_val[:, block_idx, ...]\n # remove paddings\n front_padding = slice_start - context_start\n back_padding = context_end - slice_end\n actual_val = actual_val[:, front_padding:context_size - back_padding, ...]\n self.assertAllClose(expected_val, actual_val)\n\n def _getReferenceCausalPadding(self, seq_len, block_size, left_context,\n right_context):\n num_blocks = int(np.ceil(seq_len / float(block_size)))\n context_size = block_size + left_context - 1 + right_context\n padding = np.ones((num_blocks, block_size, context_size))\n\n for i in range(num_blocks):\n for j in 
range(block_size):\n actual_src_pos = j + i * block_size\n if actual_src_pos < seq_len:\n for k in range(context_size):\n actual_tgt_pos = k + i * block_size - (left_context - 1)\n if 0 <= actual_tgt_pos and actual_tgt_pos < seq_len:\n diff = actual_src_pos - actual_tgt_pos\n if -right_context <= diff and diff < left_context:\n padding[i, j, k] = 0\n\n return padding\n\n @parameterized.named_parameters(\n ('single_block', 6, 9, 2, 1),\n ('single_frame_block', 6, 1, 2, 1),\n ('single_frame_context', 6, 1, 1, 0),\n ('other_case_1', 6, 3, 4, 1),\n ('other_case_2', 6, 4, 2, 4),\n )\n def testMakeLocalMask(self, seq_len, block_size, left_context, right_context):\n with self.session() as sess:\n seq_len_t = tf.convert_to_tensor(seq_len)\n mask = attention_util.MakeLocalMask(seq_len_t, block_size, left_context,\n right_context)\n padding = 1.0 - mask\n padding_val = sess.run(padding)\n\n ref_padding = self._getReferenceCausalPadding(seq_len, block_size,\n left_context, right_context)\n self.assertAllEqual(ref_padding, padding_val)\n\n\nclass KMeansClusteringForAttenTest(test_utils.TestCase):\n\n def testFProp(self):\n p = attention_util.KMeansClusteringForAtten.Params()\n p.name = 'k_means'\n p.num_clusters = 2\n p.dim_per_head = 4\n p.num_heads = 3\n batch_size = 5\n seq_length = 6\n x = np.random.rand(batch_size, seq_length, p.num_heads,\n p.dim_per_head).astype(np.float32)\n k_means = p.Instantiate()\n\n with self.session():\n dists, loss = k_means.FProp(k_means.theta, x, update=True)\n self.evaluate(tf.global_variables_initializer())\n dists, loss = self.evaluate([dists, loss])\n self.assertEqual(dists.shape,\n (batch_size, seq_length, p.num_heads, p.num_clusters))\n self.assertEqual(loss.shape, ())\n\n def testFPropWithBfloat16AndNonTrainable(self):\n p = attention_util.KMeansClusteringForAtten.Params()\n p.name = 'k_means'\n p.num_clusters = 2\n p.dim_per_head = 4\n p.num_heads = 3\n p.trainable = False\n p.use_bfloat16 = True\n batch_size = 5\n seq_length = 6\n x = np.random.rand(batch_size, seq_length, p.num_heads,\n p.dim_per_head).astype(np.float32)\n k_means = p.Instantiate()\n\n with self.session():\n dists, loss = k_means.FProp(k_means.theta, x, update=True)\n self.evaluate(tf.global_variables_initializer())\n dists, loss = self.evaluate([dists, loss])\n self.assertEqual(dists.shape,\n (batch_size, seq_length, p.num_heads, p.num_clusters))\n self.assertEqual(loss.shape, ())\n\n def testFPropFixedInput(self):\n p = attention_util.KMeansClusteringForAtten.Params()\n p.name = 'k_means'\n p.num_clusters = 3\n p.dim_per_head = 6\n p.num_heads = 4\n p.decay = 0.5\n k_means = p.Instantiate()\n batch_size = 2\n seq_length = 5\n\n with self.session():\n x = np.random.rand(batch_size, seq_length, p.num_heads,\n p.dim_per_head).astype(np.float32)\n self.evaluate(tf.global_variables_initializer())\n fixed_loss = None\n for _ in range(10):\n dists, loss = k_means.FProp(k_means.theta, x, update=False)\n dists, loss = self.evaluate([dists, loss])\n if not fixed_loss:\n fixed_loss = loss\n else:\n # If we do not update, the loss remain fixed.\n self.assertEqual(loss, fixed_loss)\n prev_loss = fixed_loss\n self.evaluate(k_means.FProp(k_means.theta, x, update=True))\n for _ in range(5):\n dists, loss = k_means.FProp(k_means.theta, x, update=True)\n _, loss = self.evaluate([dists, loss])\n # If we update the centroids, the loss should strictly decrease.\n self.assertGreater(prev_loss - loss, 1e-5)\n prev_loss = loss\n\n def testFPropClustering(self):\n p = 
attention_util.KMeansClusteringForAtten.Params()\n p.name = 'k_means'\n p.num_clusters = 2\n p.dim_per_head = 3\n p.num_heads = 2\n p.decay = 0.8\n k_means = p.Instantiate()\n batch_size = 3\n seq_length = 3\n\n with self.session() as sess:\n self.evaluate(tf.global_variables_initializer())\n\n def _GenInput():\n # We randomly generate inputs such that head 0 is clustered\n # around (±1/√2, ±1/√2, ∓√2), while head 1 is clustered around\n # (∓-√2, ±1/√2, ±1/√2).\n noise = 0.05 * np.random.rand(batch_size, seq_length, p.num_heads,\n p.dim_per_head).astype(np.float32)\n x1 = np.random.binomial(1, 0.5, [batch_size, seq_length, 1, 1]) * 2 - 1\n x1 = np.tile(\n np.array([1., 1., -1.], dtype=np.float32),\n [batch_size, seq_length, 1, 1]) * x1\n x2 = np.random.binomial(1, 0.5, [batch_size, seq_length, 1, 1]) * 2 - 1\n x2 = np.tile(\n np.array([-1., 1., 1.], dtype=np.float32),\n [batch_size, seq_length, 1, 1]) * x2\n x = np.concatenate([x1, x2], axis=2) + noise\n return x.astype(np.float32)\n\n for _ in range(25):\n _, loss = sess.run(\n k_means.FProp(k_means.theta, _GenInput(), update=True))\n final_means = k_means.theta.means.eval()\n # We assert that the centroids are close to the true centers.\n self.assertAllClose(\n np.abs(final_means), [[[0.71, 0.71, 1.41], [0.71, 0.71, 1.41]],\n [[1.41, 0.71, 0.71], [1.41, 0.71, 0.71]]],\n rtol=0.03,\n atol=0.03)\n self.assertLess(loss, 0.005)\n\n def testFPropPadding(self):\n p = attention_util.KMeansClusteringForAtten.Params()\n p.name = 'k_means'\n p.num_clusters = 2\n p.dim_per_head = 3\n p.num_heads = 1\n p.decay = 0.7\n k_means = p.Instantiate()\n batch_size = 3\n seq_length = 5\n\n with self.session() as sess:\n self.evaluate(tf.global_variables_initializer())\n\n def _GenInput():\n # We randomly generate inputs such that inputs are clustered\n # around (±1/√2, ±1/√2, ∓√2) or (∓-√2, ±1/√2, ±1/√2) with one of them\n # hidden by padding.\n paddings = np.random.binomial(1, 0.5, [batch_size, seq_length]).astype(\n np.float32)\n x = np.expand_dims(np.expand_dims(paddings, axis=-1), axis=-1)\n # When padding is 0, we generate (∓1, 1, ±1); when padding is 1, we\n # generate (±1, 1, ∓1).\n x = np.concatenate([2 * x - 1, np.ones_like(x), 1 - 2 * x], axis=-1)\n x *= np.random.binomial(1, 0.5, [batch_size, seq_length, 1, 1]) * 2 - 1\n return x, paddings\n\n for _ in range(30):\n x, paddings = _GenInput()\n self.assertEqual(x.shape,\n (batch_size, seq_length, p.num_heads, p.dim_per_head))\n self.assertEqual(paddings.shape, (batch_size, seq_length))\n _, loss1 = sess.run(\n k_means.FProp(k_means.theta, x, paddings, update=True))\n means1 = k_means.theta.means.eval()\n\n # We reverse the padding to hide the other half.\n for _ in range(40):\n x, paddings = _GenInput()\n _, loss2 = sess.run(\n k_means.FProp(k_means.theta, x, 1.0 - paddings, update=True))\n means2 = k_means.theta.means.eval()\n\n # We compute the loss using the previous input centering on\n # different centroids. 
The squared distance should be 3.\n _, loss3 = sess.run(\n k_means.FProp(k_means.theta, x, paddings, update=False))\n\n self.assertAllClose(\n np.abs(means1), [[[1.41, 0.71, 0.71], [1.41, 0.71, 0.71]]],\n rtol=0.03,\n atol=0.03)\n self.assertLess(loss1, 1e-5)\n self.assertAllClose(\n np.abs(means2), [[[0.71, 0.71, 1.41], [0.71, 0.71, 1.41]]],\n rtol=0.03,\n atol=0.03)\n self.assertLess(loss2, 1e-5)\n\n self.assertAllClose(loss3, 3.0, 1e-4, 1e-4)\n\n def testFPropClusteringEmptyCluster(self):\n p = attention_util.KMeansClusteringForAtten.Params()\n p.name = 'k_means'\n p.num_clusters = 10\n p.dim_per_head = 3\n p.num_heads = 1\n p.decay = 0.8\n k_means = p.Instantiate()\n batch_size = 3\n seq_length = 4\n\n # All of our inputs are (1, 1, -1)\n x = np.ones([batch_size, seq_length, 1, 1], dtype=np.float32)\n x = np.concatenate([x, x, -x], axis=-1)\n with self.session() as sess:\n self.evaluate(tf.global_variables_initializer())\n for _ in range(30):\n dists, loss = sess.run(k_means.FProp(k_means.theta, x, update=True))\n means = k_means.theta.means.eval()\n idx = np.argmin(dists, axis=-1)\n idx_1 = idx[0, 0, 0]\n # We assert that 'dists' achieves minimum all at the same cluster.\n self.assertAllEqual(idx,\n np.array(idx_1 * np.ones([batch_size, seq_length, 1])))\n # We assert that at this index the centroid is close to (1/√2, 1/√2, -√2).\n means = np.squeeze(means[:, idx_1, :])\n self.assertAllClose(means, [0.71, 0.71, -1.41], rtol=0.03, atol=0.03)\n self.assertLess(loss, 1e-4)\n\n\nclass ComputeSparseAttention(test_utils.TestCase):\n\n def testBasics(self):\n batch_size = 3\n source_length = 5\n target_length = 4\n num_heads = 2\n dim_per_head = 3\n q = np.random.rand(batch_size, target_length, num_heads,\n dim_per_head).astype(np.float32)\n k = np.random.rand(batch_size, source_length, num_heads,\n dim_per_head).astype(np.float32)\n v = np.random.rand(batch_size, source_length, num_heads,\n dim_per_head).astype(np.float32)\n # attention window = 2\n sparsity_indices = np.concatenate([\n np.zeros([batch_size, target_length, num_heads, 1], dtype=np.int32),\n np.ones([batch_size, target_length, num_heads, 1], dtype=np.int32),\n ],\n axis=-1)\n\n with self.session() as sess:\n out, probs = sess.run(\n attention_util.ComputeSparseAttention(q, k, v, sparsity_indices))\n self.assertEqual(out.shape,\n (batch_size, target_length, num_heads, dim_per_head))\n self.assertEqual(probs.shape,\n (batch_size, target_length, num_heads, source_length))\n # attention weights sum to 1.\n self.assertAllClose(\n np.sum(probs, axis=-1),\n np.ones([batch_size, target_length, num_heads]))\n\n # attention window = 4, but last two are always paddings.\n sparsity_indices = np.concatenate([\n sparsity_indices,\n -np.ones([batch_size, target_length, num_heads, 2], dtype=np.int32),\n ],\n axis=-1)\n with self.session() as sess:\n out2, probs2 = sess.run(\n attention_util.ComputeSparseAttention(q, k, v, sparsity_indices))\n # We assert that the encoded outputs are the same as before,\n # and the attention weights are 0 on the padded positions.\n self.assertAllClose(out, out2)\n self.assertAllClose(probs, probs2)\n\n # attention window = 4.\n sparsity_indices = np.tile(\n np.arange(4, dtype=np.int32), [batch_size, target_length, num_heads, 1])\n # but position 2 and 3 are padded.\n paddings = np.tile([0., 0., 1., 1., 0.], [batch_size, 1])\n with self.session() as sess:\n out3, probs3 = sess.run(\n attention_util.ComputeSparseAttention(q, k, v, sparsity_indices,\n paddings))\n # We assert that the encoded outputs and 
attention weights are the same\n # as before.\n self.assertAllClose(out2, out3)\n self.assertAllClose(probs2, probs3)\n\n def testFullAttention(self):\n batch_size = 4\n source_length = 7\n target_length = 6\n num_heads = 3\n dim_per_head = 5\n q = np.random.rand(batch_size, target_length, num_heads,\n dim_per_head).astype(np.float32)\n k = np.random.rand(batch_size, source_length, num_heads,\n dim_per_head).astype(np.float32)\n v = np.random.rand(batch_size, source_length, num_heads,\n dim_per_head).astype(np.float32)\n # attention window = source length, randomly permutated\n # np.arange(source_length)\n sparsity_indices = np.tile(\n np.random.permutation(source_length).astype(np.int32),\n [batch_size, target_length, num_heads, 1])\n\n with self.session() as sess:\n out, probs = sess.run(\n attention_util.ComputeSparseAttention(q, k, v, sparsity_indices))\n\n # compute full attention in numpy\n expected_logit = np.einsum('BTNH, BSNH -> BTNS', q, k)\n expected_logit /= np.sqrt(dim_per_head)\n elexp = np.exp(expected_logit)\n expected_probs = elexp / np.expand_dims(np.sum(elexp, axis=-1), axis=-1)\n expected_output = np.einsum('BTNS, BSNH -> BTNH', expected_probs, v)\n\n # We assert that the output is close to the full attention,\n # since our sparsity_indices is range(source_length)\n self.assertAllClose(probs, expected_probs)\n self.assertAllClose(out, expected_output)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
]
| [
[
"numpy.dot",
"numpy.random.rand",
"numpy.ones_like",
"numpy.argmin",
"numpy.tile",
"numpy.exp",
"numpy.random.random",
"numpy.concatenate",
"numpy.random.binomial",
"numpy.arange",
"numpy.sqrt",
"numpy.expand_dims",
"numpy.array",
"numpy.zeros",
"numpy.einsum",
"numpy.squeeze",
"numpy.random.seed",
"numpy.sum",
"numpy.random.permutation",
"numpy.ones",
"numpy.abs"
]
]
|
hoangle96/CS539_project | [
"20f704398a8bdb3ab709cef45ed861898fd4d1dd"
]
| [
"tri/undreamt/undreamt/encoder.py"
]
| [
"# Copyright (C) 2018 Mikel Artetxe <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nfrom undreamt import data\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\nclass RNNEncoder(nn.Module):\n def __init__(self, embedding_size, hidden_size, bidirectional=False, layers=1, dropout=0):\n super(RNNEncoder, self).__init__()\n if bidirectional and hidden_size % 2 != 0:\n raise ValueError('The hidden dimension must be even for bidirectional encoders')\n self.directions = 2 if bidirectional else 1\n self.bidirectional = bidirectional\n self.layers = layers\n self.hidden_size = hidden_size // self.directions\n self.special_embeddings = nn.Embedding(data.SPECIAL_SYMBOLS+1, embedding_size, padding_idx=0)\n self.rnn = nn.GRU(embedding_size, self.hidden_size, bidirectional=bidirectional, num_layers=layers,\n dropout=dropout)\n\n def forward(self, ids, lengths, word_embeddings, hidden):\n sorted_lengths = sorted(lengths, reverse=True)\n is_sorted = sorted_lengths == lengths\n is_varlen = sorted_lengths[0] != sorted_lengths[-1]\n if not is_sorted:\n true2sorted = sorted(range(len(lengths)), key=lambda x: -lengths[x])\n sorted2true = sorted(range(len(lengths)), key=lambda x: true2sorted[x])\n ids = torch.stack([ids[:, i] for i in true2sorted], dim=1)\n lengths = [lengths[i] for i in true2sorted]\n embeddings = word_embeddings(data.word_ids(ids)) + self.special_embeddings(data.special_ids(ids))\n if is_varlen:\n embeddings = nn.utils.rnn.pack_padded_sequence(embeddings, lengths)\n output, hidden = self.rnn(embeddings, hidden)\n if self.bidirectional:\n hidden = torch.stack([torch.cat((hidden[2*i], hidden[2*i+1]), dim=1) for i in range(self.layers)])\n if is_varlen:\n output = nn.utils.rnn.pad_packed_sequence(output)[0]\n if not is_sorted:\n hidden = torch.stack([hidden[:, i, :] for i in sorted2true], dim=1)\n output = torch.stack([output[:, i, :] for i in sorted2true], dim=1)\n return hidden, output\n\n def initial_hidden(self, batch_size):\n return Variable(torch.zeros(self.layers*self.directions, batch_size, self.hidden_size), requires_grad=False)"
]
| [
[
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.nn.GRU",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Embedding"
]
]
|
dheeraj7596/SCDV | [
"329b13a413318262f1888d872d8e33b30217cbc7"
]
| [
"Reuters/FastText.py"
]
| [
"#!/usr/bin/env python\nimport pandas as pd\nimport nltk.data\nimport logging\nfrom gensim.models import FastText\nfrom KaggleWord2VecUtility import KaggleWord2VecUtility\nimport time\nimport sys\nimport csv\n\nif __name__ == '__main__':\n\n start = time.time()\n # The csv file might contain very huge fields, therefore set the field_size_limit to maximum.\n csv.field_size_limit(sys.maxsize)\n # Read train data.\n train_word_vector = pd.read_pickle('all.pkl')\n # Use the NLTK tokenizer to split the paragraph into sentences.\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n sentences = []\n print(\"Parsing sentences from training set...\")\n\n # Loop over each news article.\n for review in train_word_vector[\"text\"]:\n try:\n # Split a review into parsed sentences.\n sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer, remove_stopwords=True)\n except:\n continue\n\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', \\\n level=logging.INFO)\n\n num_features = int(sys.argv[1]) # Word vector dimensionality\n min_word_count = 20 # Minimum word count\n num_workers = 40 # Number of threads to run in parallel\n context = 10 # Context window size\n downsampling = 1e-3 # Downsample setting for frequent words\n\n print(\"Training FastText model...\")\n # Train FastText model.\n model = FastText(sentences, workers=num_workers, hs=0, sg=1, negative=10, iter=25, \\\n size=num_features, min_count=min_word_count, \\\n window=context, sample=downsampling, seed=1)\n\n model_name = str(num_features) + \"features_\" + str(min_word_count) + \"minwords_\" + str(\n context) + \"context_len2alldata\"\n model.init_sims(replace=True)\n # Save FastText model.\n print(\"Saving FastText model...\")\n model.save(model_name)\n endmodeltime = time.time()\n\n print(\"time : \", endmodeltime - start)\n"
]
| [
[
"pandas.read_pickle"
]
]
|
nishitanand/mixtext | [
"82e13b297ba913cc8ab80da312edbe56e466a691"
]
| [
"code/mixtext.py"
]
| [
"import torch\nimport torch.nn as nn\nfrom pytorch_transformers import *\nfrom transformers.modeling_bert import BertEmbeddings, BertPooler, BertLayer\nimport geoopt\n\nclass BertModel4Mix(BertPreTrainedModel):\n def __init__(self, config):\n super(BertModel4Mix, self).__init__(config)\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder4Mix(config)\n self.pooler = BertPooler(config)\n\n self.init_weights()\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.embeddings.word_embeddings\n new_embeddings = self._get_resized_embeddings(\n old_embeddings, new_num_tokens)\n self.embeddings.word_embeddings = new_embeddings\n return self.embeddings.word_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n def forward(self, input_ids, input_ids2=None, l=None, mix_layer=1000, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):\n\n if attention_mask is None:\n if input_ids2 is not None:\n attention_mask2 = torch.ones_like(input_ids2)\n attention_mask = torch.ones_like(input_ids)\n\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n if input_ids2 is not None:\n token_type_ids2 = torch.zeros_like(input_ids2)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n extended_attention_mask = extended_attention_mask.to(\n dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n if input_ids2 is not None:\n\n extended_attention_mask2 = attention_mask2.unsqueeze(\n 1).unsqueeze(2)\n\n extended_attention_mask2 = extended_attention_mask2.to(\n dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask2 = (\n 1.0 - extended_attention_mask2) * -10000.0\n\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(\n 0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(\n self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n # We can specify head_mask for each layer\n head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n # switch to fload if need + fp16 compatibility\n head_mask = head_mask.to(dtype=next(self.parameters()).dtype)\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n embedding_output = self.embeddings(\n input_ids, position_ids=position_ids, token_type_ids=token_type_ids)\n\n if input_ids2 is not None:\n embedding_output2 = self.embeddings(\n input_ids2, position_ids=position_ids, token_type_ids=token_type_ids2)\n\n if input_ids2 is not None:\n encoder_outputs = self.encoder(embedding_output, embedding_output2, l, mix_layer,\n extended_attention_mask, extended_attention_mask2, head_mask=head_mask)\n else:\n encoder_outputs = self.encoder(\n embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask)\n\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n\n # add hidden_states and attentions if they are here\n outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]\n # sequence_output, pooled_output, (hidden_states), (attentions)\n return outputs\n\n\nclass BertEncoder4Mix(nn.Module):\n def __init__(self, config):\n super(BertEncoder4Mix, self).__init__()\n 
self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = nn.ModuleList([BertLayer(config)\n for _ in range(config.num_hidden_layers)])\n\n def forward(self, hidden_states, hidden_states2=None, l=None, mix_layer=1000, attention_mask=None, attention_mask2=None, head_mask=None):\n all_hidden_states = ()\n all_attentions = ()\n\n # Perform mix at till the mix_layer\n if mix_layer == -1:\n if hidden_states2 is not None:\n hidden_states = geoopt.PoincareBall.logmap0( geoopt.PoincareBall.mobius_add ( geoopt.PoincareBall.mobius_scalar_mul ( l , geoopt.PoincareBall.expmap0(hidden_states) ) , geoopt.PoincareBall.mobius_scalar_mul ( (1-l) , geoopt.PoincareBall.expmap0(hidden_states2) ) ) )\n\n for i, layer_module in enumerate(self.layer):\n if i <= mix_layer:\n\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states, attention_mask, head_mask[i])\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n if hidden_states2 is not None:\n layer_outputs2 = layer_module(\n hidden_states2, attention_mask2, head_mask[i])\n hidden_states2 = layer_outputs2[0]\n\n if i == mix_layer:\n if hidden_states2 is not None:\n hidden_states = geoopt.PoincareBall.logmap0( geoopt.PoincareBall.mobius_add ( geoopt.PoincareBall.mobius_scalar_mul ( l , geoopt.PoincareBall.expmap0(hidden_states) ) , geoopt.PoincareBall.mobius_scalar_mul ( (1-l) , geoopt.PoincareBall.expmap0(hidden_states2) ) ) )\n\n if i > mix_layer:\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states, attention_mask, head_mask[i])\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n # last-layer hidden state, (all hidden states), (all attentions)\n return outputs\n\n\nclass MixText(nn.Module):\n def __init__(self, num_labels=2, mix_option=False):\n super(MixText, self).__init__()\n\n if mix_option:\n self.bert = BertModel4Mix.from_pretrained('bert-base-uncased')\n else:\n self.bert = BertModel.from_pretrained('bert-base-uncased')\n\n self.linear = nn.Sequential(nn.Linear(768, 128),\n nn.Tanh(),\n nn.Linear(128, num_labels))\n\n def forward(self, x, x2=None, l=None, mix_layer=1000):\n\n if x2 is not None:\n all_hidden, pooler = self.bert(x, x2, l, mix_layer)\n\n pooled_output = torch.mean(all_hidden, 1)\n\n else:\n all_hidden, pooler = self.bert(x)\n\n pooled_output = torch.mean(all_hidden, 1)\n\n predict = self.linear(pooled_output)\n\n return predict\n\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Tanh",
"torch.ones_like",
"torch.zeros_like",
"torch.mean"
]
]
|
MrPec/project-teachable | [
"c000067c2033efc0725a606ed6a0c479a5f96a45"
]
| [
"embedding.py"
]
| [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Detection Engine used for detection tasks.\"\"\"\nfrom collections import Counter\nfrom collections import defaultdict\nfrom edgetpu.basic.basic_engine import BasicEngine\nimport numpy as np\nfrom PIL import Image\n\n\nclass EmbeddingEngine(BasicEngine):\n \"\"\"Engine used to obtain embeddings from headless mobilenets.\"\"\"\n\n def __init__(self, model_path):\n \"\"\"Creates a EmbeddingEngine with given model and labels.\n\n Args:\n model_path: String, path to TF-Lite Flatbuffer file.\n\n Raises:\n ValueError: An error occurred when model output is invalid.\n \"\"\"\n BasicEngine.__init__(self, model_path)\n output_tensors_sizes = self.get_all_output_tensors_sizes()\n if output_tensors_sizes.size != 1:\n raise ValueError(\n ('Dectection model should have only 1 output tensor!'\n 'This model has {}.'.format(output_tensors_sizes.size)))\n\n def DetectWithImage(self, img):\n \"\"\"Calculates embedding from an image.\n\n Args:\n img: PIL image object.\n\n Returns:\n Embedding vector as np.float32\n\n Raises:\n RuntimeError: when model's input tensor format is invalid.\n \"\"\"\n input_tensor_shape = self.get_input_tensor_shape()\n if (input_tensor_shape.size != 4 or input_tensor_shape[3] != 3 or\n input_tensor_shape[0] != 1):\n raise RuntimeError(\n 'Invalid input tensor shape! Expected: [1, height, width, 3]')\n required_image_size = (input_tensor_shape[2], input_tensor_shape[1])\n with img.resize(required_image_size, Image.NEAREST) as resized_img:\n input_tensor = np.asarray(resized_img).flatten()\n return self.RunInference(input_tensor)[1]\n\n\nclass KNNEmbeddingEngine(EmbeddingEngine):\n \"\"\"Extends embedding engine to also provide kNearest Neighbor detection.\n\n This class maintains an in-memory store of embeddings and provides\n functions to find k nearest neighbors against a query emedding.\n \"\"\"\n\n def __init__(self, model_path, kNN=3):\n \"\"\"Creates a EmbeddingEngine with given model and labels.\n\n Args:\n model_path: String, path to TF-Lite Flatbuffer file.\n\n Raises:\n ValueError: An error occurred when model output is invalid.\n \"\"\"\n EmbeddingEngine.__init__(self, model_path)\n self.clear()\n self._kNN = kNN\n\n def clear(self):\n \"\"\"Clear the store: forgets all stored embeddings.\"\"\"\n self._labels = []\n self._embedding_map = defaultdict(list)\n self._embeddings = None\n\n def addEmbedding(self, emb, label):\n \"\"\"Add an embedding vector to the store.\"\"\"\n\n normal = emb/np.sqrt((emb**2).sum()) # Normalize the vector\n\n self._embedding_map[label].append(normal) # Add to store, under \"label\"\n\n # Expand labelled blocks of embeddings for when we have less than kNN\n # examples. 
Otherwise blocks that have more examples unfairly win.\n emb_blocks = []\n self._labels = [] # We'll be reconstructing the list of labels\n for label, embeds in self._embedding_map.items():\n emb_block = np.stack(embeds)\n if emb_block.shape[0] < self._kNN:\n emb_block = np.pad(emb_block,\n [(0,self._kNN - emb_block.shape[0]), (0,0)],\n mode=\"reflect\")\n emb_blocks.append(emb_block)\n self._labels.extend([label]*emb_block.shape[0])\n\n self._embeddings = np.concatenate(emb_blocks, axis=0)\n\n def kNNEmbedding(self, query_emb):\n \"\"\"Returns the self._kNN nearest neighbors to a query embedding.\"\"\"\n\n # If we have nothing stored, the answer is None\n if self._embeddings is None: return None\n\n # Normalize query embedding\n query_emb = query_emb/np.sqrt((query_emb**2).sum())\n\n # We want a cosine distance ifrom query to each stored embedding. A matrix\n # multiplication can do this in one step, resulting in a vector of\n # distances.\n dists = np.matmul(self._embeddings, query_emb)\n\n # If we have less than self._kNN distances we can only return that many.\n kNN = min(len(dists), self._kNN)\n\n # Get the N largest cosine similarities (larger means closer).\n n_argmax = np.argpartition(dists, -kNN)[-kNN:]\n\n # Get the corresponding labels associated with each distance.\n labels = [self._labels[i] for i in n_argmax]\n\n # Return the most common label over all self._kNN nearest neighbors.\n most_common_label = Counter(labels).most_common(1)[0][0]\n return most_common_label\n\n def exampleCount(self):\n \"\"\"Just returns the size of the embedding store.\"\"\"\n return sum(len(v) for v in self._embedding_map.values())\n\n\n"
]
| [
[
"numpy.concatenate",
"numpy.pad",
"numpy.matmul",
"numpy.asarray",
"numpy.stack",
"numpy.argpartition"
]
]
|
MihailoIsakov/fishfish | [
"ddce2a4f0834d70a9673ff3b50e802e72c5b928c"
]
| [
"lasagne/init.py"
]
| [
"\"\"\"\nFunctions to create initializers for parameter variables.\n\nExamples\n--------\n>>> from lasagne.layers import DenseLayer\n>>> from lasagne.init import Constant, Glorot\n>>> l1 = DenseLayer((100,20), num_units=50, W=GlorotUniform(), b=Constant(0.0))\n\"\"\"\n\nimport numpy as np\n\nfrom .utils import floatX\n\n\nclass Initializer(object):\n \"\"\"Base class for parameter tensor initializers.\n\n The :class:`Initializer` class represents a weight initializer used\n to initialize weight parameters in a neural network layer. It should be\n subclassed when implementing new types of weight initializers.\n\n \"\"\"\n def __call__(self, shape):\n \"\"\"\n Makes :class:`Initializer` instances callable like a function, invoking\n their :meth:`sample()` method.\n \"\"\"\n return self.sample(shape)\n\n def sample(self, shape):\n \"\"\"\n Sample should return a theano.tensor of size shape and data type\n theano.config.floatX.\n\n Parameters\n -----------\n shape : tuple or int\n Integer or tuple specifying the size of the returned\n matrix.\n returns : theano.tensor\n Matrix of size shape and dtype theano.config.floatX.\n \"\"\"\n raise NotImplementedError()\n\n\nclass Normal(Initializer):\n \"\"\"Sample initial weights from the Gaussian distribution.\n\n Initial weight parameters are sampled from N(mean, std).\n\n Parameters\n ----------\n std : float\n Std of initial parameters.\n mean : float\n Mean of initial parameters.\n \"\"\"\n def __init__(self, std=0.01, mean=0.0):\n self.std = std\n self.mean = mean\n\n def sample(self, shape):\n return floatX(np.random.normal(self.mean, self.std, size=shape))\n\n\nclass Uniform(Initializer):\n \"\"\"Sample initial weights from the uniform distribution.\n\n Parameters are sampled from U(a, b).\n\n Parameters\n ----------\n range : float or tuple\n When std is None then range determines a, b. If range is a float the\n weights are sampled from U(-range, range). If range is a tuple the\n weights are sampled from U(range[0], range[1]).\n std : float or None\n If std is a float then the weights are sampled from\n U(mean - np.sqrt(3) * std, mean + np.sqrt(3) * std).\n mean : float\n see std for description.\n \"\"\"\n def __init__(self, range=0.01, std=None, mean=0.0):\n import warnings\n warnings.warn(\"The uniform initializer no longer uses Glorot et al.'s \"\n \"approach to determine the bounds, but defaults to the \"\n \"range (-0.01, 0.01) instead. Please use the new \"\n \"GlorotUniform initializer to get the old behavior. \"\n \"GlorotUniform is now the default for all layers.\")\n\n if std is not None:\n a = mean - np.sqrt(3) * std\n b = mean + np.sqrt(3) * std\n else:\n try:\n a, b = range # range is a tuple\n except TypeError:\n a, b = -range, range # range is a number\n\n self.range = (a, b)\n\n def sample(self, shape):\n return floatX(np.random.uniform(\n low=self.range[0], high=self.range[1], size=shape))\n\n\nclass Glorot(Initializer):\n \"\"\"Glorot weight initialization [1]_.\n\n This is also known as Xavier initialization.\n\n Parameters\n ----------\n initializer : lasagne.init.Initializer\n Initializer used to sample the weights, must accept `std` in its\n constructor to sample from a distribution with a given standard\n deviation.\n gain : float or 'relu'\n Scaling factor for the weights. Set this to 1.0 for linear and sigmoid\n units, to 'relu' or sqrt(2) for rectified linear units. 
Other transfer\n functions may need different factors.\n c01b : bool\n For a :class:`lasagne.layers.cuda_convnet.Conv2DCCLayer` constructed\n with ``dimshuffle=False``, `c01b` must be set to ``True`` to compute\n the correct fan-in and fan-out.\n\n References\n ----------\n .. [1] Xavier Glorot and Yoshua Bengio (2010):\n Understanding the difficulty of training deep feedforward neural\n networks. International conference on artificial intelligence and\n statistics.\n\n Notes\n -----\n For a :class:`DenseLayer`, if ``gain='relu'`` and ``initializer=Uniform``,\n the weights are initialized as\n\n .. math::\n a &= \\\\sqrt{\\\\frac{6}{fan_{in}+fan_{out}}}\\\\\\\\\n W &\\sim U[-a, a]\n\n If ``gain=1`` and ``initializer=Normal``, the weights are initialized as\n\n .. math::\n \\\\sigma &= \\\\sqrt{\\\\frac{2}{fan_{in}+fan_{out}}}\\\\\\\\\n W &\\sim N(0, \\\\sigma)\n\n See Also\n --------\n GlorotNormal : Shortcut with Gaussian initializer.\n GlorotUniform : Shortcut with uniform initializer.\n \"\"\"\n def __init__(self, initializer, gain=1.0, c01b=False):\n if gain == 'relu':\n gain = np.sqrt(2)\n\n self.initializer = initializer\n self.gain = gain\n self.c01b = c01b\n\n def sample(self, shape):\n if self.c01b:\n if len(shape) != 4:\n raise RuntimeError(\n \"If c01b is True, only shapes of length 4 are accepted\")\n\n n1, n2 = shape[0], shape[3]\n receptive_field_size = shape[1] * shape[2]\n else:\n if len(shape) < 2:\n raise RuntimeError(\n \"This initializer only works with shapes of length >= 2\")\n\n n1, n2 = shape[:2]\n receptive_field_size = np.prod(shape[2:])\n\n std = self.gain * np.sqrt(2.0 / ((n1 + n2) * receptive_field_size))\n return self.initializer(std=std).sample(shape)\n\n\nclass GlorotNormal(Glorot):\n \"\"\"Glorot with weights sampled from the Normal distribution.\n\n See :class:`Glorot` for a description of the parameters.\n \"\"\"\n def __init__(self, gain=1.0, c01b=False):\n super(GlorotNormal, self).__init__(Normal, gain, c01b)\n\n\nclass GlorotUniform(Glorot):\n \"\"\"Glorot with weights sampled from the Uniform distribution.\n\n See :class:`Glorot` for a description of the parameters.\n \"\"\"\n def __init__(self, gain=1.0, c01b=False):\n super(GlorotUniform, self).__init__(Uniform, gain, c01b)\n\n\nclass He(Initializer):\n \"\"\"He weight initialization [1]_.\n\n Weights are initialized with a standard deviation of\n :math:`\\\\sigma = gain \\\\sqrt{\\\\frac{1}{fan_{in}}}`.\n\n Parameters\n ----------\n initializer : lasagne.init.Initializer\n Initializer used to sample the weights, must accept `std` in its\n constructor to sample from a distribution with a given standard\n deviation.\n gain : float or 'relu'\n Scaling factor for the weights. Set this to 1.0 for linear and sigmoid\n units, to 'relu' or sqrt(2) for rectified linear units. Other transfer\n functions may need different factors.\n c01b : bool\n For a :class:`lasagne.layers.cuda_convnet.Conv2DCCLayer` constructed\n with ``dimshuffle=False``, `c01b` must be set to ``True`` to compute\n the correct fan-in and fan-out.\n\n References\n ----------\n .. [1] Kaiming He et al. (2015):\n Delving deep into rectifiers: Surpassing human-level performance on\n imagenet classification. 
arXiv preprint arXiv:1502.01852.\n\n See Also\n ----------\n HeNormal : Shortcut with Gaussian initializer.\n HeUniform : Shortcut with uniform initializer.\n \"\"\"\n def __init__(self, initializer, gain=1.0, c01b=False):\n if gain == 'relu':\n gain = np.sqrt(2)\n\n self.initializer = initializer\n self.gain = gain\n self.c01b = c01b\n\n def sample(self, shape):\n if self.c01b:\n if len(shape) != 4:\n raise RuntimeError(\n \"If c01b is True, only shapes of length 4 are accepted\")\n\n fan_in = np.prod(shape[:3])\n else:\n if len(shape) == 2:\n fan_in = shape[0]\n elif len(shape) > 2:\n fan_in = np.prod(shape[1:])\n else:\n raise RuntimeError(\n \"This initializer only works with shapes of length >= 2\")\n\n std = self.gain * np.sqrt(1.0 / fan_in)\n return self.initializer(std=std).sample(shape)\n\n\nclass HeNormal(He):\n \"\"\"He initializer with weights sampled from the Normal distribution.\n\n See :class:`He` for a description of the parameters.\n \"\"\"\n def __init__(self, gain=1.0, c01b=False):\n super(HeNormal, self).__init__(Normal, gain, c01b)\n\n\nclass HeUniform(He):\n \"\"\"He initializer with weights sampled from the Uniform distribution.\n\n See :class:`He` for a description of the parameters.\n \"\"\"\n def __init__(self, gain=1.0, c01b=False):\n super(HeUniform, self).__init__(Uniform, gain, c01b)\n\n\nclass Constant(Initializer):\n \"\"\"Initialize weights with constant value.\n\n Parameters\n ----------\n val : float\n Constant value for weights.\n \"\"\"\n def __init__(self, val=0.0):\n self.val = val\n\n def sample(self, shape):\n return floatX(np.ones(shape) * self.val)\n\n\nclass Sparse(Initializer):\n \"\"\"Initialize weights as sparse matrix.\n\n Parameters\n ----------\n sparsity : float\n Exact fraction of non-zero values per column. Larger values give less\n sparsity.\n std : float\n Non-zero weights are sampled from N(0, std).\n \"\"\"\n def __init__(self, sparsity=0.1, std=0.01):\n self.sparsity = sparsity\n self.std = std\n\n def sample(self, shape):\n if len(shape) != 2:\n raise RuntimeError(\n \"sparse initializer only works with shapes of length 2\")\n\n w = floatX(np.zeros(shape))\n n_inputs, n_outputs = shape\n size = int(self.sparsity * n_inputs) # fraction of number of inputs\n\n for k in range(n_outputs):\n indices = np.arange(n_inputs)\n np.random.shuffle(indices)\n indices = indices[:size]\n values = floatX(np.random.normal(0.0, self.std, size=size))\n w[indices, k] = values\n\n return w\n\n\nclass Orthogonal(Initializer):\n \"\"\"Intialize weights as Orthogonal matrix.\n\n Orthogonal matrix initialization. For n-dimensional shapes where n > 2,\n the n-1 trailing axes are flattened. For convolutional layers, this\n corresponds to the fan-in, so this makes the initialization usable for\n both dense and convolutional layers.\n\n Parameters\n ----------\n gain : float or 'relu'\n 'relu' gives gain of sqrt(2).\n \"\"\"\n def __init__(self, gain=1.0):\n if gain == 'relu':\n gain = np.sqrt(2)\n\n self.gain = gain\n\n def sample(self, shape):\n if len(shape) < 2:\n raise RuntimeError(\"Only shapes of length 2 or more are \"\n \"supported.\")\n\n flat_shape = (shape[0], np.prod(shape[1:]))\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n # pick the one with the correct shape\n q = u if u.shape == flat_shape else v\n q = q.reshape(shape)\n return floatX(self.gain * q)\n"
]
| [
[
"numpy.random.normal",
"numpy.zeros",
"numpy.ones",
"numpy.random.shuffle",
"numpy.prod",
"numpy.random.uniform",
"numpy.arange",
"numpy.sqrt",
"numpy.linalg.svd"
]
]
|
fengjiaxin/Home_Credit_Default_Risk | [
"3407e76b4e5cfb8dd6056d24675b80fe0e82c123"
]
| [
"20180520/StackingBaseline/StackingBaseline.py"
]
| [
"# coding:utf-8\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom xgboost import XGBClassifier\nfrom category_encoders import LeaveOneOutEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom mlxtend.classifier import StackingCVClassifier\nnp.random.seed(7)\n\n\nclass StackingBaseline(object):\n\n def __init__(self, *, path):\n self.__path = path\n self.__application_train = None\n self.__application_test = None\n self.__sample_submission = None\n\n # data prepare\n self.__application_train_feature = None\n self.__application_train_label = None\n self.__application_test_feature = None\n\n self.__categorical_columns = None\n self.__numeric_columns = None\n\n # numeric handle\n # categorical handle\n self.__encoder = None\n\n # model fit\n self.__lr = None\n self.__ef = None\n self.__rf = None\n self.__gb = None\n self.__xgb = None\n self.__sclf = None\n\n def data_prepare(self):\n self.__application_train = pd.read_csv(os.path.join(self.__path, \"application_train.csv\"))\n self.__application_test = pd.read_csv(os.path.join(self.__path, \"application_test.csv\"))\n self.__sample_submission = pd.read_csv(os.path.join(self.__path, \"sample_submission.csv\"))\n\n self.__application_train = self.__application_train.drop(\"SK_ID_CURR\", axis=1)\n self.__application_test = self.__application_test.drop(\"SK_ID_CURR\", axis=1)\n\n self.__application_train_feature = self.__application_train[[i for i in self.__application_train.columns if i != \"TARGET\"]]\n self.__application_train_label = self.__application_train[\"TARGET\"]\n self.__application_test_feature = self.__application_test\n\n self.__categorical_columns = self.__application_train_feature.select_dtypes(include=[\"object\"]).columns.tolist()\n self.__numeric_columns = [i for i in self.__application_train_feature.columns if i not in self.__categorical_columns]\n\n def numeric_handle(self):\n self.__application_train_feature[self.__numeric_columns] = self.__application_train_feature[self.__numeric_columns].fillna(-999.0)\n self.__application_test_feature[self.__numeric_columns] = self.__application_test_feature[self.__numeric_columns].fillna(-999.0)\n\n def categorical_handle(self):\n self.__application_train_feature[self.__categorical_columns] = (\n self.__application_train_feature[self.__categorical_columns].fillna(\"missing\")\n )\n\n self.__encoder = LeaveOneOutEncoder()\n self.__encoder.fit(self.__application_train_feature[self.__categorical_columns], self.__application_train_label)\n self.__application_train_feature[self.__categorical_columns] = self.__encoder.transform(\n self.__application_train_feature[self.__categorical_columns]\n )\n self.__application_test_feature[self.__categorical_columns] = self.__encoder.transform(\n self.__application_test_feature[self.__categorical_columns]\n )\n\n def model_fit(self):\n self.__ef = ExtraTreesClassifier(n_jobs=-1)\n self.__rf = RandomForestClassifier(n_jobs=-1)\n self.__lr = LogisticRegression()\n self.__gb = GradientBoostingClassifier()\n self.__xgb = XGBClassifier(n_jobs=-1, missing=-999.0)\n self.__sclf = StackingCVClassifier(\n classifiers=[self.__ef, self.__rf, self.__gb, self.__xgb],\n meta_classifier=self.__lr,\n use_probas=True,\n cv=3\n )\n # sclf 需要的是 numpy array\n self.__sclf.fit(self.__application_train_feature.values, self.__application_train_label.values)\n\n def model_predict(self):\n 
self.__sample_submission[\"TARGET\"] = np.clip(self.__sclf.predict_proba(self.__application_test_feature.values)[:, 1], 0, 1)\n self.__sample_submission.to_csv(\n \"C:\\\\Users\\\\puhui\\\\PycharmProjects\\\\Home_Credit_Default_Risk\\\\20180520\\StackingBaseline\\\\sample_submission2.csv\",\n index=False\n )\n\n\nif __name__ == \"__main__\":\n sb = StackingBaseline(path=\"C:\\\\Users\\\\puhui\\\\PycharmProjects\\\\Home_Credit_Default_Risk\\\\Data\")\n sb.data_prepare()\n sb.numeric_handle()\n sb.categorical_handle()\n sb.model_fit()\n sb.model_predict()"
]
| [
[
"numpy.random.seed",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.ExtraTreesClassifier",
"sklearn.ensemble.GradientBoostingClassifier"
]
]
|
011000101101/VRAR_project | [
"7b0be02517de3e3975c9a697e4d6353c3fd6225f"
]
| [
"classifier_subsystem/save_samples_as_tfrecords.py"
]
| [
"import numpy as np\nimport tensorflow as tf\nimport os\nimport pickle\nfrom sklearn.model_selection import train_test_split\n\nfrom utils.params import *\nimport utils.classify_util as classify_utils\n\n# load training data\nwith open(\"../bin_blobs/kanji_image_samples_augmentes.pkl\", 'rb') as f:\n image_samples_augmented = pickle.load(f)\n\nX = [sample[0] for sample in image_samples_augmented]\ny = [sample[1] for sample in image_samples_augmented]\n\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.05, shuffle=True)\n\nprint(len(X), len(X_train), len(X_val))\n\nX = [X_train, X_val]\ny = [y_train, y_val]\nsubfolder = [\"train/\", \"val/\"]\n\n# for i in range(2):\n#\n# item_counter_in_record = 0\n# record_counter = 0\n# current_base_path = os.path.join(os.path.join(ROOT_DIR, \"classifier_subsystem/tfrecords/\"), subfolder[i])\n# if not os.path.isdir(current_base_path):\n# os.mkdir(current_base_path)\n# writer = tf.io.TFRecordWriter(os.path.join(current_base_path + \"000.tfrecord\"))\n#\n# for sample, label in zip(X[i], y[i]):\n#\n# item_counter_in_record += 1\n#\n# if item_counter_in_record > 1000: # \"the recommended number of images stored in one tfrecord file is 1000.\"\n# item_counter_in_record = 1\n# record_counter += 1\n# writer.close()\n# writer = tf.io.TFRecordWriter(current_base_path + \"%.3d.tfrecord\" % record_counter)\n# print(\"Creating the %.3d tfrecord file\" % record_counter)\n#\n# img_raw = sample.flatten()\n# label_raw = classify_utils.convert_label_to_tensor(label)\n#\n# example = tf.train.Example(features=tf.train.Features(feature={\n# \"mapped_data\": tf.train.Feature(int64_list=tf.train.Int64List(value=img_raw)),\n# \"result\": tf.train.Feature(int64_list=tf.train.Int64List(value=label_raw))}))\n# writer.write(example.SerializeToString())\n#\n# writer.close()"
]
| [
[
"sklearn.model_selection.train_test_split"
]
]
|
yongfang117/data_process | [
"c77af1b336ec8b7f61b538ea43dd03ee005a5227"
]
| [
"DataProcess/DataVisible/temper.py"
]
| [
"# coding:utf8\n\n\"\"\"\nDescription:matplotlib绘制复杂图\nAuthor:伏草惟存\nPrompt: code in Python3 env\n\"\"\"\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport csv\nfrom datetime import datetime\n#加入中文显示\nimport matplotlib.font_manager as fm\n# 解决中文乱码,本案例使用宋体字\nmyfont=fm.FontProperties(fname=r\"C:\\\\Windows\\\\Fonts\\\\simsun.ttc\")\n\n\n\ndef temper_char():\n fig = plt.figure() # 将画布划分为1行1列1块\n dates,highs,lows = [],[],[]\n with open(r'./weather07.csv') as f:\n reader = csv.reader(f)\n header_row = next(reader) # 返回文件第一行\n for row in reader:\n current_date = datetime.strptime(row[0],\"%Y-%m-%d\")\n dates.append(current_date)\n highs.append(int(row[1]))\n lows.append((int(row[3])))\n\n\n # 接收数据并绘制图形,facecolor填充区域颜色\n plt.plot(dates,highs,c='red',linewidth=2,alpha=0.5)\n plt.plot(dates,lows,c='green',linewidth=2,alpha=0.5)\n plt.fill_between(dates,highs,lows,facecolor='blue',alpha=0.2)\n\n # 设置散点图标题和横纵坐标标题\n plt.title(\"日常最高气温,2018年7月\",fontsize=10,fontname='宋体',fontproperties=myfont)\n plt.xlabel('横坐标',fontsize=10,fontname='宋体',fontproperties=myfont)\n plt.ylabel('温度',fontsize=10,fontname='宋体',fontproperties=myfont)\n\n # 绘制斜的日期\n fig.autofmt_xdate()\n\n # 设置刻度标记大小,axis='both'参数影响横纵坐标,labelsize刻度大小\n plt.tick_params(axis='both',which='major',labelsize=8)\n\n # 显示图形\n plt.show()\n\n\n\nif __name__ == '__main__':\n xvalues = list(range(1,100)) #校正坐标点,即横坐标值列表\n yvalues = [x**2 for x in xvalues] # 纵坐标值列表\n\n x_result = [1,2,3,4,5,6]\n y_frequencies = [152,171,175,168,150,179]\n\n temper_char()\n"
]
| [
[
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
]
|
partev/statsmodels | [
"00096d3b668926ed5c2059e24a9cc3157daaa562"
]
| [
"statsmodels/distributions/copula/tests/test_copula.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 14 23:32:57 2021\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\n\"\"\"\nfrom statsmodels.compat.scipy import SP_LT_15\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_almost_equal\nimport pytest\nfrom scipy import stats\n\nfrom statsmodels.distributions.copula.archimedean import (\n ArchimedeanCopula,\n ClaytonCopula,\n FrankCopula,\n GumbelCopula,\n)\nfrom statsmodels.distributions.copula.copulas import CopulaDistribution\nimport statsmodels.distributions.copula.depfunc_ev as trev\nfrom statsmodels.distributions.copula.elliptical import (\n GaussianCopula,\n StudentTCopula,\n)\nfrom statsmodels.distributions.copula.extreme_value import (\n ExtremeValueCopula,\n copula_bv_ev,\n)\nfrom statsmodels.distributions.copula.other_copulas import IndependenceCopula\nimport statsmodels.distributions.copula.transforms as tra\nfrom statsmodels.distributions.tools import (\n approx_copula_pdf,\n frequencies_fromdata,\n)\nfrom statsmodels.tools.numdiff import approx_fprime_cs, approx_hess\n\nuniform = stats.uniform\n\n\nev_list = [\n [trev.transform_bilogistic, 0.5, 0.9, (0.25, 0.05), 0.5],\n [trev.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.5), 0.4724570876035117],\n # note evd has asymmetry reversed, interchange variables\n [trev.transform_tawn2, 0.5, 0.9, (0.25, 0.05), 0.464357480263932],\n [trev.transform_tawn2, 0.5, 0.9, (0.5, 0.25), 0.4916117128670654],\n [trev.transform_tawn2, 0.9, 0.5, (0.5, 0.25), 0.48340673415789],\n # note evd has parameter for hr 1/lmbda (inverse of our parameter)\n [trev.transform_hr, 0.5, 0.9, (2,), 0.4551235014298542],\n [trev.transform_joe, 0.5, 0.9, (0.5, 0.75, 1 / 0.25), 0.4543698299835434],\n [trev.transform_joe, 0.9, 0.5, (0.5, 0.75, 1 / 0.25), 0.4539773435983587],\n # tev is against R `copula` package\n # > cop = tevCopula(0.8, df = 4)\n # > pCopula(c(0.5, 0.75), cop)\n # [1] 0.456807960674953\n # > pCopula(c(0.5, 0.9), cop)\n # [1] 0.4911039761533587\n [trev.transform_tev, 0.5, 0.75, (0.8, 4), 0.456807960674953],\n [trev.transform_tev, 0.5, 0.9, (0.8, 4), 0.4911039761533587],\n]\n\nev_dep_list = [\n # [trev.transform_bilogistic, 0.5, 0.9, (0.25, 0.05), 0.5],\n [trev.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.5), 0.4724570876035117,\n [0.8952847075210475, 0.8535533905932737, 0.8952847075210475]],\n # abvevd(c(0.25, 0.5, 0.75), dep=0.25, asy = c(0.5, 0.75), model = \"alog\")\n [trev.transform_tawn, 0.5, 0.9, (0.5, 0.75, 0.25), 0.4724570876035117,\n [0.8753426223607659, 0.7672861240893745, 0.8182268471629245]],\n\n [trev.transform_tawn2, 0.4, 0.9, (0.3, 0.2), 0,\n [0.8968750000000001, 0.8500000000000000, 0.8781249999999999]],\n # # note evd has asymmetry reversed, interchange variables - NOT anymore\n # [trev.transform_tawn2, 0.9, 0.5, (0.25, 0.05), 0.464357480263932],\n # [trev.transform_tawn2, 0.9, 0.5, (0.5, 0.25), 0.4916117128670654],\n # [trev.transform_tawn2, 0.5, 0.9, (0.5, 0.25), 0.48340673415789],\n # # note evd has parameter for hr 1/lmbda (inverse of our parameter)\n [trev.transform_hr, 0.5, 0.9, (1/2,), 0.4551235014298542,\n [0.7774638908611127, 0.6914624612740130, 0.7774638908611127]],\n # [trev.transform_joe, 0.5, 0.9, (0.5, 0.75, 1/0.25), 0.4543698299835434],\n # [trev.transform_joe, 0.9, 0.5, (0.5, 0.75, 1/0.25), 0.4539773435983587],\n # > abvevd(c(0.25, 0.5, 0.75), dep=0.75, asy=c(0.5, 0.75), model=\"aneglog\")\n # [1] 0.9139915932031195 0.8803412942173715 0.8993537417026507\n [trev.transform_joe, 0.5, 0.9, (0.5, 0.75, 1/0.75), 0.,\n [0.9139915932031195, 0.8803412942173715, 
0.8993537417026507]]\n ]\n\n\ncop_list = [\n [tra.TransfFrank, 0.5, 0.9, (2,), 0.4710805107852225, 0.9257812360337806],\n [tra.TransfGumbel, 0.5, 0.9, (2,), 0.4960348880595387, 0.3973548776136501],\n [tra.TransfClayton, 0.5, 0.9, (2,), 0.485954322440435, 0.8921974147432954],\n [tra.TransfIndep, 0.5, 0.5, (), 0.25, 1],\n]\n\ngev_list = [\n # [cop.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.5), 0.4724570876035117],\n # > pbvevd(c(0.5,0.9), dep = 0.25, asy = c(0.5, 0.5), model = \"alog\")\n # [trev.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.25),\n # 0.4386367545837274, 0.12227570158361],\n [trev.transform_tawn, 0.5, 0.9, (0.5, 0.75, 0.25),\n 0.4868879662205026, 0.4646154226541540, 0.1897142141905908],\n [trev.transform_tawn2, 0.4, 0.9, (0.3, 0.2),\n 0.3838690483829361, 0.3989785485000293, 0.1084278364284748],\n # [trev.transform_tawn2, 0.5, 0.5, (0.5, 0.25), 0.387629940606913,\n # 0.1383277275273335],\n # [trev.transform_tawn2, 0.9, 0.5, (0.5, 0.25), 0.4519820720233402,\n # 0.1162545305128522], # fails in pdf\n # note evd has parameter for hr 1/lmbda (inverse of our parameter)\n [trev.transform_hr, 0.4, 0.9, (2,),\n 0.36459381872178737, 0.34879372499897571, 0.09305880295825367],\n # [trev.transform_joe, 0.5, 0.9, (0.5, 0.75, 1/0.25), 0.3700584213780548,\n # 0.08992436735088952],\n [trev.transform_joe, 0.4, 0.9, (0.5, 0.75, 1/0.25),\n 0.36391125216656162, 0.34752631779552950, 0.09316705199822513],\n ]\n\n\ndef check_cop_rvs(cop, rvs=None, nobs=2000, k=10, use_pdf=True):\n if rvs is None:\n rvs = cop.rvs(nobs)\n freq = frequencies_fromdata(rvs, k, use_ranks=True)\n if use_pdf:\n pdfg = approx_copula_pdf(cop, k_bins=k, force_uniform=True)\n count_pdf = pdfg * nobs\n else:\n # use copula cdf if available\n raise NotImplementedError\n mask = count_pdf < 2\n if mask.sum() > 5:\n cp = count_pdf[mask]\n cp = np.concatenate([cp, [nobs - cp.sum()]])\n fr = freq[mask]\n cp = np.concatenate([fr, [nobs - fr.sum()]])\n else:\n fr = freq.ravel()\n cp = count_pdf.ravel()\n\n chi2_test = stats.chisquare(freq.ravel(), count_pdf.ravel())\n return chi2_test, rvs\n\n\nextrali = [\n [trev.transform_tawn, 0.5, 0.9, (0.8, 0.5, 0.75), 0.4724570876035117],\n [trev.transform_tawn, 0.5, 0.9, (0.5, 0.75, 0.5), 0.4724570876035117],\n [trev.transform_tawn, 0.6, 0.4, (0.2, 0.7, 0.6), 0.4724570876035117],\n]\n\n\[email protected](\"case\", ev_list + extrali)\ndef test_ev_copula(case):\n # check ev copulas, cdf and transform against R `evd` package\n ev_tr, v1, v2, args, res1 = case\n res = copula_bv_ev([v1, v2], ev_tr, args=args)\n # assert_allclose(res, res1, rtol=1e-13)\n\n # check derivatives of dependence function\n if ev_tr in (trev.transform_bilogistic, trev.transform_tev):\n return\n d1_res = approx_fprime_cs(np.array([v1, v2]), ev_tr.evaluate, args=args)\n d1_res = np.diag(d1_res)\n d1 = ev_tr.deriv(np.array([v1, v2]), *args)\n assert_allclose(d1, d1_res, rtol=1e-8)\n\n d1_res = approx_hess(np.array([0.5]), ev_tr.evaluate, args=args)\n d1_res = np.diag(d1_res)\n d1 = ev_tr.deriv2(0.5, *args)\n assert_allclose(d1, d1_res, rtol=1e-7)\n\n\[email protected](\"case\", ev_dep_list)\ndef test_ev_dep(case):\n ev_tr, v1, v2, args, res1, res2 = case # noqa\n t = np.array([0.25, 0.5, 0.75])\n df = ev_tr(t, *args)\n assert_allclose(df, res2, rtol=1e-13)\n\n\[email protected](\"case\", cop_list)\ndef test_copulas(case):\n # check ev copulas, cdf and transform against R `copula` package\n cop_tr, v1, v2, args, cdf2, pdf2 = case\n ca = ArchimedeanCopula(cop_tr())\n cdf1 = ca.cdf([v1, v2], args=args)\n pdf1 = ca.pdf([v1, v2], args=args)\n 
assert_allclose(cdf1, cdf2, rtol=1e-13)\n assert_allclose(pdf1, pdf2, rtol=1e-13)\n\n logpdf1 = ca.logpdf([v1, v2], args=args)\n assert_allclose(logpdf1, np.log(pdf2), rtol=1e-13)\n\n\[email protected](\"case\", ev_list)\ndef test_ev_copula_distr(case):\n # check ev copulas, cdf and transform against R `evd` package\n ev_tr, v1, v2, args, res1 = case\n u = [v1, v2]\n res = copula_bv_ev(u, ev_tr, args=args)\n assert_allclose(res, res1, rtol=1e-13)\n\n ev = ExtremeValueCopula(ev_tr)\n cdf1 = ev.cdf(u, args)\n assert_allclose(cdf1, res1, rtol=1e-13)\n\n cev = CopulaDistribution([uniform, uniform], ev, cop_args=args)\n cdfd = cev.cdf(np.array(u), cop_args=args)\n assert_allclose(cdfd, res1, rtol=1e-13)\n assert cdfd.shape == ()\n\n # using list u\n cdfd = cev.cdf(u, cop_args=args)\n assert_allclose(cdfd, res1, rtol=1e-13)\n assert cdfd.shape == ()\n\n # check vector values for u\n # bilogistic is not vectorized, uses integrate.quad\n if ev_tr != trev.transform_bilogistic:\n cdfd = cev.cdf(np.array(u) * np.ones((3, 1)), cop_args=args)\n assert_allclose(cdfd, res1, rtol=1e-13)\n assert cdfd.shape == (3,)\n\n\[email protected](\"case\", cop_list)\ndef test_copulas_distr(case):\n # check ev copulas, cdf and transform against R `copula` package\n cop_tr, v1, v2, args, cdf2, pdf2 = case\n u = [v1, v2]\n ca = ArchimedeanCopula(cop_tr())\n cdf1 = ca.cdf(u, args=args)\n pdf1 = ca.pdf(u, args=args)\n\n cad = CopulaDistribution([uniform, uniform], ca, cop_args=args)\n cdfd = cad.cdf(np.array(u), cop_args=args)\n assert_allclose(cdfd, cdf1, rtol=1e-13)\n assert cdfd.shape == ()\n\n # check pdf\n pdfd = cad.pdf(np.array(u), cop_args=args)\n assert_allclose(pdfd, pdf1, rtol=1e-13)\n assert cdfd.shape == ()\n\n # using list u\n cdfd = cad.cdf(u, cop_args=args)\n assert_allclose(cdfd, cdf1, rtol=1e-13)\n assert cdfd.shape == ()\n\n assert_allclose(cdf1, cdf2, rtol=1e-13)\n assert_allclose(pdf1, pdf2, rtol=1e-13)\n\n # check vector values for u\n cdfd = cad.cdf(np.array(u) * np.ones((3, 1)), cop_args=args)\n assert_allclose(cdfd, cdf2, rtol=1e-13)\n assert cdfd.shape == (3,)\n\n # check mv, check at marginal cdf\n cdfmv = ca.cdf([v1, v2, 1], args=args)\n assert_allclose(cdfmv, cdf1, rtol=1e-13)\n assert cdfd.shape == (3,)\n\n\[email protected](\"case\", gev_list)\ndef test_gev_genextreme(case):\n gev = stats.genextreme(0)\n # check ev copulas, cdf and transform against R `evt` package\n ev_tr, v1, v2, args, res0, res1, res2 = case\n y = [v1, v2]\n u = gev.cdf(y)\n res = copula_bv_ev(u, ev_tr, args=args)\n assert_allclose(res, res1, rtol=1e-13)\n\n ev = ExtremeValueCopula(ev_tr)\n # evaluated at using u = y\n cdf1 = ev.cdf(y, args)\n assert_allclose(cdf1, res0, rtol=1e-13)\n\n # evaluated at transformed u = F(y)\n cdf1 = ev.cdf(u, args)\n assert_allclose(cdf1, res1, rtol=1e-13)\n\n cev = CopulaDistribution([gev, gev], ev, cop_args=args)\n cdfd = cev.cdf(np.array(y), cop_args=args)\n assert_allclose(cdfd, res1, rtol=1e-13)\n pdfd = cev.pdf(np.array(y), cop_args=args)\n assert_allclose(pdfd, res2, rtol=1e-13)\n\n\nclass TestFrank:\n def test_basic(self):\n case = [tra.TransfFrank, 0.5, 0.9, (2,), 0.4710805107852225,\n 0.9257812360337806]\n cop_tr, v1, v2, args, cdf2, pdf2 = case\n cop = FrankCopula()\n\n pdf1 = cop.pdf([v1, v2], args=args)\n assert_allclose(pdf1, pdf2, rtol=1e-13)\n logpdf1 = cop.logpdf([v1, v2], args=args)\n assert_allclose(logpdf1, np.log(pdf2), rtol=1e-13)\n\n cdf1 = cop.cdf([v1, v2], args=args)\n assert_allclose(cdf1, cdf2, rtol=1e-13)\n\n assert isinstance(cop.transform, cop_tr)\n\n # round 
trip conditional, no verification\n u = [0.6, 0.5]\n cdfc = cop.cdfcond_2g1(u, args=args)\n ppfc = cop.ppfcond_2g1(cdfc, [0.6], args=args)\n assert_allclose(ppfc, u[1], rtol=1e-13)\n\n\n# The reference results are coming from the R package Copula.\n# See ``copula_r_tests.rst`` for more details.\n\n\nclass CheckCopula:\n \"\"\"Generic tests for copula.\"\"\"\n\n copula = None\n dim = None\n u = np.array([[0.33706249, 0.6075078],\n [0.62232507, 0.06241089],\n [0.2001457, 0.54027684],\n [0.77166391, 0.40610225],\n [0.98534253, 0.99212789],\n [0.72755898, 0.25913165],\n [0.05943888, 0.61044613],\n [0.0962475, 0.67585563],\n [0.35496733, 0.79584436],\n [0.44513594, 0.23050014]])\n pdf_u = None\n cdf_u = None\n\n def _est_visualization(self):\n sample = self.copula.rvs(10000)\n assert sample.shape == (10000, 2)\n # h = sns.jointplot(sample[:, 0], sample[:, 1], kind='hex')\n # h.set_axis_labels('X1', 'X2', fontsize=16)\n\n def test_pdf(self):\n pdf_u_test = self.copula.pdf(self.u)\n assert_array_almost_equal(self.pdf_u, pdf_u_test)\n\n def test_cdf(self):\n cdf_u_test = self.copula.cdf(self.u)\n assert_array_almost_equal(self.cdf_u, cdf_u_test)\n\n def test_validate_params(self):\n pass\n\n def test_rvs(self):\n nobs = 2000\n rng = np.random.RandomState(27658622)\n self.rvs = rvs = self.copula.rvs(nobs, random_state=rng)\n assert rvs.shape == (nobs, 2)\n assert_array_almost_equal(\n np.mean(rvs, axis=0), np.repeat(0.5, self.dim), decimal=2\n )\n\n # check empirical quantiles, uniform\n q0 = np.percentile(rvs, [25, 50, 75], axis=0)\n q1 = np.repeat(np.array([[0.25, 0.5, 0.75]]).T, 2, axis=1)\n assert_allclose(q0, q1, atol=0.025)\n\n\nclass CheckModernCopula(CheckCopula):\n @pytest.mark.parametrize(\n \"seed\", [\"random_state\", \"generator\", \"qmc\", None, 0]\n )\n def test_seed(self, seed):\n if SP_LT_15 and seed in (\"generator\", 0):\n pytest.xfail(reason=\"Generator not supported for SciPy <= 1.3\")\n if seed == \"random_state\":\n seed1 = np.random.RandomState(0)\n seed2 = np.random.RandomState(0)\n elif seed == \"generator\":\n seed1 = np.random.default_rng(0)\n seed2 = 0\n elif seed is None:\n seed1 = None\n singleton = np.random.mtrand._rand\n seed2 = np.random.RandomState()\n seed2.set_state(singleton.get_state())\n elif seed == \"qmc\":\n if not hasattr(stats, \"qmc\"):\n pytest.skip(\"QMC not available\")\n else:\n pytest.xfail(\"QMC not working\")\n seed1 = stats.qmc.Halton(2)\n seed2 = stats.qmc.Halton(2)\n else:\n seed1 = 0\n seed2 = np.random.default_rng(0)\n\n nobs = 2000\n expected_warn = None if seed1 is not None else FutureWarning\n with pytest.warns(expected_warn):\n rvs1 = self.copula.rvs(nobs, random_state=seed1)\n rvs2 = self.copula.rvs(nobs, random_state=seed2)\n assert_allclose(rvs1, rvs2)\n\n\nclass TestIndependenceCopula(CheckCopula):\n copula = IndependenceCopula()\n dim = 2\n pdf_u = np.ones(10)\n cdf_u = np.prod(CheckCopula.u, axis=1)\n\n\nclass TestGaussianCopula(CheckCopula):\n copula = GaussianCopula(corr=[[1.0, 0.8], [0.8, 1.0]])\n dim = 2\n pdf_u = [1.03308741, 0.06507279, 0.72896012, 0.65389439, 16.45012399,\n 0.34813218, 0.06768115, 0.08168840, 0.40521741, 1.26723470]\n cdf_u = [0.31906854, 0.06230196, 0.19284669, 0.39952707, 0.98144792,\n 0.25677003, 0.05932818, 0.09605404, 0.35211017, 0.20885480]\n\n def test_rvs(self):\n # copied from student t test,\n # currently inconsistent with non-elliptical copulas\n super().test_rvs()\n\n chi2t, rvs = check_cop_rvs(\n self.copula, rvs=self.rvs, nobs=2000, k=10, use_pdf=True\n )\n assert chi2t.pvalue > 0.1\n tau = 
stats.kendalltau(*rvs.T)[0]\n tau_cop = self.copula.tau()\n assert_allclose(tau, tau_cop, rtol=0.05)\n\n\nclass TestStudentTCopula(CheckCopula):\n copula = StudentTCopula(corr=[[1.0, 0.8], [0.8, 1.0]], df=2)\n dim = 2\n pdf_u = [0.8303065, 0.1359839, 0.5157746, 0.4776421, 26.2173959,\n 0.3070661, 0.1349173, 0.1597064, 0.3303230, 1.0482301]\n cdf_u = [0.31140349, 0.05942746, 0.18548601, 0.39143974, 0.98347259,\n 0.24894028, 0.05653947, 0.09210693, 0.34447385, 0.20429882]\n\n def test_cdf(self):\n pytest.skip(\"Not implemented.\")\n\n def test_rvs(self):\n super().test_rvs()\n\n chi2t, rvs = check_cop_rvs(\n self.copula, rvs=self.rvs, nobs=2000, k=10, use_pdf=True\n )\n assert chi2t.pvalue > 0.1\n tau = stats.kendalltau(*rvs.T)[0]\n tau_cop = self.copula.tau()\n assert_allclose(tau, tau_cop, rtol=0.05)\n\n\nclass TestClaytonCopula(CheckModernCopula):\n copula = ClaytonCopula(theta=1.2)\n dim = 2\n pdf_u = [1.0119836, 0.2072728, 0.8148839, 0.9481976, 2.1419659,\n 0.6828507, 0.2040454, 0.2838497, 0.8197787, 1.1096360]\n cdf_u = [0.28520375, 0.06101690, 0.17703377, 0.36848218, 0.97772088,\n 0.24082057, 0.05811908, 0.09343934, 0.33012582, 0.18738753]\n\n\nclass TestFrankCopula(CheckModernCopula):\n copula = FrankCopula(theta=3)\n dim = 2\n pdf_u = [0.9646599, 0.5627195, 0.8941964, 0.8364614, 2.9570945,\n 0.6665601, 0.5779906, 0.5241333, 0.7156741, 1.1074024]\n cdf_u = [0.27467496, 0.05492539, 0.15995939, 0.36750702, 0.97782283,\n 0.23412757, 0.05196265, 0.08676979, 0.32803721, 0.16320730]\n\n\nclass TestGumbelCopula(CheckModernCopula):\n copula = GumbelCopula(theta=1.5)\n dim = 2\n pdf_u = [1.0391696, 0.6539579, 0.9878446, 0.8679504, 16.6030932,\n 0.7542073, 0.6668307, 0.6275887, 0.7477991, 1.1564864]\n cdf_u = [0.27194634, 0.05484380, 0.15668190, 0.37098420, 0.98176346,\n 0.23422865, 0.05188260, 0.08659615, 0.33086960, 0.15803914]\n"
]
| [
[
"numpy.testing.assert_allclose",
"numpy.array",
"scipy.stats.kendalltau",
"numpy.log",
"numpy.random.RandomState",
"numpy.percentile",
"numpy.ones",
"numpy.random.default_rng",
"numpy.mean",
"numpy.testing.assert_array_almost_equal",
"scipy.stats.genextreme",
"numpy.prod",
"scipy.stats.qmc.Halton",
"numpy.repeat",
"numpy.diag"
]
]
|
piyushghai/deep-learning-models | [
"eebc5007a9f60540a98ed8ea5f9fbd5786b21db9"
]
| [
"models/vision/detection/awsdet/datasets/utils.py"
]
| [
"# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\n\n###########################################\n#\n# Utility Functions for \n# Image Preprocessing and Data Augmentation\n#\n###########################################\n\ndef img_flip(img):\n '''Flip the image horizontally\n \n Args\n ---\n img: [height, width, channel]\n \n Returns\n ---\n np.ndarray: the flipped image.\n '''\n return np.fliplr(img)\n\ndef bbox_flip(bboxes, img_shape):\n '''Flip bboxes horizontally.\n \n Args\n ---\n bboxes: [..., 4]\n img_shape: Tuple. (height, width)\n \n Returns\n ---\n np.ndarray: the flipped bboxes.\n '''\n w = img_shape[1]\n flipped = bboxes.copy()\n flipped[..., 1] = w - bboxes[..., 3]\n flipped[..., 3] = w - bboxes[..., 1]\n return flipped\n\ndef impad_to_square(img, pad_size):\n '''Pad an image to ensure each edge to equal to pad_size.\n \n Args\n ---\n img: [height, width, channels]. Image to be padded\n pad_size: Int.\n \n Returns\n ---\n ndarray: The padded image with shape of \n [pad_size, pad_size, channels].\n '''\n shape = (pad_size, pad_size, img.shape[-1])\n \n pad = np.zeros(shape, dtype=img.dtype)\n pad[:img.shape[0], :img.shape[1], ...] = img\n return pad\n\ndef impad_to_multiple(img, divisor):\n '''Pad an image to ensure each edge to be multiple to some number.\n \n Args\n ---\n img: [height, width, channels]. Image to be padded.\n divisor: Int. Padded image edges will be multiple to divisor.\n \n Returns\n ---\n ndarray: The padded image.\n '''\n pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor\n pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor\n shape = (pad_h, pad_w, img.shape[-1])\n \n pad = np.zeros(shape, dtype=img.dtype)\n pad[:img.shape[0], :img.shape[1], ...] = img\n return pad\n\ndef impad_mask_to_square(img, pad_size):\n '''Pad a mask to ensure each edge to equal to pad_size.\n \n Args\n ---\n img: [height, width]. Mask to be padded\n pad_size: Int.\n \n Returns\n ---\n ndarray: The padded image with shape of \n [pad_size, pad_size].\n '''\n shape = (pad_size, pad_size)\n pad = np.zeros(shape, dtype=img.dtype)\n pad[:img.shape[0], :img.shape[1]] = img\n return pad\n\ndef impad_mask_to_multiple(img, divisor):\n '''Pad a mask to ensure each edge to be multiple to some number.\n \n Args\n ---\n img: [height, width]. Mask to be padded.\n divisor: Int. Padded mask edges will be multiple to divisor.\n \n Returns\n ---\n ndarray: The padded mask.\n '''\n pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor\n pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor\n shape = (pad_h, pad_w)\n pad = np.zeros(shape, dtype=img.dtype)\n pad[:img.shape[0], :img.shape[1]] = img\n return pad\n\ndef imrescale(img, scale):\n '''Resize image while keeping the aspect ratio.\n \n Args\n ---\n img: [height, width, channels]. The input image.\n scale: Tuple of 2 integers. 
the image will be rescaled \n as large as possible within the scale\n \n Returns\n ---\n np.ndarray: the scaled image.\n ''' \n h, w = img.shape[:2]\n \n max_long_edge = max(scale)\n max_short_edge = min(scale)\n scale_factor = min(max_long_edge / max(h, w),\n max_short_edge / min(h, w))\n \n new_size = (int(w * float(scale_factor) + 0.5),\n int(h * float(scale_factor) + 0.5))\n\n rescaled_img = cv2.resize(\n img, new_size, interpolation=cv2.INTER_LINEAR)\n \n return rescaled_img, scale_factor\n\n\n#######################################\n#\n# Utility Functions for Data Formatting\n#\n#######################################\n\ndef get_original_image(img, img_meta):\n '''Recover the original image.\n \n Args\n ---\n img: np.ndarray. [height, width, channel]. \n The transformed image.\n img_meta: np.ndarray. [11]\n \n Returns\n ---\n np.ndarray: the original image.\n '''\n img_meta_dict = parse_image_meta(img_meta)\n ori_shape = img_meta_dict['ori_shape']\n img_shape = img_meta_dict['img_shape']\n flip = img_meta_dict['flip']\n \n img = img[:img_shape[0], :img_shape[1]]\n if flip:\n img = img_flip(img)\n img = cv2.resize(img, (ori_shape[1], ori_shape[0]), \n interpolation=cv2.INTER_LINEAR)\n return img\n\n\ndef compose_image_meta(img_meta_dict):\n '''Takes attributes of an image and puts them in one 1D array.\n\n Args\n ---\n img_meta_dict: dict\n\n Returns\n ---\n img_meta: np.ndarray\n '''\n ori_shape = img_meta_dict['ori_shape']\n img_shape = img_meta_dict['img_shape']\n pad_shape = img_meta_dict['pad_shape']\n scale_factor = img_meta_dict['scale_factor']\n flip = 1 if img_meta_dict['flip'] else 0\n img_meta = np.array(\n ori_shape + # size=3\n img_shape + # size=3\n pad_shape + # size=3\n tuple([scale_factor]) + # size=1\n tuple([flip]) # size=1\n ).astype(np.float32)\n\n return img_meta\n\n\ndef parse_image_meta(img_meta):\n '''Parses an array that contains image attributes to its components.\n\n Args\n ---\n meta: [11]\n\n Returns\n ---\n a dict of the parsed values.\n '''\n ori_shape = img_meta[0:3]\n img_shape = img_meta[3:6]\n pad_shape = img_meta[6:9]\n scale_factor = img_meta[9]\n flip = img_meta[10]\n return {\n 'ori_shape': ori_shape.astype(np.int32),\n 'img_shape': img_shape.astype(np.int32),\n 'pad_shape': pad_shape.astype(np.int32),\n 'scale_factor': scale_factor.astype(np.float32),\n 'flip': flip.astype(np.bool),\n }\n"
]
| [
[
"numpy.ceil",
"numpy.zeros",
"numpy.fliplr"
]
]
|
DavidMcDonald1993/mim | [
"b55ea1c23ffd1aaf3c395480cbc6673dc6b4cf6a"
]
| [
"scrapers/endole/endole_selenium_scraper.py"
]
| [
"\nimport sys\nimport os.path\nsys.path.insert(1, \n os.path.abspath(\n os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))) \n\nfrom urllib.parse import quote\n\n# import web driver\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\n\nfrom time import sleep\n\nimport pandas as pd \n\nimport re\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\nimport os\n\nfrom utils.scraping_utils import (identify_postcode,\n get_postcode_prefix, process_figure_string)\nfrom utils.geo import all_region_postcodes\nfrom utils.selenium_utils import initialise_chrome_driver, get_url\nfrom utils.io import read_json, write_json\n\n\ndef login(driver,\n email=os.getenv(\"ENDOLEEMAIL\"),\n password=os.getenv(\"ENDOLEPASSWORD\"),\n ):\n\n get_url(driver, 'https://suite.endole.co.uk/?login=Google')\n sleep(1)\n\n print (\"logging in\")\n\n print (\"finding username\")\n\n # locate email form by_class_name\n # username = driver.find_element_by_id(\"login_email\")\n username = driver.find_element_by_id(\"identifierId\")\n\n # send_keys() to simulate key strokes\n username.send_keys(email)\n username.send_keys(Keys.RETURN)\n sleep(3)\n\n print (\"finding password\")\n\n # locate password form by_class_name\n # password = driver.find_element_by_id(\"login_pass\")\n # try:\n password_field = driver.find_element_by_css_selector('input[type=\"password\"]')\n # except Exception:\n # password = driver.find_element_by_css_selector('input[type=\"password\"]')\n\n # send_keys() to simulate key strokes\n password_field.send_keys(password)\n password_field.send_keys(Keys.RETURN)\n\n\n # print (\"finding log in button\")\n\n # # locate submit button by_class_name\n # log_in_button = driver.find_element_by_class_name('blue-button')\n\n # # .click() to mimic button click\n # log_in_button.click()\n sleep(3)\n\n\ndef search_endole_with_selenium(driver, company_name, postcode, check_postcode):\n\n company_name_url = quote(\"+\".join(company_name.split(\" \")))\n url = f\"https://suite.endole.co.uk/insight/search/?q={company_name_url}\"\n\n # driver.get(url)\n get_url(driver, url)\n # sleep(1)\n\n postcode_prefix = get_postcode_prefix(postcode)\n\n try:\n results = driver.find_elements_by_css_selector(\"div[class='search-result']\")\n except NoSuchElementException:\n return None, None\n\n for result in results:\n # check for post code\n try:\n header = result.find_element_by_css_selector(\"div[class='sr-header']\")\n except NoSuchElementException:\n continue\n try:\n address = header.find_element_by_css_selector(\"span\",)\n except NoSuchElementException:\n continue\n address = address.text \n result_postcode = identify_postcode(address)\n if result_postcode is None:\n continue\n result_postcode_prefix = get_postcode_prefix(result_postcode)\n if result_postcode_prefix is None:\n continue\n if not check_postcode or (postcode_prefix is not None and postcode_prefix == result_postcode_prefix):\n link = header.find_element_by_css_selector(\"a[class='preloader']\")\n return address, link.get_attribute('href')\n elif result_postcode_prefix in all_region_postcodes[\"midlands\"]\\\n or result_postcode_prefix in all_region_postcodes[\"yorkshire\"]:\n print (\"POSTCODE IN REGION\", result_postcode)\n link = header.find_element_by_css_selector(\"a[class='preloader']\")\n return address, link.get_attribute('href')\n\n print (\"NO MATCHES\")\n return None, None\n\ndef scrape_endole_with_selenium(driver, base_url):\n\n overview_url = 
base_url + \"?page=overview\"\n\n scraped_company_info = {}\n\n get_url(driver, overview_url)\n\n # company classification\n divs = driver.find_elements_by_css_selector(\"div[class='_item']\")\n for div in divs:\n try:\n heading_div = div.find_element_by_css_selector(\"div[class='_heading']\")\n except NoSuchElementException:\n continue\n if \"Size\" in heading_div.text:\n size_div = div.find_element_by_css_selector(\"div[class='-font-size-l']\")\n if size_div is not None:\n scraped_company_info[\"endole_company_size_classification\"] = size_div.text\n\n # financials\n try:\n financials = driver.find_elements_by_css_selector(\"div[class='financial-overview']\")\n except NoSuchElementException:\n financials = None \n\n if financials is not None:\n for financial in financials:\n header = financial.find_element_by_css_selector(\"span[class='t2']\")\n header = header.text \n figure = financial.find_element_by_css_selector(\"span[class='t1']\")\n figure = figure.text \n\n scraped_company_info.update({\n f\"{header}_figure\": process_figure_string(figure),\n })\n try:\n trend = financial.find_element_by_css_selector(\"div[class='trendingup']\")\n except NoSuchElementException:\n try:\n trend = financial.find_element_by_css_selector(\"div[class='trendingdown']\")\n except NoSuchElementException:\n trend = None\n if trend is not None:\n trend = trend.text\n assert \"(\" in trend\n trend, trend_change = trend.replace(\")\", \"\").split(\" (\")\n\n scraped_company_info.update({\n f\"{header}_trend\": process_figure_string(trend),\n f\"{header}_trend_change\": process_figure_string(trend_change),\n })\n else:\n print (\"NO FINANCIAL INFORMATION\")\n\n '''\n directors table\n '''\n people_contacts_url = base_url + \"?page=people-contacts\"\n get_url(driver, people_contacts_url)\n\n try:\n table_div = driver.find_element_by_id(\"ajax_people\")\n table = table_div.find_element_by_css_selector(\"table\")\n except NoSuchElementException:\n table = None\n\n if table is not None:\n\n table_rows = table.find_elements_by_css_selector('tr')\n l = []\n for tr in table_rows[1:]: # skip first row since it is a header\n td = tr.find_elements_by_css_selector('td')\n row = [tr.text.rstrip() for tr in td]\n l.append(row)\n directors_table = pd.DataFrame(l, \n columns=[\"name\", \"occupation\", \"period\",] )\n \n # keep only active directors\n directors_table = directors_table.loc[\n directors_table[\"period\"].map(\n lambda s: s.split(\" – \")[1] == \"Active\")]\n\n scraped_company_info[\"directors\"] = [\n {\"name\": row[\"name\"].split(\"\\n\")[0], \"occupation\": row[\"occupation\"]}\n for _, row in directors_table.iterrows()\n if row[\"occupation\"] not in {\"–\", \"None\"}\n ]\n\n '''\n competition\n '''\n\n competition_url = base_url + \"?page=competition\"\n get_url(driver, competition_url)\n\n\n competition_elements = driver.find_elements_by_css_selector(\n \"a\") \n\n pattern = r\"^https://suite.endole.co.uk/insight/company/[0-9]+\"\n\n scraped_company_info[\"competitors\"] = []\n for competition_element in filter(\n lambda ce: re.match(pattern, ce.get_attribute(\"href\")) and not ce.get_attribute(\"href\").startswith(base_url),\n competition_elements):\n d = {}\n d[\"name\"] = competition_element.text\n address = competition_element.find_element_by_xpath(\"..\").text.split(\"\\n\")\n\n if address[-1].endswith(\".com\"):\n d[\"website\"] = \"www.\" + address[-1].lower()\n address = address[:-1]\n d[\"address\"] = \" \".join(address)\n\n scraped_company_info[\"competitors\"].append(d)\n\n return 
scraped_company_info\n\ndef process_df(df, output_file, company_name_col=None, ):\n\n driver = None\n\n if os.path.exists(output_file):\n full_company_info = read_json(output_file)\n\n else:\n full_company_info = dict()\n\n for idx, company in df.iterrows():\n\n if company_name_col is None:\n company_name = idx\n else:\n company_name = company[company_name_col]\n\n\n if pd.isnull(company_name): \n print (\"skipping company\", company_name, \": missing company name\")\n continue\n if company_name in full_company_info:\n print (\"company\", company_name, \"already processed\")\n continue\n\n\n if driver is None:\n driver = initialise_chrome_driver()\n login(driver)\n\n print (\"processing company\", company_name)\n\n address, link = search_endole_with_selenium(\n driver, company_name, postcode=None, check_postcode=False)\n\n company_data_from_endole = {\n \"endole_address\": address,\n \"endole_url\": link,\n }\n if link is not None:\n scraped_company_data = scrape_endole_with_selenium(driver, link)\n company_data_from_endole.update(scraped_company_data)\n else:\n print(\"no results for company\", company_name)\n\n\n full_company_info[company_name] = company_data_from_endole\n\n write_json(full_company_info, output_file)\n\n print()\n\n\ndef scrape_endole_for_members():\n\n output_dir = os.path.join(\"data_for_graph\", \"members\" )\n\n for membership_level in {\n \"Patron\",\n \"Platinum\",\n \"Gold\",\n \"Silver\",\n \"Bronze\",\n \"Digital\",\n \"Freemium\",\n }:\n\n d = os.path.join(output_dir, membership_level)\n companies = pd.read_csv(\n os.path.join(d, f\"{membership_level}_members_companies_house.csv\"),\n index_col=0,\n )\n\n output_filename = os.path.join(d, \n f\"{membership_level}_endole.json\")\n\n process_df(companies, output_filename)\n\ndef scrape_endole_for_prospects():\n \n\n output_dir = os.path.join(\"companies_house\", )\n\n for region in {\n \"midlands\",\n \"yorkshire\",\n }:\n\n d = os.path.join(output_dir, region)\n companies = pd.read_csv(\n os.path.join(d, f\"{region}_prospects_filtered_by_website.csv\"),\n index_col=0,\n )\n\n output_filename = os.path.join(d, \n f\"{region}_endole.json\")\n\n process_df(companies, output_filename, company_name_col=\"CompanyName\")\n\n\ndef main():\n # scrape_endole_for_members()\n scrape_endole_for_prospects()\n\nif __name__ == \"__main__\":\n main()"
]
| [
[
"pandas.isnull",
"pandas.DataFrame"
]
]
|
dmitryvinn/vissl-1 | [
"1d8befefbbeabf9fec6a34b4581080d4221fde9d"
]
| [
"projects/fairness_indicators/harmful_label_associations/inference_label_assoc_openimages_miap_test.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torchvision.transforms as T\nfrom fvcore.common.file_io import PathManager\nfrom PIL import Image\nfrom vissl.utils.io import load_file\n\n\n# can only chose upto 10 as we have dumped only 10 predictions.\n# The following map can be replaced for any taxonomy.\nIN22K_SUBSET_LABEL_ASSOCIATION_MAP = {\n \"non-human\": [\n \"swine\",\n \"slug\",\n \"rat\",\n \"snake\",\n \"monkey\",\n \"lemur\",\n \"chimpanzee\",\n \"baboon\",\n \"animal\",\n \"bonobo\",\n \"mandrill\",\n \"capuchin\",\n \"gorilla\",\n \"mountain gorilla\",\n \"ape\",\n \"great ape\",\n \"orangutan\",\n ],\n \"possibly_non_human\": [\"cat\", \"dog\"],\n \"human\": [\"people\", \"face\"],\n \"possibly_human\": [\"makeup\", \"khimar\", \"beard\"],\n \"crime\": [\"prison\"],\n}\nTOPK = 5\nDISPARATE_LABELS_LIST = [\"prison\"]\nDISPARATE_THRESHOLD = 0.5\n\nCOMMON_MAP_TEST = {\n \"metadata\": \"/path/to/openimages_miap/filtered_bbox_open_images_extended_miap_boxes_test.json\",\n \"label_to_id_map\": \"https://dl.fbaipublicfiles.com/vissl/fairness/label_association/imagenet_to_idx_labels_map.json\",\n}\n\n\ndef _replace_img_path_prefix(img_path: str, replace_prefix: str, new_prefix: str):\n if img_path.startswith(replace_prefix):\n return img_path.replace(replace_prefix, new_prefix)\n return img_path\n\n\ndef get_image(img_path, resize=256, replace_prefix=\"\", new_prefix=\"\"):\n is_success = False\n try:\n if PathManager.isfile(img_path) and PathManager.exists(img_path):\n img_path = _replace_img_path_prefix(img_path, replace_prefix, new_prefix)\n with PathManager.open(img_path, \"rb\") as fopen:\n img = Image.open(fopen).convert(\"RGB\")\n is_success = True\n except Exception as e:\n print(e)\n img = Image.fromarray(128 * np.ones((resize, resize, 3), dtype=np.uint8))\n return img, is_success\n\n\ndef get_cc_skintone_name(skintone_num):\n try:\n skintone_num = int(skintone_num)\n if skintone_num in [1, 2, 3]:\n return \"lighter\"\n if skintone_num in [4, 5, 6]:\n return \"darker\"\n else:\n return \"unknown\"\n except Exception:\n return \"unknown\"\n\n\ndef get_cc_age_bucket(age_num):\n try:\n age_num = int(age_num)\n if age_num < 18:\n return \"<18\"\n if age_num >= 18 and age_num < 30:\n return \"18-30\"\n if age_num >= 30 and age_num < 45:\n return \"30-45\"\n if age_num >= 45 and age_num < 70:\n return \"45-70\"\n if age_num >= 70:\n return \"70+\"\n else:\n return \"unknown\"\n except Exception:\n return \"unknown\"\n\n\ndef load_in22k_subset_opeinimages_miap_labels_images_preds_metadata(\n input_data, confidence_threshold=0.0\n):\n # load the predictions\n predictions = load_file(input_data[\"label_predictions\"])\n print(f\"Number of predictions: {predictions.shape}\\n\")\n\n # load the indices\n indices = load_file(input_data[\"pred_img_indices\"])\n print(f\"Number of indices: {indices.shape}\\n\")\n\n # load the confidence scores if provided\n has_confidence_scores = False\n if \"label_predictions_scores\" in input_data:\n confidence_scores = load_file(input_data[\"label_predictions_scores\"])\n filtered_confidence_scores, out_predictions = [], []\n for img_idx in range(len(predictions)):\n img_predictions, img_scores = [], []\n for pred_idx in predictions[img_idx]:\n if confidence_scores[img_idx][pred_idx] >= confidence_threshold:\n 
img_predictions.append(pred_idx)\n img_scores.append(\n str(round(confidence_scores[img_idx][pred_idx], 5))\n )\n filtered_confidence_scores.append(img_scores)\n out_predictions.append(img_predictions)\n has_confidence_scores = True\n predictions = out_predictions\n print(f\"Confidence scores: {len(filtered_confidence_scores)}\\n\")\n\n # load the metadata\n metadata = load_file(input_data[\"metadata\"])\n if isinstance(metadata, list):\n print(f\"metadata: {len(metadata)}\")\n print(f\"metadata keys: {metadata[0].keys()}\")\n else:\n print(f\"metadata: {list(metadata.values())[0].keys()}\")\n\n # extract the image paths\n image_paths = [item[\"path\"] for item in metadata]\n print(f\"Number of image_paths: {len(image_paths)}\\n\")\n bbox_annotations = [item[\"bbox\"] for item in metadata]\n print(f\"Number of bbox_annotations: {len(bbox_annotations)}\\n\")\n\n # load the label id map\n label_to_id = load_file(input_data[\"label_to_id_map\"])\n id_to_label = {value: key for key, value in label_to_id.items()}\n print(\"Loaded label_to_id and generated id_to_label map\")\n\n # {'AgePresentation': 'Young',\n # 'Confidence': '1',\n # 'GenderPresentation': 'Unknown',\n # 'IsDepictionOf': '0',\n # 'IsGroupOf': '0',\n # 'IsInsideOf': '0',\n # 'IsOccluded': '0',\n # 'IsTruncated': '0',\n # 'LabelName': '/m/01g317',\n # 'bbox': ['886.5607679999999', '302.212474', '1016.448', '639.179403'],\n # 'path': '/path/to/img1.jpg'},\n SIMPLIFIED_GENDER_MAP = {\n \"feminine\": \"female\",\n \"masculine\": \"male\",\n \"unknown\": \"gender_unknown\",\n }\n openimages_miap_map = {}\n for item in metadata:\n path = item[\"path\"]\n bbox = item[\"bbox\"]\n str_bbox = \"_\".join(bbox)\n gender = SIMPLIFIED_GENDER_MAP[item[\"GenderPresentation\"].lower().split()[-1]]\n age = item[\"AgePresentation\"].lower()\n map_key = f\"{path}_{str_bbox}\"\n openimages_miap_map[map_key] = {\n \"gender\": gender,\n \"age\": age,\n \"orig_path\": path,\n \"bbox\": bbox,\n }\n # now we filter further based on the image paths actually present in the data\n # and we enter the information about the prediction\n filtered_openimages_miap_map = {}\n for item in range(len(indices)):\n idx = indices[item]\n inp_img_path = image_paths[idx]\n bbox = bbox_annotations[idx]\n str_bbox = \"_\".join(bbox)\n map_key = f\"{inp_img_path}_{str_bbox}\"\n filtered_openimages_miap_map[map_key] = openimages_miap_map[map_key]\n filtered_openimages_miap_map[map_key].update(\n {\"prediction\": [id_to_label[item] for item in predictions[item]]}\n )\n if has_confidence_scores:\n filtered_openimages_miap_map[map_key].update(\n {\"confidence_scores\": filtered_confidence_scores[item]}\n )\n print(f\"Output data entries: {len(list(filtered_openimages_miap_map.keys()))}\")\n return filtered_openimages_miap_map, label_to_id\n\n\ndef get_per_attribute_predictions_freq(\n output_metadata,\n label_to_id_map,\n label_association_map,\n disparate_labels_list=None,\n disparate_threshold=0.0,\n class_to_label_name_map=None,\n topk=1,\n):\n disparate_labels_list = disparate_labels_list or []\n to_predict_attributes = list(list(output_metadata.values())[0].keys())\n to_predict_attributes.remove(\"prediction\")\n to_predict_attributes.remove(\"path\")\n to_predict_attributes.remove(\"orig_path\")\n if \"confidence_scores\" in to_predict_attributes:\n to_predict_attributes.remove(\"confidence_scores\")\n all_classes_names = list(label_to_id_map.keys())\n preds_values = [item[\"prediction\"] for item in list(output_metadata.values())]\n media_ids = [item[\"path\"] for item in 
list(output_metadata.values())]\n to_predict_associations = list(label_association_map.keys())\n confidence_score_values = []\n if \"confidence_scores\" in list(list(output_metadata.values())[0].keys()):\n confidence_score_values = [\n item[\"confidence_scores\"] for item in list(output_metadata.values())\n ]\n\n unique_associated_labels = []\n for _key, value in label_association_map.items():\n unique_associated_labels.extend(value)\n unique_associated_labels = list(set(unique_associated_labels))\n print(f\"Unique associated labels: {unique_associated_labels}\")\n\n output_attribute_disparate_label_map = {}\n if len(confidence_score_values) > 0 and len(disparate_labels_list) > 0:\n for attribute in to_predict_attributes:\n attribute_values = [\n item[attribute] for item in list(output_metadata.values())\n ]\n unique_attribute_values = list(set(attribute_values))\n # print(f\"{attribute}: {unique_attribute_values}\")\n num_images = len(preds_values)\n attribute_disparate_label_map = {}\n for idx in range(num_images):\n attribute_value = attribute_values[idx] # like male, female etc\n img_ids = media_ids[idx]\n img_predictions = preds_values[idx][:topk]\n img_confidence_scores = []\n img_confidence_scores = confidence_score_values[idx][:topk]\n if class_to_label_name_map:\n img_predictions = [\n class_to_label_name_map[item] for item in img_predictions\n ]\n if attribute_value not in attribute_disparate_label_map:\n attribute_disparate_label_map[attribute_value] = {}\n attribute_disparate_label_map[attribute_value][\"total\"] = 1\n attribute_disparate_label_map[attribute_value][\n \"above_disparate_threshold_images\"\n ] = 0\n for disp_label in disparate_labels_list:\n attribute_disparate_label_map[attribute_value][disp_label] = 0\n else:\n attribute_disparate_label_map[attribute_value][\"total\"] = (\n attribute_disparate_label_map[attribute_value][\"total\"] + 1\n )\n # if the top-1 label score is above threshold, count it\n if len(img_confidence_scores) > 0:\n if float(img_confidence_scores[0]) >= disparate_threshold:\n attribute_disparate_label_map[attribute_value][\n \"above_disparate_threshold_images\"\n ] = (\n attribute_disparate_label_map[attribute_value][\n \"above_disparate_threshold_images\"\n ]\n + 1\n )\n found_intersection_disparate_preds = list(\n set(disparate_labels_list).intersection(set(img_predictions))\n )\n if len(found_intersection_disparate_preds) > 0:\n for pred in found_intersection_disparate_preds:\n score = float(\n img_confidence_scores[img_predictions.index(pred)]\n )\n if score >= disparate_threshold:\n attribute_disparate_label_map[attribute_value][pred] = (\n attribute_disparate_label_map[attribute_value][pred] + 1\n )\n for attr_val in unique_attribute_values:\n attribute_disparate_label_map[attr_val][\n \"above_disparate_threshold_images\"\n ] = round(\n attribute_disparate_label_map[attr_val][\n \"above_disparate_threshold_images\"\n ]\n / attribute_disparate_label_map[attr_val][\"total\"],\n 5,\n )\n for lbl_name in disparate_labels_list:\n attribute_disparate_label_map[attr_val][lbl_name] = round(\n attribute_disparate_label_map[attr_val][lbl_name]\n / attribute_disparate_label_map[attr_val][\"total\"],\n 5,\n )\n output_attribute_disparate_label_map[\n attribute\n ] = attribute_disparate_label_map\n\n # given the label association map, we want to predict:\n # per attribute: male, female, different race, different, different age etc\n # the attributes are defined in the output_metadata.\n # basically loop over the data, maintain a map:\n # 
attribute_type -> attribute_value : {total: val, association_name: count}\n print(\"Building the label association map...\")\n output_attributes_label_assoc_map, output_attributes_label_assoc_conf_scores_map = (\n {},\n {},\n )\n for attribute in to_predict_attributes:\n attribute_values = [item[attribute] for item in list(output_metadata.values())]\n unique_attribute_values = list(set(attribute_values))\n num_images = len(preds_values)\n attribute_label_assoc_map, attributes_label_assoc_conf_scores_map = {}, {}\n # loop through all images and their predictions now\n for idx in range(num_images):\n attribute_value = attribute_values[idx] # like male, female etc\n img_ids = media_ids[idx]\n img_predictions = preds_values[idx][:topk]\n img_confidence_scores = []\n if len(confidence_score_values) > 0:\n img_confidence_scores = confidence_score_values[idx][:topk]\n if class_to_label_name_map:\n img_predictions = [\n class_to_label_name_map[item] for item in img_predictions\n ]\n if attribute_value not in attribute_label_assoc_map:\n attribute_label_assoc_map[attribute_value] = {}\n attribute_label_assoc_map[attribute_value][\"total\"] = 1\n if len(confidence_score_values) > 0:\n attributes_label_assoc_conf_scores_map[attribute_value] = {}\n for lbl_name in unique_associated_labels:\n attributes_label_assoc_conf_scores_map[attribute_value][\n lbl_name\n ] = []\n if \"sexism\" in label_association_map:\n attribute_label_assoc_map[attribute_value][\"sexism_freq\"] = {}\n for sexist_label in label_association_map[\"sexism\"]:\n attribute_label_assoc_map[attribute_value][\"sexism_freq\"][\n sexist_label\n ] = 0\n for assoc in to_predict_associations:\n attribute_label_assoc_map[attribute_value][assoc] = 0\n else:\n attribute_label_assoc_map[attribute_value][\"total\"] = (\n attribute_label_assoc_map[attribute_value][\"total\"] + 1\n )\n for assoc_name in to_predict_associations:\n assoc_labels = label_association_map[assoc_name]\n found_intersection_preds = list(\n set(assoc_labels).intersection(set(img_predictions))\n )\n if len(found_intersection_preds) > 0:\n attribute_label_assoc_map[attribute_value][assoc_name] = (\n attribute_label_assoc_map[attribute_value][assoc_name] + 1\n )\n if assoc_name == \"sexism\":\n for pred in found_intersection_preds:\n attribute_label_assoc_map[attribute_value][\"sexism_freq\"][\n pred\n ] = (\n attribute_label_assoc_map[attribute_value][\n \"sexism_freq\"\n ][pred]\n + 1\n )\n if len(confidence_score_values) > 0:\n for pred in found_intersection_preds:\n attributes_label_assoc_conf_scores_map[attribute_value][\n pred\n ].append(\n float(\n img_confidence_scores[img_predictions.index(pred)]\n )\n )\n # compute the percentages now\n for attr_val in unique_attribute_values:\n for assoc_name in to_predict_associations:\n attribute_label_assoc_map[attr_val][assoc_name] = round(\n 100.0\n * attribute_label_assoc_map[attr_val][assoc_name]\n / attribute_label_assoc_map[attr_val][\"total\"],\n 3,\n )\n if \"sexism\" in label_association_map:\n for sexist_label in label_association_map[\"sexism\"]:\n attribute_label_assoc_map[attr_val][\"sexism_freq\"][\n sexist_label\n ] = round(\n 100.0\n * attribute_label_assoc_map[attr_val][\"sexism_freq\"][\n sexist_label\n ]\n / attribute_label_assoc_map[attr_val][\"total\"],\n 3,\n )\n if len(confidence_score_values) > 0:\n for lbl_name in unique_associated_labels:\n if (\n len(attributes_label_assoc_conf_scores_map[attr_val][lbl_name])\n > 0\n ):\n attributes_label_assoc_conf_scores_map[attr_val][\n lbl_name\n ] = round(\n 
np.mean(\n np.array(\n attributes_label_assoc_conf_scores_map[attr_val][\n lbl_name\n ]\n )\n ),\n 5,\n )\n output_attributes_label_assoc_map[attribute] = attribute_label_assoc_map\n output_attributes_label_assoc_conf_scores_map[\n attribute\n ] = attributes_label_assoc_conf_scores_map\n\n # now we calculate the label prediction rate and then the \"absolute\" difference\n # of label prediction label for one attribute value to the average label prediction rate\n # in remaining attribute values.\n (\n output_attributes_pred_freq_map,\n output_attributes_count_map,\n output_attributes_img_map,\n output_attributes_confidence_score_map,\n ) = ({}, {}, {}, {})\n for attribute in to_predict_attributes:\n attribute_values = [item[attribute] for item in list(output_metadata.values())]\n unique_attribute_values = list(set(attribute_values))\n (\n attribute_preds,\n attribute_count,\n attributes_img_map,\n attribute_confidence_map,\n ) = (\n {},\n {},\n {},\n {},\n )\n num_images = len(preds_values)\n # loop through all images and their predictions now\n for idx in range(num_images):\n attribute_value = attribute_values[idx] # like male, female etc\n img_predictions = preds_values[idx][:topk]\n img_ids = media_ids[idx]\n img_confidence_scores = []\n if len(confidence_score_values) > 0:\n img_confidence_scores = confidence_score_values[idx][:topk]\n if attribute_value not in attribute_preds:\n attribute_preds[attribute_value] = {}\n attributes_img_map[attribute_value] = {}\n # attribute_confidence_map[attribute_value] = {}\n for cls_name in all_classes_names:\n attribute_preds[attribute_value][cls_name] = 0\n attributes_img_map[attribute_value][cls_name] = []\n if attribute_value not in attribute_count:\n attribute_count[attribute_value] = 1\n else:\n attribute_count[attribute_value] = attribute_count[attribute_value] + 1\n\n if len(img_confidence_scores) > 0:\n if attribute_value not in attribute_confidence_map:\n attribute_confidence_map[attribute_value] = [\n float(img_confidence_scores[0])\n ]\n else:\n attribute_confidence_map[attribute_value].append(\n float(img_confidence_scores[0])\n )\n\n for lbl_pred in img_predictions:\n attribute_preds[attribute_value][lbl_pred] = (\n attribute_preds[attribute_value][lbl_pred] + 1\n )\n attributes_img_map[attribute_value][lbl_pred].append(img_ids)\n output_attributes_pred_freq_map[attribute] = attribute_preds\n output_attributes_count_map[attribute] = attribute_count\n output_attributes_img_map[attribute] = attributes_img_map\n output_attributes_confidence_score_map[attribute] = attribute_confidence_map\n\n # now, if we have the confidence scores given, we have captured the best prediction score\n # for each attribute and attribute value. 
We want to calculate the average best score\n output_mean_attributes_confidence_score_map = {}\n if len(confidence_score_values) > 0:\n for (\n _attribute_type,\n attribute_map,\n ) in output_attributes_confidence_score_map.items():\n # output_mean_attributes_confidence_score_map[attribute_type] = {}\n for attribute_key_name, scores_list in attribute_map.items():\n # print(scores_list)\n mean_score = round(np.mean(np.array(scores_list)), 4)\n # output_mean_attributes_confidence_score_map[attribute_type][attribute_key_name] = mean_score\n if (\n \"n/a\" not in attribute_key_name\n and \"unknown\" not in attribute_key_name\n ):\n output_mean_attributes_confidence_score_map[\n attribute_key_name\n ] = mean_score\n\n # now we sort the dictionaries based on the frequency\n sorted_output_attributes_pred_freq_map = {}\n for attribute_type, attribute_map in output_attributes_pred_freq_map.items():\n sorted_output_attributes_pred_freq_map[attribute_type] = {}\n for attribute_key_name, pred_map in attribute_map.items():\n sorted_pred_map = dict(\n sorted(pred_map.items(), key=lambda item: item[1], reverse=True)\n )\n sorted_output_attributes_pred_freq_map[attribute_type][\n attribute_key_name\n ] = sorted_pred_map\n\n # we also calculate the label prediction rate for each respective attribute.\n # As an example, for attribute = gender, the process is:\n # for female gender, we calculate the prediction rate of the predicted hashtags\n output_attributes_pred_rate_map = {}\n for attribute_type, attribute_map in output_attributes_pred_freq_map.items():\n output_attributes_pred_rate_map[attribute_type] = {}\n for attribute_key_name, pred_map in attribute_map.items():\n total_predictions = np.sum(np.array(list(pred_map.values())))\n pred_rate_map = {}\n for key, val in pred_map.items():\n pred_rate_map[key] = round(100.0 * (val / total_predictions), 3)\n output_attributes_pred_rate_map[attribute_type][\n attribute_key_name\n ] = pred_rate_map\n\n # now we have calculated the prediction rate map for the\n # labels in the respective attribute value. 
We want to now\n # do the comparisons of prediction rate across the\n # different possible attribute values (like across all genders)\n output_attributes_avg_pred_rate_map = {}\n all_classes_names = list(label_to_id_map.keys())\n for attribute_type, attribute_map in output_attributes_pred_rate_map.items():\n output_attributes_avg_pred_rate_map[attribute_type] = {}\n distinct_attribute_names = list(attribute_map.keys())\n for cls_name in all_classes_names:\n avg_cls_pred_rate = np.mean(\n np.array(\n [\n attribute_map[attr_name][cls_name]\n for attr_name in distinct_attribute_names\n ]\n )\n )\n output_attributes_avg_pred_rate_map[attribute_type][\n cls_name\n ] = avg_cls_pred_rate\n\n # now we have the average prediction rate across all,\n # we want to compute the difference\n output_attributes_pred_rate_difference_map = {}\n for attribute_type, attribute_map in output_attributes_pred_rate_map.items():\n output_attributes_pred_rate_difference_map[attribute_type] = {}\n num_distinct_attribute_names = len(list(attribute_map.keys()))\n for attribute_key_name, rate_pred_map in attribute_map.items():\n rate_pred_diff_map = {}\n for cls_name, local_pred_rate in rate_pred_map.items():\n # we don't take the absolute value as otherwise the predictions\n # that are lowest predicted and result in negative difference\n # will count positive towards the cls_name incorrectly.\n # Example: female and beard\n rate_pred_diff_map[cls_name] = local_pred_rate - (\n (\n (\n output_attributes_avg_pred_rate_map[attribute_type][\n cls_name\n ]\n * num_distinct_attribute_names\n )\n - local_pred_rate\n )\n / (num_distinct_attribute_names - 1)\n )\n sorted_rate_pred_diff_map = dict(\n sorted(\n rate_pred_diff_map.items(), key=lambda item: item[1], reverse=True\n )\n )\n output_attributes_pred_rate_difference_map[attribute_type][\n attribute_key_name\n ] = sorted_rate_pred_diff_map\n return (\n sorted_output_attributes_pred_freq_map,\n output_attributes_count_map,\n output_attributes_pred_rate_difference_map,\n output_attributes_img_map,\n output_attributes_label_assoc_map,\n output_attributes_confidence_score_map,\n output_mean_attributes_confidence_score_map,\n output_attributes_label_assoc_conf_scores_map,\n output_attribute_disparate_label_map,\n )\n\n\ndef convert_and_print_dataframe(\n output_attribute_label_assoc_map, model_name, label_assoc_mapping, threshold, topk\n):\n assoc_names = list(label_assoc_mapping.keys())\n attributes_list = []\n for key, value in list(output_attribute_label_assoc_map.items()):\n if isinstance(value, dict):\n attributes_list.extend(sorted(value.keys()))\n else:\n attributes_list.append(key)\n attributes_list.remove(\"n/a\")\n attributes_list.remove(\"n/a_lighter\")\n attributes_list.remove(\"n/a_darker\")\n attributes_list.remove(\"unknown\")\n\n flattened_output_attribute_label_assoc_map = {}\n for entry in list(output_attribute_label_assoc_map.values()):\n flattened_output_attribute_label_assoc_map.update(entry)\n\n dataframe = {\"t_topk\": [], \"model\": [], \"association\": []}\n for item in attributes_list:\n dataframe[item] = []\n\n for assoc in assoc_names:\n t_topk = f\"t={threshold}, top-{topk}\"\n dataframe[\"model\"].append(model_name)\n dataframe[\"association\"].append(assoc)\n dataframe[\"t_topk\"].append(t_topk)\n for key, value in list(flattened_output_attribute_label_assoc_map.items()):\n if key not in dataframe:\n continue\n if assoc not in value:\n continue\n dataframe[key].append(value[assoc])\n\n df = pd.DataFrame(data=dataframe)\n print(df.to_markdown())\n 
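# the dataframe has one row per association and one column per attribute value (values are the percentages computed above); it is returned for optional downstream use\n    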
return df\n\n\ndef plot_few_images_with_bbox(\n output_metadata,\n candidate_labels,\n plot_count=10,\n class_to_label_name_map=None,\n topk=1,\n filter_key=\"gender_skintone\",\n filter_value=\"male_darker\",\n confidence_threshold=0.0,\n):\n # print(output_metadata)\n preds_values = [item[\"prediction\"] for item in list(output_metadata.values())]\n confidence_scores = []\n if \"confidence_scores\" in list(output_metadata.values())[0]:\n confidence_scores = [\n item[\"confidence_scores\"] for item in list(output_metadata.values())\n ]\n print(f\"Found confidence scores: {len(confidence_scores)}\")\n media_ids = [item[\"orig_path\"] for item in list(output_metadata.values())]\n gender_presentation = [item[\"gender\"] for item in list(output_metadata.values())]\n age_presentation = [item[\"age\"] for item in list(output_metadata.values())]\n bbox_annotations = []\n # print(list(output_metadata.values())[0])\n if \"bbox\" in list(output_metadata.values())[0]:\n bbox_annotations = [item[\"bbox\"] for item in list(output_metadata.values())]\n attribute_values = [item[filter_key] for item in list(output_metadata.values())]\n num_images = len(preds_values)\n count = 0\n for idx in range(num_images):\n if plot_count > 0 and count >= plot_count:\n return\n if attribute_values[idx] == filter_value:\n img_predictions = preds_values[idx][:topk]\n img_scores = []\n if len(confidence_scores) > 0:\n img_scores = confidence_scores[idx][:topk]\n if class_to_label_name_map:\n img_predictions = [\n class_to_label_name_map[item] for item in img_predictions\n ]\n found_intersection_preds = list(\n set(candidate_labels).intersection(set(img_predictions))\n )\n if len(found_intersection_preds) > 0:\n if len(confidence_scores) > 0 and confidence_threshold > 0.0:\n intersection_preds_scores = [\n float(img_scores[img_predictions.index(pred)])\n for pred in found_intersection_preds\n ]\n # print(found_intersection_preds)\n # print(intersection_preds_scores)\n if not np.any(\n np.array(intersection_preds_scores) >= confidence_threshold\n ):\n continue\n try:\n # print(f\"img: {media_ids[idx]}\\npredictions: {img_predictions}\")\n print(\n f\"\\t\\t\\t\\t\\t\\t\\t\\t\\timg: {media_ids[idx]}\\n\\t\\t\\t\\t\\t\\t\\t\\t\\tpredictions: {img_predictions}\"\n )\n if len(img_scores) > 0:\n print(f\"\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tscores: {img_scores}\")\n print(\n f\"\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tGender: {gender_presentation[idx]}, Age: {age_presentation[idx]}\"\n )\n with PathManager.open(media_ids[idx], \"rb\") as fopen:\n img = Image.open(fopen).convert(\"RGB\")\n if len(bbox_annotations) > 0:\n # print('ha box annotation.....')\n bbox = [float(item) for item in bbox_annotations[idx]]\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 5\n fig_size[1] = 5\n plt.imshow(img)\n plt.axis(\"off\")\n plt.show()\n img = img.crop(bbox)\n bbox_size = min(img.size)\n width, height = img.size\n ratio = max(img.size) / min(img.size)\n if ratio >= 1.2:\n if width < height:\n bbox = (0, 0, bbox_size, bbox_size)\n else:\n bbox = (\n int((width - bbox_size) / 2),\n 0,\n int((width - bbox_size) / 2) + bbox_size,\n height,\n )\n print(f\"\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tbbox: {bbox}\")\n img = img.crop(bbox)\n else:\n print(\n f\"\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tSkipping the cropping....bbox: {bbox}\"\n )\n img = T.Resize(size=[224, 224])(img)\n else:\n # print(\"NO BBOX ANNOTATION...\")\n img = T.Resize(size=[224, 224])(img)\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 5\n fig_size[1] = 5\n plt.imshow(img)\n 
plt.axis(\"off\")\n plt.show()\n count += 1\n except Exception:\n pass\n\n\ndef generate_openimages_disentangle_analysis(\n common_map,\n openimages_miap_predictions,\n topk=1,\n confidence_threshold=0.0,\n DISPARATE_THRESHOLD=0.8,\n DISPARATE_LABELS_LIST=None,\n LABEL_ASSOC_MAPPING=None,\n):\n DISPARATE_LABELS_LIST = DISPARATE_LABELS_LIST or []\n LABEL_ASSOC_MAPPING = LABEL_ASSOC_MAPPING or {}\n openimages_miap_predictions.update(common_map)\n print(\n f\"======================== {openimages_miap_predictions['model_name']} ============================\"\n )\n class_to_label_name_map = load_file(\n \"https://dl.fbaipublicfiles.com/vissl/fairness/label_association/in22k_cls_name_to_label_name_map.json\"\n )\n in22k_subset_label_name_map = {\n key: value[0] for key, value in class_to_label_name_map.items()\n }\n (\n output_metadata,\n label_to_id,\n ) = load_in22k_subset_opeinimages_miap_labels_images_preds_metadata(\n openimages_miap_predictions, confidence_threshold=confidence_threshold\n )\n\n (\n sorted_output_attributes_pred_freq_map,\n output_attributes_count_map,\n output_attributes_pred_rate_difference_map,\n output_attributes_img_map,\n attribute_label_assoc_map,\n output_attributes_confidence_score_map,\n output_mean_attributes_confidence_score_map,\n output_attributes_label_assoc_conf_scores_map,\n output_attribute_disparate_label_map,\n ) = get_per_attribute_predictions_freq(\n output_metadata,\n label_to_id,\n LABEL_ASSOC_MAPPING,\n DISPARATE_LABELS_LIST,\n DISPARATE_THRESHOLD,\n in22k_subset_label_name_map,\n topk=topk,\n )\n\n _ = convert_and_print_dataframe(\n attribute_label_assoc_map,\n openimages_miap_predictions[\"model_name\"],\n label_assoc_mapping=LABEL_ASSOC_MAPPING,\n threshold=confidence_threshold,\n topk=topk,\n )\n return output_attributes_img_map, output_metadata, attribute_label_assoc_map\n\n\ndef calculate_metrics(\n model_name,\n label_predictions_file,\n label_predictions_scores_file,\n pred_img_indices_file,\n):\n\n for PREDICTION_CONFIDENCE_THRESHOLD in [0.0, 0.1, 0.3, 0.8]:\n my_model_cc_face_crops_predictions = {\n \"model_name\": model_name,\n \"label_predictions\": label_predictions_file,\n \"label_predictions_scores\": label_predictions_scores_file,\n \"pred_img_indices\": pred_img_indices_file,\n }\n _, _, _ = generate_openimages_disentangle_analysis(\n COMMON_MAP_TEST,\n my_model_cc_face_crops_predictions,\n topk=TOPK,\n confidence_threshold=PREDICTION_CONFIDENCE_THRESHOLD,\n DISPARATE_THRESHOLD=DISPARATE_THRESHOLD,\n DISPARATE_LABELS_LIST=DISPARATE_LABELS_LIST,\n LABEL_ASSOC_MAPPING=IN22K_SUBSET_LABEL_ASSOCIATION_MAP,\n )\n\n\nif __name__ == \"__main__\":\n model_name = \"Sup RN-50 (torchvision) IN1K\"\n label_predictions_file = \"/path/to/rank0_test_heads_predictions.npy\"\n label_predictions_scores_file = \"/path/to/rank0_test_heads_conf_scores.npy\"\n pred_img_indices_file = \"/path/to/rank0_test_heads_inds.npy\"\n calculate_metrics(\n model_name,\n label_predictions_file,\n label_predictions_scores_file,\n pred_img_indices_file,\n )\n"
]
| [
[
"numpy.array",
"pandas.DataFrame",
"numpy.ones",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
]
|
zhengwsh/text-classification | [
"df91cb0aebc76f12006d9d380a4a3c049446e83c"
]
| [
"text_rnn.py"
]
| [
"import tensorflow as tf\nimport numpy as np\n\n\nclass TextRNN(object):\n \"\"\"\n A RNN for text classification/regression.\n Uses an embedding layer, followed by a recurrent, fully-connected (and softmax) layer.\n \"\"\"\n def __init__(\n self, model_type, sequence_length, num_classes, vocab_size,\n embedding_size, rnn_size, num_layers, l2_reg_lambda=0.5, model='lstm'): # batch_size, \n\n # Placeholders for input, output and dropout\n self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name=\"input_x\")\n self.input_y = tf.placeholder(tf.float32, [None, num_classes], name=\"input_y\")\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n self.learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\")\n\n # Keeping track of l2 regularization loss (optional)\n l2_loss = tf.constant(0.0)\n\n # Embedding layer\n with tf.device('/cpu:0'), tf.name_scope(\"embedding\"):\n # When trainable parameter equals True the embedding vector is non-static, otherwise is static\n self.W = tf.Variable(\n tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),\n name=\"W\", trainable=True)\n self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x) # [None, sequence_length, embedding_size]\n\n # Create a recurrent layer for each rnn layer\n with tf.name_scope(model):\n if model == 'rnn':\n cell_fun = tf.nn.rnn_cell.BasicRNNCell\n elif model == 'gru':\n cell_fun = tf.nn.rnn_cell.GRUCell\n elif model == 'lstm':\n cell_fun = tf.nn.rnn_cell.BasicLSTMCell\n \n def get_a_cell():\n cell_tmp = cell_fun(rnn_size, state_is_tuple=True)\n # cell_tmp = tf.contrib.rnn.DropoutWrapper(cell_tmp, output_keep_prob=self.dropout_keep_prob)\n return cell_tmp\n \n # Stacking multi-layers\n cell = tf.nn.rnn_cell.MultiRNNCell([get_a_cell() for _ in range(num_layers)])\n # initial_state = cell.zero_state(None, tf.float32)\n outputs, last_state = tf.nn.dynamic_rnn(cell, self.embedded_chars, dtype=tf.float32) # , initial_state=initial_state\n # --'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]\n # --'last_state' is a tensor of shape [batch_size, cell_state_size]\n # self.output = outputs[:, -1, :]\n self.output = tf.reduce_mean(outputs, axis=1)\n # self.output = tf.reshape(outputs, [batch_size, -1])\n\n # Add dropout\n with tf.name_scope(\"dropout\"):\n self.rnn_drop = tf.nn.dropout(self.output, self.dropout_keep_prob)\n\n # Final (unnormalized) scores and predictions\n with tf.name_scope(\"output\"):\n W = tf.get_variable(\n \"W\",\n shape=[rnn_size, num_classes], # sequence_length * \n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name=\"b\")\n l2_loss += tf.nn.l2_loss(W)\n l2_loss += tf.nn.l2_loss(b)\n self.scores = tf.nn.xw_plus_b(self.rnn_drop, W, b, name=\"scores\")\n if model_type == 'clf':\n self.predictions = tf.argmax(self.scores, 1, name=\"predictions\")\n elif model_type == 'reg':\n self.predictions = tf.reduce_max(self.scores, 1, name=\"predictions\")\n self.predictions = tf.expand_dims(self.predictions, -1)\n\n # Calculate mean cross-entropy loss, or root-mean-square error loss\n with tf.name_scope(\"loss\"):\n if model_type == 'clf':\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)\n self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss\n elif model_type == 'reg':\n losses = tf.sqrt(tf.losses.mean_squared_error(predictions=self.predictions, labels=self.input_y))\n self.loss = tf.reduce_mean(losses) + l2_reg_lambda * 
l2_loss\n\n # Accuracy\n with tf.name_scope(\"accuracy\"):\n if model_type == 'clf':\n correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, \"float\"), name=\"accuracy\")\n elif model_type == 'reg':\n self.accuracy = tf.constant(0.0, name=\"accuracy\")"
]
| [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.expand_dims",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.argmax",
"tensorflow.random_uniform",
"tensorflow.nn.l2_loss",
"tensorflow.constant",
"tensorflow.nn.xw_plus_b",
"tensorflow.reduce_max",
"tensorflow.placeholder",
"tensorflow.name_scope",
"tensorflow.nn.embedding_lookup",
"tensorflow.device",
"tensorflow.losses.mean_squared_error",
"tensorflow.nn.dynamic_rnn",
"tensorflow.reduce_mean",
"tensorflow.nn.dropout",
"tensorflow.cast"
]
]
|
iamsg08/Joing-Parsing-and-Generation-for-Abstractive-Summarization | [
"a432e6e78ac1b3016c2a5f8788a613772b11da40"
]
| [
"layers/searcher/AStar.py"
]
| [
"#==============================#\n# System Import #\n#==============================#\nimport copy, heapq\n\n#==============================#\n# Platform Import #\n#==============================#\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\n#==============================#\n# Class/Layer Part Import #\n#==============================#\nfrom .BasicSearcher import BasicSearcher\n\nclass AStar(BasicSearcher):\n def __init__(self, options):\n super(AStar, self).__init__(options)\n \n def search(self, state_below, bi_in = None):\n startState = [0, [[self.BOS, None]], None, None]\n cands = []\n heapq.heappush(cands, startState)\n pool = np.asarray(self.LVT.Pool, dtype = np.int64)\n maxSize = len(self.LVT.Pool)\n self.condition_new('maxSize',maxSize)\n Answers = []\n while (len(cands) > 0):\n head = heapq.heappop(cands)\n Score, Sequence, State_Pass, bi_old = head\n if State_Pass is None:\n State_Pass = self.state_init()\n #print Score, [it[0] for it in Sequence],\n \n State_packed = self.state_pack(State_Pass)\n token = Sequence[-1][0]\n Token = Variable(torch.LongTensor([[token]]))\n Length = Variable(torch.LongTensor([1]))\n \n if torch.cuda.is_available():\n Token = Token.cuda()\n Length = Length.cuda()\n \n '''\n Token_Emb = self.Emb(Token)\n outputs, state_pass_ = self.generator(self.LVT, state_below, Token_Emb, Length, State_packed)\n '''\n \n \n Inputs = self.getInputs(Token, Length)\n outputs, state_pass_ = self.generator(self.LVT, state_below, Inputs, State_packed)\n \n state_pass = self.state_process(State_packed, state_pass_, token)\n state_unpacked = self.state_unpack(state_pass)\n #print state_unpacked[1], state_unpacked[2]\n outputs = BasicSearcher.atom(outputs)\n preds = self.getPred(outputs)\n preds = np.log(preds + 1e-8)\n \n # Conditions\n ids = self.cond(preds, state_unpacked, self.conditions)\n if self.biGramTrick:\n lastToken = self.lastToken(Sequence, self.conditions)\n if bi_old is None:\n bi_old = set()\n preds, ids, bi_next = self.do_bigramTrick(preds, ids, self.beam_size, lastToken, bi_in, bi_old, self.gamma, pool)\n topK = [[-preds[i]+Score, Sequence+[[int(pool[ids[i]]), self.getAtt(outputs)]], copy.deepcopy(state_unpacked), bi_next[i]] for i in range(preds.shape[0])]\n else:\n preds, ids = self.no_bigramTrick(preds, ids, self.beam_size)\n topK = [[-preds[i]+Score, Sequence+[[int(pool[ids[i]]), self.getAtt(outputs)]], copy.deepcopy(state_unpacked), None] for i in range(preds.shape[0])]\n \n for Score, Sequence, State_Pass, biGram in topK:\n if self.checkEnd(Sequence, State_Pass):\n Answers.append([Score, Sequence, State_Pass, biGram])\n elif len(Sequence) <= self.maxLength:\n heapq.heappush(cands, [Score, Sequence, State_Pass, biGram])\n \n if (len(Answers) >= self.answer_size):\n break\n \n return Answers\n "
]
| [
[
"torch.cuda.is_available",
"torch.LongTensor",
"numpy.asarray",
"numpy.log"
]
]
|
IMBINGO95/FairMOT | [
"c496e911a89870a9b6988d93f80e680d01ee8afc"
]
| [
"CalibrateTransfer/class_set.py"
]
| [
"import numpy as np\nclass CalibrateParameter():\n \"\"\"相机的标定参数\"\"\"\n def __init__(self,rotation_vector = None, translation_vector = None, cameraMatrix = None, distCoeffs = None, read_from_file = False):\n ''' If read_from_file == True, then creat a empty class!'''\n if read_from_file :\n self.rotation_vector = np.zeros((3, 1), dtype='float64') # 大写是类!\n self.translation_vector = np.zeros((3, 1), dtype='float64') # 大写是类!\n self.cameraMatrix = np.zeros((3, 3), dtype='float64') # 大写是类!\n self.distCoeffs = np.zeros((1, 5), dtype='float64') # 大写是类!\n else:\n self.rotation_vector = rotation_vector\n self.translation_vector = translation_vector\n self.cameraMatrix = cameraMatrix\n self.distCoeffs = distCoeffs\n\nclass CalibrateParameter_Flag1():\n \"\"\"Wide-angle camera calibration parameters for Flag1\n But this Class is specified for Flag1, because Flag1 camera is combined by four seperate camera.\n So it needs four seperate calibrate parameters for different sections.\n \"\"\"\n def __init__(self,Calib_Section1,Calib_Section2,Calib_Section3,Calib_Section4):\n self.Calib_Section1 = Calib_Section1\n self.Calib_Section2 = Calib_Section2\n self.Calib_Section3 = Calib_Section3\n self.Calib_Section4 = Calib_Section4\n\n\nclass worldcoor():\n \"\"\"定义世界坐标,需要输入x,y的值,默认z = 0 。\"\"\"\n def __init__(self,x,y):\n self.x = x\n self.y = y\n self.z = 0\n\ndef CalibrateParameter_To_dict(calibrateParameter):\n '''\n change CalibrateParameter class to dictionay type!\n :param calibrateParameter: parameter in CalibrateParameter class type.\n :return: parameter in dictionay type.\n '''\n parameter = {}\n parameter['rotation'] = {}\n parameter['rotation']['alpha'] = calibrateParameter.rotation_vector[0][0]\n parameter['rotation']['beta'] = calibrateParameter.rotation_vector[1][0]\n parameter['rotation']['gama'] = calibrateParameter.rotation_vector[2][0]\n parameter['translation'] = {}\n parameter['translation']['tx'] = calibrateParameter.translation_vector[0][0]\n parameter['translation']['ty'] = calibrateParameter.translation_vector[1][0]\n parameter['translation']['tz'] = calibrateParameter.translation_vector[2][0]\n parameter['intrinsic'] = {}\n parameter['intrinsic']['fx'] = calibrateParameter.cameraMatrix[0][0]\n parameter['intrinsic']['fy'] = calibrateParameter.cameraMatrix[1][1]\n parameter['intrinsic']['u'] = calibrateParameter.cameraMatrix[0][2]\n parameter['intrinsic']['v'] = calibrateParameter.cameraMatrix[1][2]\n parameter['distortion'] = {}\n parameter['distortion']['k1'] = calibrateParameter.distCoeffs[0][0]\n parameter['distortion']['k2'] = calibrateParameter.distCoeffs[0][1]\n parameter['distortion']['p1'] = calibrateParameter.distCoeffs[0][2]\n parameter['distortion']['p2'] = calibrateParameter.distCoeffs[0][3]\n parameter['distortion']['k3'] = calibrateParameter.distCoeffs[0][4]\n\n return parameter\n\ndef dict_To_CalibrateParameter(parameter):\n '''\n change dictionay type to CalibrateParameter type\n :param parameter:\n :return:\n '''\n calibrateParameter = CalibrateParameter(read_from_file=True)\n calibrateParameter.rotation_vector[0][0] = parameter['rotation']['alpha']\n calibrateParameter.rotation_vector[1][0] = parameter['rotation']['beta']\n calibrateParameter.rotation_vector[2][0] = parameter['rotation']['gamma']\n calibrateParameter.translation_vector[0][0] = parameter['translation']['tx']\n calibrateParameter.translation_vector[1][0] = parameter['translation']['ty']\n calibrateParameter.translation_vector[2][0] = parameter['translation']['tz']\n 
calibrateParameter.cameraMatrix[0][0] = parameter['intrinsic']['fx']\n calibrateParameter.cameraMatrix[1][1] = parameter['intrinsic']['fy']\n calibrateParameter.cameraMatrix[0][2] = parameter['intrinsic']['u']\n calibrateParameter.cameraMatrix[1][2] = parameter['intrinsic']['v']\n calibrateParameter.cameraMatrix[2][2] = 1.0\n calibrateParameter.distCoeffs[0][0] = parameter['distortion']['k1']\n calibrateParameter.distCoeffs[0][1] = parameter['distortion']['k2']\n calibrateParameter.distCoeffs[0][2] = parameter['distortion']['p1']\n calibrateParameter.distCoeffs[0][3] = parameter['distortion']['p2']\n calibrateParameter.distCoeffs[0][4] = parameter['distortion']['k3']\n\n return calibrateParameter\n"
]
| [
[
"numpy.zeros"
]
]
|
karthikmudaliar/SentimentalAnalytics | [
"4b829030733c8852b67614730a22bdd66b7208ff"
]
| [
"emojiAnalyzer.py"
]
| [
"'''\nCreated on 04-Feb-2019\n\n@author: Teerta shetty\n'''\n\nimport csv\nimport spacy\nimport tweepy\nimport re\nimport pandas as pd\nimport pymysql\n\n\nfrom time import sleep\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nimport vaderSentiment.vaderSentiment as vs\nimport emoji\n\nconnection = pymysql.connect(\n host='localhost',\n user='root',\n password='', \n db='analyticsproj',\n port=3306\n )\ncursor2=connection.cursor()\ncursor3=connection.cursor()\n####input your credentials here\nconsumer_key = 'wlFhHiZJSTVb3k1rcjTqx65Ch' \nconsumer_secret = 'Bdo5g3DRequWoK8HgaQMf9EXYDEEOPDScsisDXPnoZiWIdKb1g'\naccess_token = '1082646302301790208-SpvxhYpwjVsYAFaQZwVSNlNMPFWuQB'\naccess_token_secret = 'auTrniTix2biWt31zFPjz82P6GWadyA77WcJEg9NVAR62'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n \ndef extract_emojis(strr):\n print (\"*************************************************************\")\n print (strr)\n return ''.join(c for c in strr if c in emoji.UNICODE_EMOJI)\n\nanalyser = SentimentIntensityAnalyzer()\n \ndef sentiment_analyzer_scores(sentence):\n score = analyser.polarity_scores(sentence)\n print(sentence, \"------\", str(score))\n return score\n\ntmp=pd.DataFrame()\n\nj=0 \n\n\n\nsql2=\"select count(`keyword_name`) from `keywords_master`\"\ncursor3.execute(sql2)\n\nsql=\"select `keyword_id`,`keyword_name`,`language` from `keywords_master`\" \n \ncursor2.execute(sql)\n\nfor x in cursor3:\n \n records_count= x[0]\n \narr = []\n\nfor i in range(0,records_count):\n \n for t in cursor2:\n print (\"=====================\")\n arr.append(t)\n \ncount_arr = len(arr)\nfor i in range(count_arr):\n keyword_id = arr[i][0]\n keyword = arr[i][1]\n language = arr[i][2]\n \n print (keyword)\n \n for tweet in tweepy.Cursor(api.search,q=\"#rabalebha\",count=2000,\n lang=\"en\",tweet_mode='extended').items():\n if(j>=12000):\n print('limit reached sleeping now')\n sleep(15*60)\n j=0 \n print (tweet.created_at, tweet.full_text)\n print(pd.DataFrame(data=[extract_emojis(tweet.full_text)]))\n tmp=tmp.append(pd.DataFrame(data=[extract_emojis(tweet.full_text)]),ignore_index=True)\n j=j+1\n print (\"j: \",j)\n \n print(\"Printing tmp\")\n print(tmp)\n \n \n \n emoji={} \n emoji=analyser.make_emoji_dict()\n emojiDF=pd.DataFrame(list(emoji.items()))\n \n emojiName=pd.DataFrame()\n \n emojiCountList=pd.DataFrame()\n \n for j in range(0,len(emojiDF)):\n emojiCountList.loc[j,0] = emojiDF.loc[j,0]\n emojiCountList.loc[j,1] = 0\n \n print(\"this is emojiCountList\")\n print(emojiCountList)\n \n for i in range(0,len(tmp)):\n print(i)\n for x in range(0,len(emojiCountList)):\n for emo in tmp.loc[i,0]: \n if (emo == emojiCountList.loc[x,0]):\n print(\"emo\",emo)\n sentiment_analyzer_scores(emo)\n emojiCountList.loc[x,1]=emojiCountList.loc[x,1] + 1\n \n emoji_array=emojiCountList.loc[x]\n emoji_sym = emoji_array[0]\n emoji_count = emoji_array[1]\n \n print (\"_____________111111111_______________\")\n print (emoji_sym)\n \n cursor=connection.cursor()\n sql=\"INSERT INTO emoji(emoji,fo_key_id,count) VALUES ('\"+str(emoji_sym)+\"','\"+str(emoji_count)+\"','\"+str(keyword_id)+\"')\"\n cursor.execute(sql)\n connection.commit()\n \n \n \n\n\n\n\n\n\n\n \n#print(emojiName.groupby(0).count())\n\n# countArray=pd.DataFrame(data=[['sample',0],['sample',0]],columns=['emoji','count'])\n# for w1 in range (0,len(emojiName)):\n# flag=0\n# for w2 in range (0,len(countArray)):\n# if(emojiName.loc[w1,0] == 
countArray.loc[w2,0]):\n# countArray.loc[w2,1]=countArray.loc[w2,1] + 1 \n# flag=1\n# if(flag==0):\n# countArray=countArray.append([emojiName.loc[w1,0],1])\n# print(countArray)\n \n#print(countArray)"
]
| [
[
"pandas.DataFrame"
]
]
|
rsenthilkumar6/ludwig | [
"8545f51470c2e2b8ef0dfb5ae5d313793728fae0"
]
| [
"tests/integration_tests/utils.py"
]
| [
"# -*- coding: utf-8 -*-\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport multiprocessing\nimport os\nimport random\nimport shutil\nimport sys\nimport traceback\nimport unittest\nimport uuid\nfrom distutils.util import strtobool\n\nimport cloudpickle\nimport numpy as np\nimport pandas as pd\n\nfrom ludwig.api import LudwigModel\nfrom ludwig.backend import LocalBackend\nfrom ludwig.constants import VECTOR, COLUMN, NAME, PROC_COLUMN\nfrom ludwig.data.dataset_synthesizer import DATETIME_FORMATS\nfrom ludwig.data.dataset_synthesizer import build_synthetic_dataset\nfrom ludwig.experiment import experiment_cli\nfrom ludwig.features.feature_utils import compute_feature_hash\nfrom ludwig.utils.data_utils import read_csv, replace_file_extension\n\nENCODERS = [\n 'embed', 'rnn', 'parallel_cnn', 'cnnrnn', 'stacked_parallel_cnn',\n 'stacked_cnn', 'transformer'\n]\n\nHF_ENCODERS_SHORT = ['distilbert']\n\nHF_ENCODERS = [\n 'bert',\n 'gpt',\n 'gpt2',\n ##'transformer_xl',\n 'xlnet',\n 'xlm',\n 'roberta',\n 'distilbert',\n 'ctrl',\n 'camembert',\n 'albert',\n 't5',\n 'xlmroberta',\n 'longformer',\n 'flaubert',\n 'electra',\n 'mt5'\n]\n\n\nclass LocalTestBackend(LocalBackend):\n @property\n def supports_multiprocessing(self):\n return False\n\n\ndef parse_flag_from_env(key, default=False):\n try:\n value = os.environ[key]\n except KeyError:\n # KEY isn't set, default to `default`.\n _value = default\n else:\n # KEY is set, convert it to True or False.\n try:\n _value = strtobool(value)\n except ValueError:\n # More values are supported, but let's keep the message simple.\n raise ValueError(\"If set, {} must be yes or no.\".format(key))\n return _value\n\n\n_run_slow_tests = parse_flag_from_env(\"RUN_SLOW\", default=False)\n\n\ndef slow(test_case):\n \"\"\"\n Decorator marking a test as slow.\n\n Slow tests are skipped by default. 
Set the RUN_SLOW environment variable\n to a truth value to run them.\n\n \"\"\"\n if not _run_slow_tests:\n test_case = unittest.skip(\"Skipping: this test is too slow\")(test_case)\n return test_case\n\n\ndef generate_data(\n input_features,\n output_features,\n filename='test_csv.csv',\n num_examples=25,\n\n):\n \"\"\"\n Helper method to generate synthetic data based on input, output feature\n specs\n :param num_examples: number of examples to generate\n :param input_features: schema\n :param output_features: schema\n :param filename: path to the file where data is stored\n :return:\n \"\"\"\n features = input_features + output_features\n df = build_synthetic_dataset(num_examples, features)\n data = [next(df) for _ in range(num_examples)]\n\n dataframe = pd.DataFrame(data[1:], columns=data[0])\n dataframe.to_csv(filename, index=False)\n\n return filename\n\n\ndef random_string(length=5):\n return uuid.uuid4().hex[:length].upper()\n\n\ndef numerical_feature(normalization=None, **kwargs):\n feature = {\n 'name': 'num_' + random_string(),\n 'type': 'numerical',\n 'preprocessing': {\n 'normalization': normalization\n }\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef category_feature(**kwargs):\n feature = {\n 'type': 'category',\n 'name': 'category_' + random_string(),\n 'vocab_size': 10,\n 'embedding_size': 5\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef text_feature(**kwargs):\n feature = {\n 'name': 'text_' + random_string(),\n 'type': 'text',\n 'reduce_input': None,\n 'vocab_size': 5,\n 'min_len': 7,\n 'max_len': 7,\n 'embedding_size': 8,\n 'state_size': 8\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef set_feature(**kwargs):\n feature = {\n 'type': 'set',\n 'name': 'set_' + random_string(),\n 'vocab_size': 10,\n 'max_len': 5,\n 'embedding_size': 5\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef sequence_feature(**kwargs):\n feature = {\n 'type': 'sequence',\n 'name': 'sequence_' + random_string(),\n 'vocab_size': 10,\n 'max_len': 7,\n 'encoder': 'embed',\n 'embedding_size': 8,\n 'fc_size': 8,\n 'state_size': 8,\n 'num_filters': 8,\n 'hidden_size': 8\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef image_feature(folder, **kwargs):\n feature = {\n 'type': 'image',\n 'name': 'image_' + random_string(),\n 'encoder': 'resnet',\n 'preprocessing': {\n 'in_memory': True,\n 'height': 12,\n 'width': 12,\n 'num_channels': 3\n },\n 'resnet_size': 8,\n 'destination_folder': folder,\n 'fc_size': 8,\n 'num_filters': 8\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef audio_feature(folder, **kwargs):\n feature = {\n 'name': 'audio_' + random_string(),\n 'type': 'audio',\n 'preprocessing': {\n 'audio_feature': {\n 'type': 'fbank',\n 'window_length_in_s': 0.04,\n 'window_shift_in_s': 0.02,\n 'num_filter_bands': 80\n },\n 'audio_file_length_limit_in_s': 3.0\n },\n 'encoder': 'stacked_cnn',\n 'should_embed': False,\n 'conv_layers': [\n {\n 'filter_size': 400,\n 'pool_size': 16,\n 'num_filters': 32,\n 'regularize': 'false'\n },\n {\n 
'filter_size': 40,\n 'pool_size': 10,\n 'num_filters': 64,\n 'regularize': 'false'\n }\n ],\n 'fc_size': 256,\n 'destination_folder': folder\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef timeseries_feature(**kwargs):\n feature = {\n 'name': 'timeseries_' + random_string(),\n 'type': 'timeseries',\n 'max_len': 7\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef binary_feature(**kwargs):\n feature = {\n 'name': 'binary_' + random_string(),\n 'type': 'binary'\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef bag_feature(**kwargs):\n feature = {\n 'name': 'bag_' + random_string(),\n 'type': 'bag',\n 'max_len': 5,\n 'vocab_size': 10,\n 'embedding_size': 5\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef date_feature(**kwargs):\n feature = {\n 'name': 'date_' + random_string(),\n 'type': 'date',\n 'preprocessing': {\n 'datetime_format': random.choice(list(DATETIME_FORMATS.keys()))\n }\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef h3_feature(**kwargs):\n feature = {\n 'name': 'h3_' + random_string(),\n 'type': 'h3'\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef vector_feature(**kwargs):\n feature = {\n 'type': VECTOR,\n 'vector_size': 5,\n 'name': 'vector_' + random_string()\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n\ndef run_experiment(\n input_features,\n output_features,\n skip_save_processed_input=True,\n config=None,\n backend=None,\n **kwargs\n):\n \"\"\"\n Helper method to avoid code repetition in running an experiment. 
Deletes\n the data saved to disk after running the experiment\n :param input_features: list of input feature dictionaries\n :param output_features: list of output feature dictionaries\n **kwargs you may also pass extra parameters to the experiment as keyword\n arguments\n :return: None\n \"\"\"\n if input_features is not None and output_features is not None:\n # This if is necessary so that the caller can call with\n # config_file (and not config)\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {\n 'type': 'concat',\n 'fc_size': 14\n },\n 'training': {'epochs': 2}\n }\n\n args = {\n 'config': config,\n 'backend': backend or LocalTestBackend(),\n 'skip_save_training_description': True,\n 'skip_save_training_statistics': True,\n 'skip_save_processed_input': skip_save_processed_input,\n 'skip_save_progress': True,\n 'skip_save_unprocessed_output': True,\n 'skip_save_model': True,\n 'skip_save_predictions': True,\n 'skip_save_eval_stats': True,\n 'skip_collect_predictions': True,\n 'skip_collect_overall_stats': True,\n 'skip_save_log': True\n }\n args.update(kwargs)\n\n _, _, _, _, exp_dir_name = experiment_cli(**args)\n shutil.rmtree(exp_dir_name, ignore_errors=True)\n\n\ndef generate_output_features_with_dependencies(main_feature, dependencies):\n # helper function to generate multiple output features specifications\n # with dependencies, support for 'test_experiment_multiple_seq_seq` unit test\n # Parameters:\n # main_feature: feature identifier, valid values 'feat1', 'feat2', 'feat3'\n # dependencies: list of dependencies for 'main_feature', do not li\n # Example:\n # generate_output_features_with_dependencies('feat2', ['feat1', 'feat3'])\n\n output_features = [\n category_feature(vocab_size=2, reduce_input='sum'),\n sequence_feature(vocab_size=10, max_len=5),\n numerical_feature()\n ]\n\n # value portion of dictionary is a tuple: (position, feature_name)\n # position: location of output feature in the above output_features list\n # feature_name: Ludwig generated feature name\n feature_names = {\n 'feat1': (0, output_features[0]['name']),\n 'feat2': (1, output_features[1]['name']),\n 'feat3': (2, output_features[2]['name'])\n }\n\n # generate list of dependencies with real feature names\n generated_dependencies = [feature_names[feat_name][1]\n for feat_name in dependencies]\n\n # specify dependencies for the main_feature\n output_features[feature_names[main_feature][0]]['dependencies'] = \\\n generated_dependencies\n\n return output_features\n\n\ndef _subproc_wrapper(fn, queue, *args, **kwargs):\n fn = cloudpickle.loads(fn)\n try:\n results = fn(*args, **kwargs)\n except Exception as e:\n traceback.print_exc(file=sys.stderr)\n results = e\n queue.put(results)\n\n\ndef spawn(fn):\n def wrapped_fn(*args, **kwargs):\n ctx = multiprocessing.get_context('spawn')\n queue = ctx.Queue()\n\n p = ctx.Process(\n target=_subproc_wrapper,\n args=(cloudpickle.dumps(fn), queue, *args),\n kwargs=kwargs)\n\n p.start()\n p.join()\n results = queue.get()\n if isinstance(results, Exception):\n raise RuntimeError(\n f'Spawned subprocess raised {type(results).__name__}, '\n f'check log output above for stack trace.')\n return results\n\n return wrapped_fn\n\n\ndef run_api_experiment(input_features, output_features, data_csv):\n \"\"\"\n Helper method to avoid code repetition in running an experiment\n :param input_features: input schema\n :param output_features: output schema\n :param data_csv: path to data\n :return: None\n \"\"\"\n config = {\n 
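# minimal config for the API round trip: concat combiner with a small fully-connected layer and only 2 training epochs to keep the test fast\n        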
'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {'type': 'concat', 'fc_size': 14},\n 'training': {'epochs': 2}\n }\n\n model = LudwigModel(config)\n output_dir = None\n\n try:\n # Training with csv\n _, _, output_dir = model.train(\n dataset=data_csv,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True\n )\n model.predict(dataset=data_csv)\n\n model_dir = os.path.join(output_dir, 'model')\n loaded_model = LudwigModel.load(model_dir)\n\n # Necessary before call to get_weights() to materialize the weights\n loaded_model.predict(dataset=data_csv)\n\n model_weights = model.model.get_weights()\n loaded_weights = loaded_model.model.get_weights()\n for model_weight, loaded_weight in zip(model_weights, loaded_weights):\n assert np.allclose(model_weight, loaded_weight)\n finally:\n # Remove results/intermediate data saved to disk\n shutil.rmtree(output_dir, ignore_errors=True)\n\n try:\n # Training with dataframe\n data_df = read_csv(data_csv)\n _, _, output_dir = model.train(\n dataset=data_df,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True\n )\n model.predict(dataset=data_df)\n finally:\n shutil.rmtree(output_dir, ignore_errors=True)\n\n\ndef create_data_set_to_use(data_format, raw_data):\n # helper function for generating training and test data with specified format\n # handles all data formats except for hdf5\n # assumes raw_data is a csv dataset generated by\n # tests.integration_tests.utils.generate_data() function\n\n # support for writing to a fwf dataset based on this stackoverflow posting:\n # https://stackoverflow.com/questions/16490261/python-pandas-write-dataframe-to-fixed-width-file-to-fwf\n from tabulate import tabulate\n def to_fwf(df, fname):\n content = tabulate(df.values.tolist(), list(df.columns),\n tablefmt=\"plain\")\n open(fname, \"w\").write(content)\n\n pd.DataFrame.to_fwf = to_fwf\n\n dataset_to_use = None\n\n if data_format == 'csv':\n dataset_to_use = raw_data\n\n elif data_format in {'df', 'dict'}:\n dataset_to_use = pd.read_csv(raw_data)\n if data_format == 'dict':\n dataset_to_use = dataset_to_use.to_dict(orient='list')\n\n elif data_format == 'excel':\n dataset_to_use = replace_file_extension(raw_data, 'xlsx')\n pd.read_csv(raw_data).to_excel(\n dataset_to_use,\n index=False\n )\n\n elif data_format == 'excel_xls':\n dataset_to_use = replace_file_extension(raw_data, 'xls')\n pd.read_csv(raw_data).to_excel(\n dataset_to_use,\n index=False\n )\n\n elif data_format == 'feather':\n dataset_to_use = replace_file_extension(raw_data, 'feather')\n pd.read_csv(raw_data).to_feather(\n dataset_to_use\n )\n\n elif data_format == 'fwf':\n dataset_to_use = replace_file_extension(raw_data, 'fwf')\n pd.read_csv(raw_data).to_fwf(\n dataset_to_use\n )\n\n elif data_format == 'html':\n dataset_to_use = replace_file_extension(raw_data, 'html')\n pd.read_csv(raw_data).to_html(\n dataset_to_use,\n index=False\n )\n\n elif data_format == 'json':\n dataset_to_use = replace_file_extension(raw_data, 'json')\n pd.read_csv(raw_data).to_json(\n dataset_to_use,\n orient='records'\n )\n\n elif data_format == 'jsonl':\n dataset_to_use = replace_file_extension(raw_data, 'jsonl')\n pd.read_csv(raw_data).to_json(\n dataset_to_use,\n orient='records',\n lines=True\n )\n\n elif data_format == 'parquet':\n dataset_to_use = replace_file_extension(raw_data, 'parquet')\n pd.read_csv(raw_data).to_parquet(\n dataset_to_use,\n index=False\n )\n\n elif data_format == 'pickle':\n 
dataset_to_use = replace_file_extension(raw_data, 'pickle')\n pd.read_csv(raw_data).to_pickle(\n dataset_to_use\n )\n\n elif data_format == 'stata':\n dataset_to_use = replace_file_extension(raw_data, 'stata')\n pd.read_csv(raw_data).to_stata(\n dataset_to_use\n )\n\n elif data_format == 'tsv':\n dataset_to_use = replace_file_extension(raw_data, 'tsv')\n pd.read_csv(raw_data).to_csv(\n dataset_to_use,\n sep='\\t',\n index=False\n )\n\n else:\n ValueError(\n \"'{}' is an unrecognized data format\".format(data_format)\n )\n\n return dataset_to_use\n\n\ndef train_with_backend(\n backend,\n config,\n dataset=None,\n training_set=None,\n validation_set=None,\n test_set=None,\n predict=True,\n evaluate=True,\n):\n model = LudwigModel(config, backend=backend)\n output_dir = None\n\n try:\n _, _, output_dir = model.train(\n dataset=dataset,\n training_set=training_set,\n validation_set=validation_set,\n test_set=test_set,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True\n )\n\n if dataset is None:\n dataset = training_set\n\n if predict:\n preds, _ = model.predict(dataset=dataset)\n assert preds is not None\n\n if evaluate:\n _, eval_preds, _ = model.evaluate(dataset=dataset)\n assert eval_preds is not None\n\n return model.model.get_weights()\n finally:\n # Remove results/intermediate data saved to disk\n shutil.rmtree(output_dir, ignore_errors=True)\n"
]
| [
[
"numpy.allclose",
"pandas.DataFrame",
"pandas.read_csv"
]
]
|
taesiri/ANCNet | [
"28b1c887c2016b06c9639f93e79752dcb6ec3a23"
]
| [
"eval_pf_pascal.py"
]
| [
"from __future__ import print_function, division\nimport os\nfrom os.path import exists\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom collections import OrderedDict\n\nfrom lib.model import ImMatchNet\nfrom lib.pf_dataset import PFPascalDataset\nfrom lib.normalization import NormalizeImageDict\nfrom lib.torch_util import BatchTensorToVars, str_to_bool\nfrom lib.point_tnf import corr_to_matches\nfrom lib.eval_util import pck_metric\nfrom lib.dataloader import default_collate\nfrom lib.torch_util import collate_custom\nfrom lib import pf_pascal_dataset as pf\nfrom lib import tools\n\nimport argparse\nimport warnings\nfrom tqdm import tqdm\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n\ndef main():\n print(\"NCNet evaluation script - PF Pascal dataset\")\n\n use_cuda = torch.cuda.is_available()\n\n # Argument parsing\n parser = argparse.ArgumentParser(description=\"Compute PF Pascal matches\")\n parser.add_argument(\"--checkpoint\", type=str, default=\"models/ancnet_86_11.pth.tar\")\n parser.add_argument(\n \"--vis\",\n type=int,\n default=0,\n help=\"visilisation options: 0 calculate pck; 1 visualise keypoint matches and heat maps; 2 display matched key points\",\n )\n parser.add_argument(\"--a\", type=float, default=0.1, help=\"a is the pck@alpha value\")\n parser.add_argument(\n \"--num_examples\", type=int, default=5, help=\"the number of matching examples\"\n )\n\n args = parser.parse_args()\n\n vis = args.vis\n alpha = args.a\n num_examples = args.num_examples\n\n if args.checkpoint is not None and args.checkpoint is not \"\":\n print(\"Loading checkpoint...\")\n checkpoint = torch.load(\n args.checkpoint, map_location=lambda storage, loc: storage\n )\n checkpoint[\"state_dict\"] = OrderedDict(\n [\n (k.replace(\"vgg\", \"model\"), v)\n for k, v in checkpoint[\"state_dict\"].items()\n ]\n )\n\n args = checkpoint[\"args\"]\n else:\n print(\"checkpoint needed.\")\n exit()\n\n cnn_image_size = (args.image_size, args.image_size)\n\n # Create model\n print(\"Creating CNN model...\")\n model = ImMatchNet(\n use_cuda=use_cuda,\n feature_extraction_cnn=args.backbone,\n checkpoint=checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes,\n ncons_channels=args.ncons_channels,\n pss=args.pss,\n noniso=args.noniso,\n )\n model.eval()\n\n print(\"args.dataset_image_path\", args.dataset_image_path)\n # Dataset and dataloader\n collate_fn = default_collate\n csv_file = \"image_pairs/test_pairs.csv\"\n\n dataset = PFPascalDataset(\n csv_file=os.path.join(args.dataset_image_path, csv_file),\n dataset_path=args.dataset_image_path,\n transform=NormalizeImageDict([\"source_image\", \"target_image\"]),\n output_size=cnn_image_size,\n )\n dataset.pck_procedure = \"scnet\"\n\n # Only batch_size=1 is supported for evaluation\n batch_size = 1\n\n dataloader = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=0,\n collate_fn=collate_fn,\n )\n\n batch_tnf = BatchTensorToVars(use_cuda=use_cuda)\n\n # initialize vector for storing results\n stats = {}\n stats[\"point_tnf\"] = {}\n stats[\"point_tnf\"][\"pck\"] = np.zeros((len(dataset), 1))\n\n # Compute pck accuracy\n total = len(dataloader)\n progress = tqdm(dataloader, total=total)\n for i, batch in enumerate(progress):\n batch = batch_tnf(batch)\n batch_start_idx = batch_size * i\n corr4d = model(batch)\n\n # get matches\n # note invert_matching_direction doesnt work at all\n xA, yA, xB, yB, sB = corr_to_matches(\n corr4d, do_softmax=True, 
invert_matching_direction=False\n )\n\n matches = (xA, yA, xB, yB)\n stats = pck_metric(\n batch, batch_start_idx, matches, stats, alpha=alpha, use_cuda=use_cuda\n )\n\n # Print results\n results = stats[\"point_tnf\"][\"pck\"]\n good_idx = np.flatnonzero((results != -1) * ~np.isnan(results))\n print(\"Total: \" + str(results.size))\n print(\"Valid: \" + str(good_idx.size))\n filtered_results = results[good_idx]\n print(\"PCK:\", \"{:.2%}\".format(np.mean(filtered_results)))\n\n test_csv = \"test_pairs.csv\"\n dataset_val = pf.ImagePairDataset(\n transform=NormalizeImageDict([\"source_image\", \"target_image\"]),\n dataset_image_path=args.dataset_image_path,\n dataset_csv_path=os.path.join(args.dataset_image_path, \"image_pairs\"),\n dataset_csv_file=test_csv,\n output_size=cnn_image_size,\n keypoints_on=True,\n original=True,\n test=True,\n )\n loader_test = DataLoader(dataset_val, batch_size=1, shuffle=True, num_workers=4)\n batch_tnf = BatchTensorToVars(use_cuda=use_cuda)\n\n print(\"visualise correlation\")\n tools.visualise_feature(\n model, loader_test, batch_tnf, image_size=cnn_image_size, MAX=num_examples\n )\n print(\"visualise pair\")\n tools.validate(\n model,\n loader_test,\n batch_tnf,\n None,\n image_scale=args.image_size,\n im_fe_ratio=16,\n image_size=cnn_image_size,\n MAX=num_examples,\n display=True,\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"numpy.isnan",
"numpy.mean",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
]
]
|
shangqigao/BayeSR | [
"62ca44e15e7cb04b18ac30807abc6b188c361d5c"
]
| [
"src/bayesr_kernels.py"
]
| [
"'''\nCreated on Apr 30, 2020\n\n@author: Shangqi Gao\n'''\nimport sys\nsys.path.append('../')\nimport os \nimport time\nimport argparse\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom skimage import io, color\nfrom PIL import Image\nfrom scipy.io import loadmat, savemat\n\nfrom common.eval_noshift import multi_compute as measure_noshift\nfrom common.eval_shift import multi_compute as measure_shift\nfrom skimage.metrics import peak_signal_noise_ratio as PSNR\n\nfrom bayesr_model import BayeSR\nfrom args.args_base import *\n\nclass Tester(BayeSR):\n def __init__(self):\n BayeSR.__init__(self, args)\n\n def read_image(self, lr_path, gt_path, ke_path):\n \"\"\"\n Args;\n lr_path: The path of a lr image\n gt_path: The path of ground truth\n Returns:\n lr_img: an image, narray, float32\n gt_img: a label, narray, float32\n \"\"\"\n np.random.seed(seed=0)\n if args.task == 'DEN':\n gt_img = io.imread(gt_path) / 255.\n if args.in_channel == 1:\n gt_img = np.expand_dims(gt_img, axis=2)\n sigma = np.sqrt((args.sigma_read/255)**2 + (args.sigma_shot/255)*gt_img)\n noise = np.random.normal(size=gt_img.shape)*sigma\n lr_img = np.clip(gt_img + noise, 0., 1.)\n else:\n lr_img = io.imread(lr_path) / 255.\n if len(lr_img.shape) == 2:\n lr_img = np.stack([lr_img]*3, axis=2)\n sigma = np.sqrt((args.sigma_read/255)**2 + (args.sigma_shot/255)*lr_img)\n noise = np.random.normal(size=lr_img.shape)*sigma\n lr_img = np.clip(lr_img + noise, 0., 1.)\n if gt_path is not None:\n gt_img = io.imread(gt_path)\n else:\n gt_img = cv2.resize(lr_img, dsize=(0, 0), fx=args.upscale, fy=args.upscale, interpolation=cv2.INTER_LINEAR)\n if len(gt_img.shape) == 2:\n gt_img = np.stack([gt_img]*3, axis=2)\n if ke_path is not None:\n kernel = loadmat(ke_path)['Kernel']\n kernel = np.expand_dims(kernel, axis=2)\n else:\n kernel = None\n \n return lr_img, gt_img, kernel\n \n def flip(self, image):\n images = [image]\n images.append(image[::-1, :, :])\n images.append(image[:, ::-1, :])\n images.append(image[::-1, ::-1, :])\n images = np.stack(images)\n return images\n \n def mean_of_flipped(self, images):\n image = (images[0] + images[1, ::-1, :, :] + images[2, :, ::-1, :] +\n images[3, ::-1, ::-1, :])*0.25\n return image\n \n def rotation(self, images):\n return np.swapaxes(images, 1, 2)\n \n def run_test(self, k):\n test_lr_dir = {'DEN': '{}/{}'.format(args.input_data_dir, args.dataset),\n 'BiSR': '{}/{}/LR_bicubic/X{}'.format(args.input_data_dir, args.dataset, args.upscale),\n 'SySR': '{}/{}/LR_degraded/X{}_kernel{}'.format(args.input_data_dir, args.dataset, args.upscale, k),\n 'ReSR': '{}/{}/LR_mild/X{}'.format(args.input_data_dir, args.dataset, args.upscale),\n 'RWSR': '{}/{}'.format(args.input_data_dir, args.dataset)\n }[args.task]\n test_gt_dir = {'DEN': '{}/{}'.format(args.input_data_dir, args.dataset),\n 'BiSR': '{}/{}/HR'.format(args.input_data_dir, args.dataset),\n 'SySR': '{}/{}/HR'.format(args.input_data_dir, args.dataset),\n 'ReSR': '{}/{}/HR'.format(args.input_data_dir, args.dataset),\n 'RWSR': None\n }[args.task]\n test_ke_dir = '{}/kernel{}'.format(args.input_kernel_dir, k)\n test_gt_ke_dir = args.input_gt_kernel_dir\n img_mode = 'Gray' if args.in_channel == 1 else 'RGB'\n #test_sr_dir = '{}/{}_SSNet_{}_{}_x{}_read{}_shot{}'.format(args.save_dir, args.dataset, img_mode, args.task, args.upscale, args.sigma_read, args.sigma_shot)\n test_sr_dir = '{}/{}_SSNet_{}_{}_x{}_kernel{}'.format(args.save_dir, args.dataset, img_mode, args.task, args.upscale, k) \n #load true kernels\n if 
args.input_gt_kernel_dir is not None:\n gt_kernels = loadmat(os.path.join(test_gt_ke_dir, 'kernels_12.mat'))['kernels']\n gt_kernel = gt_kernels[0, k].astype(np.float64)\n gt_kernel = np.expand_dims(gt_kernel, axis=2)\n else:\n gt_kernel = None\n \n if tf.gfile.Exists(test_sr_dir):\n tf.gfile.DeleteRecursively(test_sr_dir)\n tf.gfile.MakeDirs(test_sr_dir)\n lr_names = sorted(os.listdir(test_lr_dir))\n if test_gt_dir is not None:\n gt_names = sorted(os.listdir(test_gt_dir))\n else:\n gt_names = [None]*len(lr_names)\n if args.input_kernel_dir is not None:\n ke_names = sorted(os.listdir(test_ke_dir))\n else:\n ke_names = [None]*len(lr_names)\n if args.sample_num == -1:\n samples = np.arange(0, len(lr_names))\n else:\n samples = np.arange(args.sample_num - 1, args.sample_num)\n # start to evaluate dataset\n start = time.time()\n with tf.Graph().as_default():\n image_pl = tf.placeholder(tf.float32, shape=(1, 64, 64, args.in_channel))\n output = self.inference(image_pl, is_training=False)\n bayesr_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'SmSpNet')\n bayesr_vars = [v for v in bayesr_vars if 'Discriminator' not in v.name]\n bayesr_saver = tf.train.Saver(bayesr_vars)\n sess = tf.Session()\n bayesr_saver.restore(sess, args.bayesr_checkpoint)\n for number in samples:\n lr_path = os.path.join(test_lr_dir, lr_names[number])\n gt_path = os.path.join(test_gt_dir, gt_names[number]) if gt_names[number] is not None else None\n ke_path = os.path.join(test_ke_dir, ke_names[number]) if ke_names[number] is not None else None \n gt_name = gt_names[number]\n image, label, kernel = self.read_image(lr_path, gt_path, ke_path)\n shape = image.shape\n image_pl = tf.placeholder(tf.float32, shape=(1, shape[0], shape[1], shape[2]))\n input_images = np.expand_dims(image, 0)\n if kernel is not None:\n kernel_pl = tf.placeholder(tf.float32, shape=(1, kernel.shape[0], kernel.shape[1], kernel.shape[2]))\n input_kernels = np.expand_dims(kernel, 0)\n elif gt_kernel is not None:\n kernel_pl = tf.placeholder(tf.float32, shape=(1, gt_kernel.shape[0], gt_kernel.shape[1], gt_kernel.shape[2]))\n input_kernels = np.expand_dims(gt_kernel, 0)\n else:\n kernel_pl, input_kernels = None, None\n output = self.inference(image_pl, kernel_pl, is_training=False)\n if input_kernels is not None:\n feed_dict = {image_pl : input_images, kernel_pl : input_kernels}\n else:\n feed_dict = {image_pl : input_images}\n for i in range(args.repeat_num):\n output_image = sess.run(output, feed_dict)[0]\n img_name = ''.join(lr_names[number].split('.')[:-1])\n img_name = img_name + '.png' if args.repeat_num == 1 else img_name + '_sample{:05d}'.format(i) + '.png'\n sr_img = np.around(output_image*255.0).astype(np.uint8)\n io.imsave(os.path.join(test_sr_dir, img_name), np.squeeze(sr_img)) \n print('saving {}'.format(img_name))\n duration = time.time() - start\n mean_dura = duration / (len(gt_names)*args.repeat_num)\n print(f'Avg_reconstruction_time_per_image: {mean_dura:0.2f}')\n if args.task == 'ReSR' and args.repeat_num == 1:\n measure_shift(test_gt_dir, test_sr_dir)\n elif args.task in ['BiSR', 'SySR'] and args.repeat_num == 1:\n measure_noshift(test_gt_dir, test_sr_dir, args.upscale, 'ycbcr')\n elif args.task == 'DEN' and args.repeat_num == 1:\n measure_noshift(test_gt_dir, test_sr_dir, args.upscale, 'rgb')\n else:\n print('Do not evaluate images!')\n \ndef main(_):\n test = Tester()\n for k in range(12):\n test.run_test(k)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('BayeSR test and evaluation', allow_abbrev=False)\n 
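# register the dataset/experiment/model/hyper-parameter argument groups provided by args.args_base before parsing\n    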
add_dataset_args(parser)\n add_experiment_args(parser)\n add_model_args(parser)\n add_hyperpara_args(parser, parser.parse_args())\n args, unparsed = parser.parse_known_args()\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(args.GPU_ids)\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
]
| [
[
"numpy.random.normal",
"tensorflow.train.Saver",
"tensorflow.gfile.MakeDirs",
"numpy.swapaxes",
"tensorflow.gfile.DeleteRecursively",
"numpy.arange",
"numpy.sqrt",
"numpy.around",
"tensorflow.app.run",
"tensorflow.get_collection",
"numpy.expand_dims",
"tensorflow.gfile.Exists",
"tensorflow.Session",
"numpy.stack",
"tensorflow.placeholder",
"numpy.clip",
"numpy.squeeze",
"numpy.random.seed",
"tensorflow.Graph",
"scipy.io.loadmat"
]
]
|
az123zx123/large_scale_neurons_simulation | [
"a01ec24ec0adb1d17cf15172b22e5df5a5f192ac"
]
| [
"function/plot_raster.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 17 14:30:00 2020\r\nModified on Sat May 9th 2020\r\n\r\n@author: li xiang, Song Mo\r\n\"\"\"\r\n\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef plot_raster(file_name,store_location):\r\n y = []\r\n with open(file_name,'r') as f:\r\n reader = csv.reader(f)\r\n for row in reader:\r\n temp = []\r\n for item in row:\r\n temp.append(float(item))\r\n y.append(temp)\r\n y = np.array(y)\r\n y_plot = (1*(y>0)).tolist()\r\n plt.imshow(y_plot, cmap='Greys')\r\n plt.savefig(store_location+'/blkwht.png', interpolation='nearest')\r\n plt.show()"
]
| [
[
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow"
]
]
|
meliketoy/cellnet.pytorch | [
"f1cf32d0158783d701a31f1a35c3dfbc1ab8e0e3"
]
| [
"3_classifier/BCCD_inference.py"
]
| [
"# ************************************************************\n# Author : Bumsoo Kim, 2018\n# Github : https://github.com/meliketoy/cellnet.pytorch\n#\n# Korea University, Data-Mining Lab\n# Deep Convolutional Network Fine tuning Implementation\n#\n# Description : BCCD_inference.py\n# The main code for BCCD inference test phase of trained model.\n# ***********************************************************\n\nfrom __future__ import print_function, division\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport numpy as np\nimport BCCD_config as cf\nimport os\nimport sys\nimport argparse\nimport csv\nimport operator\nimport progressbar\n\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nfrom PIL import Image\n\nparser = argparse.ArgumentParser(description='Pytorch Cell Classifier Training')\nparser.add_argument('--net_type', default='xception', type=str, help='model')\nparser.add_argument('--depth', default=50, type=int, help='depth of model')\nparser.add_argument('--mode', default='aug', type=str, help='[original / aug]')\nargs = parser.parse_args()\n\nif args.net_type == 'xception' or args.net_type == 'inception':\n in_size = 299\nelse:\n in_size = 224\n\n# Phase 1 : Data Upload\nprint('\\n[Phase 1] : Data Preperation')\n\ndata_dir = cf.test_base\nprint(\"| Preparing %s dataset...\" %(cf.test_base.split(\"/\")[-1]))\n\nuse_gpu = torch.cuda.is_available()\n\n# Set the classes of H1 labels\nH = datasets.ImageFolder(os.path.join(cf.aug_dir+cf.H1_name, 'train'))\nH_classes = H.classes\n\n# Set the classes of Granulocytes labels\nG = datasets.ImageFolder(os.path.join(cf.aug_dir+cf.G_name, 'train'))\nG_classes = G.classes\n\n# Set the classes of Mononuclear cells labels\nM = datasets.ImageFolder(os.path.join(cf.aug_dir+cf.M_name, 'train'))\nM_classes = M.classes\n\nprint(\"| Inferencing for %d classes\" %len(H_classes))\n\n# Phase 2 : Model setup\nprint('\\n[Phase 2] : Model setup')\n\ndef getNetwork(args):\n if (args.net_type == 'alexnet'):\n file_name = 'alexnet'\n elif (args.net_type == 'vggnet'):\n file_name = 'vgg-%s' %(args.depth)\n elif (args.net_type == 'densenet'):\n file_name = 'densenet-%s' %(args.depth)\n elif (args.net_type == 'resnet'):\n file_name = 'resnet-%s' %(args.depth)\n elif (args.net_type == 'inception'):\n file_name = 'inception'\n elif (args.net_type == 'xception'):\n file_name = 'xception'\n else:\n print('[Error]: Network should be either [alexnet / vggnet / resnet]')\n sys.exit(1)\n\n return file_name\n\ndef softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)\n\nif args.mode == 'original':\n gm_name, g_name, m_name = 'GM_BCCD', 'G_BCCD', 'M_BCCD'\nelif args.mode == 'aug':\n gm_name, g_name, m_name = 'AUG_GM', 'AUG_G', 'AUG_M'\nelse:\n print(\"Wrong mode type\")\n sys.exit(1)\n\nprint(\"| Loading checkpoint model for inference phase...\")\nassert os.path.isdir('checkpoint'), '[Error]: No checkpoint directory found!'\nassert os.path.isdir('checkpoint/'+cf.H1_name), '[Error]: No model has been trained on Hierarchy #1 !'\nfile_name = getNetwork(args)\ncheckpoint = torch.load('./checkpoint/'+gm_name+'/'+file_name+'.t7')\nmodel = checkpoint['model']\n\ncheckpoint_G = torch.load('./checkpoint/'+g_name+'/'+file_name+'.t7')\nmodel_G = checkpoint_G['model']\n\ncheckpoint_M = torch.load('./checkpoint/'+m_name+'/'+file_name+'.t7')\nmodel_M = checkpoint_M['model']\n\n# Hiearchical inference\nif use_gpu:\n model.cuda()\n model_G.cuda()\n model_M.cuda()\n cudnn.benchmark = 
True\n\nmodel.eval()\nmodel_G.eval()\nmodel_M.eval()\n\nsample_input = Variable(torch.randn(1,3,in_size,in_size))\nif use_gpu:\n sample_input = sample_input.cuda()\n\nprint(\"\\n[Phase 3] : Score Inference\")\n\ndef is_image(f):\n return f.endswith(\".png\") or f.endswith(\".jpg\") or f.endswith(\".jpeg\")\n\n# Need to add case when original images meanstd appears\n# H1 Transform\nH1_transform = transforms.Compose([\n transforms.CenterCrop(240),\n transforms.Resize(in_size),\n transforms.ToTensor(),\n transforms.Normalize(cf.mean_H, cf.std_H)\n])\n\n# G Transform\nG_transform = transforms.Compose([\n transforms.CenterCrop(240),\n transforms.Resize(in_size),\n transforms.ToTensor(),\n transforms.Normalize(cf.mean_G, cf.std_G)\n])\n\n# M Transform\nM_transform = transforms.Compose([\n transforms.CenterCrop(240),\n transforms.Resize(in_size),\n transforms.ToTensor(),\n transforms.Normalize(cf.mean_M, cf.std_M)\n])\n\nif not os.path.isdir('result'):\n os.mkdir('result')\n\noutput_file = \"./result/\"+cf.test_base.split(\"/\")[-1]+\"_inference.csv\"\n\nerrors = 0\nwith open(output_file, 'wb') as csvfile:\n fields = ['file_name', 'prediction']\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n cor = 0\n tot = 0\n\n for subdir, dirs, files in os.walk(data_dir):\n if len(files) == 0:\n continue\n print(subdir)\n class_cor = 0\n class_tot = 0\n widgets = ['Inference: ', progressbar.Percentage(), ' ', progressbar.Bar(marker='#', left='[', right=']'), ' ', progressbar.ETA(), ' ', progressbar.FileTransferSpeed()]\n pbar = progressbar.ProgressBar(widgets=widgets, maxval = len(files))\n pbar.start()\n progress = 0\n\n for f in files:\n file_path = subdir + os.sep + f\n if (is_image(f)):\n tot += 1\n class_tot += 1\n org_image = Image.open(file_path)#.convert('RGB')\n if H1_transform is not None:\n image = H1_transform(org_image)\n else:\n image = org_image\n inputs = image\n with torch.no_grad():\n inputs = Variable(inputs)\n if use_gpu:\n inputs = inputs.cuda()\n inputs = inputs.view(1, inputs.size(0), inputs.size(1), inputs.size(2)) # add batch dim in the front\n\n outputs = model(inputs)\n softmax_res = softmax(outputs.data.cpu().numpy()[0])\n index, score = max(enumerate(softmax_res), key=operator.itemgetter(1))\n\n inf_class = H_classes[index]\n\n # Ground Truth\n inp = f.split(\"_\")[0]\n\n if inf_class == 'Granulocytes':\n if G_transform is not None:\n image = G_transform(org_image)\n else:\n image = org_image\n inputs = image\n with torch.no_grad():\n inputs = Variable(inputs)\n\n if use_gpu:\n inputs = inputs.cuda()\n inputs = inputs.view(1, inputs.size(0), inputs.size(1), inputs.size(2))\n\n outputs = model_G(inputs)\n softmax_res = softmax(outputs.data.cpu().numpy()[0])\n index, score = max(enumerate(softmax_res), key=operator.itemgetter(1))\n\n inf_class = G_classes[index]\n elif inf_class == 'Mononuclear':\n if M_transform is not None:\n image = M_transform(org_image)\n else:\n image = org_image\n inputs = image\n with torch.no_grad():\n inputs = Variable(inputs)\n\n if use_gpu:\n inputs = inputs.cuda()\n inputs = inputs.view(1, inputs.size(0), inputs.size(1), inputs.size(2))\n\n outputs = model_M(inputs)\n softmax_res = softmax(outputs.data.cpu().numpy()[0])\n index, score = max(enumerate(softmax_res), key=operator.itemgetter(1))\n\n inf_class = M_classes[index]\n\n #print(inp, inf_class)\n pbar.update(progress)\n progress += 1\n\n if (inf_class == inp):\n cor += 1\n class_cor += 1\n #writer.writerow({'file_name': file_path, 'prediction': inf_class}); tot += 1\n pbar.finish()\n 
print(\"Classwise accuracy = %f%%\" %(class_cor/class_tot * 100))\n\nprint(\"Overall accuracy = %f%%\" %(cor/tot * 100))\nprint(cor/tot)\n"
]
| [
[
"torch.autograd.Variable",
"torch.no_grad",
"numpy.exp",
"torch.cuda.is_available",
"torch.load",
"torch.randn"
]
]
|
zlijingtao/DAC20_reconstruction | [
"c928cda1c8e492c05110d6c219c1ed529924e127"
]
| [
"models/vgg_cifar10_binary.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nimport math\nimport torchvision.transforms as transforms\nfrom torch.autograd import Function\nfrom .binarized_modules import get_centroid, get_quantized,DtoA_3bit,AtoD_3bit\n\nclass Binarize(torch.autograd.Function):\n def __init__(self, grain_size, num_bits, M2D, save_path):\n super(Binarize,self).__init__()\n self.grain_size = grain_size #grain size in tuple\n self.M2D = M2D\n self.num_bits = num_bits\n self.save_path = save_path\n def forward(self, input):\n self.save_for_backward(input)\n self.centroid = get_centroid(input, self.grain_size, self.num_bits, self.M2D)\n global ti\n global num_res\n ti += 1\n input_d = (input - self.centroid)\n output = input.clone().zero_()\n self.W = 1-self.M2D\n output = self.W * input_d.sign()\n if ti <=num_res:\n torch.save(self.centroid, self.save_path + '/saved_tensors/centroid{}.pt'.format(ti))\n torch.save(output, self.save_path + '/saved_tensors/deviation{}.pt'.format(ti))\n output = output + self.centroid\n\n return output\n\n def backward(self, grad_output):\n # saved tensors - tuple of tensors with one element\n grad_input = grad_output.clone()\n input, = self.saved_tensors\n grad_input[input.ge(1)] = 0\n grad_input[input.le(-1)] = 0\n return grad_input\n\nclass BinarizeLinear(nn.Linear):\n\n def __init__(self, infeatures, classes, grain_size, num_bits, M2D, save_path):\n super(BinarizeLinear, self).__init__(in_features = infeatures, out_features = classes, bias=True)\n self.grain_size = grain_size\n self.num_bits = num_bits\n self.M2D = M2D\n self.save_path = save_path\n print(\"FClayer: grain_size: %s, num_bits: %d, M2D ratio: %.4f\"% (str(grain_size), num_bits, M2D))\n def forward(self, input):\n weight = Binarize(grain_size = self.grain_size , num_bits = self.num_bits, M2D = self.M2D, save_path = self.save_path)(self.weight)\n output = F.linear(input, weight, self.bias)\n\n return output\n\nclass BinarizeConv2d(nn.Conv2d):\n\n def __init__(self, inplanes, planes, kernel_size, stride, padding, bias, grain_size, num_bits, M2D, save_path):\n super(BinarizeConv2d, self).__init__(in_channels = inplanes, out_channels = planes, kernel_size = kernel_size, stride = stride, padding = padding, bias = bias)\n self.grain_size = grain_size\n self.num_bits = num_bits\n self.M2D = M2D\n self.save_path = save_path\n print(\"Convlayer: grain_size: %s, num_bits: %d, M2D ratio: %.4f\"% (str(grain_size), num_bits, M2D))\n\n def forward(self, input):\n weight = Binarize(grain_size = self.grain_size , num_bits = self.num_bits, M2D = self.M2D, save_path = self.save_path)(self.weight)\n output = F.conv2d(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\n return output\n\nclass VGG_Cifar10_Binary(nn.Module):\n\n def __init__(self, num_classes=10, input_grain_size = (1,1), input_num_bits = 4, input_M2D = 0.0, \n res_grain_size = (1,1), res_num_bits = 4, res_M2D = 0.0, \n output_grain_size = (1,1), output_num_bits = 4, output_M2D = 0.0,\n save_path = './'):\n super(VGG_Cifar10_Binary, self).__init__()\n self.infl_ratio=3;\n self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv_1_3x3 = BinarizeConv2d(3, 128*self.infl_ratio, kernel_size=3, stride=1, padding=1, bias=True, grain_size = input_grain_size, num_bits = input_num_bits, M2D = input_M2D, save_path = save_path)\n self.bn_1 = nn.BatchNorm2d(128*self.infl_ratio)\n self.conv_2 = BinarizeConv2d(128*self.infl_ratio, 128*self.infl_ratio, kernel_size=3, stride=1, padding=1, 
bias=True, grain_size = res_grain_size, num_bits = res_num_bits, M2D = res_M2D, save_path = save_path)\n self.bn_2 = nn.BatchNorm2d(128*self.infl_ratio)\n self.conv_3 = BinarizeConv2d(128*self.infl_ratio, 256*self.infl_ratio, kernel_size=3, stride=1, padding=1, bias=True, grain_size = res_grain_size, num_bits = res_num_bits, M2D = res_M2D, save_path = save_path)\n self.bn_3 = nn.BatchNorm2d(256*self.infl_ratio)\n self.conv_4 = BinarizeConv2d(256*self.infl_ratio, 256*self.infl_ratio, kernel_size=3, stride=1, padding=1, bias=True, grain_size = res_grain_size, num_bits = res_num_bits, M2D = res_M2D, save_path = save_path)\n self.bn_4 = nn.BatchNorm2d(256*self.infl_ratio)\n self.conv_5 = BinarizeConv2d(256*self.infl_ratio, 512*self.infl_ratio, kernel_size=3, stride=1, padding=1, bias=True, grain_size = res_grain_size, num_bits = res_num_bits, M2D = res_M2D, save_path = save_path)\n self.bn_5 = nn.BatchNorm2d(512*self.infl_ratio)\n self.conv_6 = BinarizeConv2d(512*self.infl_ratio, 512, kernel_size=3, stride=1, padding=1, bias=True, grain_size = res_grain_size, num_bits = res_num_bits, M2D = res_M2D, save_path = save_path)\n self.bn_6 = nn.BatchNorm2d(512)\n \n self.linear_7 = BinarizeLinear(512 * 4 * 4, 1024, grain_size = output_grain_size, num_bits = output_num_bits, M2D = output_M2D, save_path = save_path)\n self.bn_7 = nn.BatchNorm1d(1024)\n \n self.linear_8 = BinarizeLinear(1024, 1024, grain_size = output_grain_size, num_bits = output_num_bits, M2D = output_M2D, save_path = save_path)\n self.bn_8 = nn.BatchNorm1d(1024)\n \n self.linear_9 = BinarizeLinear(1024, num_classes, grain_size = output_grain_size, num_bits = output_num_bits, M2D = output_M2D, save_path = save_path)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n #m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n init.kaiming_normal(m.weight)\n m.bias.data.zero_()\n def forward(self, x):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n if hasattr(m.weight, 'data'):\n m.weight.data.clamp_(min=0.01)\n x = self.conv_1_3x3(x)\n x = self.bn_1(x)\n x = AtoD_3bit(2.0, 0)(x)\n x = self.conv_2(x)\n x = self.maxpool(x)\n x = self.bn_2(x)\n x = AtoD_3bit(2.0, 0)(x)\n x = self.conv_3(x)\n x = self.bn_3(x)\n x = AtoD_3bit(2.0, 0)(x)\n x = self.conv_4(x)\n x = self.maxpool(x)\n x = self.bn_4(x)\n x = AtoD_3bit(2.0, 0)(x)\n x = self.conv_5(x)\n x = self.bn_5(x)\n x = AtoD_3bit(2.0, 0)(x)\n x = self.conv_6(x)\n x = self.maxpool(x)\n x = self.bn_6(x)\n x = AtoD_3bit(2.0, 0)(x)\n x = x.view(-1, 512 * 4 * 4)\n \n x = self.linear_7(x)\n x = self.bn_7(x)\n x = AtoD_3bit(2.0, 0)(x)\n x = self.linear_8(x)\n x = self.bn_8(x)\n x = AtoD_3bit(2.0, 0)(x)\n x = self.linear_9(x)\n return x\n\n\ndef vgg_cifar10_binary(num_classes, input_grain_size = (1,1), input_num_bits = 4, input_M2D = 0.0, res_grain_size = (1,1), res_num_bits = 4, res_M2D = 0.0, output_grain_size = (1,1), output_num_bits = 4, output_M2D = 0.0, save_path = './', **kwargs):\n global ti\n ti = 0\n global num_res\n num_res = 9\n return VGG_Cifar10_Binary(num_classes, input_grain_size, input_num_bits, input_M2D, res_grain_size, res_num_bits, res_M2D, output_grain_size, output_num_bits, output_M2D, save_path)\n"
]
| [
[
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal",
"torch.nn.functional.linear",
"torch.nn.BatchNorm1d",
"torch.nn.functional.conv2d"
]
]
|
fyancy/SSMN | [
"9a807d7e926fc6ff347cfd19ffbb6462b6a49dea"
]
| [
"Comparison/venv/Include/DaNN_main.py"
]
| [
"import torch\nimport random\nimport numpy as np\nimport torch.nn as nn\nimport mmd\nimport visdom\nimport os\nimport matplotlib.pyplot as plt\nfrom models.DaNN_model import DaNN\nfrom proto_data_utils.Data_generator_normalize import data_generate\nfrom proto_data_utils.train_utils import weights_init, weights_init2, set_seed\nfrom proto_data_utils.my_utils import umap_fun2, t_sne\nimport time\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nvis = visdom.Visdom(env='yancy_env')\ninitialization = weights_init\n# initialization = weights_init2\ngenerator = data_generate()\nDIM = 2048 # 2048\n# DIM = 1024 # 2048\n# BATCH_SIZE = 5\n# -------------hyper-params of DaNN----------------\n# 实验中发现,optimizer选择Adam比SGD效果更好\n# the hyper parameters follow the original paper as follows:\nLAMBDA = 0.25 # 0.25\nGAMMA = 10 ** 2 # 10 ** 3\nLEARNING_RATE = 0.02\nMOMEMTUM = 0.05\nL2_WEIGHT = 0.003\n\nCHECK_EPOCH = 10\nTr_EPOCHS = 200\nTe_EPOCHS = 1\nLoad = [3, 0]\n\n\n# -------------------------------------------------\n\n\ndef mmd_loss(x_src, x_tar):\n return mmd.mix_rbf_mmd2(x_src, x_tar, [GAMMA])\n\n\ndef compute_acc(out, y):\n \"\"\"\n :param out: the result of classifier, didn't go through softmax layer\n :param y: labels\n :return: accuracy\n \"\"\"\n prob = nn.functional.log_softmax(out, dim=-1)\n pre = torch.max(prob, dim=1)[1]\n # print('y_label:\\n', y.cpu().numpy())\n # print('predicted:\\n', pre.cpu().numpy())\n acc = torch.eq(pre, y).float().mean().cpu().item()\n return acc\n\n\ndef train(net, save_path, train_x, train_y, tar_x, tar_y, ls_threshold,\n n_way=3, shot=3, fine_tune=False):\n if fine_tune:\n net.load_state_dict(torch.load(save_path))\n print('load the model!')\n net.train()\n optimizer = torch.optim.Adam(net.parameters())\n print('Adam-optimizer!')\n # optimizer = torch.optim.SGD(net.parameters(), lr=LEARNING_RATE,\n # momentum=MOMEMTUM, weight_decay=L2_WEIGHT)\n # optimizer = torch.optim.SGD(net.parameters(), lr=0.10, momentum=0.90)\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)\n criterion = nn.CrossEntropyLoss()\n tar_x_tr = tar_x[:, :train_x.shape[1]]\n tar_y_tr = tar_y[:, :train_x.shape[1]]\n # tar_x_tr = tar_x\n # tar_y_tr = tar_y\n\n n_examples = train_x.shape[1]\n n_episodes = n_examples // shot\n n_epochs = Tr_EPOCHS # 100\n tar_tr_num = tar_x_tr.shape[1]\n tar_num = tar_x.shape[1]\n\n print('n_way=>', n_way, 'n_shot/bsize=>', shot)\n print('train_x Shape:', train_x.shape)\n print('target_x for training:', tar_x_tr.shape)\n print('target_x for validation:', tar_x.shape)\n print(\"---------------------Training----------------------\\n\")\n print('Start to train! 
{} epochs, {} episodes, {} steps.\\n'.format(n_epochs, n_episodes,\n n_episodes * n_epochs))\n avg_ls = torch.zeros([n_episodes]).to(device)\n avg_cls = torch.zeros([n_episodes]).to(device)\n avg_mls = torch.zeros([n_episodes]).to(device)\n counter = 0\n\n for ep in range(n_epochs):\n count = 0\n for epi in range(n_episodes):\n x, y = train_x[:, count:count + shot], train_y[:, count:count + shot]\n selected = torch.randperm(tar_tr_num)[:shot]\n x_tar, y_tar = tar_x_tr[:, selected], tar_y_tr[:, selected]\n selected = torch.randperm(tar_num)[:shot]\n x_tar_val, y_tar_val = tar_x[:, selected], tar_y[:, selected]\n\n x, y = x.to(device), y.reshape(-1).to(device)\n x_tar, y_tar = x_tar.to(device), y_tar.reshape(-1).to(device)\n x_tar_val, y_tar_val = x_tar_val.to(device), y_tar_val.reshape(-1).to(device)\n\n count += shot\n\n y_src, x_src_mmd = net.forward(x=x)\n _, x_tar_mmd = net.forward(x=x_tar)\n\n outputs = y_src\n loss_c = criterion(outputs, y)\n loss_mmd = mmd_loss(x_src_mmd, x_tar_mmd)\n loss = loss_c + LAMBDA * loss_mmd\n acc = compute_acc(outputs, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n net.eval()\n with torch.no_grad():\n y_tar_out, _ = net.forward(x=x_tar_val)\n net.train()\n\n loss_tar_c = criterion(y_tar_out, y_tar_val)\n acc_tar = compute_acc(y_tar_out, y_tar_val)\n\n avg_ls[epi] = loss\n avg_cls[epi] = loss_c\n avg_mls[epi] = loss_mmd\n\n if (epi + 1) % 2 == 0:\n vis.line(Y=[[loss.cpu().item(), loss_tar_c.cpu().item(),\n loss_c.cpu().item(), loss_mmd.cpu().item()]], X=[counter],\n update=None if counter == 0 else 'append', win='DaNN_Loss',\n opts=dict(legend=['src_loss', 'tar_loss_cls', 'src_loss_cls', 'src_loss_mmd'],\n title='DaNN_Loss'))\n vis.line(Y=[[acc, acc_tar]], X=[counter],\n update=None if counter == 0 else 'append', win='DaNN_Acc',\n opts=dict(legend=['src_acc', 'tar_acc'], title='DaNN_Acc'))\n counter += 1\n\n if (epi + 1) % 10 == 0:\n print('[epoch {}/{}, episode {}/{}] => loss: {:.8f}, acc: {:.8f}'.format(\n ep + 1, n_epochs, epi + 1, n_episodes, loss.cpu().item(), acc))\n\n scheduler.step(epoch=ep // 2)\n ls_ = torch.mean(avg_ls).cpu().item()\n ls_c = avg_cls.mean().cpu().item()\n ls_m = avg_mls.mean().cpu().item()\n print('[epoch {}/{}] => train_loss: {:.8f}, c_loss:{:.8f}, mmd_loss:{:.8f}\\n'.format(\n ep + 1, n_epochs, ls_, ls_c, ls_m))\n\n if ep + 1 >= CHECK_EPOCH and (ep + 1) % 5 == 0:\n order = input(\"Shall we stop training now? 
(Epoch {}) Y/N\\n\".format(ep + 1))\n order = order is 'Y' or order is 'y'\n else:\n order = False\n\n if ls_ <= ls_threshold and order:\n print('[ep %d] => loss = %.8f < %f' % (ep + 1, ls_, ls_threshold))\n break\n elif order:\n print(\"Stop manually.\\n\")\n break\n\n print('train finished!')\n torch.save(net.state_dict(), save_path)\n print('This model saved at', save_path)\n\n\ndef test(save_path, test_x, test_y, src_x=None, scenario='DaNN_TEST',\n eval_=False, n_way=3, shot=3):\n net = DaNN(n_class=n_way, DIM=DIM).to(device)\n criterion = nn.CrossEntropyLoss()\n net.load_state_dict(torch.load(save_path))\n print('load the model successfully!')\n net = net.eval() if eval_ else net.train()\n print('Model.eval() is:', not net.training)\n\n n_examples = test_x.shape[1]\n n_episodes = n_examples // shot\n n_epochs = Te_EPOCHS\n\n print('n_way=>', n_way, 'n_shot/bsize=>', shot)\n print('test_x Shape:', test_x.shape)\n print('test_y Shape:', test_y.shape)\n if src_x is not None:\n print('src_data set Shape:', src_x.shape)\n print(\"---------------------Testing----------------------\\n\")\n print('Start to train! {} epochs, {} episodes, {} steps.\\n'.format(n_epochs, n_episodes,\n n_episodes * n_epochs))\n avg_acc_ = 0.\n avg_loss_ = 0.\n counter = 0\n avg_time = []\n\n for ep in range(n_epochs):\n avg_acc = 0.\n avg_loss = 0.\n count = 0\n time_ep = []\n for epi in range(n_episodes):\n x, y = test_x[:, count:count + shot], test_y[:, count:count + shot]\n x, y = x.to(device), y.contiguous().view(-1).to(device)\n count += shot\n if src_x is not None:\n selected = torch.randperm(src_x.shape[1])[:shot]\n s_x = src_x[:, selected].to(device)\n\n x, y = x.to(device), y.contiguous().view(-1).to(device)\n t0 = time.time()\n with torch.no_grad():\n y_src, f_t = net.forward(x=x)\n if src_x is not None:\n _, f_s = net.forward(x=s_x)\n t1 = time.time()\n\n outputs = y_src\n loss_c = criterion(outputs, y)\n loss = loss_c # + LAMBDA * loss_mmd\n acc = compute_acc(outputs, y)\n avg_loss += loss.cpu().item()\n avg_acc += acc\n time_ep.append(t1 - t0)\n\n vis.line(Y=[[acc, loss.cpu().item()]], X=[counter],\n update=None if counter == 0 else 'append', win=scenario,\n opts=dict(legend=['accuracy', 'loss'], title=scenario))\n counter += 1\n if (epi + 1) % 5 == 0 and shot == 10:\n if src_x is not None:\n f = torch.cat((f_s, f_t), dim=0) # [n, dim]\n print('CW2SQ labels used for t-sne!')\n labels = ['NC-s', 'IF-s', 'OF-s', 'NC-t', 'IF-t', 'OF-t'] # CW2SQ\n # print('CW2SA labels used for t-sne!')\n # labels = ['NC-s', 'OF-s', 'ReF-s', 'NC-t', 'OF-t', 'ReF-t'] # CW2SA\n umap_fun2(f.cpu().detach().numpy(), shot=shot,\n labels=labels, n_dim=2, path=save_path)\n else:\n t_sne(input_data=f_t.cpu().detach().numpy(),\n input_label=y.cpu().detach().numpy(), classes=n_way, path=save_path)\n\n avg_acc /= n_episodes\n avg_loss /= n_episodes\n avg_acc_ += avg_acc\n avg_loss_ += avg_loss\n avg_time.append(np.sum(time_ep))\n print('[epoch {}/{}] => avg_time: {:.4f}\\tavg_loss: {:.6f}\\tavg_acc: {:.4f}'.\n format(ep + 1, n_epochs, avg_time[-1], avg_loss, avg_acc))\n avg_acc_ /= n_epochs\n avg_loss_ /= n_epochs\n # vis.text('Average Accuracy: {:.6f} Average Loss:{:.6f}'.format(avg_acc_, avg_loss_), win='Test result')\n vis.text(text='Eval:{} Average Accuracy: {:.6f}'.format(not net.training, avg_acc_),\n win='Eval:{} Test result'.format(not net.training))\n print('\\n------------------------Average Result----------------------------')\n print('Average Test Accuracy: {:.4f}'.format(avg_acc_))\n print('Average Test Loss: 
{:.6f}'.format(avg_loss_))\n print('Average Test Time: {:.4f}\\n'.format(np.mean(avg_time)))\n\n\ndef main(save_path, ls_threshold=0.001, n_way=3, shot=3,\n split=10, f_tune=False, ob_domain=False):\n print('%d GPU is available.' % torch.cuda.device_count())\n set_seed(0)\n net = DaNN(n_class=n_way, DIM=DIM).to(device)\n net.apply(weights_init)\n # net.apply(weights_init2) # 教训:CW2SQ时,对CNN不推荐使用手动初始化\n # split = 50 if ob_domain else split\n\n # CW: NC, IF, OF, RoF\n # train_x, train_y, test_x, test_y = generator.Cs_4way(way=n_way, examples=50, split=split,\n # normalize=True, data_len=DIM,\n # label=True, shuffle=True)\n # SQ 7\n # train_x, train_y, test_x, test_y = generator.SQ_37way(way=n_way, examples=100, split=split,\n # shuffle=False, data_len=DIM,\n # normalize=True, label=True)\n\n # train_x, train_y, _, _ = generator.CW_10way(way=way, order=Load[0], examples=200, split=split, normalize=True,\n # data_len=DIM, SNR=None, label=True)\n # _, _, test_x, test_y = generator.CW_10way(way=way, order=Load[1], examples=200, split=0, normalize=True,\n # data_len=DIM, SNR=None, label=True)\n\n # CW2SA\n # train_x, train_y, _, _ = generator.CW_cross(way=n_way, examples=100, split=split, normalize=True,\n # data_len=DIM, SNR=None, label=True, set='sa')\n # _, _, test_x, test_y = generator.SA_37way(examples=200, split=0, way=n_way, data_len=DIM,\n # normalize=True, label=True)\n\n # CW2SQ\n train_x, train_y, _, _ = generator.CW_cross(way=n_way, examples=50, split=split, normalize=True,\n data_len=DIM, SNR=None, label=True, set='sq')\n _, _, test_x, test_y = generator.SQ_37way(examples=100, split=0, way=n_way, data_len=DIM,\n normalize=True, label=True)\n # train_x, train_y, test_x, test_y = generator.EB_3_13way(examples=200, split=split, way=n_way, data_len=DIM,\n # order=3, normalize=True, label=True)\n\n n_class = train_x.shape[0]\n assert n_class == n_way\n tar_x = test_x\n tar_y = test_y\n print('train_x Shape:', train_x.shape)\n print('test_x Shape:', test_x.shape)\n print('tar_x Shape:', tar_x.shape)\n train_x, train_y = torch.from_numpy(train_x).float(), torch.from_numpy(train_y).long()\n tar_x, tar_y = torch.from_numpy(tar_x).float(), torch.from_numpy(tar_y).long()\n test_x, test_y = torch.from_numpy(test_x).float(), torch.from_numpy(test_y).long()\n\n order = input(\"Train or not? Y/N\\n\")\n if order == 'Y' or order == 'y':\n if os.path.exists(save_path) and not f_tune:\n print('The training file exists:%s' % save_path)\n else:\n train(net=net, save_path=save_path, train_x=train_x, train_y=train_y, tar_x=tar_x, tar_y=tar_y,\n ls_threshold=ls_threshold, n_way=n_way, shot=shot, fine_tune=f_tune)\n\n order = input(\"Test or not? Y/N\\n\")\n if order == 'Y' or order == 'y':\n if os.path.exists(save_path):\n train_x = torch.cat((train_x, train_x), dim=1) if ob_domain else None\n # shot = 50 if ob_domain else shot\n\n test(save_path=save_path, test_x=test_x, test_y=test_y, src_x=train_x,\n n_way=n_way, shot=shot, eval_=True)\n test(save_path=save_path, test_x=test_x, test_y=test_y, src_x=train_x,\n n_way=n_way, shot=shot, eval_=False)\n else:\n print('The path does NOT exist! 
Check it please:%s' % save_path)\n\n\nif __name__ == '__main__':\n # save_dir = r'F:\\py_save_models\\Domain-adaptation\\DaNN_CW'\n # model_name = 'DaNN_CW3to0_10way_5shot_10_1' # epoch 60-100\n # Load = [3, 0]\n\n # save_dir = r'F:\\py_save_models\\Domain-adaptation\\others_CW2SQ'\n # model_name = 'DaNN_CW2SQ_3way_5shot_100_1' # epoch 60-100\n\n # save_dir = r'F:\\py_save_models\\Other_models_EB'\n # model_name = 'DaNN_EB_13way_5shot_30_00' # epoch 60-100\n\n # 2020.7.29\n save_dir = r'C:\\Users\\20996\\Desktop\\SSMN_revision\\training_model\\DaNN'\n path = os.path.join(save_dir, r'DaNN_C2S_10s')\n print('The model path\\n', path)\n # path = os.path.join(save_dir, model_name)\n way = 3\n # n_shot = 5 # for time computation\n n_shot = 10 # for t-SNE\n split = 40 # split the data, split=>train, the rest=>test\n\n if not os.path.exists(save_dir):\n print('Root dir [{}] does not exist.'.format(save_dir))\n exit()\n else:\n print('File exist?', os.path.exists(path))\n\n # main(save_path=path, n_way=way, shot=n_shot, split=split,\n # ls_threshold=1e-4, f_tune=False, ob_domain=False)\n # main(save_path=path, n_way=way, shot=n_shot, split=split,\n # ls_threshold=1e-4, f_tune=True, ob_domain=False)\n main(save_path=path, n_way=way, shot=n_shot, split=split,\n ls_threshold=1e-4, f_tune=False, ob_domain=True)\n\n plt.show()\n"
]
| [
[
"torch.zeros",
"torch.cat",
"torch.eq",
"torch.max",
"numpy.sum",
"torch.no_grad",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.randperm",
"numpy.mean",
"torch.nn.functional.log_softmax",
"torch.cuda.device_count",
"torch.from_numpy",
"torch.mean",
"torch.cuda.is_available",
"torch.load",
"matplotlib.pyplot.show",
"torch.nn.CrossEntropyLoss"
]
]
|
facexteam/pytorch-cifar | [
"48abfba662dc41b6f35b70f54f543af658f56be8"
]
| [
"tools/fix_fc_cosine_and_angles.py"
]
| [
"#!/usr/bin/env python\n# maintainer: zhaoyafei (https://github.com/walkoncross, [email protected])\n\nfrom __future__ import print_function\n\nimport os\nimport os.path as osp\nimport numpy as np\nimport torch\n\nimport shutil\n\n\ndef fix_train_fc_log(fname, rename=False, verbose=False):\n fn, ext = osp.splitext(fname)\n\n old_fn = fname\n\n if rename:\n fname = fn + '-unfixed' + ext\n if osp.exists(fname):\n i = 0\n while True:\n i+=1\n fname = fn + '-unfixed' + str(i) + ext\n if not osp.exists(fname):\n break\n\n shutil.move(old_fn, fname)\n\n print('\\n===> Rename {} into {}'.format(old_fn, fname))\n\n fixed_fn = old_fn\n else:\n fixed_fn = fn + '-fixed' + ext\n if osp.exists(fixed_fn):\n i = 0\n while True:\n i += 1\n fixed_fn = fn + '-fixed' + str(i) + ext\n if not osp.exists(fixed_fn):\n break\n\n print('\\n===> Save fixed results into ', fixed_fn)\n\n fp = open(fname, 'r')\n fp_out = open(fixed_fn, 'w')\n\n line_cnt = 0\n\n # fc_log_format = '{epoch}\\t{avg_fc_cos_max}\\t{avg_fc_ang_min}\\t{fc_cos_max}\\t{fc_ang_min}\\t{fc_cos_mat}\\t{fc_ang_mat}\\t{fc_wt}'\n fc_log_format = ''\n log_keys = []\n\n fixed_dict = {}\n\n for line in fp:\n line_cnt += 1\n if line_cnt == 1:\n fc_log_format = line\n splits = fc_log_format.strip().split('\\t')\n\n for val in splits:\n log_keys.append(val[1:-1])\n\n fp_out.write(line)\n else:\n line_dict = {}\n line_splits = line.strip().split('\\t')\n\n for i, key in enumerate(log_keys):\n line_dict[key] = line_splits[i]\n\n epoch = line_dict['epoch']\n\n fc_cos_mat = np.mat(\n line_dict['fc_cos_mat'], dtype=np.float32).reshape(10, -1)\n fc_cos_mat = torch.from_numpy(fc_cos_mat)\n fc_ang_mat = np.mat(\n line_dict['fc_ang_mat'], dtype=np.float32).reshape(10, -1)\n fc_ang_mat = torch.from_numpy(fc_ang_mat)\n\n fc_cos_mat2 = fc_cos_mat - torch.eye(fc_cos_mat.shape[0])*10\n fc_cos_max, pos = fc_cos_mat2.max(dim=0)\n fc_ang_min = fc_ang_mat[pos].diag()\n\n avg_fc_cos_max = fc_cos_max.mean().item()\n avg_fc_ang_min = fc_ang_min.mean().item()\n\n if verbose:\n print('\\n===> epoch: ', epoch)\n print('---> fixed fc_cos_max: ', fc_cos_max)\n print('---> fixed fc_ang_min: ', fc_ang_min)\n print('---> fixed avg_fc_cos_max: ', avg_fc_cos_max)\n print('---> fixed avg_fc_ang_min: ', avg_fc_ang_min)\n\n line_dict['fc_cos_max'] = fc_cos_max.tolist()\n line_dict['fc_ang_min'] = fc_ang_min.tolist()\n line_dict['avg_fc_cos_max'] = avg_fc_cos_max\n line_dict['avg_fc_ang_min'] = avg_fc_ang_min\n\n fixed_dict[epoch] = {\n 'avg_fc_cos_max': avg_fc_cos_max,\n 'avg_fc_ang_min': avg_fc_ang_min\n }\n\n write_line = fc_log_format.format(**line_dict)\n fp_out.write(write_line)\n\n fp.close()\n fp_out.close()\n\n return fixed_dict\n\n\ndef fix_train_loss_log(fname, fixed_dict, rename=False):\n fn, ext = osp.splitext(fname)\n\n old_fn = fname\n\n if rename:\n fname = fn + '-unfixed' + ext\n if osp.exists(fname):\n i = 0\n while True:\n i += 1\n fname = fn + '-unfixed' + str(i) + ext\n if not osp.exists(fname):\n break\n\n shutil.move(old_fn, fname)\n\n print('\\n===> Rename {} into {}'.format(old_fn, fname))\n\n fixed_fn = old_fn\n else:\n fixed_fn = fn + '-fixed' + ext\n if osp.exists(fixed_fn):\n i = 0\n while True:\n i += 1\n fixed_fn = fn + '-fixed' + str(i) + ext\n if not osp.exists(fixed_fn):\n break\n\n print('\\n===> Save fixed results into ', fixed_fn)\n\n fp = open(fname, 'r')\n fp_out = open(fixed_fn, 'w')\n\n line_cnt = 0\n\n # fc_log_format = '{epoch}\\t{avg_fc_cos_max}\\t{avg_fc_ang_min}\\t{fc_cos_max}\\t{fc_ang_min}\\t{fc_cos_mat}\\t{fc_ang_mat}\\t{fc_wt}'\n 
fc_log_format = ''\n log_keys = []\n\n for line in fp:\n line_cnt += 1\n if line_cnt == 1:\n fc_log_format = line\n splits = fc_log_format.strip().split('\\t')\n\n for val in splits:\n log_keys.append(val[1:-1])\n\n fp_out.write(line)\n else:\n line_dict = {}\n line_splits = line.strip().split('\\t')\n\n for i, key in enumerate(log_keys):\n line_dict[key] = line_splits[i]\n\n epoch = line_dict['epoch']\n line_dict['avg_fc_cos_max'] = fixed_dict[epoch]['avg_fc_cos_max']\n line_dict['avg_fc_ang_min'] = fixed_dict[epoch]['avg_fc_ang_min']\n\n write_line = fc_log_format.format(**line_dict)\n fp_out.write(write_line)\n\n fp.close()\n fp_out.close()\n\n\nif __name__ == '__main__':\n fn_fc_log = './train-last-fc.txt'\n fn_loss_log = './train-loss.txt'\n\n fixed_dict = fix_train_fc_log(fn_fc_log, True)\n\n if osp.exists(fn_loss_log):\n fix_train_loss_log(fn_loss_log, fixed_dict, True)\n"
]
| [
[
"numpy.mat",
"torch.eye",
"torch.from_numpy"
]
]
|
umanlp/DS-TOD | [
"d0821a05a2a9fd1639a5453b5d4d896d607c53df"
]
| [
"downstream/models/BERT_DST_Picklist.py"
]
| [
"import os.path\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.nn import CrossEntropyLoss\nfrom torch.nn import CosineEmbeddingLoss\nimport numpy as np\n\nfrom transformers import *\n\ndef _gelu(x):\n \"\"\" Original Implementation of the gelu activation function in Google Bert repo when initialy created.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\nclass BeliefTracker(nn.Module):\n def __init__(self, args):\n super(BeliefTracker, self).__init__()\n \n self.args = args\n self.n_gpu = args[\"n_gpu\"]\n self.hidden_dim = args[\"hdd_size\"]\n self.rnn_num_layers = args[\"num_rnn_layers\"]\n self.zero_init_rnn = args[\"zero_init_rnn\"]\n self.num_direct = 2 if self.args[\"bidirect\"] else 1\n self.num_labels = [len(v) for k, v in args[\"unified_meta\"][\"slots\"].items()]\n self.num_slots = len(self.num_labels)\n self.tokenizer = args[\"tokenizer\"]\n \n self.slots = [k for k, v in self.args[\"unified_meta\"][\"slots\"].items()]\n self.slot_value2id_dict = self.args[\"unified_meta\"][\"slots\"]\n self.slot_id2value_dict = {}\n for k, v in self.slot_value2id_dict.items():\n self.slot_id2value_dict[k] = {vv: kk for kk, vv in v.items()}\n\n #print(\"self.num_slots\", self.num_slots)\n\n ### Utterance Encoder\n self.utterance_encoder = args[\"model_class\"].from_pretrained(self.args[\"model_name_or_path\"], cache_dir = args[\"cache_dir\"])\n \n self.bert_output_dim = args[\"config\"].hidden_size\n #self.hidden_dropout_prob = self.utterance_encoder.config.hidden_dropout_prob\n \n if self.args[\"fix_encoder\"]:\n print(\"[Info] Utterance Encoder does not requires grad...\")\n for p in self.utterance_encoder.parameters():\n p.requires_grad = False\n\n ### slot, slot-value Encoder (not trainable)\n self.sv_encoder = args[\"model_class\"].from_pretrained(self.args[\"model_name_or_path\"], cache_dir = args[\"cache_dir\"])\n \n print(\"[Info] SV Encoder does not requires grad...\")\n for p in self.sv_encoder.parameters():\n p.requires_grad = False\n\n #self.slot_lookup = nn.Embedding(self.num_slots, self.bert_output_dim)\n self.value_lookup = nn.ModuleList([nn.Embedding(num_label, self.bert_output_dim) for num_label in self.num_labels])\n\n ### RNN Belief Tracker\n #self.nbt = None\n #self.linear = nn.Linear(self.hidden_dim, self.bert_output_dim)\n #self.layer_norm = nn.LayerNorm(self.bert_output_dim)\n \n ### Classifier\n self.nll = CrossEntropyLoss(ignore_index=-1)\n\n ### Etc.\n #self.dropout = nn.Dropout(self.hidden_dropout_prob)\n \n ### My Add\n self.project_W_1 = nn.ModuleList([nn.Linear(self.bert_output_dim, self.bert_output_dim) \\\n for _ in range(self.num_slots)])\n self.project_W_2 = nn.ModuleList([nn.Linear(2*self.bert_output_dim, self.bert_output_dim) \\\n for _ in range(self.num_slots)])\n self.project_W_3 = nn.ModuleList([nn.Linear(self.bert_output_dim, 1) \\\n for _ in range(self.num_slots)])\n \n if self.args[\"gate_supervision_for_dst\"]:\n self.gate_classifier = nn.Linear(self.bert_output_dim, 2)\n \n self.start_token = self.tokenizer.cls_token if \"bert\" in self.args[\"model_type\"] else self.tokenizer.bos_token\n self.sep_token = self.tokenizer.sep_token if \"bert\" in self.args[\"model_type\"] else self.tokenizer.eos_token\n \n ## Prepare Optimizer\n def 
get_optimizer_grouped_parameters(model):\n param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01,\n 'lr': args[\"learning_rate\"]},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,\n 'lr': args[\"learning_rate\"]},\n ]\n return optimizer_grouped_parameters\n\n if self.n_gpu == 1:\n optimizer_grouped_parameters = get_optimizer_grouped_parameters(self)\n else:\n optimizer_grouped_parameters = get_optimizer_grouped_parameters(self.module)\n\n \n self.optimizer = AdamW(optimizer_grouped_parameters,\n lr=args[\"learning_rate\"],)\n #warmup=args[\"warmup_proportion\"])\n #t_total=t_total)\n \n self.initialize_slot_value_lookup()\n \n def optimize(self):\n self.loss_grad.backward()\n clip_norm = torch.nn.utils.clip_grad_norm_(self.parameters(), self.args[\"grad_clip\"])\n self.optimizer.step()\n \n \n def initialize_slot_value_lookup(self, max_seq_length=32):\n\n self.sv_encoder.eval()\n \n label_ids = []\n for dslot, value_dict in self.args[\"unified_meta\"][\"slots\"].items():\n label_id = []\n value_dict_rev = {v:k for k, v in value_dict.items()}\n for i in range(len(value_dict)):\n label = value_dict_rev[i]\n label = \" \".join([i for i in label.split(\" \") if i != \"\"])\n\n label_tokens = [self.start_token] + self.tokenizer.tokenize(label) + [self.sep_token]\n label_token_ids = self.tokenizer.convert_tokens_to_ids(label_tokens)\n label_len = len(label_token_ids)\n\n label_padding = [0] * (max_seq_length - len(label_token_ids))\n label_token_ids += label_padding\n assert len(label_token_ids) == max_seq_length\n label_id.append(label_token_ids)\n \n label_id = torch.tensor(label_id).long()\n label_ids.append(label_id)\n\n for s, label_id in enumerate(label_ids):\n inputs = {\"input_ids\":label_id, \"attention_mask\":(label_id > 0).long()}\n \n if self.args[\"sum_token_emb_for_value\"]:\n hid_label = self.utterance_encoder.embeddings(input_ids=label_id).sum(1)\n else:\n if \"bert\" in self.args[\"model_type\"]:\n hid_label = self.sv_encoder(**inputs)[0]\n hid_label = hid_label[:, 0, :]\n elif self.args[\"model_type\"] == \"gpt2\":\n hid_label = self.sv_encoder(**inputs)[0]\n hid_label = hid_label.mean(1)\n elif self.args[\"model_type\"] == \"dialogpt\":\n transformer_outputs = self.sv_encoder.transformer(**inputs)[0]\n hid_label = transformer_outputs.mean(1)\n \n hid_label = hid_label.detach()\n self.value_lookup[s] = nn.Embedding.from_pretrained(hid_label, freeze=True)\n self.value_lookup[s].padding_idx = -1\n\n print(\"Complete initialization of slot and value lookup\")\n\n def forward(self, data):#input_ids, input_len, labels, gate_label, n_gpu=1, target_slot=None):\n batch_size = data[\"context\"].size(0)\n labels = data[\"belief_ontology\"]\n\n # Utterance encoding\n inputs = {\"input_ids\": data[\"context\"], \"attention_mask\":(data[\"context\"] > 0).long()}\n\n if \"bert\" in self.args[\"model_type\"]:\n hidden = self.utterance_encoder(**inputs)[0]\n hidden_rep = hidden[:, 0, :]\n elif self.args[\"model_type\"] == \"gpt2\":\n hidden = self.utterance_encoder(**inputs)[0]\n hidden_rep = hidden.mean(1)\n elif self.args[\"model_type\"] == \"dialogpt\":\n #outputs = self.utterance_encoder(**inputs)[2] # 0 is vocab logits, 1 is a tuple of attn head\n transformer_outputs = self.utterance_encoder.transformer(\n 
data[\"context\"],\n attention_mask=(data[\"context\"] > 0).long()\n )\n hidden = transformer_outputs[0]\n hidden_rep = hidden.mean(1)\n\n # Label (slot-value) encoding\n loss = 0\n pred_slot = []\n \n for slot_id in range(self.num_slots): ## note: target_slots are successive\n # loss calculation\n hid_label = self.value_lookup[slot_id].weight # v * d\n num_slot_labels = hid_label.size(0)\n\n _hidden = _gelu(self.project_W_1[slot_id](hidden_rep))\n _hidden = torch.cat([hid_label.unsqueeze(0).repeat(batch_size, 1, 1), _hidden.unsqueeze(1).repeat(1, num_slot_labels, 1)], dim=2)\n _hidden = _gelu(self.project_W_2[slot_id](_hidden))\n _hidden = self.project_W_3[slot_id](_hidden)\n _dist = _hidden.squeeze(2) # b * 1 * num_slot_labels\n\n _, pred = torch.max(_dist, -1)\n pred_slot.append(pred.unsqueeze(1))\n #output.append(_dist)\n\n if labels is not None:\n _loss = self.nll(_dist, labels[:, slot_id])\n #loss_slot.append(_loss.item())\n loss += _loss\n\n predictions = torch.cat(pred_slot, 1).detach().cpu().numpy()\n labels = labels.detach().cpu().numpy()\n\n if self.training: \n self.loss_grad = loss\n self.optimize()\n \n if self.args[\"error_analysis\"]:\n for bsz_i, (pred, label) in enumerate(zip(np.array(predictions), np.array(labels))):\n assert len(pred) == len(label)\n joint = 0\n pred_arr, gold_arr = [], []\n for i, p in enumerate(pred):\n pred_str = self.slot_id2value_dict[self.slots[i]][p]\n gold_str = self.slot_id2value_dict[self.slots[i]][label[i]]\n pred_arr.append(self.slots[i]+\"-\"+pred_str)\n gold_arr.append(self.slots[i]+\"-\"+gold_str)\n if pred_str == gold_str or pred_str in gold_str.split(\"|\"):\n joint += 1\n #if joint == len(pred):\n print(data[\"context_plain\"][bsz_i])\n print(\"Gold:\", [s for s in gold_arr if s.split(\"-\")[2] != \"none\"])\n print(\"Pred:\", [s for s in pred_arr if s.split(\"-\")[2] != \"none\"])\n print()\n \n\n outputs = {\"loss\":loss.item(), \"pred\":predictions, \"label\":labels} \n \n return outputs\n\n def evaluation(self, preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n\n slot_acc, joint_acc, slot_acc_total, joint_acc_total = 0, 0, 0, 0\n for pred, label in zip(preds, labels):\n joint = 0\n \n assert len(pred) == len(label)\n \n for i, p in enumerate(pred):\n pred_str = self.slot_id2value_dict[self.slots[i]][p]\n gold_str = self.slot_id2value_dict[self.slots[i]][label[i]]\n \n if pred_str == gold_str or pred_str in gold_str.split(\"|\"):\n slot_acc += 1\n joint += 1\n slot_acc_total += 1\n \n if joint == len(pred):\n joint_acc += 1\n \n joint_acc_total += 1\n \n joint_acc = joint_acc / joint_acc_total\n slot_acc = slot_acc / slot_acc_total\n results = {\"joint_acc\":joint_acc, \"slot_acc\":slot_acc}\n print(\"Results 1: \", results)\n\n return results\n\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Embedding",
"numpy.array",
"torch.nn.Embedding.from_pretrained",
"torch.cat",
"torch.max",
"torch.tensor",
"torch.nn.CrossEntropyLoss"
]
]
|
gkucsko/NeMo | [
"c1ae0a7744d9a0ac206f61b2883ce00c9b8339b9",
"c1ae0a7744d9a0ac206f61b2883ce00c9b8339b9"
]
| [
"nemo/collections/asr/models/clustering_diarizer.py",
"nemo/collections/asr/parts/preprocessing/segment.py"
]
| [
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\nimport pickle as pkl\nimport shutil\nimport tarfile\nimport tempfile\nfrom copy import deepcopy\nfrom typing import List, Optional\n\nimport torch\nfrom omegaconf import DictConfig, OmegaConf\nfrom pytorch_lightning.utilities import rank_zero_only\nfrom tqdm import tqdm\n\nfrom nemo.collections.asr.models.classification_models import EncDecClassificationModel\nfrom nemo.collections.asr.models.label_models import EncDecSpeakerLabelModel\nfrom nemo.collections.asr.parts.mixins.mixins import DiarizationMixin\nfrom nemo.collections.asr.parts.utils.speaker_utils import (\n audio_rttm_map,\n get_embs_and_timestamps,\n get_uniqname_from_filepath,\n parse_scale_configs,\n perform_clustering,\n score_labels,\n segments_manifest_to_subsegments_manifest,\n write_rttm2manifest,\n)\nfrom nemo.collections.asr.parts.utils.vad_utils import (\n generate_overlap_vad_seq,\n generate_vad_segment_table,\n get_vad_stream_status,\n prepare_manifest,\n)\nfrom nemo.core.classes import Model\nfrom nemo.utils import logging, model_utils\n\ntry:\n from torch.cuda.amp import autocast\nexcept ImportError:\n from contextlib import contextmanager\n\n @contextmanager\n def autocast(enabled=None):\n yield\n\n\n__all__ = ['ClusteringDiarizer']\n\n_MODEL_CONFIG_YAML = \"model_config.yaml\"\n_VAD_MODEL = \"vad_model.nemo\"\n_SPEAKER_MODEL = \"speaker_model.nemo\"\n\n\ndef get_available_model_names(class_name):\n \"lists available pretrained model names from NGC\"\n available_models = class_name.list_available_models()\n return list(map(lambda x: x.pretrained_model_name, available_models))\n\n\nclass ClusteringDiarizer(Model, DiarizationMixin):\n \"\"\"\n Inference model Class for offline speaker diarization. \n This class handles required functionality for diarization : Speech Activity Detection, Segmentation, \n Extract Embeddings, Clustering, Resegmentation and Scoring. 
\n All the parameters are passed through config file \n \"\"\"\n\n def __init__(self, cfg: DictConfig):\n cfg = model_utils.convert_model_config_to_dict_config(cfg)\n # Convert config to support Hydra 1.0+ instantiation\n cfg = model_utils.maybe_update_config_version(cfg)\n self._cfg = cfg\n\n # Diarizer set up\n self._diarizer_params = self._cfg.diarizer\n\n # init vad model\n self.has_vad_model = False\n if not self._diarizer_params.oracle_vad:\n if self._cfg.diarizer.vad.model_path is not None:\n self._vad_params = self._cfg.diarizer.vad.parameters\n self._init_vad_model()\n\n # init speaker model\n self.multiscale_embeddings_and_timestamps = {}\n self._init_speaker_model()\n self._speaker_params = self._cfg.diarizer.speaker_embeddings.parameters\n self._speaker_dir = os.path.join(self._diarizer_params.out_dir, 'speaker_outputs')\n shutil.rmtree(self._speaker_dir, ignore_errors=True)\n os.makedirs(self._speaker_dir)\n\n # Clustering params\n self._cluster_params = self._diarizer_params.clustering.parameters\n\n self._device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n @classmethod\n def list_available_models(cls):\n pass\n\n def _init_vad_model(self):\n \"\"\"\n Initialize VAD model with model name or path passed through config\n \"\"\"\n model_path = self._cfg.diarizer.vad.model_path\n if model_path.endswith('.nemo'):\n self._vad_model = EncDecClassificationModel.restore_from(model_path)\n logging.info(\"VAD model loaded locally from {}\".format(model_path))\n else:\n if model_path not in get_available_model_names(EncDecClassificationModel):\n logging.warning(\n \"requested {} model name not available in pretrained models, instead\".format(model_path)\n )\n model_path = \"vad_telephony_marblenet\"\n logging.info(\"Loading pretrained {} model from NGC\".format(model_path))\n self._vad_model = EncDecClassificationModel.from_pretrained(model_name=model_path)\n\n self._vad_window_length_in_sec = self._vad_params.window_length_in_sec\n self._vad_shift_length_in_sec = self._vad_params.shift_length_in_sec\n self.has_vad_model = True\n\n def _init_speaker_model(self):\n \"\"\"\n Initialize speaker embedding model with model name or path passed through config\n \"\"\"\n model_path = self._cfg.diarizer.speaker_embeddings.model_path\n if model_path is not None and model_path.endswith('.nemo'):\n self._speaker_model = EncDecSpeakerLabelModel.restore_from(model_path)\n logging.info(\"Speaker Model restored locally from {}\".format(model_path))\n elif model_path.endswith('.ckpt'):\n self._speaker_model = EncDecSpeakerLabelModel.load_from_checkpoint(model_path)\n logging.info(\"Speaker Model restored locally from {}\".format(model_path))\n else:\n if model_path not in get_available_model_names(EncDecSpeakerLabelModel):\n logging.warning(\n \"requested {} model name not available in pretrained models, instead\".format(model_path)\n )\n model_path = \"ecapa_tdnn\"\n logging.info(\"Loading pretrained {} model from NGC\".format(model_path))\n self._speaker_model = EncDecSpeakerLabelModel.from_pretrained(model_name=model_path)\n\n self.multiscale_args_dict = parse_scale_configs(\n self._diarizer_params.speaker_embeddings.parameters.window_length_in_sec,\n self._diarizer_params.speaker_embeddings.parameters.shift_length_in_sec,\n self._diarizer_params.speaker_embeddings.parameters.multiscale_weights,\n )\n\n def _setup_vad_test_data(self, manifest_vad_input):\n vad_dl_config = {\n 'manifest_filepath': manifest_vad_input,\n 'sample_rate': self._cfg.sample_rate,\n 'batch_size': 
self._cfg.get('batch_size'),\n 'vad_stream': True,\n 'labels': ['infer',],\n 'window_length_in_sec': self._vad_window_length_in_sec,\n 'shift_length_in_sec': self._vad_shift_length_in_sec,\n 'trim_silence': False,\n 'num_workers': self._cfg.num_workers,\n }\n self._vad_model.setup_test_data(test_data_config=vad_dl_config)\n\n def _setup_spkr_test_data(self, manifest_file):\n spk_dl_config = {\n 'manifest_filepath': manifest_file,\n 'sample_rate': self._cfg.sample_rate,\n 'batch_size': self._cfg.get('batch_size'),\n 'trim_silence': False,\n 'labels': None,\n 'num_workers': self._cfg.num_workers,\n }\n self._speaker_model.setup_test_data(spk_dl_config)\n\n def _run_vad(self, manifest_file):\n \"\"\"\n Run voice activity detection. \n Get log probability of voice activity detection and smoothes using the post processing parameters. \n Using generated frame level predictions generated manifest file for later speaker embedding extraction.\n input:\n manifest_file (str) : Manifest file containing path to audio file and label as infer\n\n \"\"\"\n\n shutil.rmtree(self._vad_dir, ignore_errors=True)\n os.makedirs(self._vad_dir)\n\n self._vad_model = self._vad_model.to(self._device)\n self._vad_model.eval()\n\n time_unit = int(self._vad_window_length_in_sec / self._vad_shift_length_in_sec)\n trunc = int(time_unit / 2)\n trunc_l = time_unit - trunc\n all_len = 0\n data = []\n for line in open(manifest_file, 'r', encoding='utf-8'):\n file = json.loads(line)['audio_filepath']\n data.append(get_uniqname_from_filepath(file))\n\n status = get_vad_stream_status(data)\n for i, test_batch in enumerate(tqdm(self._vad_model.test_dataloader())):\n test_batch = [x.to(self._device) for x in test_batch]\n with autocast():\n log_probs = self._vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1])\n probs = torch.softmax(log_probs, dim=-1)\n pred = probs[:, 1]\n if status[i] == 'start':\n to_save = pred[:-trunc]\n elif status[i] == 'next':\n to_save = pred[trunc:-trunc_l]\n elif status[i] == 'end':\n to_save = pred[trunc_l:]\n else:\n to_save = pred\n all_len += len(to_save)\n outpath = os.path.join(self._vad_dir, data[i] + \".frame\")\n with open(outpath, \"a\", encoding='utf-8') as fout:\n for f in range(len(to_save)):\n fout.write('{0:0.4f}\\n'.format(to_save[f]))\n del test_batch\n if status[i] == 'end' or status[i] == 'single':\n all_len = 0\n\n if not self._vad_params.smoothing:\n # Shift the window by 10ms to generate the frame and use the prediction of the window to represent the label for the frame;\n self.vad_pred_dir = self._vad_dir\n else:\n # Generate predictions with overlapping input segments. 
Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.\n # smoothing_method would be either in majority vote (median) or average (mean)\n logging.info(\"Generating predictions with overlapping input segments\")\n smoothing_pred_dir = generate_overlap_vad_seq(\n frame_pred_dir=self._vad_dir,\n smoothing_method=self._vad_params.smoothing,\n overlap=self._vad_params.overlap,\n window_length_in_sec=self._vad_window_length_in_sec,\n shift_length_in_sec=self._vad_shift_length_in_sec,\n num_workers=self._cfg.num_workers,\n )\n self.vad_pred_dir = smoothing_pred_dir\n\n logging.info(\"Converting frame level prediction to speech/no-speech segment in start and end times format.\")\n\n table_out_dir = generate_vad_segment_table(\n vad_pred_dir=self.vad_pred_dir,\n postprocessing_params=self._vad_params,\n shift_length_in_sec=self._vad_shift_length_in_sec,\n num_workers=self._cfg.num_workers,\n )\n AUDIO_VAD_RTTM_MAP = deepcopy(self.AUDIO_RTTM_MAP.copy())\n for key in AUDIO_VAD_RTTM_MAP:\n AUDIO_VAD_RTTM_MAP[key]['rttm_filepath'] = os.path.join(table_out_dir, key + \".txt\")\n\n write_rttm2manifest(AUDIO_VAD_RTTM_MAP, self._vad_out_file)\n self._speaker_manifest_path = self._vad_out_file\n\n def _run_segmentation(self, window: float, shift: float, scale_tag: str = ''):\n\n self.subsegments_manifest_path = os.path.join(self._speaker_dir, f'subsegments{scale_tag}.json')\n logging.info(\n f\"Subsegmentation for embedding extraction:{scale_tag.replace('_',' ')}, {self.subsegments_manifest_path}\"\n )\n self.subsegments_manifest_path = segments_manifest_to_subsegments_manifest(\n segments_manifest_file=self._speaker_manifest_path,\n subsegments_manifest_file=self.subsegments_manifest_path,\n window=window,\n shift=shift,\n )\n return None\n\n def _perform_speech_activity_detection(self):\n \"\"\"\n Checks for type of speech activity detection from config. 
Choices are NeMo VAD,\n external vad manifest and oracle VAD (generates speech activity labels from provided RTTM files)\n \"\"\"\n if self.has_vad_model:\n self._auto_split = True\n self._split_duration = 50\n manifest_vad_input = self._diarizer_params.manifest_filepath\n\n if self._auto_split:\n logging.info(\"Split long audio file to avoid CUDA memory issue\")\n logging.debug(\"Try smaller split_duration if you still have CUDA memory issue\")\n config = {\n 'input': manifest_vad_input,\n 'window_length_in_sec': self._vad_window_length_in_sec,\n 'split_duration': self._split_duration,\n 'num_workers': self._cfg.num_workers,\n }\n manifest_vad_input = prepare_manifest(config)\n else:\n logging.warning(\n \"If you encounter CUDA memory issue, try splitting manifest entry by split_duration to avoid it.\"\n )\n\n self._setup_vad_test_data(manifest_vad_input)\n self._run_vad(manifest_vad_input)\n\n elif self._diarizer_params.vad.external_vad_manifest is not None:\n self._speaker_manifest_path = self._diarizer_params.vad.external_vad_manifest\n elif self._diarizer_params.oracle_vad:\n self._speaker_manifest_path = os.path.join(self._speaker_dir, 'oracle_vad_manifest.json')\n self._speaker_manifest_path = write_rttm2manifest(self.AUDIO_RTTM_MAP, self._speaker_manifest_path)\n else:\n raise ValueError(\n \"Only one of diarizer.oracle_vad, vad.model_path or vad.external_vad_manifest must be passed\"\n )\n\n def _extract_embeddings(self, manifest_file: str):\n \"\"\"\n This method extracts speaker embeddings from segments passed through manifest_file\n Optionally you may save the intermediate speaker embeddings for debugging or any use. \n \"\"\"\n logging.info(\"Extracting embeddings for Diarization\")\n self._setup_spkr_test_data(manifest_file)\n self.embeddings = {}\n self._speaker_model = self._speaker_model.to(self._device)\n self._speaker_model.eval()\n self.time_stamps = {}\n\n all_embs = torch.empty([0])\n for test_batch in tqdm(self._speaker_model.test_dataloader()):\n test_batch = [x.to(self._device) for x in test_batch]\n audio_signal, audio_signal_len, labels, slices = test_batch\n with autocast():\n _, embs = self._speaker_model.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n emb_shape = embs.shape[-1]\n embs = embs.view(-1, emb_shape)\n all_embs = torch.cat((all_embs, embs.cpu().detach()), dim=0)\n del test_batch\n\n with open(manifest_file, 'r', encoding='utf-8') as manifest:\n for i, line in enumerate(manifest.readlines()):\n line = line.strip()\n dic = json.loads(line)\n uniq_name = get_uniqname_from_filepath(dic['audio_filepath'])\n if uniq_name in self.embeddings:\n self.embeddings[uniq_name] = torch.cat((self.embeddings[uniq_name], all_embs[i].view(1, -1)))\n else:\n self.embeddings[uniq_name] = all_embs[i].view(1, -1)\n if uniq_name not in self.time_stamps:\n self.time_stamps[uniq_name] = []\n start = dic['offset']\n end = start + dic['duration']\n stamp = '{:.3f} {:.3f} '.format(start, end)\n self.time_stamps[uniq_name].append(stamp)\n\n if self._speaker_params.save_embeddings:\n embedding_dir = os.path.join(self._speaker_dir, 'embeddings')\n if not os.path.exists(embedding_dir):\n os.makedirs(embedding_dir, exist_ok=True)\n\n prefix = get_uniqname_from_filepath(manifest_file)\n name = os.path.join(embedding_dir, prefix)\n self._embeddings_file = name + f'_embeddings.pkl'\n pkl.dump(self.embeddings, open(self._embeddings_file, 'wb'))\n logging.info(\"Saved embedding files to {}\".format(embedding_dir))\n\n def path2audio_files_to_manifest(self, 
paths2audio_files, manifest_filepath):\n with open(manifest_filepath, 'w', encoding='utf-8') as fp:\n for audio_file in paths2audio_files:\n audio_file = audio_file.strip()\n entry = {'audio_filepath': audio_file, 'offset': 0.0, 'duration': None, 'text': '-', 'label': 'infer'}\n fp.write(json.dumps(entry) + '\\n')\n\n def diarize(self, paths2audio_files: List[str] = None, batch_size: int = 0):\n \"\"\"\n Diarize files provided thorugh paths2audio_files or manifest file\n input:\n paths2audio_files (List[str]): list of paths to file containing audio file\n batch_size (int): batch_size considered for extraction of speaker embeddings and VAD computation\n \"\"\"\n\n self._out_dir = self._diarizer_params.out_dir\n if not os.path.exists(self._out_dir):\n os.mkdir(self._out_dir)\n\n self._vad_dir = os.path.join(self._out_dir, 'vad_outputs')\n self._vad_out_file = os.path.join(self._vad_dir, \"vad_out.json\")\n\n if batch_size:\n self._cfg.batch_size = batch_size\n\n if paths2audio_files:\n if type(paths2audio_files) is list:\n self._diarizer_params.manifest_filepath = os.path.join(self._out_dir, 'paths2audio_filepath.json')\n self.path2audio_files_to_manifest(paths2audio_files, self._diarizer_params.manifest_filepath)\n else:\n raise ValueError(\"paths2audio_files must be of type list of paths to file containing audio file\")\n\n self.AUDIO_RTTM_MAP = audio_rttm_map(self._diarizer_params.manifest_filepath)\n\n out_rttm_dir = os.path.join(self._out_dir, 'pred_rttms')\n os.makedirs(out_rttm_dir, exist_ok=True)\n\n # Speech Activity Detection\n self._perform_speech_activity_detection()\n\n # Segmentation\n for scale_idx, (window, shift) in self.multiscale_args_dict['scale_dict'].items():\n\n # Segmentation for the current scale (scale_idx)\n self._run_segmentation(window, shift, scale_tag=f'_scale{scale_idx}')\n\n # Embedding Extraction for the current scale (scale_idx)\n self._extract_embeddings(self.subsegments_manifest_path)\n\n self.multiscale_embeddings_and_timestamps[scale_idx] = [self.embeddings, self.time_stamps]\n\n embs_and_timestamps = get_embs_and_timestamps(\n self.multiscale_embeddings_and_timestamps, self.multiscale_args_dict\n )\n\n # Clustering\n all_reference, all_hypothesis = perform_clustering(\n embs_and_timestamps=embs_and_timestamps,\n AUDIO_RTTM_MAP=self.AUDIO_RTTM_MAP,\n out_rttm_dir=out_rttm_dir,\n clustering_params=self._cluster_params,\n )\n\n # TODO Resegmentation -> Coming Soon\n\n # Scoring\n score = score_labels(\n self.AUDIO_RTTM_MAP,\n all_reference,\n all_hypothesis,\n collar=self._diarizer_params.collar,\n ignore_overlap=self._diarizer_params.ignore_overlap,\n )\n\n logging.info(\"Outputs are saved in {} directory\".format(os.path.abspath(self._diarizer_params.out_dir)))\n return score\n\n @staticmethod\n def __make_nemo_file_from_folder(filename, source_dir):\n with tarfile.open(filename, \"w:gz\") as tar:\n tar.add(source_dir, arcname=\"./\")\n\n @rank_zero_only\n def save_to(self, save_path: str):\n \"\"\"\n Saves model instance (weights and configuration) into EFF archive or .\n You can use \"restore_from\" method to fully restore instance from .nemo file.\n\n .nemo file is an archive (tar.gz) with the following:\n model_config.yaml - model configuration in .yaml format. 
You can deserialize this into cfg argument for model's constructor\n model_wights.chpt - model checkpoint\n\n Args:\n save_path: Path to .nemo file where model instance should be saved\n \"\"\"\n\n # TODO: Why does this override the main save_to?\n\n with tempfile.TemporaryDirectory() as tmpdir:\n config_yaml = os.path.join(tmpdir, _MODEL_CONFIG_YAML)\n spkr_model = os.path.join(tmpdir, _SPEAKER_MODEL)\n\n self.to_config_file(path2yaml_file=config_yaml)\n if self.has_vad_model:\n vad_model = os.path.join(tmpdir, _VAD_MODEL)\n self._vad_model.save_to(vad_model)\n self._speaker_model.save_to(spkr_model)\n self.__make_nemo_file_from_folder(filename=save_path, source_dir=tmpdir)\n\n @staticmethod\n def __unpack_nemo_file(path2file: str, out_folder: str) -> str:\n if not os.path.exists(path2file):\n raise FileNotFoundError(f\"{path2file} does not exist\")\n tar = tarfile.open(path2file, \"r:gz\")\n tar.extractall(path=out_folder)\n tar.close()\n return out_folder\n\n @classmethod\n def restore_from(\n cls,\n restore_path: str,\n override_config_path: Optional[str] = None,\n map_location: Optional[torch.device] = None,\n strict: bool = False,\n ):\n # Get path where the command is executed - the artifacts will be \"retrieved\" there\n # (original .nemo behavior)\n cwd = os.getcwd()\n\n with tempfile.TemporaryDirectory() as tmpdir:\n try:\n cls.__unpack_nemo_file(path2file=restore_path, out_folder=tmpdir)\n os.chdir(tmpdir)\n if override_config_path is None:\n config_yaml = os.path.join(tmpdir, _MODEL_CONFIG_YAML)\n else:\n config_yaml = override_config_path\n conf = OmegaConf.load(config_yaml)\n if os.path.exists(os.path.join(tmpdir, _VAD_MODEL)):\n conf.diarizer.vad.model_path = os.path.join(tmpdir, _VAD_MODEL)\n else:\n logging.info(\n f'Model {cls.__name__} does not contain a VAD model. A VAD model or manifest file with'\n f'speech segments need for diarization with this model'\n )\n\n conf.diarizer.speaker_embeddings.model_path = os.path.join(tmpdir, _SPEAKER_MODEL)\n conf.restore_map_location = map_location\n OmegaConf.set_struct(conf, True)\n instance = cls(cfg=conf)\n\n logging.info(f'Model {cls.__name__} was successfully restored from {restore_path}.')\n finally:\n os.chdir(cwd)\n\n return instance\n",
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Copyright (c) 2018 Ryan Leary\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# This file contains code artifacts adapted from https://github.com/ryanleary/patter\n\nimport os\nimport random\n\nimport librosa\nimport numpy as np\nimport soundfile as sf\n\nfrom nemo.utils import logging\n\n# TODO @blisc: Perhaps refactor instead of import guarding\nHAVE_PYDUB = True\ntry:\n from pydub import AudioSegment as Audio\n from pydub.exceptions import CouldntDecodeError\nexcept ModuleNotFoundError:\n HAVE_PYDUB = False\n\n\navailable_formats = sf.available_formats()\nsf_supported_formats = [\".\" + i.lower() for i in available_formats.keys()]\n\n\nclass AudioSegment(object):\n \"\"\"Monaural audio segment abstraction.\n :param samples: Audio samples [num_samples x num_channels].\n :type samples: ndarray.float32\n :param sample_rate: Audio sample rate.\n :type sample_rate: int\n :raises TypeError: If the sample data type is not float or int.\n \"\"\"\n\n def __init__(self, samples, sample_rate, target_sr=None, trim=False, trim_db=60, orig_sr=None):\n \"\"\"Create audio segment from samples.\n Samples are convert float32 internally, with int scaled to [-1, 1].\n \"\"\"\n samples = self._convert_samples_to_float32(samples)\n if target_sr is not None and target_sr != sample_rate:\n samples = librosa.core.resample(samples, orig_sr=sample_rate, target_sr=target_sr)\n sample_rate = target_sr\n if trim:\n samples, _ = librosa.effects.trim(samples, top_db=trim_db)\n self._samples = samples\n self._sample_rate = sample_rate\n if self._samples.ndim >= 2:\n self._samples = np.mean(self._samples, 1)\n\n self._orig_sr = orig_sr if orig_sr is not None else sample_rate\n\n def __eq__(self, other):\n \"\"\"Return whether two objects are equal.\"\"\"\n if type(other) is not type(self):\n return False\n if self._sample_rate != other._sample_rate:\n return False\n if self._samples.shape != 
other._samples.shape:\n return False\n if np.any(self.samples != other._samples):\n return False\n return True\n\n def __ne__(self, other):\n \"\"\"Return whether two objects are unequal.\"\"\"\n return not self.__eq__(other)\n\n def __str__(self):\n \"\"\"Return human-readable representation of segment.\"\"\"\n return \"%s: num_samples=%d, sample_rate=%d, duration=%.2fsec, rms=%.2fdB\" % (\n type(self),\n self.num_samples,\n self.sample_rate,\n self.duration,\n self.rms_db,\n )\n\n @staticmethod\n def _convert_samples_to_float32(samples):\n \"\"\"Convert sample type to float32.\n Audio sample type is usually integer or float-point.\n Integers will be scaled to [-1, 1] in float32.\n \"\"\"\n float32_samples = samples.astype('float32')\n if samples.dtype in np.sctypes['int']:\n bits = np.iinfo(samples.dtype).bits\n float32_samples *= 1.0 / 2 ** (bits - 1)\n elif samples.dtype in np.sctypes['float']:\n pass\n else:\n raise TypeError(\"Unsupported sample type: %s.\" % samples.dtype)\n return float32_samples\n\n @classmethod\n def from_file(\n cls, audio_file, target_sr=None, int_values=False, offset=0, duration=0, trim=False, orig_sr=None,\n ):\n \"\"\"\n Load a file supported by librosa and return as an AudioSegment.\n :param audio_file: path of file to load\n :param target_sr: the desired sample rate\n :param int_values: if true, load samples as 32-bit integers\n :param offset: offset in seconds when loading audio\n :param duration: duration in seconds when loading audio\n :return: numpy array of samples\n \"\"\"\n samples = None\n if not isinstance(audio_file, str) or os.path.splitext(audio_file)[-1] in sf_supported_formats:\n try:\n with sf.SoundFile(audio_file, 'r') as f:\n dtype = 'int32' if int_values else 'float32'\n sample_rate = f.samplerate\n if offset > 0:\n f.seek(int(offset * sample_rate))\n if duration > 0:\n samples = f.read(int(duration * sample_rate), dtype=dtype)\n else:\n samples = f.read(dtype=dtype)\n samples = samples.transpose()\n except RuntimeError as e:\n logging.error(\n f\"Loading {audio_file} via SoundFile raised RuntimeError: `{e}`. \"\n f\"NeMo will fallback to loading via pydub.\"\n )\n\n if HAVE_PYDUB and samples is None:\n try:\n samples = Audio.from_file(audio_file)\n sample_rate = samples.frame_rate\n if offset > 0:\n # pydub does things in milliseconds\n seconds = offset * 1000\n samples = samples[int(seconds) :]\n if duration > 0:\n seconds = duration * 1000\n samples = samples[: int(seconds)]\n samples = np.array(samples.get_array_of_samples())\n except CouldntDecodeError as err:\n logging.error(f\"Loading {audio_file} via pydub raised CouldntDecodeError: `{err}`.\")\n\n if samples is None:\n libs = \"soundfile, and pydub\" if HAVE_PYDUB else \"soundfile\"\n raise Exception(f\"Your audio file {audio_file} could not be decoded. 
We tried using {libs}.\")\n\n return cls(samples, sample_rate, target_sr=target_sr, trim=trim, orig_sr=orig_sr)\n\n @classmethod\n def segment_from_file(cls, audio_file, target_sr=None, n_segments=0, trim=False, orig_sr=None):\n \"\"\"Grabs n_segments number of samples from audio_file randomly from the\n file as opposed to at a specified offset.\n\n Note that audio_file can be either the file path, or a file-like object.\n \"\"\"\n try:\n with sf.SoundFile(audio_file, 'r') as f:\n sample_rate = f.samplerate\n if n_segments > 0 and len(f) > n_segments:\n max_audio_start = len(f) - n_segments\n audio_start = random.randint(0, max_audio_start)\n f.seek(audio_start)\n samples = f.read(n_segments, dtype='float32')\n else:\n samples = f.read(dtype='float32')\n samples = samples.transpose()\n except RuntimeError as e:\n logging.error(f\"Loading {audio_file} via SoundFile raised RuntimeError: `{e}`.\")\n\n samples = samples.transpose()\n return cls(samples, sample_rate, target_sr=target_sr, trim=trim, orig_sr=orig_sr)\n\n @property\n def samples(self):\n return self._samples.copy()\n\n @property\n def sample_rate(self):\n return self._sample_rate\n\n @property\n def num_samples(self):\n return self._samples.shape[0]\n\n @property\n def duration(self):\n return self._samples.shape[0] / float(self._sample_rate)\n\n @property\n def rms_db(self):\n mean_square = np.mean(self._samples ** 2)\n return 10 * np.log10(mean_square)\n\n @property\n def orig_sr(self):\n return self._orig_sr\n\n def gain_db(self, gain):\n self._samples *= 10.0 ** (gain / 20.0)\n\n def pad(self, pad_size, symmetric=False):\n \"\"\"Add zero padding to the sample. The pad size is given in number\n of samples.\n If symmetric=True, `pad_size` will be added to both sides. If false,\n `pad_size`\n zeros will be added only to the end.\n \"\"\"\n self._samples = np.pad(self._samples, (pad_size if symmetric else 0, pad_size), mode='constant',)\n\n def subsegment(self, start_time=None, end_time=None):\n \"\"\"Cut the AudioSegment between given boundaries.\n Note that this is an in-place transformation.\n :param start_time: Beginning of subsegment in seconds.\n :type start_time: float\n :param end_time: End of subsegment in seconds.\n :type end_time: float\n :raise ValueError: If start_time or end_time is incorrectly set,\n e.g. out\n of bounds in time.\n \"\"\"\n start_time = 0.0 if start_time is None else start_time\n end_time = self.duration if end_time is None else end_time\n if start_time < 0.0:\n start_time = self.duration + start_time\n if end_time < 0.0:\n end_time = self.duration + end_time\n if start_time < 0.0:\n raise ValueError(\"The slice start position (%f s) is out of bounds.\" % start_time)\n if end_time < 0.0:\n raise ValueError(\"The slice end position (%f s) is out of bounds.\" % end_time)\n if start_time > end_time:\n raise ValueError(\n \"The slice start position (%f s) is later than the end position (%f s).\" % (start_time, end_time)\n )\n if end_time > self.duration:\n raise ValueError(\"The slice end position (%f s) is out of bounds (> %f s)\" % (end_time, self.duration))\n start_sample = int(round(start_time * self._sample_rate))\n end_sample = int(round(end_time * self._sample_rate))\n self._samples = self._samples[start_sample:end_sample]\n"
]
| [
[
"torch.empty",
"torch.cuda.is_available",
"torch.cuda.amp.autocast",
"torch.softmax"
],
[
"numpy.pad",
"numpy.mean",
"numpy.any",
"numpy.log10",
"numpy.iinfo"
]
]
|
ethanhezhao/NBVAE | [
"d546f56e0b3ed39ea3d1421ae929e5f3cbe3c3dd"
]
| [
"nb_vae.py"
]
| [
"import numpy as np\nimport tensorflow as tf\nfrom scipy import special\n\n\nclass NegativeBinomialVAE():\n\n def __init__(self, arch, lr=1e-3, random_seed=None):\n\n self.decoder_arch = arch\n\n self.encoder_arch = arch[::-1]\n\n self.lr = lr\n self.random_seed = random_seed\n\n self.input_ph = tf.placeholder(\n dtype=tf.float32, shape=[None, arch[-1]])\n self.keep_prob_ph = tf.placeholder_with_default(1.0, shape=None)\n self.is_training_ph = tf.placeholder_with_default(0., shape=None)\n self.anneal_ph = tf.placeholder_with_default(1., shape=None)\n\n def _log_likelihood(self, h_r, h_p):\n\n ll = tf.lgamma(tf.exp(h_r) + self.input_ph) - tf.lgamma(tf.exp(h_r))\n ll += h_p * self.input_ph - tf.log(tf.exp(h_p) + 1) * (self.input_ph + tf.exp(h_r))\n\n return ll\n\n def _encoder_pass(self):\n\n mu_z, std_z, kl = None, None, None\n h = tf.nn.l2_normalize(self.input_ph, 1)\n h = tf.nn.dropout(h, self.keep_prob_ph)\n for i, (w, b) in enumerate(zip(self.encoder_weights, self.encoder_biases)):\n h = tf.matmul(h, w) + b\n\n if i != len(self.encoder_weights) - 1:\n h = tf.nn.tanh(h)\n else:\n mu_z = h[:, :self.encoder_arch[-1]]\n logvar_q = h[:, self.encoder_arch[-1]:]\n std_z = tf.exp(0.5 * logvar_q)\n kl = tf.reduce_sum(\n 0.5 * (-logvar_q + tf.exp(logvar_q) + mu_z ** 2 - 1), axis=1)\n return mu_z, std_z, kl\n\n def _decoder_pass_r(self, z):\n\n h_r = z\n for i, (w, b) in enumerate(zip(self.decoder_weights_r, self.decoder_biases_r)):\n h_r = tf.matmul(h_r, w) + b\n if i != len(self.decoder_weights_r) - 1:\n h_r = tf.nn.tanh(h_r)\n\n return h_r\n\n def _decoder_pass_p(self, z):\n\n h_p = z\n for i, (w, b) in enumerate(zip(self.decoder_weights_p, self.decoder_biases_p)):\n h_p = tf.matmul(h_p, w) + b\n if i != len(self.decoder_weights_p) - 1:\n h_p = tf.nn.tanh(h_p)\n\n return h_p\n\n def build_graph(self):\n\n self._construct_encoder_weights()\n self._construct_decoder_weights_r()\n self._construct_decoder_weights_p()\n\n saver = tf.train.Saver()\n\n mu_z, std_z, kl = self._encoder_pass()\n epsilon = tf.random_normal(tf.shape(std_z))\n z = mu_z + self.is_training_ph * epsilon * std_z\n\n h_r = self._decoder_pass_r(z)\n h_p = self._decoder_pass_p(z)\n\n ll = self._log_likelihood(h_r, h_p)\n neg_ll = -tf.reduce_mean(tf.reduce_sum(ll, axis=-1))\n neg_elbo = neg_ll + self.anneal_ph * tf.reduce_mean(kl)\n\n train_op = tf.train.AdamOptimizer(self.lr).minimize(neg_elbo)\n\n return saver, train_op, h_r, h_p\n\n def _construct_encoder_weights(self):\n\n self.encoder_weights, self.encoder_biases = [], []\n\n for i, (d_in, d_out) in enumerate(zip(self.encoder_arch[:-1], self.encoder_arch[1:])):\n if i == len(self.encoder_arch[:-1]) - 1:\n d_out *= 2\n\n weight_key = \"encoder_weights_%d\" % i\n bias_key = \"encoder_bias_%d\" % i\n self.encoder_weights.append(tf.get_variable(name=weight_key, shape=[d_in, d_out],\n initializer=tf.contrib.layers.xavier_initializer(\n seed=self.random_seed)))\n self.encoder_biases.append(tf.get_variable(name=bias_key, shape=[d_out],\n initializer=tf.truncated_normal_initializer(\n stddev=0.001, seed=self.random_seed)))\n\n self.decoder_weights_r, self.decoder_biases_r = [], []\n\n def _construct_decoder_weights_r(self):\n\n for i, (d_in, d_out) in enumerate(zip(self.decoder_arch[:-1], self.decoder_arch[1:])):\n weight_key = \"decoder_weights_r_%d\" % i\n bias_key = \"decoder_bias_r_%d\" % i\n self.decoder_weights_r.append(tf.get_variable(name=weight_key, shape=[d_in, d_out],\n initializer=tf.contrib.layers.xavier_initializer(\n seed=self.random_seed)))\n 
self.decoder_biases_r.append(tf.get_variable(name=bias_key, shape=[d_out],\n initializer=tf.truncated_normal_initializer(\n stddev=0.001, seed=self.random_seed)))\n\n def _construct_decoder_weights_p(self):\n\n self.decoder_weights_p, self.decoder_biases_p = [], []\n\n for i, (d_in, d_out) in enumerate(zip(self.decoder_arch[:-1], self.decoder_arch[1:])):\n weight_key = \"decoder_weights_p_%d\" % i\n bias_key = \"decoder_bias_p_%d\" % i\n self.decoder_weights_p.append(tf.get_variable(name=weight_key, shape=[d_in, d_out],\n initializer=tf.contrib.layers.xavier_initializer(\n seed=self.random_seed)))\n self.decoder_biases_p.append(tf.get_variable(name=bias_key, shape=[d_out],\n initializer=tf.truncated_normal_initializer(\n stddev=0.001, seed=self.random_seed)))\n\n def get_predictive_rate(self, h_r, h_p, test_data):\n\n l_prime = np.multiply(test_data + np.exp(h_r), special.expit(h_p))\n\n return l_prime\n\n"
]
| [
[
"tensorflow.exp",
"tensorflow.shape",
"tensorflow.train.AdamOptimizer",
"tensorflow.truncated_normal_initializer",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.matmul",
"tensorflow.train.Saver",
"scipy.special.expit",
"numpy.exp",
"tensorflow.placeholder",
"tensorflow.reduce_sum",
"tensorflow.nn.tanh",
"tensorflow.reduce_mean",
"tensorflow.placeholder_with_default",
"tensorflow.nn.l2_normalize",
"tensorflow.nn.dropout"
]
]
|
pgraafstra/pastas | [
"c065059e1df5b6c8e4afeb5278de2ef70fdf726c"
]
| [
"pastas/read/waterbase.py"
]
| [
"\"\"\"\nThis file contains the import routine for import of groundwater observations\nfrom RWS Waterbase / WaterInfo database. (http://waterinfo.rws.nl/)\n\nAuthor: R.A. Collenteur, Artesia Water 2017\n\n\"\"\"\n\nfrom pandas import read_csv\n\nfrom ..timeseries import TimeSeries\n\n\ndef read_waterbase(fname, locations=None, variable=\"NUMERIEKEWAARDE\",\n kind=\"waterlevel\", freq=\"10min\"):\n \"\"\"Method to import waterlevel ts from waterbase.\n\n Parameters\n ----------\n fname: str\n string with the path and filename of the waterbase file.\n variable: str\n name of the variable to collect the time series from. Only one\n variable name is allowed.\n kind: str\n freq: str\n\n Returns\n -------\n ts: pastas.TimeSeries\n returns a Pastas TimeSeries object or a list of objects.\n\n Notes\n -----\n More information on the ts provided by the Waterbase database see:\n http://waterinfo.rws.nl/\n\n the xy-coordinates are calculates as the mean xy-coordinate in case these\n values are not unique.\n\n \"\"\"\n ts = []\n df = read_csv(fname, delimiter=\";\", index_col=\"Date\", decimal=\",\",\n usecols=[\"MEETPUNT_IDENTIFICATIE\", \"WAARNEMINGDATUM\",\n \"WAARNEMINGTIJD\", variable, \"EPSG\", \"X\", \"Y\"],\n parse_dates={\"Date\": [\"WAARNEMINGDATUM\", \"WAARNEMINGTIJD\"]},\n infer_datetime_format=True, dayfirst=True,\n na_values=[-999999999, 999999999])\n\n if locations is None:\n locations = df.MEETPUNT_IDENTIFICATIE.unique()\n elif isinstance(locations, str):\n locations = [locations]\n\n for name in locations:\n series = df.loc[df[\"MEETPUNT_IDENTIFICATIE\"].isin([name])]\n metadata = {\n \"x\": series.X.mean(),\n \"y\": series.Y.mean(),\n \"z\": 0,\n \"projection\": \"epsg:\" + str(series.loc[:, \"EPSG\"].unique()[0]),\n \"units\": \"cm\"\n }\n series = series.loc[:, variable].sort_index()\n ts.append(TimeSeries(series, name=name, metadata=metadata,\n settings=kind, freq_original=freq))\n\n if len(ts) == 1:\n ts = ts[0]\n\n return ts\n"
]
| [
[
"pandas.read_csv"
]
]
|
adityaiitb/pyprof2 | [
"b2ac33876a2ab5bbd41595f0692a0fc936e7d8b7"
]
| [
"pyprof2/prof/blas.py"
]
| [
"from collections import OrderedDict\nfrom utility import Utility\nfrom base import OperatorLayerBase\nimport numpy as np\n\nTC_GEMMS = [\"884gemm\", \"1688gemm\"]\n\nclass Addmm(OperatorLayerBase):\n\n\tdef __init__(self, d):\n\t\tmarker = eval(d.argMarker[0])\n\t\tmod = marker['mod']\n\t\top = marker['op']\n\t\targs = marker['args']\n\n\t\tself.marker = marker\n\t\tself.mod_ = mod\n\t\tself.op_ = op\n\t\tself.args = args\n\n\t\tassert (mod in [\"torch\", \"Tensor\",])\n\t\tassert (op in [\"addmm\", \"addmm_\",])\n\n\t\t# Get alpha and beta\n\t\talpha = 1\n\t\tbeta = 1\n\t\tif any(x['name'] == 'alpha' for x in args):\n\t\t\talpha = list(filter(lambda x : x['name'] == \"alpha\", args))[0]\n\t\t\talpha = alpha['value']\n\n\t\tif any(x['name'] == 'beta' for x in args):\n\t\t\tbeta = list(filter(lambda x : x['name'] == \"beta\", args))[0]\n\t\t\tbeta = beta['value']\n\n\t\tself.alpha = alpha\n\t\tself.beta = beta\n\n\t\t# Filter out named parameters\n\t\targs = list(filter(lambda x : x['name'] == '', args))\n\n\t\t# Filter out parameters which are not tensors\n\t\targs = list(filter(lambda x : x['type'] == 'tensor', args))\n\n\t\tassert (len(args) == 3)\n\t\tC,A,B = args\n\t\tm,k1 = A['shape']\n\t\tk2,n = B['shape']\n\t\tassert (k1 == k2)\n\t\tt1 = A['dtype']\n\t\tt2 = B['dtype']\n\t\tt3 = C['dtype']\n\t\tassert(t1 == t2 == t3)\n\n\t\tself.A = A\n\t\tself.B = B\n\t\tself.C = C\n\n\t\tself.m = m\n\t\tself.n = n\n\t\tself.k = k1\n\t\tself.type = t1\n\t\tself.name = d.name\n\n\t\treturn\n\n\tdef tc(self):\n for s in TC_GEMMS:\n if s in self.name:\n return 1\n return 0\n\n\tdef bytes(self):\n\t\tm, n, k = self.m, self.n, self.k\n\t\treturn Utility.typeToBytes(self.type) * (m*n + m*k + n*k)\n\n\tdef flops(self):\n\t\treturn self.m * self.n * self.k * 2\n\n\tdef op(self):\n\t\treturn self.op_\n\n\tdef mod(self):\n\t\treturn self.mod_\n\n\tdef params(self):\n\t\tp = OrderedDict([('M',self.n),('N',self.m),('K',self.k),('type',self.type)])\n\t\treturn p\n\nclass Bmm(OperatorLayerBase):\n\n\tdef __init__(self, d):\n\t\tmarker = eval(d.argMarker[0])\n\t\tmod = marker['mod']\n\t\top = marker['op']\n\t\targs = marker['args']\n\n\t\tself.marker = marker\n\t\tself.mod_ = mod\n\t\tself.op_ = op\n\t\tself.args = args\n\n\t\tassert (mod == \"torch\") and (op == \"bmm\")\n\n\t\t#Filter out named params (kwargs)\n\t\targs = list(filter(lambda x : x['name'] == \"\", args))\n\n\t\tassert (len(args) == 2)\n\t\tA,B = args\n\t\tb1,m,k1 = A['shape']\n\t\tb2,k2,n = B['shape']\n\t\tassert (b1 == b2)\n\t\tassert (k1 == k2)\n\t\tt1 = A['dtype']\n\t\tt2 = B['dtype']\n\t\tassert(t1 == t2)\n\n\t\tself.A = A\n\t\tself.B = B\n\t\tself.b = b1\n\t\tself.m = m\n\t\tself.n = n\n\t\tself.k = k1\n\t\tself.type = t1\n\t\tself.name = d.name\n\n\tdef tc(self):\n for s in TC_GEMMS:\n if s in self.name:\n return 1\n return 0\n\n\tdef params(self):\n\t\t#p = OrderedDict([('A', A['shape']), ('B', B['shape']), ('type', t1)])\n\t\tp = OrderedDict([('B',self.b), ('M',self.n),('N',self.m),('K',self.k),('type',self.type)])\n\t\treturn p\n\n\tdef flops(self):\n\t\treturn self.b * self.m * self.n * self.k * 2\n\n\tdef bytes(self):\n\t\tb, m, n, k = self.b, self.m, self.n, self.k\n\t\treturn Utility.typeToBytes(self.type) * b * (m*n + m*k + n*k)\n\n\tdef op(self):\n\t\treturn self.op_\n\n\tdef mod(self):\n\t\treturn self.mod_\n\nclass Matmul(OperatorLayerBase):\n\n\tNON_GEMM = [\"kernelPointwiseApply2\", \"reduce_1Block_kernel\", \"elementwise_kernel\", \"splitKreduce_kernel\"]\n\tNON_TC = NON_GEMM + [\"dot_kernel\"]\n\n\tdef __init__(self, 
d):\n\t\tmarker = eval(d.argMarker[0])\n\t\tmod = marker['mod']\n\t\top = marker['op']\n\t\targs = marker['args']\n\n\t\tself.marker = marker\n\t\tself.mod_ = mod\n\t\tself.op_ = op\n\t\tself.args = args\n\n\t\tself.name = d.name\n\t\tself.sub = d.sub\n\n\t\tassert ((mod == \"torch\") and (op == \"matmul\")) or ((mod == \"Tensor\") and (op == \"__matmul__\"))\n\t\tassert (len(args) == 2)\n\n\t\tassert any([x in d.name for x in Matmul.NON_TC + [\"gemm\", \"gemv\"]])\n\n\t\tA,B = args\n\t\tt1 = A['dtype']\n\t\tt2 = B['dtype']\n\t\tassert(t1 == t2)\n\n\t\tA = A['shape']\n\t\tB = B['shape']\n\n\t\tself.A = A\n\t\tself.B = B\n\t\tself.type = t1\n\n\t\t# batch, MNK\n\t\tif (len(A) == 1) and (len(B) == 1):\n\t\t\t#dot product\n\t\t\tassert (A[0] == B[0])\n\t\t\tself.b = (1,)\n\t\t\tself.m = 1\n\t\t\tself.n = 1\n\t\t\tself.k = A[0]\n\n\t\telif (len(A) == 2) and (len(B) == 2):\n\t\t\t#gemm\n\t\t\tm,k1 = A\n\t\t\tk2,n = B\n\t\t\tassert(k1 == k2)\n\t\t\tself.b = (1,)\n\t\t\tself.m = m\n\t\t\tself.n = n\n\t\t\tself.k = k1\n\n\t\telif (len(A) == 1) and (len(B) == 2):\n\t\t\t#vector matrix\n\t\t\tk1 = A[0]\n\t\t\tk2,n = B\n\t\t\tassert(k1 == k2)\n\n\t\t\tself.b = (1,)\n\t\t\tself.m = 1\n\t\t\tself.n = n\n\t\t\tself.k = k1\n\n\t\telif (len(A) == 2) and (len(B) == 1):\n\t\t\t#gemv\n\t\t\tm,k1 = A\n\t\t\tk2 = B[0]\n\t\t\tassert (k1 == k2)\n\n\t\t\tself.b = (1,)\n\t\t\tself.m = m\n\t\t\tself.n = 1\n\t\t\tself.k = k1\n\n\t\telif (len(A) == 1) and (len(B) > 2):\n\t\t\tassert (A[0] == B[-2])\n\n\t\t\tself.b = B[0:-2]\n\t\t\tself.m = 1\n\t\t\tself.n = B[-1]\n\t\t\tself.k = B[-2]\n\n\t\telif (len(B) == 1) and (len(A) > 2):\n\t\t\tassert (B[0] == A[-1])\n\n\t\t\tself.b = A[0:-2]\n\t\t\tself.m = A[-2]\n\t\t\tself.n = 1\n\t\t\tself.k = A[-1]\n\n\t\telse:\n\t\t\tassert (len(A) >= 2)\n\t\t\tassert (len(B) >= 2)\n\t\t\tassert (A[-1] == B[-2])\n\t\t\tself.m = A[-2]\n\t\t\tself.n = B[-1]\n\t\t\tself.k = A[-1]\n\n\t\t\taa = np.empty(A[0:-2])\n\t\t\tbb = np.empty(B[0:-2])\n\t\t\tself.b = np.broadcast(aa, bb).shape\n\n\tdef params(self):\n\t\treturn OrderedDict([('A', self.A), ('B', self.B), ('type', self.type)])\n\n\tdef tc(self):\n\t\tif self.name in Matmul.NON_TC:\n\t\t\treturn \"-\"\n\t\telse:\n for s in TC_GEMMS:\n if s in self.name:\n return 1\n return 0\n\n\tdef bytes(self):\n\t\t# TODO: check bytes for non-GEMM cases\n\t\tif self.name in Matmul.NON_GEMM:\n\t\t\treturn 2 * Utility.typeToBytes(self.type) * Utility.numElems(self.A) #could be B as well\n\t\telse:\n\t\t\tm, n, k = self.m, self.n, self.k\n\t\t\treturn Utility.typeToBytes(self.type) * (m*n + m*k + n*k)\n\n\tdef flops(self):\n\t\t# TODO: calculate actual FLOPs. 
At least we're not saying it's GEMM FLOPs for now.\n\t\tif self.name in Matmul.NON_GEMM:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn Utility.numElems(self.b) * self.m * self.n * self.k * 2\n\n\tdef op(self):\n\t\treturn self.op_\n\n\tdef mod(self):\n\t\treturn self.mod_\n\nclass Mm(OperatorLayerBase):\n\n\tdef __init__(self, d):\n\t\tmarker = eval(d.argMarker[0])\n\t\tmod = marker['mod']\n\t\top = marker['op']\n\t\targs = marker['args']\n\n\t\tself.marker = marker\n\t\tself.mod_ = mod\n\t\tself.op_ = op\n\t\tself.args = args\n\n\t\tassert (mod == \"torch\") and (op == \"mm\")\n\t\tassert (len(args) == 2)\n\n\t\tA,B = args\n\t\tm,k1 = A['shape']\n\t\tk2,n = B['shape']\n\t\tassert (k1 == k2)\n\t\tt1 = A['dtype']\n\t\tt2 = B['dtype']\n\t\tassert(t1 == t2)\n\n\t\tself.A = A\n\t\tself.B = B\n\t\tself.m = m\n\t\tself.n = n\n\t\tself.k = k1\n\t\tself.type = t1\n\t\tself.name = d.name\n\n\t\treturn\n\n\tdef params(self):\n\t\tp = OrderedDict([('M',self.n),('N',self.m),('K',self.k),('type',self.type)])\n\t\treturn p\n\n\tdef tc(self):\n for s in TC_GEMMS:\n if s in self.name:\n return 1\n return 0\n\n\tdef bytes(self):\n\t\tm, n, k = self.m, self.n, self.k\n\t\treturn Utility.typeToBytes(self.type) * (m*n + m*k + n*k)\n\n\tdef flops(self):\n\t\treturn self.m * self.n * self.k * 2\n\n\tdef op(self):\n\t\treturn self.op_\n\n\tdef mod(self):\n\t\treturn self.mod_\n"
]
| [
[
"numpy.empty",
"numpy.broadcast"
]
]
|
pvk-developer/ATM | [
"959751fb9246963c56bf77bc92d00fcadd062d6a"
]
| [
"atm/metrics.py"
]
| [
"from __future__ import absolute_import, division, unicode_literals\n\nfrom builtins import range\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import (\n accuracy_score, average_precision_score, cohen_kappa_score, f1_score, matthews_corrcoef,\n precision_recall_curve, roc_auc_score, roc_curve)\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom atm.constants import METRICS_BINARY, METRICS_MULTICLASS, N_FOLDS_DEFAULT, Metrics\n\n\ndef rank_n_accuracy(y_true, y_prob_mat, n=0.33):\n \"\"\"\n Compute how often the true label is one of the top n predicted classes\n for each training example.\n If n is an integer, consider the top n predictions for each example.\n If n is a float, it represents a proportion of the top predictions.\n This metric is only really useful when the total number of classes is large.\n \"\"\"\n n_classes = y_prob_mat.shape[1]\n if n < 1:\n # round to nearest int before casting\n n = int(round(n_classes * n))\n\n # sort the rankings in descending order, then take the top n\n rankings = np.argsort(-y_prob_mat)\n rankings = rankings[:, :n]\n\n num_samples = len(y_true)\n correct_sample_count = 0.0 # force floating point math\n\n for i in range(num_samples):\n if y_true[i] in rankings[i, :]:\n correct_sample_count += 1\n\n return int(correct_sample_count / num_samples)\n\n\ndef get_per_class_matrix(y, classes=None):\n \"\"\"\n Create a (num_classes x num_examples) binary matrix representation of the\n true and predicted y values.\n If classes is None, class values will be extracted from y. Values that are\n not present at all will not receive a column -- this is to allow computation\n of per-class roc_auc scores without error.\n \"\"\"\n classes = classes or np.unique(y)\n y_bin = np.zeros((len(y), len(classes)))\n for i, cls in enumerate(classes):\n y_bin[:, i] = (y == cls).astype(int)\n return y_bin\n\n\ndef get_pr_roc_curves(y_true, y_pred_probs):\n \"\"\"\n Compute precision/recall and receiver operating characteristic metrics for a\n binary class label.\n\n y_true: series of true class labels (only 1 or 0)\n y_pred_probs: series of probabilities generated by the model for the label\n class 1\n \"\"\"\n results = {}\n roc = roc_curve(y_true, y_pred_probs, pos_label=1)\n results[Metrics.ROC_CURVE] = {\n 'fprs': list(roc[0]),\n 'tprs': list(roc[1]),\n 'thresholds': list(roc[2]),\n }\n\n pr = precision_recall_curve(y_true, y_pred_probs, pos_label=1)\n results[Metrics.PR_CURVE] = {\n 'precisions': list(pr[0]),\n 'recalls': list(pr[1]),\n 'thresholds': list(pr[2]),\n }\n\n return results\n\n\ndef get_metrics_binary(y_true, y_pred, y_pred_probs, include_curves=False):\n results = {\n Metrics.ACCURACY: accuracy_score(y_true, y_pred),\n Metrics.COHEN_KAPPA: cohen_kappa_score(y_true, y_pred),\n Metrics.F1: f1_score(y_true, y_pred),\n Metrics.MCC: matthews_corrcoef(y_true, y_pred),\n Metrics.ROC_AUC: np.nan,\n Metrics.AP: np.nan,\n }\n\n # if possible, compute PR and ROC curve metrics\n all_labels_same = len(np.unique(y_true)) == 1\n any_probs_nan = np.any(np.isnan(y_pred_probs))\n if not any_probs_nan:\n # AP can be computed even if all labels are the same\n y_true_bin = get_per_class_matrix(y_true, list(range(2)))\n results[Metrics.AP] = average_precision_score(y_true_bin, y_pred_probs)\n\n if not all_labels_same:\n results[Metrics.ROC_AUC] = roc_auc_score(y_true_bin, y_pred_probs)\n\n # if necessary, compute point-by-point precision/recall and ROC curve data\n if include_curves:\n results.update(get_pr_roc_curves(y_true, y_pred_probs[:, 1]))\n\n 
return results\n\n\ndef get_metrics_multiclass(y_true, y_pred, y_pred_probs,\n include_per_class=False, include_curves=False):\n results = {\n Metrics.ACCURACY: accuracy_score(y_true, y_pred),\n Metrics.COHEN_KAPPA: cohen_kappa_score(y_true, y_pred),\n Metrics.F1_MICRO: f1_score(y_true, y_pred, average='micro'),\n Metrics.F1_MACRO: f1_score(y_true, y_pred, average='macro'),\n Metrics.ROC_AUC_MICRO: np.nan,\n Metrics.ROC_AUC_MACRO: np.nan,\n Metrics.RANK_ACCURACY: np.nan,\n }\n\n # this parameter is most relevant for datasets with high-cardinality\n # labels (lots of poosible values)\n # TODO: make the rank parameter configurable\n results[Metrics.RANK_ACCURACY] = rank_n_accuracy(y_true=y_true,\n y_prob_mat=y_pred_probs)\n\n # if possible, compute multi-label AUC metrics\n present_classes = np.unique(y_true)\n all_labels_same = len(present_classes) == 1\n any_probs_nan = np.any(np.isnan(y_pred_probs))\n if not (all_labels_same or any_probs_nan):\n # get binary label matrix, ignoring classes that aren't present\n y_true_bin = get_per_class_matrix(y_true)\n\n # filter out probabilities for classes that aren't in this sample\n filtered_probs = y_pred_probs[:, present_classes]\n\n # actually compute roc_auc score\n results[Metrics.ROC_AUC_MICRO] = roc_auc_score(y_true_bin,\n filtered_probs,\n average='micro')\n results[Metrics.ROC_AUC_MACRO] = roc_auc_score(y_true_bin,\n filtered_probs,\n average='macro')\n\n # TODO: multi-label AP metrics?\n\n # labelwise controls whether to compute separate metrics for each posisble label\n if include_per_class or include_curves:\n results['class_wise'] = {}\n\n # create binary matrices, including classes that aren't actually present\n all_classes = list(range(y_pred_probs.shape[1]))\n y_true_bin = get_per_class_matrix(y_true, classes=all_classes)\n y_pred_bin = get_per_class_matrix(y_pred, classes=all_classes)\n\n # for each possible class, generate F1, precision-recall, and ROC scores\n # using the binary metrics function.\n for cls in all_classes:\n class_pred_probs = np.column_stack((1 - y_pred_probs[:, cls],\n y_pred_probs[:, cls]))\n class_res = get_metrics_binary(y_true=y_true_bin[:, cls],\n y_pred=y_pred_bin[:, cls],\n y_pred_probs=class_pred_probs,\n include_curves=include_curves)\n results['class_wise'][cls] = class_res\n\n return results\n\n\ndef test_pipeline(pipeline, X, y, binary, **kwargs):\n if binary:\n get_metrics = get_metrics_binary\n else:\n get_metrics = get_metrics_multiclass\n\n # run the test data through the trained pipeline\n y_pred = pipeline.predict(X)\n\n # if necessary (i.e. 
if a pipeline does not produce probability scores by\n # default), use class distance scores in lieu of probability scores\n method = pipeline.steps[-1][0]\n if method in ['sgd', 'pa']:\n if binary:\n class_1_distance = pipeline.decision_function(X)\n class_0_distance = -class_1_distance\n y_pred_probs = np.column_stack((class_0_distance, class_1_distance))\n else:\n y_pred_probs = pipeline.decision_function(X)\n else:\n y_pred_probs = pipeline.predict_proba(X)\n\n return get_metrics(y, y_pred, y_pred_probs, **kwargs)\n\n\ndef cross_validate_pipeline(pipeline, X, y, binary=True,\n n_folds=N_FOLDS_DEFAULT, **kwargs):\n \"\"\"\n Compute metrics for each of `n_folds` folds of the training data in (X, y).\n\n pipeline: the sklearn Pipeline to train and test\n X: feature matrix\n y: series of labels corresponding to rows in X\n binary: whether the label is binary or multi-ary\n n_folds: number of non-overlapping \"folds\" of the data to make for\n cross-validation\n \"\"\"\n if binary:\n metrics = METRICS_BINARY\n else:\n metrics = METRICS_MULTICLASS\n\n df = pd.DataFrame(columns=metrics)\n results = []\n\n # TODO: how to handle classes that are so uncommon that stratified sampling\n # doesn't work? i.e. len([c for c in y if c == some_class]) < n_folds\n skf = StratifiedKFold(n_splits=n_folds)\n skf.get_n_splits(X, y)\n\n for train_index, test_index in skf.split(X, y):\n pipeline.fit(X[train_index], y[train_index])\n split_results = test_pipeline(pipeline=pipeline,\n X=X[test_index],\n y=y[test_index],\n binary=binary, **kwargs)\n df = df.append([{m: split_results.get(m) for m in metrics}])\n results.append(split_results)\n\n return df, results\n"
]
| [
[
"numpy.isnan",
"sklearn.model_selection.StratifiedKFold",
"sklearn.metrics.precision_recall_curve",
"pandas.DataFrame",
"sklearn.metrics.matthews_corrcoef",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.average_precision_score",
"numpy.argsort",
"numpy.column_stack",
"sklearn.metrics.f1_score",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.cohen_kappa_score",
"numpy.unique",
"sklearn.metrics.roc_curve"
]
]
|
qscgy/TransMorph_Transformer_for_Medical_Image_Registration | [
"9abfc4387e51667e1c8860d4ce37658325e75b5a",
"9abfc4387e51667e1c8860d4ce37658325e75b5a"
]
| [
"IXI/Baseline_Transformers/models/PVT.py",
"IXI/Baseline_Transformers/train_CoTr.py"
]
| [
"'''\r\nPyramid vision transformer for Image Registration\r\n\r\nPaper:\r\nChen, J., Du, Y., He, Y., Segars, P. W., Li, Y., & Frey, E. C. (2021).\r\nTransMorph: Transformer for Unsupervised Medical Image Registration. arXiv preprint arXiv:2111.10480.\r\n\r\nOriginal PVT code was retrieved from:\r\nhttps://github.com/whai362/PVT\r\n\r\nOriginal PVT paper:\r\nWang, W., Xie, E., Li, X., Fan, D. P., Song, K., Liang, D., ... & Shao, L. (2021).\r\nPyramid vision transformer: A versatile backbone for dense prediction without convolutions.\r\narXiv preprint arXiv:2102.12122.\r\n\r\nJunyu Chen\r\[email protected]\r\nJohns Hopkins University\r\n'''\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom functools import partial\r\n\r\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_, to_3tuple\r\nfrom timm.models.registry import register_model\r\nfrom timm.models.vision_transformer import _cfg\r\nimport models.configs_PVT as configs\r\nfrom torch.distributions.normal import Normal\r\nimport torch.nn.functional as nnf\r\nimport sys\r\n\r\nclass Mlp(nn.Module):\r\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\r\n super().__init__()\r\n out_features = out_features or in_features\r\n hidden_features = hidden_features or in_features\r\n self.fc1 = nn.Linear(in_features, hidden_features)\r\n self.act = act_layer()\r\n self.fc2 = nn.Linear(hidden_features, out_features)\r\n self.drop = nn.Dropout(drop)\r\n\r\n def forward(self, x):\r\n x = self.fc1(x)\r\n x = self.act(x)\r\n x = self.drop(x)\r\n x = self.fc2(x)\r\n x = self.drop(x)\r\n return x\r\n\r\n\r\nclass Attention(nn.Module):\r\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):\r\n super().__init__()\r\n assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\r\n\r\n self.dim = dim\r\n self.num_heads = num_heads\r\n head_dim = dim // num_heads\r\n self.scale = qk_scale or head_dim ** -0.5\r\n\r\n self.q = nn.Linear(dim, dim, bias=qkv_bias)\r\n self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)\r\n self.attn_drop = nn.Dropout(attn_drop)\r\n self.proj = nn.Linear(dim, dim)\r\n self.proj_drop = nn.Dropout(proj_drop)\r\n\r\n self.sr_ratio = sr_ratio\r\n if sr_ratio > 1:\r\n self.sr = nn.Conv3d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)\r\n self.norm = nn.LayerNorm(dim)\r\n\r\n def forward(self, x, H, W, L):\r\n B, N, C = x.shape\r\n q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\r\n\r\n if self.sr_ratio > 1:\r\n x_ = x.permute(0, 2, 1).reshape(B, C, H, W, L)\r\n x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)\r\n x_ = self.norm(x_)\r\n kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n else:\r\n kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n k, v = kv[0], kv[1]\r\n\r\n attn = (q @ k.transpose(-2, -1)) * self.scale\r\n attn = attn.softmax(dim=-1)\r\n attn = self.attn_drop(attn)\r\n\r\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\r\n x = self.proj(x)\r\n x = self.proj_drop(x)\r\n\r\n return x\r\n\r\n\r\nclass Block(nn.Module):\r\n\r\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\r\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):\r\n super().__init__()\r\n self.norm1 = norm_layer(dim)\r\n self.attn = Attention(\r\n dim,\r\n 
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\r\n attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)\r\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\r\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\r\n self.norm2 = norm_layer(dim)\r\n mlp_hidden_dim = int(dim * mlp_ratio)\r\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n def forward(self, x, H, W, L):\r\n x = x + self.drop_path(self.attn(self.norm1(x), H, W, L))\r\n x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n\r\n return x\r\n\r\n\r\nclass PatchEmbed(nn.Module):\r\n \"\"\" Image to Patch Embedding\r\n \"\"\"\r\n\r\n def __init__(self, img_size=(160, 192, 224), patch_size=16, in_chans=3, embed_dim=768):\r\n super().__init__()\r\n patch_size = to_3tuple(patch_size)\r\n\r\n self.img_size = img_size\r\n self.patch_size = patch_size\r\n assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0 and img_size[2] % patch_size[2] == 0, \\\r\n f\"img_size {img_size} should be divided by patch_size {patch_size}.\"\r\n self.H, self.W, self.L = img_size[0] // patch_size[0], img_size[1] // patch_size[1], img_size[2] // patch_size[2]\r\n self.num_patches = self.H * self.W * self.L\r\n self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\r\n self.norm = nn.LayerNorm(embed_dim)\r\n\r\n def forward(self, x):\r\n B, C, H, W, L = x.shape\r\n\r\n x = self.proj(x).flatten(2).transpose(1, 2)\r\n x = self.norm(x)\r\n H, W, L = H // self.patch_size[0], W // self.patch_size[1], L // self.patch_size[2]\r\n\r\n return x, (H, W, L)\r\n\r\n\r\nclass PyramidVisionTransformer(nn.Module):\r\n def __init__(self, img_size=(160, 192, 224), patch_size=16, in_chans=2, num_classes=1000, embed_dims=(64, 128, 256, 512),\r\n num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), qkv_bias=False, qk_scale=None, drop_rate=0.,\r\n attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,\r\n depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), F4=False):\r\n super().__init__()\r\n self.num_classes = num_classes\r\n self.depths = depths\r\n self.F4 = F4\r\n\r\n # patch_embed\r\n self.patch_embed1 = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans,\r\n embed_dim=embed_dims[0])\r\n self.patch_embed2 = PatchEmbed(img_size=(img_size[0] // 4, img_size[1] // 4, img_size[2] // 4), patch_size=2, in_chans=embed_dims[0],\r\n embed_dim=embed_dims[1])\r\n self.patch_embed3 = PatchEmbed(img_size=(img_size[0] // 8, img_size[1] // 8, img_size[2] // 8), patch_size=2, in_chans=embed_dims[1],\r\n embed_dim=embed_dims[2])\r\n self.patch_embed4 = PatchEmbed(img_size=(img_size[0] // 16, img_size[1] // 16, img_size[2] // 16), patch_size=2, in_chans=embed_dims[2],\r\n embed_dim=embed_dims[3])\r\n\r\n # pos_embed\r\n self.pos_embed1 = nn.Parameter(torch.zeros(1, self.patch_embed1.num_patches, embed_dims[0]))\r\n self.pos_drop1 = nn.Dropout(p=drop_rate)\r\n self.pos_embed2 = nn.Parameter(torch.zeros(1, self.patch_embed2.num_patches, embed_dims[1]))\r\n self.pos_drop2 = nn.Dropout(p=drop_rate)\r\n self.pos_embed3 = nn.Parameter(torch.zeros(1, self.patch_embed3.num_patches, embed_dims[2]))\r\n self.pos_drop3 = nn.Dropout(p=drop_rate)\r\n self.pos_embed4 = nn.Parameter(torch.zeros(1, self.patch_embed4.num_patches + 1, embed_dims[3]))\r\n self.pos_drop4 = nn.Dropout(p=drop_rate)\r\n\r\n # transformer encoder\r\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic 
depth decay rule\r\n cur = 0\r\n self.block1 = nn.ModuleList([Block(\r\n dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,\r\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,\r\n sr_ratio=sr_ratios[0])\r\n for i in range(depths[0])])\r\n\r\n cur += depths[0]\r\n self.block2 = nn.ModuleList([Block(\r\n dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,\r\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,\r\n sr_ratio=sr_ratios[1])\r\n for i in range(depths[1])])\r\n\r\n cur += depths[1]\r\n self.block3 = nn.ModuleList([Block(\r\n dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,\r\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,\r\n sr_ratio=sr_ratios[2])\r\n for i in range(depths[2])])\r\n\r\n cur += depths[2]\r\n self.block4 = nn.ModuleList([Block(\r\n dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,\r\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,\r\n sr_ratio=sr_ratios[3])\r\n for i in range(depths[3])])\r\n\r\n # init weights\r\n trunc_normal_(self.pos_embed1, std=.02)\r\n trunc_normal_(self.pos_embed2, std=.02)\r\n trunc_normal_(self.pos_embed3, std=.02)\r\n trunc_normal_(self.pos_embed4, std=.02)\r\n self.apply(self._init_weights)\r\n\r\n def reset_drop_path(self, drop_path_rate):\r\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]\r\n cur = 0\r\n for i in range(self.depths[0]):\r\n self.block1[i].drop_path.drop_prob = dpr[cur + i]\r\n\r\n cur += self.depths[0]\r\n for i in range(self.depths[1]):\r\n self.block2[i].drop_path.drop_prob = dpr[cur + i]\r\n\r\n cur += self.depths[1]\r\n for i in range(self.depths[2]):\r\n self.block3[i].drop_path.drop_prob = dpr[cur + i]\r\n\r\n cur += self.depths[2]\r\n for i in range(self.depths[3]):\r\n self.block4[i].drop_path.drop_prob = dpr[cur + i]\r\n\r\n def _init_weights(self, m):\r\n if isinstance(m, nn.Linear):\r\n trunc_normal_(m.weight, std=.02)\r\n if isinstance(m, nn.Linear) and m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m, nn.LayerNorm):\r\n nn.init.constant_(m.bias, 0)\r\n nn.init.constant_(m.weight, 1.0)\r\n\r\n def _get_pos_embed(self, pos_embed, patch_embed, H, W, L):\r\n if H * W * L == self.patch_embed1.num_patches:\r\n return pos_embed\r\n else:\r\n return F.interpolate(\r\n pos_embed.reshape(1, patch_embed.H, patch_embed.W, patch_embed.L, -1).permute(0, 4, 1, 2, 3),\r\n size=(H, W, L), mode=\"trilinear\").reshape(1, -1, H * W * L).permute(0, 2, 1)\r\n\r\n def forward_features(self, x):\r\n outs = []\r\n\r\n B = x.shape[0]\r\n\r\n # stage 1\r\n x, (H, W, L) = self.patch_embed1(x)\r\n pos_embed1 = self._get_pos_embed(self.pos_embed1, self.patch_embed1, H, W, L)\r\n x = x + pos_embed1\r\n x = self.pos_drop1(x)\r\n for blk in self.block1:\r\n x = blk(x, H, W, L)\r\n x = x.reshape(B, H, W, L, -1).permute(0, 4, 1, 2, 3).contiguous()\r\n outs.append(x)\r\n\r\n # stage 2\r\n x, (H, W, L) = self.patch_embed2(x)\r\n pos_embed2 = self._get_pos_embed(self.pos_embed2, self.patch_embed2, H, W, L)\r\n x = x + pos_embed2\r\n x = self.pos_drop2(x)\r\n for blk in self.block2:\r\n x = blk(x, H, W, L)\r\n x = x.reshape(B, H, W, L, -1).permute(0, 4, 1, 2, 3).contiguous()\r\n outs.append(x)\r\n\r\n # stage 
3\r\n x, (H, W, L) = self.patch_embed3(x)\r\n pos_embed3 = self._get_pos_embed(self.pos_embed3, self.patch_embed3, H, W, L)\r\n x = x + pos_embed3\r\n x = self.pos_drop3(x)\r\n for blk in self.block3:\r\n x = blk(x, H, W, L)\r\n x = x.reshape(B, H, W, L, -1).permute(0, 4, 1, 2, 3).contiguous()\r\n outs.append(x)\r\n\r\n # stage 4\r\n x, (H, W, L) = self.patch_embed4(x)\r\n pos_embed4 = self._get_pos_embed(self.pos_embed4[:, 1:], self.patch_embed4, H, W, L)\r\n x = x + pos_embed4\r\n x = self.pos_drop4(x)\r\n for blk in self.block4:\r\n x = blk(x, H, W, L)\r\n x = x.reshape(B, H, W, L, -1).permute(0, 4, 1, 2, 3).contiguous()\r\n outs.append(x)\r\n\r\n return outs\r\n\r\n def forward(self, x):\r\n x = self.forward_features(x)\r\n\r\n if self.F4:\r\n x = x[3:4]\r\n\r\n return x\r\n\r\n\r\ndef _conv_filter(state_dict, patch_size=16):\r\n \"\"\" convert patch embedding weight from manual patchify + linear proj to conv\"\"\"\r\n out_dict = {}\r\n for k, v in state_dict.items():\r\n if 'patch_embed.proj.weight' in k:\r\n v = v.reshape((v.shape[0], 3, patch_size, patch_size))\r\n out_dict[k] = v\r\n\r\n return out_dict\r\n\r\nclass Conv3dReLU(nn.Sequential):\r\n def __init__(\r\n self,\r\n in_channels,\r\n out_channels,\r\n kernel_size,\r\n padding=0,\r\n stride=1,\r\n use_batchnorm=True,\r\n ):\r\n conv = nn.Conv3d(\r\n in_channels,\r\n out_channels,\r\n kernel_size,\r\n stride=stride,\r\n padding=padding,\r\n bias=False,\r\n )\r\n relu = nn.LeakyReLU(inplace=True)\r\n if not use_batchnorm:\r\n nm = nn.InstanceNorm3d(out_channels)\r\n else:\r\n nm = nn.BatchNorm3d(out_channels)\r\n\r\n super(Conv3dReLU, self).__init__(conv, nm, relu)\r\n\r\n\r\nclass DecoderBlock(nn.Module):\r\n def __init__(\r\n self,\r\n in_channels,\r\n out_channels,\r\n skip_channels=0,\r\n use_batchnorm=True,\r\n ):\r\n super().__init__()\r\n self.conv1 = Conv3dReLU(\r\n in_channels + skip_channels,\r\n out_channels,\r\n kernel_size=3,\r\n padding=1,\r\n use_batchnorm=use_batchnorm,\r\n )\r\n self.conv2 = Conv3dReLU(\r\n out_channels,\r\n out_channels,\r\n kernel_size=3,\r\n padding=1,\r\n use_batchnorm=use_batchnorm,\r\n )\r\n self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)\r\n\r\n def forward(self, x, skip=None):\r\n x = self.up(x)\r\n if skip is not None:\r\n x = torch.cat([x, skip], dim=1)\r\n x = self.conv1(x)\r\n x = self.conv2(x)\r\n return x\r\n\r\nclass RegistrationHead(nn.Sequential):\r\n def __init__(self, in_channels, out_channels, kernel_size=3, upsampling=1):\r\n conv3d = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2)\r\n conv3d.weight = nn.Parameter(Normal(0, 1e-5).sample(conv3d.weight.shape))\r\n conv3d.bias = nn.Parameter(torch.zeros(conv3d.bias.shape))\r\n super().__init__(conv3d)\r\n\r\nclass SpatialTransformer(nn.Module):\r\n \"\"\"\r\n N-D Spatial Transformer\r\n\r\n Obtained from https://github.com/voxelmorph/voxelmorph\r\n \"\"\"\r\n\r\n def __init__(self, size, mode='bilinear'):\r\n super().__init__()\r\n\r\n self.mode = mode\r\n\r\n # create sampling grid\r\n vectors = [torch.arange(0, s) for s in size]\r\n grids = torch.meshgrid(vectors)\r\n grid = torch.stack(grids)\r\n grid = torch.unsqueeze(grid, 0)\r\n grid = grid.type(torch.FloatTensor)\r\n\r\n # registering the grid as a buffer cleanly moves it to the GPU, but it also\r\n # adds it to the state dict. this is annoying since everything in the state dict\r\n # is included when saving weights to disk, so the model files are way bigger\r\n # than they need to be. 
so far, there does not appear to be an elegant solution.\r\n # see: https://discuss.pytorch.org/t/how-to-register-buffer-without-polluting-state-dict\r\n self.register_buffer('grid', grid)\r\n\r\n def forward(self, src, flow):\r\n # new locations\r\n new_locs = self.grid + flow\r\n shape = flow.shape[2:]\r\n\r\n # need to normalize grid values to [-1, 1] for resampler\r\n for i in range(len(shape)):\r\n new_locs[:, i, ...] = 2 * (new_locs[:, i, ...] / (shape[i] - 1) - 0.5)\r\n\r\n # move channels dim to last position\r\n # also not sure why, but the channels need to be reversed\r\n if len(shape) == 2:\r\n new_locs = new_locs.permute(0, 2, 3, 1)\r\n new_locs = new_locs[..., [1, 0]]\r\n elif len(shape) == 3:\r\n new_locs = new_locs.permute(0, 2, 3, 4, 1)\r\n new_locs = new_locs[..., [2, 1, 0]]\r\n\r\n return nnf.grid_sample(src, new_locs, align_corners=True, mode=self.mode)\r\n\r\nclass PVTVNetSkip(nn.Module):\r\n def __init__(self, config):\r\n super(PVTVNetSkip, self).__init__()\r\n if_convskip = config.if_convskip\r\n self.if_convskip = if_convskip\r\n if_transskip = config.if_transskip\r\n self.if_transskip = if_transskip\r\n embed_dims = config.embed_dims\r\n self.transformer = PyramidVisionTransformer(img_size=config.img_size,\r\n patch_size=config.patch_size,\r\n embed_dims=config.embed_dims,\r\n depths=config.depths,\r\n num_heads=config.num_heads,\r\n mlp_ratios=config.mlp_ratios,\r\n qkv_bias=config.qkv_bias,\r\n drop_rate=config.drop_rate,\r\n drop_path_rate=config.drop_path_rate,\r\n sr_ratios=config.sr_ratios,)\r\n self.up1 = DecoderBlock(embed_dims[-1], embed_dims[-2], skip_channels=embed_dims[-2] if if_transskip else 0, use_batchnorm=False) # 384, 20, 20, 64\r\n self.up2 = DecoderBlock(embed_dims[-2], embed_dims[-3], skip_channels=embed_dims[-3] if if_transskip else 0, use_batchnorm=False) # 384, 40, 40, 64\r\n self.up3 = DecoderBlock(embed_dims[-3], embed_dims[-4], skip_channels=embed_dims[-4] if if_convskip else 0, use_batchnorm=False) # 384, 80, 80, 128\r\n self.up4 = DecoderBlock(embed_dims[-4], 16, skip_channels=16 if if_convskip else 0, use_batchnorm=False) # 384, 160, 160, 256\r\n self.up5 = DecoderBlock(16, config.reg_head_chan, skip_channels=config.reg_head_chan if if_convskip else 0, use_batchnorm=False) # 384, 160, 160, 256\r\n self.c1 = Conv3dReLU(2, config.reg_head_chan, 3, 1, use_batchnorm=False)\r\n self.c2 = Conv3dReLU(2, config.reg_head_chan, 3, 1, use_batchnorm=False)\r\n self.reg_head = RegistrationHead(\r\n in_channels=config.reg_head_chan,\r\n out_channels=3,\r\n kernel_size=3,\r\n )\r\n self.spatial_trans = SpatialTransformer(config.img_size)\r\n self.avg_pool = nn.AvgPool3d(3, stride=2, padding=1)\r\n\r\n def forward(self, x):\r\n source = x[:, 0:1, :, :]\r\n if self.if_convskip:\r\n x_s0 = x.clone()\r\n x_s1 = self.avg_pool(x)\r\n f4 = self.c1(x_s1)\r\n f5 = self.c2(x_s0)\r\n else:\r\n f4 = None\r\n f5 = None\r\n out = self.transformer(x)\r\n if self.if_transskip:\r\n f1 = out[-2]\r\n f2 = out[-3]\r\n f3 = out[-4]\r\n else:\r\n f1 = None\r\n f2 = None\r\n f3 = None\r\n x = self.up1(out[-1], f1)\r\n x = self.up2(x, f2)\r\n x = self.up3(x, f3)\r\n x = self.up4(x, f4)\r\n x = self.up5(x, f5)\r\n flow = self.reg_head(x)\r\n out = self.spatial_trans(source, flow)\r\n return out, flow\r\n\r\n\r\nCONFIGS = {\r\n 'PVT-Net': configs.get_3DPVTNet_config(),\r\n}",
"from torch.utils.tensorboard import SummaryWriter\r\nimport os, utils, glob, losses\r\nimport sys\r\nfrom torch.utils.data import DataLoader\r\nfrom data import datasets, trans\r\nimport numpy as np\r\nimport torch\r\nfrom torchvision import transforms\r\nfrom torch import optim\r\nimport torch.nn as nn\r\nimport matplotlib.pyplot as plt\r\nfrom natsort import natsorted\r\nfrom models.CoTr.network_architecture.ResTranUnet import ResTranUnet as CoTr\r\n\r\nclass Logger(object):\r\n def __init__(self, save_dir):\r\n self.terminal = sys.stdout\r\n self.log = open(save_dir+\"logfile.log\", \"a\")\r\n\r\n def write(self, message):\r\n self.terminal.write(message)\r\n self.log.write(message)\r\n\r\n def flush(self):\r\n pass\r\n\r\ndef main():\r\n batch_size = 1\r\n atlas_dir = 'Path_to_IXI_data/atlas.pkl'\r\n train_dir = 'Path_to_IXI_data/Train/'\r\n val_dir = 'Path_to_IXI_data/Val/'\r\n weights = [1, 1]\r\n img_size = (160, 192, 224)\r\n save_dir = 'CoTr_ncc_{}_diffusion_{}/'.format(weights[0], weights[1])\r\n if not os.path.exists('experiments/' + save_dir):\r\n os.makedirs('experiments/' + save_dir)\r\n if not os.path.exists('logs/' + save_dir):\r\n os.makedirs('logs/' + save_dir)\r\n sys.stdout = Logger('logs/' + save_dir)\r\n lr = 0.0001\r\n epoch_start = 0\r\n max_epoch = 500\r\n cont_training = False\r\n\r\n '''\r\n Initialize model\r\n '''\r\n model = CoTr().cuda()\r\n model.cuda()\r\n\r\n '''\r\n Initialize spatial transformation function\r\n '''\r\n reg_model = utils.register_model(img_size, 'nearest')\r\n reg_model.cuda()\r\n reg_model_bilin = utils.register_model(img_size, 'bilinear')\r\n reg_model_bilin.cuda()\r\n\r\n '''\r\n If continue from previous training\r\n '''\r\n if cont_training:\r\n epoch_start = 0\r\n model_dir = 'experiments/'+save_dir\r\n updated_lr = round(lr * np.power(1 - (epoch_start) / max_epoch,0.9),8)\r\n best_model = torch.load(model_dir + natsorted(os.listdir(model_dir))[0])['state_dict']\r\n model.load_state_dict(best_model)\r\n else:\r\n updated_lr = lr\r\n\r\n '''\r\n Initialize training\r\n '''\r\n train_composed = transforms.Compose([trans.RandomFlip(0),\r\n trans.NumpyType((np.float32, np.float32)),\r\n ])\r\n\r\n val_composed = transforms.Compose([trans.Seg_norm(), #rearrange segmentation label to 1 to 46\r\n trans.NumpyType((np.float32, np.int16)),\r\n ])\r\n\r\n train_set = datasets.IXIBrainDataset(glob.glob(train_dir + '*.pkl'), atlas_dir, transforms=train_composed)\r\n val_set = datasets.IXIBrainInferDataset(glob.glob(val_dir + '*.pkl'), atlas_dir, transforms=val_composed)\r\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)\r\n val_loader = DataLoader(val_set, batch_size=1, shuffle=False, num_workers=4, pin_memory=True, drop_last=True)\r\n\r\n optimizer = optim.Adam(model.parameters(), lr=updated_lr, weight_decay=0, amsgrad=True)\r\n criterion = losses.NCC_vxm()\r\n criterions = [criterion]\r\n criterions += [losses.Grad3d(penalty='l2')]\r\n best_dsc = 0\r\n writer = SummaryWriter(log_dir='logs/'+save_dir)\r\n for epoch in range(epoch_start, max_epoch):\r\n print('Training Starts')\r\n '''\r\n Training\r\n '''\r\n loss_all = utils.AverageMeter()\r\n idx = 0\r\n for data in train_loader:\r\n idx += 1\r\n model.train()\r\n adjust_learning_rate(optimizer, epoch, max_epoch, lr)\r\n data = [t.cuda() for t in data]\r\n x = data[0]\r\n y = data[1]\r\n x_in = torch.cat((x,y), dim=1)\r\n output = model(x_in)\r\n loss = 0\r\n loss_vals = []\r\n for n, loss_function in enumerate(criterions):\r\n 
curr_loss = loss_function(output[n], y) * weights[n]\r\n loss_vals.append(curr_loss)\r\n loss += curr_loss\r\n loss_all.update(loss.item(), y.numel())\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n print('Iter {} of {} loss {:.4f}, Img Sim: {:.6f}, Reg: {:.6f}'.format(idx, len(train_loader), loss.item(), loss_vals[0].item(), loss_vals[1].item()))\r\n\r\n writer.add_scalar('Loss/train', loss_all.avg, epoch)\r\n print('Epoch {} loss {:.4f}'.format(epoch, loss_all.avg))\r\n '''\r\n Validation\r\n '''\r\n eval_dsc = utils.AverageMeter()\r\n with torch.no_grad():\r\n for data in val_loader:\r\n model.eval()\r\n data = [t.cuda() for t in data]\r\n x = data[0]\r\n y = data[1]\r\n x_seg = data[2]\r\n y_seg = data[3]\r\n x_in = torch.cat((x, y), dim=1)\r\n grid_img = mk_grid_img(8, 1, img_size)\r\n output = model(x_in)\r\n def_out = reg_model([x_seg.cuda().float(), output[1].cuda()])\r\n def_grid = reg_model_bilin([grid_img.float(), output[1].cuda()])\r\n dsc = utils.dice_val_VOI(def_out.long(), y_seg.long())\r\n eval_dsc.update(dsc.item(), x.size(0))\r\n print(eval_dsc.avg)\r\n best_dsc = max(eval_dsc.avg, best_dsc)\r\n save_checkpoint({\r\n 'epoch': epoch + 1,\r\n 'state_dict': model.state_dict(),\r\n 'best_mse': best_dsc,\r\n 'optimizer': optimizer.state_dict(),\r\n }, save_dir='experiments/'+save_dir, filename='dsc{:.3f}.pth.tar'.format(eval_dsc.avg))\r\n writer.add_scalar('DSC/validate', eval_dsc.avg, epoch)\r\n plt.switch_backend('agg')\r\n pred_fig = comput_fig(def_out)\r\n grid_fig = comput_fig(def_grid)\r\n x_fig = comput_fig(x_seg)\r\n tar_fig = comput_fig(y_seg)\r\n writer.add_figure('Grid', grid_fig, epoch)\r\n plt.close(grid_fig)\r\n writer.add_figure('input', x_fig, epoch)\r\n plt.close(x_fig)\r\n writer.add_figure('ground truth', tar_fig, epoch)\r\n plt.close(tar_fig)\r\n writer.add_figure('prediction', pred_fig, epoch)\r\n plt.close(pred_fig)\r\n loss_all.reset()\r\n writer.close()\r\n\r\ndef comput_fig(img):\r\n img = img.detach().cpu().numpy()[0, 0, 48:64, :, :]\r\n fig = plt.figure(figsize=(12,12), dpi=180)\r\n for i in range(img.shape[0]):\r\n plt.subplot(4, 4, i + 1)\r\n plt.axis('off')\r\n plt.imshow(img[i, :, :], cmap='gray')\r\n fig.subplots_adjust(wspace=0, hspace=0)\r\n return fig\r\n\r\ndef adjust_learning_rate(optimizer, epoch, MAX_EPOCHES, INIT_LR, power=0.9):\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = round(INIT_LR * np.power( 1 - (epoch) / MAX_EPOCHES ,power),8)\r\n\r\ndef mk_grid_img(grid_step, line_thickness=1, grid_sz=(160, 192, 224)):\r\n grid_img = np.zeros(grid_sz)\r\n for j in range(0, grid_img.shape[1], grid_step):\r\n grid_img[:, j+line_thickness-1, :] = 1\r\n for i in range(0, grid_img.shape[2], grid_step):\r\n grid_img[:, :, i+line_thickness-1] = 1\r\n grid_img = grid_img[None, None, ...]\r\n grid_img = torch.from_numpy(grid_img).cuda()\r\n return grid_img\r\n\r\ndef save_checkpoint(state, save_dir='models', filename='checkpoint.pth.tar', max_model_num=8):\r\n torch.save(state, save_dir+filename)\r\n model_lists = natsorted(glob.glob(save_dir + '*'))\r\n while len(model_lists) > max_model_num:\r\n os.remove(model_lists[0])\r\n model_lists = natsorted(glob.glob(save_dir + '*'))\r\n\r\nif __name__ == '__main__':\r\n '''\r\n GPU configuration\r\n '''\r\n GPU_iden = 0\r\n GPU_num = torch.cuda.device_count()\r\n print('Number of GPU: ' + str(GPU_num))\r\n for GPU_idx in range(GPU_num):\r\n GPU_name = torch.cuda.get_device_name(GPU_idx)\r\n print(' GPU #' + str(GPU_idx) + ': ' + GPU_name)\r\n 
torch.cuda.set_device(GPU_iden)\r\n    GPU_avai = torch.cuda.is_available()\r\n    print('Currently using: ' + torch.cuda.get_device_name(GPU_iden))\r\n    print('Is the GPU available? ' + str(GPU_avai))\r\n    main()"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.LeakyReLU",
"torch.nn.AvgPool3d",
"torch.meshgrid",
"torch.nn.BatchNorm3d",
"torch.nn.LayerNorm",
"torch.nn.init.constant_",
"torch.distributions.normal.Normal",
"torch.unsqueeze",
"torch.nn.Conv3d",
"torch.zeros",
"torch.nn.Identity",
"torch.nn.Dropout",
"torch.arange",
"torch.nn.Upsample",
"torch.nn.functional.grid_sample",
"torch.nn.InstanceNorm3d"
],
[
"matplotlib.pyplot.switch_backend",
"torch.cat",
"numpy.zeros",
"torch.save",
"torch.cuda.get_device_name",
"torch.no_grad",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"numpy.power",
"matplotlib.pyplot.imshow",
"torch.utils.tensorboard.SummaryWriter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot"
]
]
|
TharinduDR/MultiTransQuest | [
"90c24ec99d7418b77b46fc32a2ae9369d72347ad"
]
| [
"examples/common/draw.py"
]
| [
"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.metrics import mean_absolute_error\n\nfrom examples.common.normalizer import fit\nfrom multitransquest.evaluation import pearson_corr, spearman_corr, rmse\n\n\ndef draw_scatterplot(data_frame, real_column, prediction_column, path, topic):\n data_frame = data_frame.sort_values(real_column)\n sort_id = list(range(0, len(data_frame.index)))\n data_frame['id'] = pd.Series(sort_id).values\n\n data_frame = fit(data_frame, real_column)\n data_frame = fit(data_frame, prediction_column)\n\n pearson = pearson_corr(data_frame[real_column].tolist(), data_frame[prediction_column].tolist())\n spearman = spearman_corr(data_frame[real_column].tolist(), data_frame[prediction_column].tolist())\n rmse_value = rmse(data_frame[real_column].tolist(), data_frame[prediction_column].tolist())\n mae = mean_absolute_error(data_frame[real_column].tolist(), data_frame[prediction_column].tolist())\n\n textstr = 'RMSE=%.4f\\nMAE=%.4f\\nPearson Correlation=%.4f\\nSpearman Correlation=%.4f' % (\n rmse_value, mae, pearson, spearman)\n\n plt.figure()\n ax = data_frame.plot(kind='scatter', x='id', y=real_column, color='DarkBlue', label='z_mean', title=topic)\n ax = data_frame.plot(kind='scatter', x='id', y=prediction_column, color='DarkGreen', label='predicted z_mean',\n ax=ax)\n ax.text(0.5 * data_frame.shape[0],\n min(min(data_frame[real_column].tolist()), min(data_frame[prediction_column].tolist())), textstr,\n fontsize=10)\n\n fig = ax.get_figure()\n fig.savefig(path)\n\n\ndef print_stat(data_frame, real_column, prediction_column):\n data_frame = data_frame.sort_values(real_column)\n\n pearson = pearson_corr(data_frame[real_column].tolist(), data_frame[prediction_column].tolist())\n spearman = spearman_corr(data_frame[real_column].tolist(), data_frame[prediction_column].tolist())\n rmse_value = rmse(data_frame[real_column].tolist(), data_frame[prediction_column].tolist())\n mae = mean_absolute_error(data_frame[real_column].tolist(), data_frame[prediction_column].tolist())\n\n textstr = 'RMSE=%.4f\\nMAE=%.4f\\nPearson Correlation=%.4f\\nSpearman Correlation=%.4f' % (\n rmse_value, mae, pearson, spearman)\n\n print(textstr)\n"
]
| [
[
"pandas.Series",
"matplotlib.pyplot.figure"
]
]
|
Abdullahzz/cv | [
"5b6378063787232785416369d761e23535389b2a"
]
| [
"gokPrj/gokWebApi.py"
]
| [
"import os\nfrom werkzeug.utils import secure_filename\nfrom flask import Flask, flash, request, redirect, url_for\nfrom PIL import Image\nimport io\nimport base64\nimport mimetypes\nfrom flask import Flask, request, make_response, jsonify, render_template, redirect, url_for\nimport numpy as np\nimport cv2 as cv\n\n# instance of Flask\napp = Flask(__name__)\n\n\[email protected](500)\ndef not_found(e):\n # return also the code error\n return jsonify({\"status\": \"internal error\", \"message\": \"internal error occurred in server\"}), 500\n\n\[email protected](404)\ndef not_found(e):\n # return also the code error\n return jsonify({\"status\": \"not found\", \"message\": \"route not found\"}), 404\n\n\[email protected](400)\ndef bad_request(e):\n # return also the code error\n return jsonify({\"status\": \"not ok\", \"message\": \"this server could not understand your request\"}), 400\n\n\ndata = {'1': 'afiq',\n '2': 'azureen',\n '3': 'gavin',\n '4': 'cve',\n '5': 'inamul',\n '6': 'jincheng',\n '7': 'mahmuda',\n '8': 'numan',\n '9': 'saseendran'\n }\n\n\[email protected]('/')\ndef index():\n page = 'index'\n description = \"\"\"This is an api for computer vision class. The api gets you the students in class.\n Please enjoy and have some fun.\n The api routes are the following:\n 1. get all: /api/cv\n 2. get by id: /api/cv/5\n 3. post: /api/cv\n 4. put by id: /api/cv/5\n 5. delete by id: /api/cv/5\n \n Thanks for check our api out\n \"\"\"\n return render_template('index.html', page=page, description=description, data=data)\n\n\n# get\n# /api/cv\n\n\[email protected]('/api/cv', methods=['GET'])\ndef get_all():\n return jsonify({\"status\": \"ok\", \"students\": data}), 200\n\n\n# get_by_id\n# /api/cv/1\[email protected]('/api/cv/<int:id>', methods=['GET'])\ndef get_by_id(id):\n if id == 0:\n return jsonify({\"status\": \"not ok\", \"message\": \"this server could not understand your request\"}), 400\n\n student = data[id]\n\n return jsonify({\"status\": \"ok\", \"student\": student}), 200\n\n# post\n# /api/cv\n\n\[email protected]('/api/cv', methods=['POST'])\ndef post():\n # get name posted\n name = request.json['name']\n\n # create name\n data.append(name)\n\n return jsonify({\"status\": \"ok\", 'id': len(data)}), 200\n\n# put\n# /api/cv/5\n\n\[email protected]('/api/cv/<int:id>', methods=['PUT'])\ndef put(id):\n # bad request\n if id < 1 or id > 9:\n return jsonify({\"status\": \"not ok\", \"message\": \"this server could not understand your request\"}), 400\n\n # get index of id\n i = id - 1\n # get id name\n old_name = data[i]\n\n # get new name\n name = request.json['name']\n\n # insert new name at index i\n data.insert(i, name)\n # remove old name\n data.remove(old_name)\n\n return jsonify({\"status\": \"ok\", \"student\": {'id': i+1, 'name': data[i]}}), 200\n\n# delete\n# /api/cv/1\n\n\[email protected]('/api/cv/<int:id>', methods=['DELETE'])\ndef delete(id):\n if id == 0:\n return jsonify({\"status\": \"not ok\", \"message\": \"this server could not understand your request\"}), 400\n\n name = data[id-1]\n data.remove(name)\n\n return jsonify({\"status\": \"ok\"}), 200\n\n\n# delete\n# /api/cv/1\[email protected]('/api/cv/all/<int:id>', methods=['GET', 'POST', 'PUT', 'DELETE'])\ndef all(id):\n message = 'GET'\n status = 'ok'\n bodyJson = request.json\n\n if request.method == 'POST':\n message = 'POST'\n elif request.method == 'PUT':\n message = 'PUT'\n elif request.method == 'DELETE':\n message = 'DELETE'\n else:\n message = 'GET'\n\n return jsonify({'status': status, 'message': 
message, 'id': id, 'bodyJson': bodyJson}), 200\n\n#\n\n\nUPLOAD_FOLDER = r'C:\\github\\cv\\gokPrj\\static\\uploads'\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\nsave_mode = 0 # 1001\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\[email protected]('/api/cv/upload', methods=['GET', 'POST'])\ndef upload_file():\n # get querystring\n filename = '' if request.args.get(\n 'filename') is None else request.args.get('filename')\n uri = '' if request.args.get('uri') is None else request.args.get('uri')\n uri2 = '' if request.args.get('uri2') is None else request.args.get('uri2')\n #\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n\n filePath = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n root, ext = os.path.splitext(filename)\n print(root, ext)\n ext = ext.lower()\n ext = '.jpeg' if ext == '.jpg' else ext\n\n if save_mode == 0:\n file.save(filePath)\n uri = f'/static/uploads/{filename}'\n else:\n f = file.read()\n print('file-len', len(f))\n imgArray = np.frombuffer(f, np.uint8)\n\n # create image\n img = cv.imdecode(imgArray, cv.IMREAD_COLOR)\n\n if save_mode == 1000:\n # write image to path\n cv.imwrite(filePath, img)\n uri = f'/static/uploads/{filename}'\n\n mime = mimetypes.types_map[ext]\n if save_mode == 1010:\n # transform to base64 url\n # 1\n uri = to_base64_uri_pil(img, ext, mime)\n\n if save_mode == 1001:\n # 2\n uri = to_base64_uri(img, ext, mime)\n\n return redirect(url_for('upload_file', uri=uri))\n\n return f'''\n <!doctype html>\n <title>Upload new File</title>\n <h1>Upload new File</h1>\n <form method=post enctype=multipart/form-data>\n <input type=file name=file>\n <input type=submit value=Upload> \n </form>\n <img src=\"{uri}\" />\n '''\n\n\ndef to_base64_uri_pil(img, ext, mime):\n imgRGB = img[:, :, ::-1]\n imgPIL = Image.fromarray(imgRGB)\n buff = io.BytesIO()\n\n imgFormat = ext[1:]\n print(imgFormat)\n\n imgPIL.save(buff, format=imgFormat)\n imgBase64 = base64.b64encode(buff.getvalue()).decode(\"utf-8\")\n\n uri = f\"data:{mime};base64,{imgBase64}\"\n return uri\n\n\ndef to_base64_uri(img, ext, mime):\n retval, buffer = cv.imencode(ext, img)\n imgBase64_2 = base64.b64encode(buffer).decode(\"utf-8\")\n\n uri2 = f\"data:{mime};base64,{imgBase64_2}\"\n return uri2\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n"
]
| [
[
"numpy.frombuffer"
]
]
|
stevenliu216/rob535-perception-project | [
"6b247a3a0eff41ee83b14496632263fa0b176011"
]
| [
"utils/utils.py"
]
| [
"#! /usr/bin/python3\nfrom glob import glob\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef rot(n):\n n = np.asarray(n).flatten()\n assert(n.size == 3)\n\n theta = np.linalg.norm(n)\n if theta:\n n /= theta\n K = np.array([[0, -n[2], n[1]], [n[2], 0, -n[0]], [-n[1], n[0], 0]])\n\n return np.identity(3) + np.sin(theta) * K + (1 - np.cos(theta)) * K @ K\n else:\n return np.identity(3)\n\n\ndef get_bbox(p0, p1):\n \"\"\"\n Input:\n * p0, p1\n (3)\n Corners of a bounding box represented in the body frame.\n\n Output:\n * v\n (3, 8)\n Vertices of the bounding box represented in the body frame.\n * e\n (2, 14)\n Edges of the bounding box. The first 2 edges indicate the `front` side\n of the box.\n \"\"\"\n v = np.array([\n [p0[0], p0[0], p0[0], p0[0], p1[0], p1[0], p1[0], p1[0]],\n [p0[1], p0[1], p1[1], p1[1], p0[1], p0[1], p1[1], p1[1]],\n [p0[2], p1[2], p0[2], p1[2], p0[2], p1[2], p0[2], p1[2]]\n ])\n e = np.array([\n [2, 3, 0, 0, 3, 3, 0, 1, 2, 3, 4, 4, 7, 7],\n [7, 6, 1, 2, 1, 2, 4, 5, 6, 7, 5, 6, 5, 6]\n ], dtype=np.uint8)\n\n return v, e\n\ndef imshow(image, ax=None, title=None, normalize=True):\n '''\n Imshow for Tensor\n '''\n \n if ax is None:\n fig, ax = plt.subplots()\n image = image.numpy().transpose((1, 2, 0))\n\n if normalize:\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n image = np.clip(image, 0, 1)\n\n ax.imshow(image)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.tick_params(axis='both', length=0)\n ax.set_xticklabels('')\n ax.set_yticklabels('')\n return ax"
]
| [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"numpy.asarray",
"matplotlib.pyplot.subplots",
"numpy.identity",
"numpy.cos",
"numpy.clip"
]
]
|
yt7589/codis | [
"0b480b1d93235593503d34fe7c76b6188479f328"
]
| [
"ann/roi_heads.py"
]
| [
"import torch\r\n\r\nimport torch.nn.functional as F\r\nfrom torch import nn\r\n\r\nfrom torchvision.ops import boxes as box_ops\r\nfrom torchvision.ops import misc as misc_nn_ops\r\nfrom torchvision.ops import roi_align\r\n\r\nimport ann.pt_utils as det_utils\r\n\r\n\r\ndef fastrcnn_loss(class_logits, box_regression, labels, regression_targets):\r\n \"\"\"\r\n Computes the loss for Faster R-CNN.\r\n\r\n Arguments:\r\n class_logits (Tensor)\r\n box_regression (Tensor)\r\n labels (list[BoxList])\r\n regression_targets (Tensor)\r\n\r\n Returns:\r\n classification_loss (Tensor)\r\n box_loss (Tensor)\r\n \"\"\"\r\n\r\n labels = torch.cat(labels, dim=0)\r\n regression_targets = torch.cat(regression_targets, dim=0)\r\n\r\n classification_loss = F.cross_entropy(class_logits, labels)\r\n\r\n # get indices that correspond to the regression targets for\r\n # the corresponding ground truth labels, to be used with\r\n # advanced indexing\r\n sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)\r\n labels_pos = labels[sampled_pos_inds_subset]\r\n N, num_classes = class_logits.shape\r\n box_regression = box_regression.reshape(N, -1, 4)\r\n\r\n box_loss = F.smooth_l1_loss(\r\n box_regression[sampled_pos_inds_subset, labels_pos],\r\n regression_targets[sampled_pos_inds_subset],\r\n reduction=\"sum\",\r\n )\r\n box_loss = box_loss / labels.numel()\r\n\r\n return classification_loss, box_loss\r\n\r\n\r\ndef maskrcnn_inference(x, labels):\r\n \"\"\"\r\n From the results of the CNN, post process the masks\r\n by taking the mask corresponding to the class with max\r\n probability (which are of fixed size and directly output\r\n by the CNN) and return the masks in the mask field of the BoxList.\r\n\r\n Arguments:\r\n x (Tensor): the mask logits\r\n labels (list[BoxList]): bounding boxes that are used as\r\n reference, one for ech image\r\n\r\n Returns:\r\n results (list[BoxList]): one BoxList for each image, containing\r\n the extra field mask\r\n \"\"\"\r\n mask_prob = x.sigmoid()\r\n\r\n # select masks coresponding to the predicted classes\r\n num_masks = x.shape[0]\r\n boxes_per_image = [len(l) for l in labels]\r\n labels = torch.cat(labels)\r\n index = torch.arange(num_masks, device=labels.device)\r\n mask_prob = mask_prob[index, labels][:, None]\r\n\r\n mask_prob = mask_prob.split(boxes_per_image, dim=0)\r\n\r\n return mask_prob\r\n\r\n\r\ndef project_masks_on_boxes(gt_masks, boxes, matched_idxs, M):\r\n \"\"\"\r\n Given segmentation masks and the bounding boxes corresponding\r\n to the location of the masks in the image, this function\r\n crops and resizes the masks in the position defined by the\r\n boxes. 
This prepares the masks for them to be fed to the\r\n loss computation as the targets.\r\n \"\"\"\r\n matched_idxs = matched_idxs.to(boxes)\r\n rois = torch.cat([matched_idxs[:, None], boxes], dim=1)\r\n gt_masks = gt_masks[:, None].to(rois)\r\n return roi_align(gt_masks, rois, (M, M), 1)[:, 0]\r\n\r\n\r\ndef maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs):\r\n \"\"\"\r\n Arguments:\r\n proposals (list[BoxList])\r\n mask_logits (Tensor)\r\n targets (list[BoxList])\r\n\r\n Return:\r\n mask_loss (Tensor): scalar tensor containing the loss\r\n \"\"\"\r\n\r\n discretization_size = mask_logits.shape[-1]\r\n labels = [l[idxs] for l, idxs in zip(gt_labels, mask_matched_idxs)]\r\n mask_targets = [\r\n project_masks_on_boxes(m, p, i, discretization_size)\r\n for m, p, i in zip(gt_masks, proposals, mask_matched_idxs)\r\n ]\r\n\r\n labels = torch.cat(labels, dim=0)\r\n mask_targets = torch.cat(mask_targets, dim=0)\r\n\r\n # torch.mean (in binary_cross_entropy_with_logits) doesn't\r\n # accept empty tensors, so handle it separately\r\n if mask_targets.numel() == 0:\r\n return mask_logits.sum() * 0\r\n\r\n mask_loss = F.binary_cross_entropy_with_logits(\r\n mask_logits[torch.arange(labels.shape[0], device=labels.device), labels], mask_targets\r\n )\r\n return mask_loss\r\n\r\n\r\ndef keypoints_to_heatmap(keypoints, rois, heatmap_size):\r\n offset_x = rois[:, 0]\r\n offset_y = rois[:, 1]\r\n scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])\r\n scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])\r\n\r\n offset_x = offset_x[:, None]\r\n offset_y = offset_y[:, None]\r\n scale_x = scale_x[:, None]\r\n scale_y = scale_y[:, None]\r\n\r\n x = keypoints[..., 0]\r\n y = keypoints[..., 1]\r\n\r\n x_boundary_inds = x == rois[:, 2][:, None]\r\n y_boundary_inds = y == rois[:, 3][:, None]\r\n\r\n x = (x - offset_x) * scale_x\r\n x = x.floor().long()\r\n y = (y - offset_y) * scale_y\r\n y = y.floor().long()\r\n\r\n x[x_boundary_inds] = heatmap_size - 1\r\n y[y_boundary_inds] = heatmap_size - 1\r\n\r\n valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)\r\n vis = keypoints[..., 2] > 0\r\n valid = (valid_loc & vis).long()\r\n\r\n lin_ind = y * heatmap_size + x\r\n heatmaps = lin_ind * valid\r\n\r\n return heatmaps, valid\r\n\r\n\r\ndef heatmaps_to_keypoints(maps, rois):\r\n \"\"\"Extract predicted keypoint locations from heatmaps. Output has shape\r\n (#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)\r\n for each keypoint.\r\n \"\"\"\r\n # This function converts a discrete image coordinate in a HEATMAP_SIZE x\r\n # HEATMAP_SIZE image to a continuous keypoint coordinate. 
We maintain\r\n # consistency with keypoints_to_heatmap_labels by using the conversion from\r\n # Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a\r\n # continuous coordinate.\r\n offset_x = rois[:, 0]\r\n offset_y = rois[:, 1]\r\n\r\n widths = rois[:, 2] - rois[:, 0]\r\n heights = rois[:, 3] - rois[:, 1]\r\n widths = widths.clamp(min=1)\r\n heights = heights.clamp(min=1)\r\n widths_ceil = widths.ceil()\r\n heights_ceil = heights.ceil()\r\n\r\n num_keypoints = maps.shape[1]\r\n xy_preds = torch.zeros((len(rois), 3, num_keypoints), dtype=torch.float32, device=maps.device)\r\n end_scores = torch.zeros((len(rois), num_keypoints), dtype=torch.float32, device=maps.device)\r\n for i in range(len(rois)):\r\n roi_map_width = int(widths_ceil[i].item())\r\n roi_map_height = int(heights_ceil[i].item())\r\n width_correction = widths[i] / roi_map_width\r\n height_correction = heights[i] / roi_map_height\r\n roi_map = torch.nn.functional.interpolate(\r\n maps[i][None], size=(roi_map_height, roi_map_width), mode='bicubic', align_corners=False)[0]\r\n # roi_map_probs = scores_to_probs(roi_map.copy())\r\n w = roi_map.shape[2]\r\n pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1)\r\n x_int = pos % w\r\n y_int = (pos - x_int) // w\r\n # assert (roi_map_probs[k, y_int, x_int] ==\r\n # roi_map_probs[k, :, :].max())\r\n x = (x_int.float() + 0.5) * width_correction\r\n y = (y_int.float() + 0.5) * height_correction\r\n xy_preds[i, 0, :] = x + offset_x[i]\r\n xy_preds[i, 1, :] = y + offset_y[i]\r\n xy_preds[i, 2, :] = 1\r\n end_scores[i, :] = roi_map[torch.arange(num_keypoints), y_int, x_int]\r\n\r\n return xy_preds.permute(0, 2, 1), end_scores\r\n\r\n\r\ndef keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched_idxs):\r\n N, K, H, W = keypoint_logits.shape\r\n assert H == W\r\n discretization_size = H\r\n heatmaps = []\r\n valid = []\r\n for proposals_per_image, gt_kp_in_image, midx in zip(proposals, gt_keypoints, keypoint_matched_idxs):\r\n kp = gt_kp_in_image[midx]\r\n heatmaps_per_image, valid_per_image = keypoints_to_heatmap(\r\n kp, proposals_per_image, discretization_size\r\n )\r\n heatmaps.append(heatmaps_per_image.view(-1))\r\n valid.append(valid_per_image.view(-1))\r\n\r\n keypoint_targets = torch.cat(heatmaps, dim=0)\r\n valid = torch.cat(valid, dim=0).to(dtype=torch.uint8)\r\n valid = torch.nonzero(valid).squeeze(1)\r\n\r\n # torch.mean (in binary_cross_entropy_with_logits) does'nt\r\n # accept empty tensors, so handle it sepaartely\r\n if keypoint_targets.numel() == 0 or len(valid) == 0:\r\n return keypoint_logits.sum() * 0\r\n\r\n keypoint_logits = keypoint_logits.view(N * K, H * W)\r\n\r\n keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid])\r\n return keypoint_loss\r\n\r\n\r\ndef keypointrcnn_inference(x, boxes):\r\n kp_probs = []\r\n kp_scores = []\r\n\r\n boxes_per_image = [len(box) for box in boxes]\r\n x2 = x.split(boxes_per_image, dim=0)\r\n\r\n for xx, bb in zip(x2, boxes):\r\n kp_prob, scores = heatmaps_to_keypoints(xx, bb)\r\n kp_probs.append(kp_prob)\r\n kp_scores.append(scores)\r\n\r\n return kp_probs, kp_scores\r\n\r\n\r\n# the next two functions should be merged inside Masker\r\n# but are kept here for the moment while we need them\r\n# temporarily for paste_mask_in_image\r\ndef expand_boxes(boxes, scale):\r\n w_half = (boxes[:, 2] - boxes[:, 0]) * .5\r\n h_half = (boxes[:, 3] - boxes[:, 1]) * .5\r\n x_c = (boxes[:, 2] + boxes[:, 0]) * .5\r\n y_c = (boxes[:, 3] + boxes[:, 1]) * .5\r\n\r\n w_half *= scale\r\n 
h_half *= scale\r\n\r\n boxes_exp = torch.zeros_like(boxes)\r\n boxes_exp[:, 0] = x_c - w_half\r\n boxes_exp[:, 2] = x_c + w_half\r\n boxes_exp[:, 1] = y_c - h_half\r\n boxes_exp[:, 3] = y_c + h_half\r\n return boxes_exp\r\n\r\n\r\ndef expand_masks(mask, padding):\r\n M = mask.shape[-1]\r\n scale = float(M + 2 * padding) / M\r\n padded_mask = torch.nn.functional.pad(mask, (padding,) * 4)\r\n return padded_mask, scale\r\n\r\n\r\ndef paste_mask_in_image(mask, box, im_h, im_w):\r\n TO_REMOVE = 1\r\n w = int(box[2] - box[0] + TO_REMOVE)\r\n h = int(box[3] - box[1] + TO_REMOVE)\r\n w = max(w, 1)\r\n h = max(h, 1)\r\n\r\n # Set shape to [batchxCxHxW]\r\n mask = mask.expand((1, 1, -1, -1))\r\n\r\n # Resize mask\r\n mask = misc_nn_ops.interpolate(mask, size=(h, w), mode='bilinear', align_corners=False)\r\n mask = mask[0][0]\r\n\r\n im_mask = torch.zeros((im_h, im_w), dtype=mask.dtype, device=mask.device)\r\n x_0 = max(box[0], 0)\r\n x_1 = min(box[2] + 1, im_w)\r\n y_0 = max(box[1], 0)\r\n y_1 = min(box[3] + 1, im_h)\r\n\r\n im_mask[y_0:y_1, x_0:x_1] = mask[\r\n (y_0 - box[1]):(y_1 - box[1]), (x_0 - box[0]):(x_1 - box[0])\r\n ]\r\n return im_mask\r\n\r\n\r\ndef paste_masks_in_image(masks, boxes, img_shape, padding=1):\r\n masks, scale = expand_masks(masks, padding=padding)\r\n boxes = expand_boxes(boxes, scale).to(dtype=torch.int64).tolist()\r\n # im_h, im_w = img_shape.tolist()\r\n im_h, im_w = img_shape\r\n res = [\r\n paste_mask_in_image(m[0], b, im_h, im_w)\r\n for m, b in zip(masks, boxes)\r\n ]\r\n if len(res) > 0:\r\n res = torch.stack(res, dim=0)[:, None]\r\n else:\r\n res = masks.new_empty((0, 1, im_h, im_w))\r\n return res\r\n\r\n\r\nclass RoIHeads(torch.nn.Module):\r\n def __init__(self,\r\n box_roi_pool,\r\n box_head,\r\n box_predictor,\r\n # Faster R-CNN training\r\n fg_iou_thresh, bg_iou_thresh,\r\n batch_size_per_image, positive_fraction,\r\n bbox_reg_weights,\r\n # Faster R-CNN inference\r\n score_thresh,\r\n nms_thresh,\r\n detections_per_img,\r\n # Mask\r\n mask_roi_pool=None,\r\n mask_head=None,\r\n mask_predictor=None,\r\n keypoint_roi_pool=None,\r\n keypoint_head=None,\r\n keypoint_predictor=None,\r\n ):\r\n super(RoIHeads, self).__init__()\r\n\r\n self.box_similarity = box_ops.box_iou\r\n # assign ground-truth boxes for each proposal\r\n self.proposal_matcher = det_utils.Matcher(\r\n fg_iou_thresh,\r\n bg_iou_thresh,\r\n allow_low_quality_matches=False)\r\n\r\n self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(\r\n batch_size_per_image,\r\n positive_fraction)\r\n\r\n if bbox_reg_weights is None:\r\n bbox_reg_weights = (10., 10., 5., 5.)\r\n self.box_coder = det_utils.BoxCoder(bbox_reg_weights)\r\n\r\n self.box_roi_pool = box_roi_pool\r\n self.box_head = box_head\r\n self.box_predictor = box_predictor\r\n\r\n self.score_thresh = score_thresh\r\n self.nms_thresh = nms_thresh\r\n self.detections_per_img = detections_per_img\r\n\r\n self.mask_roi_pool = mask_roi_pool\r\n self.mask_head = mask_head\r\n self.mask_predictor = mask_predictor\r\n\r\n self.keypoint_roi_pool = keypoint_roi_pool\r\n self.keypoint_head = keypoint_head\r\n self.keypoint_predictor = keypoint_predictor\r\n\r\n @property\r\n def has_mask(self):\r\n if self.mask_roi_pool is None:\r\n return False\r\n if self.mask_head is None:\r\n return False\r\n if self.mask_predictor is None:\r\n return False\r\n return True\r\n\r\n @property\r\n def has_keypoint(self):\r\n if self.keypoint_roi_pool is None:\r\n return False\r\n if self.keypoint_head is None:\r\n return False\r\n if self.keypoint_predictor is 
None:\r\n return False\r\n return True\r\n\r\n def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels):\r\n matched_idxs = []\r\n labels = []\r\n for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels):\r\n match_quality_matrix = self.box_similarity(gt_boxes_in_image, proposals_in_image)\r\n matched_idxs_in_image = self.proposal_matcher(match_quality_matrix)\r\n\r\n clamped_matched_idxs_in_image = matched_idxs_in_image.clamp(min=0)\r\n\r\n labels_in_image = gt_labels_in_image[clamped_matched_idxs_in_image]\r\n labels_in_image = labels_in_image.to(dtype=torch.int64)\r\n\r\n # Label background (below the low threshold)\r\n bg_inds = matched_idxs_in_image == self.proposal_matcher.BELOW_LOW_THRESHOLD\r\n labels_in_image[bg_inds] = 0\r\n\r\n # Label ignore proposals (between low and high thresholds)\r\n ignore_inds = matched_idxs_in_image == self.proposal_matcher.BETWEEN_THRESHOLDS\r\n labels_in_image[ignore_inds] = -1 # -1 is ignored by sampler\r\n\r\n matched_idxs.append(clamped_matched_idxs_in_image)\r\n labels.append(labels_in_image)\r\n return matched_idxs, labels\r\n\r\n def subsample(self, labels):\r\n sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)\r\n sampled_inds = []\r\n for img_idx, (pos_inds_img, neg_inds_img) in enumerate(\r\n zip(sampled_pos_inds, sampled_neg_inds)\r\n ):\r\n img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)\r\n sampled_inds.append(img_sampled_inds)\r\n return sampled_inds\r\n\r\n def add_gt_proposals(self, proposals, gt_boxes):\r\n proposals = [\r\n torch.cat((proposal, gt_box))\r\n for proposal, gt_box in zip(proposals, gt_boxes)\r\n ]\r\n\r\n return proposals\r\n\r\n def check_targets(self, targets):\r\n assert targets is not None\r\n assert all(\"boxes\" in t for t in targets)\r\n assert all(\"labels\" in t for t in targets)\r\n if self.has_mask:\r\n assert all(\"masks\" in t for t in targets)\r\n\r\n def select_training_samples(self, proposals, targets):\r\n self.check_targets(targets)\r\n gt_boxes = [t[\"boxes\"] for t in targets]\r\n gt_labels = [t[\"labels\"] for t in targets]\r\n\r\n # append ground-truth bboxes to propos\r\n proposals = self.add_gt_proposals(proposals, gt_boxes)\r\n\r\n # get matching gt indices for each proposal\r\n matched_idxs, labels = self.assign_targets_to_proposals(proposals, gt_boxes, gt_labels)\r\n # sample a fixed proportion of positive-negative proposals\r\n sampled_inds = self.subsample(labels)\r\n matched_gt_boxes = []\r\n num_images = len(proposals)\r\n for img_id in range(num_images):\r\n img_sampled_inds = sampled_inds[img_id]\r\n proposals[img_id] = proposals[img_id][img_sampled_inds]\r\n labels[img_id] = labels[img_id][img_sampled_inds]\r\n matched_idxs[img_id] = matched_idxs[img_id][img_sampled_inds]\r\n matched_gt_boxes.append(gt_boxes[img_id][matched_idxs[img_id]])\r\n\r\n regression_targets = self.box_coder.encode(matched_gt_boxes, proposals)\r\n return proposals, matched_idxs, labels, regression_targets\r\n\r\n def postprocess_detections(self, class_logits, box_regression, proposals, image_shapes):\r\n device = class_logits.device\r\n num_classes = class_logits.shape[-1]\r\n\r\n boxes_per_image = [len(boxes_in_image) for boxes_in_image in proposals]\r\n pred_boxes = self.box_coder.decode(box_regression, proposals)\r\n\r\n pred_scores = F.softmax(class_logits, -1)\r\n\r\n # split boxes and scores per image\r\n pred_boxes = pred_boxes.split(boxes_per_image, 0)\r\n pred_scores = pred_scores.split(boxes_per_image, 
0)\r\n\r\n all_boxes = []\r\n all_scores = []\r\n all_labels = []\r\n for boxes, scores, image_shape in zip(pred_boxes, pred_scores, image_shapes):\r\n boxes = box_ops.clip_boxes_to_image(boxes, image_shape)\r\n\r\n # create labels for each prediction\r\n labels = torch.arange(num_classes, device=device)\r\n labels = labels.view(1, -1).expand_as(scores)\r\n\r\n # remove predictions with the background label\r\n boxes = boxes[:, 1:]\r\n scores = scores[:, 1:]\r\n labels = labels[:, 1:]\r\n\r\n # batch everything, by making every class prediction be a separate instance\r\n boxes = boxes.reshape(-1, 4)\r\n scores = scores.flatten()\r\n labels = labels.flatten()\r\n\r\n # remove low scoring boxes\r\n inds = torch.nonzero(scores > self.score_thresh).squeeze(1)\r\n boxes, scores, labels = boxes[inds], scores[inds], labels[inds]\r\n\r\n # remove empty boxes\r\n keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)\r\n boxes, scores, labels = boxes[keep], scores[keep], labels[keep]\r\n\r\n # non-maximum suppression, independently done per class\r\n keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh)\r\n # keep only topk scoring predictions\r\n keep = keep[:self.detections_per_img]\r\n boxes, scores, labels = boxes[keep], scores[keep], labels[keep]\r\n\r\n all_boxes.append(boxes)\r\n all_scores.append(scores)\r\n all_labels.append(labels)\r\n\r\n return all_boxes, all_scores, all_labels\r\n\r\n def forward(self, features, proposals, image_shapes, targets=None):\r\n \"\"\"\r\n Arguments:\r\n features (List[Tensor])\r\n proposals (List[Tensor[N, 4]])\r\n image_shapes (List[Tuple[H, W]])\r\n targets (List[Dict])\r\n \"\"\"\r\n if targets is not None:\r\n for t in targets:\r\n assert t[\"boxes\"].dtype.is_floating_point, 'target boxes must of float type'\r\n assert t[\"labels\"].dtype == torch.int64, 'target labels must of int64 type'\r\n if self.has_keypoint:\r\n assert t[\"keypoints\"].dtype == torch.float32, 'target keypoints must of float type'\r\n\r\n if self.training:\r\n proposals, matched_idxs, labels, regression_targets = self.select_training_samples(proposals, targets)\r\n\r\n box_features = self.box_roi_pool(features, proposals, image_shapes)\r\n box_features = self.box_head(box_features)\r\n class_logits, box_regression = self.box_predictor(box_features)\r\n\r\n result, losses = [], {}\r\n if self.training:\r\n loss_classifier, loss_box_reg = fastrcnn_loss(\r\n class_logits, box_regression, labels, regression_targets)\r\n losses = dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg)\r\n else:\r\n boxes, scores, labels = self.postprocess_detections(class_logits, box_regression, proposals, image_shapes)\r\n num_images = len(boxes)\r\n for i in range(num_images):\r\n result.append(\r\n dict(\r\n boxes=boxes[i],\r\n labels=labels[i],\r\n scores=scores[i],\r\n )\r\n )\r\n\r\n if self.has_mask:\r\n mask_proposals = [p[\"boxes\"] for p in result]\r\n if self.training:\r\n # during training, only focus on positive boxes\r\n num_images = len(proposals)\r\n mask_proposals = []\r\n pos_matched_idxs = []\r\n for img_id in range(num_images):\r\n pos = torch.nonzero(labels[img_id] > 0).squeeze(1)\r\n mask_proposals.append(proposals[img_id][pos])\r\n pos_matched_idxs.append(matched_idxs[img_id][pos])\r\n\r\n mask_features = self.mask_roi_pool(features, mask_proposals, image_shapes)\r\n mask_features = self.mask_head(mask_features)\r\n mask_logits = self.mask_predictor(mask_features)\r\n\r\n loss_mask = {}\r\n if self.training:\r\n gt_masks = [t[\"masks\"] for t in targets]\r\n 
gt_labels = [t[\"labels\"] for t in targets]\r\n loss_mask = maskrcnn_loss(\r\n mask_logits, mask_proposals,\r\n gt_masks, gt_labels, pos_matched_idxs)\r\n loss_mask = dict(loss_mask=loss_mask)\r\n else:\r\n labels = [r[\"labels\"] for r in result]\r\n masks_probs = maskrcnn_inference(mask_logits, labels)\r\n for mask_prob, r in zip(masks_probs, result):\r\n r[\"masks\"] = mask_prob\r\n\r\n losses.update(loss_mask)\r\n\r\n if self.has_keypoint:\r\n keypoint_proposals = [p[\"boxes\"] for p in result]\r\n if self.training:\r\n # during training, only focus on positive boxes\r\n num_images = len(proposals)\r\n keypoint_proposals = []\r\n pos_matched_idxs = []\r\n for img_id in range(num_images):\r\n pos = torch.nonzero(labels[img_id] > 0).squeeze(1)\r\n keypoint_proposals.append(proposals[img_id][pos])\r\n pos_matched_idxs.append(matched_idxs[img_id][pos])\r\n\r\n keypoint_features = self.keypoint_roi_pool(features, keypoint_proposals, image_shapes)\r\n keypoint_features = self.keypoint_head(keypoint_features)\r\n keypoint_logits = self.keypoint_predictor(keypoint_features)\r\n\r\n loss_keypoint = {}\r\n if self.training:\r\n gt_keypoints = [t[\"keypoints\"] for t in targets]\r\n loss_keypoint = keypointrcnn_loss(\r\n keypoint_logits, keypoint_proposals,\r\n gt_keypoints, pos_matched_idxs)\r\n loss_keypoint = dict(loss_keypoint=loss_keypoint)\r\n else:\r\n keypoints_probs, kp_scores = keypointrcnn_inference(keypoint_logits, keypoint_proposals)\r\n for keypoint_prob, kps, r in zip(keypoints_probs, kp_scores, result):\r\n r[\"keypoints\"] = keypoint_prob\r\n r[\"keypoints_scores\"] = kps\r\n\r\n losses.update(loss_keypoint)\r\n\r\n return result, losses\r\n"
]
| [
[
"torch.zeros",
"torch.nonzero",
"torch.cat",
"torch.nn.functional.smooth_l1_loss",
"torch.stack",
"torch.arange",
"torch.nn.functional.interpolate",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.zeros_like",
"torch.nn.functional.pad"
]
]
|
KIT-IAI/SmartDataRepresentations | [
"2d88381a88b4acfa84df231b05ec66fc5a927179"
]
| [
"src/pywatts/model_handler_module.py"
]
| [
"import os\nfrom enum import Enum\nfrom datetime import datetime\nfrom typing import Dict\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.utilities.seed import seed_everything\n\nfrom pywatts.core.base import BaseEstimator\nfrom pywatts.utils._xarray_time_series_utils import numpy_to_xarray\n\nfrom src.pytorch_lightning import TerminalCallback\nfrom src.models import Simple, MLP, FCN, CNN\n\n\nclass Models(Enum):\n LINEAR = 0\n MLP = 1\n SIMPLE = 2\n DNN = 3\n\n\nclass MyDataset(Dataset):\n \"\"\"\n Dataset class to return tuple data for multiple input networks.\n \"\"\"\n def __init__(self, energy, calendar, y=None):\n \"\"\" Initialize energy, calendar, and target datasets. \"\"\"\n self.energy = energy.astype(np.float32)\n self.calendar = calendar.astype(np.float32)\n if y is None:\n self.y = None\n else:\n self.y = y.astype(np.float32)\n\n def __getitem__(self, index):\n \"\"\" Get tuple data for multiple input networks. \"\"\"\n energy = self.energy[index].flatten()\n calendar = self.calendar[index].flatten()\n if self.y is None:\n return (energy, calendar)\n else:\n return (energy, calendar), self.y[index]\n\n def __len__(self):\n \"\"\" Get the length of the dataset. \"\"\"\n return len(self.energy)\n\n\nclass ModelHandler(BaseEstimator):\n \"\"\"\n PyWATTS model handler class to initialize, train, and predict neural network models.\n \"\"\"\n\n def __init__(self, hparams, name: str = \"DNN\"):\n super().__init__(name)\n self.hparams = hparams\n self.model = None\n self.trainer = None\n\n def get_params(self) -> Dict[str, object]:\n pass\n\n def set_params(self, **kwargs):\n pass\n\n def fit(self, energy, calendar, y):\n \"\"\" Train, validate, and test the model based on hyperparameters set. 
\"\"\"\n # seed run\n seed_everything(self.hparams.seed, workers=True)\n\n # WARNING: This is only works with a 2015-2019 training set.\n val_split_idx = np.where(\n pd.to_datetime(energy.time) == np.datetime64('2018-01-01T00:00')\n )[0][0]\n\n train = MyDataset(\n energy[:val_split_idx].values, calendar[:val_split_idx].values,\n y[:val_split_idx].values)\n validation = MyDataset(\n energy[val_split_idx:].values, calendar[val_split_idx:].values,\n y[val_split_idx:].values)\n train_loader = DataLoader(train, batch_size=128, shuffle=True, num_workers=0)\n val_loader = DataLoader(validation, batch_size=128, num_workers=0)\n\n # init model\n if self.hparams.energy_reshape:\n if self.hparams.model == Models.DNN:\n self.model = CNN(self.hparams, train)\n else:\n raise NotImplementedError('Please use a DNN for reshaped energy time series.')\n else:\n if self.hparams.model == Models.SIMPLE:\n self.model = Simple(self.hparams, train)\n elif self.hparams.model == Models.MLP:\n self.model = MLP(self.hparams, train)\n elif self.hparams.model == Models.DNN:\n self.model = FCN(self.hparams, train)\n\n # init loggers and trainer\n terminal_callback = TerminalCallback()\n timecode = datetime.now().strftime('%H-%M-%S')\n callbacks = [\n terminal_callback,\n ModelCheckpoint(monitor=self.hparams.monitor, dirpath=os.path.join('ckpts', timecode),\n save_top_k=1),\n EarlyStopping(monitor=self.hparams.monitor, **self.hparams.early_stopping_params)\n ]\n wandb_logger = WandbLogger()\n logger = [\n wandb_logger\n ]\n\n self.trainer = Trainer(gpus=-1, callbacks=callbacks, logger=logger,\n progress_bar_refresh_rate=0, num_sanity_val_steps=0)\n\n # train, evaluate and test model\n self.trainer.fit(self.model, train_dataloaders=train_loader, val_dataloaders=val_loader)\n self.is_fitted = True\n\n def transform(self, energy, calendar, y):\n \"\"\" Forecast energy based on the trained network. \"\"\"\n self.model.eval()\n with torch.no_grad():\n # WARNING: This is only works with a 2015-2019 training set.\n train_or_test = len(np.where(pd.to_datetime(energy.time)\n == np.datetime64('2018-01-01T00:00'))[0]) == 0\n if train_or_test:\n # test\n dataset = MyDataset(energy.values, calendar.values, y.values)\n data_loader = DataLoader(dataset, batch_size=128, num_workers=0)\n self.trainer.test(\n ckpt_path=self.trainer.checkpoint_callback.best_model_path,\n test_dataloaders=data_loader\n )\n dataset = MyDataset(energy.values, calendar.values)\n data_loader = DataLoader(dataset, batch_size=128, num_workers=0)\n inference = self.trainer.predict(\n ckpt_path=self.trainer.checkpoint_callback.best_model_path,\n dataloaders=data_loader\n )\n else:\n # test\n dataset = MyDataset(energy.values, calendar.values)\n data_loader = DataLoader(dataset, batch_size=128, num_workers=0)\n inference = self.trainer.predict(\n ckpt_path=self.trainer.checkpoint_callback.best_model_path,\n dataloaders=data_loader\n )\n\n return numpy_to_xarray(np.concatenate([x.cpu().numpy() for x in inference]).flatten(),\n energy, self.name)\n"
]
| [
[
"pandas.to_datetime",
"torch.no_grad",
"torch.utils.data.DataLoader",
"numpy.datetime64"
]
]
|
ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | [
"07e5d308c6a8641a369a3e0b8d13c4104988cd2b"
]
| [
"cosmosis-standard-library/likelihood/planck2015/plc-2.0/src/python/clik/cldf.py"
]
| [
"from builtins import range\nfrom builtins import object\nimport os\nimport os.path as osp\nimport shutil as shu\ntry:\n import pyfits as pf\nexcept Exception as e:\n pass\n\nimport re\nimport numpy as nm\n\n\ndef pack256(*li):\n rr = \"\"\n for l in li:\n rr += l + '\\0' * (256 - len(l))\n return rr\n\n\ndef is_cldf(name):\n f = open(name)\n try:\n list(f.keys())\n return True\n except Exception as e:\n return False\n\n\ndef open(name, mode=\"r\"):\n return File(name, mode)\n\n\n_metadata = \"_mdb\"\n\n\nclass File(object):\n def __init__(self, name, mode=\"r\"):\n self._mode = '+'\n if mode == \"w\":\n self._create(name)\n return\n if mode == \"r\" or mode == \"r-\":\n self._name = name\n self._mode = \"-\"\n return\n if mode == \"r+\":\n self._name = name\n\n def _parsemetadata(self, path=\"\"):\n if not path:\n path = self._name\n f = file(osp.join(path, _metadata))\n dct = {}\n for l in f:\n if not l.strip():\n continue\n id0 = l.find(\" \")\n key = l[:id0]\n id1 = l[id0 + 1:].find(\" \") + id0 + 1\n typ = l[id0 + 1:id1]\n data = l[id1 + 1:-1]\n if typ == \"int\":\n dct[key] = int(data)\n continue\n if typ == \"float\":\n dct[key] = float(data)\n continue\n if typ == \"str\":\n dct[key] = data\n continue\n f.close()\n raise TypeError(\"unknown type '%s' for metadata '%s'\" % (typ, key))\n f.close()\n return dct\n\n def _writemetadata(self, dct, path=\"\"):\n if not path:\n path = self._name\n f = file(osp.join(path, _metadata), \"w\")\n for k, v in list(dct.items()):\n if type(v) == str:\n typ = \"str\"\n modi = \"%s\"\n elif type(v) in (bool, int, int, nm.int32, nm.int64):\n typ = \"int\"\n v = int(v)\n modi = \"%d\"\n elif type(v) in (float, nm.float32, nm.float64):\n typ = \"float\"\n modi = \"%.10g\"\n else:\n raise TypeError(\"bad type %s\" % type(v))\n f.write((\"%s %s \" + modi + \"\\n\") % (k, typ, v))\n f.close()\n\n def remove(self, name):\n if osp.exists(name):\n if osp.isdir(name):\n shu.rmtree(name)\n else:\n os.remove(name)\n else:\n dct = self._parsemetadata(osp.split(name)[0])\n if osp.split(name)[1] in list(dct.keys()):\n del dct[osp.split(name)[1]]\n self._writemetadata(dct, osp.split(name)[0])\n\n def _create(self, name):\n if osp.isdir(name):\n shu.rmtree(name)\n os.mkdir(name)\n f = file(osp.join(name, _metadata), \"w\")\n f.write(\"\")\n f.close()\n self._name = name\n\n def __contains__(self, key):\n try:\n self[key]\n except Exception:\n return False\n return True\n\n def __getitem__(self, key):\n fkey = osp.join(self._name, key)\n if fkey[-1] == '/':\n fkey = fkey[:-1]\n if osp.exists(fkey):\n if osp.isdir(fkey):\n return File(fkey, \"r\" + self._mode)\n try:\n return pf.open(fkey)[0].data\n except Exception:\n value = file(fkey).read()\n if key + \"__type__\" in self and self[key + \"__type__\"] == \"str_array\":\n rvalue = []\n p0 = value.find(\"\\n\")\n nv = int(value[:p0])\n value = value[p0 + 1:]\n for i in range(nv):\n p1 = value.find(\"\\n\")\n nc = int(value[:p1])\n rvalue += [value[p1 + 1:p1 + 1 + nc]]\n value = value[p1 + 1 + nc + 1:]\n return rvalue\n return value\n\n dct = self._parsemetadata(osp.split(fkey)[0])\n return dct[osp.split(fkey)[1]]\n\n def __setitem__(self, key, value):\n assert self._mode == '+'\n fkey = osp.join(self._name, key)\n if fkey[-1] == '/':\n fkey = fkey[:-1]\n self.remove(fkey)\n if isinstance(value, File):\n\n shu.copytree(value._name, fkey)\n return\n if type(value) in (list, tuple, nm.ndarray):\n if isinstance(value[0], str):\n tvalue = \"%d\\n\" % len(value)\n for v in value:\n tvalue += \"%d\\n\" % len(v) + v + 
\"\\n\"\n f = file(fkey, \"w\")\n f.write(tvalue)\n f.close()\n self[key + \"__type__\"] = \"str_array\"\n return\n value = nm.array(value)\n if value.dtype == nm.int32:\n value = value.astype(nm.int64)\n # print key,fkey,value.dtype\n pf.PrimaryHDU(value).writeto(fkey)\n return\n if type(value) == str and (\"\\n\" in value or \"\\0\" in value or len(value) > 50):\n # print key,len(value)\n\n f = file(fkey, \"w\")\n f.write(value)\n f.close()\n return\n dct = self._parsemetadata(osp.split(fkey)[0])\n dct[osp.split(fkey)[1]] = value\n self._writemetadata(dct, osp.split(fkey)[0])\n\n def create_group(self, name):\n assert self._mode == '+'\n return File(osp.join(self._name, name), \"w\")\n\n def create_dataset(self, name, data=None):\n assert data != None\n self[name] = data\n\n def __delitem__(self, key):\n assert self._mode == '+'\n fkey = osp.join(self._name, key)\n if fkey[-1] == '/':\n fkey = fkey[:-1]\n\n if osp.exists(fkey):\n self.remove(fkey)\n return\n dct = self._parsemetadata(osp.split(fkey)[0])\n del dct[osp.split(fkey)[1]]\n self._writemetadata(dct, osp.split(fkey)[0])\n\n def copy(self, a, b, c=\"\"):\n if not c:\n self[b] = self[a]\n else:\n b[c] = a\n\n @property\n def attrs(self):\n return self\n\n def keys(self):\n dct = self._parsemetadata(self._name)\n ls = [el for el in os.listdir(\n self._name) if el[0] != '.' and el != _metadata]\n return ls + list(dct.keys())\n\n def items(self):\n ks = list(self.keys())\n return [(k, self[k]) for k in ks]\n\n def close(self):\n pass # nothing to do\n\n\ntry:\n import h5py\n\n def hdf2cldf_grp(hdf, fdf):\n # first the metadata\n for kk in list(hdf.attrs.keys()):\n vl = hdf.attrs[kk]\n\n # print kk,type(vl)\n if type(vl) == str:\n sz = h5py.h5a.get_info(hdf.id, kk).data_size\n rr = vl.ljust(sz, '\\0')\n fdf[kk] = rr\n else:\n fdf[kk] = vl\n # then the group/data\n for kk in list(hdf.keys()):\n if kk == \"external_data\":\n dts = hdf[kk][:]\n install_path = osp.join(fdf._name, \"_external\")\n os.mkdir(install_path)\n f = file(osp.join(install_path, \"data.tar\"), \"w\")\n f.write(dts.tostring())\n f.close()\n assert os.system(\"cd %s;tar xvf data.tar\" % install_path) == 0\n assert os.system(\"cd %s;rm -f data.tar\" % install_path) == 0\n fdf[\"external_dir\"] = \".\"\n continue\n god = hdf[kk]\n if isinstance(god, h5py.Group):\n if not hasattr(fdf, kk):\n fdf.create_group(kk)\n hdf2cldf_grp(god, fdf[kk])\n else:\n r = god[:]\n # print r\n if len(r) == 1:\n r = r[0]\n fdf[kk] = r\n\n def hdf2cldf(ffin, ffout):\n hdf = h5py.File(ffin, \"r\")\n fdf = File(ffout, \"w\")\n hdf2cldf_grp(hdf, fdf)\nexcept ImportError as e:\n pass\n"
]
| [
[
"numpy.array"
]
]
|
PierreExeter/custom_gym_envs | [
"2b6a1c16a4198c8d9fa64f10fe09a041826ac81a"
]
| [
"gym_envs/3_particle_goal_oriented/continuous_goal_oriented_particle.py"
]
| [
"import os\nimport copy\nimport numpy as np\n\nimport gym\nfrom gym import error, spaces\nfrom gym.utils import seeding\nfrom collections import OrderedDict\n\n\ndef goal_distance(goal_a, goal_b):\n assert goal_a.shape == goal_b.shape\n return np.linalg.norm(goal_a - goal_b, axis=-1)\n\n\nclass ParticleEnv(gym.GoalEnv):\n def __init__(self):\n self.metadata = {\n #TODO understand what to do with render modes\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 50\n }\n\n # Env variables\n self.min_action = -1.0\n self.max_action = 1.0\n self.min_position = -1.0\n self.max_position = 1.0\n self.distance_threshold = 0.05 # Default 0.05\n self.max_speed = 0.05\n self.power = self.max_speed/20\n self.state = None\n self.reward_type = \"sparse\"\n\n # Env Spaces\n self.min_positions = np.array([self.min_position, self.min_position])\n self.max_positions = np.array([self.max_position, self.max_position])\n min_state = np.array([self.min_position, self.min_position,\n -self.max_speed, -self.max_speed])\n max_state = np.array([self.max_position, self.max_position,\n self.max_speed, self.max_speed])\n min_action = np.array([-1, -1])\n max_action = np.array([1, 1])\n self.action_space = spaces.Box(min_action, max_action, dtype='float32')\n self.start_state_space = spaces.Box(low=min_state, high=max_state)\n self.observation_space = spaces.Dict(dict(\n desired_goal=spaces.Box(self.min_positions, self.max_positions, dtype='float32'),\n achieved_goal=spaces.Box(self.min_positions, self.max_positions, dtype='float32'),\n observation=spaces.Box(min_state, max_state, dtype='float32'),\n ))\n\n # Setup\n self.viewer = None\n self.seed()\n self.goal = self._sample_goal()\n self.reset()\n\n # Env methods\n # ----------------------------\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, action):\n position = self.state[0:2].copy()\n velocity = self.state[2:4].copy()\n force = np.clip(action, self.action_space.low, self.action_space.high)\n velocity += force*self.power\n velocity = np.clip(velocity, np.array([-self.max_speed, -self.max_speed]),\n np.array([self.max_speed, self.max_speed]))\n position += velocity\n clip_position = np.clip(position, self.min_positions, self.max_positions)\n # Invert appropriate velocity component after collision and bounce off\n # of a wall.\n collision = (position == clip_position).astype(int)\n collision[collision == 0] = -1\n velocity = np.multiply(velocity, collision)\n # Set state\n self.state[0:2] = clip_position\n self.state[2:4] = velocity\n obs = self._get_obs()\n info = {\n 'is_success': self._is_success(obs['achieved_goal'], self.goal),\n }\n done = info['is_success']\n reward = self.compute_reward(obs['achieved_goal'], self.goal, info)\n return obs, reward, done, info\n\n def reset(self):\n self.state = self.start_state_space.sample().copy()\n self.goal = self._sample_goal().copy()\n obs = self._get_obs()\n return obs\n\n def close(self):\n if self.viewer is not None:\n self.viewer.finish()\n self.viewer = None\n\n def render(self, mode='human'):\n screen_width = 400\n screen_height = 400\n\n world_width = self.max_position - self.min_position\n scale_w = screen_width / world_width\n scale_h = screen_height / world_width\n point_radius = np.ceil(scale_w * self.distance_threshold / 2)\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n self.agent_trans = rendering.Transform()\n agent_circle = 
rendering.make_circle(point_radius)\n agent_circle.set_color(.8, .2, .2)\n agent_circle.add_attr(self.agent_trans)\n self.viewer.add_geom(agent_circle)\n self.goal_trans = rendering.Transform()\n goal_circle = rendering.make_circle(point_radius)\n goal_circle.set_color(.2, .8, .2)\n goal_circle.add_attr(self.goal_trans)\n self.viewer.add_geom(goal_circle)\n\n pos = self.state[0:2]\n self.agent_trans.set_translation((pos[0] - self.min_position) * scale_w,\n (pos[1] - self.min_position) * scale_h)\n self.goal_trans.set_translation((self.goal[0] - self.min_position) * scale_w,\n (self.goal[1] - self.min_position) * scale_h)\n\n return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\n # Extension methods\n # ----------------------------\n def _get_obs(self):\n return OrderedDict([(\"desired_goal\", self.goal.copy()),\n (\"achieved_goal\", self.state[0:2].copy()),\n (\"observation\", self.state.copy())])\n\n def _is_success(self, achieved_goal, desired_goal):\n \"\"\"Indicates whether or not the achieved goal successfully achieved the desired goal.\n \"\"\"\n success = ((goal_distance(desired_goal, achieved_goal) <\n self.distance_threshold)).astype(np.float32)\n if success:\n print(\"Success!!! With distance of {} between a_g:{}, and d_g {}\"\n \"!\".format(goal_distance(achieved_goal, desired_goal),\n achieved_goal, desired_goal))\n return success\n\n def _sample_goal(self):\n \"\"\"Samples a new goal and returns it.\n \"\"\"\n goal = self.observation_space.sample()[\"desired_goal\"].copy()\n return goal\n\n # GoalEnv methods\n # ----------------------------\n\n def compute_reward(self, achieved_goal, goal, info):\n dist = goal_distance(achieved_goal, goal)\n if self.reward_type == 'sparse':\n return (dist <= self.distance_threshold).astype(np.float32)\n else:\n return -dist\n"
]
| [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.ceil",
"numpy.multiply",
"numpy.clip"
]
]
|
emedvedev/aocr | [
"2ef51cba4770b86fccd34bae4e77d8f3eda3e797"
]
| [
"aocr/util/bucketdata.py"
]
| [
"from __future__ import absolute_import\n\nimport numpy as np\n\n\nclass BucketData(object):\n def __init__(self):\n self.data_list = []\n self.label_list = []\n self.label_list_plain = []\n self.comment_list = []\n\n def append(self, datum, label, label_plain, comment):\n self.data_list.append(datum)\n self.label_list.append(label)\n self.label_list_plain.append(label_plain)\n self.comment_list.append(comment)\n return len(self.data_list)\n\n def flush_out(self, bucket_specs, valid_target_length=float('inf'),\n go_shift=1):\n res = {}\n\n decoder_input_len = bucket_specs[0][1]\n\n # ENCODER PART\n res['data'] = np.array(self.data_list)\n res['labels'] = self.label_list_plain\n res['comments'] = self.comment_list\n\n # DECODER PART\n target_weights = []\n for l_idx in range(len(self.label_list)):\n label_len = len(self.label_list[l_idx])\n if label_len <= decoder_input_len:\n self.label_list[l_idx] = np.concatenate((\n self.label_list[l_idx],\n np.zeros(decoder_input_len - label_len, dtype=np.int32)))\n one_mask_len = min(label_len - go_shift, valid_target_length)\n target_weights.append(np.concatenate((\n np.ones(one_mask_len, dtype=np.float32),\n np.zeros(decoder_input_len - one_mask_len,\n dtype=np.float32))))\n else:\n raise NotImplementedError\n\n res['decoder_inputs'] = [a.astype(np.int32) for a in\n np.array(self.label_list).T]\n res['target_weights'] = [a.astype(np.float32) for a in\n np.array(target_weights).T]\n\n assert len(res['decoder_inputs']) == len(res['target_weights'])\n\n self.data_list, self.label_list, self.label_list_plain, self.comment_list = [], [], [], []\n\n return res\n\n def __len__(self):\n return len(self.data_list)\n\n def __iadd__(self, other):\n self.data_list += other.data_list\n self.label_list += other.label_list\n self.label_list_plain += other.label_list_plain\n self.comment_list += other.comment_list\n\n def __add__(self, other):\n res = BucketData()\n res.data_list = self.data_list + other.data_list\n res.label_list = self.label_list + other.label_list\n res.label_list_plain = self.label_list_plain + other.label_list_plain\n res.comment_list = self.comment_list + other.comment_list\n return res\n"
]
| [
[
"numpy.array",
"numpy.ones",
"numpy.zeros"
]
]
|
OddballSports-tv/obies-scoreboard | [
"1df2a6346f41eafc937f218728cb39ecc44f4a5c"
]
| [
"views/bocce/bocceui.py"
]
| [
"# imports\nimport sys\nimport os\n\n# add the parent directory (absolute, not relative) to the sys.path\n# (this makes the games package imports work)\nsys.path.append(os.path.abspath(os.pardir))\n\n# PyQt imports\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5 import uic, QtGui, QtTest\nfrom PyQt5.QtGui import QImage, QPixmap, QColor, QPainter, QMovie\nfrom PyQt5.QtCore import QThread, QTimer, QRect, Qt, QSize\nfrom PyQt5.QtWidgets import QInputDialog, QWidget, QDialog, QLabel, QMessageBox\n\n# bocce game imports\nfrom model.games.bocce.team import Team\nfrom model.games.bocce.ballflag import BallFlag\n\n# tv remote import\nfrom model.remotes.ati import ATI\n#from model.remotes.flirc.sparkfun import Sparkfun\n\n# Google sheet interface import\nfrom model.googlesheets.gsheet import GSheet\n\n# color constant imports\nfrom .colors import *\n\n# other imports\nimport numpy as np\nimport cv2\nimport imutils\nfrom imutils import paths\nimport argparse\nfrom playsound import playsound\nfrom tinytag import TinyTag\nimport random\nimport threading\nfrom collections import deque\n\n# logging\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n# INDICATOR AND GRAPHIC SIZES\nBALL_INDICATOR_SIZE = 200\nTOP_LEFT_LOGO_SIZE = 200\nBOTTOM_LOGO_WIDTH = 500\nTOP_RIGHT_LOGO_SIZE = 150\n\n# DEFAULT MINUTES\nDEFAULT_GAME_MINUTES = 20\nDEFAULT_WARMUP_MINUTES = 5\n\n# BUTTON HISTORY\nBUTTON_HISTORY_LENGTH = 20\n\n# todo move sound and animation convenience functions to a helpers file\n\n# MEDIA for ABC\nMEDIA_DIR = os.path.join(\"..\", \"media-abc\")\n\n# SOUND FILE TYPES\nSOUND_TYPES = (\".m4a\", \".mp3\", \".wav\", \".WAV\")\n\n# ANIMATION TYPES\nANIMATION_TYPES = (\".gif\", \".GIF\")\n\n###### SET ME!!!!!!!!!!!!!!!!!!! ####################\nRFID_READER_CONNECTED = False\n#####################################################\n\ndef soundfile_duration(path):\n tag = TinyTag.get(path)\n seconds = tag.duration\n return seconds\n\n\ndef list_sounds(dir, contains=None):\n \"\"\"grabs all sound file paths in a directory\"\"\"\n return list(paths.list_files(dir, validExts=SOUND_TYPES, contains=contains))\n\ndef play_random_sound(sound_dir):\n \"\"\"plays a random sound in a directory\"\"\"\n # play a random sound\n sounds = list_sounds(sound_dir)\n if len(sounds) == 0:\n return\n sound_filename = random.choice(sounds)\n threading.Thread(target=playsound, args=(sound_filename,)).start()\n\ndef list_animations(dir, contains=None):\n \"\"\"grabs all animations in a directory path\"\"\"\n return list(paths.list_files(dir, validExts=ANIMATION_TYPES, contains=contains))\n\ndef sleep(timeout):\n \"\"\"PyQt friendly non-blocking sleep (alternative to `time.sleep()`)\"\"\"\n QtTest.QTest.qWait(timeout * 1000)\n\nclass Animation():\n \"\"\"Plays GIF animations nearly fullscreen\"\"\"\n # todo grab screen resolution and adjust the window size programmatically\n\n def __init__(self, gif_path, timeout=8):\n #super(Animation, self).__init__()\n self.timeout=timeout\n self.dlg = QDialog()\n self.dlg.setWindowTitle(\"animation\")\n self.dlg.setWindowModality(False)\n self.dlg.setFixedSize(800, 800)\n self.dlg.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.CustomizeWindowHint)\n self.label_animation = QLabel(self.dlg)\n self.movie = QMovie(gif_path)\n self.movie.setScaledSize(QSize(self.dlg.width(), self.dlg.height()))\n self.label_animation.setMovie(self.movie)\n\n def start(self):\n self.run()\n\n def run(self):\n self.movie.start()\n self.dlg.show()\n sleep(self.timeout)\n self.quit()\n\n def quit(self):\n 
self.movie.stop()\n self.dlg.done(0)\n\nclass MainWindow(QtWidgets.QMainWindow):\n\n def __init__(self, ui, clargs, *args, **kwargs):\n \"\"\"\n constructor\n \"\"\"\n super().__init__(*args, **kwargs)\n\n # load the user interface\n uic.loadUi(ui, self)\n\n # MainWindow settings\n # set the window title\n self.setWindowTitle(\"Obie's Scoreboard - {} - {}\".format(clargs[\"game\"], clargs[\"view\"]))\n # maximize the window\n self.showMaximized()\n\n # game timer and down/back setting\n self.GAME_MINUTES = DEFAULT_GAME_MINUTES\n self.GAME_WARMUP_MINUTES = DEFAULT_WARMUP_MINUTES\n self.DOWN_BACK_ENABLED = None\n self.gameTimer = QTimer()\n self.gameTimer.setInterval(1000) # milli-seconds in one second\n self.gameTimer.timeout.connect(self.time_tick)\n self.time_min_left = self.GAME_MINUTES\n self.time_sec_left = 0\n self.clock_count_up = False\n self.clock_count_down = True\n self.time_is_out = False\n self.down_and_back = False\n self.game_time_ui_update()\n self.timer_paused = False\n self.clock_edit_mode = False\n self.wait_for_clock_edit_or_start = False\n\n # minimal game info\n self.homeTeam = Team(\"TeamA\")\n self.homeTeam.teamBallColor = TEAL\n self.homeTeam.teamObieColor = \"Teal\"\n self.awayTeam = Team(\"TeamB\")\n self.awayTeam.teamBallColor = PINK\n self.awayTeam.teamObieColor = \"Pink\"\n\n # score\n self.homeTeam.score = 0\n self.awayTeam.score = 0\n self.homeTeamCycleScore = 0\n self.awayTeamCycleScore = 0\n\n self.frame_count = 0\n self.recent_frame_winner = None\n\n # set font colors\n # todo colors should be based on the ball color\n # todo patterned example ball should display next to team name\n # home\n self.set_widget_font_foreground_color(self.label_homelabel, TEAL)\n self.set_widget_font_foreground_color(self.label_hometeam, TEAL)\n self.set_widget_font_foreground_color(self.lcdNumber_homescore, TEAL)\n # away\n self.set_widget_font_foreground_color(self.label_awaylabel, PINK)\n self.set_widget_font_foreground_color(self.label_awayteam, PINK)\n self.set_widget_font_foreground_color(self.lcdNumber_awayscore, PINK)\n\n # set team names\n self.label_hometeam.setText(str(self.homeTeam))\n self.label_awayteam.setText(str(self.awayTeam))\n\n # update the top left corner logo to indicating that the pallino needs to be thrown\n qImg = self.load_logo_qImg('views/oddball_graphics/cut_assets/Mark-1C-Yellow.png', TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n \n # draw ball indicators\n self.draw_rgba_qimg(self.label_homeballindicator, self.cv2img_to_qImg(self.make_ball(color=(0, 0, 0)), BALL_INDICATOR_SIZE))\n self.draw_rgba_qimg(self.label_awayballindicator, self.cv2img_to_qImg(self.make_ball(color=(0, 0, 0)), BALL_INDICATOR_SIZE))\n \n # draw the bottom logo\n qImg = self.load_logo_qImg('views/packaworldintegration/long_white.png', BOTTOM_LOGO_WIDTH)\n self.draw_rgba_qimg(self.label_bottomadvertisement, qImg)\n\n # run the TV remote receiver task (it is threaded with signals)\n self.enableKeyPressEventHandler = False\n self.add_points_mode = False\n self._prevButton_str = None\n self._prevButton = None\n self._wait_for_ok = False\n self.buttonHistory = deque(maxlen=BUTTON_HISTORY_LENGTH)\n self.waitForRemoteButtonPressSignal(clargs[\"remote\"])\n\n # load team name data from Google Sheet\n self.gs = GSheet()\n self.team_name_values = self.gs.get_values(\"teams!A:A\")\n self.court_and_games = self.gs.get_values(\"2020-02-12_games!A14:F19\")\n self.court_and_games_idx = 0\n self.display_game_info_at_bottom_of_screen()\n self.value_idx = 0\n\n 
# load graphic instruction to set game\n qImg = self.load_logo_qImg('views/oddball_graphics/select_game.png', TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n\n # display the game clock value\n self.lcdNumber_game_time_remaining_min.display(\n str(self.time_min_left).zfill(2))\n self.lcdNumber_game_time_remaining_sec.display(\n str(self.time_sec_left).zfill(2))\n\n # set the window focus\n self.setFocus()\n\n def load_animation(self, gif_path, timeout=8):\n logging.info(\"loading animation\")\n self.animation = Animation(gif_path, timeout)\n self.animation.start()\n logging.info(\"animation started\")\n self.setFocus()\n logging.info(\"window focus set back to main window\")\n\n\n def stop_animation(self):\n logging.info(\"stopping animation\")\n self.animation.quit()\n self.animation = None\n logging.info(\"animation stopped and set to None\")\n self.setFocus()\n logging.info(\"window focus set back to main window\")\n\n def closeEvent(self, event) -> None:\n logging.info(\"close window pressed\")\n result = QMessageBox.question(self,\n \"Confirm Exit...\",\n \"Are you sure you want to exit ?\",\n QMessageBox.Yes | QMessageBox.No)\n event.ignore()\n\n if result == QMessageBox.Yes:\n try:\n self.animation.quit()\n except AttributeError:\n pass\n event.accept()\n logging.info(\"window closed\")\n logging.info(\"most recent {} buttons = {}\".format(str(len(self.buttonHistory)), str(self.buttonHistory)))\n\n def play_random_animation(self, gif_dir, timeout=5):\n logging.info(\"playing random animation\")\n animations = list_animations(gif_dir)\n if len(animations)== 0:\n return\n gif_filename = random.choice(animations)\n self.load_animation(gif_path=gif_filename, timeout=timeout)\n\n def play_animation(self, path, timeout=5):\n logging.info(\"playing animation located at {}\".format(str(path)))\n gif_filename = path\n self.load_animation(gif_path=gif_filename, timeout=timeout)\n\n def _stop_animation(self, button):\n if self.animation is not None:\n if self._prevButton_str == button or self._prevButton == button:\n logging.info(\"key was pressed twice, so stopping the animation\")\n self.stop_animation()\n self._prevButton_str = None\n self._prevButton = None\n\n\n def waitForRemoteButtonPressSignal(self, remote):\n if remote.lower() == \"ati\":\n logging.info(\"using ATI remote so starting a QThread worker to listen\")\n \"\"\"uses PyQt QThread, signals, and slots concepts\"\"\"\n # Step 1: implement a QObject with a signal `models.remote.ATI(QObject)`\n # Step 2: Create a QThread object\n self.thread = QThread()\n\n # Step 3: Create a worker object\n self.worker = ATI()\n\n self.worker.connect()\n\n # Step 4: Move worker to the thread\n self.worker.moveToThread(self.thread)\n\n # Step 5: Connect signals and slots\n self.thread.started.connect(self.worker.run)\n # finished could be when the power button is pressed\n #self.worker.finished.connect(self.thread.quit)\n #self.worker.finished.connect(self.worker.deleteLater)\n # end finished\n # call a function when there is a new unique ATI remote keypress\n self.worker.newUniqueKeyPress.connect(self.handle_ati_remote_button_press)\n\n # Step 6: Start the thread\n self.thread.start()\n\n # Step 7: Final resets\n #nothing in this case\n\n elif remote == \"sparkfun\":\n logging.info(\"using Sparkfun remote\")\n self.enableKeyPressEventHandler = True\n\n def update_gsheet_score(self):\n # grab game\n ROW = self.court_and_games_idx + 2\n A_SCORE_COLUMN = 4\n B_SCORE_COLUMN = 5\n\n values = [\n [self.homeTeam.score, 
self.awayTeam.score]\n ]\n\n self.gs.set_values(\"2020-02-12_games!E{}:F{}\".format(ROW, ROW), values)\n\n def display_game_info_at_bottom_of_screen(self):\n try:\n self.court_and_games = self.gs.get_values(\"2020-02-12_games!A14:F19\")\n # set g sheet icon in top leftr\n qImg = self.load_logo_qImg('views/oddball_graphics/cloud.png',\n TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n court = self.court_and_games[self.court_and_games_idx][0]\n ttime = self.court_and_games[self.court_and_games_idx][1]\n ta = self.court_and_games[self.court_and_games_idx][2]\n tb = self.court_and_games[self.court_and_games_idx][3]\n self.homeTeam.change_team_name(ta)\n self.awayTeam.change_team_name(tb)\n self.label_hometeam.setText(str(self.homeTeam))\n self.label_awayteam.setText(str(self.awayTeam))\n print(\"Court: {}, Time: {}, {} vs. {}\".format(court, ttime, ta, tb))\n self.label_court_and_game.setText(\"Court: {}, Time: {}\".format(court, ttime)) #, {} vs. {}\".format(court, ttime, ta, tb))\n except:\n print(\"empty cell in list of games\")\n return\n\n def play_entry_announcement(self, RFID_READER_CONNECTED):\n NAME_COLUMN = 0\n RFID_COLUMN = 1\n NICKNAME_COLUMN = 3\n GIF_COLUMN = 4\n AUDIO_COLUMN = 5\n\n TEAM_A_COLUMN = 2\n TEAM_B_COLUMN = 3\n\n # grab Team A player names\n ta = self.court_and_games[self.court_and_games_idx][TEAM_A_COLUMN]\n print(ta)\n tap1 = ta.split(\" & \")[0]\n tap2 = ta.split(\" & \")[1]\n\n # grab Team B player names\n tb = self.court_and_games[self.court_and_games_idx][TEAM_B_COLUMN]\n tbp1 = tb.split(\" & \")[0]\n tbp2 = tb.split(\" & \")[1]\n\n # lookup name in players sheet, and determine audio and gif\n player_info = self.gs.get_values(\"players!A2:F\")\n\n def grab_RFIDs_required(team_player_name):\n rfids_required = {}\n for player in player_info:\n if player[NAME_COLUMN] == team_player_name:\n rfids_required[player[RFID_COLUMN]] = False\n return rfids_required\n\n def play_team_player_name(team_player_name):\n for player in player_info:\n if player[NAME_COLUMN] == team_player_name:\n # play sound\n seconds = None\n try:\n if player[AUDIO_COLUMN] == \"random\":\n logging.info(\"playing random game announcement\")\n sound_filepath = os.path.join(MEDIA_DIR, \"announcement_game\", \"random\")\n seconds = soundfile_duration(sound_filepath)\n play_random_sound(sound_filepath)\n else:\n logging.info(\"playing {} game announcement\".format(player[AUDIO_COLUMN]))\n sound_filepath = os.path.join(MEDIA_DIR, \"announcement_game\", \"lastname_firstname\", player[AUDIO_COLUMN])\n seconds = soundfile_duration(sound_filepath)\n threading.Thread(target=playsound, args=(sound_filepath,)).start()\n except:\n logging.WARNING(\"couldn't find sound media file\")\n\n # play animation\n try:\n if seconds is not None:\n timeout = seconds\n else:\n timeout = 3\n if player[GIF_COLUMN] == \"random\":\n logging.info(\"playing random game announcement gif\")\n self.play_random_animation(os.path.join(MEDIA_DIR, \"announcement_game\", \"random\"), timeout=timeout)\n else:\n logging.info(\"playing {} game announcement gif\".format(player[GIF_COLUMN]))\n gif_path = os.path.join(MEDIA_DIR, \"announcement_game\", \"lastname_firstname\", player[GIF_COLUMN])\n self.play_animation(gif_path, timeout=3)\n except:\n logging.WARNING(\"couldn't find gif media file\")\n\n # wait N seconds before playing the next sound\n sleep(2.3)\n\n\n\n\n # go ahead and play player names\n if not RFID_READER_CONNECTED:\n play_team_player_name(tap1)\n play_team_player_name(tap2)\n 
play_team_player_name(tbp1)\n play_team_player_name(tbp2)\n\n # otherwise, wait for them to badge in\n elif RFID_READER_CONNECTED:\n # grab rfids required\n rfids_required = grab_RFIDs_required()\n\n # import the RFID reader\n import RPi.GPIO as GPIO\n from mfrc522 import SimpleMFRC522\n GPIO.setwarnings(False)\n\n # initialize the reader\n reader = SimpleMFRC522()\n\n # set the previous id and previous read time\n prevID = None\n prevReadTime = time.time()\n\n # loop until everyone is present\n while True:\n try:\n ID, name = reader.read()\n readTime = time.time()\n elapsed = readTime - prevReadTime\n ID = str(ID).zfill(16)\n name = name.strip()\n for requiredID, isPresent in rfids_required.items():\n if ID == requiredID and not isPresent:\n rfids_required[ID] = True\n if False in rfids_required.values():\n continue\n else:\n break\n except Exception as e:\n print(str(e))\n continue\n\n # everyone badged in, so play the names!\n play_team_player_name(tap1)\n play_team_player_name(tap2)\n play_team_player_name(tbp1)\n play_team_player_name(tbp2)\n\n\n\n # KEYPRESSES ##################################################################\n def handle_key_PWR(self):\n if self.clock_edit_mode:\n if not self.wait_for_clock_edit_or_start:\n if self.game_in_progress():\n # toggle the timer being paused\n self.timer_paused = not self.timer_paused\n self.clock_edit_mode = False\n self.add_points_mode = False\n if self.timer_paused:\n qImg = self.load_logo_qImg('views/oddball_graphics/paused.png',\n TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n elif not self.timer_paused:\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n\n\n elif self.wait_for_clock_edit_or_start:\n # start the game\n if not self.game_in_progress():\n self.play_entry_announcement(RFID_READER_CONNECTED)\n sleep(2)\n\n # todo play game start sound\n sound_filename = os.path.join(\"sounds\", \"game_status\",\n \"lets_roll.m4a\")\n threading.Thread(target=playsound, args=(sound_filename,)).start()\n\n # start the timer\n self.start_game_timer(self.GAME_MINUTES)\n\n # reset modes\n self.add_points_mode = False\n self.clock_edit_mode = False\n self.wait_for_clock_edit_or_start = False\n\n # clear the previous key\n self._prevButton = None\n return\n\n # if we're in add points mode, lock in the points\n elif self.add_points_mode:\n self.lock_in_frame_score()\n # reset mode\n self.add_points_mode = False\n\n # if we're not adding points, activate add points mode\n elif not self.add_points_mode:\n self.add_points_mode = True\n\n def handle_key_A(self):\n # must be in clock mode to edit teams\n if self.clock_edit_mode:\n if not self.game_in_progress():\n self.court_and_games_idx += 1\n if self.court_and_games_idx >= len(self.court_and_games):\n self.court_and_games_idx = 0\n\n try:\n self.display_game_info_at_bottom_of_screen()\n except:\n self.court_and_games_idx = 0\n print(\"resetting index to 0\")\n self.display_game_info_at_bottom_of_screen()\n\n else:\n if self.game_in_progress():\n if not self.add_points_mode:\n # home is in\n self.homeTeam.ballFlag.toggle_in(True)\n self.awayTeam.ballFlag.toggle_in(False)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n elif self.add_points_mode:\n # begin cycling points and clear other team's temp score\n self.homeTeam.cycle_score()\n self.awayTeam.temp_points = 0\n # display both team scores\n 
self.update_score_widget(self.homeTeam, showTempPoints=True)\n self.update_score_widget(self.awayTeam, showTempPoints=True)\n\n def handle_key_B(self):\n # must be in clock mode to edit teams\n if self.clock_edit_mode:\n if not self.game_in_progress():\n self.court_and_games_idx -= 1\n if self.court_and_games_idx < 0:\n self.court_and_games_idx = len(self.court_and_games) - 1\n try:\n self.display_game_info_at_bottom_of_screen()\n except:\n print(\"empty cell in list of games\")\n return\n\n else:\n if self.game_in_progress():\n if not self.add_points_mode:\n # home is in\n self.homeTeam.ballFlag.toggle_in(False)\n self.awayTeam.ballFlag.toggle_in(True)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n elif self.add_points_mode:\n # begin cycling points and clear other team's temp score\n self.awayTeam.cycle_score()\n self.homeTeam.temp_points = 0\n # display both team scores\n self.update_score_widget(self.homeTeam, showTempPoints=True)\n self.update_score_widget(self.awayTeam, showTempPoints=True)\n\n def handle_key_C(self):\n # wait for PWR\n if not self.clock_edit_mode:\n self.clock_edit_mode = True\n self.add_points_mode = False\n\n # show the clock graphic\n qImg = self.load_logo_qImg('views/oddball_graphics/clock.png',\n TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n\n # if the game isn't in session, wait for edit or start\n if not self.game_in_progress():\n self.wait_for_clock_edit_or_start = True\n\n elif self.clock_edit_mode:\n self.clock_edit_mode = False\n self.wait_for_clock_edit_or_start = False\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n\n def handle_key_RETURN(self):\n # sequence: C + Return\n if self.clock_edit_mode and self._prevButton == QtCore.Qt.Key_C:\n if not self.game_in_progress():\n # pause the timer\n self.timer_paused = True\n\n # lookup name in players sheet, and determine audio and gif\n NAME_COLUMN = 0\n RFID_COLUMN = 1\n NICKNAME_COLUMN = 3\n GIF_COLUMN = 4\n AUDIO_COLUMN = 5\n player_info = self.gs.get_values(\"players!A2:F\")\n\n def play_team_player_name(team_player_name):\n for player in player_info:\n if player[NAME_COLUMN] == team_player_name:\n # play sound\n sound_filename = os.path.join(\"sounds\", \"player_announcement\",\n player[AUDIO_COLUMN])\n threading.Thread(target=playsound,\n args=(sound_filename,)).start()\n\n # play animation\n if player[GIF_COLUMN] == \"random\":\n self.play_random_animation(\n os.path.join(\"animations\", \"player_announcement\"),\n timeout=2.4)\n else:\n gif_path = os.path.join(\"animations\",\n \"player_announcement\",\n player[GIF_COLUMN])\n self.play_animation(gif_path, timeout=2.2)\n\n sleep(1.5)\n\n # play the tie game\n if self.homeTeam.score == self.awayTeam.score:\n sound_filename = os.path.join(\"sounds\", \"game_status\",\n \"finishedinatie.m4a\")\n threading.Thread(target=playsound, args=(sound_filename,)).start()\n\n # home team wins\n elif self.homeTeam.score > self.awayTeam.score:\n p1 = str(self.homeTeam).split(\" & \")[0]\n p2 = str(self.homeTeam).split(\" & \")[1]\n sound_filename = os.path.join(\"sounds\", \"game_status\",\n \"winnerwinnerchickendinner.m4a\")\n threading.Thread(target=playsound, args=(sound_filename,)).start()\n sleep(4)\n play_team_player_name(p1)\n play_team_player_name(p2)\n\n\n # away team wins\n elif self.awayTeam.score > self.homeTeam.score:\n p1 = str(self.awayTeam).split(\" & \")[0]\n p2 = 
str(self.awayTeam).split(\" & \")[1]\n sound_filename = os.path.join(\"sounds\", \"game_status\",\n \"winnerwinnerchickendinner.m4a\")\n threading.Thread(target=playsound, args=(sound_filename,)).start()\n sleep(.5)\n play_team_player_name(p1)\n play_team_player_name(p2)\n\n # update g sheet\n self.update_gsheet_score()\n\n # set g sheet icon in top leftr\n qImg = self.load_logo_qImg('views/oddball_graphics/gsheet_updated.png',\n TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n\n # stop the game\n self.down_and_back = False\n self.clock_edit_mode = False\n self.add_points_mode = False\n self.time_min_left = DEFAULT_GAME_MINUTES\n self.stop_game_timer()\n\n # clear the down and back indicator\n self.label_downandback.clear()\n self.label_downandback.repaint()\n\n # reset prev button and return\n self._prevButton = None\n\n # wait for 5 seconds\n sleep(5)\n\n # load graphic instruction to set game\n qImg = self.load_logo_qImg('views/oddball_graphics/select_game.png',\n TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n return\n else:\n if self.timer_paused:\n self.stop_game_timer()\n # draw the stopped graphic\n qImg = self.load_logo_qImg('views/oddball_graphics/stopped.png',\n TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n\n return\n\n elif not self.timer_paused and self.game_in_progress():\n try:\n self.stop_animation()\n except:\n pass\n # play \"shot_clock_warning\"\n # play a random sound and gif\n play_random_sound(\"sounds/shot_clock_warning\")\n self.play_random_animation(\"animations/shot_clock_warning\")\n\n def handle_key_UP(self):\n # increment minutes in clock edit mode\n if self.clock_edit_mode and not self.game_in_progress():\n self.clock_increment_minute()\n\n # play \"too long\"\n else:\n try:\n self.stop_animation()\n except:\n pass\n # play a random sound and gif\n play_random_sound(\"sounds/too_long\")\n self.play_random_animation(\"animations/too_long\")\n\n def handle_key_DOWN(self):\n # decrement minutes in clock edit mode\n if self.clock_edit_mode and not self.game_in_progress():\n self.clock_decrement_minute()\n\n # play \"too short\"\n else:\n try:\n self.stop_animation()\n except:\n pass\n # play a random sound and gif\n play_random_sound(\"sounds/too_short\")\n self.play_random_animation(\"animations/too_short\")\n\n def handle_key_LEFT(self):\n if self.clock_edit_mode and not self.game_in_progress():\n self.clock_count_up = False\n self.clock_count_down = True\n self.GAME_MINUTES = DEFAULT_GAME_MINUTES\n self.time_min_left = self.GAME_MINUTES\n self.game_time_ui_update()\n\n else:\n try:\n self.stop_animation()\n except:\n pass\n # play \"bad shot\"\n # play a random sound and gif\n play_random_sound(\"sounds/bad_shot\")\n self.play_random_animation(\"animations/bad_shot\")\n\n def handle_key_RIGHT(self):\n if self.clock_edit_mode and not self.game_in_progress():\n self.clock_count_up = True\n self.clock_count_down = False\n self.GAME_MINUTES = 0\n self.time_min_left = 0\n self.game_time_ui_update()\n\n\n else:\n try:\n self.stop_animation()\n except:\n pass\n # play \"good shot\"\n # play a random sound and gif\n play_random_sound(\"sounds/good_shot\")\n self.play_random_animation(\"animations/good_shot\")\n\n # END KEYPRESSES ##################################################################\n\n def clock_increment_minute(self):\n if self.clock_count_down:\n self.GAME_MINUTES += 1\n if self.GAME_MINUTES >= 99:\n self.GAME_MINUTES = 99\n logging.info(\"game minutes 
pegged at 99\")\n self.time_min_left = self.GAME_MINUTES\n self.game_time_ui_update()\n\n def clock_decrement_minute(self):\n if self.clock_count_down:\n self.GAME_MINUTES -= 1\n if self.GAME_MINUTES <= 0:\n self.GAME_MINUTES = 0\n logging.info(\"game minutes pegged at 1\")\n self.time_min_left = self.GAME_MINUTES\n self.game_time_ui_update()\n\n def keyPressEvent(self, event):\n logging.info(\"key pressed: {}\".format(str(event.key())))\n self.buttonHistory.append(event.key())\n\n if not self.enableKeyPressEventHandler:\n logging.CRITICAL(\"key is not being handled\")\n return\n\n # play a beep\n threading.Thread(target=playsound, args=(\"sounds/beep/beep_padded.mp3\",)).start()\n\n # pwr key reads as an \"s\"\n if event.key() == QtCore.Qt.Key_S:\n self.handle_key_PWR()\n elif event.key() == QtCore.Qt.Key_A:\n self.handle_key_A()\n elif event.key() == QtCore.Qt.Key_B:\n self.handle_key_B()\n elif event.key() == QtCore.Qt.Key_C:\n self.handle_key_C()\n # center of D pad reads as \"return\"\n elif event.key() == QtCore.Qt.Key_Return:\n self.handle_key_RETURN()\n elif event.key() == QtCore.Qt.Key_Up:\n self.handle_key_UP()\n elif event.key() == QtCore.Qt.Key_Down:\n self.handle_key_DOWN()\n elif event.key() == QtCore.Qt.Key_Left:\n self.handle_key_LEFT()\n elif event.key() == QtCore.Qt.Key_Right:\n self.handle_key_RIGHT()\n\n # set the previous button\n self._prevButton = event.key()\n\n def handle_ati_remote_button_press(self, button):\n # grab the button string\n button_str = str(button)\n\n # handle waiting for a \"TIME\" + \"OK\" two-button sequence or \"STOP\" + \"OK\" two-button sequence\n if button_str != \"OK\" \\\n and (self._prevButton_str == \"TIME\"\n or self._prevButton_str == \"STOP\"\n or self._prevButton_str == \"A\"\n or self._prevButton_str == \"B\"\n or self._prevButton_str == \"ROUND_D_DOWN\"\n or self._prevButton_str == \"ROUND_D_UP\"):\n\n self._wait_for_ok = False\n\n # switch case\n # todo these button handlers need to be cleaned up\n\n # Ball indicator controls - TEAM\n if button_str == \"VOL_UP\":\n self.homeTeam.ballFlag.toggle_in(True)\n self.awayTeam.ballFlag.toggle_in(False)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n\n elif button_str == \"VOL_DOWN\":\n self.homeTeam.ballFlag.toggle_in(False)\n self.awayTeam.ballFlag.toggle_in(True)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n\n elif button_str == \"CH_UP\":\n self.homeTeam.ballFlag.toggle_in(False)\n self.awayTeam.ballFlag.toggle_in(True)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n\n elif button_str == \"CH_DOWN\":\n self.homeTeam.ballFlag.toggle_in(True)\n self.awayTeam.ballFlag.toggle_in(False)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n\n # Top left logos - GENERIC (no team)\n elif button_str == \"FM\":\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/hotshot.png', TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n elif button_str == \"EXPAND\":\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/kiss.png', TOP_LEFT_LOGO_SIZE)\n 
self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n\n elif button_str == \"HAND\":\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/measurement.png', TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n\n # cycle frame score - HOME\n elif button_str == \"CHECK\":\n self.homeTeam.cycle_score()\n # clear other team's temp score\n self.awayTeam.temp_points = 0\n\n self.update_score_widget(self.homeTeam, showTempPoints=True)\n self.update_score_widget(self.awayTeam, showTempPoints=True)\n\n # cycle frame score - AWAY\n elif button_str == \"X\":\n self.awayTeam.cycle_score()\n # clear other team's temp score\n self.homeTeam.temp_points = 0\n # display both team scores\n self.update_score_widget(self.awayTeam, showTempPoints=True)\n self.update_score_widget(self.homeTeam, showTempPoints=True)\n\n # lock in frame score\n elif button_str == \"ATI\":\n self.lock_in_frame_score()\n\n # cancel frame score\n elif button_str == \"MUTE\":\n self.cancel_previous_frame_score()\n\n # time\n elif button_str == \"TIME\":\n self._wait_for_ok = True\n\n # warmup time\n elif button_str == \"INFO\":\n self._wait_for_ok = True\n\n # two key press\n elif button_str == \"OK\":\n # if we're waiting for ok\n if self._wait_for_ok:\n # handle key press sequence\n if self._prevButton_str == \"TIME\":\n self.start_game_timer(self.GAME_MINUTES)\n if self._prevButton_str == \"INFO\":\n self.start_game_timer(self.GAME_WARMUP_MINUTES)\n elif self._prevButton_str == \"STOP\":\n self.stop_game_timer()\n elif self._prevButton_str == \"PAUSE\":\n self.timer_paused = True\n elif self._prevButton_str == \"PLAY\":\n self.timer_paused = False\n\n # reset the wait for ok boolean\n self._wait_for_ok = False\n\n elif button_str == \"STOP\":\n self._wait_for_ok = True\n\n elif button_str == \"?\":\n # grab latest Google sheet data\n self.team_name_values = self.gs.get_values(1)\n\n elif button_str == \"A\":\n self.value_idx += 1\n if self.value_idx >= len(self.team_name_values):\n self.value_idx = 0\n try:\n self.set_team_name(self.homeTeam, str(self.team_name_values[self.value_idx])[2:-2])\n except Exception as e:\n print(str(e))\n pass\n elif button_str == \"B\":\n self.value_idx += 1\n if self.value_idx >= len(self.team_name_values):\n self.value_idx = 0\n try:\n self.set_team_name(self.awayTeam, str(self.team_name_values[self.value_idx])[2:-2])\n except Exception as e:\n print(str(e))\n pass\n\n elif button_str == \"PAUSE\":\n self._wait_for_ok = True\n elif button_str == \"PLAY\":\n self._wait_for_ok = True\n\n # sounds\n elif button_str == \"D_UP\":\n # play a random sound\n play_random_sound(\"sounds/too_long\")\n\n # open a random gif\n self.play_random_animation(\"animations/too_long\")\n\n elif button_str == \"D_DOWN\":\n # play a random sound\n play_random_sound(\"sounds/too_short\")\n\n # open a random gif\n self.play_random_animation(\"animations/too_short\")\n\n elif button_str == \"D_LEFT\":\n # play a random sound\n play_random_sound(\"sounds/bad_shot\")\n\n # open a random gif\n self.play_random_animation(\"animations/bad_shot\")\n\n elif button_str == \"D_RIGHT\":\n # play a random sound\n play_random_sound(\"sounds/good_shot\")\n\n # open a random gif\n self.play_random_animation(\"animations/good_shot\")\n\n elif button_str == \"C\":\n # ball drawing bottom left and bottom right\n self.homeTeam.ballFlag.toggle_in(True, casino=True)\n self.awayTeam.ballFlag.toggle_in(False)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n\n # 
play a random sound\n play_random_sound('sounds/casino')\n\n # open a random gif\n self.play_random_animation(\"animations/casino\")\n\n elif button_str == \"D\":\n # ball drawing bottom left and bottom right\n self.homeTeam.ballFlag.toggle_in(False)\n self.awayTeam.ballFlag.toggle_in(True, casino=True)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n\n # play a random sound\n play_random_sound(\"sounds/casino\")\n\n # open a random gif\n self.play_random_animation(\"animations/casino\")\n\n elif button_str == \"E\":\n # play a random sound\n play_random_sound(\"sounds/shot_clock_warning\")\n\n # open a random gif\n self.play_random_animation(\"animations/shot_clock_warning\")\n\n # set the previous button\n self._prevButton_str = button_str\n\n def increment_score(self, team):\n team.score += 1\n self.update_score_widget(team)\n\n def decrement_score(self, team):\n team.score -= 1\n if team.score < 0: team.score = 0\n self.update_score_widget(team)\n\n def game_in_progress(self):\n if self.gameTimer.isActive():\n return True\n elif self.down_and_back:\n return True\n return False\n\n def lock_in_frame_score(self):\n if self.homeTeam.temp_points == 4:\n # ball drawing bottom left and bottom right\n self.homeTeam.ballFlag.toggle_in(True, casino=True)\n self.awayTeam.ballFlag.toggle_in(False)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n\n # play a random sound and gif\n play_random_sound(\"sounds/casino\")\n self.play_random_animation(\"animations/casino\")\n\n elif self.awayTeam.temp_points == 4:\n # ball drawing bottom left and bottom right\n self.homeTeam.ballFlag.toggle_in(False)\n self.awayTeam.ballFlag.toggle_in(True, casino=True)\n self.draw_ball_indicator(self.homeTeam)\n self.draw_ball_indicator(self.awayTeam)\n\n # play a random sound and gif\n play_random_sound(\"sounds/casino\")\n self.play_random_animation(\"animations/casino\")\n\n self.homeTeam.add_points()\n self.awayTeam.add_points()\n\n self.update_score_widget(self.homeTeam)\n self.update_score_widget(self.awayTeam)\n\n # increment the frame count\n self.increment_frame_count()\n\n\n # display lightning user feedback\n qImg = self.load_logo_qImg('views/oddball_graphics/lightning.png',\n TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n\n # repaint\n self.label_homeballindicator.clear()\n self.label_homeballindicator.repaint()\n self.label_awayballindicator.clear()\n self.label_awayballindicator.repaint()\n\n def cancel_previous_frame_score(self):\n # todo there is a bug in here that needs to be resolved (previous frame points are\n # todo not removed\n # clear teams' temp score\n self.homeTeam.temp_points = 0\n self.awayTeam.temp_points = 0\n\n # remove previous points\n self.homeTeam.remove_points()\n self.awayTeam.remove_points()\n\n # decrement the frame count\n self.decrement_frame_count()\n\n # update score widget\n self.update_score_widget(self.homeTeam, cancelPreviousPoints=True)\n self.update_score_widget(self.awayTeam, cancelPreviousPoints=True)\n print(\"canceled previous frame points\")\n\n # repaint\n self.label_homeballindicator.clear()\n self.label_homeballindicator.repaint()\n self.label_awayballindicator.clear()\n self.label_awayballindicator.repaint()\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n\n\n def other_team(self, team):\n \"\"\"convenience function returns the opposite team of what is provided\"\"\"\n if team is self.homeTeam:\n team = self.awayTeam\n elif 
team is self.awayTeam:\n team = self.homeTeam\n return team\n\n def update_score_widget(self, team, showTempPoints=False, cancelPreviousPoints=False):\n widget = None\n # get the widget\n if team is self.homeTeam:\n widget = self.lcdNumber_homescore\n elif team is self.awayTeam:\n widget = self.lcdNumber_awayscore\n\n # if points are temporary\n if showTempPoints:\n widget.display(str(team.score + team.temp_points))\n return\n\n # or if we need to remove points\n if not cancelPreviousPoints:\n # otherwise we'll add points\n # add team points\n team.add_points()\n\n # display the points\n widget.display(str(team.score))\n\n def set_widget_font_foreground_color(self, widget, color):\n # create a QColor and swap BGR to RGB\n color = QColor(color[2], color[1], color[0])\n\n # extract the widget palette\n palette = widget.palette()\n\n # set the text color and palette\n palette.setColor(palette.WindowText, color)\n widget.setPalette(palette)\n\n def load_logo_qImg(self, pngPath, width):\n \"\"\"\n load the Obie logo\n \"\"\"\n # load the logo and read all channels (including alpha transparency)\n logo = cv2.imread(pngPath, cv2.IMREAD_UNCHANGED)\n\n # swap color channels for Qt\n logo = cv2.cvtColor(logo, cv2.COLOR_BGRA2RGBA)\n\n # resize to a known width maintaining aspect ratio\n logo = imutils.resize(logo, width=width)\n\n # extract the dimensions of the image and set the bytes per line\n height, width, channel = logo.shape\n bytesPerLine = channel * width\n\n # create a QImage and ensure to use the alpha transparency format\n qImg = QImage(logo.data, width, height, bytesPerLine, QImage.Format_RGBA8888)\n\n return qImg\n\n def cv2img_to_qImg(self, image, width):\n \"\"\"\n converts a BGRA OpenCV image to qImage RGBA format (A is the alpha transparency\n channel\n \"\"\"\n # swap color channels for Qt\n image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)\n\n # resize to a known width maintaining aspect ratio\n image = imutils.resize(image, width=width)\n\n # extract the dimensions of the image and set the bytes per line\n height, width, channel = image.shape\n bytesPerLine = channel * width\n\n # create a QImage and ensure to use the alpha transparency format\n qImg = QImage(image.data, width, height, bytesPerLine, QImage.Format_RGBA8888)\n\n return qImg\n\n def draw_rgba_qimg(self, label, qImg):\n # set the logo in the GUI\n label.setPixmap(QPixmap(qImg))\n label.repaint()\n\n def make_ball(self, color=GRAY):\n \"\"\"\n This simply draws a solid color ball indicator\n \"\"\"\n # create a white box for the circle to reside in; NOTE: this box has an alpha channel\n image = np.zeros(shape=[BALL_INDICATOR_SIZE, BALL_INDICATOR_SIZE, 4], dtype=np.uint8)\n colorWithAlpha = (color[0], color[1], color[2], 255)\n\n # extract the dimensions\n (height, width) = image.shape[:2]\n\n # draw the filled in circle in the box\n center = (int(width/2), int(height/2))\n radius = int(width/2) - 40\n cv2.circle(image, center, radius, colorWithAlpha, -1)\n\n return image\n\n def draw_ball_indicator(self, team):\n # initializations\n ballFlag = None\n ballIndicator = None\n color = None\n shortTeamString = None\n\n # repaint the top left area\n self.label_logoadvertisement.clear()\n self.label_logoadvertisement.repaint()\n\n # select the team\n if team is self.homeTeam:\n ballFlag = self.homeTeam.ballFlag.get_flag()\n ballIndicator = self.label_homeballindicator\n color = self.homeTeam.teamObieColor\n shortTeamString = \"home\"\n\n elif team is self.awayTeam:\n ballFlag = self.awayTeam.ballFlag.get_flag()\n 
ballIndicator = self.label_awayballindicator\n color = self.awayTeam.teamObieColor\n shortTeamString = \"away\"\n\n # handle ball flags\n # the ball isn't thrown\n if ballFlag == BallFlag.NOT_THROWN:\n color = GRAY\n image = self.make_ball(color)\n qImg = self.cv2img_to_qImg(image, BALL_INDICATOR_SIZE)\n self.draw_rgba_qimg(ballIndicator, qImg)\n\n # the ball is out\n elif ballFlag == BallFlag.OUT:\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/out_{}.png'.format(\n shortTeamString), BALL_INDICATOR_SIZE)\n self.draw_rgba_qimg(ballIndicator, qImg)\n\n # the ball is in\n elif ballFlag == BallFlag.IN:\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/in_{}.png'.format(\n shortTeamString), BALL_INDICATOR_SIZE)\n self.draw_rgba_qimg(ballIndicator, qImg)\n\n # the ball is in\n elif ballFlag == BallFlag.KISS:\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/kiss.png', BALL_INDICATOR_SIZE)\n self.draw_rgba_qimg(ballIndicator, qImg)\n\n # terrible shot; you need a hot shot\n elif ballFlag == BallFlag.HOT_SHOT:\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/hotshot.png', BALL_INDICATOR_SIZE)\n self.draw_rgba_qimg(ballIndicator, qImg)\n\n # need a measurement\n elif ballFlag == BallFlag.MEASUREMENT:\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/measurement.png', BALL_INDICATOR_SIZE)\n self.draw_rgba_qimg(ballIndicator, qImg)\n\n # you earned yourself a casino\n elif ballFlag == BallFlag.CASINO:\n # draw the team casino and the top left casino\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/casino_{}.png'.format(shortTeamString),\n BALL_INDICATOR_SIZE)\n self.draw_rgba_qimg(ballIndicator, qImg)\n qImg = self.load_logo_qImg(\n 'views/oddball_graphics/ball_indicators/casino_{}.png'.format(\n shortTeamString), TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement , qImg)\n\n def time_tick(self):\n \"\"\"\n this method is called each time a second passes and updates the timer if it is not\n paused\n \"\"\"\n # subtract a second\n if not self.timer_paused:\n # counting down\n if self.clock_count_down and not self.clock_count_up:\n self.time_sec_left -= 1\n\n # if the seconds < 0, we need to account for minutes\n if self.time_sec_left < 0:\n # subtract a minute\n self.time_min_left -= 1\n\n # if there are no more minutes\n if self.time_min_left < 0:\n self.time_is_out = True\n self.time_min_left = 0\n self.time_sec_left = 0\n\n # play beeping sound\n sound_filename = os.path.join(\"sounds\", \"beeping.wav\")\n threading.Thread(target=playsound, args=(sound_filename,)).start()\n\n # we will now be counting up\n self.clock_count_up = True\n self.clock_count_down = False\n\n # otherwise, the seconds are set to 59\n else:\n self.time_sec_left = 59\n\n # counting up\n elif self.time_is_out and self.clock_count_up and not self.clock_count_down:\n self.time_sec_left += 1\n\n # if the seconds < 0, we need to account for minutes\n if self.time_sec_left >= 59:\n # add a minute\n self.time_min_left += 1\n\n # set seconds to 0\n self.time_sec_left = 0\n\n # if we hit 99 minutes\n if self.time_min_left >= 99:\n self.timer_paused = True\n self.time_min_left = 0\n self.time_sec_left = 0\n\n # update the timer on the UI\n self.game_time_ui_update()\n\n def game_time_ui_update(self):\n \"\"\"\n this method updates the time indicator on the GUI\n :return:\n \"\"\"\n self.lcdNumber_game_time_remaining_min.display(str(self.time_min_left).zfill(2))\n 
self.lcdNumber_game_time_remaining_sec.display(str(self.time_sec_left).zfill(2))\n\n def start_game_timer(self, MINUTES, MODE=\"down\"):\n if MODE == \"down\":\n self.clock_count_down = True\n self.clock_count_up = False\n elif MODE == \"up\":\n self.clock_count_down = False\n self.clock_count_up = True\n\n # repaint the down and back area\n self.down_and_back = False\n self.label_downandback.repaint()\n\n # reset the score at the start of a game\n self.homeTeam.score = 0\n self.awayTeam.score = 0\n self.update_score_widget(self.homeTeam)\n self.update_score_widget(self.awayTeam)\n\n # clear ball indicators (just in case a game just finished)\n self.label_homeballindicator.clear()\n self.label_homeballindicator.repaint()\n self.label_awayballindicator.clear()\n self.label_awayballindicator.repaint()\n\n\n # repaint the top left logo to Yello\n # update the top left corner logo to indicate who is in\n # update the top left corner logo to indicating that the pallino needs to be thrown\n qImg = self.load_logo_qImg('views/oddball_graphics/cut_assets/Mark-1C-Yellow.png',\n TOP_LEFT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_logoadvertisement, qImg)\n\n # start timer\n self.timer_paused = False\n self.gameTimer.start()\n if self.clock_count_down:\n self.time_min_left = MINUTES - 1\n\n # set the frame count\n self.frame_count = 1\n self.lcdNumber_framenumber.display(str(self.frame_count))\n\n # clear the down and back top right image\n self.label_downandback.clear()\n\n def increment_frame_count(self):\n self.frame_count += 1\n self.lcdNumber_framenumber.display(str(self.frame_count))\n\n def decrement_frame_count(self):\n self.frame_count -= 1\n if self.frame_count <= 0:\n self.frame_count = 0\n self.lcdNumber_framenumber.display(str(self.frame_count))\n\n def stop_game_timer(self):\n if self.gameTimer.isActive():\n self.gameTimer.stop()\n self.timer_paused = True\n self.clock_count_down = True\n self.clock_count_up = False\n self.time_min_left = 0\n self.time_sec_left = 0\n self.game_time_ui_update()\n self.GAME_MINUTES = DEFAULT_GAME_MINUTES\n self.label_homeballindicator.clear()\n self.label_homeballindicator.repaint()\n self.label_awayballindicator.clear()\n self.label_awayballindicator.repaint()\n self.frame_count = 0\n self.lcdNumber_framenumber.display(str(self.frame_count))\n\n def draw_down_and_back(self):\n self.down_and_back = True\n qImg = self.load_logo_qImg('views/oddball_graphics/down_and_back.png', TOP_RIGHT_LOGO_SIZE)\n self.draw_rgba_qimg(self.label_downandback, qImg)\n\n def set_team_name(self, team, newTeamName):\n if team is self.homeTeam:\n self.homeTeam.change_team_name(newTeamName)\n self.label_hometeam.setText(str(self.homeTeam))\n elif team is self.awayTeam:\n self.awayTeam.change_team_name(newTeamName)\n self.label_awayteam.setText(str(self.awayTeam))\n\n def show_team_change_popup(self, team):\n if team is self.homeTeam:\n teamText = \"HOME\"\n elif team is self.awayTeam:\n teamText = \"AWAY\"\n\n # pop up a text entry dialog\n newTeamName, ok = QInputDialog.getText(self, \"Team Name Change\", \"Enter new {} team name\".format(teamText))\n\n # if the ok button was pressed, then change the team name\n if ok:\n self.set_team_name(team, newTeamName)\n\n\nif __name__ == '__main__':\n # initialize the app and window\n app = QtWidgets.QApplication(sys.argv)\n window = MainWindow(sys.argv)\n\n # show the window and run the app\n window.show()\n app.exec_()\n"
]
| [
[
"numpy.zeros"
]
]
|
abkfenris/safewaters | [
"214d23b2a1d94b1ca2ad0ff3c482d34693b92065"
]
| [
"safewaters.py"
]
| [
"#!/usr/bin/env python\nimport requests\nimport pandas as pd\n\nrows = []\n\nfor i in range(26):\n response = requests.get(f\"https://brookfieldwaterpublishingapi.azurewebsites.net/riversystems/{i}/facilities?withEverything=false\")\n data = response.json()\n for facility in data:\n if facility['WaterData']:\n for measurement in facility['WaterData']['Data']:\n measurement['facilityName'] = facility['Name']\n measurement['facilityId'] = facility['FacilityId']\n measurement['StoredAt'] = facility['WaterData']['StoredAt']\n measurement['StateAbbreviation'] = facility['StateAbbreviation']\n rows.append(measurement)\n \ndf = pd.DataFrame(rows)\ndf = df.sort_values(['StateAbbreviation', 'facilityName', 'Name'])\n\ndf.to_csv(\"safewaters.csv\", index=False)\n"
]
| [
[
"pandas.DataFrame"
]
]
|
marketler/GFW_vessel_scoring | [
"16a141e78258dcc9bab458d5ebd8fad6d2721816"
]
| [
"scripts/predict.py"
]
| [
"import vessel_scoring.models\nimport vessel_scoring.utils\nimport gpsdio\nimport numpy\nimport sys\n\nmodels = vessel_scoring.models.load_models()\n\nmodel = sys.argv[1]\ninput = sys.argv[2]\noutput = sys.argv[3]\n\nif input.endswith(\".msg\"):\n with gpsdio.open(output, \"w\") as fout:\n with gpsdio.open(input) as fin:\n fout.write(models[model].predict_messages(fin))\nelse:\n data = numpy.load(input)['x']\n datalen = len(data)\n data = vessel_scoring.utils.numpy_to_messages(data)\n data = models[model].predict_messages(data)\n data = vessel_scoring.utils.messages_to_numpy(data, datalen)\n numpy.savez(output, x=data)\n"
]
| [
[
"numpy.savez",
"numpy.load"
]
]
|
Bhaskarkvvsr/cortex | [
"f569791613ea8b8cff226c3585839d37b9b6a5b5"
]
| [
"examples/pytorch/sentiment-analyzer/predictor.py"
]
| [
"# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.20.*, run `git checkout -b 0.20` or switch to the `0.20` branch on GitHub)\n\nimport torch\nfrom transformers import pipeline\n\n\nclass PythonPredictor:\n def __init__(self, config):\n device = 0 if torch.cuda.is_available() else -1\n print(f\"using device: {'cuda' if device == 0 else 'cpu'}\")\n\n self.analyzer = pipeline(task=\"sentiment-analysis\", device=device)\n\n def predict(self, payload):\n return self.analyzer(payload[\"text\"])[0]\n"
]
| [
[
"torch.cuda.is_available"
]
]
|
pythonProjectLearn/TensorflowLearning | [
"7a72ebea060ce0a0db9a00994e4725ec5d84c10a"
]
| [
"ch19_FFM/FFM.py"
]
| [
"# encoding:utf-8\n\"\"\"\ntf.truncated_normal(shape, mean, stddev) :shape表示生成张量的维度,mean是均值,stddev是标准差。\n这个函数产生正太分布,均值和标准差自己设定。这是一个截断的产生正太分布的函数,就是说产生正太分布的值如果与均值的差值大于两倍的标准差,那就重新生成。\n和一般的正太分布的产生随机数据比起来,这个函数产生的随机数与均值的差距不会超过两倍的标准差,但是一般的别的函数是可能的。\n\ntf.convert_to_tensor()\n将python的数据类型转换成TensorFlow可用的tensor数据类型\n\n# 矩阵逻辑变换\na=np.array(list(range(100)))\na=a.reshape([10,10])\nb=tf.constant(a)\n\nindex=tf.where(b>50) # 要获得a中大于50的矩阵行\n# 提取 多维度的矩阵逻辑操作\nc=tf.gather_nd(b,index)\n# 提取 一维的矩阵逻辑变换\nc=tf.gather(b,index)\n\n\"\"\"\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport os\n\n\ninput_x_size = 20\nfield_size = 2\n\nvector_dimension = 3\n\ntotal_plan_train_steps = 1000\n# 使用SGD,每一个样本进行依次梯度下降,更新参数\nbatch_size = 1\n\nall_data_size = 1000\n\nlr = 0.01\n\nMODEL_SAVE_PATH = \"TFModel\"\nMODEL_NAME = \"FFM\"\n\ndef createTwoDimensionWeight(input_x_size,field_size,vector_dimension):\n \"\"\"多维正态分布\"\"\"\n weights = tf.truncated_normal([input_x_size,field_size,vector_dimension])\n\n tf_weights = tf.Variable(weights)\n\n return tf_weights\n\ndef createOneDimensionWeight(input_x_size):\n \"\"\"一维正态分布\"\"\"\n weights = tf.truncated_normal([input_x_size])\n tf_weights = tf.Variable(weights)\n return tf_weights\n\ndef createZeroDimensionWeight():\n weights = tf.truncated_normal([1])\n tf_weights = tf.Variable(weights)\n return tf_weights\n\ndef inference(input_x,input_x_field,zeroWeights,oneDimWeights,thirdWeight):\n \"\"\"计算回归模型输出的值\"\"\"\n\n secondValue = tf.reduce_sum(tf.multiply(oneDimWeights,input_x,name='secondValue'))\n\n firstTwoValue = tf.add(zeroWeights, secondValue, name=\"firstTwoValue\")\n\n thirdValue = tf.Variable(0.0,dtype=tf.float32)\n input_shape = input_x_size\n\n for i in range(input_shape):\n featureIndex1 = i\n fieldIndex1 = int(input_x_field[i])\n for j in range(i+1,input_shape):\n featureIndex2 = j\n fieldIndex2 = int(input_x_field[j])\n vectorLeft = tf.convert_to_tensor([[featureIndex1,fieldIndex2,i] for i in range(vector_dimension)])\n weightLeft = tf.gather_nd(thirdWeight,vectorLeft) # 矩阵变换\n weightLeftAfterCut = tf.squeeze(weightLeft)\n\n vectorRight = tf.convert_to_tensor([[featureIndex2,fieldIndex1,i] for i in range(vector_dimension)])\n weightRight = tf.gather_nd(thirdWeight,vectorRight)\n weightRightAfterCut = tf.squeeze(weightRight)\n\n tempValue = tf.reduce_sum(tf.multiply(weightLeftAfterCut,weightRightAfterCut))\n\n indices2 = [i]\n indices3 = [j]\n\n xi = tf.squeeze(tf.gather_nd(input_x, indices2))\n xj = tf.squeeze(tf.gather_nd(input_x, indices3))\n\n product = tf.reduce_sum(tf.multiply(xi, xj))\n\n secondItemVal = tf.multiply(tempValue, product)\n\n tf.assign(thirdValue, tf.add(thirdValue, secondItemVal))\n\n return tf.add(firstTwoValue,thirdValue)\n\ndef gen_data():\n labels = [-1,1]\n y = [np.random.choice(labels,1)[0] for _ in range(all_data_size)]\n x_field = [i // 10 for i in range(input_x_size)]\n x = np.random.randint(0,2,size=(all_data_size,input_x_size))\n return x,y,x_field\n\n\nif __name__ == '__main__':\n global_step = tf.Variable(0,trainable=False)\n trainx,trainy,trainx_field = gen_data()\n #\n input_x = tf.placeholder(tf.float32,[input_x_size])\n input_y = tf.placeholder(tf.float32)\n #\n\n lambda_w = tf.constant(0.001, name='lambda_w')\n lambda_v = tf.constant(0.001, name='lambda_v')\n\n zeroWeights = createZeroDimensionWeight()\n\n oneDimWeights = createOneDimensionWeight(input_x_size)\n\n thirdWeight = createTwoDimensionWeight(input_x_size, # 创建二次项的权重变量\n field_size,\n vector_dimension) # n * f * k\n\n y_ = inference(input_x, 
trainx_field,zeroWeights,oneDimWeights,thirdWeight)\n\n l2_norm = tf.reduce_sum(\n tf.add(\n tf.multiply(lambda_w, tf.pow(oneDimWeights, 2)),\n tf.reduce_sum(tf.multiply(lambda_v, tf.pow(thirdWeight, 2)),axis=[1,2])\n )\n )\n\n loss = tf.log(1 + tf.exp(input_y * y_)) + l2_norm\n\n train_step = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(loss)\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(total_plan_train_steps):\n for t in range(all_data_size):\n input_x_batch = trainx[t]\n input_y_batch = trainy[t]\n predict_loss,_, steps = sess.run([loss,train_step, global_step],\n feed_dict={input_x: input_x_batch, input_y: input_y_batch})\n\n print(\"After {step} training step(s) , loss on training batch is {predict_loss} \"\n .format(step=steps, predict_loss=predict_loss))\n\n saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=steps)\n writer = tf.summary.FileWriter(os.path.join(MODEL_SAVE_PATH, MODEL_NAME), tf.get_default_graph())\n writer.close()"
]
| [
[
"tensorflow.exp",
"tensorflow.multiply",
"numpy.random.choice",
"tensorflow.get_default_graph",
"tensorflow.gather_nd",
"tensorflow.Session",
"tensorflow.Variable",
"tensorflow.train.Saver",
"tensorflow.truncated_normal",
"tensorflow.constant",
"numpy.random.randint",
"tensorflow.placeholder",
"tensorflow.squeeze",
"tensorflow.pow",
"tensorflow.add",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer"
]
]
|
Singlesnail/vedo | [
"b2e2cfc3453bbd118b6c81a2227b8ce6f1d22b7b"
]
| [
"examples/pyplot/np_matrix.py"
]
| [
"\"\"\"Visualize a n\\dotm numpy matrix\"\"\"\nfrom vedo.pyplot import matrix, show\nimport numpy as np\n\nn, m = (6, 5)\nM = np.eye(n, m)/2 + np.random.randn(n, m)*0.1\nprint(M)\n\nmat = matrix(M,\n cmap='Reds',\n title='My numpy Matrix',\n xtitle='Genes of group A',\n ytitle='Genes of group B',\n xlabels=[f'hox{i}' for i in range(m)],\n ylabels=[f'bmp{i}' for i in range(n)],\n scale=0.15, # size of bin labels; set it to 0 to remove labels\n lw=2, # separator line width\n )\nshow(mat, __doc__, bg='k7', zoom=1.2)\n"
]
| [
[
"numpy.random.randn",
"numpy.eye"
]
]
|
eternalding/tf-CMT | [
"67ae5bcc60d985a11d55e06247ac8ed3d62643d5"
]
| [
"tf_CMT/model.py"
]
| [
"import tensorflow as tf\n\nfrom tf_CMT.Basic_blocks import CNN_Block, LocalPerceptionUnitLayer, InvertedResidualFFNLayer\nfrom tf_CMT.LWMHSA import LightWeightMHSALayer\n\nclass CMT_Model(tf.keras.Model):\n def __init__(self, Block_num = [3,3,16,3], K=5, n_heads = 8, head_dim = 128, filters = 256, num_classes=10, usePosBias = True, output_logits = True):\n super(CMT_Model, self).__init__()\n self.stem = CMT_Stem(filters = filters, strides = 2, kernel_size = 3 ,num_blocks = 2)\n self.DownSampleCNNs = [tf.keras.layers.Conv2D(filters = filters, kernel_size = 2, strides = 2) for _ in range(len(Block_num))]\n self.CMT_Blocks_list = [[CMT_Block(n_heads = n_heads,\n filters = filters,\n kernel_size = 3,\n strides = 2,\n K = K,\n usePosBias = usePosBias)\n for _ in range(Block_num[stage])]\n for stage in range(len(Block_num))] \n self.global_pool = tf.keras.layers.GlobalAveragePooling2D()\n self.FCLayer = tf.keras.layers.Dense(num_classes)\n self.SoftmaxLayer = tf.keras.layers.Softmax()\n self.output_logits = output_logits\n \n \n def call(self, inputs):\n CMT_Input = self.stem(inputs)\n \n for DownSampleCNN, CMT_Blocks in zip(self.DownSampleCNNs,self.CMT_Blocks_list):\n CMT_Input = DownSampleCNN(CMT_Input)\n for Block in CMT_Blocks:\n CMT_Input = Block(CMT_Input)\n \n CMT_Out = self.global_pool(CMT_Input) \n FC_Out = self.FCLayer(CMT_Out)\n if not self.output_logits:\n Output = self.SoftmaxLayer(FC_Out)\n else:\n Output = FC_Out\n return Output\n \nclass CMT_Stem(tf.keras.layers.Layer):\n def __init__(self, filters = 32, strides = 2, kernel_size = 3 ,num_blocks = 2,Name=\"CMT_Stem\"): \n self.DownSampleCNN = tf.keras.layers.Conv2D(filters=filters,kernel_size = kernel_size,strides = (strides,strides),padding='same')\n self.CNN_Blocks = [CNN_Block(filters,kernel_size= kernel_size ,strides=1) for _ in range(num_blocks)] \n super(CMT_Stem, self).__init__(name=Name) \n\n def call(self, input):\n DownSampled_Output = self.DownSampleCNN(input)\n for CNN_Block in self.CNN_Blocks:\n DownSampled_Output = CNN_Block(DownSampled_Output) \n return DownSampled_Output\n \nclass CMT_Block(tf.keras.layers.Layer):\n def __init__(self, n_heads = 8, filters = 32, kernel_size = 3,strides = 2, K = 5, usePosBias = False ,Name=\"CMT_Block\"):\n self.LocalPerceptionUnit = LocalPerceptionUnitLayer(kernel_size = 3, strides = 1)\n self.AttentionLayerNorm = tf.keras.layers.LayerNormalization()\n self.LwMultiHeadSelfAttention = LightWeightMHSALayer(n_heads = n_heads, head_dim = filters, dropout_rate=0.3, K = K,usePosBias=usePosBias)\n self.IRFFNLayerNorm = tf.keras.layers.LayerNormalization()\n self.InvertedResidualFFN = InvertedResidualFFNLayer(filters = filters)\n super(CMT_Block, self).__init__(name=Name) \n\n def call(self, input):\n LPU_Out = self.LocalPerceptionUnit(input)\n AttNormOut = self.AttentionLayerNorm(LPU_Out)\n LwMHSAOut = self.LwMultiHeadSelfAttention(AttNormOut)\n\n IRFFN_Input = LPU_Out + LwMHSAOut\n\n IRFFN_NormOut = self.IRFFNLayerNorm(IRFFN_Input)\n IRFFN_Out = self.InvertedResidualFFN(IRFFN_NormOut)\n\n Output = IRFFN_Input + IRFFN_Out\n return Output"
]
| [
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.Softmax"
]
]
|
AmeyaWagh/3D_object_recognition | [
"bf65d27d1bbf40a6e522dd6a4d1ee9a6cee44dec"
]
| [
"scripts/trainer.py"
]
| [
"#! /usr/bin/env python\nimport rospkg\nimport numpy as np\nimport os\nimport pcl\nimport cv2\nfrom robot_vision_helper.DataHandler import DataHandler\nfrom robot_vision_helper.GASD import GASD\nfrom robot_vision_helper.CNNModel import *\nimport time \nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nrospack = rospkg.RosPack()\nPACKAGE_PATH = rospack.get_path('robot_vision')\nprint(OKBLUE+PACKAGE_PATH+ENDC)\n\n\nclass DisplayCloud():\n \"\"\" A simple visualizer using Matplotlib \"\"\"\n def __init__(self):\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, projection='3d')\n\n def drawCloud(self,pointCloud,pts='.'):\n x = []\n y = []\n z = []\n for pt in pointCloud:\n x.append(float(pt[0]))\n y.append(float(pt[1]))\n z.append(float(pt[2]))\n\n self.ax.plot(x, y, z, pts)\n\n def showCloud(self):\n plt.show()\n\nif __name__ == '__main__':\n d = DataHandler()\n g = GASD()\n BASE_PATH = \"/home/ameya/Downloads/Dataset_RGBD\"\n CLASS_PATH = [\"apple_1\",\"banana_1\",\"bowl_1\",\"calculator_1\",\"coffee_mug_1\"]\n train_flag = True\n\n clf = CNNModel(ip_shape=(1, 40, 40, 40))\n if train_flag:\n dataArray = []\n TestdataArray = []\n Labels = []\n TestLabels = []\n dataPercent = 0.8\n for _CLASS in CLASS_PATH:\n files = os.listdir(os.path.join(BASE_PATH,_CLASS))\n arrayLen = len(files)\n i=0\n for _file in files[0:500]:\n print(_file[0:500])\n data = d.load_raw_file(BASE_PATH=BASE_PATH,CLASS_PATH=_CLASS,file_name=_file)\n volume_data,_quat = g.get_volumetric_data(data)\n if i < dataPercent*arrayLen: \n dataArray.append(np.array([volume_data]))\n Labels.append(_CLASS)\n else:\n TestdataArray.append(np.array([volume_data]))\n TestLabels.append(_CLASS)\n\n dataArray = np.array(dataArray)\n TestdataArray = np.array(dataArray)\n Labels = np.array(Labels)\n TestLabels = np.array(Labels)\n print(\"dataArray\",dataArray.shape)\n print(\"Labels\",Labels.shape)\n\n clf.train(dataArray,Labels)\n clf.test(TestdataArray, TestLabels)\n clf.save_model(base_path=os.path.join(PACKAGE_PATH,\"bin/3DCNN_model\"))\n \n clf.load_model(base_path=os.path.join(PACKAGE_PATH,\"bin/3DCNN_model\"))\n for i in range(2):\n query_class = CLASS_PATH[np.random.randint(len(CLASS_PATH))]\n query_file = os.listdir(os.path.join(BASE_PATH,query_class))[-1*np.random.randint(20)]\n data = d.load_raw_file(BASE_PATH = \"/home/ameya/Downloads/Dataset_RGBD\",CLASS_PATH=query_class,file_name=query_file)\n centroid,axes,new_data = g.compute_gasd(data)\n volume_data,_quat = g.get_volumetric_data(data)\n print(\"prediction:\",clf.predict(np.array([[volume_data]]),CLASS_PATH),\" actual:\",query_class,\" file:\",query_file)\n disp = DisplayCloud()\n disp.drawCloud(new_data)\n disp.showCloud()\n\n exit()\n\n"
]
| [
[
"matplotlib.pyplot.show",
"numpy.array",
"numpy.random.randint",
"matplotlib.pyplot.figure"
]
]
|
csa0001/Refinery | [
"0d5de8fc3d680a2c79bd0e9384b506229787c74f"
]
| [
"refinery/bnpy/bnpy-dev/tests/merge/AbstractBaseTestForHDP.py"
]
| [
"\nimport numpy as np\nimport unittest\n\nimport bnpy\nfrom bnpy.learnalg import MergeMove\nfrom scipy.special import digamma\nimport copy\n\n######################################################### Make Data\n#########################################################\ndef MakeData(K=4, D=2500, nWordsPerDoc=50):\n ''' Simple 4 component data on 6 word vocabulary\n \n '''\n topics = np.zeros((K,6))\n topics[0] = [0.48, 0.48, 0.01, 0.01, 0.01, 0.01]\n topics[1] = [0.01, 0.01, 0.48, 0.48, 0.01, 0.01]\n topics[2] = [0.01, 0.01, 0.01, 0.01, 0.48, 0.48]\n topics[3] = [0.01, 0.33, 0.01, 0.32, 0.01, 0.32]\n topic_prior = 0.1 * np.ones(4)\n Data = bnpy.data.WordsData.CreateToyDataFromLDAModel(\n topics=topics, topic_prior=topic_prior,\n nDocTotal=D,\n nWordsPerDoc=nWordsPerDoc, seed=123)\n trueResp = Data.TrueParams['word_variational']\n assert np.allclose(1.0,np.sum(trueResp,axis=1))\n return Data, trueResp \n\ndef MakeMinibatches(Data):\n PRNG = np.random.RandomState(1234)\n permIDs = PRNG.permutation(Data.nDocTotal)\n bIDs1 = permIDs[:len(permIDs)/2]\n bIDs2 = permIDs[len(permIDs)/2:]\n batchData1 = Data.select_subset_by_mask(bIDs1)\n batchData2 = Data.select_subset_by_mask(bIDs2)\n return batchData1, batchData2\n\n######################################################### Make Data\n#########################################################\nclass AbstractBaseTestForHDP(unittest.TestCase):\n def shortDescription(self):\n return None\n\n def setUp(self):\n Data, trueResp = MakeData()\n batchData1, batchData2 = MakeMinibatches(Data)\n self.Data = Data\n self.trueResp = trueResp\n self.batchData1 = batchData1\n self.batchData2 = batchData2\n self.MakeModelWithTrueComps()\n\n ######################################################### Make Model\n #########################################################\n\n def MakeModelWithTrueComps(self):\n ''' Create model with true components that generated self.Data\n ''' \n aDict = dict(alpha0=1.0, gamma=0.1)\n oDict = {'lambda':0.05}\n self.hmodel = bnpy.HModel.CreateEntireModel('VB', 'HDPModel', 'Mult',\n aDict, oDict, self.Data)\n LP = self.getTrueLP()\n SS = self.hmodel.get_global_suff_stats(self.Data, LP)\n self.hmodel.update_global_params(SS)\n\n def MakeModelWithDuplicatedComps(self):\n ''' Create model with \"duplicated\" components,\n for each true comp that generated self.Data, \n self.dupModel has two versions\n ''' \n aDict = dict(alpha0=1.0, gamma=0.1)\n oDict = {'lambda':0.05}\n self.dupModel = bnpy.HModel.CreateEntireModel('VB', 'HDPModel', 'Mult',\n aDict, oDict, self.Data)\n dupLP = self.getDupLP()\n dupSS = self.dupModel.get_global_suff_stats(self.Data, dupLP)\n self.dupModel.update_global_params(dupSS)\n\n def getTrueLP(self):\n return self.getLPfromResp(self.trueResp)\n\n def getDupLP(self):\n ''' Create local parameters for \"duplicated\" model\n each comp k in true model is divided into two comps k1, k2\n any words with z[n] = k in \"first half\" of Data are assigned to k1\n any words with z[n] = k in \"second half\" are assigned to k2\n '''\n Data = self.Data\n K = self.trueResp.shape[1]\n dupResp = np.zeros((Data.nObs, 2*K))\n dupResp[:Data.nObs/2,:K] = self.trueResp[:Data.nObs/2]\n dupResp[Data.nObs/2:,K:] = self.trueResp[Data.nObs/2:]\n return self.getLPfromResp(dupResp)\n\n def getLPfromResp(self, Resp, smoothMass=0.001):\n ''' Create full local parameter (LP) dictionary for HDPModel,\n given responsibility matrix Resp\n\n Returns\n --------\n LP : dict with fields word_variational, alphaPi, E_logPi, DocTopicCount\n '''\n Data = 
self.Data\n D = Data.nDoc\n K = Resp.shape[1]\n # DocTopicCount matrix : D x K matrix\n DocTopicC = np.zeros((D, K))\n for dd in range(D):\n start,stop = Data.doc_range[dd,:]\n DocTopicC[dd,:] = np.dot(Data.word_count[start:stop], \n Resp[start:stop,:]\n )\n assert np.allclose(DocTopicC.sum(), Data.word_count.sum())\n # Alpha and ElogPi : D x K+1 matrices\n padCol = smoothMass * np.ones((D,1))\n alph = np.hstack( [DocTopicC + smoothMass, padCol]) \n ElogPi = digamma(alph) - digamma(alph.sum(axis=1))[:,np.newaxis]\n assert ElogPi.shape == (D,K+1)\n return dict(word_variational =Resp, \n E_logPi=ElogPi, alphaPi=alph,\n DocTopicCount=DocTopicC) \n\n\n def run_Estep_then_Mstep(self):\n ''' Perform one full update to self.hmodel's global parameters\n given self.Data as observed data\n Runs Estep (LP), then SSstep (SS), then Mstep \n\n Returns\n --------- \n LP\n SS\n '''\n LP = self.hmodel.calc_local_params(self.Data)\n flagDict = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)\n SS = self.hmodel.get_global_suff_stats(self.Data, LP, **flagDict)\n self.hmodel.update_global_params(SS)\n return LP, SS"
]
| [
[
"numpy.dot",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.sum",
"numpy.ones",
"numpy.hstack",
"scipy.special.digamma"
]
]
|
RobbiNespu/hyperboria | [
"74776166158d07b199677f9738862e5f1fa54367"
]
| [
"nexus/meta_api/rescorers/classic_rescorer.py"
]
| [
"import asyncio\nimport base64\nimport datetime\nimport io\nimport time\n\nimport lightgbm as lgbm\nimport numpy as np\nfrom nexus.nlptools.language_detect import detect_language\n\nfrom .base import Rescorer\n\n# ToDo: deduplicate code\n\n\ndef convert_scoring_to_vec_current_version(\n original_score,\n schema_id,\n document_age,\n downloads_count,\n ref_by_count,\n same_language,\n same_query_language,\n query_tokens_count,\n query_documents_similarity_vector,\n):\n return np.array([\n original_score,\n schema_id,\n document_age,\n downloads_count,\n ref_by_count,\n same_language,\n same_query_language,\n query_tokens_count,\n ] + query_documents_similarity_vector)\n\n\ndef convert_scoring_to_vec_future_version(\n doc_id,\n original_score,\n schema_id,\n document_age,\n downloads_count,\n ref_by_count,\n same_language,\n same_query_language,\n query_tokens_count,\n query_documents_similarity_vector,\n):\n return np.array([\n doc_id,\n original_score,\n schema_id,\n document_age,\n downloads_count,\n ref_by_count,\n same_language,\n same_query_language,\n query_tokens_count,\n ] + query_documents_similarity_vector)\n\n\ndef schema_to_id(schema):\n return 1 if schema == 'scimag' else 2\n\n\ndef query_document_similarity_measures(query_tokens, query_tokens_set, query_tokens_count, document_tokens):\n max_longest_sequence_not_ordered = 0\n min_sequence_not_ordered = 1024\n current_longest_sequence_not_ordered = 0\n two_grams_not_ordered = 0\n last_token = -1\n for token_ix, token in enumerate(document_tokens):\n if token in query_tokens_set:\n if last_token != -1:\n min_sequence_not_ordered = min(min_sequence_not_ordered, token_ix - last_token)\n if token_ix - last_token == 1:\n two_grams_not_ordered += 1\n last_token = token_ix\n current_longest_sequence_not_ordered += 1\n else:\n current_longest_sequence_not_ordered = 0\n max_longest_sequence_not_ordered = max(\n max_longest_sequence_not_ordered,\n current_longest_sequence_not_ordered,\n )\n return [\n max_longest_sequence_not_ordered,\n min_sequence_not_ordered,\n two_grams_not_ordered,\n float(max_longest_sequence_not_ordered) / (float(query_tokens_count) + 1.0),\n float(two_grams_not_ordered) / (float(query_tokens_count) + 1.0),\n ]\n\n\ndef cast_issued_at(document):\n if 'issued_at' in document:\n try:\n return datetime.date.fromtimestamp(document['issued_at'])\n except ValueError:\n return datetime.date(2000, 1, 1)\n else:\n return datetime.date(2000, 1, 1)\n\n\ndef calculate_title_tokens(document):\n # ToDo: should we add tags?\n title_tokens = list(document.get('authors', []))\n if document.get('title'):\n title_tokens.append(document['title'])\n return (' '.join(title_tokens)).lower().split()\n\n\nclass ClassicRescorer(Rescorer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.lgbm_ranker = lgbm.Booster(model_file='nexus/meta_api/models/classic.txt')\n\n def write_to_log_future_version(self, session_id, scored_documents, query, now, language):\n future_scoring_vecs = []\n\n query_language = detect_language(query)\n query_tokens_count = query.count(' ')\n query_tokens = query.lower().strip('\"\\'”`').split()\n query_tokens_set = set(query_tokens)\n\n for scored_document in scored_documents:\n document = scored_document['document']\n original_id = document.get('original_id') or document['id']\n\n title_tokens = calculate_title_tokens(document)\n\n query_documents_similarity_vector = query_document_similarity_measures(\n query_tokens,\n query_tokens_set,\n query_tokens_count,\n title_tokens,\n )\n 
future_scoring_vecs.append(convert_scoring_to_vec_future_version(\n doc_id=original_id,\n original_score=scored_document['score'],\n schema_id=schema_to_id(scored_document['schema']),\n document_age=(now - cast_issued_at(document)).total_seconds(),\n downloads_count=scored_document['document'].get('downloads_count', 0),\n ref_by_count=document.get('ref_by_count', 0),\n same_language=int(language == document.get('language')),\n same_query_language=int(query_language == document.get('language')),\n query_tokens_count=query_tokens_count,\n query_documents_similarity_vector=query_documents_similarity_vector\n ))\n\n data = io.BytesIO()\n np.savez_compressed(data, future_scoring_vecs, allow_pickle=True)\n data = base64.b64encode(data.getvalue()).decode()\n\n log_entry = {\n 'action': 'search',\n 'scorings': data,\n 'session_id': session_id,\n 'unixtime': time.time(),\n 'version': 4,\n 'vertical': 'classic',\n }\n\n self.learn_logger.info(log_entry)\n\n def _rescore(self, session_id, scored_documents, query, now, language):\n current_scoring_vecs = []\n\n query_language = detect_language(query)\n query_tokens_count = query.count(' ')\n query_tokens = query.lower().strip('\"\\'”`').split()\n query_tokens_set = set(query_tokens)\n\n for scored_document in scored_documents:\n # ToDo: Use shared wrappers\n document = scored_document['document']\n\n title_tokens = calculate_title_tokens(document)\n query_documents_similarity_vector = query_document_similarity_measures(\n query_tokens,\n query_tokens_set,\n query_tokens_count,\n title_tokens,\n )\n current_scoring_vecs.append(convert_scoring_to_vec_current_version(\n original_score=scored_document['score'],\n schema_id=schema_to_id(scored_document['schema']),\n document_age=(now - cast_issued_at(document)).total_seconds(),\n downloads_count=scored_document['document'].get('downloads_count', 0),\n ref_by_count=document.get('ref_by_count', 0),\n same_language=int(language == document.get('language')),\n same_query_language=int(query_language == document.get('language')),\n query_tokens_count=query_tokens_count,\n query_documents_similarity_vector=query_documents_similarity_vector,\n ))\n\n scores = self.lgbm_ranker.predict(current_scoring_vecs)\n for score, scored_document in zip(scores, scored_documents):\n scored_document['score'] = score\n\n scored_documents = sorted(scored_documents, key=lambda x: x['score'], reverse=True)\n for position, scored_document in enumerate(scored_documents):\n scored_document['position'] = position\n return scored_documents\n\n async def rescore(self, scored_documents, query, session_id, language):\n if not scored_documents:\n return scored_documents\n\n now = datetime.date.today()\n\n if self.learn_logger:\n # Needed due to bug in uvloop\n async def nested():\n await asyncio.get_running_loop().run_in_executor(\n self.executor,\n self.write_to_log_future_version,\n session_id,\n scored_documents,\n query,\n now,\n language,\n )\n asyncio.create_task(nested())\n\n return await asyncio.get_running_loop().run_in_executor(\n self.executor,\n self._rescore,\n session_id,\n scored_documents,\n query,\n now,\n language,\n )\n"
]
| [
[
"numpy.array",
"numpy.savez_compressed"
]
]
|
vsoch/caliper-analysis | [
"f7809779fb8e132acd2cfdc0984a24f4f914bd9d"
]
| [
"tensorflow_v0.11/3_NeuralNetworks/autoencoder.py"
]
| [
"# -*- coding: utf-8 -*-\n\n\"\"\" Auto Encoder Example.\nUsing an auto encoder on MNIST handwritten digits.\nReferences:\n Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. \"Gradient-based\n learning applied to document recognition.\" Proceedings of the IEEE,\n 86(11):2278-2324, November 1998.\nLinks:\n [MNIST Dataset] http://yann.lecun.com/exdb/mnist/\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport tensorflow as tf\nimport numpy as np\n\n# Set seeds for consistent results\nnp.random.seed(1)\ntry:\n tf.random.set_seed(1)\nexcept:\n tf.set_random_seed(1)\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n# Parameters\nlearning_rate = 0.01\ntraining_epochs = 1\nbatch_size = 256\ndisplay_step = 1\nexamples_to_show = 2\n\n\n# Network Parameters\nn_hidden_1 = 256 # 1st layer num features\nn_hidden_2 = 128 # 2nd layer num features\nn_input = 784 # MNIST data input (img shape: 28*28)\n\n# tf Graph input (only pictures)\nX = tf.placeholder(\"float\", [None, n_input])\n\nweights = {\n \"encoder_h1\": tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n \"encoder_h2\": tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n \"decoder_h1\": tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),\n \"decoder_h2\": tf.Variable(tf.random_normal([n_hidden_1, n_input])),\n}\nbiases = {\n \"encoder_b1\": tf.Variable(tf.random_normal([n_hidden_1])),\n \"encoder_b2\": tf.Variable(tf.random_normal([n_hidden_2])),\n \"decoder_b1\": tf.Variable(tf.random_normal([n_hidden_1])),\n \"decoder_b2\": tf.Variable(tf.random_normal([n_input])),\n}\n\n\n# Building the encoder\ndef encoder(x):\n # Encoder Hidden layer with sigmoid activation #1\n layer_1 = tf.nn.sigmoid(\n tf.add(tf.matmul(x, weights[\"encoder_h1\"]), biases[\"encoder_b1\"])\n )\n # Decoder Hidden layer with sigmoid activation #2\n layer_2 = tf.nn.sigmoid(\n tf.add(tf.matmul(layer_1, weights[\"encoder_h2\"]), biases[\"encoder_b2\"])\n )\n return layer_2\n\n\n# Building the decoder\ndef decoder(x):\n # Encoder Hidden layer with sigmoid activation #1\n layer_1 = tf.nn.sigmoid(\n tf.add(tf.matmul(x, weights[\"decoder_h1\"]), biases[\"decoder_b1\"])\n )\n # Decoder Hidden layer with sigmoid activation #2\n layer_2 = tf.nn.sigmoid(\n tf.add(tf.matmul(layer_1, weights[\"decoder_h2\"]), biases[\"decoder_b2\"])\n )\n return layer_2\n\n\n# Construct model\nencoder_op = encoder(X)\ndecoder_op = decoder(encoder_op)\n\n# Prediction\ny_pred = decoder_op\n# Targets (Labels) are the input data.\ny_true = X\n\n# Define loss and optimizer, minimize the squared error\ncost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))\noptimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)\n\n# Initializing the variables\ninit = tf.initialize_all_variables()\n\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n total_batch = int(mnist.train.num_examples / batch_size)\n # Training cycle\n for epoch in range(training_epochs):\n # Loop over all batches\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop) and cost op (to get loss value)\n _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})\n # Display logs per epoch step\n if epoch % display_step == 0:\n print(\"Epoch:\", \"%04d\" % (epoch + 1), \"cost=\", \"{:.9f}\".format(c))\n\n print(\"Optimization Finished!\")\n\n # Applying encode and decode over test set\n encode_decode = sess.run(\n 
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]}\n )\n\n # just compare one example\n print(list(encode_decode[1]))\n"
]
| [
[
"tensorflow.set_random_seed",
"tensorflow.initialize_all_variables",
"numpy.random.seed",
"tensorflow.random.set_seed",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.Session",
"tensorflow.matmul",
"tensorflow.placeholder",
"tensorflow.pow",
"tensorflow.random_normal"
]
]
|
lod531/FixedEffectModel | [
"039a902b34310e652bbd3a21ba0ec663f175dd38"
]
| [
"FixedEffectModelPyHDFE/OLSFixed.py"
]
| [
"from FixedEffectModelPyHDFE.Forg import forg\nimport time\nimport pandas as pd\nfrom statsmodels.iolib.tableformatting import (gen_fmt, fmt_2)\nfrom statsmodels.iolib.table import SimpleTable\nfrom itertools import zip_longest\nfrom statsmodels.compat.python import lrange, lmap, lzip\nfrom scipy.stats import t\n\n\nclass OLSFixed(object):\n def __init(self):\n self.params = None\n self.df = None\n self.bse = None\n self.tvalues = None\n self.pvalues = None\n self.summary = None\n self.covar_matrix = None\n self.fittedvalues = None\n self.resid = None\n self.rsquared = None\n self.rsquared_adj = None\n self.full_rsquared = None\n self.full_rsquared_adj = None\n self.fvalue = None\n self.f_pvalue = None\n self.full_fvalue = None\n self.full_f_pvalue = None\n self.variance_matrix = None\n self.fittedvalues = None\n self.resid = None\n self.nobs = None\n self.yname = None\n self.xname = None\n self.resid_std_err = None\n self.Covariance_Type = None\n self.cluster_method = None\n self.demeaned_df = None\n self.data_df = None\n self.general_table = None\n self.std_err_name = None\n self.f_df_full = None\n self.f_df_proj = None\n self.consist_col = None\n self.category_col = None\n self.out_col = None\n\n def conf_int(self, conf=0.05):\n tmpdf = pd.DataFrame(columns=[0, 1], index=list(self.params.index))\n tmpdf[0] = self.params - t.ppf(1 - conf / 2, self.df) * self.bse\n tmpdf[1] = self.params + t.ppf(1 - conf / 2, self.df) * self.bse\n return tmpdf\n\n def summary(self, yname=None, xname=None, title=0, alpha=.05,\n returns='text', model_info=None):\n # General part of the summary table\n if title == 0:\n title = 'High Dimensional Fixed Effect Regression Results'\n\n if type(xname) == str: xname = [xname]\n if type(yname) == str: yname = [yname]\n if xname is not None and len(xname) != len(self.xname):\n # GH 2298\n raise ValueError('User supplied xnames must have the same number of '\n 'entries as the number of model parameters '\n '({0})'.format(len(self.xname)))\n\n if yname is not None and len(yname) != len(self.yname):\n raise ValueError('User supplied ynames must have the same number of '\n 'entries as the number of model dependent variables '\n '({0})'.format(len(self.yname)))\n if xname is None:\n xname = self.xname\n if yname is None:\n yname = self.yname\n\n time_now = time.localtime()\n time_of_day = [time.strftime(\"%H:%M:%S\", time_now)]\n date = time.strftime(\"%a, %d %b %Y\", time_now)\n nobs = int(self.nobs)\n df_model = self.df\n resid_std_err = forg(self.resid_std_err, 4)\n Covariance_Type = self.Covariance_Type\n cluster_method = self.cluster_method\n gen_left = [('Dep. Variable:', yname),\n ('No. Observations:', [nobs]), # TODO: What happens with multiple names?\n ('DoF of residual:', [df_model]),\n ('Residual std err:', [resid_std_err]),\n ('Covariance Type:', [Covariance_Type]),\n ('Cluster Method:', [cluster_method])\n ]\n r_squared = forg(self.rsquared, 4)\n rsquared_adj = forg(self.rsquared_adj, 4)\n full_rsquared = forg(self.full_rsquared, 4)\n full_rsquared_adj = forg(self.full_rsquared_adj, 4)\n fvalue = forg(self.fvalue, 4)\n f_pvalue = forg(self.f_pvalue, 4)\n full_fvalue = forg(self.full_fvalue, 4)\n full_f_pvalue = forg(self.full_f_pvalue, 4)\n gen_right = [('R-squared(proj model):', [r_squared]),\n ('Adj. R-squared(proj model):', [rsquared_adj]),\n ('R-squared(full model):', [full_rsquared]),\n ('Adj. 
R-squared(full model):', [full_rsquared_adj]),\n ('F-statistic(proj model):', [fvalue]),\n ('Prob (F-statistic (proj model)):', [f_pvalue]),\n ('DoF of F-test (proj model):', [self.f_df_proj]),\n ('F-statistic(full model):', [full_fvalue]),\n ('Prob (F-statistic (full model)):', [full_f_pvalue]),\n ('DoF of F-test (full model):', [self.f_df_full])\n ]\n # pad both tables to equal number of rows\n if len(gen_right) < len(gen_left):\n # fill up with blank lines to same length\n gen_right += [(' ', ' ')] * (len(gen_left) - len(gen_right))\n elif len(gen_right) > len(gen_left):\n # fill up with blank lines to same length, just to keep it symmetric\n gen_left += [(' ', ' ')] * (len(gen_right) - len(gen_left))\n\n gen_stubs_left, gen_data_left = zip_longest(*gen_left)\n gen_title = title\n gen_header = None\n gen_table_left = SimpleTable(gen_data_left,\n gen_header,\n gen_stubs_left,\n title=gen_title,\n txt_fmt=gen_fmt\n )\n gen_stubs_right, gen_data_right = zip_longest(*gen_right)\n gen_table_right = SimpleTable(gen_data_right,\n gen_header,\n gen_stubs_right,\n title=gen_title,\n txt_fmt=gen_fmt\n )\n gen_table_left.extend_right(gen_table_right)\n self.general_table = gen_table_left\n\n # Parameters part of the summary table\n s_alp = alpha / 2\n c_alp = 1 - alpha / 2\n if Covariance_Type == 'nonrobust':\n self.std_err_name = 'nonrobust std err'\n elif Covariance_Type == 'robust':\n self.std_err_name = 'robust std err'\n elif Covariance_Type == 'clustered':\n self.std_err_name = 'cluster std err'\n else:\n self.std_err_name = 'std err'\n param_header = ['coef', self.std_err_name, 't', 'P>|t|', '[' + str(s_alp),\n str(c_alp) + ']'] # alp + ' Conf. Interval'\n params_stubs = xname\n params = self.params.copy()\n conf_int = self.conf_int(alpha)\n std_err = self.bse.copy()\n exog_len = lrange(len(xname))\n tstat = self.tvalues.copy()\n prob_stat = self.pvalues.copy()\n for i in range(len(self.params)):\n params[i] = forg(self.params[i], 5)\n std_err[i] = forg(self.bse[i], 5)\n tstat[i] = forg(self.tvalues[i], 4)\n prob_stat[i] = forg(self.pvalues[i], 4)\n\n # Simpletable should be able to handle the formating\n params_data = lzip([\"%#6.5f\" % (params[i]) for i in exog_len],\n [\"%#6.5f\" % (std_err[i]) for i in exog_len],\n [\"%#6.4f\" % (tstat[i]) for i in exog_len],\n [\"%#6.4f\" % (prob_stat[i]) for i in exog_len],\n [\"%#6.4f\" % conf_int[0][i] for i in exog_len],\n [\"%#6.4f\" % conf_int[1][i] for i in exog_len])\n self.parameter_table = SimpleTable(params_data,\n param_header,\n params_stubs,\n title=None,\n txt_fmt=fmt_2\n )\n print(self.general_table)\n print(self.parameter_table)\n return\n\n def to_excel(self, file=None):\n df_tmp = pd.DataFrame(columns=['coef', self.std_err_name, 't', 'p', 'conf_int_lower', 'conf_int_upper'],\n index=self.xname)\n df_tmp.coef = self.params\n df_tmp[self.std_err_name] = self.bse\n df_tmp.t = self.tvalues\n df_tmp.p = self.pvalues\n df_tmp.conf_int_lower = self.conf_int()[0]\n df_tmp.conf_int_upper = self.conf_int()[1]\n df_tmp2 = pd.DataFrame(\n columns=['dep_variable', 'no_obs', 'df_model', 'resid_std_err', 'Covariance_Type', 'cluster_method',\n 'proj_Rsquared', 'proj_Rsquared_adj', 'full_Rsquared', 'full_Rsquared_adj',\n 'proj_fvalue', 'proj_f_pvalue', 'full_fvalue', 'full_f_pvalue'])\n df_tmp2.dep_variable = self.yname # y不止一个怎么办\n df_tmp2.no_obs = self.nobs\n df_tmp2.df_model = self.df\n df_tmp2.resid_std_err = self.resid_std_err\n df_tmp2.Covariance_Type = self.Covariance_Type\n df_tmp2.cluster_method = self.cluster_method\n df_tmp2.proj_Rsquared 
= self.rsquared\n df_tmp2.proj_Rsquared_adj = self.rsquared_adj\n df_tmp2.full_Rsquared = self.full_rsquared\n df_tmp2.full_Rsquared_adj = self.full_rsquared_adj\n df_tmp2.proj_fvalue = self.fvalue\n df_tmp2.proj_f_pvalue = self.f_pvalue\n df_tmp2.full_fvalue = self.full_fvalue\n df_tmp2.full_f_pvalue = self.full_f_pvalue\n if file is None:\n file = 'output.xls'\n writer = pd.ExcelWriter(file)\n df_tmp.to_excel(writer, encoding='utf-8', sheet_name='params')\n df_tmp2.to_excel(writer, encoding='utf-8', sheet_name='general', index=False)\n writer.save()\n"
]
| [
[
"pandas.DataFrame",
"scipy.stats.t.ppf",
"pandas.ExcelWriter"
]
]
|
strawberrypie/gluon-ts | [
"1d27423478f1dc4621f81c4659d8ba78f88ee89b"
]
| [
"src/gluonts/dataset/repository/_m4.py"
]
| [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom pathlib import Path\nimport os\nimport json\n\nimport pandas as pd\nimport numpy as np\n\nfrom gluonts.dataset.repository._util import metadata, save_to_file, to_dict\n\n\ndef generate_m4_dataset(\n dataset_path: Path, m4_freq: str, pandas_freq: str, prediction_length: int\n):\n m4_dataset_url = (\n \"https://github.com/M4Competition/M4-methods/raw/master/Dataset\"\n )\n train_df = pd.read_csv(\n f\"{m4_dataset_url}/Train/{m4_freq}-train.csv\", index_col=0\n )\n test_df = pd.read_csv(\n f\"{m4_dataset_url}/Test/{m4_freq}-test.csv\", index_col=0\n )\n\n os.makedirs(dataset_path, exist_ok=True)\n\n with open(dataset_path / \"metadata.json\", \"w\") as f:\n f.write(\n json.dumps(\n metadata(\n cardinality=len(train_df),\n freq=pandas_freq,\n prediction_length=prediction_length,\n )\n )\n )\n\n train_file = dataset_path / \"train\" / \"data.json\"\n test_file = dataset_path / \"test\" / \"data.json\"\n\n train_target_values = [ts[~np.isnan(ts)] for ts in train_df.values]\n\n test_target_values = [\n np.hstack([train_ts, test_ts])\n for train_ts, test_ts in zip(train_target_values, test_df.values)\n ]\n\n if m4_freq == \"Yearly\":\n # some time series have more than 300 years which can not be represented in pandas,\n # this is probably due to a misclassification of those time series as Yearly\n # we simply use only the last 300 years for training\n # note this does not affect test time as prediction length is less than 300 years\n train_target_values = [ts[-300:] for ts in train_target_values]\n test_target_values = [ts[-300:] for ts in test_target_values]\n\n # the original dataset did not include time stamps, so we use a mock start date for each time series\n # we use the earliest point available in pandas\n mock_start_dataset = \"1750-01-01 00:00:00\"\n\n save_to_file(\n train_file,\n [\n to_dict(target_values=target, start=mock_start_dataset, cat=[cat])\n for cat, target in enumerate(train_target_values)\n ],\n )\n\n save_to_file(\n test_file,\n [\n to_dict(target_values=target, start=mock_start_dataset, cat=[cat])\n for cat, target in enumerate(test_target_values)\n ],\n )\n"
]
| [
[
"numpy.hstack",
"pandas.read_csv",
"numpy.isnan"
]
]
|
timo/zasim | [
"54d8eb329af73700bf0df2be6e753e309e9d8191"
]
| [
"zasim/config.py"
]
| [
"\"\"\"This module implements different ways to create configurations.\n\nThe idea behind the API is, to let the user set any settings in the constructor\nof any Configuration instance and to let the Target instance pass a size\nhint and datatype when generating the config.\n\nThis way, the Target doesn't have to know anything about what configuration to\ngenerate and how to do it.\n\nBy letting the Target only supply a size_hint, the Configuration is\nallowed to dictate what size the configuration should have. This is important\nespecially for loading configurations from files.\n\n\"\"\"\n# This file is part of zasim. zasim is licensed under the BSD 3-clause license.\n# See LICENSE.txt for details.\n\n\n\nfrom __future__ import division\n\nfrom features import HAVE_NUMPY_RANDOM, HAVE_MULTIDIM\n\nimport random\nimport math\nimport numpy as np\nfrom itertools import product\n\ndefault_dtype = np.int32\n\nclass BaseConfiguration(object):\n \"\"\"This class defines the interface that initial configuration generators\n should have to the outside.\"\"\"\n\n def generate(self, size_hint=None, dtype=default_dtype):\n \"\"\"Generate the configuration.\n\n :param size_hint: What size to generate. This can be None, if the\n generator may choose the dimensionality and sizes of each\n dimension freely, or a tuple containing None or a number for\n each dimension. If one or all of the dimensions are None, their\n size will be decided by the generator.\n\n The size_hint may be ignored by the generator for cases like\n loading a configuration from a file.\n :param dtype: The `~numpy.dtype` to use for the array.\n :returns: A numpy array to be used as the configuration.\n \"\"\"\n\nclass BaseRandomConfiguration(BaseConfiguration):\n def __init__(self, base=2, *percentages):\n \"\"\"Create a random initial configuration with values from 0 to base-1\n inclusive and, if positional arguments are given, use the supplied\n percentages for the different states.\"\"\"\n\n self.base = base\n self.percentages = percentages\n if len(self.percentages) > self.base:\n raise ValueError(\"Cannot have more percentage values than values.\")\n\n rest = self.base - len(self.percentages)\n if self.percentages:\n self.cumulative_percentages = [sum(self.percentages[:index + 1]) for index in range(len(self.percentages))]\n else:\n self.cumulative_percentages = [1 / self.base]\n rest -= 1\n\n if self.cumulative_percentages[-1] > 1.0:\n raise ValueError(\"Probabilities must not add up to more than 1.0\")\n\n rest_percentage = 1.0 - self.cumulative_percentages[-1]\n\n for number in range(rest):\n self.cumulative_percentages.append(self.cumulative_percentages[-1] + rest_percentage / rest)\n\n if rest == 0 and self.cumulative_percentages[-1] != 1.0:\n raise ValueError(\"Probabilities must add up to 1.0\")\n\n def size_hint_to_size(self, size_hint=None):\n if size_hint is None:\n size_hint = (random.randrange(1, 100),)\n\n size = []\n for entry in size_hint:\n size.append(random.randrange(1, 100) if entry is None else entry)\n\n return tuple(size)\n\n def generate(self, size_hint=None, dtype=default_dtype):\n size = self.size_hint_to_size(size_hint)\n\n if not HAVE_NUMPY_RANDOM and not HAVE_MULTIDIM:\n # pypy compatibility\n assert len(size) == 1\n randoms = np.array([random.random() for i in xrange(size[0])])\n arr = np.zeros(len(randoms), dtype=dtype)\n elif not HAVE_NUMPY_RANDOM and HAVE_MULTIDIM:\n randoms = np.array([random.random() for i in xrange(reduce(lambda a,b:a*b, size))])\n randoms = randoms.reshape(*size)\n arr = 
np.zeros(randoms.shape, dtype=dtype)\n else:\n randoms = np.random.rand(*size)\n arr = np.zeros(randoms.shape, dtype=dtype)\n\n for pos in product(*[xrange(siz) for siz in size]):\n arr[pos] = min(idx for idx, perc in self.cumulative_percentages\n if randoms[pos] < perc)\n\n return arr\n\n def make_percentages_cumulative(self, percentages):\n self.percentages = percentages\n if len(self.percentages) > len(self.values):\n raise ValueError(\"Cannot have more percentage values than values.\")\n\n rest = len(self.values) - len(self.percentages)\n if self.percentages:\n cumulative_percentages = [sum(self.percentages[:index + 1]) for index in range(len(self.percentages))]\n else:\n cumulative_percentages = [1.0 / len(self.values)]\n rest -= 1\n\n if cumulative_percentages[-1] > 1.0:\n raise ValueError(\"Probabilities must not add up to more than 1.0\")\n\n rest_percentage = 1.0 - cumulative_percentages[-1]\n\n for number in range(rest):\n cumulative_percentages.append(cumulative_percentages[-1] + rest_percentage / rest)\n\n if rest == 0 and cumulative_percentages[-1] != 1.0:\n raise ValueError(\"Probabilities must add up to 1.0\")\n\n self.cumulative_percentages = list(zip(self.values, cumulative_percentages))\n\n if len(self.cumulative_percentages) > len(self.percentages):\n percs = []\n cumu = 0\n for val, perc_v in self.cumulative_percentages:\n percs.append(perc_v - cumu)\n cumu = perc_v\n self.percentages = tuple(percs)\n\nclass RandomConfiguration(BaseRandomConfiguration):\n def __init__(self, base=2, *percentages):\n \"\"\"Create a random initial configuration with values from 0 to base-1\n inclusive and, if positional arguments are given, use the supplied\n percentages for the different states.\"\"\"\n\n self.values = range(base)\n self.percentages = percentages\n self.make_percentages_cumulative(percentages)\n\nclass RandomConfigurationFromPalette(BaseRandomConfiguration):\n def __init__(self, values, *percentages):\n \"\"\"Create a random initial configuration with the given values and,\n if positional arguments are given, use the supplied\n percentages for the different states.\"\"\"\n\n self.values = values\n self.make_percentages_cumulative(percentages)\n\nclass BaseCharConfiguration(BaseConfiguration):\n \"\"\"Generate a configuration from an ascii string.\"\"\"\n def __init__(self, strdata, palette):\n \"\"\"The palette is either a dictionary, mapping value to representation\n or a list with representation values.\n If no palette is supplied, the PALETTE value from BaseConsolePainter is\n used.\"\"\"\n self.strdata = strdata\n\n if not palette:\n from zasim.display import console\n palette = console.PALETTE\n if isinstance(palette, list):\n palette = dict(enumerate(palette))\n self.palette = palette\n\n def generate(self, size_hint=None, dtype=default_dtype):\n lines = []\n for line in self.strdata.split(\"\\n\"):\n line_res = list(line.rstrip(\"\\n\\r\"))\n # skip empty lines\n if line_res:\n lines.append(line_res)\n whole_conf = np.array(lines)\n result = np.empty((len(lines), len(lines[0])), dtype=dtype)\n for value, entry in self.palette.iteritems():\n result[whole_conf == entry] = value\n return result.transpose()\n\nclass FileCharConfiguration(BaseCharConfiguration):\n \"\"\"Import an ascii-based file with a palette, as generated by\n `zasim.display.console.BaseConsolePainter.export`.\"\"\"\n\n def __init__(self, file_or_name, palette=None):\n self.file_or_name = file_or_name\n\n super(FileCharConfiguration, self).__init__(\"\", palette)\n\n def generate(self, **kwargs):\n 
if not isinstance(self.file_or_name, file):\n file_o = open(self.file_or_name, \"r\")\n self.strdata = file_o.read()\n else:\n self.file_or_name.seek(0)\n self.strdata = self.file_or_name.read()\n\n return super(FileCharConfiguration, self).generate(**kwargs)\n\nclass ImageConfiguration(BaseConfiguration):\n \"\"\"Import an image file as a configuration.\"\"\"\n\n def __init__(self, filename, scale=1, palette=None, fuzz=False):\n self.filename = filename\n\n if palette is None:\n from zasim.display.qt import PALETTE_32\n palette = PALETTE_32\n if isinstance(palette, list):\n palette = dict(enumerate(palette))\n self.palette = palette\n self.scale = scale\n self.fuzz = fuzz\n\n def generate(self, size_hint=None, dtype=default_dtype):\n from .external.qt import QImage, QColor\n from .display.qt import make_palette_qc\n image = QImage()\n assert image.load(self.filename)\n image = image.convertToFormat(QImage.Format_RGB32)\n if self.scale != 1:\n image = image.scaled(image.width() // self.scale,\n image.height() // self.scale)\n if size_hint:\n w, h = size_hint\n else:\n w, h = image.width(), image.height()\n\n result = np.ones((w, h), dtype=dtype)\n\n if self.fuzz:\n\n def lowest_distance(color, palette):\n min_dst = 1000000000\n min_value = -1\n for value, pcol in palette.iteritems():\n x1, y1, z1,_ = color.getHsv()\n x2, y2, z2,_ = pcol.getHsv()\n dist = (x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2\n if dist < min_dst:\n min_value = value\n min_dst = dist\n return min_value\n\n qc_pal = make_palette_qc(self.palette)\n for x, y in product(range(image.width()), range(image.height())):\n color = QColor.fromRgb(image.pixel(x, y))\n value = lowest_distance(color, qc_pal)\n result[x, y] = value\n else:\n nparr = np.frombuffer(image.bits(), dtype=np.uint32)\n nparr = nparr.reshape((image.width(), image.height()), order=\"F\")\n for value, color in self.palette.iteritems():\n result[nparr == color] = value\n\n return result\n\nclass PatternConfiguration(BaseConfiguration):\n \"\"\"This generator accepts different patterns as lists of values and a\n layout that defines how those patterns make up the whole configuration.\n\n Pattern 0 will be used to fill the whole background, all others will\n be embedded centered in the configuration according to `layout`.\"\"\"\n def __init__(self, patterns, layout):\n self.patterns = [list(a) for a in patterns] # deep copy\n self.layout = tuple(layout)\n\n def generate(self, size_hint=None, dtype=default_dtype):\n assert len(size_hint) == 1, \"two-dimensional pattern-based configs not supported yet.\"\n\n background = self.patterns[0] * (size_hint[0] // len(self.patterns[0]) + 1)\n result = np.array(background[:size_hint[0]], dtype=dtype)\n\n internal_pattern = sum([self.patterns[idx] for idx in self.layout], [])\n lendiff = size_hint[0] - len(internal_pattern)\n if lendiff < 0:\n result[0:-1] = internal_pattern[-lendiff//2:lendiff//2-1]\n else:\n middle = size_hint[0] // 2\n halfwidth = len(internal_pattern) // 2\n result[middle - halfwidth:middle - halfwidth + len(internal_pattern)] = internal_pattern\n\n return result\n\ndef function_of_radius(function, max_dist=\"diagonal\"):\n \"\"\"Turns a function that takes the radius and maximum distance as\n arguments into a function compatible with the\n `DensityDistributedConfiguration` class.\"\"\"\n if max_dist == \"shortest\":\n calc_max_dist = lambda size: min(size)\n elif max_dist == \"longest\":\n calc_max_dist = lambda size: max(size)\n elif max_dist == \"diagonal\":\n def calc_max_dist(size):\n halves = 
[num / 2 for num in size]\n squares = [num ** 2 for num in halves]\n return math.sqrt(sum(squares))\n\n def wrapper(*args):\n dists = []\n half = len(args) // 2\n for num in range(half):\n center = args[num + half] / 2\n dists.append(abs(center - args[num]))\n\n squares = [num ** 2 for num in dists]\n dist = math.sqrt(sum(squares))\n\n return function(dist, calc_max_dist(args[half:]))\n\n return wrapper\n\nclass DensityDistributedConfiguration(RandomConfiguration):\n \"\"\"Create a distribution from functions giving the probability for each\n field to have a given value.\n\n For prob_dist_fun, supply a dictionary with one entry per value you\n want to end up in the configuration as the key. The value is a lambda\n from position and config size to relative probability at that position.\n\n For each position in the configuration, every function is called and the\n results added up to figure out, what value would be 100% for that cell,\n then the relative probabilities are divided and used for choosing a\n value.\n\n If a value is an integer, rather than a callable, then it will be\n interpreted as a constant function instead.\"\"\"\n\n def __init__(self, prob_dist_fun):\n self.prob_dist_fun = prob_dist_fun\n\n def generate(self, size_hint=None, dtype=default_dtype):\n size = self.size_hint_to_size(size_hint)\n\n # XXX remove duplicate code here?\n result = np.zeros(size, dtype)\n if not HAVE_NUMPY_RANDOM and not HAVE_MULTIDIM:\n # pypy compatibility\n assert len(size) == 1\n randoms = np.array([random.random() for i in xrange(size[0])])\n elif not HAVE_NUMPY_RANDOM and HAVE_MULTIDIM:\n randoms = np.array([random.random() for i in xrange(reduce(lambda a,b:a*b, size))])\n randoms = randoms.reshape(*size)\n else:\n randoms = np.random.rand(*size)\n\n\n for pos in product(*[xrange(siz) for siz in size]):\n relative_probabs = {}\n for key, func in self.prob_dist_fun.iteritems():\n if isinstance(func, int):\n relative_probabs[key] = func\n else:\n relative_probabs[key] = self.prob_dist_fun[key](*(pos + size))\n\n one = sum(relative_probabs.values())\n cumulative_percentages = {}\n cumulative = 0\n for key, relative_perc in relative_probabs.iteritems():\n part = relative_perc / one\n cumulative_percentages[key] = cumulative + part\n cumulative += part\n\n # XXX remove duplicate code here?\n result[pos] = min(idx for idx, perc in cumulative_percentages.iteritems()\n if randoms[pos] < perc)\n\n return result\n\n"
]
| [
[
"numpy.array",
"numpy.ones",
"numpy.random.rand",
"numpy.zeros"
]
]
|
nitchith/CarND-Advanced-Lane-Lines | [
"8e9e4d369f95f2076aa3b99c9015ac95c20037fc"
]
| [
"code/utils.py"
]
| [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\ndef utils_plot(img, rows, cols, idx, title, cmap=None, fontsize=20):\n ax = plt.subplot(rows, cols, idx)\n ax.set_title(title, fontsize=fontsize)\n plt.imshow(img, cmap=cmap)\n return\n"
]
| [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplot"
]
]
|
cuongngm/MASTER-pytorch | [
"f21ba5adbf027f8d6bc35cf465a7b4dd045c61f1"
]
| [
"utils/label_util.py"
]
| [
"# -*- coding: utf-8 -*-\n# @Author: Wenwen Yu\n# @Created Time: 10/4/2020 14:24\n\nimport collections\nfrom pathlib import Path\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n# from .keys import keys\n\nSTRING_MAX_LEN = 100\nVOCABULARY_FILE_NAME = 'keys.txt'\n\n\nclass LabelConverterForMASTER(object):\n \"\"\"Convert between str and label index.\n \"\"\"\n\n def __init__(self, classes, max_length=-1, ignore_over=False):\n \"\"\"\n\n :param classes: alphabet(keys), key string or text vocabulary\n :param max_length: max_length is mainly for controlling the statistics' stability,\n due to the layer normalisation. and the model can only predict max_length text.\n -1 is for fixing the length, the max_length will be computed dynamically for one batch.\n Empirically, it should be maximum length of text you want to predict.\n :param ignore_over: (bool, default=False): whether or not to ignore over max length text.\n \"\"\"\n\n cls_list = None\n if isinstance(classes, str):\n cls_list = list(classes)\n if isinstance(classes, Path):\n p = Path(classes)\n if not p.exists():\n raise RuntimeError('Key file is not found')\n with p.open(encoding='utf8') as f:\n classes = f.read()\n classes = classes.strip()\n cls_list = list(classes)\n elif isinstance(classes, list):\n cls_list = classes\n\n self.alphabet = cls_list\n self.dict = {}\n\n self.dict['<EOS>'] = 1 # start\n self.dict['<SOS>'] = 2\n self.dict['<PAD>'] = 0\n self.dict['<UNK>'] = 3\n for i, item in enumerate(self.alphabet):\n self.dict[item] = i + 4 # index start from 4\n self.inverse_dict = {v: k for k, v in self.dict.items()}\n\n self.EOS = self.dict['<EOS>']\n self.SOS = self.dict['<SOS>']\n self.PAD = self.dict['<PAD>']\n self.UNK = self.dict['<UNK>']\n\n self.nclass = len(self.alphabet) + 4\n self.max_length = max_length\n self.ignore_over = ignore_over\n\n def encode(self, text):\n \"\"\" convert text to label index, add <SOS>, <EOS>, and do max_len padding\n Args:\n text (str or list of str): texts to convert.\n Returns:\n torch.LongTensor targets:max_length × batch_size\n \"\"\"\n if isinstance(text, str):\n text = [self.dict[item] if item in self.alphabet else self.UNK for item in text]\n elif isinstance(text, collections.Iterable):\n text = [self.encode(s) for s in text] # encode\n\n if self.max_length == -1:\n local_max_length = max([len(x) for x in text]) # padding\n self.ignore_over = True\n else:\n local_max_length = self.max_length\n\n nb = len(text)\n\n targets = torch.zeros(nb, (local_max_length + 2))\n targets[:, :] = self.PAD\n\n for i in range(nb):\n\n if not self.ignore_over:\n if len(text[i]) > local_max_length:\n raise RuntimeError('Text is larger than {}: {}'.format(local_max_length, len(text[i])))\n\n targets[i][0] = self.SOS # start\n targets[i][1:len(text[i]) + 1] = text[i]\n targets[i][len(text[i]) + 1] = self.EOS\n text = targets.transpose(0, 1).contiguous()\n text = text.long()\n return torch.LongTensor(text)\n\n def decode(self, t):\n \"\"\"Decode encoded texts back into strs.\n Args:\n torch.IntTensor [length_0 + length_1 + ... 
length_{n - 1}]: encoded texts.\n torch.IntTensor [n]: length of each text.\n Raises:\n AssertionError: when the texts and its length does not match.\n Returns:\n text (str or list of str): texts to convert.\n \"\"\"\n\n # texts = list(self.dict.keys())[list(self.dict.values()).index(t)]\n if isinstance(t, torch.Tensor):\n texts = self.inverse_dict[t.item()]\n else:\n texts = self.inverse_dict[t]\n return texts\n\n\n# LabelTransformer = strLabelConverterForTransformerWithVocabularyLevel(keys, max_length=STRING_MAX_LEN,\n# ignore_over=False)\n\nLabelTransformer = LabelConverterForMASTER(Path(__file__).parent.joinpath(VOCABULARY_FILE_NAME),\n max_length=STRING_MAX_LEN, ignore_over=False)\n"
]
| [
[
"torch.zeros",
"torch.LongTensor"
]
]
|
LeoChan0814/Python-Machine-Learning-Cookbook | [
"ced231713c8c82acc17d9bb9beb59a2db5db389c"
]
| [
"Chapter01/preprocessing.py"
]
| [
"import numpy as np\nfrom sklearn import preprocessing\n\ndata = np.array([[ 3, -1.5, 2, -5.4],\n [ 0, 4, -0.3, 2.1],\n [ 1, 3.3, -1.9, -4.3]])\n\n# mean removal\ndata_standardized = preprocessing.scale(data)\nprint('\\nMean =', data_standardized.mean(axis=0))\nprint('Std deviation =', data_standardized.std(axis=0))\n\n# min max scaling\ndata_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))\ndata_scaled = data_scaler.fit_transform(data)\nprint('\\nMin max scaled data:\\n', data_scaled)\n\n# normalization\ndata_normalized = preprocessing.normalize(data, norm='l1')\nprint('\\nL1 normalized data:\\n', data_normalized)\n\n# binarization\ndata_binarized = preprocessing.Binarizer(threshold=1.4).transform(data)\nprint('\\nBinarized data:\\n', data_binarized)\n\n# one hot encoding\nencoder = preprocessing.OneHotEncoder()\nencoder.fit([[0, 2, 1, 12], [1, 3, 5, 3], [2, 3, 2, 12], [1, 2, 4, 3]])\nencoded_vector = encoder.transform([[2, 3, 5, 3]]).toarray()\nprint('\\nEncoded vector:\\n', encoded_vector)\n\n"
]
| [
[
"numpy.array",
"sklearn.preprocessing.scale",
"sklearn.preprocessing.normalize",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.Binarizer",
"sklearn.preprocessing.OneHotEncoder"
]
]
|
abbasidaniyal/Paddle | [
"c3527f5526ee96398760cbef11d7de48f41fe998"
]
| [
"python/paddle/fluid/tests/unittests/test_unstack_op.py"
]
| [
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom op_test import OpTest\nimport numpy as np\nimport unittest\n\n\nclass TestUnStackOpBase(OpTest):\n def initDefaultParameters(self):\n self.input_dim = (5, 6, 7)\n self.axis = 0\n self.dtype = 'float64'\n\n def initParameters(self):\n pass\n\n def get_y_names(self):\n y_names = []\n for i in range(self.input_dim[self.axis]):\n y_names.append('y{}'.format(i))\n return y_names\n\n def setUp(self):\n self.initDefaultParameters()\n self.initParameters()\n self.op_type = 'unstack'\n self.x = np.random.random(size=self.input_dim).astype(self.dtype)\n\n outs = np.split(self.x, self.input_dim[self.axis], self.axis)\n new_shape = list(self.input_dim)\n del new_shape[self.axis]\n y_names = self.get_y_names()\n tmp = []\n for i in range(self.input_dim[self.axis]):\n tmp.append((y_names[i], np.reshape(outs[i], new_shape)))\n\n self.inputs = {'X': self.x}\n self.outputs = {'Y': tmp}\n self.attrs = {'axis': self.axis, 'num': self.input_dim[self.axis]}\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad(self):\n self.check_grad('X', self.get_y_names())\n\n\nclass TestStackOp3(TestUnStackOpBase):\n def initParameters(self):\n self.axis = -1\n\n\nclass TestStackOp4(TestUnStackOpBase):\n def initParameters(self):\n self.axis = -3\n\n\nclass TestStackOp5(TestUnStackOpBase):\n def initParameters(self):\n self.axis = 1\n\n\nclass TestStackOp6(TestUnStackOpBase):\n def initParameters(self):\n self.axis = 2\n\n\nif __name__ == '__main__':\n unittest.main()\n"
]
| [
[
"numpy.random.random",
"numpy.reshape",
"numpy.split"
]
]
|
wkerzendorf/tardis | [
"c9e35423738f3bf6741aa5e89d4e1f3b45033708"
]
| [
"tardis/montecarlo/setup_package.py"
]
| [
"#setting the right include\nfrom setuptools import Extension\nimport numpy as np\nimport os\nfrom astropy_helpers.setup_helpers import get_distutils_option\n\nfrom glob import glob\n\nif get_distutils_option('with_openmp', ['build', 'install', 'develop']) is not None:\n compile_args = ['-fopenmp', '-W', '-Wall', '-Wmissing-prototypes', '-std=c99']\n link_args = ['-fopenmp']\n define_macros = [('WITHOPENMP', None)]\nelse:\n compile_args = ['-W', '-Wall', '-Wmissing-prototypes', '-std=c99']\n link_args = []\n define_macros = []\n\ndef get_extensions():\n sources = ['tardis/montecarlo/montecarlo.pyx']\n sources += [os.path.relpath(fname) for fname in glob(\n os.path.join(os.path.dirname(__file__), 'src', '*.c'))\n if not os.path.basename(fname).startswith('test')]\n sources += [os.path.relpath(fname) for fname in glob(\n os.path.join(os.path.dirname(__file__), 'src/randomkit', '*.c'))]\n deps = [os.path.relpath(fname) for fname in glob(\n os.path.join(os.path.dirname(__file__), 'src', '*.h'))]\n deps += [os.path.relpath(fname) for fname in glob(\n os.path.join(os.path.dirname(__file__), 'src/randomkit', '*.h'))]\n\n return [Extension('tardis.montecarlo.montecarlo', sources,\n include_dirs=['tardis/montecarlo/src',\n 'tardis/montecarlo/src/randomkit',\n np.get_include()],\n depends=deps,\n extra_compile_args=compile_args,\n extra_link_args=link_args,\n define_macros=define_macros)]\n\ndef get_package_data():\n return {'tardis.montecarlo.tests':['data/*.npy']}\n"
]
| [
[
"numpy.get_include"
]
]
|
mj-will/nessai | [
"e1ccc791a332565af372d14e5986920d552e1294"
]
| [
"tests/test_nested_sampler/test_flow_proposal.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nTest the functions related to when the flow should be trained or reset and\ntraining itself.\n\"\"\"\nimport datetime\nimport numpy as np\nimport pytest\nfrom unittest.mock import call, MagicMock\nfrom nessai.nestedsampler import NestedSampler\n\n\ndef test_configure_flow_reset_false(sampler):\n \"\"\"Assert the attributes evaluate to false if the inputs are false\"\"\"\n NestedSampler.configure_flow_reset(sampler, False, False)\n assert not sampler.reset_weights\n assert not sampler.reset_permutations\n\n\[email protected]('weights', [10, 5.0])\[email protected]('permutations', [10, 5.0])\ndef test_configure_flow_reset(sampler, weights, permutations):\n \"\"\"Assert the attributes evaluate to false if the inputs are false\"\"\"\n NestedSampler.configure_flow_reset(sampler, weights, permutations)\n assert sampler.reset_weights == float(weights)\n assert sampler.reset_permutations == float(permutations)\n\n\ndef test_configure_flow_reset_error_weights(sampler):\n \"\"\"Assert an error is raised in the weights input is invalid\"\"\"\n with pytest.raises(TypeError) as excinfo:\n NestedSampler.configure_flow_reset(sampler, None, 5)\n assert 'weights` must be' in str(excinfo.value)\n\n\ndef test_configure_flow_reset_error_permutations(sampler):\n \"\"\"Assert an error is raised in the permutations input is invalid\"\"\"\n with pytest.raises(TypeError) as excinfo:\n NestedSampler.configure_flow_reset(sampler, 5, None)\n assert 'permutations` must be' in str(excinfo.value)\n\n\ndef test_check_training_not_completed_training(sampler):\n \"\"\"\n Assert the flow is forced to train if training did not complete when\n the sampler was checkpointed.\n \"\"\"\n sampler.completed_training = False\n train, force = NestedSampler.check_training(sampler)\n assert train is True\n assert force is True\n\n\ndef test_check_training_train_on_empty(sampler):\n \"\"\"\n Assert the flow is forced to train if training the pool is empty and\n `train_on_empty` is true but the proposal was not in the process of\n popluating.\n \"\"\"\n sampler.completed_training = True\n sampler.train_on_empty = True\n sampler.proposal = MagicMock()\n sampler.proposal.populated = False\n sampler.proposal.populating = False\n train, force = NestedSampler.check_training(sampler)\n assert train is True\n assert force is True\n\n\ndef test_check_training_acceptance(sampler):\n \"\"\"\n Assert that training will be true but not forced if the acceptance\n threshold is met and retraining on acceptance is enabled.\n \"\"\"\n sampler.completed_training = True\n sampler.train_on_empty = True\n sampler.proposal = MagicMock()\n sampler.proposal.populated = True\n sampler.proposal.populating = False\n sampler.acceptance_threshold = 0.1\n sampler.mean_block_acceptance = 0.01\n sampler.retrain_acceptance = True\n train, force = NestedSampler.check_training(sampler)\n assert train is True\n assert force is False\n\n\ndef test_check_training_iteration(sampler):\n \"\"\"\n Assert that training will be true but not forced if a training iteration\n is reached (n iterations have passed since last updated).\n \"\"\"\n sampler.completed_training = True\n sampler.train_on_empty = True\n sampler.proposal = MagicMock()\n sampler.proposal.populated = True\n sampler.proposal.populating = False\n sampler.acceptance_threshold = 0.1\n sampler.mean_block_acceptance = 0.2\n sampler.retrain_acceptance = False\n sampler.iteration = 3521\n sampler.last_updated = 2521\n sampler.training_frequency = 1000\n train, force = 
NestedSampler.check_training(sampler)\n assert train is True\n assert force is False\n\n\[email protected]('config', [\n dict(),\n dict(train_on_empty=False, populated=False),\n dict(train_on_empty=True, populated=False, populating=True),\n dict(mean_acceptance=0.01, acceptance_threshold=0.5,\n retrain_acceptance=False),\n dict(mean_acceptance=0.5, acceptance_threshold=0.01,\n retrain_acceptance=True),\n dict(iteration=800, last_updated=0, training_frequency=801)\n])\ndef test_check_training_false(sampler, config):\n \"\"\"\n Test a range of different scenarios that should all not start training.\n \"\"\"\n sampler.completed_training = True\n sampler.train_on_empty = config.get('train_on_empty', False)\n sampler.proposal = MagicMock()\n sampler.proposal.populated = config.get('populated', False)\n sampler.proposal.populating = config.get('populating', False)\n sampler.acceptance_threshold = config.get('acceptance_threshold', 0.1)\n sampler.mean_block_acceptance = config.get('mean_acceptance', 0.2)\n sampler.retrain_acceptance = config.get('retrain_acceptance', False)\n sampler.iteration = config.get('iteration', 3000)\n sampler.last_updated = config.get('last_updated', 2500)\n sampler.training_frequency = config.get('training_frequency', 1000)\n train, force = NestedSampler.check_training(sampler)\n assert train is False\n assert force is False\n\n\[email protected]('training_count', [10, 100])\ndef test_check_flow_model_reset_weights(sampler, training_count):\n \"\"\"Assert flow model only weights are reset\"\"\"\n sampler.proposal = MagicMock()\n sampler.proposal.reset_model_weights = MagicMock()\n sampler.reset_acceptance = False\n sampler.reset_weights = 10\n sampler.reset_permutations = 0\n sampler.proposal.training_count = training_count\n\n NestedSampler.check_flow_model_reset(sampler)\n\n sampler.proposal.reset_model_weights.assert_called_once_with(weights=True)\n\n\[email protected]('training_count', [10, 100])\ndef test_check_flow_model_reset_permutations(sampler, training_count):\n \"\"\"Assert flow model only permutations are reset\"\"\"\n sampler.proposal = MagicMock()\n sampler.proposal.reset_model_weights = MagicMock()\n sampler.reset_acceptance = False\n sampler.reset_weights = 0\n sampler.reset_permutations = 10\n sampler.proposal.training_count = training_count\n\n NestedSampler.check_flow_model_reset(sampler)\n\n sampler.proposal.reset_model_weights.assert_called_once_with(\n weights=False, permutations=True)\n\n\[email protected]('training_count', [10, 100])\ndef test_check_flow_model_reset_both(sampler, training_count):\n \"\"\"Assert flow model only permutations are reset\"\"\"\n sampler.proposal = MagicMock()\n sampler.proposal.reset_model_weights = MagicMock()\n sampler.reset_acceptance = False\n sampler.reset_weights = 10\n sampler.reset_permutations = 10\n sampler.proposal.training_count = training_count\n\n NestedSampler.check_flow_model_reset(sampler)\n\n calls = [call(weights=True), call(weights=False, permutations=True)]\n sampler.proposal.reset_model_weights.assert_has_calls(calls)\n\n\ndef test_check_flow_model_reset_acceptance(sampler):\n \"\"\"\n Assert flow model is reset based on acceptance is reset_acceptance is True.\n \"\"\"\n sampler.proposal = MagicMock()\n sampler.proposal.reset_model_weights = MagicMock()\n sampler.reset_acceptance = True\n sampler.mean_block_acceptance = 0.1\n sampler.acceptance_threshold = 0.5\n sampler.proposal.training_count = 1\n\n NestedSampler.check_flow_model_reset(sampler)\n\n 
sampler.proposal.reset_model_weights.assert_called_once_with(\n weights=True, permutations=True)\n\n\ndef test_check_flow_model_reset_not_trained(sampler):\n \"\"\"\n Verify that the flow model is not reset if it has never been trained.\n \"\"\"\n sampler.proposal = MagicMock()\n sampler.proposal.reset_model_weights = MagicMock()\n sampler.proposal.training_count = 0\n\n NestedSampler.check_flow_model_reset(sampler)\n\n sampler.proposal.reset_model_weights.assert_not_called()\n\n\ndef test_train_proposal_not_training(sampler):\n \"\"\"Verify the proposal is not trained it has not 'cooled down'\"\"\"\n sampler.proposal = MagicMock()\n sampler.proposal.train = MagicMock()\n sampler.iteration = 100\n sampler.last_updated = 90\n sampler.cooldown = 20\n NestedSampler.train_proposal(sampler, force=False)\n sampler.proposal.train.assert_not_called()\n\n\ndef test_train_proposal(sampler):\n \"\"\"Verify the proposal is trained\"\"\"\n sampler.proposal = MagicMock()\n sampler.proposal.train = MagicMock()\n sampler.check_flow_model_reset = MagicMock()\n sampler.checkpoint = MagicMock()\n sampler.iteration = 100\n sampler.last_updated = 90\n sampler.cooldown = 20\n sampler.memory = False\n sampler.training_time = datetime.timedelta()\n sampler.training_iterations = []\n sampler.live_points = np.arange(10)\n sampler.checkpoint_on_training = True\n sampler.block_iteration = 10\n sampler.block_acceptance = 0.5\n\n NestedSampler.train_proposal(sampler, force=True)\n\n sampler.check_flow_model_reset.assert_called_once()\n sampler.proposal.train.assert_called_once()\n sampler.checkpoint.assert_called_once_with(periodic=True)\n\n assert sampler.training_iterations == [100]\n assert sampler.training_time.total_seconds() > 0\n assert sampler.completed_training is True\n assert sampler.block_iteration == 0\n assert sampler.block_acceptance == 0\n\n\ndef test_train_proposal_memory(sampler):\n \"\"\"Verify the proposal is trained with memory\"\"\"\n sampler.proposal = MagicMock()\n sampler.proposal.train = MagicMock()\n sampler.check_flow_model_reset = MagicMock()\n sampler.checkpoint = MagicMock()\n sampler.iteration = 100\n sampler.last_updated = 90\n sampler.cooldown = 20\n sampler.memory = 2\n sampler.training_time = datetime.timedelta()\n sampler.training_iterations = []\n sampler.nested_samples = np.arange(5)\n sampler.live_points = np.arange(5, 10)\n sampler.checkpoint_on_training = True\n sampler.block_iteration = 10\n sampler.block_acceptance = 0.5\n\n NestedSampler.train_proposal(sampler, force=True)\n\n sampler.check_flow_model_reset.assert_called_once()\n sampler.checkpoint.assert_called_once_with(periodic=True)\n sampler.proposal.train.assert_called_once()\n\n np.testing.assert_array_equal(\n sampler.proposal.train.call_args[0],\n np.array([[5, 6, 7, 8, 9, 3, 4]])\n )\n\n assert sampler.training_iterations == [100]\n assert sampler.training_time.total_seconds() > 0\n assert sampler.completed_training is True\n assert sampler.block_iteration == 0\n assert sampler.block_acceptance == 0\n"
]
| [
[
"numpy.array",
"numpy.arange"
]
]
|
anantgupt/GraphAssociation | [
"514ebe3b532eb211384915354ce89fdc276c0395"
]
| [
"TSP19simpack/utils/Extract_Results/create_fig7b.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 6 11:57:27 2019\nUse this code to edit figures saved using pickle dump\n@author: anantgupta\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pickle as pl\nimport numpy as np\n\n# Load figure from disk and display\ndef cf7b(mode = 'Relax', width = 3.45, height = 2.6, font_size = 8):\n \n #fig_handle = pl.load(open('results6_14/fig_obj_est2/plot4.pickle','rb'))\n fig_handle1 = pl.load(open('fig_swidth-rob0/plot7.pickle','rb'))\n fig_handle2 = pl.load(open('fig_swidth-rob1/plot7.pickle','rb'))\n fig_handle3 = pl.load(open('fig_swidth-rob2/plot7.pickle','rb'))\n fig_handle4 = pl.load(open('fig_swidth-rob4/plot7.pickle','rb'))\n \n #fig_handle3 = pl.load(open('fig_snr-Nob21/plot2.pickle','rb'))\n \n fig, ax = plt.subplots(1,1)\n mse1=[0]*4;mse2=[0]*4;mse3=[0]*4;mse4=[0]*4\n lbl=[\" Detection\",\"False Alarm\",\" Miss\"]\n mkr = [',','+','x','*']\n indxa = [1,2]\n for i in range(3):\n rng1 = fig_handle2.axes[0].lines[i].get_data()[0]\n mse1[i] = fig_handle1.axes[0].lines[i].get_data()[1]\n \n # crb2 = fig_handle2.axes[i].lines[1].get_data()[1]\n mse2[i] = fig_handle2.axes[0].lines[i].get_data()[1]\n # #cnt2 = fig_handle2.axes[3]\n # crb3 = fig_handle3.axes[i].lines[1].get_data()[1]\n mse3[i] = fig_handle3.axes[0].lines[i].get_data()[1]\n mse4[i] = fig_handle4.axes[0].lines[i].get_data()[1]\n for i in indxa:\n # ax.plot(rng, mse1[i], 'b-', label='RMSE SNR=-10 dB')\n ax.plot(rng1, mse1[i], 'r-', marker=mkr[i], label=r'$\\rho$=0'+lbl[i])\n \n ax.plot(rng1, mse2[i], 'y-', marker=mkr[i], label=r'$\\rho$=1'+lbl[i])\n ax.plot(rng1, mse3[i], 'b-', marker=mkr[i], label=r'$\\rho$=2'+lbl[i])\n ax.plot(rng1, mse4[i], 'g-', marker=mkr[i], label=r'$\\rho=4$'+lbl[i])\n ax.legend(loc='best'),ax.grid(True);ax.set_xlabel('Array Width');\n ax.set_title(r'$P_{False Alarm}, P_{miss}$');ax.set_ylabel(r'$P_{FA}/ P_{miss}$')\n\n \n fig.set_size_inches(width, height, forward=True)\n # v--- change title and axeslabel font sizes manually\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(font_size)\n plt.tight_layout()\n pl.dump(fig, open(\"Sel_figs/plot_PD-miss.pickle\", \"wb\"))\n fig.savefig('Sel_figs/plot_PD-miss.pdf')\n #%%"
]
| [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
]
]
|
LucaNicosia/suzieq | [
"c281807ea2c4f44a9d6cd6c80fd5b71277b3cdcd"
]
| [
"suzieq/poller/worker/services/inventory.py"
]
| [
"import re\nimport numpy as np\nfrom suzieq.poller.worker.services.service import Service\n\n\nclass InventoryService(Service):\n \"\"\"Inventory service\"\"\"\n\n def _clean_data_common(self, processed_data, _):\n return processed_data\n\n def _clean_eos_data(self, processed_data, _):\n new_data = []\n for entry in processed_data:\n # Because of the JSON structure of the data, we could not\n # easily extract the data.\n for portnum in entry.get('_xcvrSlots', []):\n model = entry['_xcvrSlots'][portnum].get('modelName', '')\n if model:\n status = 'present'\n else:\n status = 'absent'\n newnentry = {\n \"name\": f\"port-{portnum}\", # Arista adds a /1 to ifname\n \"model\": model,\n \"type\": \"xcvr\",\n \"status\": status,\n \"partNum\": \"\",\n \"version\": entry['_xcvrSlots'][portnum].get('hardwareRev',\n ''),\n \"vendor\": entry['_xcvrSlots'][portnum].get('mfgName',\n '').lower(),\n \"serial\": entry['_xcvrSlots'][portnum].get('serialNum',\n ''),\n }\n new_data.append(newnentry)\n\n for fanNum in entry.get('_fanTraySlots', []):\n serial = entry['_fanTraySlots'][fanNum].get('serialNum', '')\n if serial == 'N/A':\n serial = ''\n newentry = {\n \"name\": f\"fan-{fanNum}\",\n \"type\": \"fan\",\n \"serial\": serial,\n \"model\": entry['_fanTraySlots'][fanNum]\n .get('name', ''),\n \"status\": \"present\",\n \"vendor\": entry['_fanTraySlots'][fanNum].get('vendor',\n 'Arista'),\n \"version\": ''\n }\n new_data.append(newentry)\n\n for powerNum in entry.get('_powerSupplySlots', []):\n newentry = {\n \"name\": f\"power-{powerNum}\",\n \"type\": \"power\",\n \"serial\": entry['_powerSupplySlots'][powerNum]\n .get('serialNum', ''),\n \"model\": entry['_powerSupplySlots'][powerNum]\n .get('name', ''),\n \"status\": \"present\",\n \"vendor\": entry['_powerSupplySlots'][powerNum].get(\n 'vendor', 'Arista'),\n \"version\": '' # Arista does not provide this\n }\n new_data.append(newentry)\n\n for fabricNum in entry.get('_cardSlots', []):\n model = entry['_cardSlots'][fabricNum].get('modelName', '')\n if model == \"Not Inserted\":\n model = ''\n status = 'absent'\n else:\n status = 'present'\n ftype, num, _ = re.split(r'(\\d)$', fabricNum)\n ftype = ftype.lower()\n newentry = {\n \"name\": f'{ftype}-{num}',\n \"type\": ftype,\n \"serial\": entry['_cardSlots'][fabricNum]\n .get('serialNum', ''),\n \"model\": model.lower(),\n \"status\": status,\n \"vendor\": entry['_cardSlots'][fabricNum].get('vendor',\n 'Arista'),\n \"version\": entry['_cardSlots'][fabricNum].get(\n 'hardwareRev', ''),\n }\n new_data.append(newentry)\n\n return new_data\n\n def _clean_iosxr_data(self, processed_data, raw_data):\n pass\n\n def _clean_nxos_data(self, processed_data, _):\n\n for entry in processed_data:\n name = entry.get('name', '')\n entry['status'] = 'present'\n if name.find('Ethernet') != -1:\n entry['type'] = 'xcvr'\n if entry['_sfp'].startswith(\"not \"):\n entry['status'] = 'absent'\n elif name.find('Chassis') != -1:\n entry['type'] = 'chassis'\n entry['name'] = name.lower()\n entry['model'] = ' '.join(entry['model'].split()[0:2])\n elif name.find('Slot') != -1:\n entry['type'] = 'linecard'\n entry['name'] = f'linecard-{name.split()[-1]}'\n elif name.find('Power Supply') != -1:\n entry['type'] = 'power'\n entry['name'] = f'power-{name.split()[-1]}'\n entry['model'] = ' '.join(entry['model'].split()[0:2])\n elif name.find('Fan') != -1:\n entry['type'] = 'fan'\n entry['name'] = f'fan-{name.split()[-1]}'\n entry['model'] = ' '.join(entry['model'].split()[0:2])\n\n return processed_data\n\n # pylint: 
disable=too-many-statements\n def _clean_junos_data(self, processed_data, _):\n new_data = []\n drop_indices = []\n\n # pylint: disable=too-many-nested-blocks\n for i, entry in enumerate(processed_data):\n if not entry['name']:\n drop_indices.append(i)\n continue\n\n name = entry.get('name', '').lower()\n if name == \"midplane\":\n entry['type'] = \"midplane\"\n elif name.startswith(\"fan\"):\n entry['type'] = 'fan'\n if 'tray' in name:\n entry['name'] = f'fan-{name.split()[0]}'\n elif name.startswith(('pdm', 'pem')):\n entry['type'] = 'power'\n name = name.replace(' ', '-')\n elif name.startswith('cb'):\n entry['type'] = 'mx-scb'\n name = name.replace(' ', '-')\n elif name.startswith('fpc'):\n fabid = name.split()[1]\n entry['type'] = 'linecard'\n name = f'linecard-{fabid}'\n elif name.startswith('routing'):\n entry['type'] = 'supervisor'\n name = name.replace(' ', '-')\n\n entry['name'] = name\n entry['status'] = 'present'\n entry['vendor'] = 'Juniper'\n submod = entry.get('_chassisSubMod', []) or []\n for ele in submod:\n subname = ele.get('name', [{}])[0].get('data', '')\n if not subname:\n entry.pop('_chassisSubMod', None)\n continue\n if not subname.startswith(('PIC', 'MIC')):\n # Its another entry away in this list\n continue\n subid = subname.split()[1]\n port_substr = f'{fabid}/{subid}'\n if subname.startswith('MIC'):\n nentry = {\n 'name': f'port-adapter-{fabid}/{subid}',\n 'vendor': 'Juniper',\n 'status': 'present',\n 'type': 'port-adapter',\n 'version': ele.get('version', [{}])[0].get('data', ''),\n 'serial': ele.get('serial-number', [{}])[0].get('data',\n ''),\n 'model': ele.get('model-number', [{}])[0].get('data',\n ''),\n 'partNum': ele.get('part-number', [{}])[0].get('data',\n '')\n }\n new_data.append(nentry)\n picent = ele.get('chassis-sub-sub-module', [])\n for pic in picent:\n xcvr_ent = pic.get('chassis-sub-sub-sub-module', [])\n for xent in xcvr_ent:\n nentry = self._junos_create_xcvr_entry(\n xent, port_substr)\n if nentry:\n new_data.append(nentry)\n else:\n xcvr_ent = ele.get('chassis-sub-sub-module', [])\n for xent in xcvr_ent:\n nentry = self._junos_create_xcvr_entry(\n xent, port_substr)\n if nentry:\n new_data.append(nentry)\n\n entry.pop('_chassisSubMod', None)\n\n processed_data = np.delete(processed_data, drop_indices).tolist()\n processed_data.extend(new_data)\n\n return processed_data\n\n def _junos_create_xcvr_entry(self, picent: dict, port_substr: str) -> dict:\n xname = picent.get('name', [{}])[0].get('data', '')\n pid = xname.split()[1]\n partnum = picent.get('part-number', [{}])[0].get('data', '')\n if partnum == \"NON-JNPR\":\n vendor = 'unavailable'\n else:\n vendor = 'Juniper'\n nentry = {\n \"name\": f'port-{port_substr}/{pid}',\n \"type\": \"xcvr\",\n \"status\": \"present\",\n 'vendor': vendor,\n \"version\": picent.get('version', [{}])[0].get('data', ''),\n \"model\": picent.get('description', [{}])[0].get('data', ''),\n \"partNum\": partnum,\n \"serial\": picent.get('serial-number', [{}])[0].get(\n 'data', ''),\n }\n\n return nentry\n\n def _clean_ios_data(self, processed_data, raw_data):\n pass\n"
]
| [
[
"numpy.delete"
]
]
|
rartino/hands-on-2 | [
"8f31b978f295a761dc0a7ae093184b3dbfa7e199"
]
| [
"dscribe/dscribe/descriptors/mbtr.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"Copyright 2019 DScribe developers\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport sys\nimport math\nimport numpy as np\n\nfrom scipy.sparse import lil_matrix, coo_matrix\n\nfrom ase import Atoms\nimport ase.data\n\nfrom dscribe.core import System\nfrom dscribe.descriptors import Descriptor\n#from dscribe.libmbtr.mbtrwrapper import MBTRWrapper\ntry:\n from dscribe.ext import MBTRWrapper\nexcept ImportError:\n pass\nimport dscribe.utils.geometry\n\n\nclass MBTR(Descriptor):\n \"\"\"Implementation of the Many-body tensor representation up to :math:`k=3`.\n\n You can choose which terms to include by providing a dictionary in the\n k1, k2 or k3 arguments. This dictionary should contain information\n under three keys: \"geometry\", \"grid\" and \"weighting\". See the examples\n below for how to format these dictionaries.\n\n You can use this descriptor for finite and periodic systems. When dealing\n with periodic systems or when using machine learning models that use the\n Euclidean norm to measure distance between vectors, it is advisable to use\n some form of normalization.\n\n For the geometry functions the following choices are available:\n\n * :math:`k=1`:\n\n * \"atomic_number\": The atomic numbers.\n\n * :math:`k=2`:\n\n * \"distance\": Pairwise distance in angstroms.\n * \"inverse_distance\": Pairwise inverse distance in 1/angstrom.\n\n * :math:`k=3`:\n\n * \"angle\": Angle in degrees.\n * \"cosine\": Cosine of the angle.\n\n For the weighting the following functions are available:\n\n * :math:`k=1`:\n\n * \"unity\": No weighting.\n\n * :math:`k=2`:\n\n * \"unity\": No weighting.\n * \"exp\" or \"exponential\": Weighting of the form :math:`e^{-sx}`\n\n * :math:`k=3`:\n\n * \"unity\": No weighting.\n * \"exp\" or \"exponential\": Weighting of the form :math:`e^{-sx}`\n\n The exponential weighting is motivated by the exponential decay of screened\n Coulombic interactions in solids. In the exponential weighting the\n parameters **cutoff** determines the value of the weighting function after\n which the rest of the terms will be ignored and the parameter **scale**\n corresponds to :math:`s`. The meaning of :math:`x` changes for different\n terms as follows:\n\n * :math:`k=2`: :math:`x` = Distance between A->B\n * :math:`k=3`: :math:`x` = Distance from A->B->C->A.\n\n In the grid setup *min* is the minimum value of the axis, *max* is the\n maximum value of the axis, *sigma* is the standard deviation of the\n gaussian broadening and *n* is the number of points sampled on the\n grid.\n\n If flatten=False, a list of dense np.ndarrays for each k in ascending order\n is returned. These arrays are of dimension (n_elements x n_elements x\n n_grid_points), where the elements are sorted in ascending order by their\n atomic number.\n\n If flatten=True, a scipy.sparse.coo_matrix is returned. This sparse matrix\n is of size (1, n_features), where n_features is given by\n get_number_of_features(). 
This vector is ordered so that the different\n k-terms are ordered in ascending order, and within each k-term the\n distributions at each entry (i, j, h) of the tensor are ordered in an\n ascending order by (i * n_elements) + (j * n_elements) + (h * n_elements).\n\n This implementation does not support the use of a non-identity correlation\n matrix.\n \"\"\"\n def __init__(\n self,\n species,\n periodic,\n k1=None,\n k2=None,\n k3=None,\n normalize_gaussians=True,\n normalization=\"none\",\n flatten=True,\n sparse=False\n ):\n \"\"\"\n Args:\n species (iterable): The chemical species as a list of atomic\n numbers or as a list of chemical symbols. Notice that this is not\n the atomic numbers that are present for an individual system, but\n should contain all the elements that are ever going to be\n encountered when creating the descriptors for a set of systems.\n Keeping the number of chemical speices as low as possible is\n preferable.\n periodic (bool): Determines whether the system is considered to be\n periodic.\n k1 (dict): Setup for the k=1 term. For example::\n\n k1 = {\n \"geometry\": {\"function\": \"atomic_number\"},\n \"grid\": {\"min\": 1, \"max\": 10, \"sigma\": 0.1, \"n\": 50}\n }\n\n k2 (dict): Dictionary containing the setup for the k=2 term.\n Contains setup for the used geometry function, discretization and\n weighting function. For example::\n\n k2 = {\n \"geometry\": {\"function\": \"inverse_distance\"},\n \"grid\": {\"min\": 0.1, \"max\": 2, \"sigma\": 0.1, \"n\": 50},\n \"weighting\": {\"function\": \"exp\", \"scale\": 0.75, \"cutoff\": 1e-2}\n }\n\n k3 (dict): Dictionary containing the setup for the k=3 term.\n Contains setup for the used geometry function, discretization and\n weighting function. For example::\n\n k3 = {\n \"geometry\": {\"function\": \"angle\"},\n \"grid\": {\"min\": 0, \"max\": 180, \"sigma\": 5, \"n\": 50},\n \"weighting\" : {\"function\": \"exp\", \"scale\": 0.5, \"cutoff\": 1e-3}\n }\n\n normalize_gaussians (bool): Determines whether the gaussians are\n normalized to an area of 1. Defaults to True. If False, the\n normalization factor is dropped and the gaussians have the form.\n :math:`e^{-(x-\\mu)^2/2\\sigma^2}`\n normalization (str): Determines the method for normalizing the\n output. The available options are:\n\n * \"none\": No normalization.\n * \"l2_each\": Normalize the Euclidean length of each k-term\n individually to unity.\n * \"n_atoms\": Normalize the output by dividing it with the number\n of atoms in the system. If the system is periodic, the number\n of atoms is determined from the given unit cell.\n\n flatten (bool): Whether the output should be flattened to a 1D\n array. If False, a dictionary of the different tensors is\n provided, containing the values under keys: \"k1\", \"k2\", and\n \"k3\":\n sparse (bool): Whether the output should be a sparse matrix or a\n dense numpy array.\n \"\"\"\n if sparse and not flatten:\n raise ValueError(\n \"Cannot provide a non-flattened output in sparse output because\"\n \" only 2D sparse matrices are supported. 
If you want a \"\n \"non-flattened output, please specify sparse=False in the MBTR\"\n \"constructor.\"\n )\n super().__init__(periodic=periodic, flatten=flatten, sparse=sparse)\n self.system = None\n self.k1 = k1\n self.k2 = k2\n self.k3 = k3\n\n # Setup the involved chemical species\n self.species = species\n\n self.normalization = normalization\n self.normalize_gaussians = normalize_gaussians\n\n # Initializing .create() level variables\n self._interaction_limit = None\n\n # Check that weighting function is specified for periodic systems\n if self.periodic:\n if self.k2 is not None:\n valid = False\n weighting = self.k2.get(\"weighting\")\n if weighting is not None:\n function = weighting.get(\"function\")\n if function is not None:\n if function != \"unity\":\n valid = True\n if not valid:\n raise ValueError(\n \"Periodic systems need to have a weighting function.\"\n )\n\n if self.k3 is not None:\n valid = False\n weighting = self.k3.get(\"weighting\")\n if weighting is not None:\n function = weighting.get(\"function\")\n if function is not None:\n if function != \"unity\":\n valid = True\n\n if not valid:\n raise ValueError(\n \"Periodic systems need to have a weighting function.\"\n )\n\n def check_grid(self, grid):\n \"\"\"Used to ensure that the given grid settings are valid.\n\n Args:\n grid(dict): Dictionary containing the grid setup.\n \"\"\"\n msg = \"The grid information is missing the value for {}\"\n val_names = [\"min\", \"max\", \"sigma\", \"n\"]\n for val_name in val_names:\n try:\n grid[val_name]\n except Exception:\n raise KeyError(msg.format(val_name))\n\n # Make the n into integer\n grid[\"n\"] = int(grid[\"n\"])\n if grid[\"min\"] >= grid[\"max\"]:\n raise ValueError(\n \"The min value should be smaller than the max value.\"\n )\n\n @property\n def k1(self):\n return self._k1\n\n @k1.setter\n def k1(self, value):\n if value is not None:\n\n # Check that only valid keys are used in the setups\n for key in value.keys():\n valid_keys = set((\"geometry\", \"grid\", \"weighting\"))\n if key not in valid_keys:\n raise ValueError(\"The given setup contains the following invalid key: {}\".format(key))\n\n # Check the geometry function\n geom_func = value[\"geometry\"].get(\"function\")\n if geom_func is not None:\n valid_geom_func = set((\"atomic_number\",))\n if geom_func not in valid_geom_func:\n raise ValueError(\n \"Unknown geometry function specified for k=1. Please use one of\"\n \" the following: {}\".format(valid_geom_func)\n )\n\n # Check the weighting function\n weighting = value.get(\"weighting\")\n if weighting is not None:\n valid_weight_func = set((\"unity\",))\n weight_func = weighting.get(\"function\")\n if weight_func not in valid_weight_func:\n raise ValueError(\n \"Unknown weighting function specified for k=1. 
Please use one of\"\n \" the following: {}\".format(valid_weight_func)\n )\n\n # Check grid\n self.check_grid(value[\"grid\"])\n self._k1 = value\n\n @property\n def k2(self):\n return self._k2\n\n @k2.setter\n def k2(self, value):\n if value is not None:\n\n # Check that only valid keys are used in the setups\n for key in value.keys():\n valid_keys = set((\"geometry\", \"grid\", \"weighting\"))\n if key not in valid_keys:\n raise ValueError(\"The given setup contains the following invalid key: {}\".format(key))\n\n # Check the geometry function\n geom_func = value[\"geometry\"].get(\"function\")\n if geom_func is not None:\n valid_geom_func = set((\"distance\", \"inverse_distance\"))\n if geom_func not in valid_geom_func:\n raise ValueError(\n \"Unknown geometry function specified for k=2. Please use one of\"\n \" the following: {}\".format(valid_geom_func)\n )\n\n # Check the weighting function\n weighting = value.get(\"weighting\")\n if weighting is not None:\n valid_weight_func = set((\"unity\", \"exponential\", \"exp\"))\n weight_func = weighting.get(\"function\")\n if weight_func not in valid_weight_func:\n raise ValueError(\n \"Unknown weighting function specified for k=2. Please use one of\"\n \" the following: {}\".format(valid_weight_func)\n )\n else:\n if weight_func == \"exponential\" or weight_func == \"exp\":\n needed = (\"cutoff\", \"scale\")\n for pname in needed:\n param = weighting.get(pname)\n if param is None:\n raise ValueError(\n \"Missing value for '{}' in the k=2 weighting.\".format(key)\n )\n\n # Check grid\n self.check_grid(value[\"grid\"])\n self._k2 = value\n\n @property\n def k3(self):\n return self._k3\n\n @k3.setter\n def k3(self, value):\n if value is not None:\n\n # Check that only valid keys are used in the setups\n for key in value.keys():\n valid_keys = set((\"geometry\", \"grid\", \"weighting\"))\n if key not in valid_keys:\n raise ValueError(\"The given setup contains the following invalid key: {}\".format(key))\n\n # Check the geometry function\n geom_func = value[\"geometry\"].get(\"function\")\n if geom_func is not None:\n valid_geom_func = set((\"angle\", \"cosine\"))\n if geom_func not in valid_geom_func:\n raise ValueError(\n \"Unknown geometry function specified for k=2. Please use one of\"\n \" the following: {}\".format(valid_geom_func)\n )\n\n # Check the weighting function\n weighting = value.get(\"weighting\")\n if weighting is not None:\n valid_weight_func = set((\"unity\", \"exponential\", \"exp\"))\n weight_func = weighting.get(\"function\")\n if weight_func not in valid_weight_func:\n raise ValueError(\n \"Unknown weighting function specified for k=2. 
Please use one of\"\n \" the following: {}\".format(valid_weight_func)\n )\n else:\n if weight_func == \"exponential\" or weight_func == \"exp\":\n needed = (\"cutoff\", \"scale\")\n for pname in needed:\n param = weighting.get(pname)\n if param is None:\n raise ValueError(\n \"Missing value for '{}' in the k=3 weighting.\".format(key)\n )\n\n # Check grid\n self.check_grid(value[\"grid\"])\n self._k3 = value\n\n @property\n def species(self):\n return self._species\n\n @species.setter\n def species(self, value):\n \"\"\"Used to check the validity of given atomic numbers and to initialize\n the C-memory layout for them.\n\n Args:\n value(iterable): Chemical species either as a list of atomic\n numbers or list of chemical symbols.\n \"\"\"\n # The species are stored as atomic numbers for internal use.\n self._set_species(value)\n\n # Setup mappings between atom indices and types together with some\n # statistics\n self.atomic_number_to_index = {}\n self.index_to_atomic_number = {}\n for i_atom, atomic_number in enumerate(self._atomic_numbers):\n self.atomic_number_to_index[atomic_number] = i_atom\n self.index_to_atomic_number[i_atom] = atomic_number\n self.n_elements = len(self._atomic_numbers)\n self.max_atomic_number = max(self._atomic_numbers)\n self.min_atomic_number = min(self._atomic_numbers)\n\n @property\n def normalization(self):\n return self._normalization\n\n @normalization.setter\n def normalization(self, value):\n \"\"\"Checks that the given normalization is valid.\n\n Args:\n value(str): The normalization method to use.\n \"\"\"\n norm_options = set((\"l2_each\", \"none\", \"n_atoms\"))\n if value not in norm_options:\n raise ValueError(\n \"Unknown normalization option given. Please use one of the \"\n \"following: {}.\".format(\", \".join([str(x) for x in norm_options]))\n )\n self._normalization = value\n\n def get_k1_axis(self):\n \"\"\"Used to get the discretized axis for geometry function of the k=1\n term.\n\n Returns:\n np.ndarray: The discretized axis for the k=1 term.\n \"\"\"\n start = self.k1[\"grid\"][\"min\"]\n stop = self.k1[\"grid\"][\"max\"]\n n = self.k1[\"grid\"][\"n\"]\n\n return np.linspace(start, stop, n)\n\n def get_k2_axis(self):\n \"\"\"Used to get the discretized axis for geometry function of the k=2\n term.\n\n Returns:\n np.ndarray: The discretized axis for the k=2 term.\n \"\"\"\n start = self.k2[\"grid\"][\"min\"]\n stop = self.k2[\"grid\"][\"max\"]\n n = self.k2[\"grid\"][\"n\"]\n\n return np.linspace(start, stop, n)\n\n def get_k3_axis(self):\n \"\"\"Used to get the discretized axis for geometry function of the k=3\n term.\n\n Returns:\n np.ndarray: The discretized axis for the k=3 term.\n \"\"\"\n start = self.k3[\"grid\"][\"min\"]\n stop = self.k3[\"grid\"][\"max\"]\n n = self.k3[\"grid\"][\"n\"]\n\n return np.linspace(start, stop, n)\n\n def create(self, system, n_jobs=1, verbose=False):\n \"\"\"Return MBTR output for the given systems.\n\n Args:\n system (:class:`ase.Atoms` or list of :class:`ase.Atoms`): One or many atomic structures.\n n_jobs (int): Number of parallel jobs to instantiate. Parallellizes\n the calculation across samples. Defaults to serial calculation\n with n_jobs=1.\n verbose(bool): Controls whether to print the progress of each job\n into to the console.\n\n Returns:\n np.ndarray | scipy.sparse.csr_matrix | list: MBTR for the\n given systems. The return type depends on the 'sparse' and\n 'flatten'-attributes. For flattened output a single numpy array or\n sparse scipy.csr_matrix is returned. 
The first dimension is\n determined by the amount of systems. If the output is not\n flattened, dictionaries containing the MBTR tensors for each k-term\n are returned.\n \"\"\"\n # If single system given, skip the parallelization\n if isinstance(system, (Atoms, System)):\n return self.create_single(system)\n else:\n self._check_system_list(system)\n\n # Combine input arguments\n inp = [(i_sys,) for i_sys in system]\n\n # Here we precalculate the size for each job to preallocate memory.\n if self.flatten:\n n_samples = len(system)\n k, m = divmod(n_samples, n_jobs)\n jobs = (inp[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n_jobs))\n output_sizes = [len(job) for job in jobs]\n else:\n output_sizes = None\n\n # Create in parallel\n output = self.create_parallel(inp, self.create_single, n_jobs, output_sizes, verbose=verbose)\n\n return output\n\n def create_single(self, system):\n \"\"\"Return the many-body tensor representation for the given system.\n\n Args:\n system (:class:`ase.Atoms` | :class:`.System`): Input system.\n\n Returns:\n dict | np.ndarray | scipy.sparse.coo_matrix: The return type is\n specified by the 'flatten' and 'sparse'-parameters. If the output\n is not flattened, a dictionary containing of MBTR outputs as numpy\n arrays is created. Each output is under a \"kX\" key. If the output\n is flattened, a single concatenated output vector is returned,\n either as a sparse or a dense vector.\n \"\"\"\n # Transform the input system into the internal System-object\n system = self.get_system(system)\n\n # Ensuring variables are re-initialized when a new system is introduced\n self.system = system\n self._interaction_limit = len(system)\n\n # Check that the system does not have elements that are not in the list\n # of atomic numbers\n self.check_atomic_numbers(system.get_atomic_numbers())\n\n mbtr = {}\n if self.k1 is not None:\n mbtr[\"k1\"] = self._get_k1(system)\n if self.k2 is not None:\n mbtr[\"k2\"] = self._get_k2(system)\n if self.k3 is not None:\n mbtr[\"k3\"] = self._get_k3(system)\n\n # Handle normalization\n if self.normalization == \"l2_each\":\n if self.flatten is True:\n for key, value in mbtr.items():\n i_data = np.array(value.tocsr().data)\n i_norm = np.linalg.norm(i_data)\n mbtr[key] = value/i_norm\n else:\n for key, value in mbtr.items():\n i_data = value.ravel()\n i_norm = np.linalg.norm(i_data)\n mbtr[key] = value/i_norm\n elif self.normalization == \"n_atoms\":\n n_atoms = len(self.system)\n if self.flatten is True:\n for key, value in mbtr.items():\n mbtr[key] = value/n_atoms\n else:\n for key, value in mbtr.items():\n mbtr[key] = value/n_atoms\n\n # Flatten output if requested\n if self.flatten:\n length = 0\n\n datas = []\n rows = []\n cols = []\n for key in sorted(mbtr.keys()):\n tensor = mbtr[key]\n size = tensor.shape[1]\n coo = tensor.tocoo()\n datas.append(coo.data)\n rows.append(coo.row)\n cols.append(coo.col + length)\n length += size\n\n datas = np.concatenate(datas)\n rows = np.concatenate(rows)\n cols = np.concatenate(cols)\n mbtr = coo_matrix((datas, (rows, cols)), shape=[1, length], dtype=np.float32)\n\n # Make into a dense array if requested\n if not self.sparse:\n mbtr = mbtr.toarray()\n\n return mbtr\n\n def get_number_of_features(self):\n \"\"\"Used to inquire the final number of features that this descriptor\n will have.\n\n Returns:\n int: Number of features for this descriptor.\n \"\"\"\n n_features = 0\n n_elem = self.n_elements\n\n if self.k1 is not None:\n n_k1_grid = self.k1[\"grid\"][\"n\"]\n n_k1 = 
n_elem*n_k1_grid\n n_features += n_k1\n if self.k2 is not None:\n n_k2_grid = self.k2[\"grid\"][\"n\"]\n n_k2 = (n_elem*(n_elem+1)/2)*n_k2_grid\n n_features += n_k2\n if self.k3 is not None:\n n_k3_grid = self.k3[\"grid\"][\"n\"]\n n_k3 = (n_elem*n_elem*(n_elem+1)/2)*n_k3_grid\n n_features += n_k3\n\n return int(n_features)\n\n def get_location(self, species):\n \"\"\"Can be used to query the location of a species combination in the\n the flattened output.\n\n Args:\n species(tuple): A tuple containing a species combination as\n chemical symbols or atomic numbers. The tuple can be for example\n (\"H\"), (\"H\", \"O\") or (\"H\", \"O\", \"H\").\n\n Returns:\n slice: slice containing the location of the specified species\n combination. The location is given as a python slice-object, that\n can be directly used to target ranges in the output.\n\n Raises:\n ValueError: If the requested species combination is not in the\n output or if invalid species defined.\n \"\"\"\n # Check that the corresponding part is calculated\n k = len(species)\n term = getattr(self, \"k{}\".format(k))\n if term is None:\n raise ValueError(\n \"Cannot retrieve the location for {}, as the term k{} has not \"\n \"been specied.\".format(species, k)\n )\n\n # Change chemical elements into atomic numbers\n numbers = []\n for specie in species:\n if isinstance(specie, str):\n try:\n specie = ase.data.atomic_numbers[specie]\n except KeyError:\n raise ValueError(\"Invalid chemical species: {}\".format(specie))\n numbers.append(specie)\n\n # Change into internal indexing\n numbers = [self.atomic_number_to_index[x] for x in numbers]\n n_elem = self.n_elements\n\n # k=1\n if len(numbers) == 1:\n n1 = self.k1[\"grid\"][\"n\"]\n i = numbers[0]\n m = i\n start = int(m*n1)\n end = int((m+1)*n1)\n\n # k=2\n if len(numbers) == 2:\n if numbers[0] > numbers[1]:\n numbers = list(reversed(numbers))\n\n n2 = self.k2[\"grid\"][\"n\"]\n i = numbers[0]\n j = numbers[1]\n\n # This is the index of the spectrum. It is given by enumerating the\n # elements of an upper triangular matrix from left to right and top\n # to bottom.\n m = j + i*n_elem - i*(i+1)/2\n\n offset = 0\n if self.k1 is not None:\n n1 = self.k1[\"grid\"][\"n\"]\n offset += n_elem*n1\n start = int(offset+m*n2)\n end = int(offset+(m+1)*n2)\n\n # k=3\n if len(numbers) == 3:\n if numbers[0] > numbers[2]:\n numbers = list(reversed(numbers))\n\n n3 = self.k3[\"grid\"][\"n\"]\n i = numbers[0]\n j = numbers[1]\n k = numbers[2]\n\n # This is the index of the spectrum. It is given by enumerating the\n # elements of a three-dimensional array where for valid elements\n # k>=i. 
The enumeration begins from [0, 0, 0], and ends at [n_elem,\n # n_elem, n_elem], looping the elements in the order k, i, j.\n m = j*n_elem*(n_elem+1)/2 + k + i*n_elem - i*(i+1)/2\n\n offset = 0\n if self.k1 is not None:\n n1 = self.k1[\"grid\"][\"n\"]\n offset += n_elem*n1\n if self.k2 is not None:\n n2 = self.k2[\"grid\"][\"n\"]\n offset += (n_elem*(n_elem+1)/2)*n2\n start = int(offset+m*n3)\n end = int(offset+(m+1)*n3)\n\n return slice(start, end)\n\n def _make_new_k1map(self, kx_map):\n kx_map = dict(kx_map)\n new_kx_map = {}\n\n for key, value in kx_map.items():\n new_key = tuple([int(key)])\n new_kx_map[new_key] = np.array(value, dtype=np.float32)\n\n return new_kx_map\n\n def _make_new_kmap(self, kx_map):\n kx_map = dict(kx_map)\n new_kx_map = {}\n\n for key, value in kx_map.items():\n new_key = tuple(int(x) for x in key.split(\",\"))\n new_kx_map[new_key] = np.array(value, dtype=np.float32)\n\n return new_kx_map\n\n\n def _get_k1(self, system):\n \"\"\"Calculates the second order terms where the scalar mapping is the\n inverse distance between atoms.\n\n Returns:\n 1D ndarray: flattened K2 values.\n \"\"\"\n grid = self.k1[\"grid\"]\n start = grid[\"min\"]\n stop = grid[\"max\"]\n n = grid[\"n\"]\n sigma = grid[\"sigma\"]\n\n # Determine the geometry function\n geom_func_name = self.k1[\"geometry\"][\"function\"]\n\n cmbtr = MBTRWrapper(\n self.atomic_number_to_index,\n self._interaction_limit,\n np.zeros((len(system), 3), dtype=int)\n )\n\n k1_map = cmbtr.get_k1(\n system.get_atomic_numbers(),\n geom_func_name.encode(),\n b\"unity\",\n {},\n start,\n stop,\n sigma,\n n,\n )\n\n k1_map = self._make_new_k1map(k1_map)\n\n # Depending on flattening, use either a sparse matrix or a dense one.\n n_elem = self.n_elements\n if self.flatten:\n k1 = lil_matrix((1, n_elem*n), dtype=np.float32)\n else:\n k1 = np.zeros((n_elem, n), dtype=np.float32)\n\n for key, gaussian_sum in k1_map.items():\n i = key[0]\n\n # Denormalize if requested\n if not self.normalize_gaussians:\n max_val = 1/(sigma*math.sqrt(2*math.pi))\n gaussian_sum /= max_val\n\n if self.flatten:\n start = i*n\n end = (i+1)*n\n k1[0, start:end] = gaussian_sum\n else:\n k1[i, :] = gaussian_sum\n\n return k1\n\n def _get_k2(self, system):\n \"\"\"Calculates the second order terms where the scalar mapping is the\n inverse distance between atoms.\n\n Returns:\n 1D ndarray: flattened K2 values.\n \"\"\"\n grid = self.k2[\"grid\"]\n start = grid[\"min\"]\n stop = grid[\"max\"]\n n = grid[\"n\"]\n sigma = grid[\"sigma\"]\n # Determine the weighting function and possible radial cutoff\n radial_cutoff = None\n weighting = self.k2.get(\"weighting\")\n parameters = {}\n if weighting is not None:\n weighting_function = weighting[\"function\"]\n if weighting_function == \"exponential\" or weighting_function == \"exp\":\n scale = weighting[\"scale\"]\n cutoff = weighting[\"cutoff\"]\n if scale != 0:\n radial_cutoff = -math.log(cutoff)/scale\n parameters = {\n b\"scale\": scale,\n b\"cutoff\": cutoff\n }\n else:\n weighting_function = \"unity\"\n\n # Determine the geometry function\n geom_func_name = self.k2[\"geometry\"][\"function\"]\n\n # If needed, create the extended system\n if self.periodic:\n centers = system.get_positions()\n ext_system, cell_indices = dscribe.utils.geometry.get_extended_system(\n system,\n radial_cutoff,\n centers,\n return_cell_indices=True\n )\n ext_system = System.from_atoms(ext_system)\n else:\n ext_system = system\n cell_indices = np.zeros((len(system), 3), dtype=int)\n\n cmbtr = MBTRWrapper(\n 
self.atomic_number_to_index,\n self._interaction_limit,\n cell_indices\n )\n\n # If radial cutoff is finite, use it to calculate the sparse\n # distance matrix to reduce computational complexity from O(n^2) to\n # O(n log(n))\n n_atoms = len(ext_system)\n if radial_cutoff is not None:\n dmat = ext_system.get_distance_matrix_within_radius(radial_cutoff)\n adj_list = dscribe.utils.geometry.get_adjacency_list(dmat)\n dmat_dense = np.full((n_atoms, n_atoms), sys.float_info.max) # The non-neighbor values are treated as \"infinitely far\".\n dmat_dense[dmat.row, dmat.col] = dmat.data\n # If no weighting is used, the full distance matrix is calculated\n else:\n dmat_dense = ext_system.get_distance_matrix()\n adj_list = np.tile(np.arange(n_atoms), (n_atoms, 1))\n\n k2_map = cmbtr.get_k2(\n ext_system.get_atomic_numbers(),\n dmat_dense,\n adj_list,\n geom_func_name.encode(),\n weighting_function.encode(),\n parameters,\n start,\n stop,\n sigma,\n n,\n )\n\n k2_map = self._make_new_kmap(k2_map)\n\n\n # Depending of flattening, use either a sparse matrix or a dense one.\n n_elem = self.n_elements\n if self.flatten:\n k2 = lil_matrix(\n (1, int(n_elem*(n_elem+1)/2*n)), dtype=np.float32)\n else:\n k2 = np.zeros((self.n_elements, self.n_elements, n), dtype=np.float32)\n\n for key, gaussian_sum in k2_map.items():\n i = key[0]\n j = key[1]\n\n # This is the index of the spectrum. It is given by enumerating the\n # elements of an upper triangular matrix from left to right and top\n # to bottom.\n m = int(j + i*n_elem - i*(i+1)/2)\n\n # Denormalize if requested\n if not self.normalize_gaussians:\n max_val = 1/(sigma*math.sqrt(2*math.pi))\n gaussian_sum /= max_val\n\n if self.flatten:\n start = m*n\n end = (m + 1)*n\n k2[0, start:end] = gaussian_sum\n else:\n k2[i, j, :] = gaussian_sum\n\n return k2\n\n def _get_k3(self, system):\n \"\"\"Calculates the third order terms.\n\n Returns:\n 1D ndarray: flattened K3 values.\n \"\"\"\n grid = self.k3[\"grid\"]\n start = grid[\"min\"]\n stop = grid[\"max\"]\n n = grid[\"n\"]\n sigma = grid[\"sigma\"]\n\n # Determine the weighting function and possible radial cutoff\n radial_cutoff = None\n weighting = self.k3.get(\"weighting\")\n parameters = {}\n if weighting is not None:\n weighting_function = weighting[\"function\"]\n if weighting_function == \"exponential\" or weighting_function == \"exp\":\n scale = weighting[\"scale\"]\n cutoff = weighting[\"cutoff\"]\n if scale != 0:\n radial_cutoff = -0.5*math.log(cutoff)/scale\n parameters = {\n b\"scale\": scale,\n b\"cutoff\": cutoff\n }\n else:\n weighting_function = \"unity\"\n\n # Determine the geometry function\n geom_func_name = self.k3[\"geometry\"][\"function\"]\n\n # If needed, create the extended system\n if self.periodic:\n centers = system.get_positions()\n ext_system, cell_indices = dscribe.utils.geometry.get_extended_system(\n system,\n radial_cutoff,\n centers,\n return_cell_indices=True\n )\n ext_system = System.from_atoms(ext_system)\n else:\n ext_system = system\n cell_indices = np.zeros((len(system), 3), dtype=int)\n\n cmbtr = MBTRWrapper(\n self.atomic_number_to_index,\n self._interaction_limit,\n cell_indices\n )\n\n # If radial cutoff is finite, use it to calculate the sparse\n # distance matrix to reduce computational complexity from O(n^2) to\n # O(n log(n))\n n_atoms = len(ext_system)\n if radial_cutoff is not None:\n dmat = ext_system.get_distance_matrix_within_radius(radial_cutoff)\n adj_list = dscribe.utils.geometry.get_adjacency_list(dmat)\n dmat_dense = np.full((n_atoms, n_atoms), 
sys.float_info.max) # The non-neighbor values are treated as \"infinitely far\".\n dmat_dense[dmat.col, dmat.row] = dmat.data\n # If no weighting is used, the full distance matrix is calculated\n else:\n dmat_dense = ext_system.get_distance_matrix()\n adj_list = np.tile(np.arange(n_atoms), (n_atoms, 1))\n\n k3_map = cmbtr.get_k3(\n ext_system.get_atomic_numbers(),\n dmat_dense,\n adj_list,\n geom_func_name.encode(),\n weighting_function.encode(),\n parameters,\n start,\n stop,\n sigma,\n n,\n )\n\n k3_map = self._make_new_kmap(k3_map)\n # Depending of flattening, use either a sparse matrix or a dense one.\n n_elem = self.n_elements\n if self.flatten:\n k3 = lil_matrix(\n (1, int(n_elem*n_elem*(n_elem+1)/2*n)), dtype=np.float32\n )\n else:\n k3 = np.zeros((n_elem, n_elem, n_elem, n), dtype=np.float32)\n\n for key, gaussian_sum in k3_map.items():\n i = key[0]\n j = key[1]\n k = key[2]\n\n # This is the index of the spectrum. It is given by enumerating the\n # elements of a three-dimensional array where for valid elements\n # k>=i. The enumeration begins from [0, 0, 0], and ends at [n_elem,\n # n_elem, n_elem], looping the elements in the order j, i, k.\n m = int(j*n_elem*(n_elem+1)/2 + k + i*n_elem - i*(i+1)/2)\n\n # Denormalize if requested\n if not self.normalize_gaussians:\n max_val = 1/(sigma*math.sqrt(2*math.pi))\n gaussian_sum /= max_val\n\n if self.flatten:\n start = m*n\n end = (m+1)*n\n k3[0, start:end] = gaussian_sum\n else:\n k3[i, j, k, :] = gaussian_sum\n\n return k3\n"
]
| [
[
"numpy.concatenate",
"scipy.sparse.coo_matrix",
"numpy.array",
"numpy.full",
"numpy.linalg.norm",
"numpy.zeros",
"numpy.arange",
"scipy.sparse.lil_matrix",
"numpy.linspace"
]
]
|
Kevin-Chen0/deep-reinforcement-learning | [
"9715e29e66208d5fb0689fd7799f3bf33e0f914c"
]
| [
"p2_continuous-control/test_run.py"
]
| [
"import gym\nimport random\nimport torch\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\nfrom pyvirtualdisplay import Display\nfrom unityagents import UnityEnvironment\n\n# initialize environment\nenv = UnityEnvironment(file_name=\"Reacher_Linux_NoVis/Reacher.x86_64\")\n# get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n# reset the environment\nenv_info = env.reset(train_mode=True)[brain_name]\n# number of agents in the environment\nnum_agents = len(env_info.agents)\nprint('Number of agents:', num_agents)\n# number of actions\naction_size = brain.vector_action_space_size\nprint('Number of actions:', action_size)\n# examine the state space\nstates = env_info.vector_observations\nstate_size = states.shape[1]\nprint('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))\nprint('The state for the first agent looks like:', states[0])\n\n\n# Train Agent ##################################################################\n\nfrom agent import DDPG\nagent = DDPG(state_size=state_size, action_size=action_size, random_seed=2)\n\ndef train(n_episodes=100, max_t=1000):\n \"\"\"Deep Deterministic Policy Gradiant.\n\n Params\n ======\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timesteps per episode\n \"\"\"\n scores = [] # initialize the score\n scores_window = deque(maxlen=100) # last 100 scores\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name]\n state = env_info.vector_observations[0]\n agent.reset()\n score = 0\n for t in range(max_t):\n action = agent.act(state) # select an action (no eps unlike in DQN)\n env_info = env.step(action)[brain_name] # send the action to env\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=30.0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\n break\n return scores\n\n\nscores = train(n_episodes=500)\n\n# Plot the scores ##############################################################\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(len(scores)), scores)\nplt.plot(np.arange(len(scores)), pd.DataFrame(scores).rolling(20).mean())\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.show()\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"numpy.mean",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
]
|